-rw-r--r--CREDITS5
-rw-r--r--Documentation/Changes11
-rw-r--r--Documentation/DocBook/device-drivers.tmpl2
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-expbuf.xml8
-rw-r--r--Documentation/assoc_array.txt6
-rw-r--r--Documentation/block/null_blk.txt72
-rw-r--r--Documentation/device-mapper/cache.txt10
-rw-r--r--Documentation/devicetree/bindings/arm/omap/mpu.txt8
-rw-r--r--Documentation/devicetree/bindings/arm/pmu.txt1
-rw-r--r--Documentation/devicetree/bindings/arm/samsung/exynos-adc.txt2
-rw-r--r--Documentation/devicetree/bindings/clock/exynos4-clock.txt2
-rw-r--r--Documentation/devicetree/bindings/clock/exynos5250-clock.txt4
-rw-r--r--Documentation/devicetree/bindings/clock/exynos5420-clock.txt2
-rw-r--r--Documentation/devicetree/bindings/clock/exynos5440-clock.txt2
-rw-r--r--Documentation/devicetree/bindings/gpio/8xxx_gpio.txt66
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-omap.txt3
-rw-r--r--Documentation/devicetree/bindings/mmc/ti-omap.txt54
-rw-r--r--Documentation/devicetree/bindings/net/davinci_emac.txt2
-rw-r--r--Documentation/devicetree/bindings/net/fsl-fec.txt2
-rw-r--r--Documentation/devicetree/bindings/net/smsc-lan91c111.txt4
-rw-r--r--Documentation/devicetree/bindings/rng/qcom,prng.txt17
-rw-r--r--Documentation/devicetree/bindings/spi/nvidia,tegra20-spi.txt5
-rw-r--r--Documentation/devicetree/bindings/vendor-prefixes.txt2
-rw-r--r--Documentation/gpio/00-INDEX14
-rw-r--r--Documentation/gpio/board.txt115
-rw-r--r--Documentation/gpio/consumer.txt197
-rw-r--r--Documentation/gpio/driver.txt75
-rw-r--r--Documentation/gpio/gpio-legacy.txt (renamed from Documentation/gpio.txt)0
-rw-r--r--Documentation/gpio/gpio.txt119
-rw-r--r--Documentation/gpio/sysfs.txt155
-rw-r--r--Documentation/kernel-parameters.txt2
-rw-r--r--Documentation/mic/mpssd/mpssd.c18
-rw-r--r--Documentation/module-signing.txt240
-rw-r--r--Documentation/networking/ip-sysctl.txt8
-rw-r--r--Documentation/networking/packet_mmap.txt10
-rw-r--r--MAINTAINERS118
-rw-r--r--Makefile24
-rw-r--r--arch/arc/Kconfig1
-rw-r--r--arch/arc/include/uapi/asm/unistd.h11
-rw-r--r--arch/arc/kernel/perf_event.c4
-rw-r--r--arch/arm/boot/dts/am335x-base0033.dts79
-rw-r--r--arch/arm/boot/dts/am335x-igep0033.dtsi29
-rw-r--r--arch/arm/boot/dts/am3517-evm.dts6
-rw-r--r--arch/arm/boot/dts/am3517.dtsi63
-rw-r--r--arch/arm/boot/dts/armada-370-db.dts28
-rw-r--r--arch/arm/boot/dts/armada-370-xp.dtsi2
-rw-r--r--arch/arm/boot/dts/armada-xp-mv78230.dtsi24
-rw-r--r--arch/arm/boot/dts/armada-xp-mv78260.dtsi109
-rw-r--r--arch/arm/boot/dts/at91sam9x5_usart3.dtsi4
-rw-r--r--arch/arm/boot/dts/bcm2835.dtsi4
-rw-r--r--arch/arm/boot/dts/cros5250-common.dtsi12
-rw-r--r--arch/arm/boot/dts/exynos5250.dtsi2
-rw-r--r--arch/arm/boot/dts/imx6qdl.dtsi2
-rw-r--r--arch/arm/boot/dts/omap-gpmc-smsc911x.dtsi4
-rw-r--r--arch/arm/boot/dts/omap-zoom-common.dtsi2
-rw-r--r--arch/arm/boot/dts/omap2.dtsi96
-rw-r--r--arch/arm/boot/dts/omap2420.dtsi23
-rw-r--r--arch/arm/boot/dts/omap2430.dtsi49
-rw-r--r--arch/arm/boot/dts/omap3-beagle-xm.dts7
-rw-r--r--arch/arm/boot/dts/omap3-beagle.dts21
-rw-r--r--arch/arm/boot/dts/omap3-igep.dtsi85
-rw-r--r--arch/arm/boot/dts/omap3-igep0020.dts50
-rw-r--r--arch/arm/boot/dts/omap3-igep0030.dts4
-rw-r--r--arch/arm/boot/dts/omap3-n900.dts25
-rw-r--r--arch/arm/boot/dts/omap3-n950-n9.dtsi2
-rw-r--r--arch/arm/boot/dts/omap3.dtsi42
-rw-r--r--arch/arm/boot/dts/omap34xx-hs.dtsi16
-rw-r--r--arch/arm/boot/dts/omap36xx-hs.dtsi16
-rw-r--r--arch/arm/boot/dts/omap4-panda-common.dtsi20
-rw-r--r--arch/arm/boot/dts/omap4-sdp.dts12
-rw-r--r--arch/arm/boot/dts/r8a7790.dtsi28
-rw-r--r--arch/arm/boot/dts/socfpga.dtsi7
-rw-r--r--arch/arm/boot/dts/sun6i-a31.dtsi27
-rw-r--r--arch/arm/boot/dts/sun7i-a20.dtsi42
-rw-r--r--arch/arm/configs/multi_v7_defconfig3
-rw-r--r--arch/arm/configs/omap2plus_defconfig1
-rw-r--r--arch/arm/configs/sunxi_defconfig7
-rw-r--r--arch/arm/configs/u8500_defconfig3
-rw-r--r--arch/arm/crypto/aesbs-core.S_shipped2
-rw-r--r--arch/arm/crypto/bsaes-armv7.pl2
-rw-r--r--arch/arm/include/asm/io.h2
-rw-r--r--arch/arm/include/asm/memory.h34
-rw-r--r--arch/arm/include/asm/pgtable.h2
-rw-r--r--arch/arm/include/asm/xen/page.h2
-rw-r--r--arch/arm/kernel/head-nommu.S4
-rw-r--r--arch/arm/kernel/head.S2
-rw-r--r--arch/arm/kernel/machine_kexec.c17
-rw-r--r--arch/arm/kernel/process.c7
-rw-r--r--arch/arm/kernel/relocate_kernel.S8
-rw-r--r--arch/arm/kernel/setup.c3
-rw-r--r--arch/arm/kernel/sigreturn_codes.S40
-rw-r--r--arch/arm/kernel/stacktrace.c2
-rw-r--r--arch/arm/kernel/traps.c11
-rw-r--r--arch/arm/lib/delay-loop.S1
-rw-r--r--arch/arm/mach-at91/at91rm9200_time.c7
-rw-r--r--arch/arm/mach-at91/pm.h4
-rw-r--r--arch/arm/mach-at91/sama5d3.c6
-rw-r--r--arch/arm/mach-davinci/devices-da8xx.c4
-rw-r--r--arch/arm/mach-davinci/dm355.c3
-rw-r--r--arch/arm/mach-davinci/dm365.c3
-rw-r--r--arch/arm/mach-davinci/dm644x.c3
-rw-r--r--arch/arm/mach-davinci/dm646x.c6
-rw-r--r--arch/arm/mach-footbridge/common.c3
-rw-r--r--arch/arm/mach-footbridge/dc21285-timer.c5
-rw-r--r--arch/arm/mach-footbridge/dc21285.c2
-rw-r--r--arch/arm/mach-footbridge/ebsa285.c22
-rw-r--r--arch/arm/mach-highbank/highbank.c23
-rw-r--r--arch/arm/mach-omap2/Makefile6
-rw-r--r--arch/arm/mach-omap2/board-generic.c18
-rw-r--r--arch/arm/mach-omap2/board-ldp.c7
-rw-r--r--arch/arm/mach-omap2/common.h1
-rw-r--r--arch/arm/mach-omap2/display.c40
-rw-r--r--arch/arm/mach-omap2/dss-common.c2
-rw-r--r--arch/arm/mach-omap2/gpmc.c58
-rw-r--r--arch/arm/mach-omap2/omap-secure.h7
-rw-r--r--arch/arm/mach-omap2/omap4-common.c57
-rw-r--r--arch/arm/mach-omap2/omap_device.c24
-rw-r--r--arch/arm/mach-omap2/omap_device.h1
-rw-r--r--arch/arm/mach-omap2/omap_hwmod.c143
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c4
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_3xxx_data.c19
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_44xx_data.c12
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_54xx_data.c13
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_7xx_data.c2
-rw-r--r--arch/arm/mach-omap2/pdata-quirks.c1
-rw-r--r--arch/arm/mach-omap2/pm34xx.c2
-rw-r--r--arch/arm/mach-omap2/powerdomain.c3
-rw-r--r--arch/arm/mach-omap2/prm44xx_54xx.h2
-rw-r--r--arch/arm/mach-pxa/include/mach/lubbock.h2
-rw-r--r--arch/arm/mach-pxa/reset.c8
-rw-r--r--arch/arm/mach-pxa/tosa.c102
-rw-r--r--arch/arm/mach-s3c64xx/mach-s3c64xx-dt.c11
-rw-r--r--arch/arm/mach-shmobile/board-armadillo800eva.c11
-rw-r--r--arch/arm/mach-shmobile/board-bockw.c2
-rw-r--r--arch/arm/mach-shmobile/board-kzm9g.c2
-rw-r--r--arch/arm/mach-shmobile/board-lager.c4
-rw-r--r--arch/arm/mach-shmobile/board-mackerel.c4
-rw-r--r--arch/arm/mach-socfpga/Kconfig1
-rw-r--r--arch/arm/mach-tegra/fuse.c12
-rw-r--r--arch/arm/mach-ux500/cpu-db8500.c4
-rw-r--r--arch/arm/mach-vexpress/spc.c40
-rw-r--r--arch/arm/mach-vexpress/spc.h1
-rw-r--r--arch/arm/mach-vexpress/tc2_pm.c66
-rw-r--r--arch/arm/mm/dma-mapping.c88
-rw-r--r--arch/arm/mm/flush.c6
-rw-r--r--arch/arm/mm/init.c2
-rw-r--r--arch/arm/mm/mmap.c2
-rw-r--r--arch/arm/mm/pgd.c3
-rw-r--r--arch/arm/plat-omap/include/plat/dmtimer.h5
-rw-r--r--arch/arm/xen/enlighten.c6
-rw-r--r--arch/arm/xen/p2m.c5
-rw-r--r--arch/arm64/Kconfig3
-rw-r--r--arch/arm64/boot/dts/foundation-v8.dts2
-rw-r--r--arch/arm64/include/asm/io.h2
-rw-r--r--arch/arm64/include/asm/irqflags.h3
-rw-r--r--arch/arm64/include/asm/pgtable-hwdef.h2
-rw-r--r--arch/arm64/include/asm/pgtable.h33
-rw-r--r--arch/arm64/include/asm/xen/page-coherent.h4
-rw-r--r--arch/arm64/kernel/debug-monitors.c20
-rw-r--r--arch/arm64/kernel/entry.S29
-rw-r--r--arch/arm64/kernel/head.S3
-rw-r--r--arch/arm64/kernel/ptrace.c78
-rw-r--r--arch/arm64/kernel/setup.c5
-rw-r--r--arch/arm64/kernel/smp.c1
-rw-r--r--arch/arm64/mm/proc.S2
-rw-r--r--arch/avr32/boards/favr-32/setup.c4
-rw-r--r--arch/avr32/configs/atngw100_defconfig1
-rw-r--r--arch/avr32/configs/atngw100_evklcd100_defconfig1
-rw-r--r--arch/avr32/configs/atngw100_evklcd101_defconfig1
-rw-r--r--arch/avr32/configs/atngw100_mrmt_defconfig1
-rw-r--r--arch/avr32/configs/atngw100mkii_defconfig1
-rw-r--r--arch/avr32/configs/atngw100mkii_evklcd100_defconfig1
-rw-r--r--arch/avr32/configs/atngw100mkii_evklcd101_defconfig1
-rw-r--r--arch/avr32/configs/atstk1002_defconfig1
-rw-r--r--arch/avr32/configs/atstk1003_defconfig1
-rw-r--r--arch/avr32/configs/atstk1004_defconfig1
-rw-r--r--arch/avr32/configs/atstk1006_defconfig1
-rw-r--r--arch/avr32/configs/favr-32_defconfig1
-rw-r--r--arch/avr32/configs/hammerhead_defconfig1
-rw-r--r--arch/avr32/configs/merisc_defconfig1
-rw-r--r--arch/avr32/configs/mimc200_defconfig1
-rw-r--r--arch/avr32/kernel/time.c2
-rw-r--r--arch/avr32/mach-at32ap/pm.c2
-rw-r--r--arch/parisc/configs/c3000_defconfig2
-rw-r--r--arch/parisc/configs/c8000_defconfig36
-rw-r--r--arch/parisc/configs/generic-64bit_defconfig39
-rw-r--r--arch/parisc/include/asm/cacheflush.h12
-rw-r--r--arch/parisc/include/asm/page.h5
-rw-r--r--arch/parisc/include/asm/serial.h2
-rw-r--r--arch/parisc/kernel/cache.c35
-rw-r--r--arch/parisc/kernel/hardware.c7
-rw-r--r--arch/parisc/kernel/head.S6
-rw-r--r--arch/parisc/kernel/sys_parisc.c25
-rw-r--r--arch/parisc/kernel/unwind.c9
-rw-r--r--arch/parisc/kernel/vmlinux.lds.S138
-rw-r--r--arch/parisc/mm/init.c19
-rw-r--r--arch/powerpc/Makefile7
-rw-r--r--arch/powerpc/boot/dts/mpc5121.dtsi1
-rw-r--r--arch/powerpc/boot/dts/mpc5125twr.dts6
-rw-r--r--arch/powerpc/boot/dts/xcalibur1501.dts4
-rw-r--r--arch/powerpc/boot/dts/xpedite5301.dts4
-rw-r--r--arch/powerpc/boot/dts/xpedite5330.dts4
-rw-r--r--arch/powerpc/boot/dts/xpedite5370.dts4
-rw-r--r--arch/powerpc/boot/util.S14
-rw-r--r--arch/powerpc/configs/52xx/cm5200_defconfig3
-rw-r--r--arch/powerpc/configs/52xx/lite5200b_defconfig3
-rw-r--r--arch/powerpc/configs/52xx/motionpro_defconfig3
-rw-r--r--arch/powerpc/configs/52xx/pcm030_defconfig3
-rw-r--r--arch/powerpc/configs/52xx/tqm5200_defconfig3
-rw-r--r--arch/powerpc/configs/mpc5200_defconfig3
-rw-r--r--arch/powerpc/configs/pasemi_defconfig7
-rw-r--r--arch/powerpc/include/asm/exception-64s.h2
-rw-r--r--arch/powerpc/include/asm/kvm_book3s.h4
-rw-r--r--arch/powerpc/include/asm/kvm_book3s_asm.h2
-rw-r--r--arch/powerpc/include/asm/opal.h4
-rw-r--r--arch/powerpc/include/asm/pgalloc-32.h6
-rw-r--r--arch/powerpc/include/asm/pgalloc-64.h7
-rw-r--r--arch/powerpc/include/asm/ppc_asm.h2
-rw-r--r--arch/powerpc/include/asm/reg.h7
-rw-r--r--arch/powerpc/include/asm/switch_to.h2
-rw-r--r--arch/powerpc/include/asm/timex.h8
-rw-r--r--arch/powerpc/include/asm/unaligned.h7
-rw-r--r--arch/powerpc/kernel/asm-offsets.c1
-rw-r--r--arch/powerpc/kernel/crash_dump.c6
-rw-r--r--arch/powerpc/kernel/head_64.S2
-rw-r--r--arch/powerpc/kernel/machine_kexec.c14
-rw-r--r--arch/powerpc/kernel/misc_64.S5
-rw-r--r--arch/powerpc/kernel/nvram_64.c2
-rw-r--r--arch/powerpc/kernel/process.c32
-rw-r--r--arch/powerpc/kernel/ptrace.c4
-rw-r--r--arch/powerpc/kernel/setup-common.c4
-rw-r--r--arch/powerpc/kernel/signal_32.c16
-rw-r--r--arch/powerpc/kernel/signal_64.c6
-rw-r--r--arch/powerpc/kernel/smp.c4
-rw-r--r--arch/powerpc/kernel/vdso32/gettimeofday.S6
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu_hv.c18
-rw-r--r--arch/powerpc/kvm/book3s_hv.c24
-rw-r--r--arch/powerpc/kvm/book3s_hv_rm_mmu.c9
-rw-r--r--arch/powerpc/kvm/book3s_hv_rmhandlers.S23
-rw-r--r--arch/powerpc/kvm/book3s_interrupts.S19
-rw-r--r--arch/powerpc/kvm/book3s_pr.c22
-rw-r--r--arch/powerpc/kvm/book3s_rmhandlers.S6
-rw-r--r--arch/powerpc/kvm/booke.c12
-rw-r--r--arch/powerpc/lib/copyuser_64.S53
-rw-r--r--arch/powerpc/mm/hugetlbpage-book3e.c3
-rw-r--r--arch/powerpc/mm/tlb_nohash.c2
-rw-r--r--arch/powerpc/platforms/Kconfig.cputype20
-rw-r--r--arch/powerpc/platforms/powernv/eeh-ioda.c20
-rw-r--r--arch/powerpc/platforms/powernv/opal-lpc.c12
-rw-r--r--arch/powerpc/platforms/powernv/opal-xscom.c4
-rw-r--r--arch/powerpc/platforms/powernv/pci-ioda.c1
-rw-r--r--arch/powerpc/platforms/powernv/pci.h4
-rw-r--r--arch/powerpc/platforms/pseries/lparcfg.c12
-rw-r--r--arch/powerpc/platforms/pseries/msi.c28
-rw-r--r--arch/powerpc/platforms/pseries/nvram.c46
-rw-r--r--arch/powerpc/platforms/pseries/pci.c8
-rw-r--r--arch/powerpc/sysdev/ppc4xx_ocm.c2
-rw-r--r--arch/s390/Kconfig9
-rw-r--r--arch/s390/crypto/aes_s390.c50
-rw-r--r--arch/s390/include/asm/page.h38
-rw-r--r--arch/s390/include/asm/sclp.h3
-rw-r--r--arch/s390/include/asm/smp.h2
-rw-r--r--arch/s390/include/asm/vdso.h5
-rw-r--r--arch/s390/kernel/asm-offsets.c4
-rw-r--r--arch/s390/kernel/compat_signal.c2
-rw-r--r--arch/s390/kernel/pgm_check.S2
-rw-r--r--arch/s390/kernel/setup.c1
-rw-r--r--arch/s390/kernel/signal.c2
-rw-r--r--arch/s390/kernel/smp.c25
-rw-r--r--arch/s390/kernel/time.c46
-rw-r--r--arch/s390/kernel/vdso.c2
-rw-r--r--arch/s390/kernel/vdso32/clock_gettime.S31
-rw-r--r--arch/s390/kernel/vdso32/gettimeofday.S9
-rw-r--r--arch/s390/kernel/vdso64/clock_getres.S4
-rw-r--r--arch/s390/kernel/vdso64/clock_gettime.S24
-rw-r--r--arch/s390/kernel/vdso64/gettimeofday.S9
-rw-r--r--arch/s390/lib/uaccess_pt.c3
-rw-r--r--arch/s390/pci/pci_event.c2
-rw-r--r--arch/sh/kernel/sh_ksyms_32.c5
-rw-r--r--arch/sh/lib/Makefile2
-rw-r--r--arch/sparc/include/asm/pgtable_64.h4
-rw-r--r--arch/sparc/include/asm/uaccess_64.h4
-rw-r--r--arch/sparc/kernel/iommu.c2
-rw-r--r--arch/sparc/kernel/ioport.c5
-rw-r--r--arch/sparc/kernel/kgdb_64.c1
-rw-r--r--arch/sparc/kernel/smp_64.c3
-rw-r--r--arch/um/Makefile9
-rw-r--r--arch/um/kernel/sysrq.c4
-rw-r--r--arch/x86/Kconfig1
-rw-r--r--arch/x86/Makefile8
-rw-r--r--arch/x86/boot/Makefile6
-rw-r--r--arch/x86/boot/compressed/Makefile1
-rw-r--r--arch/x86/crypto/Makefile3
-rw-r--r--arch/x86/crypto/aesni-intel_glue.c2
-rw-r--r--arch/x86/crypto/camellia_aesni_avx2_glue.c2
-rw-r--r--arch/x86/crypto/camellia_aesni_avx_glue.c2
-rw-r--r--arch/x86/crypto/cast5_avx_glue.c2
-rw-r--r--arch/x86/crypto/cast6_avx_glue.c2
-rw-r--r--arch/x86/crypto/serpent_avx2_glue.c2
-rw-r--r--arch/x86/crypto/serpent_avx_glue.c2
-rw-r--r--arch/x86/crypto/serpent_sse2_glue.c2
-rw-r--r--arch/x86/crypto/sha256_ssse3_glue.c4
-rw-r--r--arch/x86/crypto/twofish_avx_glue.c2
-rw-r--r--arch/x86/include/asm/atomic.h4
-rw-r--r--arch/x86/include/asm/atomic64_64.h4
-rw-r--r--arch/x86/include/asm/bitops.h6
-rw-r--r--arch/x86/include/asm/local.h4
-rw-r--r--arch/x86/include/asm/pgtable.h11
-rw-r--r--arch/x86/include/asm/preempt.h11
-rw-r--r--arch/x86/include/asm/rmwcc.h8
-rw-r--r--arch/x86/include/asm/simd.h11
-rw-r--r--arch/x86/include/asm/trace/irq_vectors.h11
-rw-r--r--arch/x86/kernel/cpu/intel.c3
-rw-r--r--arch/x86/kernel/cpu/perf_event.h15
-rw-r--r--arch/x86/kernel/reboot.c11
-rw-r--r--arch/x86/kvm/lapic.c43
-rw-r--r--arch/x86/kvm/lapic.h4
-rw-r--r--arch/x86/kvm/vmx.c3
-rw-r--r--arch/x86/kvm/x86.c40
-rw-r--r--arch/x86/mm/gup.c13
-rw-r--r--arch/x86/platform/efi/early_printk.c2
-rw-r--r--arch/x86/platform/efi/efi.c7
-rw-r--r--arch/x86/platform/uv/tlb_uv.c5
-rw-r--r--arch/x86/realmode/rm/Makefile3
-rw-r--r--block/blk-cgroup.h8
-rw-r--r--block/blk-flush.c19
-rw-r--r--block/blk-mq-sysfs.c13
-rw-r--r--block/blk-mq.c16
-rw-r--r--crypto/Kconfig23
-rw-r--r--crypto/Makefile8
-rw-r--r--crypto/ablk_helper.c (renamed from arch/x86/crypto/ablk_helper.c)13
-rw-r--r--crypto/ablkcipher.c21
-rw-r--r--crypto/algif_hash.c3
-rw-r--r--crypto/algif_skcipher.c3
-rw-r--r--crypto/ansi_cprng.c4
-rw-r--r--crypto/asymmetric_keys/rsa.c5
-rw-r--r--crypto/asymmetric_keys/x509_public_key.c81
-rw-r--r--crypto/authenc.c61
-rw-r--r--crypto/authencesn.c34
-rw-r--r--crypto/ccm.c7
-rw-r--r--crypto/gcm.c2
-rw-r--r--crypto/memneq.c138
-rw-r--r--crypto/tcrypt.c4
-rw-r--r--crypto/testmgr.c26
-rw-r--r--drivers/acpi/Kconfig1
-rw-r--r--drivers/acpi/ac.c4
-rw-r--r--drivers/acpi/acpi_lpss.c1
-rw-r--r--drivers/acpi/acpica/acresrc.h6
-rw-r--r--drivers/acpi/acpica/nsalloc.c18
-rw-r--r--drivers/acpi/acpica/nsutils.c18
-rw-r--r--drivers/acpi/acpica/rscalc.c9
-rw-r--r--drivers/acpi/acpica/rscreate.c36
-rw-r--r--drivers/acpi/acpica/rsutils.c2
-rw-r--r--drivers/acpi/acpica/utdebug.c31
-rw-r--r--drivers/acpi/apei/Kconfig1
-rw-r--r--drivers/acpi/apei/erst.c1
-rw-r--r--drivers/acpi/battery.c22
-rw-r--r--drivers/acpi/bus.c10
-rw-r--r--drivers/acpi/nvs.c1
-rw-r--r--drivers/acpi/pci_root.c3
-rw-r--r--drivers/acpi/scan.c2
-rw-r--r--drivers/acpi/sleep.c2
-rw-r--r--drivers/acpi/sysfs.c54
-rw-r--r--drivers/ata/ahci.c23
-rw-r--r--drivers/ata/ahci_imx.c3
-rw-r--r--drivers/ata/ahci_platform.c1
-rw-r--r--drivers/ata/libata-core.c22
-rw-r--r--drivers/ata/libata-scsi.c22
-rw-r--r--drivers/ata/libata-zpodd.c4
-rw-r--r--drivers/ata/pata_arasan_cf.c1
-rw-r--r--drivers/ata/sata_sis.c4
-rw-r--r--drivers/base/regmap/regmap-mmio.c11
-rw-r--r--drivers/base/regmap/regmap.c8
-rw-r--r--drivers/block/null_blk.c120
-rw-r--r--drivers/block/skd_main.c4
-rw-r--r--drivers/block/xen-blkfront.c7
-rw-r--r--drivers/bluetooth/ath3k.c2
-rw-r--r--drivers/bluetooth/btusb.c1
-rw-r--r--drivers/char/hw_random/Kconfig25
-rw-r--r--drivers/char/hw_random/Makefile2
-rw-r--r--drivers/char/hw_random/msm-rng.c197
-rw-r--r--drivers/char/hw_random/omap3-rom-rng.c141
-rw-r--r--drivers/char/hw_random/pseries-rng.c5
-rw-r--r--drivers/char/hw_random/via-rng.c2
-rw-r--r--drivers/char/i8k.c7
-rw-r--r--drivers/char/tpm/tpm_ppi.c15
-rw-r--r--drivers/clk/clk-divider.c2
-rw-r--r--drivers/clk/clk-s2mps11.c6
-rw-r--r--drivers/clk/samsung/clk-exynos-audss.c10
-rw-r--r--drivers/clk/samsung/clk-exynos4.c2
-rw-r--r--drivers/clk/samsung/clk-exynos5250.c14
-rw-r--r--drivers/clocksource/Kconfig2
-rw-r--r--drivers/clocksource/clksrc-of.c1
-rw-r--r--drivers/clocksource/dw_apb_timer_of.c7
-rw-r--r--drivers/clocksource/sh_mtu2.c16
-rw-r--r--drivers/clocksource/sh_tmu.c20
-rw-r--r--drivers/clocksource/sun4i_timer.c3
-rw-r--r--drivers/clocksource/time-armada-370-xp.c10
-rw-r--r--drivers/cpufreq/at32ap-cpufreq.c2
-rw-r--r--drivers/cpufreq/cpufreq.c104
-rw-r--r--drivers/cpufreq/exynos4210-cpufreq.c1
-rw-r--r--drivers/cpufreq/exynos4x12-cpufreq.c1
-rw-r--r--drivers/cpufreq/exynos5250-cpufreq.c1
-rw-r--r--drivers/cpufreq/intel_pstate.c8
-rw-r--r--drivers/cpufreq/tegra-cpufreq.c4
-rw-r--r--drivers/cpuidle/cpuidle-calxeda.c2
-rw-r--r--drivers/cpuidle/cpuidle.c2
-rw-r--r--drivers/crypto/caam/Kconfig25
-rw-r--r--drivers/crypto/caam/Makefile4
-rw-r--r--drivers/crypto/caam/caamalg.c134
-rw-r--r--drivers/crypto/caam/caamhash.c88
-rw-r--r--drivers/crypto/caam/caamrng.c29
-rw-r--r--drivers/crypto/caam/ctrl.c418
-rw-r--r--drivers/crypto/caam/desc.h17
-rw-r--r--drivers/crypto/caam/intern.h20
-rw-r--r--drivers/crypto/caam/jr.c340
-rw-r--r--drivers/crypto/caam/jr.h5
-rw-r--r--drivers/crypto/caam/regs.h14
-rw-r--r--drivers/crypto/caam/sg_sw_sec4.h34
-rw-r--r--drivers/crypto/dcp.c49
-rw-r--r--drivers/crypto/ixp4xx_crypto.c30
-rw-r--r--drivers/crypto/mv_cesa.c14
-rw-r--r--drivers/crypto/omap-aes.c6
-rw-r--r--drivers/crypto/omap-sham.c1
-rw-r--r--drivers/crypto/picoxcell_crypto.c32
-rw-r--r--drivers/crypto/sahara.c2
-rw-r--r--drivers/crypto/talitos.c103
-rw-r--r--drivers/crypto/tegra-aes.c26
-rw-r--r--drivers/dma/Kconfig7
-rw-r--r--drivers/dma/amba-pl08x.c2
-rw-r--r--drivers/dma/at_hdmac_regs.h4
-rw-r--r--drivers/dma/dmaengine.c4
-rw-r--r--drivers/dma/dmatest.c8
-rw-r--r--drivers/dma/fsldma.c31
-rw-r--r--drivers/dma/ioat/dma.c11
-rw-r--r--drivers/dma/mmp_pdma.c1
-rw-r--r--drivers/dma/mv_xor.c101
-rw-r--r--drivers/dma/pl330.c5
-rw-r--r--drivers/dma/ppc4xx/adma.c27
-rw-r--r--drivers/dma/s3c24xx-dma.c33
-rw-r--r--drivers/dma/sh/rcar-hpbdma.c11
-rw-r--r--drivers/dma/txx9dmac.c1
-rw-r--r--drivers/edac/sb_edac.c2
-rw-r--r--drivers/extcon/extcon-arizona.c4
-rw-r--r--drivers/extcon/extcon-class.c3
-rw-r--r--drivers/firmware/Makefile1
-rw-r--r--drivers/firmware/efi/Kconfig6
-rw-r--r--drivers/firmware/efi/Makefile2
-rw-r--r--drivers/firmware/efi/efi-pstore.c164
-rw-r--r--drivers/firmware/efi/efivars.c12
-rw-r--r--drivers/firmware/efi/vars.c12
-rw-r--r--drivers/gpio/gpio-bcm-kona.c2
-rw-r--r--drivers/gpio/gpio-davinci.c4
-rw-r--r--drivers/gpio/gpio-mpc8xxx.c8
-rw-r--r--drivers/gpio/gpio-msm-v2.c6
-rw-r--r--drivers/gpio/gpio-mvebu.c2
-rw-r--r--drivers/gpio/gpio-pl061.c10
-rw-r--r--drivers/gpio/gpio-rcar.c5
-rw-r--r--drivers/gpio/gpio-tb10x.c1
-rw-r--r--drivers/gpio/gpio-twl4030.c26
-rw-r--r--drivers/gpio/gpio-ucb1400.c1
-rw-r--r--drivers/gpio/gpiolib.c78
-rw-r--r--drivers/gpu/drm/armada/armada_drm.h1
-rw-r--r--drivers/gpu/drm/armada/armada_drv.c7
-rw-r--r--drivers/gpu/drm/armada/armada_fbdev.c20
-rw-r--r--drivers/gpu/drm/armada/armada_gem.c7
-rw-r--r--drivers/gpu/drm/drm_edid.c12
-rw-r--r--drivers/gpu/drm/drm_stub.c6
-rw-r--r--drivers/gpu/drm/drm_sysfs.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.c35
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimd.c2
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c20
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c3
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h9
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c41
-rw-r--r--drivers/gpu/drm/i915/i915_gem_context.c16
-rw-r--r--drivers/gpu/drm/i915/i915_gem_dmabuf.c13
-rw-r--r--drivers/gpu/drm/i915/i915_gem_evict.c14
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c88
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c17
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h1
-rw-r--r--drivers/gpu/drm/i915/intel_ddi.c5
-rw-r--r--drivers/gpu/drm/i915/intel_display.c47
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c34
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h3
-rw-r--r--drivers/gpu/drm/i915/intel_panel.c26
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c58
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c1
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c1
-rw-r--r--drivers/gpu/drm/nouveau/Makefile1
-rw-r--r--drivers/gpu/drm/nouveau/core/core/subdev.c3
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/base.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/nv50.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/nvc0.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c3
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c3
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/software/nv50.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/clock.h4
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/fb.h5
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/init.c14
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c7
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/nvaa.c445
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/overlay.c42
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_abi16.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_acpi.c16
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_hwmon.c1
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.c2
-rw-r--r--drivers/gpu/drm/qxl/Kconfig1
-rw-r--r--drivers/gpu/drm/qxl/qxl_display.c2
-rw-r--r--drivers/gpu/drm/qxl/qxl_release.c1
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c87
-rw-r--r--drivers/gpu/drm/radeon/atombios_i2c.c15
-rw-r--r--drivers/gpu/drm/radeon/cik.c12
-rw-r--r--drivers/gpu/drm/radeon/cik_sdma.c2
-rw-r--r--drivers/gpu/drm/radeon/dce6_afmt.c20
-rw-r--r--drivers/gpu/drm/radeon/evergreen_hdmi.c4
-rw-r--r--drivers/gpu/drm/radeon/ni.c20
-rw-r--r--drivers/gpu/drm/radeon/ni_dpm.c28
-rw-r--r--drivers/gpu/drm/radeon/r600_hdmi.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon.h12
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_atpx_handler.c16
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c13
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.h3
-rw-r--r--drivers/gpu/drm/radeon/radeon_gart.c16
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c9
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c55
-rw-r--r--drivers/gpu/drm/radeon/radeon_trace.h33
-rw-r--r--drivers/gpu/drm/radeon/radeon_uvd.c2
-rw-r--r--drivers/gpu/drm/radeon/reg_srcs/cayman4
-rw-r--r--drivers/gpu/drm/radeon/reg_srcs/evergreen4
-rw-r--r--drivers/gpu/drm/radeon/rs690.c10
-rw-r--r--drivers/gpu/drm/radeon/rv770_dpm.c6
-rw-r--r--drivers/gpu/drm/radeon/si.c23
-rw-r--r--drivers/gpu/drm/tegra/drm.c34
-rw-r--r--drivers/gpu/drm/tegra/drm.h2
-rw-r--r--drivers/gpu/drm/tegra/fb.c2
-rw-r--r--drivers/gpu/drm/tegra/rgb.c11
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c3
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_vm.c6
-rw-r--r--drivers/gpu/drm/udl/udl_gem.c6
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c3
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource.c118
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c5
-rw-r--r--drivers/gpu/host1x/bus.c5
-rw-r--r--drivers/gpu/host1x/hw/cdma_hw.c4
-rw-r--r--drivers/gpu/host1x/hw/debug_hw.c4
-rw-r--r--drivers/hid/Kconfig1
-rw-r--r--drivers/hid/hid-appleir.c3
-rw-r--r--drivers/hid/hid-core.c2
-rw-r--r--drivers/hid/hid-ids.h5
-rw-r--r--drivers/hid/hid-kye.c14
-rw-r--r--drivers/hid/hid-multitouch.c6
-rw-r--r--drivers/hid/hid-sensor-hub.c22
-rw-r--r--drivers/hid/hid-sony.c53
-rw-r--r--drivers/hid/hid-wiimote-core.c5
-rw-r--r--drivers/hid/uhid.c2
-rw-r--r--drivers/hwmon/asus_atk0110.c1
-rw-r--r--drivers/hwmon/hih6130.c16
-rw-r--r--drivers/hwmon/lm78.c2
-rw-r--r--drivers/hwmon/lm90.c4
-rw-r--r--drivers/hwmon/sis5595.c2
-rw-r--r--drivers/hwmon/vt8231.c2
-rw-r--r--drivers/hwmon/w83l786ng.c13
-rw-r--r--drivers/i2c/busses/i2c-bcm-kona.c3
-rw-r--r--drivers/i2c/busses/i2c-bcm2835.c1
-rw-r--r--drivers/i2c/busses/i2c-davinci.c4
-rw-r--r--drivers/i2c/busses/i2c-diolan-u2c.c16
-rw-r--r--drivers/i2c/busses/i2c-imx.c4
-rw-r--r--drivers/i2c/busses/i2c-omap.c30
-rw-r--r--drivers/i2c/i2c-mux.c2
-rw-r--r--drivers/idle/intel_idle.c19
-rw-r--r--drivers/iio/accel/hid-sensor-accel-3d.c5
-rw-r--r--drivers/iio/accel/kxsd9.c7
-rw-r--r--drivers/iio/adc/ad7887.c16
-rw-r--r--drivers/iio/adc/at91_adc.c1
-rw-r--r--drivers/iio/adc/mcp3422.c8
-rw-r--r--drivers/iio/adc/ti_am335x_adc.c7
-rw-r--r--drivers/iio/common/hid-sensors/Kconfig9
-rw-r--r--drivers/iio/common/hid-sensors/hid-sensor-trigger.c29
-rw-r--r--drivers/iio/common/hid-sensors/hid-sensor-trigger.h2
-rw-r--r--drivers/iio/gyro/hid-sensor-gyro-3d.c5
-rw-r--r--drivers/iio/imu/adis16400_core.c7
-rw-r--r--drivers/iio/light/Kconfig3
-rw-r--r--drivers/iio/light/cm36651.c2
-rw-r--r--drivers/iio/light/hid-sensor-als.c5
-rw-r--r--drivers/iio/magnetometer/Kconfig2
-rw-r--r--drivers/iio/magnetometer/hid-sensor-magn-3d.c5
-rw-r--r--drivers/iio/magnetometer/mag3110.c7
-rw-r--r--drivers/infiniband/core/iwcm.c11
-rw-r--r--drivers/infiniband/core/uverbs.h10
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c17
-rw-r--r--drivers/infiniband/core/uverbs_main.c27
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c78
-rw-r--r--drivers/infiniband/hw/cxgb4/mem.c2
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_netlink.c3
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.c22
-rw-r--r--drivers/input/input.c4
-rw-r--r--drivers/input/keyboard/adp5588-keys.c3
-rw-r--r--drivers/input/keyboard/adp5589-keys.c3
-rw-r--r--drivers/input/keyboard/bf54x-keys.c3
-rw-r--r--drivers/input/misc/adxl34x.c2
-rw-r--r--drivers/input/misc/hp_sdc_rtc.c5
-rw-r--r--drivers/input/misc/pcf8574_keypad.c7
-rw-r--r--drivers/input/mouse/alps.c206
-rw-r--r--drivers/input/mouse/alps.h1
-rw-r--r--drivers/input/mouse/elantech.c1
-rw-r--r--drivers/input/serio/serio.c24
-rw-r--r--drivers/input/touchscreen/Kconfig11
-rw-r--r--drivers/input/touchscreen/Makefile1
-rw-r--r--drivers/input/touchscreen/atmel-wm97xx.c2
-rw-r--r--drivers/input/touchscreen/cyttsp4_core.c3
-rw-r--r--drivers/input/touchscreen/sur40.c466
-rw-r--r--drivers/input/touchscreen/usbtouchscreen.c22
-rw-r--r--drivers/input/touchscreen/zforce_ts.c21
-rw-r--r--drivers/iommu/arm-smmu.c66
-rw-r--r--drivers/irqchip/irq-gic.c9
-rw-r--r--drivers/irqchip/irq-renesas-intc-irqpin.c8
-rw-r--r--drivers/isdn/hisax/hfc_pci.c4
-rw-r--r--drivers/isdn/hisax/telespci.c4
-rw-r--r--drivers/leds/leds-lp5521.c12
-rw-r--r--drivers/leds/leds-lp5523.c12
-rw-r--r--drivers/leds/leds-pwm.c53
-rw-r--r--drivers/macintosh/Makefile1
-rw-r--r--drivers/md/bcache/alloc.c2
-rw-r--r--drivers/md/bcache/bcache.h12
-rw-r--r--drivers/md/bcache/btree.c27
-rw-r--r--drivers/md/bcache/movinggc.c21
-rw-r--r--drivers/md/bcache/super.c2
-rw-r--r--drivers/md/bcache/sysfs.c50
-rw-r--r--drivers/md/bcache/util.c8
-rw-r--r--drivers/md/bcache/util.h2
-rw-r--r--drivers/md/bcache/writeback.c53
-rw-r--r--drivers/md/dm-bufio.c5
-rw-r--r--drivers/md/dm-cache-policy-mq.c13
-rw-r--r--drivers/md/dm-cache-target.c2
-rw-r--r--drivers/md/dm-delay.c23
-rw-r--r--drivers/md/dm-snap.c71
-rw-r--r--drivers/md/dm-stats.c1
-rw-r--r--drivers/md/dm-table.c5
-rw-r--r--drivers/md/dm-thin-metadata.c8
-rw-r--r--drivers/md/dm-thin-metadata.h1
-rw-r--r--drivers/md/dm-thin.c66
-rw-r--r--drivers/md/md.c14
-rw-r--r--drivers/md/persistent-data/dm-array.c10
-rw-r--r--drivers/md/persistent-data/dm-block-manager.c6
-rw-r--r--drivers/md/persistent-data/dm-block-manager.h7
-rw-r--r--drivers/md/persistent-data/dm-space-map-common.c32
-rw-r--r--drivers/md/persistent-data/dm-space-map-metadata.c8
-rw-r--r--drivers/md/raid5.c13
-rw-r--r--drivers/media/common/siano/smscoreapi.h4
-rw-r--r--drivers/media/common/siano/smsdvb.h2
-rw-r--r--drivers/media/dvb-core/dvb_demux.c9
-rw-r--r--drivers/media/dvb-frontends/af9033.c12
-rw-r--r--drivers/media/dvb-frontends/cxd2820r_c.c2
-rw-r--r--drivers/media/dvb-frontends/dib8000.c4
-rw-r--r--drivers/media/dvb-frontends/drxk_hard.c18
-rw-r--r--drivers/media/dvb-frontends/rtl2830.c1
-rw-r--r--drivers/media/i2c/adv7183_regs.h6
-rw-r--r--drivers/media/i2c/adv7604.c2
-rw-r--r--drivers/media/i2c/adv7842.c2
-rw-r--r--drivers/media/i2c/ir-kbd-i2c.c2
-rw-r--r--drivers/media/i2c/m5mols/m5mols_controls.c2
-rw-r--r--drivers/media/i2c/mt9p031.c1
-rw-r--r--drivers/media/i2c/s5c73m3/s5c73m3-core.c2
-rw-r--r--drivers/media/i2c/s5c73m3/s5c73m3.h2
-rw-r--r--drivers/media/i2c/saa7115.c2
-rw-r--r--drivers/media/i2c/soc_camera/ov5642.c2
-rw-r--r--drivers/media/i2c/ths7303.c3
-rw-r--r--drivers/media/i2c/wm8775.c4
-rw-r--r--drivers/media/pci/bt8xx/bttv-driver.c3
-rw-r--r--drivers/media/pci/cx18/cx18-driver.h2
-rw-r--r--drivers/media/pci/cx23885/cx23885-417.c2
-rw-r--r--drivers/media/pci/pluto2/pluto2.c2
-rw-r--r--drivers/media/pci/saa7164/saa7164-core.c4
-rw-r--r--drivers/media/platform/coda.c2
-rw-r--r--drivers/media/platform/exynos4-is/fimc-core.c2
-rw-r--r--drivers/media/platform/exynos4-is/media-dev.c2
-rw-r--r--drivers/media/platform/marvell-ccic/mmp-driver.c46
-rw-r--r--drivers/media/platform/omap3isp/isp.c2
-rw-r--r--drivers/media/platform/omap3isp/ispvideo.c7
-rw-r--r--drivers/media/platform/s5p-mfc/regs-mfc.h2
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc.c12
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c2
-rw-r--r--drivers/media/platform/s5p-tv/mixer.h2
-rw-r--r--drivers/media/platform/s5p-tv/mixer_video.c4
-rw-r--r--drivers/media/platform/soc_camera/omap1_camera.c2
-rw-r--r--drivers/media/platform/vivi.c4
-rw-r--r--drivers/media/platform/vsp1/vsp1_drv.c2
-rw-r--r--drivers/media/platform/vsp1/vsp1_video.c4
-rw-r--r--drivers/media/radio/radio-shark.c4
-rw-r--r--drivers/media/radio/radio-shark2.c4
-rw-r--r--drivers/media/radio/radio-si476x.c4
-rw-r--r--drivers/media/radio/radio-tea5764.c2
-rw-r--r--drivers/media/radio/tef6862.c2
-rw-r--r--drivers/media/rc/imon.c2
-rw-r--r--drivers/media/rc/redrat3.c2
-rw-r--r--drivers/media/tuners/mt2063.c4
-rw-r--r--drivers/media/tuners/tuner-xc2028-types.h2
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-cards.c2
-rw-r--r--drivers/media/usb/dvb-usb-v2/af9035.c17
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf.c4
-rw-r--r--drivers/media/usb/dvb-usb/technisat-usb2.c2
-rw-r--r--drivers/media/usb/em28xx/em28xx-video.c2
-rw-r--r--drivers/media/usb/gspca/gl860/gl860.c2
-rw-r--r--drivers/media/usb/gspca/pac207.c2
-rw-r--r--drivers/media/usb/gspca/pac7302.c2
-rw-r--r--drivers/media/usb/gspca/stk1135.c3
-rw-r--r--drivers/media/usb/gspca/stv0680.c2
-rw-r--r--drivers/media/usb/gspca/sunplus.c1
-rw-r--r--drivers/media/usb/gspca/zc3xx.c2
-rw-r--r--drivers/media/usb/pwc/pwc-if.c2
-rw-r--r--drivers/media/usb/usbtv/usbtv.c174
-rw-r--r--drivers/media/usb/uvc/uvc_video.c2
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls.c2
-rw-r--r--drivers/media/v4l2-core/videobuf2-core.c29
-rw-r--r--drivers/media/v4l2-core/videobuf2-dma-contig.c4
-rw-r--r--drivers/media/v4l2-core/videobuf2-dma-sg.c3
-rw-r--r--drivers/mfd/Kconfig2
-rw-r--r--drivers/mfd/lpc_ich.c2
-rw-r--r--drivers/mfd/rtsx_pcr.c10
-rw-r--r--drivers/mfd/sec-core.c30
-rw-r--r--drivers/mfd/sec-irq.c6
-rw-r--r--drivers/mfd/ti-ssp.c2
-rw-r--r--drivers/misc/enclosure.c7
-rw-r--r--drivers/misc/mei/hw-me-regs.h5
-rw-r--r--drivers/misc/mei/pci-me.c4
-rw-r--r--drivers/misc/mic/card/mic_virtio.c33
-rw-r--r--drivers/misc/mic/card/mic_virtio.h7
-rw-r--r--drivers/misc/mic/host/mic_boot.c2
-rw-r--r--drivers/misc/mic/host/mic_virtio.c30
-rw-r--r--drivers/misc/mic/host/mic_x100.c4
-rw-r--r--drivers/mmc/host/omap.c45
-rw-r--r--drivers/mtd/maps/pxa2xx-flash.c2
-rw-r--r--drivers/mtd/nand/pxa3xx_nand.c6
-rw-r--r--drivers/net/bonding/bond_3ad.c45
-rw-r--r--drivers/net/bonding/bond_main.c13
-rw-r--r--drivers/net/bonding/bond_options.c13
-rw-r--r--drivers/net/bonding/bond_sysfs.c8
-rw-r--r--drivers/net/bonding/bonding.h7
-rw-r--r--drivers/net/can/c_can/c_can.c22
-rw-r--r--drivers/net/can/flexcan.c2
-rw-r--r--drivers/net/can/sja1000/sja1000.c17
-rw-r--r--drivers/net/can/usb/ems_usb.c3
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_pro.c3
-rw-r--r--drivers/net/ethernet/allwinner/sun4i-emac.c5
-rw-r--r--drivers/net/ethernet/arc/emac_main.c4
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_main.c8
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h51
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c28
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h3
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c94
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c6
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h1
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c15
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h7
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c259
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h1
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c28
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c56
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h103
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c337
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h9
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/l2t.c35
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/l2t.h3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c14
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c333
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_regs.h87
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h7
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/adapter.h1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c15
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/sge.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h24
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c4
-rw-r--r--drivers/net/ethernet/emulex/benet/be.h4
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c40
-rw-r--r--drivers/net/ethernet/emulex/benet/be_hw.h3
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c86
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c17
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_main.c2
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000.h7
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c60
-rw-r--r--drivers/net/ethernet/intel/e1000e/80003es2lan.c7
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c4
-rw-r--r--drivers/net/ethernet/intel/e1000e/phy.c10
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c3
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_phy.c5
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c7
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c42
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c3
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c2
-rw-r--r--drivers/net/ethernet/lantiq_etop.c3
-rw-r--r--drivers/net/ethernet/marvell/mvmdio.c6
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_selftest.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h3
-rw-r--r--drivers/net/ethernet/nvidia/forcedeth.c8
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c4
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic.h6
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c54
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h1
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c65
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c60
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c4
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c22
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c9
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c3
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge.h2
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c4
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_main.c8
-rw-r--r--drivers/net/ethernet/realtek/8139cp.c5
-rw-r--r--drivers/net/ethernet/realtek/r8169.c5
-rw-r--r--drivers/net/ethernet/sfc/efx.c8
-rw-r--r--drivers/net/ethernet/sfc/mcdi.c39
-rw-r--r--drivers/net/ethernet/sfc/mcdi.h2
-rw-r--r--drivers/net/ethernet/sfc/mcdi_mon.c78
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h3
-rw-r--r--drivers/net/ethernet/sfc/nic.h2
-rw-r--r--drivers/net/ethernet/sfc/ptp.c66
-rw-r--r--drivers/net/ethernet/sfc/rx.c6
-rw-r--r--drivers/net/ethernet/smsc/smc91x.c45
-rw-r--r--drivers/net/ethernet/smsc/smc91x.h22
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c20
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c4
-rw-r--r--drivers/net/ethernet/tehuti/tehuti.c1
-rw-r--r--drivers/net/ethernet/ti/cpsw.c23
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c26
-rw-r--r--drivers/net/ethernet/tile/tilegx.c3
-rw-r--r--drivers/net/ethernet/via/via-velocity.c11
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c2
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c2
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_emaclite.c51
-rw-r--r--drivers/net/hamradio/hdlcdrv.c2
-rw-r--r--drivers/net/hamradio/yam.c1
-rw-r--r--drivers/net/hyperv/netvsc_drv.c21
-rw-r--r--drivers/net/macvlan.c26
-rw-r--r--drivers/net/macvtap.c25
-rw-r--r--drivers/net/phy/micrel.c15
-rw-r--r--drivers/net/phy/phy.c6
-rw-r--r--drivers/net/phy/vitesse.c15
-rw-r--r--drivers/net/team/team.c7
-rw-r--r--drivers/net/tun.c21
-rw-r--r--drivers/net/usb/Kconfig6
-rw-r--r--drivers/net/usb/dm9601.c44
-rw-r--r--drivers/net/usb/hso.c13
-rw-r--r--drivers/net/usb/mcs7830.c19
-rw-r--r--drivers/net/virtio_net.c164
-rw-r--r--drivers/net/vxlan.c5
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_mac.c52
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.c22
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_main.c25
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c7
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c5
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c4
-rw-r--r--drivers/net/wireless/ath/wcn36xx/smd.c19
-rw-r--r--drivers/net/wireless/brcm80211/Kconfig2
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-7000.c29
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-config.h5
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-csr.h5
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/bt-coex.c6
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/d3.c5
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/debugfs.c4
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/time-event.c7
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/drv.c27
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/internal.h8
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/rx.c7
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/trans.c3
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/tx.c6
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c18
-rw-r--r--drivers/net/wireless/mwifiex/main.c3
-rw-r--r--drivers/net/wireless/mwifiex/sta_ioctl.c4
-rw-r--r--drivers/net/wireless/rtlwifi/pci.c4
-rw-r--r--drivers/net/xen-netback/common.h19
-rw-r--r--drivers/net/xen-netback/interface.c31
-rw-r--r--drivers/net/xen-netback/netback.c288
-rw-r--r--drivers/ntb/ntb_hw.c121
-rw-r--r--drivers/ntb/ntb_hw.h7
-rw-r--r--drivers/ntb/ntb_regs.h16
-rw-r--r--drivers/ntb/ntb_transport.c77
-rw-r--r--drivers/of/Kconfig2
-rw-r--r--drivers/of/address.c8
-rw-r--r--drivers/of/fdt.c12
-rw-r--r--drivers/of/irq.c5
-rw-r--r--drivers/pci/host/pci-mvebu.c5
-rw-r--r--drivers/pci/hotplug/acpiphp_glue.c30
-rw-r--r--drivers/pci/pci-acpi.c21
-rw-r--r--drivers/pci/pci-driver.c38
-rw-r--r--drivers/pci/pci.c8
-rw-r--r--drivers/pci/quirks.c4
-rw-r--r--drivers/pci/remove.c4
-rw-r--r--drivers/phy/Kconfig4
-rw-r--r--drivers/phy/phy-core.c26
-rw-r--r--drivers/pinctrl/pinctrl-abx500.c6
-rw-r--r--drivers/pinctrl/pinctrl-abx500.h2
-rw-r--r--drivers/pinctrl/pinctrl-baytrail.c1
-rw-r--r--drivers/pinctrl/pinctrl-rockchip.c5
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7740.c2
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7372.c2
-rw-r--r--drivers/pinctrl/sh-pfc/sh_pfc.h2
-rw-r--r--drivers/platform/Kconfig1
-rw-r--r--drivers/platform/Makefile1
-rw-r--r--drivers/platform/chrome/Kconfig28
-rw-r--r--drivers/platform/chrome/Makefile2
-rw-r--r--drivers/platform/chrome/chromeos_laptop.c (renamed from drivers/platform/x86/chromeos_laptop.c)0
-rw-r--r--drivers/platform/x86/Kconfig11
-rw-r--r--drivers/platform/x86/Makefile1
-rw-r--r--drivers/platform/x86/asus-laptop.c5
-rw-r--r--drivers/platform/x86/dell-laptop.c288
-rw-r--r--drivers/platform/x86/dell-wmi.c7
-rw-r--r--drivers/platform/x86/eeepc-laptop.c4
-rw-r--r--drivers/platform/x86/hp-wmi.c14
-rw-r--r--drivers/platform/x86/ideapad-laptop.c4
-rw-r--r--drivers/platform/x86/intel_mid_powerbtn.c4
-rw-r--r--drivers/platform/x86/intel_scu_ipc.c117
-rw-r--r--drivers/platform/x86/panasonic-laptop.c5
-rw-r--r--drivers/platform/x86/sony-laptop.c47
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c8
-rw-r--r--drivers/platform/x86/topstar-laptop.c4
-rw-r--r--drivers/platform/x86/toshiba_acpi.c4
-rw-r--r--drivers/platform/x86/wmi.c6
-rw-r--r--drivers/pnp/driver.c12
-rw-r--r--drivers/power/Kconfig1
-rw-r--r--drivers/power/power_supply_core.c12
-rw-r--r--drivers/powercap/intel_rapl.c13
-rw-r--r--drivers/powercap/powercap_sys.c7
-rw-r--r--drivers/regulator/arizona-micsupp.c54
-rw-r--r--drivers/regulator/as3722-regulator.c2
-rw-r--r--drivers/regulator/core.c14
-rw-r--r--drivers/regulator/gpio-regulator.c7
-rw-r--r--drivers/regulator/pfuze100-regulator.c14
-rw-r--r--drivers/regulator/s2mps11.c2
-rw-r--r--drivers/regulator/s5m8767.c2
-rw-r--r--drivers/rtc/rtc-at91rm9200.c2
-rw-r--r--drivers/rtc/rtc-s5m.c118
-rw-r--r--drivers/s390/block/dasd_eckd.c2
-rw-r--r--drivers/s390/block/dasd_genhd.c1
-rw-r--r--drivers/s390/char/sclp_early.c5
-rw-r--r--drivers/s390/char/tty3270.c2
-rw-r--r--drivers/scsi/3w-9xxx.c3
-rw-r--r--drivers/scsi/3w-sas.c3
-rw-r--r--drivers/scsi/3w-xxxx.c3
-rw-r--r--drivers/scsi/aacraid/linit.c1
-rw-r--r--drivers/scsi/arcmsr/arcmsr_hba.c1
-rw-r--r--drivers/scsi/bfa/bfa_fcs.h1
-rw-r--r--drivers/scsi/bfa/bfa_fcs_lport.c14
-rw-r--r--drivers/scsi/bfa/bfad_attr.c7
-rw-r--r--drivers/scsi/gdth.c1
-rw-r--r--drivers/scsi/hosts.c1
-rw-r--r--drivers/scsi/hpsa.c5
-rw-r--r--drivers/scsi/ipr.c3
-rw-r--r--drivers/scsi/ips.c1
-rw-r--r--drivers/scsi/libsas/sas_ata.c2
-rw-r--r--drivers/scsi/megaraid.c1
-rw-r--r--drivers/scsi/megaraid/megaraid_mbox.c1
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c1
-rw-r--r--drivers/scsi/pm8001/pm8001_hwi.c2
-rw-r--r--drivers/scsi/pm8001/pm8001_hwi.h4
-rw-r--r--drivers/scsi/pm8001/pm8001_init.c91
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.c4
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.h9
-rw-r--r--drivers/scsi/pm8001/pm80xx_hwi.c2
-rw-r--r--drivers/scsi/pm8001/pm80xx_hwi.h2
-rw-r--r--drivers/scsi/pmcraid.c21
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c10
-rw-r--r--drivers/scsi/sd.c6
-rw-r--r--drivers/scsi/storvsc_drv.c1
-rw-r--r--drivers/spi/spi-bcm2835.c2
-rw-r--r--drivers/spi/spi-bcm63xx.c2
-rw-r--r--drivers/spi/spi-mpc512x-psc.c2
-rw-r--r--drivers/spi/spi-mxs.c2
-rw-r--r--drivers/spi/spi-pxa2xx.c5
-rw-r--r--drivers/spi/spi-rspi.c3
-rw-r--r--drivers/spi/spi-ti-qspi.c23
-rw-r--r--drivers/spi/spi-txx9.c2
-rw-r--r--drivers/spi/spi.c30
-rw-r--r--drivers/staging/bcm/Bcmnet.c3
-rw-r--r--drivers/staging/btmtk_usb/btmtk_usb.c3
-rw-r--r--drivers/staging/comedi/drivers.c2
-rw-r--r--drivers/staging/comedi/drivers/8255_pci.c15
-rw-r--r--drivers/staging/comedi/drivers/pcl730.c6
-rw-r--r--drivers/staging/comedi/drivers/s626.c2
-rw-r--r--drivers/staging/comedi/drivers/vmk80xx.c2
-rw-r--r--drivers/staging/ft1000/ft1000-usb/ft1000_download.c3
-rw-r--r--drivers/staging/iio/magnetometer/Kconfig2
-rw-r--r--drivers/staging/iio/magnetometer/hmc5843.c7
-rw-r--r--drivers/staging/imx-drm/Makefile4
-rw-r--r--drivers/staging/imx-drm/imx-drm-core.c40
-rw-r--r--drivers/staging/imx-drm/imx-tve.c9
-rw-r--r--drivers/staging/imx-drm/ipu-v3/ipu-common.c32
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/pinger.c4
-rw-r--r--drivers/staging/media/go7007/go7007-usb.c28
-rw-r--r--drivers/staging/netlogic/xlr_net.c3
-rw-r--r--drivers/staging/nvec/nvec.c3
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_ap.c3
-rw-r--r--drivers/staging/rtl8188eu/os_dep/os_intfs.c3
-rw-r--r--drivers/staging/tidspbridge/Kconfig2
-rw-r--r--drivers/staging/tidspbridge/rmgr/drv_interface.c13
-rw-r--r--drivers/staging/vt6655/hostap.c3
-rw-r--r--drivers/staging/vt6656/baseband.c11
-rw-r--r--drivers/staging/vt6656/hostap.c3
-rw-r--r--drivers/staging/vt6656/rndis.h2
-rw-r--r--drivers/staging/zram/zram_drv.c19
-rw-r--r--drivers/staging/zsmalloc/zsmalloc-main.c17
-rw-r--r--drivers/target/iscsi/iscsi_target.c27
-rw-r--r--drivers/target/iscsi/iscsi_target_configfs.c3
-rw-r--r--drivers/target/iscsi/iscsi_target_login.c6
-rw-r--r--drivers/target/target_core_device.c5
-rw-r--r--drivers/target/target_core_file.c8
-rw-r--r--drivers/target/target_core_file.h5
-rw-r--r--drivers/target/target_core_tpg.c10
-rw-r--r--drivers/tty/amiserial.c3
-rw-r--r--drivers/tty/n_tty.c27
-rw-r--r--drivers/tty/serial/8250/8250_dw.c8
-rw-r--r--drivers/tty/serial/8250/Kconfig2
-rw-r--r--drivers/tty/serial/pmac_zilog.c3
-rw-r--r--drivers/tty/serial/xilinx_uartps.c2
-rw-r--r--drivers/tty/tty_io.c1
-rw-r--r--drivers/tty/tty_ldsem.c16
-rw-r--r--drivers/uio/uio.c2
-rw-r--r--drivers/usb/chipidea/core.c4
-rw-r--r--drivers/usb/chipidea/host.c3
-rw-r--r--drivers/usb/chipidea/udc.c3
-rw-r--r--drivers/usb/class/cdc-acm.c2
-rw-r--r--drivers/usb/class/cdc-wdm.c8
-rw-r--r--drivers/usb/core/hub.c5
-rw-r--r--drivers/usb/dwc3/core.c8
-rw-r--r--drivers/usb/dwc3/ep0.c2
-rw-r--r--drivers/usb/dwc3/gadget.c5
-rw-r--r--drivers/usb/gadget/Kconfig1
-rw-r--r--drivers/usb/gadget/composite.c1
-rw-r--r--drivers/usb/gadget/f_fs.c2
-rw-r--r--drivers/usb/gadget/f_mass_storage.c27
-rw-r--r--drivers/usb/gadget/pxa25x_udc.c1
-rw-r--r--drivers/usb/gadget/s3c-hsotg.c7
-rw-r--r--drivers/usb/gadget/storage_common.h4
-rw-r--r--drivers/usb/gadget/tcm_usb_gadget.c2
-rw-r--r--drivers/usb/gadget/zero.c6
-rw-r--r--drivers/usb/host/ohci-at91.c26
-rw-r--r--drivers/usb/host/ohci-pxa27x.c1
-rw-r--r--drivers/usb/host/xhci-pci.c7
-rw-r--r--drivers/usb/host/xhci-ring.c54
-rw-r--r--drivers/usb/musb/musb_core.c9
-rw-r--r--drivers/usb/musb/musb_cppi41.c164
-rw-r--r--drivers/usb/musb/musb_gadget.c4
-rw-r--r--drivers/usb/phy/Kconfig4
-rw-r--r--drivers/usb/phy/phy-am335x.c5
-rw-r--r--drivers/usb/phy/phy-generic.c68
-rw-r--r--drivers/usb/phy/phy-generic.h4
-rw-r--r--drivers/usb/phy/phy-mxs-usb.c2
-rw-r--r--drivers/usb/phy/phy-rcar-gen2-usb.c4
-rw-r--r--drivers/usb/phy/phy-tegra-usb.c2
-rw-r--r--drivers/usb/phy/phy-twl6030-usb.c3
-rw-r--r--drivers/usb/serial/ftdi_sio.c37
-rw-r--r--drivers/usb/serial/generic.c12
-rw-r--r--drivers/usb/serial/mos7840.c32
-rw-r--r--drivers/usb/serial/option.c29
-rw-r--r--drivers/usb/serial/pl2303.c30
-rw-r--r--drivers/usb/serial/spcp8x5.c30
-rw-r--r--drivers/usb/serial/zte_ev.c3
-rw-r--r--drivers/usb/wusbcore/devconnect.c72
-rw-r--r--drivers/usb/wusbcore/security.c98
-rw-r--r--drivers/usb/wusbcore/wusbhc.h6
-rw-r--r--drivers/video/atmel_lcdfb.c1
-rw-r--r--drivers/video/kyro/fbdev.c6
-rw-r--r--drivers/video/offb.c29
-rw-r--r--drivers/video/omap2/displays-new/panel-sony-acx565akm.c5
-rw-r--r--drivers/video/sh_mobile_meram.c2
-rw-r--r--drivers/video/vt8500lcdfb.c25
-rw-r--r--drivers/virtio/virtio_balloon.c2
-rw-r--r--drivers/watchdog/bcm2835_wdt.c1
-rw-r--r--drivers/watchdog/ep93xx_wdt.c1
-rw-r--r--drivers/watchdog/ie6xx_wdt.c1
-rw-r--r--drivers/watchdog/jz4740_wdt.c1
-rw-r--r--drivers/watchdog/kempld_wdt.c1
-rw-r--r--drivers/watchdog/max63xx_wdt.c1
-rw-r--r--drivers/watchdog/orion_wdt.c1
-rw-r--r--drivers/watchdog/pnx4008_wdt.c1
-rw-r--r--drivers/watchdog/rt2880_wdt.c1
-rw-r--r--drivers/watchdog/sc1200wdt.c3
-rw-r--r--drivers/watchdog/shwdt.c1
-rw-r--r--drivers/watchdog/softdog.c1
-rw-r--r--drivers/watchdog/stmp3xxx_rtc_wdt.c1
-rw-r--r--drivers/watchdog/txx9wdt.c1
-rw-r--r--drivers/watchdog/ux500_wdt.c1
-rw-r--r--drivers/xen/balloon.c63
-rw-r--r--drivers/xen/grant-table.c9
-rw-r--r--drivers/xen/privcmd.c9
-rw-r--r--drivers/xen/swiotlb-xen.c5
-rw-r--r--fs/affs/Changes2
-rw-r--r--fs/aio.c115
-rw-r--r--fs/btrfs/check-integrity.c32
-rw-r--r--fs/btrfs/check-integrity.h2
-rw-r--r--fs/btrfs/extent-tree.c22
-rw-r--r--fs/btrfs/extent_io.c12
-rw-r--r--fs/btrfs/ioctl.c3
-rw-r--r--fs/btrfs/relocation.c81
-rw-r--r--fs/btrfs/scrub.c33
-rw-r--r--fs/btrfs/send.c4
-rw-r--r--fs/btrfs/super.c5
-rw-r--r--fs/ceph/addr.c10
-rw-r--r--fs/ceph/cache.c3
-rw-r--r--fs/ceph/caps.c27
-rw-r--r--fs/ceph/dir.c11
-rw-r--r--fs/ceph/inode.c183
-rw-r--r--fs/ceph/mds_client.c61
-rw-r--r--fs/ceph/mds_client.h1
-rw-r--r--fs/ceph/super.h8
-rw-r--r--fs/cifs/cifsglob.h1
-rw-r--r--fs/cifs/cifsproto.h7
-rw-r--r--fs/cifs/cifssmb.c6
-rw-r--r--fs/cifs/dir.c11
-rw-r--r--fs/cifs/inode.c6
-rw-r--r--fs/cifs/ioctl.c6
-rw-r--r--fs/cifs/link.c26
-rw-r--r--fs/cifs/smb2ops.c99
-rw-r--r--fs/cifs/smb2pdu.c92
-rw-r--r--fs/cifs/smb2pdu.h12
-rw-r--r--fs/cifs/smb2proto.h1
-rw-r--r--fs/cifs/smbfsctl.h2
-rw-r--r--fs/dcache.c2
-rw-r--r--fs/eventpoll.c7
-rw-r--r--fs/ext2/super.c1
-rw-r--r--fs/ext4/ext4.h10
-rw-r--r--fs/ext4/ext4_jbd2.c9
-rw-r--r--fs/ext4/extents.c45
-rw-r--r--fs/ext4/inode.c12
-rw-r--r--fs/ext4/mballoc.c17
-rw-r--r--fs/ext4/super.c21
-rw-r--r--fs/gfs2/aops.c30
-rw-r--r--fs/gfs2/glock.c2
-rw-r--r--fs/gfs2/glops.c10
-rw-r--r--fs/gfs2/log.c4
-rw-r--r--fs/gfs2/meta_io.c5
-rw-r--r--fs/gfs2/ops_fstype.c12
-rw-r--r--fs/hfsplus/wrapper.c17
-rw-r--r--fs/jbd2/journal.c18
-rw-r--r--fs/jbd2/recovery.c2
-rw-r--r--fs/jbd2/transaction.c16
-rw-r--r--fs/logfs/dev_bdev.c13
-rw-r--r--fs/namei.c10
-rw-r--r--fs/nfs/blocklayout/blocklayout.h1
-rw-r--r--fs/nfs/blocklayout/extents.c2
-rw-r--r--fs/nfs/dns_resolve.c2
-rw-r--r--fs/nfs/inode.c2
-rw-r--r--fs/nfs/internal.h15
-rw-r--r--fs/nfs/nfs4_fs.h8
-rw-r--r--fs/nfs/nfs4proc.c30
-rw-r--r--fs/nfsd/nfscache.c9
-rw-r--r--fs/pipe.c39
-rw-r--r--fs/proc/inode.c14
-rw-r--r--fs/pstore/platform.c7
-rw-r--r--fs/squashfs/file_direct.c5
-rw-r--r--fs/sysfs/file.c18
-rw-r--r--fs/xfs/xfs_attr_remote.c2
-rw-r--r--fs/xfs/xfs_bmap.c32
-rw-r--r--fs/xfs/xfs_bmap_util.c15
-rw-r--r--fs/xfs/xfs_buf.c37
-rw-r--r--fs/xfs/xfs_buf.h11
-rw-r--r--fs/xfs/xfs_buf_item.c21
-rw-r--r--fs/xfs/xfs_dir2_node.c26
-rw-r--r--fs/xfs/xfs_discard.c5
-rw-r--r--fs/xfs/xfs_fsops.c6
-rw-r--r--fs/xfs/xfs_ioctl.c3
-rw-r--r--fs/xfs/xfs_ioctl32.c3
-rw-r--r--fs/xfs/xfs_iops.c3
-rw-r--r--fs/xfs/xfs_log_recover.c13
-rw-r--r--fs/xfs/xfs_qm.c80
-rw-r--r--fs/xfs/xfs_trans_buf.c13
-rw-r--r--include/acpi/acconfig.h2
-rw-r--r--include/acpi/acpi_bus.h5
-rw-r--r--include/acpi/acpixf.h2
-rw-r--r--include/asm-generic/pgtable.h7
-rw-r--r--include/asm-generic/preempt.h35
-rw-r--r--include/asm-generic/simd.h14
-rw-r--r--include/asm-generic/word-at-a-time.h8
-rw-r--r--include/crypto/ablk_helper.h (renamed from arch/x86/include/asm/crypto/ablk_helper.h)0
-rw-r--r--include/crypto/algapi.h18
-rw-r--r--include/crypto/authenc.h12
-rw-r--r--include/crypto/scatterwalk.h3
-rw-r--r--include/drm/drm_pciids.h2
-rw-r--r--include/linux/assoc_array.h6
-rw-r--r--include/linux/auxvec.h2
-rw-r--r--include/linux/compiler-intel.h2
-rw-r--r--include/linux/dcache.h2
-rw-r--r--include/linux/efi.h4
-rw-r--r--include/linux/ftrace_event.h16
-rw-r--r--include/linux/gpio/driver.h14
-rw-r--r--include/linux/hid-sensor-hub.h5
-rw-r--r--include/linux/hid-sensor-ids.h12
-rw-r--r--include/linux/hugetlb.h5
-rw-r--r--include/linux/ipv6.h1
-rw-r--r--include/linux/irqreturn.h2
-rw-r--r--include/linux/kernel.h3
-rw-r--r--include/linux/kexec.h3
-rw-r--r--include/linux/libata.h1
-rw-r--r--include/linux/lockref.h2
-rw-r--r--include/linux/math64.h30
-rw-r--r--include/linux/mfd/samsung/core.h3
-rw-r--r--include/linux/micrel_phy.h2
-rw-r--r--include/linux/migrate.h12
-rw-r--r--include/linux/mm.h6
-rw-r--r--include/linux/mm_types.h52
-rw-r--r--include/linux/net.h2
-rw-r--r--include/linux/netdevice.h36
-rw-r--r--include/linux/nfs4.h10
-rw-r--r--include/linux/nfs_fs.h18
-rw-r--r--include/linux/padata.h3
-rw-r--r--include/linux/pci.h31
-rw-r--r--include/linux/percpu-defs.h1
-rw-r--r--include/linux/pstore.h3
-rw-r--r--include/linux/reboot.h1
-rw-r--r--include/linux/rtnetlink.h5
-rw-r--r--include/linux/sched.h7
-rw-r--r--include/linux/shmem_fs.h2
-rw-r--r--include/linux/skbuff.h48
-rw-r--r--include/linux/slab.h102
-rw-r--r--include/linux/spi/spi.h12
-rw-r--r--include/linux/tegra-powergate.h27
-rw-r--r--include/linux/tracepoint.h4
-rw-r--r--include/linux/usb.h2
-rw-r--r--include/linux/usb/wusb.h2
-rw-r--r--include/media/videobuf2-core.h2
-rw-r--r--include/net/ip.h2
-rw-r--r--include/net/ipv6.h9
-rw-r--r--include/net/llc_pdu.h2
-rw-r--r--include/net/ping.h3
-rw-r--r--include/net/sctp/structs.h10
-rw-r--r--include/net/sock.h6
-rw-r--r--include/rdma/ib_verbs.h2
-rw-r--r--include/scsi/scsi_host.h6
-rw-r--r--include/sound/memalloc.h2
-rw-r--r--include/sound/soc-dapm.h3
-rw-r--r--include/target/target_core_base.h5
-rw-r--r--include/trace/ftrace.h12
-rw-r--r--include/uapi/drm/radeon_drm.h2
-rw-r--r--include/uapi/drm/vmwgfx_drm.h1
-rw-r--r--include/uapi/linux/eventpoll.h13
-rw-r--r--include/uapi/linux/genetlink.h1
-rw-r--r--include/uapi/linux/if_link.h4
-rw-r--r--include/uapi/linux/input.h6
-rw-r--r--include/uapi/linux/mic_common.h40
-rw-r--r--include/uapi/linux/netlink_diag.h1
-rw-r--r--include/uapi/linux/packet_diag.h1
-rw-r--r--include/uapi/linux/perf_event.h1
-rw-r--r--include/uapi/linux/unix_diag.h1
-rw-r--r--include/uapi/sound/compress_offload.h6
-rw-r--r--include/xen/interface/io/blkif.h10
-rw-r--r--init/Kconfig6
-rw-r--r--kernel/.gitignore1
-rw-r--r--kernel/Makefile7
-rw-r--r--kernel/bounds.c2
-rw-r--r--kernel/cgroup.c85
-rw-r--r--kernel/cpuset.c8
-rw-r--r--kernel/events/core.c29
-rw-r--r--kernel/extable.c4
-rw-r--r--kernel/fork.c1
-rw-r--r--kernel/freezer.c6
-rw-r--r--kernel/futex.c7
-rw-r--r--kernel/irq/pm.c2
-rw-r--r--kernel/kexec.c5
-rw-r--r--kernel/padata.c9
-rw-r--r--kernel/power/console.c1
-rw-r--r--kernel/rcu/tree_plugin.h4
-rw-r--r--kernel/reboot.c2
-rw-r--r--kernel/sched/core.c10
-rw-r--r--kernel/sched/fair.c178
-rw-r--r--kernel/sched/rt.c14
-rw-r--r--kernel/system_certificates.S14
-rw-r--r--kernel/system_keyring.c4
-rw-r--r--kernel/time/tick-common.c15
-rw-r--r--kernel/time/tick-sched.c25
-rw-r--r--kernel/time/timekeeping.c2
-rw-r--r--kernel/timer.c5
-rw-r--r--kernel/trace/ftrace.c66
-rw-r--r--kernel/trace/trace_event_perf.c8
-rw-r--r--kernel/trace/trace_events.c3
-rw-r--r--kernel/trace/trace_syscalls.c10
-rw-r--r--kernel/user.c6
-rw-r--r--kernel/workqueue.c82
-rw-r--r--lib/assoc_array.c4
-rw-r--r--lib/lockref.c9
-rw-r--r--mm/Kconfig2
-rw-r--r--mm/compaction.c4
-rw-r--r--mm/fremap.c8
-rw-r--r--mm/huge_memory.c60
-rw-r--r--mm/memcontrol.c43
-rw-r--r--mm/memory-failure.c24
-rw-r--r--mm/memory.c2
-rw-r--r--mm/mempolicy.c16
-rw-r--r--mm/migrate.c82
-rw-r--r--mm/mlock.c44
-rw-r--r--mm/mprotect.c13
-rw-r--r--mm/page_alloc.c19
-rw-r--r--mm/pgtable-generic.c8
-rw-r--r--mm/rmap.c4
-rw-r--r--mm/shmem.c36
-rw-r--r--net/8021q/vlan_dev.c19
-rw-r--r--net/batman-adv/bat_iv_ogm.c36
-rw-r--r--net/batman-adv/distributed-arp-table.c6
-rw-r--r--net/batman-adv/fragmentation.c8
-rw-r--r--net/batman-adv/icmp_socket.c6
-rw-r--r--net/batman-adv/main.c16
-rw-r--r--net/batman-adv/network-coding.c22
-rw-r--r--net/batman-adv/packet.h124
-rw-r--r--net/batman-adv/routing.c30
-rw-r--r--net/batman-adv/send.c10
-rw-r--r--net/batman-adv/soft-interface.c18
-rw-r--r--net/batman-adv/translation-table.c6
-rw-r--r--net/bluetooth/hci_sock.c26
-rw-r--r--net/bridge/br_multicast.c4
-rw-r--r--net/bridge/br_private.h10
-rw-r--r--net/bridge/br_stp_bpdu.c2
-rw-r--r--net/compat.c2
-rw-r--r--net/core/dev.c31
-rw-r--r--net/core/drop_monitor.c1
-rw-r--r--net/core/flow_dissector.c10
-rw-r--r--net/core/neighbour.c3
-rw-r--r--net/core/netpoll.c13
-rw-r--r--net/core/pktgen.c7
-rw-r--r--net/core/skbuff.c1
-rw-r--r--net/core/sock.c2
-rw-r--r--net/dccp/ipv6.c1
-rw-r--r--net/dccp/probe.c19
-rw-r--r--net/hsr/hsr_framereg.c3
-rw-r--r--net/hsr/hsr_netlink.c28
-rw-r--r--net/ieee802154/6lowpan.c2
-rw-r--r--net/ipv4/fib_rules.c5
-rw-r--r--net/ipv4/gre_offload.c11
-rw-r--r--net/ipv4/inet_diag.c16
-rw-r--r--net/ipv4/ip_gre.c1
-rw-r--r--net/ipv4/ip_output.c5
-rw-r--r--net/ipv4/ip_sockglue.c3
-rw-r--r--net/ipv4/netfilter/ipt_SYNPROXY.c1
-rw-r--r--net/ipv4/netfilter/nft_reject_ipv4.c2
-rw-r--r--net/ipv4/ping.c7
-rw-r--r--net/ipv4/protocol.c8
-rw-r--r--net/ipv4/raw.c2
-rw-r--r--net/ipv4/tcp_ipv4.c2
-rw-r--r--net/ipv4/tcp_memcontrol.c9
-rw-r--r--net/ipv4/tcp_offload.c31
-rw-r--r--net/ipv4/udp.c53
-rw-r--r--net/ipv4/udp_offload.c37
-rw-r--r--net/ipv6/addrconf.c25
-rw-r--r--net/ipv6/datagram.c9
-rw-r--r--net/ipv6/fib6_rules.c6
-rw-r--r--net/ipv6/ip6_output.c40
-rw-r--r--net/ipv6/ip6_tunnel.c21
-rw-r--r--net/ipv6/ip6_vti.c30
-rw-r--r--net/ipv6/ndisc.c3
-rw-r--r--net/ipv6/netfilter/ip6t_SYNPROXY.c1
-rw-r--r--net/ipv6/ping.c3
-rw-r--r--net/ipv6/protocol.c4
-rw-r--r--net/ipv6/raw.c5
-rw-r--r--net/ipv6/route.c34
-rw-r--r--net/ipv6/sit.c58
-rw-r--r--net/ipv6/tcp_ipv6.c1
-rw-r--r--net/ipv6/tcpv6_offload.c32
-rw-r--r--net/ipv6/udp.c5
-rw-r--r--net/l2tp/l2tp_ip6.c3
-rw-r--r--net/llc/af_llc.c5
-rw-r--r--net/mac80211/cfg.c15
-rw-r--r--net/mac80211/ibss.c4
-rw-r--r--net/mac80211/ieee80211_i.h1
-rw-r--r--net/mac80211/iface.c7
-rw-r--r--net/mac80211/main.c3
-rw-r--r--net/mac80211/mesh.c20
-rw-r--r--net/mac80211/mlme.c2
-rw-r--r--net/mac80211/rc80211_minstrel_ht.c7
-rw-r--r--net/mac80211/rx.c3
-rw-r--r--net/mac80211/scan.c2
-rw-r--r--net/mac80211/spectmgmt.c2
-rw-r--r--net/mac80211/tx.c23
-rw-r--r--net/mac80211/util.c11
-rw-r--r--net/netfilter/ipset/ip_set_hash_netnet.c2
-rw-r--r--net/netfilter/ipvs/ip_vs_nfct.c6
-rw-r--r--net/netfilter/nf_conntrack_seqadj.c5
-rw-r--r--net/netfilter/nf_conntrack_timestamp.c1
-rw-r--r--net/netfilter/nf_nat_irc.c32
-rw-r--r--net/netfilter/nf_tables_api.c72
-rw-r--r--net/netfilter/nfnetlink_log.c1
-rw-r--r--net/netfilter/nft_exthdr.c2
-rw-r--r--net/netfilter/xt_hashlimit.c25
-rw-r--r--net/netlink/genetlink.c13
-rw-r--r--net/nfc/core.c2
-rw-r--r--net/packet/af_packet.c69
-rw-r--r--net/rds/ib.c3
-rw-r--r--net/rds/ib_send.c5
-rw-r--r--net/rose/af_rose.c16
-rw-r--r--net/sched/act_api.c26
-rw-r--r--net/sched/act_csum.c12
-rw-r--r--net/sched/act_gact.c9
-rw-r--r--net/sched/act_ipt.c12
-rw-r--r--net/sched/act_mirred.c2
-rw-r--r--net/sched/act_nat.c12
-rw-r--r--net/sched/act_pedit.c10
-rw-r--r--net/sched/act_police.c5
-rw-r--r--net/sched/act_simple.c10
-rw-r--r--net/sched/act_skbedit.c8
-rw-r--r--net/sched/sch_generic.c2
-rw-r--r--net/sched/sch_htb.c20
-rw-r--r--net/sched/sch_netem.c7
-rw-r--r--net/sched/sch_tbf.c139
-rw-r--r--net/sctp/associola.c5
-rw-r--r--net/sctp/output.c6
-rw-r--r--net/sctp/outqueue.c38
-rw-r--r--net/sctp/probe.c17
-rw-r--r--net/sctp/sm_statefuns.c12
-rw-r--r--net/sctp/socket.c36
-rw-r--r--net/sctp/sysctl.c76
-rw-r--r--net/sctp/transport.c2
-rw-r--r--net/socket.c2
-rw-r--r--net/sunrpc/auth_gss/auth_gss.c2
-rw-r--r--net/tipc/core.c7
-rw-r--r--net/tipc/handler.c11
-rw-r--r--net/tipc/link.c1
-rw-r--r--net/tipc/port.c45
-rw-r--r--net/tipc/port.h6
-rw-r--r--net/tipc/socket.c46
-rw-r--r--net/unix/af_unix.c16
-rw-r--r--net/wireless/core.c9
-rw-r--r--net/wireless/ibss.c18
-rw-r--r--net/wireless/nl80211.c60
-rw-r--r--net/wireless/radiotap.c4
-rw-r--r--net/wireless/sme.c22
-rw-r--r--scripts/link-vmlinux.sh4
-rwxr-xr-xscripts/recordmcount.pl3
-rw-r--r--scripts/sortextable.c5
-rw-r--r--security/integrity/digsig.c30
-rw-r--r--security/integrity/ima/Kconfig8
-rw-r--r--security/integrity/ima/ima.h7
-rw-r--r--security/integrity/ima/ima_api.c22
-rw-r--r--security/integrity/ima/ima_appraise.c11
-rw-r--r--security/integrity/ima/ima_crypto.c17
-rw-r--r--security/integrity/ima/ima_fs.c14
-rw-r--r--security/integrity/ima/ima_init.c3
-rw-r--r--security/integrity/ima/ima_template.c21
-rw-r--r--security/integrity/ima/ima_template_lib.c6
-rw-r--r--security/integrity/integrity.h7
-rw-r--r--security/keys/big_key.c2
-rw-r--r--security/keys/key.c8
-rw-r--r--security/keys/keyring.c17
-rw-r--r--security/selinux/hooks.c205
-rw-r--r--security/selinux/include/objsec.h5
-rw-r--r--security/selinux/include/xfrm.h8
-rw-r--r--security/selinux/ss/services.c42
-rw-r--r--security/selinux/xfrm.c62
-rw-r--r--sound/atmel/abdac.c3
-rw-r--r--sound/core/pcm_lib.c2
-rw-r--r--sound/firewire/amdtp.c15
-rw-r--r--sound/firewire/dice.c4
-rw-r--r--sound/pci/hda/hda_codec.h1
-rw-r--r--sound/pci/hda/hda_generic.c126
-rw-r--r--sound/pci/hda/hda_generic.h3
-rw-r--r--sound/pci/hda/hda_intel.c16
-rw-r--r--sound/pci/hda/patch_analog.c16
-rw-r--r--sound/pci/hda/patch_conexant.c24
-rw-r--r--sound/pci/hda/patch_hdmi.c32
-rw-r--r--sound/pci/hda/patch_realtek.c145
-rw-r--r--sound/pci/hda/patch_sigmatel.c3
-rw-r--r--sound/soc/atmel/atmel_ssc_dai.c30
-rw-r--r--sound/soc/atmel/sam9x5_wm8731.c4
-rw-r--r--sound/soc/codecs/wm5110.c27
-rw-r--r--sound/soc/codecs/wm8731.c4
-rw-r--r--sound/soc/codecs/wm8904.c2
-rw-r--r--sound/soc/codecs/wm8962.c13
-rw-r--r--sound/soc/codecs/wm8990.c2
-rw-r--r--sound/soc/codecs/wm_adsp.c10
-rw-r--r--sound/soc/fsl/imx-wm8962.c2
-rw-r--r--sound/soc/fsl/pcm030-audio-fabric.c3
-rw-r--r--sound/soc/kirkwood/kirkwood-i2s.c46
-rw-r--r--sound/soc/omap/n810.c4
-rw-r--r--sound/soc/sh/Kconfig1
-rw-r--r--sound/soc/soc-core.c4
-rw-r--r--sound/soc/soc-devres.c4
-rw-r--r--sound/soc/soc-generic-dmaengine-pcm.c38
-rw-r--r--sound/soc/soc-pcm.c23
-rw-r--r--sound/soc/tegra/tegra20_i2s.c6
-rw-r--r--sound/soc/tegra/tegra20_spdif.c10
-rw-r--r--sound/soc/tegra/tegra30_i2s.c6
-rw-r--r--sound/usb/endpoint.c16
-rw-r--r--sound/usb/mixer_quirks.c2
-rw-r--r--tools/lib/traceevent/event-parse.c25
-rw-r--r--tools/perf/util/header.c6
-rw-r--r--tools/perf/util/thread.c11
-rw-r--r--tools/power/cpupower/man/cpupower-idle-info.13
-rw-r--r--tools/power/cpupower/man/cpupower-idle-set.171
-rw-r--r--tools/power/cpupower/utils/cpupower-set.c6
-rw-r--r--tools/power/cpupower/utils/helpers/sysfs.c4
-rw-r--r--tools/usb/Makefile5
-rw-r--r--virt/kvm/kvm_main.c3
1503 files changed, 17720 insertions, 8367 deletions
diff --git a/CREDITS b/CREDITS
index 4fc997d58ab2..4c7738f49357 100644
--- a/CREDITS
+++ b/CREDITS
@@ -655,6 +655,11 @@ S: Stanford University
655S: Stanford, California 94305 655S: Stanford, California 94305
656S: USA 656S: USA
657 657
658N: Carlos Chinea
659E: carlos.chinea@nokia.com
660E: cch.devel@gmail.com
661D: Author of HSI Subsystem
662
658N: Randolph Chung 663N: Randolph Chung
659E: tausq@debian.org 664E: tausq@debian.org
660D: Linux/PA-RISC hacker 665D: Linux/PA-RISC hacker
diff --git a/Documentation/Changes b/Documentation/Changes
index b17580885273..07c75d18154e 100644
--- a/Documentation/Changes
+++ b/Documentation/Changes
@@ -196,13 +196,6 @@ chmod 0644 /dev/cpu/microcode
196as root before you can use this. You'll probably also want to 196as root before you can use this. You'll probably also want to
197get the user-space microcode_ctl utility to use with this. 197get the user-space microcode_ctl utility to use with this.
198 198
199Powertweak
200----------
201
202If you are running v0.1.17 or earlier, you should upgrade to
203version v0.99.0 or higher. Running old versions may cause problems
204with programs using shared memory.
205
206udev 199udev
207---- 200----
208udev is a userspace application for populating /dev dynamically with 201udev is a userspace application for populating /dev dynamically with
@@ -366,10 +359,6 @@ Intel P6 microcode
366------------------ 359------------------
367o <http://www.urbanmyth.org/microcode/> 360o <http://www.urbanmyth.org/microcode/>
368 361
369Powertweak
370----------
371o <http://powertweak.sourceforge.net/>
372
373udev 362udev
374---- 363----
375o <http://www.kernel.org/pub/linux/utils/kernel/hotplug/udev.html> 364o <http://www.kernel.org/pub/linux/utils/kernel/hotplug/udev.html>
diff --git a/Documentation/DocBook/device-drivers.tmpl b/Documentation/DocBook/device-drivers.tmpl
index 6c9d9d37c83a..f5170082bdb3 100644
--- a/Documentation/DocBook/device-drivers.tmpl
+++ b/Documentation/DocBook/device-drivers.tmpl
@@ -58,7 +58,7 @@
58 </sect1> 58 </sect1>
59 <sect1><title>Wait queues and Wake events</title> 59 <sect1><title>Wait queues and Wake events</title>
60!Iinclude/linux/wait.h 60!Iinclude/linux/wait.h
61!Ekernel/wait.c 61!Ekernel/sched/wait.c
62 </sect1> 62 </sect1>
63 <sect1><title>High-resolution timers</title> 63 <sect1><title>High-resolution timers</title>
64!Iinclude/linux/ktime.h 64!Iinclude/linux/ktime.h
diff --git a/Documentation/DocBook/media/v4l/vidioc-expbuf.xml b/Documentation/DocBook/media/v4l/vidioc-expbuf.xml
index e287c8fc803b..4165e7bfa4ff 100644
--- a/Documentation/DocBook/media/v4l/vidioc-expbuf.xml
+++ b/Documentation/DocBook/media/v4l/vidioc-expbuf.xml
@@ -73,7 +73,8 @@ range from zero to the maximal number of valid planes for the currently active
73format. For the single-planar API, applications must set <structfield> plane 73format. For the single-planar API, applications must set <structfield> plane
74</structfield> to zero. Additional flags may be posted in the <structfield> 74</structfield> to zero. Additional flags may be posted in the <structfield>
75flags </structfield> field. Refer to a manual for open() for details. 75flags </structfield> field. Refer to a manual for open() for details.
76Currently only O_CLOEXEC is supported. All other fields must be set to zero. 76Currently only O_CLOEXEC, O_RDONLY, O_WRONLY, and O_RDWR are supported. All
77other fields must be set to zero.
77In the case of multi-planar API, every plane is exported separately using 78In the case of multi-planar API, every plane is exported separately using
78multiple <constant> VIDIOC_EXPBUF </constant> calls. </para> 79multiple <constant> VIDIOC_EXPBUF </constant> calls. </para>
79 80
@@ -170,8 +171,9 @@ multi-planar API. Otherwise this value must be set to zero. </entry>
170 <entry>__u32</entry> 171 <entry>__u32</entry>
171 <entry><structfield>flags</structfield></entry> 172 <entry><structfield>flags</structfield></entry>
172 <entry>Flags for the newly created file, currently only <constant> 173 <entry>Flags for the newly created file, currently only <constant>
173O_CLOEXEC </constant> is supported, refer to the manual of open() for more 174O_CLOEXEC </constant>, <constant>O_RDONLY</constant>, <constant>O_WRONLY
174details.</entry> 175</constant>, and <constant>O_RDWR</constant> are supported, refer to the manual
176of open() for more details.</entry>
175 </row> 177 </row>
176 <row> 178 <row>
177 <entry>__s32</entry> 179 <entry>__s32</entry>
diff --git a/Documentation/assoc_array.txt b/Documentation/assoc_array.txt
index f4faec0f66e4..2f2c6cdd73c0 100644
--- a/Documentation/assoc_array.txt
+++ b/Documentation/assoc_array.txt
@@ -164,10 +164,10 @@ This points to a number of methods, all of which need to be provided:
164 164
165 (4) Diff the index keys of two objects. 165 (4) Diff the index keys of two objects.
166 166
167 int (*diff_objects)(const void *a, const void *b); 167 int (*diff_objects)(const void *object, const void *index_key);
168 168
169 Return the bit position at which the index keys of two objects differ or 169 Return the bit position at which the index key of the specified object
170 -1 if they are the same. 170 differs from the given index key or -1 if they are the same.
171 171
172 172
173 (5) Free an object. 173 (5) Free an object.
diff --git a/Documentation/block/null_blk.txt b/Documentation/block/null_blk.txt
new file mode 100644
index 000000000000..b2830b435895
--- /dev/null
+++ b/Documentation/block/null_blk.txt
@@ -0,0 +1,72 @@
1Null block device driver
2================================================================================
3
4I. Overview
5
6The null block device (/dev/nullb*) is used for benchmarking the various
7block-layer implementations. It emulates a block device of X gigabytes in size.
8The following instances are possible:
9
10 Single-queue block-layer
11 - Request-based.
12 - Single submission queue per device.
13 - Implements IO scheduling algorithms (CFQ, Deadline, noop).
14 Multi-queue block-layer
15 - Request-based.
16 - Configurable submission queues per device.
17 No block-layer (Known as bio-based)
18 - Bio-based. IO requests are submitted directly to the device driver.
19 - Directly accepts bio data structure and returns them.
20
21All of them have a completion queue for each core in the system.
22
23II. Module parameters applicable for all instances:
24
25queue_mode=[0-2]: Default: 2-Multi-queue
26 Selects which block-layer the module should instantiate with.
27
28 0: Bio-based.
29 1: Single-queue.
30 2: Multi-queue.
31
32home_node=[0--nr_nodes]: Default: NUMA_NO_NODE
33 Selects what CPU node the data structures are allocated from.
34
35gb=[Size in GB]: Default: 250GB
36 The size of the device reported to the system.
37
38bs=[Block size (in bytes)]: Default: 512 bytes
39 The block size reported to the system.
40
41nr_devices=[Number of devices]: Default: 2
42 Number of block devices instantiated. They are instantiated as /dev/nullb0,
43 etc.
44
45irq_mode=[0-2]: Default: 1-Soft-irq
46 The completion mode used for completing IOs to the block-layer.
47
48 0: None.
49 1: Soft-irq. Uses IPI to complete IOs across CPU nodes. Simulates the overhead
50 when IOs are issued from another CPU node than the home the device is
51 connected to.
52 2: Timer: Waits a specific period (completion_nsec) for each IO before
53 completion.
54
55completion_nsec=[ns]: Default: 10.000ns
56 Combined with irq_mode=2 (timer). The time each completion event must wait.
57
58submit_queues=[0..nr_cpus]:
59 The number of submission queues attached to the device driver. If unset, it
60 defaults to 1 on single-queue and bio-based instances. For multi-queue,
61 it is ignored when use_per_node_hctx module parameter is 1.
62
63hw_queue_depth=[0..qdepth]: Default: 64
64 The hardware queue depth of the device.
65
66III: Multi-queue specific parameters
67
68use_per_node_hctx=[0/1]: Default: 0
69 0: The number of submit queues are set to the value of the submit_queues
70 parameter.
71 1: The multi-queue block layer is instantiated with a hardware dispatch
72 queue for each CPU node in the system.
diff --git a/Documentation/device-mapper/cache.txt b/Documentation/device-mapper/cache.txt
index 274752f8bdf9..719320b5ed3f 100644
--- a/Documentation/device-mapper/cache.txt
+++ b/Documentation/device-mapper/cache.txt
@@ -266,10 +266,12 @@ E.g.
266Invalidation is removing an entry from the cache without writing it 266Invalidation is removing an entry from the cache without writing it
267back. Cache blocks can be invalidated via the invalidate_cblocks 267back. Cache blocks can be invalidated via the invalidate_cblocks
268message, which takes an arbitrary number of cblock ranges. Each cblock 268message, which takes an arbitrary number of cblock ranges. Each cblock
269must be expressed as a decimal value, in the future a variant message 269range's end value is "one past the end", meaning 5-10 expresses a range
270that takes cblock ranges expressed in hexidecimal may be needed to 270of values from 5 to 9. Each cblock must be expressed as a decimal
271better support efficient invalidation of larger caches. The cache must 271value, in the future a variant message that takes cblock ranges
272be in passthrough mode when invalidate_cblocks is used. 272expressed in hexidecimal may be needed to better support efficient
273invalidation of larger caches. The cache must be in passthrough mode
274when invalidate_cblocks is used.
273 275
274 invalidate_cblocks [<cblock>|<cblock begin>-<cblock end>]* 276 invalidate_cblocks [<cblock>|<cblock begin>-<cblock end>]*
275 277
diff --git a/Documentation/devicetree/bindings/arm/omap/mpu.txt b/Documentation/devicetree/bindings/arm/omap/mpu.txt
index 1a5a42ce21bb..83f405bde138 100644
--- a/Documentation/devicetree/bindings/arm/omap/mpu.txt
+++ b/Documentation/devicetree/bindings/arm/omap/mpu.txt
@@ -7,10 +7,18 @@ The MPU contain CPUs, GIC, L2 cache and a local PRCM.
7Required properties: 7Required properties:
8- compatible : Should be "ti,omap3-mpu" for OMAP3 8- compatible : Should be "ti,omap3-mpu" for OMAP3
9 Should be "ti,omap4-mpu" for OMAP4 9 Should be "ti,omap4-mpu" for OMAP4
10 Should be "ti,omap5-mpu" for OMAP5
10- ti,hwmods: "mpu" 11- ti,hwmods: "mpu"
11 12
12Examples: 13Examples:
13 14
15- For an OMAP5 SMP system:
16
17mpu {
18 compatible = "ti,omap5-mpu";
19 ti,hwmods = "mpu"
20};
21
14- For an OMAP4 SMP system: 22- For an OMAP4 SMP system:
15 23
16mpu { 24mpu {
diff --git a/Documentation/devicetree/bindings/arm/pmu.txt b/Documentation/devicetree/bindings/arm/pmu.txt
index 343781b9f246..3e1e498fea96 100644
--- a/Documentation/devicetree/bindings/arm/pmu.txt
+++ b/Documentation/devicetree/bindings/arm/pmu.txt
@@ -7,6 +7,7 @@ representation in the device tree should be done as under:-
7Required properties: 7Required properties:
8 8
9- compatible : should be one of 9- compatible : should be one of
10 "arm,armv8-pmuv3"
10 "arm,cortex-a15-pmu" 11 "arm,cortex-a15-pmu"
11 "arm,cortex-a9-pmu" 12 "arm,cortex-a9-pmu"
12 "arm,cortex-a8-pmu" 13 "arm,cortex-a8-pmu"
diff --git a/Documentation/devicetree/bindings/arm/samsung/exynos-adc.txt b/Documentation/devicetree/bindings/arm/samsung/exynos-adc.txt
index 47ada1dff216..5d49f2b37f68 100644
--- a/Documentation/devicetree/bindings/arm/samsung/exynos-adc.txt
+++ b/Documentation/devicetree/bindings/arm/samsung/exynos-adc.txt
@@ -49,7 +49,7 @@ adc@12D10000 {
49 /* NTC thermistor is a hwmon device */ 49 /* NTC thermistor is a hwmon device */
50 ncp15wb473@0 { 50 ncp15wb473@0 {
51 compatible = "ntc,ncp15wb473"; 51 compatible = "ntc,ncp15wb473";
52 pullup-uV = <1800000>; 52 pullup-uv = <1800000>;
53 pullup-ohm = <47000>; 53 pullup-ohm = <47000>;
54 pulldown-ohm = <0>; 54 pulldown-ohm = <0>;
55 io-channels = <&adc 4>; 55 io-channels = <&adc 4>;
diff --git a/Documentation/devicetree/bindings/clock/exynos4-clock.txt b/Documentation/devicetree/bindings/clock/exynos4-clock.txt
index c6bf8a6c8f52..a2ac2d9ac71a 100644
--- a/Documentation/devicetree/bindings/clock/exynos4-clock.txt
+++ b/Documentation/devicetree/bindings/clock/exynos4-clock.txt
@@ -6,7 +6,7 @@ SoC's in the Exynos4 family.
6 6
7Required Properties: 7Required Properties:
8 8
9- comptible: should be one of the following. 9- compatible: should be one of the following.
10 - "samsung,exynos4210-clock" - controller compatible with Exynos4210 SoC. 10 - "samsung,exynos4210-clock" - controller compatible with Exynos4210 SoC.
11 - "samsung,exynos4412-clock" - controller compatible with Exynos4412 SoC. 11 - "samsung,exynos4412-clock" - controller compatible with Exynos4412 SoC.
12 12
diff --git a/Documentation/devicetree/bindings/clock/exynos5250-clock.txt b/Documentation/devicetree/bindings/clock/exynos5250-clock.txt
index 24765c146e31..0f2f920e8734 100644
--- a/Documentation/devicetree/bindings/clock/exynos5250-clock.txt
+++ b/Documentation/devicetree/bindings/clock/exynos5250-clock.txt
@@ -5,7 +5,7 @@ controllers within the Exynos5250 SoC.
5 5
6Required Properties: 6Required Properties:
7 7
8- comptible: should be one of the following. 8- compatible: should be one of the following.
9 - "samsung,exynos5250-clock" - controller compatible with Exynos5250 SoC. 9 - "samsung,exynos5250-clock" - controller compatible with Exynos5250 SoC.
10 10
11- reg: physical base address of the controller and length of memory mapped 11- reg: physical base address of the controller and length of memory mapped
@@ -159,6 +159,8 @@ clock which they consume.
159 mixer 343 159 mixer 343
160 hdmi 344 160 hdmi 344
161 g2d 345 161 g2d 345
162 mdma0 346
163 smmu_mdma0 347
162 164
163 165
164 [Clock Muxes] 166 [Clock Muxes]
diff --git a/Documentation/devicetree/bindings/clock/exynos5420-clock.txt b/Documentation/devicetree/bindings/clock/exynos5420-clock.txt
index 32aa34ecad36..458f34789e5d 100644
--- a/Documentation/devicetree/bindings/clock/exynos5420-clock.txt
+++ b/Documentation/devicetree/bindings/clock/exynos5420-clock.txt
@@ -5,7 +5,7 @@ controllers within the Exynos5420 SoC.
5 5
6Required Properties: 6Required Properties:
7 7
8- comptible: should be one of the following. 8- compatible: should be one of the following.
9 - "samsung,exynos5420-clock" - controller compatible with Exynos5420 SoC. 9 - "samsung,exynos5420-clock" - controller compatible with Exynos5420 SoC.
10 10
11- reg: physical base address of the controller and length of memory mapped 11- reg: physical base address of the controller and length of memory mapped
diff --git a/Documentation/devicetree/bindings/clock/exynos5440-clock.txt b/Documentation/devicetree/bindings/clock/exynos5440-clock.txt
index 4499e9966bc9..9955dc9c7d96 100644
--- a/Documentation/devicetree/bindings/clock/exynos5440-clock.txt
+++ b/Documentation/devicetree/bindings/clock/exynos5440-clock.txt
@@ -5,7 +5,7 @@ controllers within the Exynos5440 SoC.
5 5
6Required Properties: 6Required Properties:
7 7
8- comptible: should be "samsung,exynos5440-clock". 8- compatible: should be "samsung,exynos5440-clock".
9 9
10- reg: physical base address of the controller and length of memory mapped 10- reg: physical base address of the controller and length of memory mapped
11 region. 11 region.
diff --git a/Documentation/devicetree/bindings/gpio/8xxx_gpio.txt b/Documentation/devicetree/bindings/gpio/8xxx_gpio.txt
index b0019eb5330e..798cfc9d3839 100644
--- a/Documentation/devicetree/bindings/gpio/8xxx_gpio.txt
+++ b/Documentation/devicetree/bindings/gpio/8xxx_gpio.txt
@@ -5,16 +5,42 @@ This is for the non-QE/CPM/GUTs GPIO controllers as found on
5 5
6Every GPIO controller node must have #gpio-cells property defined, 6Every GPIO controller node must have #gpio-cells property defined,
7this information will be used to translate gpio-specifiers. 7this information will be used to translate gpio-specifiers.
8See bindings/gpio/gpio.txt for details of how to specify GPIO
9information for devices.
10
11The GPIO module usually is connected to the SoC's internal interrupt
12controller, see bindings/interrupt-controller/interrupts.txt (the
13interrupt client nodes section) for details how to specify this GPIO
14module's interrupt.
15
16The GPIO module may serve as another interrupt controller (cascaded to
17the SoC's internal interrupt controller). See the interrupt controller
18nodes section in bindings/interrupt-controller/interrupts.txt for
19details.
8 20
9Required properties: 21Required properties:
10- compatible : "fsl,<CHIP>-gpio" followed by "fsl,mpc8349-gpio" for 22- compatible: "fsl,<chip>-gpio" followed by "fsl,mpc8349-gpio"
11 83xx, "fsl,mpc8572-gpio" for 85xx and "fsl,mpc8610-gpio" for 86xx. 23 for 83xx, "fsl,mpc8572-gpio" for 85xx, or
12- #gpio-cells : Should be two. The first cell is the pin number and the 24 "fsl,mpc8610-gpio" for 86xx.
13 second cell is used to specify optional parameters (currently unused). 25- #gpio-cells: Should be two. The first cell is the pin number
14 - interrupts : Interrupt mapping for GPIO IRQ. 26 and the second cell is used to specify optional
15 - interrupt-parent : Phandle for the interrupt controller that 27 parameters (currently unused).
16 services interrupts for this device. 28- interrupt-parent: Phandle for the interrupt controller that
17- gpio-controller : Marks the port as GPIO controller. 29 services interrupts for this device.
30- interrupts: Interrupt mapping for GPIO IRQ.
31- gpio-controller: Marks the port as GPIO controller.
32
33Optional properties:
34- interrupt-controller: Empty boolean property which marks the GPIO
35 module as an IRQ controller.
36- #interrupt-cells: Should be two. Defines the number of integer
37 cells required to specify an interrupt within
38 this interrupt controller. The first cell
39 defines the pin number, the second cell
40 defines additional flags (trigger type,
41 trigger polarity). Note that the available
42 set of trigger conditions supported by the
43 GPIO module depends on the actual SoC.
18 44
19Example of gpio-controller nodes for a MPC8347 SoC: 45Example of gpio-controller nodes for a MPC8347 SoC:
20 46
@@ -22,39 +48,27 @@ Example of gpio-controller nodes for a MPC8347 SoC:
22 #gpio-cells = <2>; 48 #gpio-cells = <2>;
23 compatible = "fsl,mpc8347-gpio", "fsl,mpc8349-gpio"; 49 compatible = "fsl,mpc8347-gpio", "fsl,mpc8349-gpio";
24 reg = <0xc00 0x100>; 50 reg = <0xc00 0x100>;
25 interrupts = <74 0x8>;
26 interrupt-parent = <&ipic>; 51 interrupt-parent = <&ipic>;
52 interrupts = <74 0x8>;
27 gpio-controller; 53 gpio-controller;
54 interrupt-controller;
55 #interrupt-cells = <2>;
28 }; 56 };
29 57
30 gpio2: gpio-controller@d00 { 58 gpio2: gpio-controller@d00 {
31 #gpio-cells = <2>; 59 #gpio-cells = <2>;
32 compatible = "fsl,mpc8347-gpio", "fsl,mpc8349-gpio"; 60 compatible = "fsl,mpc8347-gpio", "fsl,mpc8349-gpio";
33 reg = <0xd00 0x100>; 61 reg = <0xd00 0x100>;
34 interrupts = <75 0x8>;
35 interrupt-parent = <&ipic>; 62 interrupt-parent = <&ipic>;
63 interrupts = <75 0x8>;
36 gpio-controller; 64 gpio-controller;
37 }; 65 };
38 66
39See booting-without-of.txt for details of how to specify GPIO 67Example of a peripheral using the GPIO module as an IRQ controller:
40information for devices.
41
42To use GPIO pins as interrupt sources for peripherals, specify the
43GPIO controller as the interrupt parent and define GPIO number +
44trigger mode using the interrupts property, which is defined like
45this:
46
47interrupts = <number trigger>, where:
48 - number: GPIO pin (0..31)
49 - trigger: trigger mode:
50 2 = trigger on falling edge
51 3 = trigger on both edges
52
53Example of device using this is:
54 68
55 funkyfpga@0 { 69 funkyfpga@0 {
56 compatible = "funky-fpga"; 70 compatible = "funky-fpga";
57 ... 71 ...
58 interrupts = <4 3>;
59 interrupt-parent = <&gpio1>; 72 interrupt-parent = <&gpio1>;
73 interrupts = <4 3>;
60 }; 74 };
diff --git a/Documentation/devicetree/bindings/i2c/i2c-omap.txt b/Documentation/devicetree/bindings/i2c/i2c-omap.txt
index 56564aa4b444..7e49839d4124 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-omap.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-omap.txt
@@ -1,7 +1,8 @@
1I2C for OMAP platforms 1I2C for OMAP platforms
2 2
3Required properties : 3Required properties :
4- compatible : Must be "ti,omap3-i2c" or "ti,omap4-i2c" 4- compatible : Must be "ti,omap2420-i2c", "ti,omap2430-i2c", "ti,omap3-i2c"
5 or "ti,omap4-i2c"
5- ti,hwmods : Must be "i2c<n>", n being the instance number (1-based) 6- ti,hwmods : Must be "i2c<n>", n being the instance number (1-based)
6- #address-cells = <1>; 7- #address-cells = <1>;
7- #size-cells = <0>; 8- #size-cells = <0>;
diff --git a/Documentation/devicetree/bindings/mmc/ti-omap.txt b/Documentation/devicetree/bindings/mmc/ti-omap.txt
new file mode 100644
index 000000000000..8de579969763
--- /dev/null
+++ b/Documentation/devicetree/bindings/mmc/ti-omap.txt
@@ -0,0 +1,54 @@
1* TI MMC host controller for OMAP1 and 2420
2
3The MMC Host Controller on TI OMAP1 and 2420 family provides
4an interface for MMC, SD, and SDIO types of memory cards.
5
6This file documents differences between the core properties described
7by mmc.txt and the properties used by the omap mmc driver.
8
9Note that this driver will not work with omap2430 or later omaps,
10please see the omap hsmmc driver for the current omaps.
11
12Required properties:
13- compatible: Must be "ti,omap2420-mmc", for OMAP2420 controllers
14- ti,hwmods: For 2420, must be "msdi<n>", where n is controller
15 instance starting 1
16
17Examples:
18
19 msdi1: mmc@4809c000 {
20 compatible = "ti,omap2420-mmc";
21 ti,hwmods = "msdi1";
22 reg = <0x4809c000 0x80>;
23 interrupts = <83>;
24 dmas = <&sdma 61 &sdma 62>;
25 dma-names = "tx", "rx";
26 };
27
28* TI MMC host controller for OMAP1 and 2420
29
30The MMC Host Controller on TI OMAP1 and 2420 family provides
31an interface for MMC, SD, and SDIO types of memory cards.
32
33This file documents differences between the core properties described
34by mmc.txt and the properties used by the omap mmc driver.
35
36Note that this driver will not work with omap2430 or later omaps,
37please see the omap hsmmc driver for the current omaps.
38
39Required properties:
40- compatible: Must be "ti,omap2420-mmc", for OMAP2420 controllers
41- ti,hwmods: For 2420, must be "msdi<n>", where n is controller
42 instance starting 1
43
44Examples:
45
46 msdi1: mmc@4809c000 {
47 compatible = "ti,omap2420-mmc";
48 ti,hwmods = "msdi1";
49 reg = <0x4809c000 0x80>;
50 interrupts = <83>;
51 dmas = <&sdma 61 &sdma 62>;
52 dma-names = "tx", "rx";
53 };
54
diff --git a/Documentation/devicetree/bindings/net/davinci_emac.txt b/Documentation/devicetree/bindings/net/davinci_emac.txt
index 48b259e29e87..bad381faf036 100644
--- a/Documentation/devicetree/bindings/net/davinci_emac.txt
+++ b/Documentation/devicetree/bindings/net/davinci_emac.txt
@@ -4,7 +4,7 @@ This file provides information, what the device node
4for the davinci_emac interface contains. 4for the davinci_emac interface contains.
5 5
6Required properties: 6Required properties:
7- compatible: "ti,davinci-dm6467-emac"; 7- compatible: "ti,davinci-dm6467-emac" or "ti,am3517-emac"
8- reg: Offset and length of the register set for the device 8- reg: Offset and length of the register set for the device
9- ti,davinci-ctrl-reg-offset: offset to control register 9- ti,davinci-ctrl-reg-offset: offset to control register
10- ti,davinci-ctrl-mod-reg-offset: offset to control module register 10- ti,davinci-ctrl-mod-reg-offset: offset to control module register
diff --git a/Documentation/devicetree/bindings/net/fsl-fec.txt b/Documentation/devicetree/bindings/net/fsl-fec.txt
index d53639221403..845ff848d895 100644
--- a/Documentation/devicetree/bindings/net/fsl-fec.txt
+++ b/Documentation/devicetree/bindings/net/fsl-fec.txt
@@ -15,6 +15,7 @@ Optional properties:
15 only if property "phy-reset-gpios" is available. Missing the property 15 only if property "phy-reset-gpios" is available. Missing the property
16 will have the duration be 1 millisecond. Numbers greater than 1000 are 16 will have the duration be 1 millisecond. Numbers greater than 1000 are
17 invalid and 1 millisecond will be used instead. 17 invalid and 1 millisecond will be used instead.
18- phy-supply: regulator that powers the Ethernet PHY.
18 19
19Example: 20Example:
20 21
@@ -25,4 +26,5 @@ ethernet@83fec000 {
25 phy-mode = "mii"; 26 phy-mode = "mii";
26 phy-reset-gpios = <&gpio2 14 0>; /* GPIO2_14 */ 27 phy-reset-gpios = <&gpio2 14 0>; /* GPIO2_14 */
27 local-mac-address = [00 04 9F 01 1B B9]; 28 local-mac-address = [00 04 9F 01 1B B9];
29 phy-supply = <&reg_fec_supply>;
28}; 30};
diff --git a/Documentation/devicetree/bindings/net/smsc-lan91c111.txt b/Documentation/devicetree/bindings/net/smsc-lan91c111.txt
index 953049b4248a..5a41a8658daa 100644
--- a/Documentation/devicetree/bindings/net/smsc-lan91c111.txt
+++ b/Documentation/devicetree/bindings/net/smsc-lan91c111.txt
@@ -8,3 +8,7 @@ Required properties:
8Optional properties: 8Optional properties:
9- phy-device : phandle to Ethernet phy 9- phy-device : phandle to Ethernet phy
10- local-mac-address : Ethernet mac address to use 10- local-mac-address : Ethernet mac address to use
11- reg-io-width : Mask of sizes (in bytes) of the IO accesses that
12 are supported on the device. Valid value for SMSC LAN91c111 are
13 1, 2 or 4. If it's omitted or invalid, the size would be 2 meaning
14 16-bit access only.
diff --git a/Documentation/devicetree/bindings/rng/qcom,prng.txt b/Documentation/devicetree/bindings/rng/qcom,prng.txt
new file mode 100644
index 000000000000..8e5853c2879b
--- /dev/null
+++ b/Documentation/devicetree/bindings/rng/qcom,prng.txt
@@ -0,0 +1,17 @@
1Qualcomm MSM pseudo random number generator.
2
3Required properties:
4
5- compatible : should be "qcom,prng"
6- reg : specifies base physical address and size of the registers map
7- clocks : phandle to clock-controller plus clock-specifier pair
8- clock-names : "core" clocks all registers, FIFO and circuits in PRNG IP block
9
10Example:
11
12 rng@f9bff000 {
13 compatible = "qcom,prng";
14 reg = <0xf9bff000 0x200>;
15 clocks = <&clock GCC_PRNG_AHB_CLK>;
16 clock-names = "core";
17 };
diff --git a/Documentation/devicetree/bindings/spi/nvidia,tegra20-spi.txt b/Documentation/devicetree/bindings/spi/nvidia,tegra20-spi.txt
deleted file mode 100644
index 6b9e51896693..000000000000
--- a/Documentation/devicetree/bindings/spi/nvidia,tegra20-spi.txt
+++ /dev/null
@@ -1,5 +0,0 @@
1NVIDIA Tegra 2 SPI device
2
3Required properties:
4- compatible : should be "nvidia,tegra20-spi".
5- gpios : should specify GPIOs used for chipselect.
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index ce95ed1c6d3e..edbb8d88c85e 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -32,12 +32,14 @@ est ESTeem Wireless Modems
32fsl Freescale Semiconductor 32fsl Freescale Semiconductor
33GEFanuc GE Fanuc Intelligent Platforms Embedded Systems, Inc. 33GEFanuc GE Fanuc Intelligent Platforms Embedded Systems, Inc.
34gef GE Fanuc Intelligent Platforms Embedded Systems, Inc. 34gef GE Fanuc Intelligent Platforms Embedded Systems, Inc.
35gmt Global Mixed-mode Technology, Inc.
35hisilicon Hisilicon Limited. 36hisilicon Hisilicon Limited.
36hp Hewlett Packard 37hp Hewlett Packard
37ibm International Business Machines (IBM) 38ibm International Business Machines (IBM)
38idt Integrated Device Technologies, Inc. 39idt Integrated Device Technologies, Inc.
39img Imagination Technologies Ltd. 40img Imagination Technologies Ltd.
40intercontrol Inter Control Group 41intercontrol Inter Control Group
42lg LG Corporation
41linux Linux-specific binding 43linux Linux-specific binding
42lsi LSI Corp. (LSI Logic) 44lsi LSI Corp. (LSI Logic)
43marvell Marvell Technology Group Ltd. 45marvell Marvell Technology Group Ltd.
diff --git a/Documentation/gpio/00-INDEX b/Documentation/gpio/00-INDEX
new file mode 100644
index 000000000000..1de43ae46ae6
--- /dev/null
+++ b/Documentation/gpio/00-INDEX
@@ -0,0 +1,14 @@
100-INDEX
2 - This file
3gpio.txt
4 - Introduction to GPIOs and their kernel interfaces
5consumer.txt
6 - How to obtain and use GPIOs in a driver
7driver.txt
8 - How to write a GPIO driver
9board.txt
10 - How to assign GPIOs to a consumer device and a function
11sysfs.txt
12 - Information about the GPIO sysfs interface
13gpio-legacy.txt
14 - Historical documentation of the deprecated GPIO integer interface
diff --git a/Documentation/gpio/board.txt b/Documentation/gpio/board.txt
new file mode 100644
index 000000000000..0d03506f2cc5
--- /dev/null
+++ b/Documentation/gpio/board.txt
@@ -0,0 +1,115 @@
1GPIO Mappings
2=============
3
4This document explains how GPIOs can be assigned to given devices and functions.
5Note that it only applies to the new descriptor-based interface. For a
6description of the deprecated integer-based GPIO interface please refer to
7gpio-legacy.txt (actually, there is no real mapping possible with the old
8interface; you just fetch an integer from somewhere and request the
9corresponding GPIO.
10
11Platforms that make use of GPIOs must select ARCH_REQUIRE_GPIOLIB (if GPIO usage
12is mandatory) or ARCH_WANT_OPTIONAL_GPIOLIB (if GPIO support can be omitted) in
13their Kconfig. Then, how GPIOs are mapped depends on what the platform uses to
14describe its hardware layout. Currently, mappings can be defined through device
15tree, ACPI, and platform data.
16
17Device Tree
18-----------
19GPIOs can easily be mapped to devices and functions in the device tree. The
20exact way to do it depends on the GPIO controller providing the GPIOs, see the
21device tree bindings for your controller.
22
23GPIOs mappings are defined in the consumer device's node, in a property named
24<function>-gpios, where <function> is the function the driver will request
25through gpiod_get(). For example:
26
27 foo_device {
28 compatible = "acme,foo";
29 ...
30 led-gpios = <&gpio 15 GPIO_ACTIVE_HIGH>, /* red */
31 <&gpio 16 GPIO_ACTIVE_HIGH>, /* green */
32 <&gpio 17 GPIO_ACTIVE_HIGH>; /* blue */
33
34 power-gpio = <&gpio 1 GPIO_ACTIVE_LOW>;
35 };
36
37This property will make GPIOs 15, 16 and 17 available to the driver under the
38"led" function, and GPIO 1 as the "power" GPIO:
39
40 struct gpio_desc *red, *green, *blue, *power;
41
42 red = gpiod_get_index(dev, "led", 0);
43 green = gpiod_get_index(dev, "led", 1);
44 blue = gpiod_get_index(dev, "led", 2);
45
46 power = gpiod_get(dev, "power");
47
48The led GPIOs will be active-high, while the power GPIO will be active-low (i.e.
49gpiod_is_active_low(power) will be true).
50
51ACPI
52----
53ACPI does not support function names for GPIOs. Therefore, only the "idx"
54argument of gpiod_get_index() is useful to discriminate between GPIOs assigned
55to a device. The "con_id" argument can still be set for debugging purposes (it
56will appear under error messages as well as debug and sysfs nodes).
57
58Platform Data
59-------------
60Finally, GPIOs can be bound to devices and functions using platform data. Board
61files that desire to do so need to include the following header:
62
63 #include <linux/gpio/driver.h>
64
65GPIOs are mapped by the means of tables of lookups, containing instances of the
66gpiod_lookup structure. Two macros are defined to help declaring such mappings:
67
68 GPIO_LOOKUP(chip_label, chip_hwnum, dev_id, con_id, flags)
69 GPIO_LOOKUP_IDX(chip_label, chip_hwnum, dev_id, con_id, idx, flags)
70
71where
72
73 - chip_label is the label of the gpiod_chip instance providing the GPIO
74 - chip_hwnum is the hardware number of the GPIO within the chip
75 - dev_id is the identifier of the device that will make use of this GPIO. If
76 NULL, the GPIO will be available to all devices.
77 - con_id is the name of the GPIO function from the device point of view. It
78 can be NULL.
79 - idx is the index of the GPIO within the function.
80 - flags is defined to specify the following properties:
81 * GPIOF_ACTIVE_LOW - to configure the GPIO as active-low
82 * GPIOF_OPEN_DRAIN - GPIO pin is open drain type.
83 * GPIOF_OPEN_SOURCE - GPIO pin is open source type.
84
85In the future, these flags might be extended to support more properties.
86
87Note that GPIO_LOOKUP() is just a shortcut to GPIO_LOOKUP_IDX() where idx = 0.
88
89A lookup table can then be defined as follows:
90
91 struct gpiod_lookup gpios_table[] = {
92 GPIO_LOOKUP_IDX("gpio.0", 15, "foo.0", "led", 0, GPIO_ACTIVE_HIGH),
93 GPIO_LOOKUP_IDX("gpio.0", 16, "foo.0", "led", 1, GPIO_ACTIVE_HIGH),
94 GPIO_LOOKUP_IDX("gpio.0", 17, "foo.0", "led", 2, GPIO_ACTIVE_HIGH),
95 GPIO_LOOKUP("gpio.0", 1, "foo.0", "power", GPIO_ACTIVE_LOW),
96 };
97
98And the table can be added by the board code as follows:
99
100 gpiod_add_table(gpios_table, ARRAY_SIZE(gpios_table));
101
102The driver controlling "foo.0" will then be able to obtain its GPIOs as follows:
103
104 struct gpio_desc *red, *green, *blue, *power;
105
106 red = gpiod_get_index(dev, "led", 0);
107 green = gpiod_get_index(dev, "led", 1);
108 blue = gpiod_get_index(dev, "led", 2);
109
110 power = gpiod_get(dev, "power");
111 gpiod_direction_output(power, 1);
112
113Since the "power" GPIO is mapped as active-low, its actual signal will be 0
114after this code. Contrary to the legacy integer GPIO interface, the active-low
115property is handled during mapping and is thus transparent to GPIO consumers.
diff --git a/Documentation/gpio/consumer.txt b/Documentation/gpio/consumer.txt
new file mode 100644
index 000000000000..07c74a3765a0
--- /dev/null
+++ b/Documentation/gpio/consumer.txt
@@ -0,0 +1,197 @@
1GPIO Descriptor Consumer Interface
2==================================
3
4This document describes the consumer interface of the GPIO framework. Note that
5it describes the new descriptor-based interface. For a description of the
6deprecated integer-based GPIO interface please refer to gpio-legacy.txt.
7
8
9Guidelines for GPIOs consumers
10==============================
11
12Drivers that can't work without standard GPIO calls should have Kconfig entries
13that depend on GPIOLIB. The functions that allow a driver to obtain and use
14GPIOs are available by including the following file:
15
16 #include <linux/gpio/consumer.h>
17
18All the functions that work with the descriptor-based GPIO interface are
19prefixed with gpiod_. The gpio_ prefix is used for the legacy interface. No
20other function in the kernel should use these prefixes.
21
22
23Obtaining and Disposing GPIOs
24=============================
25
26With the descriptor-based interface, GPIOs are identified with an opaque,
27non-forgeable handler that must be obtained through a call to one of the
28gpiod_get() functions. Like many other kernel subsystems, gpiod_get() takes the
29device that will use the GPIO and the function the requested GPIO is supposed to
30fulfill:
31
32 struct gpio_desc *gpiod_get(struct device *dev, const char *con_id)
33
34If a function is implemented by using several GPIOs together (e.g. a simple LED
35device that displays digits), an additional index argument can be specified:
36
37 struct gpio_desc *gpiod_get_index(struct device *dev,
38 const char *con_id, unsigned int idx)
39
40Both functions return either a valid GPIO descriptor, or an error code checkable
41with IS_ERR(). They will never return a NULL pointer.
42
43Device-managed variants of these functions are also defined:
44
45 struct gpio_desc *devm_gpiod_get(struct device *dev, const char *con_id)
46
47 struct gpio_desc *devm_gpiod_get_index(struct device *dev,
48 const char *con_id,
49 unsigned int idx)
50
51A GPIO descriptor can be disposed of using the gpiod_put() function:
52
53 void gpiod_put(struct gpio_desc *desc)
54
55It is strictly forbidden to use a descriptor after calling this function. The
56device-managed variant is, unsurprisingly:
57
58 void devm_gpiod_put(struct device *dev, struct gpio_desc *desc)
59
60
61Using GPIOs
62===========
63
64Setting Direction
65-----------------
66The first thing a driver must do with a GPIO is setting its direction. This is
67done by invoking one of the gpiod_direction_*() functions:
68
69 int gpiod_direction_input(struct gpio_desc *desc)
70 int gpiod_direction_output(struct gpio_desc *desc, int value)
71
72The return value is zero for success, else a negative errno. It should be
73checked, since the get/set calls don't return errors and since misconfiguration
74is possible. You should normally issue these calls from a task context. However,
75for spinlock-safe GPIOs it is OK to use them before tasking is enabled, as part
76of early board setup.
77
78For output GPIOs, the value provided becomes the initial output value. This
79helps avoid signal glitching during system startup.
80
81A driver can also query the current direction of a GPIO:
82
83 int gpiod_get_direction(const struct gpio_desc *desc)
84
85This function will return either GPIOF_DIR_IN or GPIOF_DIR_OUT.
86
87Be aware that there is no default direction for GPIOs. Therefore, **using a GPIO
88without setting its direction first is illegal and will result in undefined
89behavior!**
90
91
92Spinlock-Safe GPIO Access
93-------------------------
94Most GPIO controllers can be accessed with memory read/write instructions. Those
95don't need to sleep, and can safely be done from inside hard (non-threaded) IRQ
96handlers and similar contexts.
97
98Use the following calls to access GPIOs from an atomic context:
99
100 int gpiod_get_value(const struct gpio_desc *desc);
101 void gpiod_set_value(struct gpio_desc *desc, int value);
102
103The values are boolean, zero for low, nonzero for high. When reading the value
104of an output pin, the value returned should be what's seen on the pin. That
105won't always match the specified output value, because of issues including
106open-drain signaling and output latencies.
107
108The get/set calls do not return errors because "invalid GPIO" should have been
109reported earlier from gpiod_direction_*(). However, note that not all platforms
110can read the value of output pins; those that can't should always return zero.
111Also, using these calls for GPIOs that can't safely be accessed without sleeping
112(see below) is an error.
113
114
115GPIO Access That May Sleep
116--------------------------
117Some GPIO controllers must be accessed using message based buses like I2C or
118SPI. Commands to read or write those GPIO values require waiting to get to the
119head of a queue to transmit a command and get its response. This requires
120sleeping, which can't be done from inside IRQ handlers.
121
122Platforms that support this type of GPIO distinguish them from other GPIOs by
123returning nonzero from this call:
124
125 int gpiod_cansleep(const struct gpio_desc *desc)
126
127To access such GPIOs, a different set of accessors is defined:
128
129 int gpiod_get_value_cansleep(const struct gpio_desc *desc)
130 void gpiod_set_value_cansleep(struct gpio_desc *desc, int value)
131
132Accessing such GPIOs requires a context which may sleep, for example a threaded
133IRQ handler, and those accessors must be used instead of spinlock-safe
134accessors without the cansleep() name suffix.
135
136Other than the fact that these accessors might sleep, and will work on GPIOs
137that can't be accessed from hardIRQ handlers, these calls act the same as the
138spinlock-safe calls.
139
140
141Active-low State and Raw GPIO Values
142------------------------------------
143Device drivers like to manage the logical state of a GPIO, i.e. the value their
144device will actually receive, no matter what lies between it and the GPIO line.
145In some cases, it might make sense to control the actual GPIO line value. The
146following set of calls ignore the active-low property of a GPIO and work on the
147raw line value:
148
149 int gpiod_get_raw_value(const struct gpio_desc *desc)
150 void gpiod_set_raw_value(struct gpio_desc *desc, int value)
151 int gpiod_get_raw_value_cansleep(const struct gpio_desc *desc)
152 void gpiod_set_raw_value_cansleep(struct gpio_desc *desc, int value)
153
154The active-low state of a GPIO can also be queried using the following call:
155
156 int gpiod_is_active_low(const struct gpio_desc *desc)
157
158Note that these functions should only be used with great moderation ; a driver
159should not have to care about the physical line level.
160
161GPIOs mapped to IRQs
162--------------------
163GPIO lines can quite often be used as IRQs. You can get the IRQ number
164corresponding to a given GPIO using the following call:
165
166 int gpiod_to_irq(const struct gpio_desc *desc)
167
168It will return an IRQ number, or an negative errno code if the mapping can't be
169done (most likely because that particular GPIO cannot be used as IRQ). It is an
170unchecked error to use a GPIO that wasn't set up as an input using
171gpiod_direction_input(), or to use an IRQ number that didn't originally come
172from gpiod_to_irq(). gpiod_to_irq() is not allowed to sleep.
173
174Non-error values returned from gpiod_to_irq() can be passed to request_irq() or
175free_irq(). They will often be stored into IRQ resources for platform devices,
176by the board-specific initialization code. Note that IRQ trigger options are
177part of the IRQ interface, e.g. IRQF_TRIGGER_FALLING, as are system wakeup
178capabilities.
179
180
181Interacting With the Legacy GPIO Subsystem
182==========================================
183Many kernel subsystems still handle GPIOs using the legacy integer-based
184interface. Although it is strongly encouraged to upgrade them to the safer
185descriptor-based API, the following two functions allow you to convert a GPIO
186descriptor into the GPIO integer namespace and vice-versa:
187
188 int desc_to_gpio(const struct gpio_desc *desc)
189 struct gpio_desc *gpio_to_desc(unsigned gpio)
190
191The GPIO number returned by desc_to_gpio() can be safely used as long as the
192GPIO descriptor has not been freed. All the same, a GPIO number passed to
193gpio_to_desc() must have been properly acquired, and usage of the returned GPIO
194descriptor is only possible after the GPIO number has been released.
195
196Freeing a GPIO obtained by one API with the other API is forbidden and an
197unchecked error.
diff --git a/Documentation/gpio/driver.txt b/Documentation/gpio/driver.txt
new file mode 100644
index 000000000000..9da0bfa74781
--- /dev/null
+++ b/Documentation/gpio/driver.txt
@@ -0,0 +1,75 @@
1GPIO Descriptor Driver Interface
2================================
3
4This document serves as a guide for GPIO chip drivers writers. Note that it
5describes the new descriptor-based interface. For a description of the
6deprecated integer-based GPIO interface please refer to gpio-legacy.txt.
7
8Each GPIO controller driver needs to include the following header, which defines
9the structures used to define a GPIO driver:
10
11 #include <linux/gpio/driver.h>
12
13
14Internal Representation of GPIOs
15================================
16
17Inside a GPIO driver, individual GPIOs are identified by their hardware number,
18which is a unique number between 0 and n, n being the number of GPIOs managed by
19the chip. This number is purely internal: the hardware number of a particular
20GPIO descriptor is never made visible outside of the driver.
21
22On top of this internal number, each GPIO also need to have a global number in
23the integer GPIO namespace so that it can be used with the legacy GPIO
24interface. Each chip must thus have a "base" number (which can be automatically
25assigned), and for each GPIO the global number will be (base + hardware number).
26Although the integer representation is considered deprecated, it still has many
27users and thus needs to be maintained.
28
29So for example one platform could use numbers 32-159 for GPIOs, with a
30controller defining 128 GPIOs at a "base" of 32 ; while another platform uses
31numbers 0..63 with one set of GPIO controllers, 64-79 with another type of GPIO
32controller, and on one particular board 80-95 with an FPGA. The numbers need not
33be contiguous; either of those platforms could also use numbers 2000-2063 to
34identify GPIOs in a bank of I2C GPIO expanders.
35
36
37Controller Drivers: gpio_chip
38=============================
39
40In the gpiolib framework each GPIO controller is packaged as a "struct
41gpio_chip" (see linux/gpio/driver.h for its complete definition) with members
42common to each controller of that type:
43
44 - methods to establish GPIO direction
45 - methods used to access GPIO values
46 - method to return the IRQ number associated to a given GPIO
47 - flag saying whether calls to its methods may sleep
48 - optional debugfs dump method (showing extra state like pullup config)
49 - optional base number (will be automatically assigned if omitted)
50 - label for diagnostics and GPIOs mapping using platform data
51
52The code implementing a gpio_chip should support multiple instances of the
53controller, possibly using the driver model. That code will configure each
54gpio_chip and issue gpiochip_add(). Removing a GPIO controller should be rare;
55use gpiochip_remove() when it is unavoidable.
56
57Most often a gpio_chip is part of an instance-specific structure with state not
58exposed by the GPIO interfaces, such as addressing, power management, and more.
59Chips such as codecs will have complex non-GPIO state.
60
61Any debugfs dump method should normally ignore signals which haven't been
62requested as GPIOs. They can use gpiochip_is_requested(), which returns either
63NULL or the label associated with that GPIO when it was requested.
64
65Locking IRQ usage
66-----------------
67Input GPIOs can be used as IRQ signals. When this happens, a driver is requested
68to mark the GPIO as being used as an IRQ:
69
70 int gpiod_lock_as_irq(struct gpio_desc *desc)
71
72This will prevent the use of non-irq related GPIO APIs until the GPIO IRQ lock
73is released:
74
75 void gpiod_unlock_as_irq(struct gpio_desc *desc)
diff --git a/Documentation/gpio.txt b/Documentation/gpio/gpio-legacy.txt
index 6f83fa965b4b..6f83fa965b4b 100644
--- a/Documentation/gpio.txt
+++ b/Documentation/gpio/gpio-legacy.txt
diff --git a/Documentation/gpio/gpio.txt b/Documentation/gpio/gpio.txt
new file mode 100644
index 000000000000..cd9b356e88cd
--- /dev/null
+++ b/Documentation/gpio/gpio.txt
@@ -0,0 +1,119 @@
1GPIO Interfaces
2===============
3
4The documents in this directory give detailed instructions on how to access
5GPIOs in drivers, and how to write a driver for a device that provides GPIOs
6itself.
7
8Due to the history of GPIO interfaces in the kernel, there are two different
9ways to obtain and use GPIOs:
10
11 - The descriptor-based interface is the preferred way to manipulate GPIOs,
12and is described by all the files in this directory excepted gpio-legacy.txt.
13 - The legacy integer-based interface which is considered deprecated (but still
14usable for compatibility reasons) is documented in gpio-legacy.txt.
15
16The remainder of this document applies to the new descriptor-based interface.
17gpio-legacy.txt contains the same information applied to the legacy
18integer-based interface.
19
20
21What is a GPIO?
22===============
23
24A "General Purpose Input/Output" (GPIO) is a flexible software-controlled
25digital signal. They are provided from many kinds of chip, and are familiar
26to Linux developers working with embedded and custom hardware. Each GPIO
27represents a bit connected to a particular pin, or "ball" on Ball Grid Array
28(BGA) packages. Board schematics show which external hardware connects to
29which GPIOs. Drivers can be written generically, so that board setup code
30passes such pin configuration data to drivers.
31
32System-on-Chip (SOC) processors heavily rely on GPIOs. In some cases, every
33non-dedicated pin can be configured as a GPIO; and most chips have at least
34several dozen of them. Programmable logic devices (like FPGAs) can easily
35provide GPIOs; multifunction chips like power managers, and audio codecs
36often have a few such pins to help with pin scarcity on SOCs; and there are
37also "GPIO Expander" chips that connect using the I2C or SPI serial buses.
38Most PC southbridges have a few dozen GPIO-capable pins (with only the BIOS
39firmware knowing how they're used).
40
41The exact capabilities of GPIOs vary between systems. Common options:
42
43 - Output values are writable (high=1, low=0). Some chips also have
44 options about how that value is driven, so that for example only one
45 value might be driven, supporting "wire-OR" and similar schemes for the
46 other value (notably, "open drain" signaling).
47
48 - Input values are likewise readable (1, 0). Some chips support readback
49 of pins configured as "output", which is very useful in such "wire-OR"
50 cases (to support bidirectional signaling). GPIO controllers may have
51 input de-glitch/debounce logic, sometimes with software controls.
52
53 - Inputs can often be used as IRQ signals, often edge triggered but
54 sometimes level triggered. Such IRQs may be configurable as system
55 wakeup events, to wake the system from a low power state.
56
57 - Usually a GPIO will be configurable as either input or output, as needed
58 by different product boards; single direction ones exist too.
59
60 - Most GPIOs can be accessed while holding spinlocks, but those accessed
61 through a serial bus normally can't. Some systems support both types.
62
63On a given board each GPIO is used for one specific purpose like monitoring
64MMC/SD card insertion/removal, detecting card write-protect status, driving
65a LED, configuring a transceiver, bit-banging a serial bus, poking a hardware
66watchdog, sensing a switch, and so on.
67
68
69Common GPIO Properties
70======================
71
72These properties are met through all the other documents of the GPIO interface
73and it is useful to understand them, especially if you need to define GPIO
74mappings.
75
76Active-High and Active-Low
77--------------------------
78It is natural to assume that a GPIO is "active" when its output signal is 1
79("high"), and inactive when it is 0 ("low"). However in practice the signal of a
80GPIO may be inverted before is reaches its destination, or a device could decide
81to have different conventions about what "active" means. Such decisions should
82be transparent to device drivers, therefore it is possible to define a GPIO as
83being either active-high ("1" means "active", the default) or active-low ("0"
84means "active") so that drivers only need to worry about the logical signal and
85not about what happens at the line level.
86
87Open Drain and Open Source
88--------------------------
89Sometimes shared signals need to use "open drain" (where only the low signal
90level is actually driven), or "open source" (where only the high signal level is
91driven) signaling. That term applies to CMOS transistors; "open collector" is
92used for TTL. A pullup or pulldown resistor causes the high or low signal level.
93This is sometimes called a "wire-AND"; or more practically, from the negative
94logic (low=true) perspective this is a "wire-OR".
95
96One common example of an open drain signal is a shared active-low IRQ line.
97Also, bidirectional data bus signals sometimes use open drain signals.
98
99Some GPIO controllers directly support open drain and open source outputs; many
100don't. When you need open drain signaling but your hardware doesn't directly
101support it, there's a common idiom you can use to emulate it with any GPIO pin
102that can be used as either an input or an output:
103
104 LOW: gpiod_direction_output(gpio, 0) ... this drives the signal and overrides
105 the pullup.
106
107 HIGH: gpiod_direction_input(gpio) ... this turns off the output, so the pullup
108 (or some other device) controls the signal.
109
110The same logic can be applied to emulate open source signaling, by driving the
111high signal and configuring the GPIO as input for low. This open drain/open
112source emulation can be handled transparently by the GPIO framework.
113
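As a minimal sketch of the idiom above (added for illustration; the helper
name is made up, and an external pullup is assumed to be present on the
board), the two cases can be wrapped in one function:

    static void sketch_set_open_drain(struct gpio_desc *gpio, int value)
    {
            if (value)
                    /* "high": stop driving; the pullup pulls the line up */
                    gpiod_direction_input(gpio);
            else
                    /* "low": actively drive the line to 0 */
                    gpiod_direction_output(gpio, 0);
    }
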
114If you are "driving" the signal high but gpiod_get_value(gpio) reports a low
115value (after the appropriate rise time passes), you know some other component is
116driving the shared signal low. That's not necessarily an error. As one common
117example, that's how I2C clocks are stretched: a slave that needs a slower clock
118delays the rising edge of SCK, and the I2C master adjusts its signaling rate
119accordingly.
diff --git a/Documentation/gpio/sysfs.txt b/Documentation/gpio/sysfs.txt
new file mode 100644
index 000000000000..c2c3a97f8ff7
--- /dev/null
+++ b/Documentation/gpio/sysfs.txt
@@ -0,0 +1,155 @@
1GPIO Sysfs Interface for Userspace
2==================================
3
4Platforms which use the "gpiolib" implementors' framework may choose to
5configure a sysfs user interface to GPIOs. This is different from the
6debugfs interface, since it provides control over GPIO direction and
7value instead of just showing a gpio state summary. Plus, it could be
8present on production systems without debugging support.
9
10Given appropriate hardware documentation for the system, userspace could
11know for example that GPIO #23 controls the write protect line used to
12protect boot loader segments in flash memory. System upgrade procedures
13may need to temporarily remove that protection, first exporting a GPIO,
14then changing its output state, then updating the code before re-enabling
15the write protection. In normal use, GPIO #23 would never be touched,
16and the kernel would have no need to know about it.
17
18Again depending on appropriate hardware documentation, on some systems
19userspace GPIO can be used to determine system configuration data that
20standard kernels won't know about. And for some tasks, simple userspace
21GPIO drivers could be all that the system really needs.
22
23Note that standard kernel drivers exist for common "LEDs and Buttons"
24GPIO tasks: "leds-gpio" and "gpio_keys", respectively. Use those
25instead of talking directly to the GPIOs; they integrate with kernel
26frameworks better than your userspace code could.
27
28
29Paths in Sysfs
30--------------
31There are three kinds of entry in /sys/class/gpio:
32
33 - Control interfaces used to get userspace control over GPIOs;
34
35 - GPIOs themselves; and
36
37 - GPIO controllers ("gpio_chip" instances).
38
39That's in addition to standard files including the "device" symlink.
40
41The control interfaces are write-only:
42
43 /sys/class/gpio/
44
45 "export" ... Userspace may ask the kernel to export control of
46 a GPIO to userspace by writing its number to this file.
47
48 Example: "echo 19 > export" will create a "gpio19" node
49 for GPIO #19, if that's not requested by kernel code.
50
51 "unexport" ... Reverses the effect of exporting to userspace.
52
53 Example: "echo 19 > unexport" will remove a "gpio19"
54 node exported using the "export" file.
55
56GPIO signals have paths like /sys/class/gpio/gpio42/ (for GPIO #42)
57and have the following read/write attributes:
58
59 /sys/class/gpio/gpioN/
60
61 "direction" ... reads as either "in" or "out". This value may
62 normally be written. Writing as "out" defaults to
63 initializing the value as low. To ensure glitch free
64 operation, values "low" and "high" may be written to
65 configure the GPIO as an output with that initial value.
66
67 Note that this attribute *will not exist* if the kernel
68 doesn't support changing the direction of a GPIO, or
69 it was exported by kernel code that didn't explicitly
70 allow userspace to reconfigure this GPIO's direction.
71
72 "value" ... reads as either 0 (low) or 1 (high). If the GPIO
73 is configured as an output, this value may be written;
74 any nonzero value is treated as high.
75
76 If the pin can be configured as an interrupt-generating input
77 and if it has been configured to generate interrupts (see the
78 description of "edge"), you can poll(2) on that file and
79 poll(2) will return whenever the interrupt was triggered. If
80 you use poll(2), set the events POLLPRI and POLLERR. If you
81 use select(2), set the file descriptor in exceptfds. After
82 poll(2) returns, either lseek(2) to the beginning of the sysfs
83 file and read the new value or close the file and re-open it
84 to read the value.
85
86 "edge" ... reads as either "none", "rising", "falling", or
87 "both". Write these strings to select the signal edge(s)
88 that will make poll(2) on the "value" file return.
89
90 This file exists only if the pin can be configured as an
91 interrupt generating input pin.
92
93 "active_low" ... reads as either 0 (false) or 1 (true). Write
94 any nonzero value to invert the value attribute both
95 for reading and writing. Existing and subsequent
96 poll(2) support configured via the edge attribute
97 for "rising" and "falling" edges will follow this
98 setting.
99
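To tie the attributes above together, here is a userspace sketch (added for
illustration only; GPIO #19 is an arbitrary example number and error
handling is omitted for brevity):

    #include <fcntl.h>
    #include <poll.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            struct pollfd pfd;
            char value;
            int fd;

            fd = open("/sys/class/gpio/export", O_WRONLY);
            write(fd, "19", 2);          /* creates /sys/class/gpio/gpio19 */
            close(fd);

            fd = open("/sys/class/gpio/gpio19/edge", O_WRONLY);
            write(fd, "both", 4);        /* make poll(2) return on any edge */
            close(fd);

            pfd.fd = open("/sys/class/gpio/gpio19/value", O_RDONLY);
            pfd.events = POLLPRI | POLLERR;
            read(pfd.fd, &value, 1);     /* consume the initial state */

            while (poll(&pfd, 1, -1) > 0) {
                    lseek(pfd.fd, 0, SEEK_SET);  /* re-read from the start */
                    read(pfd.fd, &value, 1);
                    printf("gpio19 is now %c\n", value);
            }
            return 0;
    }
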
100GPIO controllers have paths like /sys/class/gpio/gpiochip42/ (for the
101controller implementing GPIOs starting at #42) and have the following
102read-only attributes:
103
104 /sys/class/gpio/gpiochipN/
105
106 "base" ... same as N, the first GPIO managed by this chip
107
108 "label" ... provided for diagnostics (not always unique)
109
110 "ngpio" ... how many GPIOs this manages (N to N + ngpio - 1)
111
112Board documentation should in most cases cover what GPIOs are used for
113what purposes. However, those numbers are not always stable; GPIOs on
114a daughtercard might be different depending on the base board being used,
115or other cards in the stack. In such cases, you may need to use the
116gpiochip nodes (possibly in conjunction with schematics) to determine
117the correct GPIO number to use for a given signal.
118
119
120Exporting from Kernel code
121--------------------------
122Kernel code can explicitly manage exports of GPIOs which have already been
123requested using gpio_request():
124
125 /* export the GPIO to userspace */
126 int gpiod_export(struct gpio_desc *desc, bool direction_may_change);
127
128 /* reverse gpio_export() */
129 void gpiod_unexport(struct gpio_desc *desc);
130
131 /* create a sysfs link to an exported GPIO node */
132 int gpiod_export_link(struct device *dev, const char *name,
133 struct gpio_desc *desc);
134
135 /* change the polarity of a GPIO node in sysfs */
136 int gpiod_sysfs_set_active_low(struct gpio_desc *desc, int value);
137
138After a kernel driver requests a GPIO, it may only be made available in
139the sysfs interface by gpiod_export(). The driver can control whether the
140signal direction may change. This helps drivers prevent userspace code
141from accidentally clobbering important system state.
142
143This explicit exporting can help with debugging (by making some kinds
144of experiments easier), or can provide an always-there interface that's
145suitable for documenting as part of a board support package.
146
147After the GPIO has been exported, gpiod_export_link() allows creating
148symlinks from elsewhere in sysfs to the GPIO sysfs node. Drivers can
149use this to provide the interface under their own device in sysfs with
150a descriptive name.
151
152Drivers can use gpiod_sysfs_set_active_low() to hide GPIO line polarity
153differences between boards from user space. Polarity change can be done both
154before and after gpiod_export(), and previously enabled poll(2) support for
155either rising or falling edge will be reconfigured to follow this setting.
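
As a rough usage sketch (added for illustration; the "reset" link name and
the wrapper function are made up, and the descriptor is assumed to have been
obtained earlier, e.g. with gpiod_get() or gpio_request() plus
gpio_to_desc()):

    static int sketch_publish_gpio(struct device *dev, struct gpio_desc *desc)
    {
            int ret;

            /* expose the line but keep its direction fixed for userspace */
            ret = gpiod_export(desc, false);
            if (ret)
                    return ret;

            /* create <dev>/reset as a symlink to the gpioN node */
            return gpiod_export_link(dev, "reset", desc);
    }
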
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 50680a59a2ff..b9e9bd854298 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1529,6 +1529,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
1529 1529
1530 * atapi_dmadir: Enable ATAPI DMADIR bridge support 1530 * atapi_dmadir: Enable ATAPI DMADIR bridge support
1531 1531
1532 * disable: Disable this device.
1533
1532 If there are multiple matching configurations changing 1534 If there are multiple matching configurations changing
1533 the same attribute, the last one is used. 1535 the same attribute, the last one is used.
1534 1536
diff --git a/Documentation/mic/mpssd/mpssd.c b/Documentation/mic/mpssd/mpssd.c
index 0c980ad40b17..4d17487d5ad9 100644
--- a/Documentation/mic/mpssd/mpssd.c
+++ b/Documentation/mic/mpssd/mpssd.c
@@ -313,7 +313,7 @@ static struct mic_device_desc *get_device_desc(struct mic_info *mic, int type)
313 int i; 313 int i;
314 void *dp = get_dp(mic, type); 314 void *dp = get_dp(mic, type);
315 315
316 for (i = mic_aligned_size(struct mic_bootparam); i < PAGE_SIZE; 316 for (i = sizeof(struct mic_bootparam); i < PAGE_SIZE;
317 i += mic_total_desc_size(d)) { 317 i += mic_total_desc_size(d)) {
318 d = dp + i; 318 d = dp + i;
319 319
@@ -445,8 +445,8 @@ init_vr(struct mic_info *mic, int fd, int type,
445 __func__, mic->name, vr0->va, vr0->info, vr_size, 445 __func__, mic->name, vr0->va, vr0->info, vr_size,
446 vring_size(MIC_VRING_ENTRIES, MIC_VIRTIO_RING_ALIGN)); 446 vring_size(MIC_VRING_ENTRIES, MIC_VIRTIO_RING_ALIGN));
447 mpsslog("magic 0x%x expected 0x%x\n", 447 mpsslog("magic 0x%x expected 0x%x\n",
448 vr0->info->magic, MIC_MAGIC + type); 448 le32toh(vr0->info->magic), MIC_MAGIC + type);
449 assert(vr0->info->magic == MIC_MAGIC + type); 449 assert(le32toh(vr0->info->magic) == MIC_MAGIC + type);
450 if (vr1) { 450 if (vr1) {
451 vr1->va = (struct mic_vring *) 451 vr1->va = (struct mic_vring *)
452 &va[MIC_DEVICE_PAGE_END + vr_size]; 452 &va[MIC_DEVICE_PAGE_END + vr_size];
@@ -458,8 +458,8 @@ init_vr(struct mic_info *mic, int fd, int type,
458 __func__, mic->name, vr1->va, vr1->info, vr_size, 458 __func__, mic->name, vr1->va, vr1->info, vr_size,
459 vring_size(MIC_VRING_ENTRIES, MIC_VIRTIO_RING_ALIGN)); 459 vring_size(MIC_VRING_ENTRIES, MIC_VIRTIO_RING_ALIGN));
460 mpsslog("magic 0x%x expected 0x%x\n", 460 mpsslog("magic 0x%x expected 0x%x\n",
461 vr1->info->magic, MIC_MAGIC + type + 1); 461 le32toh(vr1->info->magic), MIC_MAGIC + type + 1);
462 assert(vr1->info->magic == MIC_MAGIC + type + 1); 462 assert(le32toh(vr1->info->magic) == MIC_MAGIC + type + 1);
463 } 463 }
464done: 464done:
465 return va; 465 return va;
@@ -520,7 +520,7 @@ static void *
520virtio_net(void *arg) 520virtio_net(void *arg)
521{ 521{
522 static __u8 vnet_hdr[2][sizeof(struct virtio_net_hdr)]; 522 static __u8 vnet_hdr[2][sizeof(struct virtio_net_hdr)];
523 static __u8 vnet_buf[2][MAX_NET_PKT_SIZE] __aligned(64); 523 static __u8 vnet_buf[2][MAX_NET_PKT_SIZE] __attribute__ ((aligned(64)));
524 struct iovec vnet_iov[2][2] = { 524 struct iovec vnet_iov[2][2] = {
525 { { .iov_base = vnet_hdr[0], .iov_len = sizeof(vnet_hdr[0]) }, 525 { { .iov_base = vnet_hdr[0], .iov_len = sizeof(vnet_hdr[0]) },
526 { .iov_base = vnet_buf[0], .iov_len = sizeof(vnet_buf[0]) } }, 526 { .iov_base = vnet_buf[0], .iov_len = sizeof(vnet_buf[0]) } },
@@ -1412,6 +1412,12 @@ mic_config(void *arg)
1412 } 1412 }
1413 1413
1414 do { 1414 do {
1415 ret = lseek(fd, 0, SEEK_SET);
1416 if (ret < 0) {
1417 mpsslog("%s: Failed to seek to file start '%s': %s\n",
1418 mic->name, pathname, strerror(errno));
1419 goto close_error1;
1420 }
1415 ret = read(fd, value, sizeof(value)); 1421 ret = read(fd, value, sizeof(value));
1416 if (ret < 0) { 1422 if (ret < 0) {
1417 mpsslog("%s: Failed to read sysfs entry '%s': %s\n", 1423 mpsslog("%s: Failed to read sysfs entry '%s': %s\n",
diff --git a/Documentation/module-signing.txt b/Documentation/module-signing.txt
new file mode 100644
index 000000000000..2b40e04d3c49
--- /dev/null
+++ b/Documentation/module-signing.txt
@@ -0,0 +1,240 @@
1 ==============================
2 KERNEL MODULE SIGNING FACILITY
3 ==============================
4
5CONTENTS
6
7 - Overview.
8 - Configuring module signing.
9 - Generating signing keys.
10 - Public keys in the kernel.
11 - Manually signing modules.
12 - Signed modules and stripping.
13 - Loading signed modules.
14 - Non-valid signatures and unsigned modules.
15 - Administering/protecting the private key.
16
17
18========
19OVERVIEW
20========
21
22The kernel module signing facility cryptographically signs modules during
23installation and then checks the signature upon loading the module. This
24allows increased kernel security by disallowing the loading of unsigned modules
25or modules signed with an invalid key. Module signing increases security by
26making it harder to load a malicious module into the kernel. The module
27signature checking is done by the kernel so that it is not necessary to have
28trusted userspace bits.
29
30This facility uses X.509 ITU-T standard certificates to encode the public keys
31involved. The signatures are not themselves encoded in any industry-standard
32format. The facility currently only supports the RSA public key encryption
33standard (though it is pluggable and permits others to be used). The possible
34hash algorithms that can be used are SHA-1, SHA-224, SHA-256, SHA-384, and
35SHA-512 (the algorithm is selected by data in the signature).
36
37
38==========================
39CONFIGURING MODULE SIGNING
40==========================
41
42The module signing facility is enabled by going to the "Enable Loadable Module
43Support" section of the kernel configuration and turning on
44
45 CONFIG_MODULE_SIG "Module signature verification"
46
47This has a number of options available:
48
49 (1) "Require modules to be validly signed" (CONFIG_MODULE_SIG_FORCE)
50
51 This specifies how the kernel should deal with a module that has a
52 signature for which the key is not known or a module that is unsigned.
53
54 If this is off (ie. "permissive"), then modules for which the key is not
55 available and modules that are unsigned are permitted, but the kernel will
56 be marked as being tainted.
57
58 If this is on (ie. "restrictive"), only modules that have a valid
59 signature that can be verified by a public key in the kernel's possession
60 will be loaded. All other modules will generate an error.
61
62 Irrespective of the setting here, if the module has a signature block that
63 cannot be parsed, it will be rejected out of hand.
64
65
66 (2) "Automatically sign all modules" (CONFIG_MODULE_SIG_ALL)
67
68 If this is on then modules will be automatically signed during the
69 modules_install phase of a build. If this is off, then the modules must
70 be signed manually using:
71
72 scripts/sign-file
73
74
75 (3) "Which hash algorithm should modules be signed with?"
76
77 This presents a choice of which hash algorithm the installation phase will
78 sign the modules with:
79
80 CONFIG_SIG_SHA1 "Sign modules with SHA-1"
81 CONFIG_SIG_SHA224 "Sign modules with SHA-224"
82 CONFIG_SIG_SHA256 "Sign modules with SHA-256"
83 CONFIG_SIG_SHA384 "Sign modules with SHA-384"
84 CONFIG_SIG_SHA512 "Sign modules with SHA-512"
85
86 The algorithm selected here will also be built into the kernel (rather
87 than being a module) so that modules signed with that algorithm can have
88 their signatures checked without causing a dependency loop.
89
90
91=======================
92GENERATING SIGNING KEYS
93=======================
94
95Cryptographic keypairs are required to generate and check signatures. A
96private key is used to generate a signature and the corresponding public key is
97used to check it. The private key is only needed during the build, after which
98it can be deleted or stored securely. The public key gets built into the
99kernel so that it can be used to check the signatures as the modules are
100loaded.
101
102Under normal conditions, the kernel build will automatically generate a new
103keypair using openssl if one does not exist in the files:
104
105 signing_key.priv
106 signing_key.x509
107
108during the building of vmlinux (the public part of the key needs to be built
109into vmlinux) using parameters in the:
110
111 x509.genkey
112
113file (which is also generated if it does not already exist).
114
115It is strongly recommended that you provide your own x509.genkey file.
116
117Most notably, in the x509.genkey file, the req_distinguished_name section
118should be altered from the default:
119
120 [ req_distinguished_name ]
121 O = Magrathea
122 CN = Glacier signing key
123 emailAddress = slartibartfast@magrathea.h2g2
124
125The generated RSA key size can also be set with:
126
127 [ req ]
128 default_bits = 4096
129
130
131It is also possible to generate the private/public key files manually using the
132x509.genkey key generation configuration file in the root directory of the Linux
133kernel source tree and the openssl command. The following is an example to
134generate the public/private key files:
135
136 openssl req -new -nodes -utf8 -sha256 -days 36500 -batch -x509 \
137 -config x509.genkey -outform DER -out signing_key.x509 \
138 -keyout signing_key.priv
139
140
141=========================
142PUBLIC KEYS IN THE KERNEL
143=========================
144
145The kernel contains a ring of public keys that can be viewed by root. They're
146in a keyring called ".system_keyring" that can be seen by:
147
148 [root@deneb ~]# cat /proc/keys
149 ...
150 223c7853 I------ 1 perm 1f030000 0 0 keyring .system_keyring: 1
151 302d2d52 I------ 1 perm 1f010000 0 0 asymmetri Fedora kernel signing key: d69a84e6bce3d216b979e9505b3e3ef9a7118079: X509.RSA a7118079 []
152 ...
153
154Beyond the public key generated specifically for module signing, any file
155placed in the kernel source root directory or the kernel build root directory
156whose name is suffixed with ".x509" will be assumed to be an X.509 public key
157and will be added to the keyring.
158
159Further, the architecture code may take public keys from a hardware store and
160add those in also (e.g. from the UEFI key database).
161
162Finally, it is possible to add additional public keys by doing:
163
164 keyctl padd asymmetric "" [.system_keyring-ID] <[key-file]
165
166e.g.:
167
168 keyctl padd asymmetric "" 0x223c7853 <my_public_key.x509
169
170Note, however, that the kernel will only permit keys to be added to
171.system_keyring _if_ the new key's X.509 wrapper is validly signed by a key
172that is already resident in the .system_keyring at the time the key was added.
173
174
175=========================
176MANUALLY SIGNING MODULES
177=========================
178
179To manually sign a module, use the scripts/sign-file tool available in
180the Linux kernel source tree. The script requires 4 arguments:
181
182 1. The hash algorithm (e.g., sha256)
183 2. The private key filename
184 3. The public key filename
185 4. The kernel module to be signed
186
187The following is an example to sign a kernel module:
188
189 scripts/sign-file sha512 kernel-signkey.priv \
190 kernel-signkey.x509 module.ko
191
192The hash algorithm used does not have to match the one configured, but if it
193doesn't, you should make sure that hash algorithm is either built into the
194kernel or can be loaded without requiring itself.
195
196
197============================
198SIGNED MODULES AND STRIPPING
199============================
200
201A signed module has a digital signature simply appended at the end. The string
202"~Module signature appended~." at the end of the module's file confirms that a
203signature is present but it does not confirm that the signature is valid!
204
205Signed modules are BRITTLE as the signature is outside of the defined ELF
206container. Thus they MAY NOT be stripped once the signature is computed and
207attached. Note the entire module is the signed payload, including any and all
208debug information present at the time of signing.
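
As an illustrative check only (added here, not from the original document;
it merely detects the marker and says nothing about whether the signature
verifies), the tail of a module file can be scanned for the marker string:

    #define _GNU_SOURCE          /* for memmem() */
    #include <stdio.h>
    #include <string.h>

    static int has_signature_marker(const char *path)
    {
            static const char marker[] = "~Module signature appended~";
            char tail[512];
            size_t n;
            FILE *f = fopen(path, "rb");

            if (!f)
                    return -1;
            /* small files: if the seek fails we simply read from the start */
            fseek(f, -(long)sizeof(tail), SEEK_END);
            n = fread(tail, 1, sizeof(tail), f);
            fclose(f);
            return n && memmem(tail, n, marker, sizeof(marker) - 1) != NULL;
    }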
209
210
211======================
212LOADING SIGNED MODULES
213======================
214
215Modules are loaded with insmod, modprobe, init_module() or finit_module(),
216exactly as for unsigned modules as no processing is done in userspace. The
217signature checking is all done within the kernel.
218
219
220=========================================
221NON-VALID SIGNATURES AND UNSIGNED MODULES
222=========================================
223
224If CONFIG_MODULE_SIG_FORCE is enabled or module.sig_enforce=1 is supplied on
225the kernel command line, the kernel will only load validly signed modules
226for which it has a public key. Otherwise, it will also load modules that are
227unsigned. Any module for which the kernel has a key, but which proves to have
228a signature mismatch will not be permitted to load.
229
230Any module that has an unparseable signature will be rejected.
231
232
233=========================================
234ADMINISTERING/PROTECTING THE PRIVATE KEY
235=========================================
236
237Since the private key is used to sign modules, viruses and malware could use
238the private key to sign modules and compromise the operating system. The
239private key must be either destroyed or moved to a secure location and not kept
240in the root directory of the kernel source tree.
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index 3c12d9a7ed00..8a984e994e61 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -16,8 +16,12 @@ ip_default_ttl - INTEGER
16 Default: 64 (as recommended by RFC1700) 16 Default: 64 (as recommended by RFC1700)
17 17
18ip_no_pmtu_disc - BOOLEAN 18ip_no_pmtu_disc - BOOLEAN
19 Disable Path MTU Discovery. 19 Disable Path MTU Discovery. If enabled and a
20 default FALSE 20 fragmentation-required ICMP is received, the PMTU to this
21 destination will be set to min_pmtu (see below). You will need
22 to raise min_pmtu to the smallest interface MTU on your system
23 manually if you want to avoid locally generated fragments.
24 Default: FALSE
21 25
22min_pmtu - INTEGER 26min_pmtu - INTEGER
23 default 552 - minimum discovered Path MTU 27 default 552 - minimum discovered Path MTU
diff --git a/Documentation/networking/packet_mmap.txt b/Documentation/networking/packet_mmap.txt
index c01223628a87..8e48e3b14227 100644
--- a/Documentation/networking/packet_mmap.txt
+++ b/Documentation/networking/packet_mmap.txt
@@ -123,6 +123,16 @@ Transmission process is similar to capture as shown below.
123[shutdown] close() --------> destruction of the transmission socket and 123[shutdown] close() --------> destruction of the transmission socket and
124 deallocation of all associated resources. 124 deallocation of all associated resources.
125 125
126Socket creation and destruction is also straight forward, and is done
127the same way as in capturing described in the previous paragraph:
128
129 int fd = socket(PF_PACKET, mode, 0);
130
131The protocol can optionally be 0 in case we only want to transmit
132via this socket, which avoids an expensive call to packet_rcv().
133In this case, you also need to bind(2) the TX_RING with sll_protocol = 0
134set. Otherwise, htons(ETH_P_ALL) or any other protocol, for example.
135
126Binding the socket to your network interface is mandatory (with zero copy) to 136Binding the socket to your network interface is mandatory (with zero copy) to
127know the header size of frames used in the circular buffer. 137know the header size of frames used in the circular buffer.
128 138
diff --git a/MAINTAINERS b/MAINTAINERS
index 8285ed4676b6..31a046213274 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -783,7 +783,7 @@ F: arch/arm/boot/dts/sama*.dts
783F: arch/arm/boot/dts/sama*.dtsi 783F: arch/arm/boot/dts/sama*.dtsi
784 784
785ARM/CALXEDA HIGHBANK ARCHITECTURE 785ARM/CALXEDA HIGHBANK ARCHITECTURE
786M: Rob Herring <rob.herring@calxeda.com> 786M: Rob Herring <robh@kernel.org>
787L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 787L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
788S: Maintained 788S: Maintained
789F: arch/arm/mach-highbank/ 789F: arch/arm/mach-highbank/
@@ -893,20 +893,15 @@ F: arch/arm/include/asm/hardware/dec21285.h
893F: arch/arm/mach-footbridge/ 893F: arch/arm/mach-footbridge/
894 894
895ARM/FREESCALE IMX / MXC ARM ARCHITECTURE 895ARM/FREESCALE IMX / MXC ARM ARCHITECTURE
896M: Shawn Guo <shawn.guo@linaro.org>
896M: Sascha Hauer <kernel@pengutronix.de> 897M: Sascha Hauer <kernel@pengutronix.de>
897L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 898L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
898S: Maintained 899S: Maintained
899T: git git://git.pengutronix.de/git/imx/linux-2.6.git 900T: git git://git.linaro.org/people/shawnguo/linux-2.6.git
900F: arch/arm/mach-imx/ 901F: arch/arm/mach-imx/
902F: arch/arm/boot/dts/imx*
901F: arch/arm/configs/imx*_defconfig 903F: arch/arm/configs/imx*_defconfig
902 904
903ARM/FREESCALE IMX6
904M: Shawn Guo <shawn.guo@linaro.org>
905L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
906S: Maintained
907T: git git://git.linaro.org/people/shawnguo/linux-2.6.git
908F: arch/arm/mach-imx/*imx6*
909
910ARM/FREESCALE MXS ARM ARCHITECTURE 905ARM/FREESCALE MXS ARM ARCHITECTURE
911M: Shawn Guo <shawn.guo@linaro.org> 906M: Shawn Guo <shawn.guo@linaro.org>
912L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 907L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -1013,6 +1008,8 @@ M: Santosh Shilimkar <santosh.shilimkar@ti.com>
1013L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 1008L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
1014S: Maintained 1009S: Maintained
1015F: arch/arm/mach-keystone/ 1010F: arch/arm/mach-keystone/
1011F: drivers/clk/keystone/
1012T: git git://git.kernel.org/pub/scm/linux/kernel/git/ssantosh/linux-keystone.git
1016 1013
1017ARM/LOGICPD PXA270 MACHINE SUPPORT 1014ARM/LOGICPD PXA270 MACHINE SUPPORT
1018M: Lennert Buytenhek <kernel@wantstofly.org> 1015M: Lennert Buytenhek <kernel@wantstofly.org>
@@ -1371,6 +1368,9 @@ T: git git://git.xilinx.com/linux-xlnx.git
1371S: Supported 1368S: Supported
1372F: arch/arm/mach-zynq/ 1369F: arch/arm/mach-zynq/
1373F: drivers/cpuidle/cpuidle-zynq.c 1370F: drivers/cpuidle/cpuidle-zynq.c
1371N: zynq
1372N: xilinx
1373F: drivers/clocksource/cadence_ttc_timer.c
1374 1374
1375ARM SMMU DRIVER 1375ARM SMMU DRIVER
1376M: Will Deacon <will.deacon@arm.com> 1376M: Will Deacon <will.deacon@arm.com>
@@ -1934,7 +1934,8 @@ S: Maintained
1934F: drivers/gpio/gpio-bt8xx.c 1934F: drivers/gpio/gpio-bt8xx.c
1935 1935
1936BTRFS FILE SYSTEM 1936BTRFS FILE SYSTEM
1937M: Chris Mason <chris.mason@fusionio.com> 1937M: Chris Mason <clm@fb.com>
1938M: Josef Bacik <jbacik@fb.com>
1938L: linux-btrfs@vger.kernel.org 1939L: linux-btrfs@vger.kernel.org
1939W: http://btrfs.wiki.kernel.org/ 1940W: http://btrfs.wiki.kernel.org/
1940Q: http://patchwork.kernel.org/project/linux-btrfs/list/ 1941Q: http://patchwork.kernel.org/project/linux-btrfs/list/
@@ -2137,11 +2138,17 @@ S: Maintained
2137F: Documentation/zh_CN/ 2138F: Documentation/zh_CN/
2138 2139
2139CHIPIDEA USB HIGH SPEED DUAL ROLE CONTROLLER 2140CHIPIDEA USB HIGH SPEED DUAL ROLE CONTROLLER
2140M: Alexander Shishkin <alexander.shishkin@linux.intel.com> 2141M: Peter Chen <Peter.Chen@freescale.com>
2142T: git://github.com/hzpeterchen/linux-usb.git
2141L: linux-usb@vger.kernel.org 2143L: linux-usb@vger.kernel.org
2142S: Maintained 2144S: Maintained
2143F: drivers/usb/chipidea/ 2145F: drivers/usb/chipidea/
2144 2146
2147CHROME HARDWARE PLATFORM SUPPORT
2148M: Olof Johansson <olof@lixom.net>
2149S: Maintained
2150F: drivers/platform/chrome/
2151
2145CISCO VIC ETHERNET NIC DRIVER 2152CISCO VIC ETHERNET NIC DRIVER
2146M: Christian Benvenuti <benve@cisco.com> 2153M: Christian Benvenuti <benve@cisco.com>
2147M: Sujith Sankar <ssujith@cisco.com> 2154M: Sujith Sankar <ssujith@cisco.com>
@@ -2821,8 +2828,10 @@ F: include/uapi/drm/
2821 2828
2822INTEL DRM DRIVERS (excluding Poulsbo, Moorestown and derivative chipsets) 2829INTEL DRM DRIVERS (excluding Poulsbo, Moorestown and derivative chipsets)
2823M: Daniel Vetter <daniel.vetter@ffwll.ch> 2830M: Daniel Vetter <daniel.vetter@ffwll.ch>
2831M: Jani Nikula <jani.nikula@linux.intel.com>
2824L: intel-gfx@lists.freedesktop.org 2832L: intel-gfx@lists.freedesktop.org
2825L: dri-devel@lists.freedesktop.org 2833L: dri-devel@lists.freedesktop.org
2834Q: http://patchwork.freedesktop.org/project/intel-gfx/
2826T: git git://people.freedesktop.org/~danvet/drm-intel 2835T: git git://people.freedesktop.org/~danvet/drm-intel
2827S: Supported 2836S: Supported
2828F: drivers/gpu/drm/i915/ 2837F: drivers/gpu/drm/i915/
@@ -3759,9 +3768,11 @@ F: include/uapi/linux/gigaset_dev.h
3759 3768
3760GPIO SUBSYSTEM 3769GPIO SUBSYSTEM
3761M: Linus Walleij <linus.walleij@linaro.org> 3770M: Linus Walleij <linus.walleij@linaro.org>
3762S: Maintained 3771M: Alexandre Courbot <gnurou@gmail.com>
3763L: linux-gpio@vger.kernel.org 3772L: linux-gpio@vger.kernel.org
3764F: Documentation/gpio.txt 3773T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-gpio.git
3774S: Maintained
3775F: Documentation/gpio/
3765F: drivers/gpio/ 3776F: drivers/gpio/
3766F: include/linux/gpio* 3777F: include/linux/gpio*
3767F: include/asm-generic/gpio.h 3778F: include/asm-generic/gpio.h
@@ -3829,6 +3840,12 @@ T: git git://linuxtv.org/media_tree.git
3829S: Maintained 3840S: Maintained
3830F: drivers/media/usb/gspca/ 3841F: drivers/media/usb/gspca/
3831 3842
3843GUID PARTITION TABLE (GPT)
3844M: Davidlohr Bueso <davidlohr@hp.com>
3845L: linux-efi@vger.kernel.org
3846S: Maintained
3847F: block/partitions/efi.*
3848
3832STK1160 USB VIDEO CAPTURE DRIVER 3849STK1160 USB VIDEO CAPTURE DRIVER
3833M: Ezequiel Garcia <elezegarcia@gmail.com> 3850M: Ezequiel Garcia <elezegarcia@gmail.com>
3834L: linux-media@vger.kernel.org 3851L: linux-media@vger.kernel.org
@@ -4038,12 +4055,26 @@ W: http://artax.karlin.mff.cuni.cz/~mikulas/vyplody/hpfs/index-e.cgi
4038S: Maintained 4055S: Maintained
4039F: fs/hpfs/ 4056F: fs/hpfs/
4040 4057
4058HSI SUBSYSTEM
4059M: Sebastian Reichel <sre@debian.org>
4060S: Maintained
4061F: Documentation/ABI/testing/sysfs-bus-hsi
4062F: drivers/hsi/
4063F: include/linux/hsi/
4064F: include/uapi/linux/hsi/
4065
4041HSO 3G MODEM DRIVER 4066HSO 3G MODEM DRIVER
4042M: Jan Dumon <j.dumon@option.com> 4067M: Jan Dumon <j.dumon@option.com>
4043W: http://www.pharscape.org 4068W: http://www.pharscape.org
4044S: Maintained 4069S: Maintained
4045F: drivers/net/usb/hso.c 4070F: drivers/net/usb/hso.c
4046 4071
4072HSR NETWORK PROTOCOL
4073M: Arvid Brodin <arvid.brodin@alten.se>
4074L: netdev@vger.kernel.org
4075S: Maintained
4076F: net/hsr/
4077
4047HTCPEN TOUCHSCREEN DRIVER 4078HTCPEN TOUCHSCREEN DRIVER
4048M: Pau Oliva Fora <pof@eslack.org> 4079M: Pau Oliva Fora <pof@eslack.org>
4049L: linux-input@vger.kernel.org 4080L: linux-input@vger.kernel.org
@@ -4450,10 +4481,8 @@ M: Bruce Allan <bruce.w.allan@intel.com>
4450M: Carolyn Wyborny <carolyn.wyborny@intel.com> 4481M: Carolyn Wyborny <carolyn.wyborny@intel.com>
4451M: Don Skidmore <donald.c.skidmore@intel.com> 4482M: Don Skidmore <donald.c.skidmore@intel.com>
4452M: Greg Rose <gregory.v.rose@intel.com> 4483M: Greg Rose <gregory.v.rose@intel.com>
4453M: Peter P Waskiewicz Jr <peter.p.waskiewicz.jr@intel.com>
4454M: Alex Duyck <alexander.h.duyck@intel.com> 4484M: Alex Duyck <alexander.h.duyck@intel.com>
4455M: John Ronciak <john.ronciak@intel.com> 4485M: John Ronciak <john.ronciak@intel.com>
4456M: Tushar Dave <tushar.n.dave@intel.com>
4457L: e1000-devel@lists.sourceforge.net 4486L: e1000-devel@lists.sourceforge.net
4458W: http://www.intel.com/support/feedback.htm 4487W: http://www.intel.com/support/feedback.htm
4459W: http://e1000.sourceforge.net/ 4488W: http://e1000.sourceforge.net/
@@ -5256,7 +5285,7 @@ S: Maintained
5256F: Documentation/lockdep*.txt 5285F: Documentation/lockdep*.txt
5257F: Documentation/lockstat.txt 5286F: Documentation/lockstat.txt
5258F: include/linux/lockdep.h 5287F: include/linux/lockdep.h
5259F: kernel/lockdep* 5288F: kernel/locking/
5260 5289
5261LOGICAL DISK MANAGER SUPPORT (LDM, Windows 2000/XP/Vista Dynamic Disks) 5290LOGICAL DISK MANAGER SUPPORT (LDM, Windows 2000/XP/Vista Dynamic Disks)
5262M: "Richard Russon (FlatCap)" <ldm@flatcap.org> 5291M: "Richard Russon (FlatCap)" <ldm@flatcap.org>
@@ -5897,12 +5926,21 @@ M: Steffen Klassert <steffen.klassert@secunet.com>
5897M: Herbert Xu <herbert@gondor.apana.org.au> 5926M: Herbert Xu <herbert@gondor.apana.org.au>
5898M: "David S. Miller" <davem@davemloft.net> 5927M: "David S. Miller" <davem@davemloft.net>
5899L: netdev@vger.kernel.org 5928L: netdev@vger.kernel.org
5900T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net.git 5929T: git git://git.kernel.org/pub/scm/linux/kernel/git/klassert/ipsec.git
5930T: git git://git.kernel.org/pub/scm/linux/kernel/git/klassert/ipsec-next.git
5901S: Maintained 5931S: Maintained
5902F: net/xfrm/ 5932F: net/xfrm/
5903F: net/key/ 5933F: net/key/
5904F: net/ipv4/xfrm* 5934F: net/ipv4/xfrm*
5935F: net/ipv4/esp4.c
5936F: net/ipv4/ah4.c
5937F: net/ipv4/ipcomp.c
5938F: net/ipv4/ip_vti.c
5905F: net/ipv6/xfrm* 5939F: net/ipv6/xfrm*
5940F: net/ipv6/esp6.c
5941F: net/ipv6/ah6.c
5942F: net/ipv6/ipcomp6.c
5943F: net/ipv6/ip6_vti.c
5906F: include/uapi/linux/xfrm.h 5944F: include/uapi/linux/xfrm.h
5907F: include/net/xfrm.h 5945F: include/net/xfrm.h
5908 5946
@@ -5968,10 +6006,10 @@ F: drivers/nfc/
5968F: include/linux/platform_data/pn544.h 6006F: include/linux/platform_data/pn544.h
5969 6007
5970NFS, SUNRPC, AND LOCKD CLIENTS 6008NFS, SUNRPC, AND LOCKD CLIENTS
5971M: Trond Myklebust <Trond.Myklebust@netapp.com> 6009M: Trond Myklebust <trond.myklebust@primarydata.com>
5972L: linux-nfs@vger.kernel.org 6010L: linux-nfs@vger.kernel.org
5973W: http://client.linux-nfs.org 6011W: http://client.linux-nfs.org
5974T: git git://git.linux-nfs.org/pub/linux/nfs-2.6.git 6012T: git git://git.linux-nfs.org/projects/trondmy/linux-nfs.git
5975S: Maintained 6013S: Maintained
5976F: fs/lockd/ 6014F: fs/lockd/
5977F: fs/nfs/ 6015F: fs/nfs/
@@ -6223,7 +6261,7 @@ F: drivers/i2c/busses/i2c-ocores.c
6223 6261
6224OPEN FIRMWARE AND FLATTENED DEVICE TREE 6262OPEN FIRMWARE AND FLATTENED DEVICE TREE
6225M: Grant Likely <grant.likely@linaro.org> 6263M: Grant Likely <grant.likely@linaro.org>
6226M: Rob Herring <rob.herring@calxeda.com> 6264M: Rob Herring <robh+dt@kernel.org>
6227L: devicetree@vger.kernel.org 6265L: devicetree@vger.kernel.org
6228W: http://fdt.secretlab.ca 6266W: http://fdt.secretlab.ca
6229T: git git://git.secretlab.ca/git/linux-2.6.git 6267T: git git://git.secretlab.ca/git/linux-2.6.git
@@ -6235,11 +6273,11 @@ K: of_get_property
6235K: of_match_table 6273K: of_match_table
6236 6274
6237OPEN FIRMWARE AND FLATTENED DEVICE TREE BINDINGS 6275OPEN FIRMWARE AND FLATTENED DEVICE TREE BINDINGS
6238M: Rob Herring <rob.herring@calxeda.com> 6276M: Rob Herring <robh+dt@kernel.org>
6239M: Pawel Moll <pawel.moll@arm.com> 6277M: Pawel Moll <pawel.moll@arm.com>
6240M: Mark Rutland <mark.rutland@arm.com> 6278M: Mark Rutland <mark.rutland@arm.com>
6241M: Stephen Warren <swarren@wwwdotorg.org>
6242M: Ian Campbell <ijc+devicetree@hellion.org.uk> 6279M: Ian Campbell <ijc+devicetree@hellion.org.uk>
6280M: Kumar Gala <galak@codeaurora.org>
6243L: devicetree@vger.kernel.org 6281L: devicetree@vger.kernel.org
6244S: Maintained 6282S: Maintained
6245F: Documentation/devicetree/ 6283F: Documentation/devicetree/
@@ -6449,19 +6487,52 @@ F: drivers/pci/
6449F: include/linux/pci* 6487F: include/linux/pci*
6450F: arch/x86/pci/ 6488F: arch/x86/pci/
6451 6489
6490PCI DRIVER FOR IMX6
6491M: Richard Zhu <r65037@freescale.com>
6492M: Shawn Guo <shawn.guo@linaro.org>
6493L: linux-pci@vger.kernel.org
6494L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
6495S: Maintained
6496F: drivers/pci/host/*imx6*
6497
6498PCI DRIVER FOR MVEBU (Marvell Armada 370 and Armada XP SOC support)
6499M: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
6500M: Jason Cooper <jason@lakedaemon.net>
6501L: linux-pci@vger.kernel.org
6502L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
6503S: Maintained
6504F: drivers/pci/host/*mvebu*
6505
6452PCI DRIVER FOR NVIDIA TEGRA 6506PCI DRIVER FOR NVIDIA TEGRA
6453M: Thierry Reding <thierry.reding@gmail.com> 6507M: Thierry Reding <thierry.reding@gmail.com>
6454L: linux-tegra@vger.kernel.org 6508L: linux-tegra@vger.kernel.org
6509L: linux-pci@vger.kernel.org
6455S: Supported 6510S: Supported
6456F: Documentation/devicetree/bindings/pci/nvidia,tegra20-pcie.txt 6511F: Documentation/devicetree/bindings/pci/nvidia,tegra20-pcie.txt
6457F: drivers/pci/host/pci-tegra.c 6512F: drivers/pci/host/pci-tegra.c
6458 6513
6514PCI DRIVER FOR RENESAS R-CAR
6515M: Simon Horman <horms@verge.net.au>
6516L: linux-pci@vger.kernel.org
6517L: linux-sh@vger.kernel.org
6518S: Maintained
6519F: drivers/pci/host/*rcar*
6520
6459PCI DRIVER FOR SAMSUNG EXYNOS 6521PCI DRIVER FOR SAMSUNG EXYNOS
6460M: Jingoo Han <jg1.han@samsung.com> 6522M: Jingoo Han <jg1.han@samsung.com>
6461L: linux-pci@vger.kernel.org 6523L: linux-pci@vger.kernel.org
6524L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
6525L: linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
6462S: Maintained 6526S: Maintained
6463F: drivers/pci/host/pci-exynos.c 6527F: drivers/pci/host/pci-exynos.c
6464 6528
6529PCI DRIVER FOR SYNOPSIS DESIGNWARE
6530M: Mohit Kumar <mohit.kumar@st.com>
6531M: Jingoo Han <jg1.han@samsung.com>
6532L: linux-pci@vger.kernel.org
6533S: Maintained
6534F: drivers/pci/host/*designware*
6535
6465PCMCIA SUBSYSTEM 6536PCMCIA SUBSYSTEM
6466P: Linux PCMCIA Team 6537P: Linux PCMCIA Team
6467L: linux-pcmcia@lists.infradead.org 6538L: linux-pcmcia@lists.infradead.org
@@ -7380,7 +7451,6 @@ S: Maintained
7380F: kernel/sched/ 7451F: kernel/sched/
7381F: include/linux/sched.h 7452F: include/linux/sched.h
7382F: include/uapi/linux/sched.h 7453F: include/uapi/linux/sched.h
7383F: kernel/wait.c
7384F: include/linux/wait.h 7454F: include/linux/wait.h
7385 7455
7386SCORE ARCHITECTURE 7456SCORE ARCHITECTURE
@@ -9525,7 +9595,7 @@ F: drivers/xen/*swiotlb*
9525 9595
9526XFS FILESYSTEM 9596XFS FILESYSTEM
9527P: Silicon Graphics Inc 9597P: Silicon Graphics Inc
9528M: Dave Chinner <dchinner@fromorbit.com> 9598M: Dave Chinner <david@fromorbit.com>
9529M: Ben Myers <bpm@sgi.com> 9599M: Ben Myers <bpm@sgi.com>
9530M: xfs@oss.sgi.com 9600M: xfs@oss.sgi.com
9531L: xfs@oss.sgi.com 9601L: xfs@oss.sgi.com
diff --git a/Makefile b/Makefile
index c0c2d58e3998..eeec740776f3 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
1VERSION = 3 1VERSION = 3
2PATCHLEVEL = 13 2PATCHLEVEL = 13
3SUBLEVEL = 0 3SUBLEVEL = 0
4EXTRAVERSION = -rc1 4EXTRAVERSION = -rc8
5NAME = One Giant Leap for Frogkind 5NAME = One Giant Leap for Frogkind
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
@@ -732,19 +732,15 @@ export mod_strip_cmd
732# Select initial ramdisk compression format, default is gzip(1). 732# Select initial ramdisk compression format, default is gzip(1).
733# This shall be used by the dracut(8) tool while creating an initramfs image. 733# This shall be used by the dracut(8) tool while creating an initramfs image.
734# 734#
735INITRD_COMPRESS=gzip 735INITRD_COMPRESS-y := gzip
736ifeq ($(CONFIG_RD_BZIP2), y) 736INITRD_COMPRESS-$(CONFIG_RD_BZIP2) := bzip2
737 INITRD_COMPRESS=bzip2 737INITRD_COMPRESS-$(CONFIG_RD_LZMA) := lzma
738else ifeq ($(CONFIG_RD_LZMA), y) 738INITRD_COMPRESS-$(CONFIG_RD_XZ) := xz
739 INITRD_COMPRESS=lzma 739INITRD_COMPRESS-$(CONFIG_RD_LZO) := lzo
740else ifeq ($(CONFIG_RD_XZ), y) 740INITRD_COMPRESS-$(CONFIG_RD_LZ4) := lz4
741 INITRD_COMPRESS=xz 741# do not export INITRD_COMPRESS, since we didn't actually
742else ifeq ($(CONFIG_RD_LZO), y) 742# choose a sane default compression above.
743 INITRD_COMPRESS=lzo 743# export INITRD_COMPRESS := $(INITRD_COMPRESS-y)
744else ifeq ($(CONFIG_RD_LZ4), y)
745 INITRD_COMPRESS=lz4
746endif
747export INITRD_COMPRESS
748 744
749ifdef CONFIG_MODULE_SIG_ALL 745ifdef CONFIG_MODULE_SIG_ALL
750MODSECKEY = ./signing_key.priv 746MODSECKEY = ./signing_key.priv
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index 2ee0c9bfd032..9063ae6553cc 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -8,6 +8,7 @@
8 8
9config ARC 9config ARC
10 def_bool y 10 def_bool y
11 select BUILDTIME_EXTABLE_SORT
11 select CLONE_BACKWARDS 12 select CLONE_BACKWARDS
12 # ARC Busybox based initramfs absolutely relies on DEVTMPFS for /dev 13 # ARC Busybox based initramfs absolutely relies on DEVTMPFS for /dev
13 select DEVTMPFS if !INITRAMFS_SOURCE="" 14 select DEVTMPFS if !INITRAMFS_SOURCE=""
diff --git a/arch/arc/include/uapi/asm/unistd.h b/arch/arc/include/uapi/asm/unistd.h
index 6f30484f34b7..39e58d1cdf90 100644
--- a/arch/arc/include/uapi/asm/unistd.h
+++ b/arch/arc/include/uapi/asm/unistd.h
@@ -8,6 +8,13 @@
8 8
9/******** no-legacy-syscalls-ABI *******/ 9/******** no-legacy-syscalls-ABI *******/
10 10
11/*
12 * Non-typical guard macro to enable inclusion twice in ARCH sys.c
13 * That is how the Generic syscall wrapper generator works
14 */
15#if !defined(_UAPI_ASM_ARC_UNISTD_H) || defined(__SYSCALL)
16#define _UAPI_ASM_ARC_UNISTD_H
17
11#define __ARCH_WANT_SYS_EXECVE 18#define __ARCH_WANT_SYS_EXECVE
12#define __ARCH_WANT_SYS_CLONE 19#define __ARCH_WANT_SYS_CLONE
13#define __ARCH_WANT_SYS_VFORK 20#define __ARCH_WANT_SYS_VFORK
@@ -32,3 +39,7 @@ __SYSCALL(__NR_arc_gettls, sys_arc_gettls)
32/* Generic syscall (fs/filesystems.c - lost in asm-generic/unistd.h */ 39/* Generic syscall (fs/filesystems.c - lost in asm-generic/unistd.h */
33#define __NR_sysfs (__NR_arch_specific_syscall + 3) 40#define __NR_sysfs (__NR_arch_specific_syscall + 3)
34__SYSCALL(__NR_sysfs, sys_sysfs) 41__SYSCALL(__NR_sysfs, sys_sysfs)
42
43#undef __SYSCALL
44
45#endif
diff --git a/arch/arc/kernel/perf_event.c b/arch/arc/kernel/perf_event.c
index e46d81f70979..63177e4cb66d 100644
--- a/arch/arc/kernel/perf_event.c
+++ b/arch/arc/kernel/perf_event.c
@@ -79,9 +79,9 @@ static int arc_pmu_cache_event(u64 config)
79 cache_result = (config >> 16) & 0xff; 79 cache_result = (config >> 16) & 0xff;
80 if (cache_type >= PERF_COUNT_HW_CACHE_MAX) 80 if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
81 return -EINVAL; 81 return -EINVAL;
82 if (cache_type >= PERF_COUNT_HW_CACHE_OP_MAX) 82 if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
83 return -EINVAL; 83 return -EINVAL;
84 if (cache_type >= PERF_COUNT_HW_CACHE_RESULT_MAX) 84 if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
85 return -EINVAL; 85 return -EINVAL;
86 86
87 ret = arc_pmu_cache_map[cache_type][cache_op][cache_result]; 87 ret = arc_pmu_cache_map[cache_type][cache_op][cache_result];
diff --git a/arch/arm/boot/dts/am335x-base0033.dts b/arch/arm/boot/dts/am335x-base0033.dts
index b4f95c2bbf74..72a9b3fc4251 100644
--- a/arch/arm/boot/dts/am335x-base0033.dts
+++ b/arch/arm/boot/dts/am335x-base0033.dts
@@ -13,4 +13,83 @@
13/ { 13/ {
14 model = "IGEP COM AM335x on AQUILA Expansion"; 14 model = "IGEP COM AM335x on AQUILA Expansion";
15 compatible = "isee,am335x-base0033", "isee,am335x-igep0033", "ti,am33xx"; 15 compatible = "isee,am335x-base0033", "isee,am335x-igep0033", "ti,am33xx";
16
17 hdmi {
18 compatible = "ti,tilcdc,slave";
19 i2c = <&i2c0>;
20 pinctrl-names = "default", "off";
21 pinctrl-0 = <&nxp_hdmi_pins>;
22 pinctrl-1 = <&nxp_hdmi_off_pins>;
23 status = "okay";
24 };
25
26 leds_base {
27 pinctrl-names = "default";
28 pinctrl-0 = <&leds_base_pins>;
29
30 compatible = "gpio-leds";
31
32 led@0 {
33 label = "base:red:user";
34 gpios = <&gpio1 21 GPIO_ACTIVE_HIGH>; /* gpio1_21 */
35 default-state = "off";
36 };
37
38 led@1 {
39 label = "base:green:user";
40 gpios = <&gpio2 0 GPIO_ACTIVE_HIGH>; /* gpio2_0 */
41 default-state = "off";
42 };
43 };
44};
45
46&am33xx_pinmux {
47 nxp_hdmi_pins: pinmux_nxp_hdmi_pins {
48 pinctrl-single,pins = <
49 0x1b0 (PIN_OUTPUT | MUX_MODE3) /* xdma_event_intr0.clkout1 */
50 0xa0 (PIN_OUTPUT | MUX_MODE0) /* lcd_data0 */
51 0xa4 (PIN_OUTPUT | MUX_MODE0) /* lcd_data1 */
52 0xa8 (PIN_OUTPUT | MUX_MODE0) /* lcd_data2 */
53 0xac (PIN_OUTPUT | MUX_MODE0) /* lcd_data3 */
54 0xb0 (PIN_OUTPUT | MUX_MODE0) /* lcd_data4 */
55 0xb4 (PIN_OUTPUT | MUX_MODE0) /* lcd_data5 */
56 0xb8 (PIN_OUTPUT | MUX_MODE0) /* lcd_data6 */
57 0xbc (PIN_OUTPUT | MUX_MODE0) /* lcd_data7 */
58 0xc0 (PIN_OUTPUT | MUX_MODE0) /* lcd_data8 */
59 0xc4 (PIN_OUTPUT | MUX_MODE0) /* lcd_data9 */
60 0xc8 (PIN_OUTPUT | MUX_MODE0) /* lcd_data10 */
61 0xcc (PIN_OUTPUT | MUX_MODE0) /* lcd_data11 */
62 0xd0 (PIN_OUTPUT | MUX_MODE0) /* lcd_data12 */
63 0xd4 (PIN_OUTPUT | MUX_MODE0) /* lcd_data13 */
64 0xd8 (PIN_OUTPUT | MUX_MODE0) /* lcd_data14 */
65 0xdc (PIN_OUTPUT | MUX_MODE0) /* lcd_data15 */
66 0xe0 (PIN_OUTPUT | MUX_MODE0) /* lcd_vsync */
67 0xe4 (PIN_OUTPUT | MUX_MODE0) /* lcd_hsync */
68 0xe8 (PIN_OUTPUT | MUX_MODE0) /* lcd_pclk */
69 0xec (PIN_OUTPUT | MUX_MODE0) /* lcd_ac_bias_en */
70 >;
71 };
72 nxp_hdmi_off_pins: pinmux_nxp_hdmi_off_pins {
73 pinctrl-single,pins = <
74 0x1b0 (PIN_OUTPUT | MUX_MODE3) /* xdma_event_intr0.clkout1 */
75 >;
76 };
77
78 leds_base_pins: pinmux_leds_base_pins {
79 pinctrl-single,pins = <
80 0x54 (PIN_OUTPUT_PULLDOWN | MUX_MODE7) /* gpmc_a5.gpio1_21 */
81 0x88 (PIN_OUTPUT_PULLDOWN | MUX_MODE7) /* gpmc_csn3.gpio2_0 */
82 >;
83 };
84};
85
86&lcdc {
87 status = "okay";
88};
89
90&i2c0 {
91 eeprom: eeprom@50 {
92 compatible = "at,24c256";
93 reg = <0x50>;
94 };
16}; 95};
diff --git a/arch/arm/boot/dts/am335x-igep0033.dtsi b/arch/arm/boot/dts/am335x-igep0033.dtsi
index 619624479311..7063311a58d9 100644
--- a/arch/arm/boot/dts/am335x-igep0033.dtsi
+++ b/arch/arm/boot/dts/am335x-igep0033.dtsi
@@ -199,6 +199,35 @@
199 pinctrl-0 = <&uart0_pins>; 199 pinctrl-0 = <&uart0_pins>;
200}; 200};
201 201
202&usb {
203 status = "okay";
204
205 control@44e10000 {
206 status = "okay";
207 };
208
209 usb-phy@47401300 {
210 status = "okay";
211 };
212
213 usb-phy@47401b00 {
214 status = "okay";
215 };
216
217 usb@47401000 {
218 status = "okay";
219 };
220
221 usb@47401800 {
222 status = "okay";
223 dr_mode = "host";
224 };
225
226 dma-controller@07402000 {
227 status = "okay";
228 };
229};
230
202#include "tps65910.dtsi" 231#include "tps65910.dtsi"
203 232
204&tps { 233&tps {
diff --git a/arch/arm/boot/dts/am3517-evm.dts b/arch/arm/boot/dts/am3517-evm.dts
index e99dfaf70052..03fcbf0a88a8 100644
--- a/arch/arm/boot/dts/am3517-evm.dts
+++ b/arch/arm/boot/dts/am3517-evm.dts
@@ -7,11 +7,11 @@
7 */ 7 */
8/dts-v1/; 8/dts-v1/;
9 9
10#include "omap34xx.dtsi" 10#include "am3517.dtsi"
11 11
12/ { 12/ {
13 model = "TI AM3517 EVM (AM3517/05)"; 13 model = "TI AM3517 EVM (AM3517/05 TMDSEVM3517)";
14 compatible = "ti,am3517-evm", "ti,omap3"; 14 compatible = "ti,am3517-evm", "ti,am3517", "ti,omap3";
15 15
16 memory { 16 memory {
17 device_type = "memory"; 17 device_type = "memory";
diff --git a/arch/arm/boot/dts/am3517.dtsi b/arch/arm/boot/dts/am3517.dtsi
new file mode 100644
index 000000000000..2fbe02faa8b1
--- /dev/null
+++ b/arch/arm/boot/dts/am3517.dtsi
@@ -0,0 +1,63 @@
1/*
2 * Device Tree Source for am3517 SoC
3 *
4 * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/
5 *
6 * This file is licensed under the terms of the GNU General Public License
7 * version 2. This program is licensed "as is" without any warranty of any
8 * kind, whether express or implied.
9 */
10
11#include "omap3.dtsi"
12
13/ {
14 aliases {
15 serial3 = &uart4;
16 };
17
18 ocp {
19 am35x_otg_hs: am35x_otg_hs@5c040000 {
20 compatible = "ti,omap3-musb";
21 ti,hwmods = "am35x_otg_hs";
22 status = "disabled";
23 reg = <0x5c040000 0x1000>;
24 interrupts = <71>;
25 interrupt-names = "mc";
26 };
27
28 davinci_emac: ethernet@0x5c000000 {
29 compatible = "ti,am3517-emac";
30 ti,hwmods = "davinci_emac";
31 status = "disabled";
32 reg = <0x5c000000 0x30000>;
33 interrupts = <67 68 69 70>;
34 ti,davinci-ctrl-reg-offset = <0x10000>;
35 ti,davinci-ctrl-mod-reg-offset = <0>;
36 ti,davinci-ctrl-ram-offset = <0x20000>;
37 ti,davinci-ctrl-ram-size = <0x2000>;
38 ti,davinci-rmii-en = /bits/ 8 <1>;
39 local-mac-address = [ 00 00 00 00 00 00 ];
40 };
41
42 davinci_mdio: ethernet@0x5c030000 {
43 compatible = "ti,davinci_mdio";
44 ti,hwmods = "davinci_mdio";
45 status = "disabled";
46 reg = <0x5c030000 0x1000>;
47 bus_freq = <1000000>;
48 #address-cells = <1>;
49 #size-cells = <0>;
50 };
51
52 uart4: serial@4809e000 {
53 compatible = "ti,omap3-uart";
54 ti,hwmods = "uart4";
55 status = "disabled";
56 reg = <0x4809e000 0x400>;
57 interrupts = <84>;
58 dmas = <&sdma 55 &sdma 54>;
59 dma-names = "tx", "rx";
60 clock-frequency = <48000000>;
61 };
62 };
63};
diff --git a/arch/arm/boot/dts/armada-370-db.dts b/arch/arm/boot/dts/armada-370-db.dts
index 90ce29dbe119..08a56bcfc724 100644
--- a/arch/arm/boot/dts/armada-370-db.dts
+++ b/arch/arm/boot/dts/armada-370-db.dts
@@ -99,22 +99,22 @@
99 spi-max-frequency = <50000000>; 99 spi-max-frequency = <50000000>;
100 }; 100 };
101 }; 101 };
102 };
102 103
103 pcie-controller { 104 pcie-controller {
105 status = "okay";
106 /*
107 * The two PCIe units are accessible through
108 * both standard PCIe slots and mini-PCIe
109 * slots on the board.
110 */
111 pcie@1,0 {
112 /* Port 0, Lane 0 */
113 status = "okay";
114 };
115 pcie@2,0 {
116 /* Port 1, Lane 0 */
104 status = "okay"; 117 status = "okay";
105 /*
106 * The two PCIe units are accessible through
107 * both standard PCIe slots and mini-PCIe
108 * slots on the board.
109 */
110 pcie@1,0 {
111 /* Port 0, Lane 0 */
112 status = "okay";
113 };
114 pcie@2,0 {
115 /* Port 1, Lane 0 */
116 status = "okay";
117 };
118 }; 118 };
119 }; 119 };
120 }; 120 };
diff --git a/arch/arm/boot/dts/armada-370-xp.dtsi b/arch/arm/boot/dts/armada-370-xp.dtsi
index 00d6a798c705..7f10f627ae5b 100644
--- a/arch/arm/boot/dts/armada-370-xp.dtsi
+++ b/arch/arm/boot/dts/armada-370-xp.dtsi
@@ -118,7 +118,7 @@
118 118
119 coherency-fabric@20200 { 119 coherency-fabric@20200 {
120 compatible = "marvell,coherency-fabric"; 120 compatible = "marvell,coherency-fabric";
121 reg = <0x20200 0xb0>, <0x21810 0x1c>; 121 reg = <0x20200 0xb0>, <0x21010 0x1c>;
122 }; 122 };
123 123
124 serial@12000 { 124 serial@12000 {
diff --git a/arch/arm/boot/dts/armada-xp-mv78230.dtsi b/arch/arm/boot/dts/armada-xp-mv78230.dtsi
index 3f5e6121c730..98335fb34b7a 100644
--- a/arch/arm/boot/dts/armada-xp-mv78230.dtsi
+++ b/arch/arm/boot/dts/armada-xp-mv78230.dtsi
@@ -47,7 +47,7 @@
47 /* 47 /*
48 * MV78230 has 2 PCIe units Gen2.0: One unit can be 48 * MV78230 has 2 PCIe units Gen2.0: One unit can be
49 * configured as x4 or quad x1 lanes. One unit is 49 * configured as x4 or quad x1 lanes. One unit is
50 * x4/x1. 50 * x1 only.
51 */ 51 */
52 pcie-controller { 52 pcie-controller {
53 compatible = "marvell,armada-xp-pcie"; 53 compatible = "marvell,armada-xp-pcie";
@@ -62,10 +62,10 @@
62 62
63 ranges = 63 ranges =
64 <0x82000000 0 0x40000 MBUS_ID(0xf0, 0x01) 0x40000 0 0x00002000 /* Port 0.0 registers */ 64 <0x82000000 0 0x40000 MBUS_ID(0xf0, 0x01) 0x40000 0 0x00002000 /* Port 0.0 registers */
65 0x82000000 0 0x42000 MBUS_ID(0xf0, 0x01) 0x42000 0 0x00002000 /* Port 2.0 registers */
66 0x82000000 0 0x44000 MBUS_ID(0xf0, 0x01) 0x44000 0 0x00002000 /* Port 0.1 registers */ 65 0x82000000 0 0x44000 MBUS_ID(0xf0, 0x01) 0x44000 0 0x00002000 /* Port 0.1 registers */
67 0x82000000 0 0x48000 MBUS_ID(0xf0, 0x01) 0x48000 0 0x00002000 /* Port 0.2 registers */ 66 0x82000000 0 0x48000 MBUS_ID(0xf0, 0x01) 0x48000 0 0x00002000 /* Port 0.2 registers */
68 0x82000000 0 0x4c000 MBUS_ID(0xf0, 0x01) 0x4c000 0 0x00002000 /* Port 0.3 registers */ 67 0x82000000 0 0x4c000 MBUS_ID(0xf0, 0x01) 0x4c000 0 0x00002000 /* Port 0.3 registers */
68 0x82000000 0 0x80000 MBUS_ID(0xf0, 0x01) 0x80000 0 0x00002000 /* Port 1.0 registers */
69 0x82000000 0x1 0 MBUS_ID(0x04, 0xe8) 0 1 0 /* Port 0.0 MEM */ 69 0x82000000 0x1 0 MBUS_ID(0x04, 0xe8) 0 1 0 /* Port 0.0 MEM */
70 0x81000000 0x1 0 MBUS_ID(0x04, 0xe0) 0 1 0 /* Port 0.0 IO */ 70 0x81000000 0x1 0 MBUS_ID(0x04, 0xe0) 0 1 0 /* Port 0.0 IO */
71 0x82000000 0x2 0 MBUS_ID(0x04, 0xd8) 0 1 0 /* Port 0.1 MEM */ 71 0x82000000 0x2 0 MBUS_ID(0x04, 0xd8) 0 1 0 /* Port 0.1 MEM */
@@ -74,8 +74,8 @@
74 0x81000000 0x3 0 MBUS_ID(0x04, 0xb0) 0 1 0 /* Port 0.2 IO */ 74 0x81000000 0x3 0 MBUS_ID(0x04, 0xb0) 0 1 0 /* Port 0.2 IO */
75 0x82000000 0x4 0 MBUS_ID(0x04, 0x78) 0 1 0 /* Port 0.3 MEM */ 75 0x82000000 0x4 0 MBUS_ID(0x04, 0x78) 0 1 0 /* Port 0.3 MEM */
76 0x81000000 0x4 0 MBUS_ID(0x04, 0x70) 0 1 0 /* Port 0.3 IO */ 76 0x81000000 0x4 0 MBUS_ID(0x04, 0x70) 0 1 0 /* Port 0.3 IO */
77 0x82000000 0x9 0 MBUS_ID(0x04, 0xf8) 0 1 0 /* Port 2.0 MEM */ 77 0x82000000 0x5 0 MBUS_ID(0x08, 0xe8) 0 1 0 /* Port 1.0 MEM */
78 0x81000000 0x9 0 MBUS_ID(0x04, 0xf0) 0 1 0 /* Port 2.0 IO */>; 78 0x81000000 0x5 0 MBUS_ID(0x08, 0xe0) 0 1 0 /* Port 1.0 IO */>;
79 79
80 pcie@1,0 { 80 pcie@1,0 {
81 device_type = "pci"; 81 device_type = "pci";
@@ -145,20 +145,20 @@
145 status = "disabled"; 145 status = "disabled";
146 }; 146 };
147 147
148 pcie@9,0 { 148 pcie@5,0 {
149 device_type = "pci"; 149 device_type = "pci";
150 assigned-addresses = <0x82000800 0 0x42000 0 0x2000>; 150 assigned-addresses = <0x82000800 0 0x80000 0 0x2000>;
151 reg = <0x4800 0 0 0 0>; 151 reg = <0x2800 0 0 0 0>;
152 #address-cells = <3>; 152 #address-cells = <3>;
153 #size-cells = <2>; 153 #size-cells = <2>;
154 #interrupt-cells = <1>; 154 #interrupt-cells = <1>;
155 ranges = <0x82000000 0 0 0x82000000 0x9 0 1 0 155 ranges = <0x82000000 0 0 0x82000000 0x5 0 1 0
156 0x81000000 0 0 0x81000000 0x9 0 1 0>; 156 0x81000000 0 0 0x81000000 0x5 0 1 0>;
157 interrupt-map-mask = <0 0 0 0>; 157 interrupt-map-mask = <0 0 0 0>;
158 interrupt-map = <0 0 0 0 &mpic 99>; 158 interrupt-map = <0 0 0 0 &mpic 62>;
159 marvell,pcie-port = <2>; 159 marvell,pcie-port = <1>;
160 marvell,pcie-lane = <0>; 160 marvell,pcie-lane = <0>;
161 clocks = <&gateclk 26>; 161 clocks = <&gateclk 9>;
162 status = "disabled"; 162 status = "disabled";
163 }; 163 };
164 }; 164 };
diff --git a/arch/arm/boot/dts/armada-xp-mv78260.dtsi b/arch/arm/boot/dts/armada-xp-mv78260.dtsi
index 3e9fd1353f89..66609684d41b 100644
--- a/arch/arm/boot/dts/armada-xp-mv78260.dtsi
+++ b/arch/arm/boot/dts/armada-xp-mv78260.dtsi
@@ -48,7 +48,7 @@
48 /* 48 /*
49 * MV78260 has 3 PCIe units Gen2.0: Two units can be 49 * MV78260 has 3 PCIe units Gen2.0: Two units can be
50 * configured as x4 or quad x1 lanes. One unit is 50 * configured as x4 or quad x1 lanes. One unit is
51 * x4/x1. 51 * x4 only.
52 */ 52 */
53 pcie-controller { 53 pcie-controller {
54 compatible = "marvell,armada-xp-pcie"; 54 compatible = "marvell,armada-xp-pcie";
@@ -68,7 +68,9 @@
68 0x82000000 0 0x48000 MBUS_ID(0xf0, 0x01) 0x48000 0 0x00002000 /* Port 0.2 registers */ 68 0x82000000 0 0x48000 MBUS_ID(0xf0, 0x01) 0x48000 0 0x00002000 /* Port 0.2 registers */
69 0x82000000 0 0x4c000 MBUS_ID(0xf0, 0x01) 0x4c000 0 0x00002000 /* Port 0.3 registers */ 69 0x82000000 0 0x4c000 MBUS_ID(0xf0, 0x01) 0x4c000 0 0x00002000 /* Port 0.3 registers */
70 0x82000000 0 0x80000 MBUS_ID(0xf0, 0x01) 0x80000 0 0x00002000 /* Port 1.0 registers */ 70 0x82000000 0 0x80000 MBUS_ID(0xf0, 0x01) 0x80000 0 0x00002000 /* Port 1.0 registers */
71 0x82000000 0 0x82000 MBUS_ID(0xf0, 0x01) 0x82000 0 0x00002000 /* Port 3.0 registers */ 71 0x82000000 0 0x84000 MBUS_ID(0xf0, 0x01) 0x84000 0 0x00002000 /* Port 1.1 registers */
72 0x82000000 0 0x88000 MBUS_ID(0xf0, 0x01) 0x88000 0 0x00002000 /* Port 1.2 registers */
73 0x82000000 0 0x8c000 MBUS_ID(0xf0, 0x01) 0x8c000 0 0x00002000 /* Port 1.3 registers */
72 0x82000000 0x1 0 MBUS_ID(0x04, 0xe8) 0 1 0 /* Port 0.0 MEM */ 74 0x82000000 0x1 0 MBUS_ID(0x04, 0xe8) 0 1 0 /* Port 0.0 MEM */
73 0x81000000 0x1 0 MBUS_ID(0x04, 0xe0) 0 1 0 /* Port 0.0 IO */ 75 0x81000000 0x1 0 MBUS_ID(0x04, 0xe0) 0 1 0 /* Port 0.0 IO */
74 0x82000000 0x2 0 MBUS_ID(0x04, 0xd8) 0 1 0 /* Port 0.1 MEM */ 76 0x82000000 0x2 0 MBUS_ID(0x04, 0xd8) 0 1 0 /* Port 0.1 MEM */
@@ -77,10 +79,18 @@
77 0x81000000 0x3 0 MBUS_ID(0x04, 0xb0) 0 1 0 /* Port 0.2 IO */ 79 0x81000000 0x3 0 MBUS_ID(0x04, 0xb0) 0 1 0 /* Port 0.2 IO */
78 0x82000000 0x4 0 MBUS_ID(0x04, 0x78) 0 1 0 /* Port 0.3 MEM */ 80 0x82000000 0x4 0 MBUS_ID(0x04, 0x78) 0 1 0 /* Port 0.3 MEM */
79 0x81000000 0x4 0 MBUS_ID(0x04, 0x70) 0 1 0 /* Port 0.3 IO */ 81 0x81000000 0x4 0 MBUS_ID(0x04, 0x70) 0 1 0 /* Port 0.3 IO */
80 0x82000000 0x9 0 MBUS_ID(0x08, 0xe8) 0 1 0 /* Port 1.0 MEM */ 82
81 0x81000000 0x9 0 MBUS_ID(0x08, 0xe0) 0 1 0 /* Port 1.0 IO */ 83 0x82000000 0x5 0 MBUS_ID(0x08, 0xe8) 0 1 0 /* Port 1.0 MEM */
82 0x82000000 0xa 0 MBUS_ID(0x08, 0xf8) 0 1 0 /* Port 3.0 MEM */ 84 0x81000000 0x5 0 MBUS_ID(0x08, 0xe0) 0 1 0 /* Port 1.0 IO */
83 0x81000000 0xa 0 MBUS_ID(0x08, 0xf0) 0 1 0 /* Port 3.0 IO */>; 85 0x82000000 0x6 0 MBUS_ID(0x08, 0xd8) 0 1 0 /* Port 1.1 MEM */
86 0x81000000 0x6 0 MBUS_ID(0x08, 0xd0) 0 1 0 /* Port 1.1 IO */
87 0x82000000 0x7 0 MBUS_ID(0x08, 0xb8) 0 1 0 /* Port 1.2 MEM */
88 0x81000000 0x7 0 MBUS_ID(0x08, 0xb0) 0 1 0 /* Port 1.2 IO */
89 0x82000000 0x8 0 MBUS_ID(0x08, 0x78) 0 1 0 /* Port 1.3 MEM */
90 0x81000000 0x8 0 MBUS_ID(0x08, 0x70) 0 1 0 /* Port 1.3 IO */
91
92 0x82000000 0x9 0 MBUS_ID(0x04, 0xf8) 0 1 0 /* Port 2.0 MEM */
93 0x81000000 0x9 0 MBUS_ID(0x04, 0xf0) 0 1 0 /* Port 2.0 IO */>;
84 94
85 pcie@1,0 { 95 pcie@1,0 {
86 device_type = "pci"; 96 device_type = "pci";
@@ -106,8 +116,8 @@
106 #address-cells = <3>; 116 #address-cells = <3>;
107 #size-cells = <2>; 117 #size-cells = <2>;
108 #interrupt-cells = <1>; 118 #interrupt-cells = <1>;
109 ranges = <0x82000000 0 0 0x82000000 0x2 0 1 0 119 ranges = <0x82000000 0 0 0x82000000 0x2 0 1 0
110 0x81000000 0 0 0x81000000 0x2 0 1 0>; 120 0x81000000 0 0 0x81000000 0x2 0 1 0>;
111 interrupt-map-mask = <0 0 0 0>; 121 interrupt-map-mask = <0 0 0 0>;
112 interrupt-map = <0 0 0 0 &mpic 59>; 122 interrupt-map = <0 0 0 0 &mpic 59>;
113 marvell,pcie-port = <0>; 123 marvell,pcie-port = <0>;
@@ -150,37 +160,88 @@
150 status = "disabled"; 160 status = "disabled";
151 }; 161 };
152 162
153 pcie@9,0 { 163 pcie@5,0 {
154 device_type = "pci"; 164 device_type = "pci";
155 assigned-addresses = <0x82000800 0 0x42000 0 0x2000>; 165 assigned-addresses = <0x82000800 0 0x80000 0 0x2000>;
156 reg = <0x4800 0 0 0 0>; 166 reg = <0x2800 0 0 0 0>;
157 #address-cells = <3>; 167 #address-cells = <3>;
158 #size-cells = <2>; 168 #size-cells = <2>;
159 #interrupt-cells = <1>; 169 #interrupt-cells = <1>;
160 ranges = <0x82000000 0 0 0x82000000 0x9 0 1 0 170 ranges = <0x82000000 0 0 0x82000000 0x5 0 1 0
161 0x81000000 0 0 0x81000000 0x9 0 1 0>; 171 0x81000000 0 0 0x81000000 0x5 0 1 0>;
162 interrupt-map-mask = <0 0 0 0>; 172 interrupt-map-mask = <0 0 0 0>;
163 interrupt-map = <0 0 0 0 &mpic 99>; 173 interrupt-map = <0 0 0 0 &mpic 62>;
164 marvell,pcie-port = <2>; 174 marvell,pcie-port = <1>;
165 marvell,pcie-lane = <0>; 175 marvell,pcie-lane = <0>;
166 clocks = <&gateclk 26>; 176 clocks = <&gateclk 9>;
167 status = "disabled"; 177 status = "disabled";
168 }; 178 };
169 179
170 pcie@10,0 { 180 pcie@6,0 {
171 device_type = "pci"; 181 device_type = "pci";
172 assigned-addresses = <0x82000800 0 0x82000 0 0x2000>; 182 assigned-addresses = <0x82000800 0 0x84000 0 0x2000>;
173 reg = <0x5000 0 0 0 0>; 183 reg = <0x3000 0 0 0 0>;
174 #address-cells = <3>; 184 #address-cells = <3>;
175 #size-cells = <2>; 185 #size-cells = <2>;
176 #interrupt-cells = <1>; 186 #interrupt-cells = <1>;
177 ranges = <0x82000000 0 0 0x82000000 0xa 0 1 0 187 ranges = <0x82000000 0 0 0x82000000 0x6 0 1 0
178 0x81000000 0 0 0x81000000 0xa 0 1 0>; 188 0x81000000 0 0 0x81000000 0x6 0 1 0>;
179 interrupt-map-mask = <0 0 0 0>; 189 interrupt-map-mask = <0 0 0 0>;
180 interrupt-map = <0 0 0 0 &mpic 103>; 190 interrupt-map = <0 0 0 0 &mpic 63>;
181 marvell,pcie-port = <3>; 191 marvell,pcie-port = <1>;
192 marvell,pcie-lane = <1>;
193 clocks = <&gateclk 10>;
194 status = "disabled";
195 };
196
197 pcie@7,0 {
198 device_type = "pci";
199 assigned-addresses = <0x82000800 0 0x88000 0 0x2000>;
200 reg = <0x3800 0 0 0 0>;
201 #address-cells = <3>;
202 #size-cells = <2>;
203 #interrupt-cells = <1>;
204 ranges = <0x82000000 0 0 0x82000000 0x7 0 1 0
205 0x81000000 0 0 0x81000000 0x7 0 1 0>;
206 interrupt-map-mask = <0 0 0 0>;
207 interrupt-map = <0 0 0 0 &mpic 64>;
208 marvell,pcie-port = <1>;
209 marvell,pcie-lane = <2>;
210 clocks = <&gateclk 11>;
211 status = "disabled";
212 };
213
214 pcie@8,0 {
215 device_type = "pci";
216 assigned-addresses = <0x82000800 0 0x8c000 0 0x2000>;
217 reg = <0x4000 0 0 0 0>;
218 #address-cells = <3>;
219 #size-cells = <2>;
220 #interrupt-cells = <1>;
221 ranges = <0x82000000 0 0 0x82000000 0x8 0 1 0
222 0x81000000 0 0 0x81000000 0x8 0 1 0>;
223 interrupt-map-mask = <0 0 0 0>;
224 interrupt-map = <0 0 0 0 &mpic 65>;
225 marvell,pcie-port = <1>;
226 marvell,pcie-lane = <3>;
227 clocks = <&gateclk 12>;
228 status = "disabled";
229 };
230
231 pcie@9,0 {
232 device_type = "pci";
233 assigned-addresses = <0x82000800 0 0x42000 0 0x2000>;
234 reg = <0x4800 0 0 0 0>;
235 #address-cells = <3>;
236 #size-cells = <2>;
237 #interrupt-cells = <1>;
238 ranges = <0x82000000 0 0 0x82000000 0x9 0 1 0
239 0x81000000 0 0 0x81000000 0x9 0 1 0>;
240 interrupt-map-mask = <0 0 0 0>;
241 interrupt-map = <0 0 0 0 &mpic 99>;
242 marvell,pcie-port = <2>;
182 marvell,pcie-lane = <0>; 243 marvell,pcie-lane = <0>;
183 clocks = <&gateclk 27>; 244 clocks = <&gateclk 26>;
184 status = "disabled"; 245 status = "disabled";
185 }; 246 };
186 }; 247 };
diff --git a/arch/arm/boot/dts/at91sam9x5_usart3.dtsi b/arch/arm/boot/dts/at91sam9x5_usart3.dtsi
index 2347e9563cef..6801106fa1f8 100644
--- a/arch/arm/boot/dts/at91sam9x5_usart3.dtsi
+++ b/arch/arm/boot/dts/at91sam9x5_usart3.dtsi
@@ -11,6 +11,10 @@
11#include <dt-bindings/interrupt-controller/irq.h> 11#include <dt-bindings/interrupt-controller/irq.h>
12 12
13/ { 13/ {
14 aliases {
15 serial4 = &usart3;
16 };
17
14 ahb { 18 ahb {
15 apb { 19 apb {
16 pinctrl@fffff400 { 20 pinctrl@fffff400 {
diff --git a/arch/arm/boot/dts/bcm2835.dtsi b/arch/arm/boot/dts/bcm2835.dtsi
index 1e12aeff403b..aa537ed13f0a 100644
--- a/arch/arm/boot/dts/bcm2835.dtsi
+++ b/arch/arm/boot/dts/bcm2835.dtsi
@@ -85,6 +85,8 @@
85 reg = <0x7e205000 0x1000>; 85 reg = <0x7e205000 0x1000>;
86 interrupts = <2 21>; 86 interrupts = <2 21>;
87 clocks = <&clk_i2c>; 87 clocks = <&clk_i2c>;
88 #address-cells = <1>;
89 #size-cells = <0>;
88 status = "disabled"; 90 status = "disabled";
89 }; 91 };
90 92
@@ -93,6 +95,8 @@
93 reg = <0x7e804000 0x1000>; 95 reg = <0x7e804000 0x1000>;
94 interrupts = <2 21>; 96 interrupts = <2 21>;
95 clocks = <&clk_i2c>; 97 clocks = <&clk_i2c>;
98 #address-cells = <1>;
99 #size-cells = <0>;
96 status = "disabled"; 100 status = "disabled";
97 }; 101 };
98 102
diff --git a/arch/arm/boot/dts/cros5250-common.dtsi b/arch/arm/boot/dts/cros5250-common.dtsi
index dc259e8b8a73..9b186ac06c8b 100644
--- a/arch/arm/boot/dts/cros5250-common.dtsi
+++ b/arch/arm/boot/dts/cros5250-common.dtsi
@@ -27,6 +27,13 @@
27 i2c2_bus: i2c2-bus { 27 i2c2_bus: i2c2-bus {
28 samsung,pin-pud = <0>; 28 samsung,pin-pud = <0>;
29 }; 29 };
30
31 max77686_irq: max77686-irq {
32 samsung,pins = "gpx3-2";
33 samsung,pin-function = <0>;
34 samsung,pin-pud = <0>;
35 samsung,pin-drv = <0>;
36 };
30 }; 37 };
31 38
32 i2c@12C60000 { 39 i2c@12C60000 {
@@ -35,6 +42,11 @@
35 42
36 max77686@09 { 43 max77686@09 {
37 compatible = "maxim,max77686"; 44 compatible = "maxim,max77686";
45 interrupt-parent = <&gpx3>;
46 interrupts = <2 0>;
47 pinctrl-names = "default";
48 pinctrl-0 = <&max77686_irq>;
49 wakeup-source;
38 reg = <0x09>; 50 reg = <0x09>;
39 51
40 voltage-regulators { 52 voltage-regulators {
diff --git a/arch/arm/boot/dts/exynos5250.dtsi b/arch/arm/boot/dts/exynos5250.dtsi
index 9db5047812f3..177becde7a26 100644
--- a/arch/arm/boot/dts/exynos5250.dtsi
+++ b/arch/arm/boot/dts/exynos5250.dtsi
@@ -559,7 +559,7 @@
559 compatible = "arm,pl330", "arm,primecell"; 559 compatible = "arm,pl330", "arm,primecell";
560 reg = <0x10800000 0x1000>; 560 reg = <0x10800000 0x1000>;
561 interrupts = <0 33 0>; 561 interrupts = <0 33 0>;
562 clocks = <&clock 271>; 562 clocks = <&clock 346>;
563 clock-names = "apb_pclk"; 563 clock-names = "apb_pclk";
564 #dma-cells = <1>; 564 #dma-cells = <1>;
565 #dma-channels = <8>; 565 #dma-channels = <8>;
diff --git a/arch/arm/boot/dts/imx6qdl.dtsi b/arch/arm/boot/dts/imx6qdl.dtsi
index 59154dc15fe4..fb28b2ecb1db 100644
--- a/arch/arm/boot/dts/imx6qdl.dtsi
+++ b/arch/arm/boot/dts/imx6qdl.dtsi
@@ -161,7 +161,7 @@
161 clocks = <&clks 197>, <&clks 3>, 161 clocks = <&clks 197>, <&clks 3>,
162 <&clks 197>, <&clks 107>, 162 <&clks 197>, <&clks 107>,
163 <&clks 0>, <&clks 118>, 163 <&clks 0>, <&clks 118>,
164 <&clks 62>, <&clks 139>, 164 <&clks 0>, <&clks 139>,
165 <&clks 0>; 165 <&clks 0>;
166 clock-names = "core", "rxtx0", 166 clock-names = "core", "rxtx0",
167 "rxtx1", "rxtx2", 167 "rxtx1", "rxtx2",
diff --git a/arch/arm/boot/dts/omap-gpmc-smsc911x.dtsi b/arch/arm/boot/dts/omap-gpmc-smsc911x.dtsi
index 9c18adf788f7..f577b7df9a29 100644
--- a/arch/arm/boot/dts/omap-gpmc-smsc911x.dtsi
+++ b/arch/arm/boot/dts/omap-gpmc-smsc911x.dtsi
@@ -44,8 +44,8 @@
44 gpmc,wr-access-ns = <186>; 44 gpmc,wr-access-ns = <186>;
45 gpmc,cycle2cycle-samecsen; 45 gpmc,cycle2cycle-samecsen;
46 gpmc,cycle2cycle-diffcsen; 46 gpmc,cycle2cycle-diffcsen;
47 vmmc-supply = <&vddvario>; 47 vddvario-supply = <&vddvario>;
48 vmmc_aux-supply = <&vdd33a>; 48 vdd33a-supply = <&vdd33a>;
49 reg-io-width = <4>; 49 reg-io-width = <4>;
50 smsc,save-mac-address; 50 smsc,save-mac-address;
51 }; 51 };
diff --git a/arch/arm/boot/dts/omap-zoom-common.dtsi b/arch/arm/boot/dts/omap-zoom-common.dtsi
index b0ee342598f0..68221fab978d 100644
--- a/arch/arm/boot/dts/omap-zoom-common.dtsi
+++ b/arch/arm/boot/dts/omap-zoom-common.dtsi
@@ -13,7 +13,7 @@
13 * they probably share the same GPIO IRQ 13 * they probably share the same GPIO IRQ
14 * REVISIT: Add timing support from slls644g.pdf 14 * REVISIT: Add timing support from slls644g.pdf
15 */ 15 */
16 8250@3,0 { 16 uart@3,0 {
17 compatible = "ns16550a"; 17 compatible = "ns16550a";
18 reg = <3 0 0x100>; 18 reg = <3 0 0x100>;
19 bank-width = <2>; 19 bank-width = <2>;
diff --git a/arch/arm/boot/dts/omap2.dtsi b/arch/arm/boot/dts/omap2.dtsi
index a2bfcde858a6..d0c5b37e248c 100644
--- a/arch/arm/boot/dts/omap2.dtsi
+++ b/arch/arm/boot/dts/omap2.dtsi
@@ -9,6 +9,7 @@
9 */ 9 */
10 10
11#include <dt-bindings/gpio/gpio.h> 11#include <dt-bindings/gpio/gpio.h>
12#include <dt-bindings/interrupt-controller/irq.h>
12#include <dt-bindings/pinctrl/omap.h> 13#include <dt-bindings/pinctrl/omap.h>
13 14
14#include "skeleton.dtsi" 15#include "skeleton.dtsi"
@@ -21,6 +22,8 @@
21 serial0 = &uart1; 22 serial0 = &uart1;
22 serial1 = &uart2; 23 serial1 = &uart2;
23 serial2 = &uart3; 24 serial2 = &uart3;
25 i2c0 = &i2c1;
26 i2c1 = &i2c2;
24 }; 27 };
25 28
26 cpus { 29 cpus {
@@ -53,6 +56,28 @@
53 ranges; 56 ranges;
54 ti,hwmods = "l3_main"; 57 ti,hwmods = "l3_main";
55 58
59 aes: aes@480a6000 {
60 compatible = "ti,omap2-aes";
61 ti,hwmods = "aes";
62 reg = <0x480a6000 0x50>;
63 dmas = <&sdma 9 &sdma 10>;
64 dma-names = "tx", "rx";
65 };
66
67 hdq1w: 1w@480b2000 {
68 compatible = "ti,omap2420-1w";
69 ti,hwmods = "hdq1w";
70 reg = <0x480b2000 0x1000>;
71 interrupts = <58>;
72 };
73
74 mailbox: mailbox@48094000 {
75 compatible = "ti,omap2-mailbox";
76 ti,hwmods = "mailbox";
77 reg = <0x48094000 0x200>;
78 interrupts = <26>;
79 };
80
56 intc: interrupt-controller@1 { 81 intc: interrupt-controller@1 {
57 compatible = "ti,omap2-intc"; 82 compatible = "ti,omap2-intc";
58 interrupt-controller; 83 interrupt-controller;
@@ -63,6 +88,7 @@
63 88
64 sdma: dma-controller@48056000 { 89 sdma: dma-controller@48056000 {
65 compatible = "ti,omap2430-sdma", "ti,omap2420-sdma"; 90 compatible = "ti,omap2430-sdma", "ti,omap2420-sdma";
91 ti,hwmods = "dma";
66 reg = <0x48056000 0x1000>; 92 reg = <0x48056000 0x1000>;
67 interrupts = <12>, 93 interrupts = <12>,
68 <13>, 94 <13>,
@@ -73,21 +99,91 @@
73 #dma-requests = <64>; 99 #dma-requests = <64>;
74 }; 100 };
75 101
102 i2c1: i2c@48070000 {
103 compatible = "ti,omap2-i2c";
104 ti,hwmods = "i2c1";
105 reg = <0x48070000 0x80>;
106 #address-cells = <1>;
107 #size-cells = <0>;
108 interrupts = <56>;
109 dmas = <&sdma 27 &sdma 28>;
110 dma-names = "tx", "rx";
111 };
112
113 i2c2: i2c@48072000 {
114 compatible = "ti,omap2-i2c";
115 ti,hwmods = "i2c2";
116 reg = <0x48072000 0x80>;
117 #address-cells = <1>;
118 #size-cells = <0>;
119 interrupts = <57>;
120 dmas = <&sdma 29 &sdma 30>;
121 dma-names = "tx", "rx";
122 };
123
124 mcspi1: mcspi@48098000 {
125 compatible = "ti,omap2-mcspi";
126 ti,hwmods = "mcspi1";
127 reg = <0x48098000 0x100>;
128 interrupts = <65>;
129 dmas = <&sdma 35 &sdma 36 &sdma 37 &sdma 38
130 &sdma 39 &sdma 40 &sdma 41 &sdma 42>;
131 dma-names = "tx0", "rx0", "tx1", "rx1",
132 "tx2", "rx2", "tx3", "rx3";
133 };
134
135 mcspi2: mcspi@4809a000 {
136 compatible = "ti,omap2-mcspi";
137 ti,hwmods = "mcspi2";
138 reg = <0x4809a000 0x100>;
139 interrupts = <66>;
140 dmas = <&sdma 43 &sdma 44 &sdma 45 &sdma 46>;
141 dma-names = "tx0", "rx0", "tx1", "rx1";
142 };
143
144 rng: rng@480a0000 {
145 compatible = "ti,omap2-rng";
146 ti,hwmods = "rng";
147 reg = <0x480a0000 0x50>;
148 interrupts = <36>;
149 };
150
151 sham: sham@480a4000 {
152 compatible = "ti,omap2-sham";
153 ti,hwmods = "sham";
154 reg = <0x480a4000 0x64>;
155 interrupts = <51>;
156 dmas = <&sdma 13>;
157 dma-names = "rx";
158 };
159
76 uart1: serial@4806a000 { 160 uart1: serial@4806a000 {
77 compatible = "ti,omap2-uart"; 161 compatible = "ti,omap2-uart";
78 ti,hwmods = "uart1"; 162 ti,hwmods = "uart1";
163 reg = <0x4806a000 0x2000>;
164 interrupts = <72>;
165 dmas = <&sdma 49 &sdma 50>;
166 dma-names = "tx", "rx";
79 clock-frequency = <48000000>; 167 clock-frequency = <48000000>;
80 }; 168 };
81 169
82 uart2: serial@4806c000 { 170 uart2: serial@4806c000 {
83 compatible = "ti,omap2-uart"; 171 compatible = "ti,omap2-uart";
84 ti,hwmods = "uart2"; 172 ti,hwmods = "uart2";
173 reg = <0x4806c000 0x400>;
174 interrupts = <73>;
175 dmas = <&sdma 51 &sdma 52>;
176 dma-names = "tx", "rx";
85 clock-frequency = <48000000>; 177 clock-frequency = <48000000>;
86 }; 178 };
87 179
88 uart3: serial@4806e000 { 180 uart3: serial@4806e000 {
89 compatible = "ti,omap2-uart"; 181 compatible = "ti,omap2-uart";
90 ti,hwmods = "uart3"; 182 ti,hwmods = "uart3";
183 reg = <0x4806e000 0x400>;
184 interrupts = <74>;
185 dmas = <&sdma 53 &sdma 54>;
186 dma-names = "tx", "rx";
91 clock-frequency = <48000000>; 187 clock-frequency = <48000000>;
92 }; 188 };
93 189
diff --git a/arch/arm/boot/dts/omap2420.dtsi b/arch/arm/boot/dts/omap2420.dtsi
index c8f9c55169ea..60c605de22dd 100644
--- a/arch/arm/boot/dts/omap2420.dtsi
+++ b/arch/arm/boot/dts/omap2420.dtsi
@@ -114,6 +114,15 @@
114 dma-names = "tx", "rx"; 114 dma-names = "tx", "rx";
115 }; 115 };
116 116
117 msdi1: mmc@4809c000 {
118 compatible = "ti,omap2420-mmc";
119 ti,hwmods = "msdi1";
120 reg = <0x4809c000 0x80>;
121 interrupts = <83>;
122 dmas = <&sdma 61 &sdma 62>;
123 dma-names = "tx", "rx";
124 };
125
117 timer1: timer@48028000 { 126 timer1: timer@48028000 {
118 compatible = "ti,omap2420-timer"; 127 compatible = "ti,omap2420-timer";
119 reg = <0x48028000 0x400>; 128 reg = <0x48028000 0x400>;
@@ -121,5 +130,19 @@
121 ti,hwmods = "timer1"; 130 ti,hwmods = "timer1";
122 ti,timer-alwon; 131 ti,timer-alwon;
123 }; 132 };
133
134 wd_timer2: wdt@48022000 {
135 compatible = "ti,omap2-wdt";
136 ti,hwmods = "wd_timer2";
137 reg = <0x48022000 0x80>;
138 };
124 }; 139 };
125}; 140};
141
142&i2c1 {
143 compatible = "ti,omap2420-i2c";
144};
145
146&i2c2 {
147 compatible = "ti,omap2420-i2c";
148};
diff --git a/arch/arm/boot/dts/omap2430.dtsi b/arch/arm/boot/dts/omap2430.dtsi
index c535a5a2b27f..d624345666f5 100644
--- a/arch/arm/boot/dts/omap2430.dtsi
+++ b/arch/arm/boot/dts/omap2430.dtsi
@@ -175,6 +175,25 @@
175 dma-names = "tx", "rx"; 175 dma-names = "tx", "rx";
176 }; 176 };
177 177
178 mmc1: mmc@4809c000 {
179 compatible = "ti,omap2-hsmmc";
180 reg = <0x4809c000 0x200>;
181 interrupts = <83>;
182 ti,hwmods = "mmc1";
183 ti,dual-volt;
184 dmas = <&sdma 61>, <&sdma 62>;
185 dma-names = "tx", "rx";
186 };
187
188 mmc2: mmc@480b4000 {
189 compatible = "ti,omap2-hsmmc";
190 reg = <0x480b4000 0x200>;
191 interrupts = <86>;
192 ti,hwmods = "mmc2";
193 dmas = <&sdma 47>, <&sdma 48>;
194 dma-names = "tx", "rx";
195 };
196
178 timer1: timer@49018000 { 197 timer1: timer@49018000 {
179 compatible = "ti,omap2420-timer"; 198 compatible = "ti,omap2420-timer";
180 reg = <0x49018000 0x400>; 199 reg = <0x49018000 0x400>;
@@ -182,5 +201,35 @@
182 ti,hwmods = "timer1"; 201 ti,hwmods = "timer1";
183 ti,timer-alwon; 202 ti,timer-alwon;
184 }; 203 };
204
205 mcspi3: mcspi@480b8000 {
206 compatible = "ti,omap2-mcspi";
207 ti,hwmods = "mcspi3";
208 reg = <0x480b8000 0x100>;
209 interrupts = <91>;
210 dmas = <&sdma 15 &sdma 16 &sdma 23 &sdma 24>;
211 dma-names = "tx0", "rx0", "tx1", "rx1";
212 };
213
214 usb_otg_hs: usb_otg_hs@480ac000 {
215 compatible = "ti,omap2-musb";
216 ti,hwmods = "usb_otg_hs";
217 reg = <0x480ac000 0x1000>;
218 interrupts = <93>;
219 };
220
221 wd_timer2: wdt@49016000 {
222 compatible = "ti,omap2-wdt";
223 ti,hwmods = "wd_timer2";
224 reg = <0x49016000 0x80>;
225 };
185 }; 226 };
186}; 227};
228
229&i2c1 {
230 compatible = "ti,omap2430-i2c";
231};
232
233&i2c2 {
234 compatible = "ti,omap2430-i2c";
235};
diff --git a/arch/arm/boot/dts/omap3-beagle-xm.dts b/arch/arm/boot/dts/omap3-beagle-xm.dts
index 31a632f7effb..df33a50bc070 100644
--- a/arch/arm/boot/dts/omap3-beagle-xm.dts
+++ b/arch/arm/boot/dts/omap3-beagle-xm.dts
@@ -215,3 +215,10 @@
215&usbhsehci { 215&usbhsehci {
216 phys = <0 &hsusb2_phy>; 216 phys = <0 &hsusb2_phy>;
217}; 217};
218
219&vaux2 {
220 regulator-name = "usb_1v8";
221 regulator-min-microvolt = <1800000>;
222 regulator-max-microvolt = <1800000>;
223 regulator-always-on;
224};
diff --git a/arch/arm/boot/dts/omap3-beagle.dts b/arch/arm/boot/dts/omap3-beagle.dts
index fa532aaacc68..3ba4a625ea5b 100644
--- a/arch/arm/boot/dts/omap3-beagle.dts
+++ b/arch/arm/boot/dts/omap3-beagle.dts
@@ -61,6 +61,14 @@
61 vcc-supply = <&hsusb2_power>; 61 vcc-supply = <&hsusb2_power>;
62 }; 62 };
63 63
64 sound {
65 compatible = "ti,omap-twl4030";
66 ti,model = "omap3beagle";
67
68 ti,mcbsp = <&mcbsp2>;
69 ti,codec = <&twl_audio>;
70 };
71
64 gpio_keys { 72 gpio_keys {
65 compatible = "gpio-keys"; 73 compatible = "gpio-keys";
66 74
@@ -120,6 +128,12 @@
120 reg = <0x48>; 128 reg = <0x48>;
121 interrupts = <7>; /* SYS_NIRQ cascaded to intc */ 129 interrupts = <7>; /* SYS_NIRQ cascaded to intc */
122 interrupt-parent = <&intc>; 130 interrupt-parent = <&intc>;
131
132 twl_audio: audio {
133 compatible = "ti,twl4030-audio";
134 codec {
135 };
136 };
123 }; 137 };
124}; 138};
125 139
@@ -178,3 +192,10 @@
178 mode = <3>; 192 mode = <3>;
179 power = <50>; 193 power = <50>;
180}; 194};
195
196&vaux2 {
197 regulator-name = "vdd_ehci";
198 regulator-min-microvolt = <1800000>;
199 regulator-max-microvolt = <1800000>;
200 regulator-always-on;
201};
diff --git a/arch/arm/boot/dts/omap3-igep.dtsi b/arch/arm/boot/dts/omap3-igep.dtsi
index ba1e58b7b7e3..165aaf7591ba 100644
--- a/arch/arm/boot/dts/omap3-igep.dtsi
+++ b/arch/arm/boot/dts/omap3-igep.dtsi
@@ -1,5 +1,5 @@
1/* 1/*
2 * Device Tree Source for IGEP Technology devices 2 * Common device tree for IGEP boards based on AM/DM37x
3 * 3 *
4 * Copyright (C) 2012 Javier Martinez Canillas <javier@collabora.co.uk> 4 * Copyright (C) 2012 Javier Martinez Canillas <javier@collabora.co.uk>
5 * Copyright (C) 2012 Enric Balletbo i Serra <eballetbo@gmail.com> 5 * Copyright (C) 2012 Enric Balletbo i Serra <eballetbo@gmail.com>
@@ -10,7 +10,7 @@
10 */ 10 */
11/dts-v1/; 11/dts-v1/;
12 12
13#include "omap34xx.dtsi" 13#include "omap36xx.dtsi"
14 14
15/ { 15/ {
16 memory { 16 memory {
@@ -24,6 +24,25 @@
24 ti,mcbsp = <&mcbsp2>; 24 ti,mcbsp = <&mcbsp2>;
25 ti,codec = <&twl_audio>; 25 ti,codec = <&twl_audio>;
26 }; 26 };
27
28 vdd33: regulator-vdd33 {
29 compatible = "regulator-fixed";
30 regulator-name = "vdd33";
31 regulator-always-on;
32 };
33
34 lbee1usjyc_vmmc: lbee1usjyc_vmmc {
35 pinctrl-names = "default";
36 pinctrl-0 = <&lbee1usjyc_pins>;
37 compatible = "regulator-fixed";
38 regulator-name = "regulator-lbee1usjyc";
39 regulator-min-microvolt = <3300000>;
40 regulator-max-microvolt = <3300000>;
41 gpio = <&gpio5 10 GPIO_ACTIVE_HIGH>; /* gpio_138 WIFI_PDN */
42 startup-delay-us = <10000>;
43 enable-active-high;
44 vin-supply = <&vdd33>;
45 };
27}; 46};
28 47
29&omap3_pmx_core { 48&omap3_pmx_core {
@@ -48,6 +67,15 @@
48 >; 67 >;
49 }; 68 };
50 69
70 /* WiFi/BT combo */
71 lbee1usjyc_pins: pinmux_lbee1usjyc_pins {
72 pinctrl-single,pins = <
73 0x136 (PIN_OUTPUT | MUX_MODE4) /* sdmmc2_dat5.gpio_137 */
74 0x138 (PIN_OUTPUT | MUX_MODE4) /* sdmmc2_dat6.gpio_138 */
75 0x13a (PIN_OUTPUT | MUX_MODE4) /* sdmmc2_dat7.gpio_139 */
76 >;
77 };
78
51 mcbsp2_pins: pinmux_mcbsp2_pins { 79 mcbsp2_pins: pinmux_mcbsp2_pins {
52 pinctrl-single,pins = < 80 pinctrl-single,pins = <
53 0x10c (PIN_INPUT | MUX_MODE0) /* mcbsp2_fsx.mcbsp2_fsx */ 81 0x10c (PIN_INPUT | MUX_MODE0) /* mcbsp2_fsx.mcbsp2_fsx */
@@ -65,10 +93,17 @@
65 0x11a (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc1_dat1.sdmmc1_dat1 */ 93 0x11a (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc1_dat1.sdmmc1_dat1 */
66 0x11c (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc1_dat2.sdmmc1_dat2 */ 94 0x11c (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc1_dat2.sdmmc1_dat2 */
67 0x11e (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc1_dat3.sdmmc1_dat3 */ 95 0x11e (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc1_dat3.sdmmc1_dat3 */
68 0x120 (PIN_INPUT | MUX_MODE0) /* sdmmc1_dat4.sdmmc1_dat4 */ 96 >;
69 0x122 (PIN_INPUT | MUX_MODE0) /* sdmmc1_dat5.sdmmc1_dat5 */ 97 };
70 0x124 (PIN_INPUT | MUX_MODE0) /* sdmmc1_dat6.sdmmc1_dat6 */ 98
71 0x126 (PIN_INPUT | MUX_MODE0) /* sdmmc1_dat7.sdmmc1_dat7 */ 99 mmc2_pins: pinmux_mmc2_pins {
100 pinctrl-single,pins = <
101 0x128 (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_clk.sdmmc2_clk */
102 0x12a (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_cmd.sdmmc2_cmd */
103 0x12c (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat0.sdmmc2_dat0 */
104 0x12e (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat1.sdmmc2_dat1 */
105 0x130 (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat2.sdmmc2_dat2 */
106 0x132 (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat3.sdmmc2_dat3 */
72 >; 107 >;
73 }; 108 };
74 109
@@ -78,10 +113,33 @@
78 >; 113 >;
79 }; 114 };
80 115
116 i2c1_pins: pinmux_i2c1_pins {
117 pinctrl-single,pins = <
118 0x18a (PIN_INPUT | MUX_MODE0) /* i2c1_scl.i2c1_scl */
119 0x18c (PIN_INPUT | MUX_MODE0) /* i2c1_sda.i2c1_sda */
120 >;
121 };
122
123 i2c2_pins: pinmux_i2c2_pins {
124 pinctrl-single,pins = <
125 0x18e (PIN_INPUT | MUX_MODE0) /* i2c2_scl.i2c2_scl */
126 0x190 (PIN_INPUT | MUX_MODE0) /* i2c2_sda.i2c2_sda */
127 >;
128 };
129
130 i2c3_pins: pinmux_i2c3_pins {
131 pinctrl-single,pins = <
132 0x192 (PIN_INPUT | MUX_MODE0) /* i2c3_scl.i2c3_scl */
133 0x194 (PIN_INPUT | MUX_MODE0) /* i2c3_sda.i2c3_sda */
134 >;
135 };
136
81 leds_pins: pinmux_leds_pins { }; 137 leds_pins: pinmux_leds_pins { };
82}; 138};
83 139
84&i2c1 { 140&i2c1 {
141 pinctrl-names = "default";
142 pinctrl-0 = <&i2c1_pins>;
85 clock-frequency = <2600000>; 143 clock-frequency = <2600000>;
86 144
87 twl: twl@48 { 145 twl: twl@48 {
@@ -101,9 +159,16 @@
101#include "twl4030_omap3.dtsi" 159#include "twl4030_omap3.dtsi"
102 160
103&i2c2 { 161&i2c2 {
162 pinctrl-names = "default";
163 pinctrl-0 = <&i2c2_pins>;
104 clock-frequency = <400000>; 164 clock-frequency = <400000>;
105}; 165};
106 166
167&i2c3 {
168 pinctrl-names = "default";
169 pinctrl-0 = <&i2c3_pins>;
170};
171
107&mcbsp2 { 172&mcbsp2 {
108 pinctrl-names = "default"; 173 pinctrl-names = "default";
109 pinctrl-0 = <&mcbsp2_pins>; 174 pinctrl-0 = <&mcbsp2_pins>;
@@ -114,11 +179,15 @@
114 pinctrl-0 = <&mmc1_pins>; 179 pinctrl-0 = <&mmc1_pins>;
115 vmmc-supply = <&vmmc1>; 180 vmmc-supply = <&vmmc1>;
116 vmmc_aux-supply = <&vsim>; 181 vmmc_aux-supply = <&vsim>;
117 bus-width = <8>; 182 bus-width = <4>;
118}; 183};
119 184
120&mmc2 { 185&mmc2 {
121 status = "disabled"; 186 pinctrl-names = "default";
187 pinctrl-0 = <&mmc2_pins>;
188 vmmc-supply = <&lbee1usjyc_vmmc>;
189 bus-width = <4>;
190 non-removable;
122}; 191};
123 192
124&mmc3 { 193&mmc3 {
diff --git a/arch/arm/boot/dts/omap3-igep0020.dts b/arch/arm/boot/dts/omap3-igep0020.dts
index d5cc79267250..1c7e74d2d2bc 100644
--- a/arch/arm/boot/dts/omap3-igep0020.dts
+++ b/arch/arm/boot/dts/omap3-igep0020.dts
@@ -1,5 +1,5 @@
1/* 1/*
2 * Device Tree Source for IGEPv2 board 2 * Device Tree Source for IGEPv2 Rev. (TI OMAP AM/DM37x)
3 * 3 *
4 * Copyright (C) 2012 Javier Martinez Canillas <javier@collabora.co.uk> 4 * Copyright (C) 2012 Javier Martinez Canillas <javier@collabora.co.uk>
5 * Copyright (C) 2012 Enric Balletbo i Serra <eballetbo@gmail.com> 5 * Copyright (C) 2012 Enric Balletbo i Serra <eballetbo@gmail.com>
@@ -13,7 +13,7 @@
13#include "omap-gpmc-smsc911x.dtsi" 13#include "omap-gpmc-smsc911x.dtsi"
14 14
15/ { 15/ {
16 model = "IGEPv2"; 16 model = "IGEPv2 (TI OMAP AM/DM37x)";
17 compatible = "isee,omap3-igep0020", "ti,omap3"; 17 compatible = "isee,omap3-igep0020", "ti,omap3";
18 18
19 leds { 19 leds {
@@ -67,6 +67,8 @@
67 pinctrl-names = "default"; 67 pinctrl-names = "default";
68 pinctrl-0 = < 68 pinctrl-0 = <
69 &hsusbb1_pins 69 &hsusbb1_pins
70 &tfp410_pins
71 &dss_pins
70 >; 72 >;
71 73
72 hsusbb1_pins: pinmux_hsusbb1_pins { 74 hsusbb1_pins: pinmux_hsusbb1_pins {
@@ -85,6 +87,45 @@
85 0x5ba (PIN_INPUT_PULLDOWN | MUX_MODE3) /* etk_d7.hsusb1_data3 */ 87 0x5ba (PIN_INPUT_PULLDOWN | MUX_MODE3) /* etk_d7.hsusb1_data3 */
86 >; 88 >;
87 }; 89 };
90
91 tfp410_pins: tfp410_dvi_pins {
92 pinctrl-single,pins = <
93 0x196 (PIN_OUTPUT | MUX_MODE4) /* hdq_sio.gpio_170 */
94 >;
95 };
96
97 dss_pins: pinmux_dss_dvi_pins {
98 pinctrl-single,pins = <
99 0x0a4 (PIN_OUTPUT | MUX_MODE0) /* dss_pclk.dss_pclk */
100 0x0a6 (PIN_OUTPUT | MUX_MODE0) /* dss_hsync.dss_hsync */
101 0x0a8 (PIN_OUTPUT | MUX_MODE0) /* dss_vsync.dss_vsync */
102 0x0aa (PIN_OUTPUT | MUX_MODE0) /* dss_acbias.dss_acbias */
103 0x0ac (PIN_OUTPUT | MUX_MODE0) /* dss_data0.dss_data0 */
104 0x0ae (PIN_OUTPUT | MUX_MODE0) /* dss_data1.dss_data1 */
105 0x0b0 (PIN_OUTPUT | MUX_MODE0) /* dss_data2.dss_data2 */
106 0x0b2 (PIN_OUTPUT | MUX_MODE0) /* dss_data3.dss_data3 */
107 0x0b4 (PIN_OUTPUT | MUX_MODE0) /* dss_data4.dss_data4 */
108 0x0b6 (PIN_OUTPUT | MUX_MODE0) /* dss_data5.dss_data5 */
109 0x0b8 (PIN_OUTPUT | MUX_MODE0) /* dss_data6.dss_data6 */
110 0x0ba (PIN_OUTPUT | MUX_MODE0) /* dss_data7.dss_data7 */
111 0x0bc (PIN_OUTPUT | MUX_MODE0) /* dss_data8.dss_data8 */
112 0x0be (PIN_OUTPUT | MUX_MODE0) /* dss_data9.dss_data9 */
113 0x0c0 (PIN_OUTPUT | MUX_MODE0) /* dss_data10.dss_data10 */
114 0x0c2 (PIN_OUTPUT | MUX_MODE0) /* dss_data11.dss_data11 */
115 0x0c4 (PIN_OUTPUT | MUX_MODE0) /* dss_data12.dss_data12 */
116 0x0c6 (PIN_OUTPUT | MUX_MODE0) /* dss_data13.dss_data13 */
117 0x0c8 (PIN_OUTPUT | MUX_MODE0) /* dss_data14.dss_data14 */
118 0x0ca (PIN_OUTPUT | MUX_MODE0) /* dss_data15.dss_data15 */
119 0x0cc (PIN_OUTPUT | MUX_MODE0) /* dss_data16.dss_data16 */
120 0x0ce (PIN_OUTPUT | MUX_MODE0) /* dss_data17.dss_data17 */
121 0x0d0 (PIN_OUTPUT | MUX_MODE0) /* dss_data18.dss_data18 */
122 0x0d2 (PIN_OUTPUT | MUX_MODE0) /* dss_data19.dss_data19 */
123 0x0d4 (PIN_OUTPUT | MUX_MODE0) /* dss_data20.dss_data20 */
124 0x0d6 (PIN_OUTPUT | MUX_MODE0) /* dss_data21.dss_data21 */
125 0x0d8 (PIN_OUTPUT | MUX_MODE0) /* dss_data22.dss_data22 */
126 0x0da (PIN_OUTPUT | MUX_MODE0) /* dss_data23.dss_data23 */
127 >;
128 };
88}; 129};
89 130
90&leds_pins { 131&leds_pins {
@@ -174,3 +215,8 @@
174&usbhsehci { 215&usbhsehci {
175 phys = <&hsusb1_phy>; 216 phys = <&hsusb1_phy>;
176}; 217};
218
219&vpll2 {
220 /* Needed for DSS */
221 regulator-name = "vdds_dsi";
222};
diff --git a/arch/arm/boot/dts/omap3-igep0030.dts b/arch/arm/boot/dts/omap3-igep0030.dts
index 525e6d9b0978..02a23f8a3384 100644
--- a/arch/arm/boot/dts/omap3-igep0030.dts
+++ b/arch/arm/boot/dts/omap3-igep0030.dts
@@ -1,5 +1,5 @@
1/* 1/*
2 * Device Tree Source for IGEP COM Module 2 * Device Tree Source for IGEP COM MODULE (TI OMAP AM/DM37x)
3 * 3 *
4 * Copyright (C) 2012 Javier Martinez Canillas <javier@collabora.co.uk> 4 * Copyright (C) 2012 Javier Martinez Canillas <javier@collabora.co.uk>
5 * Copyright (C) 2012 Enric Balletbo i Serra <eballetbo@gmail.com> 5 * Copyright (C) 2012 Enric Balletbo i Serra <eballetbo@gmail.com>
@@ -12,7 +12,7 @@
12#include "omap3-igep.dtsi" 12#include "omap3-igep.dtsi"
13 13
14/ { 14/ {
15 model = "IGEP COM Module"; 15 model = "IGEP COM MODULE (TI OMAP AM/DM37x)";
16 compatible = "isee,omap3-igep0030", "ti,omap3"; 16 compatible = "isee,omap3-igep0030", "ti,omap3";
17 17
18 leds { 18 leds {
diff --git a/arch/arm/boot/dts/omap3-n900.dts b/arch/arm/boot/dts/omap3-n900.dts
index c4f20bfe4cce..6fc85f963530 100644
--- a/arch/arm/boot/dts/omap3-n900.dts
+++ b/arch/arm/boot/dts/omap3-n900.dts
@@ -9,7 +9,7 @@
9 9
10/dts-v1/; 10/dts-v1/;
11 11
12#include "omap34xx.dtsi" 12#include "omap34xx-hs.dtsi"
13 13
14/ { 14/ {
15 model = "Nokia N900"; 15 model = "Nokia N900";
@@ -125,6 +125,21 @@
125 >; 125 >;
126 }; 126 };
127 127
128 mmc2_pins: pinmux_mmc2_pins {
129 pinctrl-single,pins = <
130 0x128 (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_clk */
131 0x12a (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_cmd */
132 0x12c (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat0 */
133 0x12e (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat1 */
134 0x130 (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat2 */
135 0x132 (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat3 */
136 0x134 (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat4 */
137 0x136 (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat5 */
138 0x138 (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat6 */
139 0x13a (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat7 */
140 >;
141 };
142
128 display_pins: pinmux_display_pins { 143 display_pins: pinmux_display_pins {
129 pinctrl-single,pins = < 144 pinctrl-single,pins = <
130 0x0d4 (PIN_OUTPUT | MUX_MODE4) /* RX51_LCD_RESET_GPIO */ 145 0x0d4 (PIN_OUTPUT | MUX_MODE4) /* RX51_LCD_RESET_GPIO */
@@ -358,8 +373,14 @@
358 cd-gpios = <&gpio6 0 GPIO_ACTIVE_HIGH>; /* 160 */ 373 cd-gpios = <&gpio6 0 GPIO_ACTIVE_HIGH>; /* 160 */
359}; 374};
360 375
376/* most boards use vaux3, only some old versions use vmmc2 instead */
361&mmc2 { 377&mmc2 {
362 status = "disabled"; 378 pinctrl-names = "default";
379 pinctrl-0 = <&mmc2_pins>;
380 vmmc-supply = <&vaux3>;
381 vmmc_aux-supply = <&vsim>;
382 bus-width = <8>;
383 non-removable;
363}; 384};
364 385
365&mmc3 { 386&mmc3 {
diff --git a/arch/arm/boot/dts/omap3-n950-n9.dtsi b/arch/arm/boot/dts/omap3-n950-n9.dtsi
index 94eb77d3b9dd..5c26c184f2c1 100644
--- a/arch/arm/boot/dts/omap3-n950-n9.dtsi
+++ b/arch/arm/boot/dts/omap3-n950-n9.dtsi
@@ -8,7 +8,7 @@
8 * published by the Free Software Foundation. 8 * published by the Free Software Foundation.
9 */ 9 */
10 10
11#include "omap36xx.dtsi" 11#include "omap36xx-hs.dtsi"
12 12
13/ { 13/ {
14 cpus { 14 cpus {
diff --git a/arch/arm/boot/dts/omap3.dtsi b/arch/arm/boot/dts/omap3.dtsi
index f3a0c26ed0c2..daabf99d402a 100644
--- a/arch/arm/boot/dts/omap3.dtsi
+++ b/arch/arm/boot/dts/omap3.dtsi
@@ -82,6 +82,13 @@
82 ranges; 82 ranges;
83 ti,hwmods = "l3_main"; 83 ti,hwmods = "l3_main";
84 84
85 aes: aes@480c5000 {
86 compatible = "ti,omap3-aes";
87 ti,hwmods = "aes";
88 reg = <0x480c5000 0x50>;
89 interrupts = <0>;
90 };
91
85 counter32k: counter@48320000 { 92 counter32k: counter@48320000 {
86 compatible = "ti,omap-counter32k"; 93 compatible = "ti,omap-counter32k";
87 reg = <0x48320000 0x20>; 94 reg = <0x48320000 0x20>;
@@ -260,6 +267,13 @@
260 ti,hwmods = "i2c3"; 267 ti,hwmods = "i2c3";
261 }; 268 };
262 269
270 mailbox: mailbox@48094000 {
271 compatible = "ti,omap3-mailbox";
272 ti,hwmods = "mailbox";
273 reg = <0x48094000 0x200>;
274 interrupts = <26>;
275 };
276
263 mcspi1: spi@48098000 { 277 mcspi1: spi@48098000 {
264 compatible = "ti,omap2-mcspi"; 278 compatible = "ti,omap2-mcspi";
265 reg = <0x48098000 0x100>; 279 reg = <0x48098000 0x100>;
@@ -357,6 +371,13 @@
357 dma-names = "tx", "rx"; 371 dma-names = "tx", "rx";
358 }; 372 };
359 373
374 mmu_isp: mmu@480bd400 {
375 compatible = "ti,omap3-mmu-isp";
376 ti,hwmods = "mmu_isp";
377 reg = <0x480bd400 0x80>;
378 interrupts = <8>;
379 };
380
360 wdt2: wdt@48314000 { 381 wdt2: wdt@48314000 {
361 compatible = "ti,omap3-wdt"; 382 compatible = "ti,omap3-wdt";
362 reg = <0x48314000 0x80>; 383 reg = <0x48314000 0x80>;
@@ -442,6 +463,27 @@
442 dma-names = "tx", "rx"; 463 dma-names = "tx", "rx";
443 }; 464 };
444 465
466 sham: sham@480c3000 {
467 compatible = "ti,omap3-sham";
468 ti,hwmods = "sham";
469 reg = <0x480c3000 0x64>;
470 interrupts = <49>;
471 };
472
473 smartreflex_core: smartreflex@480cb000 {
474 compatible = "ti,omap3-smartreflex-core";
475 ti,hwmods = "smartreflex_core";
476 reg = <0x480cb000 0x400>;
477 interrupts = <19>;
478 };
479
480 smartreflex_mpu_iva: smartreflex@480c9000 {
481 compatible = "ti,omap3-smartreflex-iva";
482 ti,hwmods = "smartreflex_mpu_iva";
483 reg = <0x480c9000 0x400>;
484 interrupts = <18>;
485 };
486
445 timer1: timer@48318000 { 487 timer1: timer@48318000 {
446 compatible = "ti,omap3430-timer"; 488 compatible = "ti,omap3430-timer";
447 reg = <0x48318000 0x400>; 489 reg = <0x48318000 0x400>;
diff --git a/arch/arm/boot/dts/omap34xx-hs.dtsi b/arch/arm/boot/dts/omap34xx-hs.dtsi
new file mode 100644
index 000000000000..1ff626489546
--- /dev/null
+++ b/arch/arm/boot/dts/omap34xx-hs.dtsi
@@ -0,0 +1,16 @@
1/* Disabled modules for secure omaps */
2
3#include "omap34xx.dtsi"
4
5/* Secure omaps have some devices inaccessible depending on the firmware */
6&aes {
7 status = "disabled";
8};
9
10&sham {
11 status = "disabled";
12};
13
14&timer12 {
15 status = "disabled";
16};
diff --git a/arch/arm/boot/dts/omap36xx-hs.dtsi b/arch/arm/boot/dts/omap36xx-hs.dtsi
new file mode 100644
index 000000000000..2c7febb0e016
--- /dev/null
+++ b/arch/arm/boot/dts/omap36xx-hs.dtsi
@@ -0,0 +1,16 @@
1/* Disabled modules for secure omaps */
2
3#include "omap36xx.dtsi"
4
5/* Secure omaps have some devices inaccessible depending on the firmware */
6&aes {
7 status = "disabled";
8};
9
10&sham {
11 status = "disabled";
12};
13
14&timer12 {
15 status = "disabled";
16};
diff --git a/arch/arm/boot/dts/omap4-panda-common.dtsi b/arch/arm/boot/dts/omap4-panda-common.dtsi
index 298e85020e1b..88c6a05cab41 100644
--- a/arch/arm/boot/dts/omap4-panda-common.dtsi
+++ b/arch/arm/boot/dts/omap4-panda-common.dtsi
@@ -246,15 +246,6 @@
246 0xf0 (PIN_INPUT_PULLUP | MUX_MODE0) /* i2c4_sda */ 246 0xf0 (PIN_INPUT_PULLUP | MUX_MODE0) /* i2c4_sda */
247 >; 247 >;
248 }; 248 };
249};
250
251&omap4_pmx_wkup {
252 led_wkgpio_pins: pinmux_leds_wkpins {
253 pinctrl-single,pins = <
254 0x1a (PIN_OUTPUT | MUX_MODE3) /* gpio_wk7 */
255 0x1c (PIN_OUTPUT | MUX_MODE3) /* gpio_wk8 */
256 >;
257 };
258 249
259 /* 250 /*
260 * wl12xx GPIO outputs for WLAN_EN, BT_EN, FM_EN, BT_WAKEUP 251 * wl12xx GPIO outputs for WLAN_EN, BT_EN, FM_EN, BT_WAKEUP
@@ -274,7 +265,7 @@
274 pinctrl-single,pins = < 265 pinctrl-single,pins = <
275 0x38 (PIN_INPUT | MUX_MODE3) /* gpmc_ncs2.gpio_52 */ 266 0x38 (PIN_INPUT | MUX_MODE3) /* gpmc_ncs2.gpio_52 */
276 0x3a (PIN_INPUT | MUX_MODE3) /* gpmc_ncs3.gpio_53 */ 267 0x3a (PIN_INPUT | MUX_MODE3) /* gpmc_ncs3.gpio_53 */
277 0x108 (PIN_OUTPUT | MUX_MODE0) /* sdmmc5_clk.sdmmc5_clk */ 268 0x108 (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc5_clk.sdmmc5_clk */
278 0x10a (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc5_cmd.sdmmc5_cmd */ 269 0x10a (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc5_cmd.sdmmc5_cmd */
279 0x10c (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc5_dat0.sdmmc5_dat0 */ 270 0x10c (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc5_dat0.sdmmc5_dat0 */
280 0x10e (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc5_dat1.sdmmc5_dat1 */ 271 0x10e (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc5_dat1.sdmmc5_dat1 */
@@ -284,6 +275,15 @@
284 }; 275 };
285}; 276};
286 277
278&omap4_pmx_wkup {
279 led_wkgpio_pins: pinmux_leds_wkpins {
280 pinctrl-single,pins = <
281 0x1a (PIN_OUTPUT | MUX_MODE3) /* gpio_wk7 */
282 0x1c (PIN_OUTPUT | MUX_MODE3) /* gpio_wk8 */
283 >;
284 };
285};
286
287&i2c1 { 287&i2c1 {
288 pinctrl-names = "default"; 288 pinctrl-names = "default";
289 pinctrl-0 = <&i2c1_pins>; 289 pinctrl-0 = <&i2c1_pins>;
diff --git a/arch/arm/boot/dts/omap4-sdp.dts b/arch/arm/boot/dts/omap4-sdp.dts
index 5fc3f43c5a81..dbc81fb6ef03 100644
--- a/arch/arm/boot/dts/omap4-sdp.dts
+++ b/arch/arm/boot/dts/omap4-sdp.dts
@@ -300,12 +300,12 @@
300 wl12xx_pins: pinmux_wl12xx_pins { 300 wl12xx_pins: pinmux_wl12xx_pins {
301 pinctrl-single,pins = < 301 pinctrl-single,pins = <
302 0x3a (PIN_INPUT | MUX_MODE3) /* gpmc_ncs3.gpio_53 */ 302 0x3a (PIN_INPUT | MUX_MODE3) /* gpmc_ncs3.gpio_53 */
303 0x108 (PIN_OUTPUT | MUX_MODE3) /* sdmmc5_clk.sdmmc5_clk */ 303 0x108 (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc5_clk.sdmmc5_clk */
304 0x10a (PIN_INPUT_PULLUP | MUX_MODE3) /* sdmmc5_cmd.sdmmc5_cmd */ 304 0x10a (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc5_cmd.sdmmc5_cmd */
305 0x10c (PIN_INPUT_PULLUP | MUX_MODE3) /* sdmmc5_dat0.sdmmc5_dat0 */ 305 0x10c (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc5_dat0.sdmmc5_dat0 */
306 0x10e (PIN_INPUT_PULLUP | MUX_MODE3) /* sdmmc5_dat1.sdmmc5_dat1 */ 306 0x10e (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc5_dat1.sdmmc5_dat1 */
307 0x110 (PIN_INPUT_PULLUP | MUX_MODE3) /* sdmmc5_dat2.sdmmc5_dat2 */ 307 0x110 (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc5_dat2.sdmmc5_dat2 */
308 0x112 (PIN_INPUT_PULLUP | MUX_MODE3) /* sdmmc5_dat3.sdmmc5_dat3 */ 308 0x112 (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc5_dat3.sdmmc5_dat3 */
309 >; 309 >;
310 }; 310 };
311}; 311};
diff --git a/arch/arm/boot/dts/r8a7790.dtsi b/arch/arm/boot/dts/r8a7790.dtsi
index ee845fad939b..9987dd0e9c59 100644
--- a/arch/arm/boot/dts/r8a7790.dtsi
+++ b/arch/arm/boot/dts/r8a7790.dtsi
@@ -87,9 +87,9 @@
87 interrupts = <1 9 0xf04>; 87 interrupts = <1 9 0xf04>;
88 }; 88 };
89 89
90 gpio0: gpio@ffc40000 { 90 gpio0: gpio@e6050000 {
91 compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar"; 91 compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar";
92 reg = <0 0xffc40000 0 0x2c>; 92 reg = <0 0xe6050000 0 0x50>;
93 interrupt-parent = <&gic>; 93 interrupt-parent = <&gic>;
94 interrupts = <0 4 0x4>; 94 interrupts = <0 4 0x4>;
95 #gpio-cells = <2>; 95 #gpio-cells = <2>;
@@ -99,9 +99,9 @@
99 interrupt-controller; 99 interrupt-controller;
100 }; 100 };
101 101
102 gpio1: gpio@ffc41000 { 102 gpio1: gpio@e6051000 {
103 compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar"; 103 compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar";
104 reg = <0 0xffc41000 0 0x2c>; 104 reg = <0 0xe6051000 0 0x50>;
105 interrupt-parent = <&gic>; 105 interrupt-parent = <&gic>;
106 interrupts = <0 5 0x4>; 106 interrupts = <0 5 0x4>;
107 #gpio-cells = <2>; 107 #gpio-cells = <2>;
@@ -111,9 +111,9 @@
111 interrupt-controller; 111 interrupt-controller;
112 }; 112 };
113 113
114 gpio2: gpio@ffc42000 { 114 gpio2: gpio@e6052000 {
115 compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar"; 115 compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar";
116 reg = <0 0xffc42000 0 0x2c>; 116 reg = <0 0xe6052000 0 0x50>;
117 interrupt-parent = <&gic>; 117 interrupt-parent = <&gic>;
118 interrupts = <0 6 0x4>; 118 interrupts = <0 6 0x4>;
119 #gpio-cells = <2>; 119 #gpio-cells = <2>;
@@ -123,9 +123,9 @@
123 interrupt-controller; 123 interrupt-controller;
124 }; 124 };
125 125
126 gpio3: gpio@ffc43000 { 126 gpio3: gpio@e6053000 {
127 compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar"; 127 compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar";
128 reg = <0 0xffc43000 0 0x2c>; 128 reg = <0 0xe6053000 0 0x50>;
129 interrupt-parent = <&gic>; 129 interrupt-parent = <&gic>;
130 interrupts = <0 7 0x4>; 130 interrupts = <0 7 0x4>;
131 #gpio-cells = <2>; 131 #gpio-cells = <2>;
@@ -135,9 +135,9 @@
135 interrupt-controller; 135 interrupt-controller;
136 }; 136 };
137 137
138 gpio4: gpio@ffc44000 { 138 gpio4: gpio@e6054000 {
139 compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar"; 139 compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar";
140 reg = <0 0xffc44000 0 0x2c>; 140 reg = <0 0xe6054000 0 0x50>;
141 interrupt-parent = <&gic>; 141 interrupt-parent = <&gic>;
142 interrupts = <0 8 0x4>; 142 interrupts = <0 8 0x4>;
143 #gpio-cells = <2>; 143 #gpio-cells = <2>;
@@ -147,9 +147,9 @@
147 interrupt-controller; 147 interrupt-controller;
148 }; 148 };
149 149
150 gpio5: gpio@ffc45000 { 150 gpio5: gpio@e6055000 {
151 compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar"; 151 compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar";
152 reg = <0 0xffc45000 0 0x2c>; 152 reg = <0 0xe6055000 0 0x50>;
153 interrupt-parent = <&gic>; 153 interrupt-parent = <&gic>;
154 interrupts = <0 9 0x4>; 154 interrupts = <0 9 0x4>;
155 #gpio-cells = <2>; 155 #gpio-cells = <2>;
@@ -241,7 +241,7 @@
241 241
242 sdhi0: sdhi@ee100000 { 242 sdhi0: sdhi@ee100000 {
243 compatible = "renesas,sdhi-r8a7790"; 243 compatible = "renesas,sdhi-r8a7790";
244 reg = <0 0xee100000 0 0x100>; 244 reg = <0 0xee100000 0 0x200>;
245 interrupt-parent = <&gic>; 245 interrupt-parent = <&gic>;
246 interrupts = <0 165 4>; 246 interrupts = <0 165 4>;
247 cap-sd-highspeed; 247 cap-sd-highspeed;
@@ -250,7 +250,7 @@
250 250
251 sdhi1: sdhi@ee120000 { 251 sdhi1: sdhi@ee120000 {
252 compatible = "renesas,sdhi-r8a7790"; 252 compatible = "renesas,sdhi-r8a7790";
253 reg = <0 0xee120000 0 0x100>; 253 reg = <0 0xee120000 0 0x200>;
254 interrupt-parent = <&gic>; 254 interrupt-parent = <&gic>;
255 interrupts = <0 166 4>; 255 interrupts = <0 166 4>;
256 cap-sd-highspeed; 256 cap-sd-highspeed;
diff --git a/arch/arm/boot/dts/socfpga.dtsi b/arch/arm/boot/dts/socfpga.dtsi
index 6d09b8d42fdd..f936476c2753 100644
--- a/arch/arm/boot/dts/socfpga.dtsi
+++ b/arch/arm/boot/dts/socfpga.dtsi
@@ -245,14 +245,14 @@
245 245
246 mpu_periph_clk: mpu_periph_clk { 246 mpu_periph_clk: mpu_periph_clk {
247 #clock-cells = <0>; 247 #clock-cells = <0>;
248 compatible = "altr,socfpga-gate-clk"; 248 compatible = "altr,socfpga-perip-clk";
249 clocks = <&mpuclk>; 249 clocks = <&mpuclk>;
250 fixed-divider = <4>; 250 fixed-divider = <4>;
251 }; 251 };
252 252
253 mpu_l2_ram_clk: mpu_l2_ram_clk { 253 mpu_l2_ram_clk: mpu_l2_ram_clk {
254 #clock-cells = <0>; 254 #clock-cells = <0>;
255 compatible = "altr,socfpga-gate-clk"; 255 compatible = "altr,socfpga-perip-clk";
256 clocks = <&mpuclk>; 256 clocks = <&mpuclk>;
257 fixed-divider = <2>; 257 fixed-divider = <2>;
258 }; 258 };
@@ -266,8 +266,9 @@
266 266
267 l3_main_clk: l3_main_clk { 267 l3_main_clk: l3_main_clk {
268 #clock-cells = <0>; 268 #clock-cells = <0>;
269 compatible = "altr,socfpga-gate-clk"; 269 compatible = "altr,socfpga-perip-clk";
270 clocks = <&mainclk>; 270 clocks = <&mainclk>;
271 fixed-divider = <1>;
271 }; 272 };
272 273
273 l3_mp_clk: l3_mp_clk { 274 l3_mp_clk: l3_mp_clk {
diff --git a/arch/arm/boot/dts/sun6i-a31.dtsi b/arch/arm/boot/dts/sun6i-a31.dtsi
index c1751a64889a..7f5878c2784a 100644
--- a/arch/arm/boot/dts/sun6i-a31.dtsi
+++ b/arch/arm/boot/dts/sun6i-a31.dtsi
@@ -193,7 +193,10 @@
193 pio: pinctrl@01c20800 { 193 pio: pinctrl@01c20800 {
194 compatible = "allwinner,sun6i-a31-pinctrl"; 194 compatible = "allwinner,sun6i-a31-pinctrl";
195 reg = <0x01c20800 0x400>; 195 reg = <0x01c20800 0x400>;
196 interrupts = <0 11 1>, <0 15 1>, <0 16 1>, <0 17 1>; 196 interrupts = <0 11 4>,
197 <0 15 4>,
198 <0 16 4>,
199 <0 17 4>;
197 clocks = <&apb1_gates 5>; 200 clocks = <&apb1_gates 5>;
198 gpio-controller; 201 gpio-controller;
199 interrupt-controller; 202 interrupt-controller;
@@ -212,11 +215,11 @@
212 timer@01c20c00 { 215 timer@01c20c00 {
213 compatible = "allwinner,sun4i-timer"; 216 compatible = "allwinner,sun4i-timer";
214 reg = <0x01c20c00 0xa0>; 217 reg = <0x01c20c00 0xa0>;
215 interrupts = <0 18 1>, 218 interrupts = <0 18 4>,
216 <0 19 1>, 219 <0 19 4>,
217 <0 20 1>, 220 <0 20 4>,
218 <0 21 1>, 221 <0 21 4>,
219 <0 22 1>; 222 <0 22 4>;
220 clocks = <&osc24M>; 223 clocks = <&osc24M>;
221 }; 224 };
222 225
@@ -228,7 +231,7 @@
228 uart0: serial@01c28000 { 231 uart0: serial@01c28000 {
229 compatible = "snps,dw-apb-uart"; 232 compatible = "snps,dw-apb-uart";
230 reg = <0x01c28000 0x400>; 233 reg = <0x01c28000 0x400>;
231 interrupts = <0 0 1>; 234 interrupts = <0 0 4>;
232 reg-shift = <2>; 235 reg-shift = <2>;
233 reg-io-width = <4>; 236 reg-io-width = <4>;
234 clocks = <&apb2_gates 16>; 237 clocks = <&apb2_gates 16>;
@@ -238,7 +241,7 @@
238 uart1: serial@01c28400 { 241 uart1: serial@01c28400 {
239 compatible = "snps,dw-apb-uart"; 242 compatible = "snps,dw-apb-uart";
240 reg = <0x01c28400 0x400>; 243 reg = <0x01c28400 0x400>;
241 interrupts = <0 1 1>; 244 interrupts = <0 1 4>;
242 reg-shift = <2>; 245 reg-shift = <2>;
243 reg-io-width = <4>; 246 reg-io-width = <4>;
244 clocks = <&apb2_gates 17>; 247 clocks = <&apb2_gates 17>;
@@ -248,7 +251,7 @@
248 uart2: serial@01c28800 { 251 uart2: serial@01c28800 {
249 compatible = "snps,dw-apb-uart"; 252 compatible = "snps,dw-apb-uart";
250 reg = <0x01c28800 0x400>; 253 reg = <0x01c28800 0x400>;
251 interrupts = <0 2 1>; 254 interrupts = <0 2 4>;
252 reg-shift = <2>; 255 reg-shift = <2>;
253 reg-io-width = <4>; 256 reg-io-width = <4>;
254 clocks = <&apb2_gates 18>; 257 clocks = <&apb2_gates 18>;
@@ -258,7 +261,7 @@
258 uart3: serial@01c28c00 { 261 uart3: serial@01c28c00 {
259 compatible = "snps,dw-apb-uart"; 262 compatible = "snps,dw-apb-uart";
260 reg = <0x01c28c00 0x400>; 263 reg = <0x01c28c00 0x400>;
261 interrupts = <0 3 1>; 264 interrupts = <0 3 4>;
262 reg-shift = <2>; 265 reg-shift = <2>;
263 reg-io-width = <4>; 266 reg-io-width = <4>;
264 clocks = <&apb2_gates 19>; 267 clocks = <&apb2_gates 19>;
@@ -268,7 +271,7 @@
268 uart4: serial@01c29000 { 271 uart4: serial@01c29000 {
269 compatible = "snps,dw-apb-uart"; 272 compatible = "snps,dw-apb-uart";
270 reg = <0x01c29000 0x400>; 273 reg = <0x01c29000 0x400>;
271 interrupts = <0 4 1>; 274 interrupts = <0 4 4>;
272 reg-shift = <2>; 275 reg-shift = <2>;
273 reg-io-width = <4>; 276 reg-io-width = <4>;
274 clocks = <&apb2_gates 20>; 277 clocks = <&apb2_gates 20>;
@@ -278,7 +281,7 @@
278 uart5: serial@01c29400 { 281 uart5: serial@01c29400 {
279 compatible = "snps,dw-apb-uart"; 282 compatible = "snps,dw-apb-uart";
280 reg = <0x01c29400 0x400>; 283 reg = <0x01c29400 0x400>;
281 interrupts = <0 5 1>; 284 interrupts = <0 5 4>;
282 reg-shift = <2>; 285 reg-shift = <2>;
283 reg-io-width = <4>; 286 reg-io-width = <4>;
284 clocks = <&apb2_gates 21>; 287 clocks = <&apb2_gates 21>;
diff --git a/arch/arm/boot/dts/sun7i-a20.dtsi b/arch/arm/boot/dts/sun7i-a20.dtsi
index e46cfedde74c..367611a0730b 100644
--- a/arch/arm/boot/dts/sun7i-a20.dtsi
+++ b/arch/arm/boot/dts/sun7i-a20.dtsi
@@ -170,7 +170,7 @@
170 emac: ethernet@01c0b000 { 170 emac: ethernet@01c0b000 {
171 compatible = "allwinner,sun4i-emac"; 171 compatible = "allwinner,sun4i-emac";
172 reg = <0x01c0b000 0x1000>; 172 reg = <0x01c0b000 0x1000>;
173 interrupts = <0 55 1>; 173 interrupts = <0 55 4>;
174 clocks = <&ahb_gates 17>; 174 clocks = <&ahb_gates 17>;
175 status = "disabled"; 175 status = "disabled";
176 }; 176 };
@@ -186,7 +186,7 @@
186 pio: pinctrl@01c20800 { 186 pio: pinctrl@01c20800 {
187 compatible = "allwinner,sun7i-a20-pinctrl"; 187 compatible = "allwinner,sun7i-a20-pinctrl";
188 reg = <0x01c20800 0x400>; 188 reg = <0x01c20800 0x400>;
189 interrupts = <0 28 1>; 189 interrupts = <0 28 4>;
190 clocks = <&apb0_gates 5>; 190 clocks = <&apb0_gates 5>;
191 gpio-controller; 191 gpio-controller;
192 interrupt-controller; 192 interrupt-controller;
@@ -251,12 +251,12 @@
251 timer@01c20c00 { 251 timer@01c20c00 {
252 compatible = "allwinner,sun4i-timer"; 252 compatible = "allwinner,sun4i-timer";
253 reg = <0x01c20c00 0x90>; 253 reg = <0x01c20c00 0x90>;
254 interrupts = <0 22 1>, 254 interrupts = <0 22 4>,
255 <0 23 1>, 255 <0 23 4>,
256 <0 24 1>, 256 <0 24 4>,
257 <0 25 1>, 257 <0 25 4>,
258 <0 67 1>, 258 <0 67 4>,
259 <0 68 1>; 259 <0 68 4>;
260 clocks = <&osc24M>; 260 clocks = <&osc24M>;
261 }; 261 };
262 262
@@ -273,7 +273,7 @@
273 uart0: serial@01c28000 { 273 uart0: serial@01c28000 {
274 compatible = "snps,dw-apb-uart"; 274 compatible = "snps,dw-apb-uart";
275 reg = <0x01c28000 0x400>; 275 reg = <0x01c28000 0x400>;
276 interrupts = <0 1 1>; 276 interrupts = <0 1 4>;
277 reg-shift = <2>; 277 reg-shift = <2>;
278 reg-io-width = <4>; 278 reg-io-width = <4>;
279 clocks = <&apb1_gates 16>; 279 clocks = <&apb1_gates 16>;
@@ -283,7 +283,7 @@
283 uart1: serial@01c28400 { 283 uart1: serial@01c28400 {
284 compatible = "snps,dw-apb-uart"; 284 compatible = "snps,dw-apb-uart";
285 reg = <0x01c28400 0x400>; 285 reg = <0x01c28400 0x400>;
286 interrupts = <0 2 1>; 286 interrupts = <0 2 4>;
287 reg-shift = <2>; 287 reg-shift = <2>;
288 reg-io-width = <4>; 288 reg-io-width = <4>;
289 clocks = <&apb1_gates 17>; 289 clocks = <&apb1_gates 17>;
@@ -293,7 +293,7 @@
293 uart2: serial@01c28800 { 293 uart2: serial@01c28800 {
294 compatible = "snps,dw-apb-uart"; 294 compatible = "snps,dw-apb-uart";
295 reg = <0x01c28800 0x400>; 295 reg = <0x01c28800 0x400>;
296 interrupts = <0 3 1>; 296 interrupts = <0 3 4>;
297 reg-shift = <2>; 297 reg-shift = <2>;
298 reg-io-width = <4>; 298 reg-io-width = <4>;
299 clocks = <&apb1_gates 18>; 299 clocks = <&apb1_gates 18>;
@@ -303,7 +303,7 @@
303 uart3: serial@01c28c00 { 303 uart3: serial@01c28c00 {
304 compatible = "snps,dw-apb-uart"; 304 compatible = "snps,dw-apb-uart";
305 reg = <0x01c28c00 0x400>; 305 reg = <0x01c28c00 0x400>;
306 interrupts = <0 4 1>; 306 interrupts = <0 4 4>;
307 reg-shift = <2>; 307 reg-shift = <2>;
308 reg-io-width = <4>; 308 reg-io-width = <4>;
309 clocks = <&apb1_gates 19>; 309 clocks = <&apb1_gates 19>;
@@ -313,7 +313,7 @@
313 uart4: serial@01c29000 { 313 uart4: serial@01c29000 {
314 compatible = "snps,dw-apb-uart"; 314 compatible = "snps,dw-apb-uart";
315 reg = <0x01c29000 0x400>; 315 reg = <0x01c29000 0x400>;
316 interrupts = <0 17 1>; 316 interrupts = <0 17 4>;
317 reg-shift = <2>; 317 reg-shift = <2>;
318 reg-io-width = <4>; 318 reg-io-width = <4>;
319 clocks = <&apb1_gates 20>; 319 clocks = <&apb1_gates 20>;
@@ -323,7 +323,7 @@
323 uart5: serial@01c29400 { 323 uart5: serial@01c29400 {
324 compatible = "snps,dw-apb-uart"; 324 compatible = "snps,dw-apb-uart";
325 reg = <0x01c29400 0x400>; 325 reg = <0x01c29400 0x400>;
326 interrupts = <0 18 1>; 326 interrupts = <0 18 4>;
327 reg-shift = <2>; 327 reg-shift = <2>;
328 reg-io-width = <4>; 328 reg-io-width = <4>;
329 clocks = <&apb1_gates 21>; 329 clocks = <&apb1_gates 21>;
@@ -333,7 +333,7 @@
333 uart6: serial@01c29800 { 333 uart6: serial@01c29800 {
334 compatible = "snps,dw-apb-uart"; 334 compatible = "snps,dw-apb-uart";
335 reg = <0x01c29800 0x400>; 335 reg = <0x01c29800 0x400>;
336 interrupts = <0 19 1>; 336 interrupts = <0 19 4>;
337 reg-shift = <2>; 337 reg-shift = <2>;
338 reg-io-width = <4>; 338 reg-io-width = <4>;
339 clocks = <&apb1_gates 22>; 339 clocks = <&apb1_gates 22>;
@@ -343,7 +343,7 @@
343 uart7: serial@01c29c00 { 343 uart7: serial@01c29c00 {
344 compatible = "snps,dw-apb-uart"; 344 compatible = "snps,dw-apb-uart";
345 reg = <0x01c29c00 0x400>; 345 reg = <0x01c29c00 0x400>;
346 interrupts = <0 20 1>; 346 interrupts = <0 20 4>;
347 reg-shift = <2>; 347 reg-shift = <2>;
348 reg-io-width = <4>; 348 reg-io-width = <4>;
349 clocks = <&apb1_gates 23>; 349 clocks = <&apb1_gates 23>;
@@ -353,7 +353,7 @@
353 i2c0: i2c@01c2ac00 { 353 i2c0: i2c@01c2ac00 {
354 compatible = "allwinner,sun4i-i2c"; 354 compatible = "allwinner,sun4i-i2c";
355 reg = <0x01c2ac00 0x400>; 355 reg = <0x01c2ac00 0x400>;
356 interrupts = <0 7 1>; 356 interrupts = <0 7 4>;
357 clocks = <&apb1_gates 0>; 357 clocks = <&apb1_gates 0>;
358 clock-frequency = <100000>; 358 clock-frequency = <100000>;
359 status = "disabled"; 359 status = "disabled";
@@ -362,7 +362,7 @@
362 i2c1: i2c@01c2b000 { 362 i2c1: i2c@01c2b000 {
363 compatible = "allwinner,sun4i-i2c"; 363 compatible = "allwinner,sun4i-i2c";
364 reg = <0x01c2b000 0x400>; 364 reg = <0x01c2b000 0x400>;
365 interrupts = <0 8 1>; 365 interrupts = <0 8 4>;
366 clocks = <&apb1_gates 1>; 366 clocks = <&apb1_gates 1>;
367 clock-frequency = <100000>; 367 clock-frequency = <100000>;
368 status = "disabled"; 368 status = "disabled";
@@ -371,7 +371,7 @@
371 i2c2: i2c@01c2b400 { 371 i2c2: i2c@01c2b400 {
372 compatible = "allwinner,sun4i-i2c"; 372 compatible = "allwinner,sun4i-i2c";
373 reg = <0x01c2b400 0x400>; 373 reg = <0x01c2b400 0x400>;
374 interrupts = <0 9 1>; 374 interrupts = <0 9 4>;
375 clocks = <&apb1_gates 2>; 375 clocks = <&apb1_gates 2>;
376 clock-frequency = <100000>; 376 clock-frequency = <100000>;
377 status = "disabled"; 377 status = "disabled";
@@ -380,7 +380,7 @@
380 i2c3: i2c@01c2b800 { 380 i2c3: i2c@01c2b800 {
381 compatible = "allwinner,sun4i-i2c"; 381 compatible = "allwinner,sun4i-i2c";
382 reg = <0x01c2b800 0x400>; 382 reg = <0x01c2b800 0x400>;
383 interrupts = <0 88 1>; 383 interrupts = <0 88 4>;
384 clocks = <&apb1_gates 3>; 384 clocks = <&apb1_gates 3>;
385 clock-frequency = <100000>; 385 clock-frequency = <100000>;
386 status = "disabled"; 386 status = "disabled";
@@ -389,7 +389,7 @@
389 i2c4: i2c@01c2bc00 { 389 i2c4: i2c@01c2bc00 {
390 compatible = "allwinner,sun4i-i2c"; 390 compatible = "allwinner,sun4i-i2c";
391 reg = <0x01c2bc00 0x400>; 391 reg = <0x01c2bc00 0x400>;
392 interrupts = <0 89 1>; 392 interrupts = <0 89 4>;
393 clocks = <&apb1_gates 15>; 393 clocks = <&apb1_gates 15>;
394 clock-frequency = <100000>; 394 clock-frequency = <100000>;
395 status = "disabled"; 395 status = "disabled";
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index 4a5903e04827..c1df4e9db140 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -69,6 +69,7 @@ CONFIG_KS8851=y
69CONFIG_SMSC911X=y 69CONFIG_SMSC911X=y
70CONFIG_STMMAC_ETH=y 70CONFIG_STMMAC_ETH=y
71CONFIG_MDIO_SUN4I=y 71CONFIG_MDIO_SUN4I=y
72CONFIG_TI_CPSW=y
72CONFIG_KEYBOARD_SPEAR=y 73CONFIG_KEYBOARD_SPEAR=y
73CONFIG_SERIO_AMBAKMI=y 74CONFIG_SERIO_AMBAKMI=y
74CONFIG_SERIAL_8250=y 75CONFIG_SERIAL_8250=y
@@ -133,12 +134,14 @@ CONFIG_USB_GPIO_VBUS=y
133CONFIG_USB_ISP1301=y 134CONFIG_USB_ISP1301=y
134CONFIG_USB_MXS_PHY=y 135CONFIG_USB_MXS_PHY=y
135CONFIG_MMC=y 136CONFIG_MMC=y
137CONFIG_MMC_BLOCK_MINORS=16
136CONFIG_MMC_ARMMMCI=y 138CONFIG_MMC_ARMMMCI=y
137CONFIG_MMC_SDHCI=y 139CONFIG_MMC_SDHCI=y
138CONFIG_MMC_SDHCI_PLTFM=y 140CONFIG_MMC_SDHCI_PLTFM=y
139CONFIG_MMC_SDHCI_ESDHC_IMX=y 141CONFIG_MMC_SDHCI_ESDHC_IMX=y
140CONFIG_MMC_SDHCI_TEGRA=y 142CONFIG_MMC_SDHCI_TEGRA=y
141CONFIG_MMC_SDHCI_SPEAR=y 143CONFIG_MMC_SDHCI_SPEAR=y
144CONFIG_MMC_SDHCI_BCM_KONA=y
142CONFIG_MMC_OMAP=y 145CONFIG_MMC_OMAP=y
143CONFIG_MMC_OMAP_HS=y 146CONFIG_MMC_OMAP_HS=y
144CONFIG_EDAC=y 147CONFIG_EDAC=y
diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig
index 98a50c309b90..bfa80a11e8c7 100644
--- a/arch/arm/configs/omap2plus_defconfig
+++ b/arch/arm/configs/omap2plus_defconfig
@@ -173,6 +173,7 @@ CONFIG_MFD_PALMAS=y
173CONFIG_MFD_TPS65217=y 173CONFIG_MFD_TPS65217=y
174CONFIG_MFD_TPS65910=y 174CONFIG_MFD_TPS65910=y
175CONFIG_TWL6040_CORE=y 175CONFIG_TWL6040_CORE=y
176CONFIG_REGULATOR_FIXED_VOLTAGE=y
176CONFIG_REGULATOR_PALMAS=y 177CONFIG_REGULATOR_PALMAS=y
177CONFIG_REGULATOR_TPS65023=y 178CONFIG_REGULATOR_TPS65023=y
178CONFIG_REGULATOR_TPS6507X=y 179CONFIG_REGULATOR_TPS6507X=y
diff --git a/arch/arm/configs/sunxi_defconfig b/arch/arm/configs/sunxi_defconfig
index d57a85badb5e..3e2259b60236 100644
--- a/arch/arm/configs/sunxi_defconfig
+++ b/arch/arm/configs/sunxi_defconfig
@@ -12,6 +12,9 @@ CONFIG_NET=y
12CONFIG_PACKET=y 12CONFIG_PACKET=y
13CONFIG_UNIX=y 13CONFIG_UNIX=y
14CONFIG_INET=y 14CONFIG_INET=y
15CONFIG_IP_PNP=y
16CONFIG_IP_PNP_DHCP=y
17CONFIG_IP_PNP_BOOTP=y
15# CONFIG_INET_XFRM_MODE_TRANSPORT is not set 18# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
16# CONFIG_INET_XFRM_MODE_TUNNEL is not set 19# CONFIG_INET_XFRM_MODE_TUNNEL is not set
17# CONFIG_INET_XFRM_MODE_BEET is not set 20# CONFIG_INET_XFRM_MODE_BEET is not set
@@ -58,4 +61,8 @@ CONFIG_LEDS_TRIGGER_HEARTBEAT=y
58CONFIG_LEDS_TRIGGER_DEFAULT_ON=y 61CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
59CONFIG_COMMON_CLK_DEBUG=y 62CONFIG_COMMON_CLK_DEBUG=y
60# CONFIG_IOMMU_SUPPORT is not set 63# CONFIG_IOMMU_SUPPORT is not set
64CONFIG_TMPFS=y
65CONFIG_NFS_FS=y
66CONFIG_ROOT_NFS=y
61CONFIG_NLS=y 67CONFIG_NLS=y
68CONFIG_PRINTK_TIME=y
diff --git a/arch/arm/configs/u8500_defconfig b/arch/arm/configs/u8500_defconfig
index ac632cc38f24..c6ebc184bf68 100644
--- a/arch/arm/configs/u8500_defconfig
+++ b/arch/arm/configs/u8500_defconfig
@@ -22,6 +22,7 @@ CONFIG_CMDLINE="root=/dev/ram0 console=ttyAMA2,115200n8"
22CONFIG_CPU_FREQ=y 22CONFIG_CPU_FREQ=y
23CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y 23CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
24CONFIG_CPU_IDLE=y 24CONFIG_CPU_IDLE=y
25CONFIG_ARM_U8500_CPUIDLE=y
25CONFIG_VFP=y 26CONFIG_VFP=y
26CONFIG_NEON=y 27CONFIG_NEON=y
27CONFIG_PM_RUNTIME=y 28CONFIG_PM_RUNTIME=y
@@ -109,6 +110,8 @@ CONFIG_EXT2_FS_SECURITY=y
109CONFIG_EXT3_FS=y 110CONFIG_EXT3_FS=y
110CONFIG_EXT4_FS=y 111CONFIG_EXT4_FS=y
111CONFIG_VFAT_FS=y 112CONFIG_VFAT_FS=y
113CONFIG_DEVTMPFS=y
114CONFIG_DEVTMPFS_MOUNT=y
112CONFIG_TMPFS=y 115CONFIG_TMPFS=y
113CONFIG_TMPFS_POSIX_ACL=y 116CONFIG_TMPFS_POSIX_ACL=y
114# CONFIG_MISC_FILESYSTEMS is not set 117# CONFIG_MISC_FILESYSTEMS is not set
diff --git a/arch/arm/crypto/aesbs-core.S_shipped b/arch/arm/crypto/aesbs-core.S_shipped
index 64205d453260..71e5fc7cfb18 100644
--- a/arch/arm/crypto/aesbs-core.S_shipped
+++ b/arch/arm/crypto/aesbs-core.S_shipped
@@ -58,7 +58,7 @@
58# define VFP_ABI_FRAME 0 58# define VFP_ABI_FRAME 0
59# define BSAES_ASM_EXTENDED_KEY 59# define BSAES_ASM_EXTENDED_KEY
60# define XTS_CHAIN_TWEAK 60# define XTS_CHAIN_TWEAK
61# define __ARM_ARCH__ __LINUX_ARM_ARCH__ 61# define __ARM_ARCH__ 7
62#endif 62#endif
63 63
64#ifdef __thumb__ 64#ifdef __thumb__
diff --git a/arch/arm/crypto/bsaes-armv7.pl b/arch/arm/crypto/bsaes-armv7.pl
index f3d96d932573..be068db960ee 100644
--- a/arch/arm/crypto/bsaes-armv7.pl
+++ b/arch/arm/crypto/bsaes-armv7.pl
@@ -701,7 +701,7 @@ $code.=<<___;
701# define VFP_ABI_FRAME 0 701# define VFP_ABI_FRAME 0
702# define BSAES_ASM_EXTENDED_KEY 702# define BSAES_ASM_EXTENDED_KEY
703# define XTS_CHAIN_TWEAK 703# define XTS_CHAIN_TWEAK
704# define __ARM_ARCH__ __LINUX_ARM_ARCH__ 704# define __ARM_ARCH__ 7
705#endif 705#endif
706 706
707#ifdef __thumb__ 707#ifdef __thumb__
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index 3c597c222ef2..fbeb39c869e9 100644
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -329,7 +329,7 @@ extern void _memset_io(volatile void __iomem *, int, size_t);
329 */ 329 */
330#define ioremap(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE) 330#define ioremap(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE)
331#define ioremap_nocache(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE) 331#define ioremap_nocache(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE)
332#define ioremap_cached(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE_CACHED) 332#define ioremap_cache(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE_CACHED)
333#define ioremap_wc(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE_WC) 333#define ioremap_wc(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE_WC)
334#define iounmap __arm_iounmap 334#define iounmap __arm_iounmap
335 335
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index 9ecccc865046..8756e4bcdba0 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -100,23 +100,19 @@
100#define TASK_UNMAPPED_BASE UL(0x00000000) 100#define TASK_UNMAPPED_BASE UL(0x00000000)
101#endif 101#endif
102 102
103#ifndef PHYS_OFFSET
104#define PHYS_OFFSET UL(CONFIG_DRAM_BASE)
105#endif
106
107#ifndef END_MEM 103#ifndef END_MEM
108#define END_MEM (UL(CONFIG_DRAM_BASE) + CONFIG_DRAM_SIZE) 104#define END_MEM (UL(CONFIG_DRAM_BASE) + CONFIG_DRAM_SIZE)
109#endif 105#endif
110 106
111#ifndef PAGE_OFFSET 107#ifndef PAGE_OFFSET
112#define PAGE_OFFSET (PHYS_OFFSET) 108#define PAGE_OFFSET PLAT_PHYS_OFFSET
113#endif 109#endif
114 110
115/* 111/*
116 * The module can be at any place in ram in nommu mode. 112 * The module can be at any place in ram in nommu mode.
117 */ 113 */
118#define MODULES_END (END_MEM) 114#define MODULES_END (END_MEM)
119#define MODULES_VADDR (PHYS_OFFSET) 115#define MODULES_VADDR PAGE_OFFSET
120 116
121#define XIP_VIRT_ADDR(physaddr) (physaddr) 117#define XIP_VIRT_ADDR(physaddr) (physaddr)
122 118
@@ -157,6 +153,16 @@
157#endif 153#endif
158#define ARCH_PGD_MASK ((1 << ARCH_PGD_SHIFT) - 1) 154#define ARCH_PGD_MASK ((1 << ARCH_PGD_SHIFT) - 1)
159 155
156/*
157 * PLAT_PHYS_OFFSET is the offset (from zero) of the start of physical
158 * memory. This is used for XIP and NoMMU kernels, or by kernels which
159 * have their own mach/memory.h. Assembly code must always use
160 * PLAT_PHYS_OFFSET and not PHYS_OFFSET.
161 */
162#ifndef PLAT_PHYS_OFFSET
163#define PLAT_PHYS_OFFSET UL(CONFIG_PHYS_OFFSET)
164#endif
165
160#ifndef __ASSEMBLY__ 166#ifndef __ASSEMBLY__
161 167
162/* 168/*
@@ -239,6 +245,8 @@ static inline unsigned long __phys_to_virt(phys_addr_t x)
239 245
240#else 246#else
241 247
248#define PHYS_OFFSET PLAT_PHYS_OFFSET
249
242static inline phys_addr_t __virt_to_phys(unsigned long x) 250static inline phys_addr_t __virt_to_phys(unsigned long x)
243{ 251{
244 return (phys_addr_t)x - PAGE_OFFSET + PHYS_OFFSET; 252 return (phys_addr_t)x - PAGE_OFFSET + PHYS_OFFSET;
@@ -251,17 +259,6 @@ static inline unsigned long __phys_to_virt(phys_addr_t x)
251 259
252#endif 260#endif
253#endif 261#endif
254#endif /* __ASSEMBLY__ */
255
256#ifndef PHYS_OFFSET
257#ifdef PLAT_PHYS_OFFSET
258#define PHYS_OFFSET PLAT_PHYS_OFFSET
259#else
260#define PHYS_OFFSET UL(CONFIG_PHYS_OFFSET)
261#endif
262#endif
263
264#ifndef __ASSEMBLY__
265 262
266/* 263/*
267 * PFNs are used to describe any physical page; this means 264 * PFNs are used to describe any physical page; this means
@@ -350,7 +347,8 @@ static inline __deprecated void *bus_to_virt(unsigned long x)
350#define ARCH_PFN_OFFSET PHYS_PFN_OFFSET 347#define ARCH_PFN_OFFSET PHYS_PFN_OFFSET
351 348
352#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) 349#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
353#define virt_addr_valid(kaddr) ((unsigned long)(kaddr) >= PAGE_OFFSET && (unsigned long)(kaddr) < (unsigned long)high_memory) 350#define virt_addr_valid(kaddr) (((unsigned long)(kaddr) >= PAGE_OFFSET && (unsigned long)(kaddr) < (unsigned long)high_memory) \
351 && pfn_valid(__pa(kaddr) >> PAGE_SHIFT) )
354 352
355#endif 353#endif
356 354
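
The asm/memory.h rework above folds the scattered PHYS_OFFSET fallbacks into a single PLAT_PHYS_OFFSET definition: it defaults to CONFIG_PHYS_OFFSET, may be overridden by a platform's mach/memory.h, and assembly must now reference PLAT_PHYS_OFFSET directly while C code keeps using PHYS_OFFSET. A minimal sketch of the resulting convention, using a hypothetical platform value:

	/* hypothetical mach/memory.h for a board whose RAM starts at 0x80000000 */
	#define PLAT_PHYS_OFFSET	UL(0x80000000)

	/* asm/memory.h after this change, simplified: */
	#ifndef PLAT_PHYS_OFFSET
	#define PLAT_PHYS_OFFSET	UL(CONFIG_PHYS_OFFSET)
	#endif

	#ifndef __ASSEMBLY__
	/* only C code without runtime phys/virt patching resolves PHYS_OFFSET here */
	#define PHYS_OFFSET	PLAT_PHYS_OFFSET
	#endif
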
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index be956dbf6bae..1571d126e9dd 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -61,7 +61,7 @@ extern void __pgd_error(const char *file, int line, pgd_t);
61 * mapping to be mapped at. This is particularly important for 61 * mapping to be mapped at. This is particularly important for
62 * non-high vector CPUs. 62 * non-high vector CPUs.
63 */ 63 */
64#define FIRST_USER_ADDRESS PAGE_SIZE 64#define FIRST_USER_ADDRESS (PAGE_SIZE * 2)
65 65
66/* 66/*
67 * Use TASK_SIZE as the ceiling argument for free_pgtables() and 67 * Use TASK_SIZE as the ceiling argument for free_pgtables() and
diff --git a/arch/arm/include/asm/xen/page.h b/arch/arm/include/asm/xen/page.h
index 75579a9d6f76..3759cacdd7f8 100644
--- a/arch/arm/include/asm/xen/page.h
+++ b/arch/arm/include/asm/xen/page.h
@@ -117,6 +117,6 @@ static inline bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
117 return __set_phys_to_machine(pfn, mfn); 117 return __set_phys_to_machine(pfn, mfn);
118} 118}
119 119
120#define xen_remap(cookie, size) ioremap_cached((cookie), (size)); 120#define xen_remap(cookie, size) ioremap_cache((cookie), (size));
121 121
122#endif /* _ASM_ARM_XEN_PAGE_H */ 122#endif /* _ASM_ARM_XEN_PAGE_H */
diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S
index 14235ba64a90..716249cc2ee1 100644
--- a/arch/arm/kernel/head-nommu.S
+++ b/arch/arm/kernel/head-nommu.S
@@ -68,7 +68,7 @@ ENTRY(stext)
68 68
69#ifdef CONFIG_ARM_MPU 69#ifdef CONFIG_ARM_MPU
70 /* Calculate the size of a region covering just the kernel */ 70 /* Calculate the size of a region covering just the kernel */
71 ldr r5, =PHYS_OFFSET @ Region start: PHYS_OFFSET 71 ldr r5, =PLAT_PHYS_OFFSET @ Region start: PHYS_OFFSET
72 ldr r6, =(_end) @ Cover whole kernel 72 ldr r6, =(_end) @ Cover whole kernel
73 sub r6, r6, r5 @ Minimum size of region to map 73 sub r6, r6, r5 @ Minimum size of region to map
74 clz r6, r6 @ Region size must be 2^N... 74 clz r6, r6 @ Region size must be 2^N...
@@ -213,7 +213,7 @@ ENTRY(__setup_mpu)
213 set_region_nr r0, #MPU_RAM_REGION 213 set_region_nr r0, #MPU_RAM_REGION
214 isb 214 isb
215 /* Full access from PL0, PL1, shared for CONFIG_SMP, cacheable */ 215 /* Full access from PL0, PL1, shared for CONFIG_SMP, cacheable */
216 ldr r0, =PHYS_OFFSET @ RAM starts at PHYS_OFFSET 216 ldr r0, =PLAT_PHYS_OFFSET @ RAM starts at PHYS_OFFSET
217 ldr r5,=(MPU_AP_PL1RW_PL0RW | MPU_RGN_NORMAL) 217 ldr r5,=(MPU_AP_PL1RW_PL0RW | MPU_RGN_NORMAL)
218 218
219 setup_region r0, r5, r6, MPU_DATA_SIDE @ PHYS_OFFSET, shared, enabled 219 setup_region r0, r5, r6, MPU_DATA_SIDE @ PHYS_OFFSET, shared, enabled
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 11d59b32fb8d..32f317e5828a 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -110,7 +110,7 @@ ENTRY(stext)
110 sub r4, r3, r4 @ (PHYS_OFFSET - PAGE_OFFSET) 110 sub r4, r3, r4 @ (PHYS_OFFSET - PAGE_OFFSET)
111 add r8, r8, r4 @ PHYS_OFFSET 111 add r8, r8, r4 @ PHYS_OFFSET
112#else 112#else
113 ldr r8, =PHYS_OFFSET @ always constant in this case 113 ldr r8, =PLAT_PHYS_OFFSET @ always constant in this case
114#endif 114#endif
115 115
116 /* 116 /*
diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c
index 57221e349a7c..f0d180d8b29f 100644
--- a/arch/arm/kernel/machine_kexec.c
+++ b/arch/arm/kernel/machine_kexec.c
@@ -14,11 +14,12 @@
14#include <asm/pgalloc.h> 14#include <asm/pgalloc.h>
15#include <asm/mmu_context.h> 15#include <asm/mmu_context.h>
16#include <asm/cacheflush.h> 16#include <asm/cacheflush.h>
17#include <asm/fncpy.h>
17#include <asm/mach-types.h> 18#include <asm/mach-types.h>
18#include <asm/smp_plat.h> 19#include <asm/smp_plat.h>
19#include <asm/system_misc.h> 20#include <asm/system_misc.h>
20 21
21extern const unsigned char relocate_new_kernel[]; 22extern void relocate_new_kernel(void);
22extern const unsigned int relocate_new_kernel_size; 23extern const unsigned int relocate_new_kernel_size;
23 24
24extern unsigned long kexec_start_address; 25extern unsigned long kexec_start_address;
@@ -142,6 +143,8 @@ void machine_kexec(struct kimage *image)
142{ 143{
143 unsigned long page_list; 144 unsigned long page_list;
144 unsigned long reboot_code_buffer_phys; 145 unsigned long reboot_code_buffer_phys;
146 unsigned long reboot_entry = (unsigned long)relocate_new_kernel;
147 unsigned long reboot_entry_phys;
145 void *reboot_code_buffer; 148 void *reboot_code_buffer;
146 149
147 /* 150 /*
@@ -168,16 +171,16 @@ void machine_kexec(struct kimage *image)
168 171
169 172
170 /* copy our kernel relocation code to the control code page */ 173 /* copy our kernel relocation code to the control code page */
171 memcpy(reboot_code_buffer, 174 reboot_entry = fncpy(reboot_code_buffer,
172 relocate_new_kernel, relocate_new_kernel_size); 175 reboot_entry,
176 relocate_new_kernel_size);
177 reboot_entry_phys = (unsigned long)reboot_entry +
178 (reboot_code_buffer_phys - (unsigned long)reboot_code_buffer);
173 179
174
175 flush_icache_range((unsigned long) reboot_code_buffer,
176 (unsigned long) reboot_code_buffer + KEXEC_CONTROL_PAGE_SIZE);
177 printk(KERN_INFO "Bye!\n"); 180 printk(KERN_INFO "Bye!\n");
178 181
179 if (kexec_reinit) 182 if (kexec_reinit)
180 kexec_reinit(); 183 kexec_reinit();
181 184
182 soft_restart(reboot_code_buffer_phys); 185 soft_restart(reboot_entry_phys);
183} 186}
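
The machine_kexec() change above swaps a raw memcpy() of the relocation stub for fncpy(), which copies the code into the control page, performs the cache maintenance itself (hence the dropped flush_icache_range() call), and returns an entry pointer that preserves the ARM/Thumb bit. A minimal sketch of the pattern, with hypothetical stub names and an 8-byte-aligned destination buffer as fncpy() requires:

	#include <asm/fncpy.h>
	#include <asm/system_misc.h>	/* soft_restart() */

	extern void reloc_stub(void);		/* position-independent stub */
	extern const unsigned int reloc_stub_size;

	static void copy_stub_and_restart(void *buf, unsigned long buf_phys)
	{
		unsigned long entry;

		/* fncpy() copies the stub, syncs caches and keeps the Thumb bit */
		entry = (unsigned long)fncpy(buf, &reloc_stub, reloc_stub_size);

		/* translate the virtual entry point into its physical alias */
		entry += buf_phys - (unsigned long)buf;

		soft_restart(entry);
	}
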
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 94f6b05f9e24..92f7b15dd221 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -404,6 +404,7 @@ EXPORT_SYMBOL(dump_fpu);
404unsigned long get_wchan(struct task_struct *p) 404unsigned long get_wchan(struct task_struct *p)
405{ 405{
406 struct stackframe frame; 406 struct stackframe frame;
407 unsigned long stack_page;
407 int count = 0; 408 int count = 0;
408 if (!p || p == current || p->state == TASK_RUNNING) 409 if (!p || p == current || p->state == TASK_RUNNING)
409 return 0; 410 return 0;
@@ -412,9 +413,11 @@ unsigned long get_wchan(struct task_struct *p)
412 frame.sp = thread_saved_sp(p); 413 frame.sp = thread_saved_sp(p);
413 frame.lr = 0; /* recovered from the stack */ 414 frame.lr = 0; /* recovered from the stack */
414 frame.pc = thread_saved_pc(p); 415 frame.pc = thread_saved_pc(p);
416 stack_page = (unsigned long)task_stack_page(p);
415 do { 417 do {
416 int ret = unwind_frame(&frame); 418 if (frame.sp < stack_page ||
417 if (ret < 0) 419 frame.sp >= stack_page + THREAD_SIZE ||
420 unwind_frame(&frame) < 0)
418 return 0; 421 return 0;
419 if (!in_sched_functions(frame.pc)) 422 if (!in_sched_functions(frame.pc))
420 return frame.pc; 423 return frame.pc;
diff --git a/arch/arm/kernel/relocate_kernel.S b/arch/arm/kernel/relocate_kernel.S
index d0cdedf4864d..95858966d84e 100644
--- a/arch/arm/kernel/relocate_kernel.S
+++ b/arch/arm/kernel/relocate_kernel.S
@@ -2,10 +2,12 @@
2 * relocate_kernel.S - put the kernel image in place to boot 2 * relocate_kernel.S - put the kernel image in place to boot
3 */ 3 */
4 4
5#include <linux/linkage.h>
5#include <asm/kexec.h> 6#include <asm/kexec.h>
6 7
7 .globl relocate_new_kernel 8 .align 3 /* not needed for this code, but keeps fncpy() happy */
8relocate_new_kernel: 9
10ENTRY(relocate_new_kernel)
9 11
10 ldr r0,kexec_indirection_page 12 ldr r0,kexec_indirection_page
11 ldr r1,kexec_start_address 13 ldr r1,kexec_start_address
@@ -79,6 +81,8 @@ kexec_mach_type:
79kexec_boot_atags: 81kexec_boot_atags:
80 .long 0x0 82 .long 0x0
81 83
84ENDPROC(relocate_new_kernel)
85
82relocate_new_kernel_end: 86relocate_new_kernel_end:
83 87
84 .globl relocate_new_kernel_size 88 .globl relocate_new_kernel_size
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 6a1b8a81b1ae..987a7f5bce5f 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -873,8 +873,6 @@ void __init setup_arch(char **cmdline_p)
873 machine_desc = mdesc; 873 machine_desc = mdesc;
874 machine_name = mdesc->name; 874 machine_name = mdesc->name;
875 875
876 setup_dma_zone(mdesc);
877
878 if (mdesc->reboot_mode != REBOOT_HARD) 876 if (mdesc->reboot_mode != REBOOT_HARD)
879 reboot_mode = mdesc->reboot_mode; 877 reboot_mode = mdesc->reboot_mode;
880 878
@@ -892,6 +890,7 @@ void __init setup_arch(char **cmdline_p)
892 sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL); 890 sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
893 891
894 early_paging_init(mdesc, lookup_processor_type(read_cpuid_id())); 892 early_paging_init(mdesc, lookup_processor_type(read_cpuid_id()));
893 setup_dma_zone(mdesc);
895 sanity_check_meminfo(); 894 sanity_check_meminfo();
896 arm_memblock_init(&meminfo, mdesc); 895 arm_memblock_init(&meminfo, mdesc);
897 896
diff --git a/arch/arm/kernel/sigreturn_codes.S b/arch/arm/kernel/sigreturn_codes.S
index 3c5d0f2170fd..b84d0cb13682 100644
--- a/arch/arm/kernel/sigreturn_codes.S
+++ b/arch/arm/kernel/sigreturn_codes.S
@@ -30,6 +30,27 @@
30 * snippets. 30 * snippets.
31 */ 31 */
32 32
33/*
34 * In CPU_THUMBONLY case kernel arm opcodes are not allowed.
35 * Note in this case codes skips those instructions but it uses .org
36 * directive to keep correct layout of sigreturn_codes array.
37 */
38#ifndef CONFIG_CPU_THUMBONLY
39#define ARM_OK(code...) code
40#else
41#define ARM_OK(code...)
42#endif
43
44 .macro arm_slot n
45 .org sigreturn_codes + 12 * (\n)
46ARM_OK( .arm )
47 .endm
48
49 .macro thumb_slot n
50 .org sigreturn_codes + 12 * (\n) + 8
51 .thumb
52 .endm
53
33#if __LINUX_ARM_ARCH__ <= 4 54#if __LINUX_ARM_ARCH__ <= 4
34 /* 55 /*
35 * Note we manually set minimally required arch that supports 56 * Note we manually set minimally required arch that supports
@@ -45,26 +66,27 @@
45 .global sigreturn_codes 66 .global sigreturn_codes
46 .type sigreturn_codes, #object 67 .type sigreturn_codes, #object
47 68
48 .arm 69 .align
49 70
50sigreturn_codes: 71sigreturn_codes:
51 72
52 /* ARM sigreturn syscall code snippet */ 73 /* ARM sigreturn syscall code snippet */
53 mov r7, #(__NR_sigreturn - __NR_SYSCALL_BASE) 74 arm_slot 0
54 swi #(__NR_sigreturn)|(__NR_OABI_SYSCALL_BASE) 75ARM_OK( mov r7, #(__NR_sigreturn - __NR_SYSCALL_BASE) )
76ARM_OK( swi #(__NR_sigreturn)|(__NR_OABI_SYSCALL_BASE) )
55 77
56 /* Thumb sigreturn syscall code snippet */ 78 /* Thumb sigreturn syscall code snippet */
57 .thumb 79 thumb_slot 0
58 movs r7, #(__NR_sigreturn - __NR_SYSCALL_BASE) 80 movs r7, #(__NR_sigreturn - __NR_SYSCALL_BASE)
59 swi #0 81 swi #0
60 82
61 /* ARM sigreturn_rt syscall code snippet */ 83 /* ARM sigreturn_rt syscall code snippet */
62 .arm 84 arm_slot 1
63 mov r7, #(__NR_rt_sigreturn - __NR_SYSCALL_BASE) 85ARM_OK( mov r7, #(__NR_rt_sigreturn - __NR_SYSCALL_BASE) )
64 swi #(__NR_rt_sigreturn)|(__NR_OABI_SYSCALL_BASE) 86ARM_OK( swi #(__NR_rt_sigreturn)|(__NR_OABI_SYSCALL_BASE) )
65 87
66 /* Thumb sigreturn_rt syscall code snippet */ 88 /* Thumb sigreturn_rt syscall code snippet */
67 .thumb 89 thumb_slot 1
68 movs r7, #(__NR_rt_sigreturn - __NR_SYSCALL_BASE) 90 movs r7, #(__NR_rt_sigreturn - __NR_SYSCALL_BASE)
69 swi #0 91 swi #0
70 92
@@ -74,7 +96,7 @@ sigreturn_codes:
74 * it is thumb case or not, so we need additional 96 * it is thumb case or not, so we need additional
75 * word after real last entry. 97 * word after real last entry.
76 */ 98 */
77 .arm 99 arm_slot 2
78 .space 4 100 .space 4
79 101
80 .size sigreturn_codes, . - sigreturn_codes 102 .size sigreturn_codes, . - sigreturn_codes
diff --git a/arch/arm/kernel/stacktrace.c b/arch/arm/kernel/stacktrace.c
index 00f79e59985b..af4e8c8a5422 100644
--- a/arch/arm/kernel/stacktrace.c
+++ b/arch/arm/kernel/stacktrace.c
@@ -31,7 +31,7 @@ int notrace unwind_frame(struct stackframe *frame)
31 high = ALIGN(low, THREAD_SIZE); 31 high = ALIGN(low, THREAD_SIZE);
32 32
33 /* check current frame pointer is within bounds */ 33 /* check current frame pointer is within bounds */
34 if (fp < (low + 12) || fp + 4 >= high) 34 if (fp < low + 12 || fp > high - 4)
35 return -EINVAL; 35 return -EINVAL;
36 36
37 /* restore the registers from the stack frame */ 37 /* restore the registers from the stack frame */
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index dbf0923e8d76..6eda3bf85c52 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -36,7 +36,13 @@
36#include <asm/system_misc.h> 36#include <asm/system_misc.h>
37#include <asm/opcodes.h> 37#include <asm/opcodes.h>
38 38
39static const char *handler[]= { "prefetch abort", "data abort", "address exception", "interrupt" }; 39static const char *handler[]= {
40 "prefetch abort",
41 "data abort",
42 "address exception",
43 "interrupt",
44 "undefined instruction",
45};
40 46
41void *vectors_page; 47void *vectors_page;
42 48
@@ -509,9 +515,10 @@ static inline int
509__do_cache_op(unsigned long start, unsigned long end) 515__do_cache_op(unsigned long start, unsigned long end)
510{ 516{
511 int ret; 517 int ret;
512 unsigned long chunk = PAGE_SIZE;
513 518
514 do { 519 do {
520 unsigned long chunk = min(PAGE_SIZE, end - start);
521
515 if (signal_pending(current)) { 522 if (signal_pending(current)) {
516 struct thread_info *ti = current_thread_info(); 523 struct thread_info *ti = current_thread_info();
517 524
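
The __do_cache_op() change above recomputes the chunk size on every iteration, so the final pass only covers the bytes that remain rather than a full page past 'end'. A minimal sketch of the chunked loop, with the signal handling elided and a hypothetical flush_range() helper standing in for the real cache call:

	static int cache_op_chunked(unsigned long start, unsigned long end)
	{
		do {
			/* never operate past 'end', even on the last partial page */
			unsigned long chunk = min(PAGE_SIZE, end - start);

			flush_range(start, start + chunk);
			start += chunk;
		} while (start < end);

		return 0;
	}
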
diff --git a/arch/arm/lib/delay-loop.S b/arch/arm/lib/delay-loop.S
index 36b668d8e121..bc1033b897b4 100644
--- a/arch/arm/lib/delay-loop.S
+++ b/arch/arm/lib/delay-loop.S
@@ -40,6 +40,7 @@ ENTRY(__loop_const_udelay) @ 0 <= r0 <= 0x7fffff06
40/* 40/*
41 * loops = r0 * HZ * loops_per_jiffy / 1000000 41 * loops = r0 * HZ * loops_per_jiffy / 1000000
42 */ 42 */
43 .align 3
43 44
44@ Delay routine 45@ Delay routine
45ENTRY(__loop_delay) 46ENTRY(__loop_delay)
diff --git a/arch/arm/mach-at91/at91rm9200_time.c b/arch/arm/mach-at91/at91rm9200_time.c
index f607deb40f4d..bc7b363a3083 100644
--- a/arch/arm/mach-at91/at91rm9200_time.c
+++ b/arch/arm/mach-at91/at91rm9200_time.c
@@ -174,7 +174,6 @@ clkevt32k_next_event(unsigned long delta, struct clock_event_device *dev)
174static struct clock_event_device clkevt = { 174static struct clock_event_device clkevt = {
175 .name = "at91_tick", 175 .name = "at91_tick",
176 .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, 176 .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
177 .shift = 32,
178 .rating = 150, 177 .rating = 150,
179 .set_next_event = clkevt32k_next_event, 178 .set_next_event = clkevt32k_next_event,
180 .set_mode = clkevt32k_mode, 179 .set_mode = clkevt32k_mode,
@@ -265,11 +264,9 @@ void __init at91rm9200_timer_init(void)
265 at91_st_write(AT91_ST_RTMR, 1); 264 at91_st_write(AT91_ST_RTMR, 1);
266 265
267 /* Setup timer clockevent, with minimum of two ticks (important!!) */ 266 /* Setup timer clockevent, with minimum of two ticks (important!!) */
268 clkevt.mult = div_sc(AT91_SLOW_CLOCK, NSEC_PER_SEC, clkevt.shift);
269 clkevt.max_delta_ns = clockevent_delta2ns(AT91_ST_ALMV, &clkevt);
270 clkevt.min_delta_ns = clockevent_delta2ns(2, &clkevt) + 1;
271 clkevt.cpumask = cpumask_of(0); 267 clkevt.cpumask = cpumask_of(0);
272 clockevents_register_device(&clkevt); 268 clockevents_config_and_register(&clkevt, AT91_SLOW_CLOCK,
269 2, AT91_ST_ALMV);
273 270
274 /* register clocksource */ 271 /* register clocksource */
275 clocksource_register_hz(&clk32k, AT91_SLOW_CLOCK); 272 clocksource_register_hz(&clk32k, AT91_SLOW_CLOCK);
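
The at91rm9200 timer change above drops the hand-computed mult/shift and min/max delta values in favour of clockevents_config_and_register(), which derives them from the tick rate. A minimal sketch of that registration pattern, with hypothetical callbacks and a 32 kHz rate:

	#include <linux/clockchips.h>
	#include <linux/cpumask.h>
	#include <linux/init.h>

	static int demo_next_event(unsigned long delta, struct clock_event_device *dev);
	static void demo_set_mode(enum clock_event_mode mode, struct clock_event_device *dev);

	static struct clock_event_device demo_clkevt = {
		.name		= "demo_tick",
		.features	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
		.rating		= 150,
		.set_next_event	= demo_next_event,
		.set_mode	= demo_set_mode,
	};

	static void __init demo_timer_init(void)
	{
		demo_clkevt.cpumask = cpumask_of(0);
		/* rate in Hz; min/max delta are given in timer ticks, not ns */
		clockevents_config_and_register(&demo_clkevt, 32768, 2, 0x3ffff);
	}
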
diff --git a/arch/arm/mach-at91/pm.h b/arch/arm/mach-at91/pm.h
index 3ed190ce062b..c5101dcb4fb0 100644
--- a/arch/arm/mach-at91/pm.h
+++ b/arch/arm/mach-at91/pm.h
@@ -16,7 +16,11 @@
16#include <mach/at91_ramc.h> 16#include <mach/at91_ramc.h>
17#include <mach/at91rm9200_sdramc.h> 17#include <mach/at91rm9200_sdramc.h>
18 18
19#ifdef CONFIG_PM
19extern void at91_pm_set_standby(void (*at91_standby)(void)); 20extern void at91_pm_set_standby(void (*at91_standby)(void));
21#else
22static inline void at91_pm_set_standby(void (*at91_standby)(void)) { }
23#endif
20 24
21/* 25/*
22 * The AT91RM9200 goes into self-refresh mode with this command, and will 26 * The AT91RM9200 goes into self-refresh mode with this command, and will
diff --git a/arch/arm/mach-at91/sama5d3.c b/arch/arm/mach-at91/sama5d3.c
index 3ea86428ee09..a28873fe3049 100644
--- a/arch/arm/mach-at91/sama5d3.c
+++ b/arch/arm/mach-at91/sama5d3.c
@@ -95,19 +95,19 @@ static struct clk twi0_clk = {
95 .name = "twi0_clk", 95 .name = "twi0_clk",
96 .pid = SAMA5D3_ID_TWI0, 96 .pid = SAMA5D3_ID_TWI0,
97 .type = CLK_TYPE_PERIPHERAL, 97 .type = CLK_TYPE_PERIPHERAL,
98 .div = AT91_PMC_PCR_DIV2, 98 .div = AT91_PMC_PCR_DIV8,
99}; 99};
100static struct clk twi1_clk = { 100static struct clk twi1_clk = {
101 .name = "twi1_clk", 101 .name = "twi1_clk",
102 .pid = SAMA5D3_ID_TWI1, 102 .pid = SAMA5D3_ID_TWI1,
103 .type = CLK_TYPE_PERIPHERAL, 103 .type = CLK_TYPE_PERIPHERAL,
104 .div = AT91_PMC_PCR_DIV2, 104 .div = AT91_PMC_PCR_DIV8,
105}; 105};
106static struct clk twi2_clk = { 106static struct clk twi2_clk = {
107 .name = "twi2_clk", 107 .name = "twi2_clk",
108 .pid = SAMA5D3_ID_TWI2, 108 .pid = SAMA5D3_ID_TWI2,
109 .type = CLK_TYPE_PERIPHERAL, 109 .type = CLK_TYPE_PERIPHERAL,
110 .div = AT91_PMC_PCR_DIV2, 110 .div = AT91_PMC_PCR_DIV8,
111}; 111};
112static struct clk mmc0_clk = { 112static struct clk mmc0_clk = {
113 .name = "mci0_clk", 113 .name = "mci0_clk",
diff --git a/arch/arm/mach-davinci/devices-da8xx.c b/arch/arm/mach-davinci/devices-da8xx.c
index c46eccbbd512..78829c513fdc 100644
--- a/arch/arm/mach-davinci/devices-da8xx.c
+++ b/arch/arm/mach-davinci/devices-da8xx.c
@@ -487,7 +487,7 @@ int __init da8xx_register_emac(void)
487 487
488static struct resource da830_mcasp1_resources[] = { 488static struct resource da830_mcasp1_resources[] = {
489 { 489 {
490 .name = "mcasp1", 490 .name = "mpu",
491 .start = DAVINCI_DA830_MCASP1_REG_BASE, 491 .start = DAVINCI_DA830_MCASP1_REG_BASE,
492 .end = DAVINCI_DA830_MCASP1_REG_BASE + (SZ_1K * 12) - 1, 492 .end = DAVINCI_DA830_MCASP1_REG_BASE + (SZ_1K * 12) - 1,
493 .flags = IORESOURCE_MEM, 493 .flags = IORESOURCE_MEM,
@@ -515,7 +515,7 @@ static struct platform_device da830_mcasp1_device = {
515 515
516static struct resource da850_mcasp_resources[] = { 516static struct resource da850_mcasp_resources[] = {
517 { 517 {
518 .name = "mcasp", 518 .name = "mpu",
519 .start = DAVINCI_DA8XX_MCASP0_REG_BASE, 519 .start = DAVINCI_DA8XX_MCASP0_REG_BASE,
520 .end = DAVINCI_DA8XX_MCASP0_REG_BASE + (SZ_1K * 12) - 1, 520 .end = DAVINCI_DA8XX_MCASP0_REG_BASE + (SZ_1K * 12) - 1,
521 .flags = IORESOURCE_MEM, 521 .flags = IORESOURCE_MEM,
diff --git a/arch/arm/mach-davinci/dm355.c b/arch/arm/mach-davinci/dm355.c
index ef9ff1fb6f52..6117fc644188 100644
--- a/arch/arm/mach-davinci/dm355.c
+++ b/arch/arm/mach-davinci/dm355.c
@@ -641,6 +641,7 @@ static struct platform_device dm355_edma_device = {
641 641
642static struct resource dm355_asp1_resources[] = { 642static struct resource dm355_asp1_resources[] = {
643 { 643 {
644 .name = "mpu",
644 .start = DAVINCI_ASP1_BASE, 645 .start = DAVINCI_ASP1_BASE,
645 .end = DAVINCI_ASP1_BASE + SZ_8K - 1, 646 .end = DAVINCI_ASP1_BASE + SZ_8K - 1,
646 .flags = IORESOURCE_MEM, 647 .flags = IORESOURCE_MEM,
@@ -906,7 +907,7 @@ static struct davinci_gpio_platform_data dm355_gpio_platform_data = {
906int __init dm355_gpio_register(void) 907int __init dm355_gpio_register(void)
907{ 908{
908 return davinci_gpio_register(dm355_gpio_resources, 909 return davinci_gpio_register(dm355_gpio_resources,
909 sizeof(dm355_gpio_resources), 910 ARRAY_SIZE(dm355_gpio_resources),
910 &dm355_gpio_platform_data); 911 &dm355_gpio_platform_data);
911} 912}
912/*----------------------------------------------------------------------*/ 913/*----------------------------------------------------------------------*/
diff --git a/arch/arm/mach-davinci/dm365.c b/arch/arm/mach-davinci/dm365.c
index 1511a0680f9a..d7c6f85d3fc9 100644
--- a/arch/arm/mach-davinci/dm365.c
+++ b/arch/arm/mach-davinci/dm365.c
@@ -720,7 +720,7 @@ static struct davinci_gpio_platform_data dm365_gpio_platform_data = {
720int __init dm365_gpio_register(void) 720int __init dm365_gpio_register(void)
721{ 721{
722 return davinci_gpio_register(dm365_gpio_resources, 722 return davinci_gpio_register(dm365_gpio_resources,
723 sizeof(dm365_gpio_resources), 723 ARRAY_SIZE(dm365_gpio_resources),
724 &dm365_gpio_platform_data); 724 &dm365_gpio_platform_data);
725} 725}
726 726
@@ -942,6 +942,7 @@ static struct platform_device dm365_edma_device = {
942 942
943static struct resource dm365_asp_resources[] = { 943static struct resource dm365_asp_resources[] = {
944 { 944 {
945 .name = "mpu",
945 .start = DAVINCI_DM365_ASP0_BASE, 946 .start = DAVINCI_DM365_ASP0_BASE,
946 .end = DAVINCI_DM365_ASP0_BASE + SZ_8K - 1, 947 .end = DAVINCI_DM365_ASP0_BASE + SZ_8K - 1,
947 .flags = IORESOURCE_MEM, 948 .flags = IORESOURCE_MEM,
diff --git a/arch/arm/mach-davinci/dm644x.c b/arch/arm/mach-davinci/dm644x.c
index 143a3217e8ef..3ce47997bb46 100644
--- a/arch/arm/mach-davinci/dm644x.c
+++ b/arch/arm/mach-davinci/dm644x.c
@@ -572,6 +572,7 @@ static struct platform_device dm644x_edma_device = {
572/* DM6446 EVM uses ASP0; line-out is a pair of RCA jacks */ 572/* DM6446 EVM uses ASP0; line-out is a pair of RCA jacks */
573static struct resource dm644x_asp_resources[] = { 573static struct resource dm644x_asp_resources[] = {
574 { 574 {
575 .name = "mpu",
575 .start = DAVINCI_ASP0_BASE, 576 .start = DAVINCI_ASP0_BASE,
576 .end = DAVINCI_ASP0_BASE + SZ_8K - 1, 577 .end = DAVINCI_ASP0_BASE + SZ_8K - 1,
577 .flags = IORESOURCE_MEM, 578 .flags = IORESOURCE_MEM,
@@ -792,7 +793,7 @@ static struct davinci_gpio_platform_data dm644_gpio_platform_data = {
792int __init dm644x_gpio_register(void) 793int __init dm644x_gpio_register(void)
793{ 794{
794 return davinci_gpio_register(dm644_gpio_resources, 795 return davinci_gpio_register(dm644_gpio_resources,
795 sizeof(dm644_gpio_resources), 796 ARRAY_SIZE(dm644_gpio_resources),
796 &dm644_gpio_platform_data); 797 &dm644_gpio_platform_data);
797} 798}
798/*----------------------------------------------------------------------*/ 799/*----------------------------------------------------------------------*/
diff --git a/arch/arm/mach-davinci/dm646x.c b/arch/arm/mach-davinci/dm646x.c
index 2a73f299c1d0..0e81fea65e7f 100644
--- a/arch/arm/mach-davinci/dm646x.c
+++ b/arch/arm/mach-davinci/dm646x.c
@@ -621,7 +621,7 @@ static struct platform_device dm646x_edma_device = {
621 621
622static struct resource dm646x_mcasp0_resources[] = { 622static struct resource dm646x_mcasp0_resources[] = {
623 { 623 {
624 .name = "mcasp0", 624 .name = "mpu",
625 .start = DAVINCI_DM646X_MCASP0_REG_BASE, 625 .start = DAVINCI_DM646X_MCASP0_REG_BASE,
626 .end = DAVINCI_DM646X_MCASP0_REG_BASE + (SZ_1K << 1) - 1, 626 .end = DAVINCI_DM646X_MCASP0_REG_BASE + (SZ_1K << 1) - 1,
627 .flags = IORESOURCE_MEM, 627 .flags = IORESOURCE_MEM,
@@ -641,7 +641,7 @@ static struct resource dm646x_mcasp0_resources[] = {
641 641
642static struct resource dm646x_mcasp1_resources[] = { 642static struct resource dm646x_mcasp1_resources[] = {
643 { 643 {
644 .name = "mcasp1", 644 .name = "mpu",
645 .start = DAVINCI_DM646X_MCASP1_REG_BASE, 645 .start = DAVINCI_DM646X_MCASP1_REG_BASE,
646 .end = DAVINCI_DM646X_MCASP1_REG_BASE + (SZ_1K << 1) - 1, 646 .end = DAVINCI_DM646X_MCASP1_REG_BASE + (SZ_1K << 1) - 1,
647 .flags = IORESOURCE_MEM, 647 .flags = IORESOURCE_MEM,
@@ -769,7 +769,7 @@ static struct davinci_gpio_platform_data dm646x_gpio_platform_data = {
769int __init dm646x_gpio_register(void) 769int __init dm646x_gpio_register(void)
770{ 770{
771 return davinci_gpio_register(dm646x_gpio_resources, 771 return davinci_gpio_register(dm646x_gpio_resources,
772 sizeof(dm646x_gpio_resources), 772 ARRAY_SIZE(dm646x_gpio_resources),
773 &dm646x_gpio_platform_data); 773 &dm646x_gpio_platform_data);
774} 774}
775/*----------------------------------------------------------------------*/ 775/*----------------------------------------------------------------------*/
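
The DaVinci gpio_register fixes above replace sizeof() with ARRAY_SIZE(): sizeof() yields the resource array's size in bytes, while the count the registration helper evidently expects is the number of entries. A minimal illustration with a hypothetical two-entry array:

	#include <linux/kernel.h>	/* ARRAY_SIZE() */
	#include <linux/ioport.h>	/* struct resource */

	static struct resource demo_gpio_resources[2];

	/*
	 * sizeof(demo_gpio_resources)     == 2 * sizeof(struct resource)  (bytes)
	 * ARRAY_SIZE(demo_gpio_resources) == 2                            (entries)
	 */
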
diff --git a/arch/arm/mach-footbridge/common.c b/arch/arm/mach-footbridge/common.c
index 2739ca2c1334..e0091685fd48 100644
--- a/arch/arm/mach-footbridge/common.c
+++ b/arch/arm/mach-footbridge/common.c
@@ -15,6 +15,7 @@
15#include <linux/init.h> 15#include <linux/init.h>
16#include <linux/io.h> 16#include <linux/io.h>
17#include <linux/spinlock.h> 17#include <linux/spinlock.h>
18#include <video/vga.h>
18 19
19#include <asm/pgtable.h> 20#include <asm/pgtable.h>
20#include <asm/page.h> 21#include <asm/page.h>
@@ -196,6 +197,8 @@ void __init footbridge_map_io(void)
196 iotable_init(ebsa285_host_io_desc, ARRAY_SIZE(ebsa285_host_io_desc)); 197 iotable_init(ebsa285_host_io_desc, ARRAY_SIZE(ebsa285_host_io_desc));
197 pci_map_io_early(__phys_to_pfn(DC21285_PCI_IO)); 198 pci_map_io_early(__phys_to_pfn(DC21285_PCI_IO));
198 } 199 }
200
201 vga_base = PCIMEM_BASE;
199} 202}
200 203
201void footbridge_restart(enum reboot_mode mode, const char *cmd) 204void footbridge_restart(enum reboot_mode mode, const char *cmd)
diff --git a/arch/arm/mach-footbridge/dc21285-timer.c b/arch/arm/mach-footbridge/dc21285-timer.c
index 9ee78f7b4990..782f6c71fa0a 100644
--- a/arch/arm/mach-footbridge/dc21285-timer.c
+++ b/arch/arm/mach-footbridge/dc21285-timer.c
@@ -96,11 +96,12 @@ static struct irqaction footbridge_timer_irq = {
96void __init footbridge_timer_init(void) 96void __init footbridge_timer_init(void)
97{ 97{
98 struct clock_event_device *ce = &ckevt_dc21285; 98 struct clock_event_device *ce = &ckevt_dc21285;
99 unsigned rate = DIV_ROUND_CLOSEST(mem_fclk_21285, 16);
99 100
100 clocksource_register_hz(&cksrc_dc21285, (mem_fclk_21285 + 8) / 16); 101 clocksource_register_hz(&cksrc_dc21285, rate);
101 102
102 setup_irq(ce->irq, &footbridge_timer_irq); 103 setup_irq(ce->irq, &footbridge_timer_irq);
103 104
104 ce->cpumask = cpumask_of(smp_processor_id()); 105 ce->cpumask = cpumask_of(smp_processor_id());
105 clockevents_config_and_register(ce, mem_fclk_21285, 0x4, 0xffffff); 106 clockevents_config_and_register(ce, rate, 0x4, 0xffffff);
106} 107}
diff --git a/arch/arm/mach-footbridge/dc21285.c b/arch/arm/mach-footbridge/dc21285.c
index 3490a24f969e..7c2fdae9a38b 100644
--- a/arch/arm/mach-footbridge/dc21285.c
+++ b/arch/arm/mach-footbridge/dc21285.c
@@ -18,7 +18,6 @@
18#include <linux/irq.h> 18#include <linux/irq.h>
19#include <linux/io.h> 19#include <linux/io.h>
20#include <linux/spinlock.h> 20#include <linux/spinlock.h>
21#include <video/vga.h>
22 21
23#include <asm/irq.h> 22#include <asm/irq.h>
24#include <asm/mach/pci.h> 23#include <asm/mach/pci.h>
@@ -291,7 +290,6 @@ void __init dc21285_preinit(void)
291 int cfn_mode; 290 int cfn_mode;
292 291
293 pcibios_min_mem = 0x81000000; 292 pcibios_min_mem = 0x81000000;
294 vga_base = PCIMEM_BASE;
295 293
296 mem_size = (unsigned int)high_memory - PAGE_OFFSET; 294 mem_size = (unsigned int)high_memory - PAGE_OFFSET;
297 for (mem_mask = 0x00100000; mem_mask < 0x10000000; mem_mask <<= 1) 295 for (mem_mask = 0x00100000; mem_mask < 0x10000000; mem_mask <<= 1)
diff --git a/arch/arm/mach-footbridge/ebsa285.c b/arch/arm/mach-footbridge/ebsa285.c
index b08243500e2e..1a7235fb52ac 100644
--- a/arch/arm/mach-footbridge/ebsa285.c
+++ b/arch/arm/mach-footbridge/ebsa285.c
@@ -30,21 +30,24 @@ static const struct {
30 const char *name; 30 const char *name;
31 const char *trigger; 31 const char *trigger;
32} ebsa285_leds[] = { 32} ebsa285_leds[] = {
33 { "ebsa285:amber", "heartbeat", }, 33 { "ebsa285:amber", "cpu0", },
34 { "ebsa285:green", "cpu0", }, 34 { "ebsa285:green", "heartbeat", },
35 { "ebsa285:red",}, 35 { "ebsa285:red",},
36}; 36};
37 37
38static unsigned char hw_led_state;
39
38static void ebsa285_led_set(struct led_classdev *cdev, 40static void ebsa285_led_set(struct led_classdev *cdev,
39 enum led_brightness b) 41 enum led_brightness b)
40{ 42{
41 struct ebsa285_led *led = container_of(cdev, 43 struct ebsa285_led *led = container_of(cdev,
42 struct ebsa285_led, cdev); 44 struct ebsa285_led, cdev);
43 45
44 if (b != LED_OFF) 46 if (b == LED_OFF)
45 *XBUS_LEDS |= led->mask; 47 hw_led_state |= led->mask;
46 else 48 else
47 *XBUS_LEDS &= ~led->mask; 49 hw_led_state &= ~led->mask;
50 *XBUS_LEDS = hw_led_state;
48} 51}
49 52
50static enum led_brightness ebsa285_led_get(struct led_classdev *cdev) 53static enum led_brightness ebsa285_led_get(struct led_classdev *cdev)
@@ -52,18 +55,19 @@ static enum led_brightness ebsa285_led_get(struct led_classdev *cdev)
52 struct ebsa285_led *led = container_of(cdev, 55 struct ebsa285_led *led = container_of(cdev,
53 struct ebsa285_led, cdev); 56 struct ebsa285_led, cdev);
54 57
55 return (*XBUS_LEDS & led->mask) ? LED_FULL : LED_OFF; 58 return hw_led_state & led->mask ? LED_OFF : LED_FULL;
56} 59}
57 60
58static int __init ebsa285_leds_init(void) 61static int __init ebsa285_leds_init(void)
59{ 62{
60 int i; 63 int i;
61 64
62 if (machine_is_ebsa285()) 65 if (!machine_is_ebsa285())
63 return -ENODEV; 66 return -ENODEV;
64 67
65 /* 3 LEDS All ON */ 68 /* 3 LEDS all off */
66 *XBUS_LEDS |= XBUS_LED_AMBER | XBUS_LED_GREEN | XBUS_LED_RED; 69 hw_led_state = XBUS_LED_AMBER | XBUS_LED_GREEN | XBUS_LED_RED;
70 *XBUS_LEDS = hw_led_state;
67 71
68 for (i = 0; i < ARRAY_SIZE(ebsa285_leds); i++) { 72 for (i = 0; i < ARRAY_SIZE(ebsa285_leds); i++) {
69 struct ebsa285_led *led; 73 struct ebsa285_led *led;
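
The EBSA285 LED rework above fixes the inverted machine check, swaps the amber/green triggers, and, because the new code treats the XBUS latch as write-only and active-low, keeps a shadow of the last value written instead of reading the register back. A minimal sketch of that shadow-register pattern, with a hypothetical latch pointer:

	#include <linux/types.h>

	/* write-only latch: keep a shadow copy, since it cannot be read back */
	static unsigned char led_shadow;

	static void demo_led_set(volatile unsigned char *latch,
				 unsigned char mask, bool on)
	{
		if (on)
			led_shadow &= ~mask;	/* active-low: clear bit to light LED */
		else
			led_shadow |= mask;

		*latch = led_shadow;
	}
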
diff --git a/arch/arm/mach-highbank/highbank.c b/arch/arm/mach-highbank/highbank.c
index b3d7e5634b83..bd3bf66ce344 100644
--- a/arch/arm/mach-highbank/highbank.c
+++ b/arch/arm/mach-highbank/highbank.c
@@ -17,12 +17,15 @@
17#include <linux/clkdev.h> 17#include <linux/clkdev.h>
18#include <linux/clocksource.h> 18#include <linux/clocksource.h>
19#include <linux/dma-mapping.h> 19#include <linux/dma-mapping.h>
20#include <linux/input.h>
20#include <linux/io.h> 21#include <linux/io.h>
21#include <linux/irqchip.h> 22#include <linux/irqchip.h>
23#include <linux/mailbox.h>
22#include <linux/of.h> 24#include <linux/of.h>
23#include <linux/of_irq.h> 25#include <linux/of_irq.h>
24#include <linux/of_platform.h> 26#include <linux/of_platform.h>
25#include <linux/of_address.h> 27#include <linux/of_address.h>
28#include <linux/reboot.h>
26#include <linux/amba/bus.h> 29#include <linux/amba/bus.h>
27#include <linux/platform_device.h> 30#include <linux/platform_device.h>
28 31
@@ -130,6 +133,24 @@ static struct platform_device highbank_cpuidle_device = {
130 .name = "cpuidle-calxeda", 133 .name = "cpuidle-calxeda",
131}; 134};
132 135
136static int hb_keys_notifier(struct notifier_block *nb, unsigned long event, void *data)
137{
138 u32 key = *(u32 *)data;
139
140 if (event != 0x1000)
141 return 0;
142
143 if (key == KEY_POWER)
144 orderly_poweroff(false);
145 else if (key == 0xffff)
146 ctrl_alt_del();
147
148 return 0;
149}
150static struct notifier_block hb_keys_nb = {
151 .notifier_call = hb_keys_notifier,
152};
153
133static void __init highbank_init(void) 154static void __init highbank_init(void)
134{ 155{
135 struct device_node *np; 156 struct device_node *np;
@@ -145,6 +166,8 @@ static void __init highbank_init(void)
145 bus_register_notifier(&platform_bus_type, &highbank_platform_nb); 166 bus_register_notifier(&platform_bus_type, &highbank_platform_nb);
146 bus_register_notifier(&amba_bustype, &highbank_amba_nb); 167 bus_register_notifier(&amba_bustype, &highbank_amba_nb);
147 168
169 pl320_ipc_register_notifier(&hb_keys_nb);
170
148 of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); 171 of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
149 172
150 if (psci_ops.cpu_suspend) 173 if (psci_ops.cpu_suspend)
diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile
index 1f25f3e99c05..adcef406ff0a 100644
--- a/arch/arm/mach-omap2/Makefile
+++ b/arch/arm/mach-omap2/Makefile
@@ -19,11 +19,11 @@ secure-common = omap-smc.o omap-secure.o
19 19
20obj-$(CONFIG_ARCH_OMAP2) += $(omap-2-3-common) $(hwmod-common) 20obj-$(CONFIG_ARCH_OMAP2) += $(omap-2-3-common) $(hwmod-common)
21obj-$(CONFIG_ARCH_OMAP3) += $(omap-2-3-common) $(hwmod-common) $(secure-common) 21obj-$(CONFIG_ARCH_OMAP3) += $(omap-2-3-common) $(hwmod-common) $(secure-common)
22obj-$(CONFIG_ARCH_OMAP4) += prm44xx.o $(hwmod-common) $(secure-common) 22obj-$(CONFIG_ARCH_OMAP4) += $(hwmod-common) $(secure-common)
23obj-$(CONFIG_SOC_AM33XX) += irq.o $(hwmod-common) 23obj-$(CONFIG_SOC_AM33XX) += irq.o $(hwmod-common)
24obj-$(CONFIG_SOC_OMAP5) += prm44xx.o $(hwmod-common) $(secure-common) 24obj-$(CONFIG_SOC_OMAP5) += $(hwmod-common) $(secure-common)
25obj-$(CONFIG_SOC_AM43XX) += $(hwmod-common) $(secure-common) 25obj-$(CONFIG_SOC_AM43XX) += $(hwmod-common) $(secure-common)
26obj-$(CONFIG_SOC_DRA7XX) += prm44xx.o $(hwmod-common) $(secure-common) 26obj-$(CONFIG_SOC_DRA7XX) += $(hwmod-common) $(secure-common)
27 27
28ifneq ($(CONFIG_SND_OMAP_SOC_MCBSP),) 28ifneq ($(CONFIG_SND_OMAP_SOC_MCBSP),)
29obj-y += mcbsp.o 29obj-y += mcbsp.o
diff --git a/arch/arm/mach-omap2/board-generic.c b/arch/arm/mach-omap2/board-generic.c
index 19f1652e94cf..8d972ff18c56 100644
--- a/arch/arm/mach-omap2/board-generic.c
+++ b/arch/arm/mach-omap2/board-generic.c
@@ -131,6 +131,24 @@ DT_MACHINE_START(OMAP3_GP_DT, "Generic OMAP3-GP (Flattened Device Tree)")
131 .dt_compat = omap3_gp_boards_compat, 131 .dt_compat = omap3_gp_boards_compat,
132 .restart = omap3xxx_restart, 132 .restart = omap3xxx_restart,
133MACHINE_END 133MACHINE_END
134
135static const char *am3517_boards_compat[] __initdata = {
136 "ti,am3517",
137 NULL,
138};
139
140DT_MACHINE_START(AM3517_DT, "Generic AM3517 (Flattened Device Tree)")
141 .reserve = omap_reserve,
142 .map_io = omap3_map_io,
143 .init_early = am35xx_init_early,
144 .init_irq = omap_intc_of_init,
145 .handle_irq = omap3_intc_handle_irq,
146 .init_machine = omap_generic_init,
147 .init_late = omap3_init_late,
148 .init_time = omap3_gptimer_timer_init,
149 .dt_compat = am3517_boards_compat,
150 .restart = omap3xxx_restart,
151MACHINE_END
134#endif 152#endif
135 153
136#ifdef CONFIG_SOC_AM33XX 154#ifdef CONFIG_SOC_AM33XX
diff --git a/arch/arm/mach-omap2/board-ldp.c b/arch/arm/mach-omap2/board-ldp.c
index 4ec8d82b0492..44a59c3abfb0 100644
--- a/arch/arm/mach-omap2/board-ldp.c
+++ b/arch/arm/mach-omap2/board-ldp.c
@@ -242,12 +242,18 @@ static void __init ldp_display_init(void)
242 242
243static int ldp_twl_gpio_setup(struct device *dev, unsigned gpio, unsigned ngpio) 243static int ldp_twl_gpio_setup(struct device *dev, unsigned gpio, unsigned ngpio)
244{ 244{
245 int res;
246
245 /* LCD enable GPIO */ 247 /* LCD enable GPIO */
246 ldp_lcd_pdata.enable_gpio = gpio + 7; 248 ldp_lcd_pdata.enable_gpio = gpio + 7;
247 249
248 /* Backlight enable GPIO */ 250 /* Backlight enable GPIO */
249 ldp_lcd_pdata.backlight_gpio = gpio + 15; 251 ldp_lcd_pdata.backlight_gpio = gpio + 15;
250 252
253 res = platform_device_register(&ldp_lcd_device);
254 if (res)
255 pr_err("Unable to register LCD: %d\n", res);
256
251 return 0; 257 return 0;
252} 258}
253 259
@@ -346,7 +352,6 @@ static struct omap2_hsmmc_info mmc[] __initdata = {
346 352
347static struct platform_device *ldp_devices[] __initdata = { 353static struct platform_device *ldp_devices[] __initdata = {
348 &ldp_gpio_keys_device, 354 &ldp_gpio_keys_device,
349 &ldp_lcd_device,
350}; 355};
351 356
352#ifdef CONFIG_OMAP_MUX 357#ifdef CONFIG_OMAP_MUX
diff --git a/arch/arm/mach-omap2/common.h b/arch/arm/mach-omap2/common.h
index f7644febee81..e30ef6797c63 100644
--- a/arch/arm/mach-omap2/common.h
+++ b/arch/arm/mach-omap2/common.h
@@ -299,7 +299,6 @@ struct omap_sdrc_params;
299extern void omap_sdrc_init(struct omap_sdrc_params *sdrc_cs0, 299extern void omap_sdrc_init(struct omap_sdrc_params *sdrc_cs0,
300 struct omap_sdrc_params *sdrc_cs1); 300 struct omap_sdrc_params *sdrc_cs1);
301struct omap2_hsmmc_info; 301struct omap2_hsmmc_info;
302extern int omap4_twl6030_hsmmc_init(struct omap2_hsmmc_info *controllers);
303extern void omap_reserve(void); 302extern void omap_reserve(void);
304 303
305struct omap_hwmod; 304struct omap_hwmod;
diff --git a/arch/arm/mach-omap2/display.c b/arch/arm/mach-omap2/display.c
index a4e536b11ec9..4cf165502b35 100644
--- a/arch/arm/mach-omap2/display.c
+++ b/arch/arm/mach-omap2/display.c
@@ -32,7 +32,6 @@
32 32
33#include "soc.h" 33#include "soc.h"
34#include "iomap.h" 34#include "iomap.h"
35#include "mux.h"
36#include "control.h" 35#include "control.h"
37#include "display.h" 36#include "display.h"
38#include "prm.h" 37#include "prm.h"
@@ -102,35 +101,6 @@ static const struct omap_dss_hwmod_data omap4_dss_hwmod_data[] __initconst = {
102 { "dss_hdmi", "omapdss_hdmi", -1 }, 101 { "dss_hdmi", "omapdss_hdmi", -1 },
103}; 102};
104 103
105static void __init omap4_tpd12s015_mux_pads(void)
106{
107 omap_mux_init_signal("hdmi_cec",
108 OMAP_PIN_INPUT_PULLUP);
109 omap_mux_init_signal("hdmi_ddc_scl",
110 OMAP_PIN_INPUT_PULLUP);
111 omap_mux_init_signal("hdmi_ddc_sda",
112 OMAP_PIN_INPUT_PULLUP);
113}
114
115static void __init omap4_hdmi_mux_pads(enum omap_hdmi_flags flags)
116{
117 u32 reg;
118 u16 control_i2c_1;
119
120 /*
121 * CONTROL_I2C_1: HDMI_DDC_SDA_PULLUPRESX (bit 28) and
122 * HDMI_DDC_SCL_PULLUPRESX (bit 24) are set to disable
123 * internal pull up resistor.
124 */
125 if (flags & OMAP_HDMI_SDA_SCL_EXTERNAL_PULLUP) {
126 control_i2c_1 = OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_I2C_1;
127 reg = omap4_ctrl_pad_readl(control_i2c_1);
128 reg |= (OMAP4_HDMI_DDC_SDA_PULLUPRESX_MASK |
129 OMAP4_HDMI_DDC_SCL_PULLUPRESX_MASK);
130 omap4_ctrl_pad_writel(reg, control_i2c_1);
131 }
132}
133
134static int omap4_dsi_mux_pads(int dsi_id, unsigned lanes) 104static int omap4_dsi_mux_pads(int dsi_id, unsigned lanes)
135{ 105{
136 u32 enable_mask, enable_shift; 106 u32 enable_mask, enable_shift;
@@ -164,16 +134,6 @@ static int omap4_dsi_mux_pads(int dsi_id, unsigned lanes)
164 return 0; 134 return 0;
165} 135}
166 136
167int __init omap_hdmi_init(enum omap_hdmi_flags flags)
168{
169 if (cpu_is_omap44xx()) {
170 omap4_hdmi_mux_pads(flags);
171 omap4_tpd12s015_mux_pads();
172 }
173
174 return 0;
175}
176
177static int omap_dsi_enable_pads(int dsi_id, unsigned lane_mask) 137static int omap_dsi_enable_pads(int dsi_id, unsigned lane_mask)
178{ 138{
179 if (cpu_is_omap44xx()) 139 if (cpu_is_omap44xx())
diff --git a/arch/arm/mach-omap2/dss-common.c b/arch/arm/mach-omap2/dss-common.c
index 365bfd3d9c68..dadccc91488c 100644
--- a/arch/arm/mach-omap2/dss-common.c
+++ b/arch/arm/mach-omap2/dss-common.c
@@ -223,7 +223,7 @@ void __init omap_4430sdp_display_init_of(void)
223static struct connector_dvi_platform_data omap3_igep2_dvi_connector_pdata = { 223static struct connector_dvi_platform_data omap3_igep2_dvi_connector_pdata = {
224 .name = "dvi", 224 .name = "dvi",
225 .source = "tfp410.0", 225 .source = "tfp410.0",
226 .i2c_bus_num = 3, 226 .i2c_bus_num = 2,
227}; 227};
228 228
229static struct platform_device omap3_igep2_dvi_connector_device = { 229static struct platform_device omap3_igep2_dvi_connector_device = {
diff --git a/arch/arm/mach-omap2/gpmc.c b/arch/arm/mach-omap2/gpmc.c
index 81de56251955..d24926e6340f 100644
--- a/arch/arm/mach-omap2/gpmc.c
+++ b/arch/arm/mach-omap2/gpmc.c
@@ -1502,6 +1502,22 @@ static int gpmc_probe_generic_child(struct platform_device *pdev,
1502 } 1502 }
1503 1503
1504 /* 1504 /*
1505 * For some GPMC devices we still need to rely on the bootloader
1506 * timings because the devices can be connected via FPGA. So far
1507 * the list is smc91x on the omap2 SDP boards, and 8250 on zooms.
1508 * REVISIT: Add timing support from slls644g.pdf and from the
1509 * lan91c96 manual.
1510 */
1511 if (of_device_is_compatible(child, "ns16550a") ||
1512 of_device_is_compatible(child, "smsc,lan91c94") ||
1513 of_device_is_compatible(child, "smsc,lan91c111")) {
1514 dev_warn(&pdev->dev,
1515 "%s using bootloader timings on CS%d\n",
1516 child->name, cs);
1517 goto no_timings;
1518 }
1519
1520 /*
1505 * FIXME: gpmc_cs_request() will map the CS to an arbitary 1521 * FIXME: gpmc_cs_request() will map the CS to an arbitary
1506 * location in the gpmc address space. When booting with 1522 * location in the gpmc address space. When booting with
1507 * device-tree we want the NOR flash to be mapped to the 1523 * device-tree we want the NOR flash to be mapped to the
@@ -1529,6 +1545,7 @@ static int gpmc_probe_generic_child(struct platform_device *pdev,
1529 gpmc_read_timings_dt(child, &gpmc_t); 1545 gpmc_read_timings_dt(child, &gpmc_t);
1530 gpmc_cs_set_timings(cs, &gpmc_t); 1546 gpmc_cs_set_timings(cs, &gpmc_t);
1531 1547
1548no_timings:
1532 if (of_platform_device_create(child, NULL, &pdev->dev)) 1549 if (of_platform_device_create(child, NULL, &pdev->dev))
1533 return 0; 1550 return 0;
1534 1551
@@ -1541,42 +1558,6 @@ err:
1541 return ret; 1558 return ret;
1542} 1559}
1543 1560
1544/*
1545 * REVISIT: Add timing support from slls644g.pdf
1546 */
1547static int gpmc_probe_8250(struct platform_device *pdev,
1548 struct device_node *child)
1549{
1550 struct resource res;
1551 unsigned long base;
1552 int ret, cs;
1553
1554 if (of_property_read_u32(child, "reg", &cs) < 0) {
1555 dev_err(&pdev->dev, "%s has no 'reg' property\n",
1556 child->full_name);
1557 return -ENODEV;
1558 }
1559
1560 if (of_address_to_resource(child, 0, &res) < 0) {
1561 dev_err(&pdev->dev, "%s has malformed 'reg' property\n",
1562 child->full_name);
1563 return -ENODEV;
1564 }
1565
1566 ret = gpmc_cs_request(cs, resource_size(&res), &base);
1567 if (ret < 0) {
1568 dev_err(&pdev->dev, "cannot request GPMC CS %d\n", cs);
1569 return ret;
1570 }
1571
1572 if (of_platform_device_create(child, NULL, &pdev->dev))
1573 return 0;
1574
1575 dev_err(&pdev->dev, "failed to create gpmc child %s\n", child->name);
1576
1577 return -ENODEV;
1578}
1579
1580static int gpmc_probe_dt(struct platform_device *pdev) 1561static int gpmc_probe_dt(struct platform_device *pdev)
1581{ 1562{
1582 int ret; 1563 int ret;
@@ -1618,10 +1599,9 @@ static int gpmc_probe_dt(struct platform_device *pdev)
1618 else if (of_node_cmp(child->name, "onenand") == 0) 1599 else if (of_node_cmp(child->name, "onenand") == 0)
1619 ret = gpmc_probe_onenand_child(pdev, child); 1600 ret = gpmc_probe_onenand_child(pdev, child);
1620 else if (of_node_cmp(child->name, "ethernet") == 0 || 1601 else if (of_node_cmp(child->name, "ethernet") == 0 ||
1621 of_node_cmp(child->name, "nor") == 0) 1602 of_node_cmp(child->name, "nor") == 0 ||
1603 of_node_cmp(child->name, "uart") == 0)
1622 ret = gpmc_probe_generic_child(pdev, child); 1604 ret = gpmc_probe_generic_child(pdev, child);
1623 else if (of_node_cmp(child->name, "8250") == 0)
1624 ret = gpmc_probe_8250(pdev, child);
1625 1605
1626 if (WARN(ret < 0, "%s: probing gpmc child %s failed\n", 1606 if (WARN(ret < 0, "%s: probing gpmc child %s failed\n",
1627 __func__, child->full_name)) 1607 __func__, child->full_name))
diff --git a/arch/arm/mach-omap2/omap-secure.h b/arch/arm/mach-omap2/omap-secure.h
index 8cc7d331437d..3e97c6c8ecf1 100644
--- a/arch/arm/mach-omap2/omap-secure.h
+++ b/arch/arm/mach-omap2/omap-secure.h
@@ -76,6 +76,13 @@ static inline void omap_barrier_reserve_memblock(void)
76{ } 76{ }
77#endif 77#endif
78 78
79#ifdef CONFIG_SOC_HAS_REALTIME_COUNTER
79void set_cntfreq(void); 80void set_cntfreq(void);
81#else
82static inline void set_cntfreq(void)
83{
84}
85#endif
86
80#endif /* __ASSEMBLER__ */ 87#endif /* __ASSEMBLER__ */
81#endif /* OMAP_ARCH_OMAP_SECURE_H */ 88#endif /* OMAP_ARCH_OMAP_SECURE_H */
diff --git a/arch/arm/mach-omap2/omap4-common.c b/arch/arm/mach-omap2/omap4-common.c
index 57911430324e..b39efd46abf9 100644
--- a/arch/arm/mach-omap2/omap4-common.c
+++ b/arch/arm/mach-omap2/omap4-common.c
@@ -35,7 +35,6 @@
35#include "iomap.h" 35#include "iomap.h"
36#include "common.h" 36#include "common.h"
37#include "mmc.h" 37#include "mmc.h"
38#include "hsmmc.h"
39#include "prminst44xx.h" 38#include "prminst44xx.h"
40#include "prcm_mpu44xx.h" 39#include "prcm_mpu44xx.h"
41#include "omap4-sar-layout.h" 40#include "omap4-sar-layout.h"
@@ -284,59 +283,3 @@ skip_errata_init:
284 omap_wakeupgen_init(); 283 omap_wakeupgen_init();
285 irqchip_init(); 284 irqchip_init();
286} 285}
287
288#if defined(CONFIG_MMC_OMAP_HS) || defined(CONFIG_MMC_OMAP_HS_MODULE)
289static int omap4_twl6030_hsmmc_late_init(struct device *dev)
290{
291 int irq = 0;
292 struct platform_device *pdev = container_of(dev,
293 struct platform_device, dev);
294 struct omap_mmc_platform_data *pdata = dev->platform_data;
295
296 /* Setting MMC1 Card detect Irq */
297 if (pdev->id == 0) {
298 irq = twl6030_mmc_card_detect_config();
299 if (irq < 0) {
300 dev_err(dev, "%s: Error card detect config(%d)\n",
301 __func__, irq);
302 return irq;
303 }
304 pdata->slots[0].card_detect_irq = irq;
305 pdata->slots[0].card_detect = twl6030_mmc_card_detect;
306 }
307 return 0;
308}
309
310static __init void omap4_twl6030_hsmmc_set_late_init(struct device *dev)
311{
312 struct omap_mmc_platform_data *pdata;
313
314 /* dev can be null if CONFIG_MMC_OMAP_HS is not set */
315 if (!dev) {
316 pr_err("Failed %s\n", __func__);
317 return;
318 }
319 pdata = dev->platform_data;
320 pdata->init = omap4_twl6030_hsmmc_late_init;
321}
322
323int __init omap4_twl6030_hsmmc_init(struct omap2_hsmmc_info *controllers)
324{
325 struct omap2_hsmmc_info *c;
326
327 omap_hsmmc_init(controllers);
328 for (c = controllers; c->mmc; c++) {
329 /* pdev can be null if CONFIG_MMC_OMAP_HS is not set */
330 if (!c->pdev)
331 continue;
332 omap4_twl6030_hsmmc_set_late_init(&c->pdev->dev);
333 }
334
335 return 0;
336}
337#else
338int __init omap4_twl6030_hsmmc_init(struct omap2_hsmmc_info *controllers)
339{
340 return 0;
341}
342#endif
diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c
index 53f0735817bb..e0a398cf28d8 100644
--- a/arch/arm/mach-omap2/omap_device.c
+++ b/arch/arm/mach-omap2/omap_device.c
@@ -183,6 +183,10 @@ static int omap_device_build_from_dt(struct platform_device *pdev)
183odbfd_exit1: 183odbfd_exit1:
184 kfree(hwmods); 184 kfree(hwmods);
185odbfd_exit: 185odbfd_exit:
186 /* if data/we are at fault.. load up a fail handler */
187 if (ret)
188 pdev->dev.pm_domain = &omap_device_fail_pm_domain;
189
186 return ret; 190 return ret;
187} 191}
188 192
@@ -604,6 +608,19 @@ static int _od_runtime_resume(struct device *dev)
604 608
605 return pm_generic_runtime_resume(dev); 609 return pm_generic_runtime_resume(dev);
606} 610}
611
612static int _od_fail_runtime_suspend(struct device *dev)
613{
614 dev_warn(dev, "%s: FIXME: missing hwmod/omap_dev info\n", __func__);
615 return -ENODEV;
616}
617
618static int _od_fail_runtime_resume(struct device *dev)
619{
620 dev_warn(dev, "%s: FIXME: missing hwmod/omap_dev info\n", __func__);
621 return -ENODEV;
622}
623
607#endif 624#endif
608 625
609#ifdef CONFIG_SUSPEND 626#ifdef CONFIG_SUSPEND
@@ -657,6 +674,13 @@ static int _od_resume_noirq(struct device *dev)
657#define _od_resume_noirq NULL 674#define _od_resume_noirq NULL
658#endif 675#endif
659 676
677struct dev_pm_domain omap_device_fail_pm_domain = {
678 .ops = {
679 SET_RUNTIME_PM_OPS(_od_fail_runtime_suspend,
680 _od_fail_runtime_resume, NULL)
681 }
682};
683
660struct dev_pm_domain omap_device_pm_domain = { 684struct dev_pm_domain omap_device_pm_domain = {
661 .ops = { 685 .ops = {
662 SET_RUNTIME_PM_OPS(_od_runtime_suspend, _od_runtime_resume, 686 SET_RUNTIME_PM_OPS(_od_runtime_suspend, _od_runtime_resume,
diff --git a/arch/arm/mach-omap2/omap_device.h b/arch/arm/mach-omap2/omap_device.h
index 17ca1aec2710..78c02b355179 100644
--- a/arch/arm/mach-omap2/omap_device.h
+++ b/arch/arm/mach-omap2/omap_device.h
@@ -29,6 +29,7 @@
29#include "omap_hwmod.h" 29#include "omap_hwmod.h"
30 30
31extern struct dev_pm_domain omap_device_pm_domain; 31extern struct dev_pm_domain omap_device_pm_domain;
32extern struct dev_pm_domain omap_device_fail_pm_domain;
32 33
33/* omap_device._state values */ 34/* omap_device._state values */
34#define OMAP_DEVICE_STATE_UNKNOWN 0 35#define OMAP_DEVICE_STATE_UNKNOWN 0
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index e3f0ecaf87dd..8a1b5e0bad40 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -399,7 +399,7 @@ static int _set_clockactivity(struct omap_hwmod *oh, u8 clockact, u32 *v)
399} 399}
400 400
401/** 401/**
402 * _set_softreset: set OCP_SYSCONFIG.CLOCKACTIVITY bits in @v 402 * _set_softreset: set OCP_SYSCONFIG.SOFTRESET bit in @v
403 * @oh: struct omap_hwmod * 403 * @oh: struct omap_hwmod *
404 * @v: pointer to register contents to modify 404 * @v: pointer to register contents to modify
405 * 405 *
@@ -427,6 +427,36 @@ static int _set_softreset(struct omap_hwmod *oh, u32 *v)
427} 427}
428 428
429/** 429/**
430 * _clear_softreset: clear OCP_SYSCONFIG.SOFTRESET bit in @v
431 * @oh: struct omap_hwmod *
432 * @v: pointer to register contents to modify
433 *
434 * Clear the SOFTRESET bit in @v for hwmod @oh. Returns -EINVAL upon
435 * error or 0 upon success.
436 */
437static int _clear_softreset(struct omap_hwmod *oh, u32 *v)
438{
439 u32 softrst_mask;
440
441 if (!oh->class->sysc ||
442 !(oh->class->sysc->sysc_flags & SYSC_HAS_SOFTRESET))
443 return -EINVAL;
444
445 if (!oh->class->sysc->sysc_fields) {
446 WARN(1,
447 "omap_hwmod: %s: sysc_fields absent for sysconfig class\n",
448 oh->name);
449 return -EINVAL;
450 }
451
452 softrst_mask = (0x1 << oh->class->sysc->sysc_fields->srst_shift);
453
454 *v &= ~softrst_mask;
455
456 return 0;
457}
458
459/**
430 * _wait_softreset_complete - wait for an OCP softreset to complete 460 * _wait_softreset_complete - wait for an OCP softreset to complete
431 * @oh: struct omap_hwmod * to wait on 461 * @oh: struct omap_hwmod * to wait on
432 * 462 *
@@ -785,6 +815,7 @@ static int _init_interface_clks(struct omap_hwmod *oh)
785 pr_warning("omap_hwmod: %s: cannot clk_get interface_clk %s\n", 815 pr_warning("omap_hwmod: %s: cannot clk_get interface_clk %s\n",
786 oh->name, os->clk); 816 oh->name, os->clk);
787 ret = -EINVAL; 817 ret = -EINVAL;
818 continue;
788 } 819 }
789 os->_clk = c; 820 os->_clk = c;
790 /* 821 /*
@@ -821,6 +852,7 @@ static int _init_opt_clks(struct omap_hwmod *oh)
821 pr_warning("omap_hwmod: %s: cannot clk_get opt_clk %s\n", 852 pr_warning("omap_hwmod: %s: cannot clk_get opt_clk %s\n",
822 oh->name, oc->clk); 853 oh->name, oc->clk);
823 ret = -EINVAL; 854 ret = -EINVAL;
855 continue;
824 } 856 }
825 oc->_clk = c; 857 oc->_clk = c;
826 /* 858 /*
@@ -1911,6 +1943,12 @@ static int _ocp_softreset(struct omap_hwmod *oh)
1911 ret = _set_softreset(oh, &v); 1943 ret = _set_softreset(oh, &v);
1912 if (ret) 1944 if (ret)
1913 goto dis_opt_clks; 1945 goto dis_opt_clks;
1946
1947 _write_sysconfig(v, oh);
1948 ret = _clear_softreset(oh, &v);
1949 if (ret)
1950 goto dis_opt_clks;
1951
1914 _write_sysconfig(v, oh); 1952 _write_sysconfig(v, oh);
1915 1953
1916 if (oh->class->sysc->srst_udelay) 1954 if (oh->class->sysc->srst_udelay)
@@ -2326,38 +2364,80 @@ static int _shutdown(struct omap_hwmod *oh)
2326 return 0; 2364 return 0;
2327} 2365}
2328 2366
2367static int of_dev_find_hwmod(struct device_node *np,
2368 struct omap_hwmod *oh)
2369{
2370 int count, i, res;
2371 const char *p;
2372
2373 count = of_property_count_strings(np, "ti,hwmods");
2374 if (count < 1)
2375 return -ENODEV;
2376
2377 for (i = 0; i < count; i++) {
2378 res = of_property_read_string_index(np, "ti,hwmods",
2379 i, &p);
2380 if (res)
2381 continue;
2382 if (!strcmp(p, oh->name)) {
2383 pr_debug("omap_hwmod: dt %s[%i] uses hwmod %s\n",
2384 np->name, i, oh->name);
2385 return i;
2386 }
2387 }
2388
2389 return -ENODEV;
2390}
2391
2329/** 2392/**
2330 * of_dev_hwmod_lookup - look up needed hwmod from dt blob 2393 * of_dev_hwmod_lookup - look up needed hwmod from dt blob
2331 * @np: struct device_node * 2394 * @np: struct device_node *
2332 * @oh: struct omap_hwmod * 2395 * @oh: struct omap_hwmod *
2396 * @index: index of the entry found
2397 * @found: struct device_node * found or NULL
2333 * 2398 *
2334 * Parse the dt blob and find the needed hwmod. A recursive walk is 2399 * Parse the dt blob and find the needed hwmod. A recursive walk is
2335 * used to handle the hierarchical dt blob. 2400 * used to handle the hierarchical dt blob.
2336 * Return: The device node on success or NULL on failure. 2401 * Return: 0 on success, -ENODEV when not found.
2337 */ 2402 */
2338static struct device_node *of_dev_hwmod_lookup(struct device_node *np, 2403static int of_dev_hwmod_lookup(struct device_node *np,
2339 struct omap_hwmod *oh) 2404 struct omap_hwmod *oh,
2405 int *index,
2406 struct device_node **found)
2340{ 2407{
2341 struct device_node *np0 = NULL, *np1 = NULL; 2408 struct device_node *np0 = NULL;
2342 const char *p; 2409 int res;
2410
2411 res = of_dev_find_hwmod(np, oh);
2412 if (res >= 0) {
2413 *found = np;
2414 *index = res;
2415 return 0;
2416 }
2343 2417
2344 for_each_child_of_node(np, np0) { 2418 for_each_child_of_node(np, np0) {
2345 if (of_find_property(np0, "ti,hwmods", NULL)) { 2419 struct device_node *fc;
2346 p = of_get_property(np0, "ti,hwmods", NULL); 2420 int i;
2347 if (!strcmp(p, oh->name)) 2421
2348 return np0; 2422 res = of_dev_hwmod_lookup(np0, oh, &i, &fc);
2349 np1 = of_dev_hwmod_lookup(np0, oh); 2423 if (res == 0) {
2350 if (np1) 2424 *found = fc;
2351 return np1; 2425 *index = i;
2426 return 0;
2352 } 2427 }
2353 } 2428 }
2354 return NULL; 2429
2430 *found = NULL;
2431 *index = 0;
2432
2433 return -ENODEV;
2355} 2434}
2356 2435
2357/** 2436/**
2358 * _init_mpu_rt_base - populate the virtual address for a hwmod 2437 * _init_mpu_rt_base - populate the virtual address for a hwmod
2359 * @oh: struct omap_hwmod * to locate the virtual address 2438 * @oh: struct omap_hwmod * to locate the virtual address
2360 * @data: (unused, caller should pass NULL) 2439 * @data: (unused, caller should pass NULL)
2440 * @index: index of the reg entry iospace in device tree
2361 * @np: struct device_node * of the IP block's device node in the DT data 2441 * @np: struct device_node * of the IP block's device node in the DT data
2362 * 2442 *
2363 * Cache the virtual address used by the MPU to access this IP block's 2443 * Cache the virtual address used by the MPU to access this IP block's
@@ -2368,7 +2448,7 @@ static struct device_node *of_dev_hwmod_lookup(struct device_node *np,
2368 * -ENXIO on absent or invalid register target address space. 2448 * -ENXIO on absent or invalid register target address space.
2369 */ 2449 */
2370static int __init _init_mpu_rt_base(struct omap_hwmod *oh, void *data, 2450static int __init _init_mpu_rt_base(struct omap_hwmod *oh, void *data,
2371 struct device_node *np) 2451 int index, struct device_node *np)
2372{ 2452{
2373 struct omap_hwmod_addr_space *mem; 2453 struct omap_hwmod_addr_space *mem;
2374 void __iomem *va_start = NULL; 2454 void __iomem *va_start = NULL;
@@ -2390,13 +2470,17 @@ static int __init _init_mpu_rt_base(struct omap_hwmod *oh, void *data,
2390 if (!np) 2470 if (!np)
2391 return -ENXIO; 2471 return -ENXIO;
2392 2472
2393 va_start = of_iomap(np, oh->mpu_rt_idx); 2473 va_start = of_iomap(np, index + oh->mpu_rt_idx);
2394 } else { 2474 } else {
2395 va_start = ioremap(mem->pa_start, mem->pa_end - mem->pa_start); 2475 va_start = ioremap(mem->pa_start, mem->pa_end - mem->pa_start);
2396 } 2476 }
2397 2477
2398 if (!va_start) { 2478 if (!va_start) {
2399 pr_err("omap_hwmod: %s: Could not ioremap\n", oh->name); 2479 if (mem)
2480 pr_err("omap_hwmod: %s: Could not ioremap\n", oh->name);
2481 else
2482 pr_err("omap_hwmod: %s: Missing dt reg%i for %s\n",
2483 oh->name, index, np->full_name);
2400 return -ENXIO; 2484 return -ENXIO;
2401 } 2485 }
2402 2486
@@ -2422,17 +2506,29 @@ static int __init _init_mpu_rt_base(struct omap_hwmod *oh, void *data,
2422 */ 2506 */
2423static int __init _init(struct omap_hwmod *oh, void *data) 2507static int __init _init(struct omap_hwmod *oh, void *data)
2424{ 2508{
2425 int r; 2509 int r, index;
2426 struct device_node *np = NULL; 2510 struct device_node *np = NULL;
2427 2511
2428 if (oh->_state != _HWMOD_STATE_REGISTERED) 2512 if (oh->_state != _HWMOD_STATE_REGISTERED)
2429 return 0; 2513 return 0;
2430 2514
2431 if (of_have_populated_dt()) 2515 if (of_have_populated_dt()) {
2432 np = of_dev_hwmod_lookup(of_find_node_by_name(NULL, "ocp"), oh); 2516 struct device_node *bus;
2517
2518 bus = of_find_node_by_name(NULL, "ocp");
2519 if (!bus)
2520 return -ENODEV;
2521
2522 r = of_dev_hwmod_lookup(bus, oh, &index, &np);
2523 if (r)
2524 pr_debug("omap_hwmod: %s missing dt data\n", oh->name);
2525 else if (np && index)
2526 pr_warn("omap_hwmod: %s using broken dt data from %s\n",
2527 oh->name, np->name);
2528 }
2433 2529
2434 if (oh->class->sysc) { 2530 if (oh->class->sysc) {
2435 r = _init_mpu_rt_base(oh, NULL, np); 2531 r = _init_mpu_rt_base(oh, NULL, index, np);
2436 if (r < 0) { 2532 if (r < 0) {
2437 WARN(1, "omap_hwmod: %s: doesn't have mpu register target base\n", 2533 WARN(1, "omap_hwmod: %s: doesn't have mpu register target base\n",
2438 oh->name); 2534 oh->name);
@@ -3169,6 +3265,11 @@ int omap_hwmod_softreset(struct omap_hwmod *oh)
3169 goto error; 3265 goto error;
3170 _write_sysconfig(v, oh); 3266 _write_sysconfig(v, oh);
3171 3267
3268 ret = _clear_softreset(oh, &v);
3269 if (ret)
3270 goto error;
3271 _write_sysconfig(v, oh);
3272
3172error: 3273error:
3173 return ret; 3274 return ret;
3174} 3275}
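
(For illustration, not from the patch: the hunks above change both softreset paths from a single SOFTRESET write into a set-then-clear pulse through the new _clear_softreset() helper. A condensed sketch of the resulting sequence, reusing the helper signatures shown above; presumably this matters for IP blocks such as the USB host whose SOFTRESET bit is not cleared by hardware, where a latched bit would hold the module in reset.)

	/* sketch: how _ocp_softreset()/omap_hwmod_softreset() now pulse the bit */
	v = oh->_sysc_cache;
	ret = _set_softreset(oh, &v);	/* set OCP_SYSCONFIG.SOFTRESET in the cached value */
	if (ret)
		goto dis_opt_clks;	/* (or "goto error" in omap_hwmod_softreset()) */
	_write_sysconfig(v, oh);	/* start the reset */

	ret = _clear_softreset(oh, &v);	/* clear the bit again ... */
	if (ret)
		goto dis_opt_clks;
	_write_sysconfig(v, oh);	/* ... so the IP block is not left held in reset */
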
diff --git a/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c b/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c
index 56cebb05509e..d23c77fadb31 100644
--- a/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c
@@ -796,7 +796,7 @@ struct omap_hwmod omap2xxx_counter_32k_hwmod = {
796 796
797/* gpmc */ 797/* gpmc */
798static struct omap_hwmod_irq_info omap2xxx_gpmc_irqs[] = { 798static struct omap_hwmod_irq_info omap2xxx_gpmc_irqs[] = {
799 { .irq = 20 }, 799 { .irq = 20 + OMAP_INTC_START, },
800 { .irq = -1 } 800 { .irq = -1 }
801}; 801};
802 802
@@ -841,7 +841,7 @@ static struct omap_hwmod_class omap2_rng_hwmod_class = {
841}; 841};
842 842
843static struct omap_hwmod_irq_info omap2_rng_mpu_irqs[] = { 843static struct omap_hwmod_irq_info omap2_rng_mpu_irqs[] = {
844 { .irq = 52 }, 844 { .irq = 52 + OMAP_INTC_START, },
845 { .irq = -1 } 845 { .irq = -1 }
846}; 846};
847 847
diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
index 9e56fabd7fa3..4c3b1e6df508 100644
--- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
@@ -1943,7 +1943,8 @@ static struct omap_hwmod_class_sysconfig omap3xxx_usb_host_hs_sysc = {
1943 .syss_offs = 0x0014, 1943 .syss_offs = 0x0014,
1944 .sysc_flags = (SYSC_HAS_MIDLEMODE | SYSC_HAS_CLOCKACTIVITY | 1944 .sysc_flags = (SYSC_HAS_MIDLEMODE | SYSC_HAS_CLOCKACTIVITY |
1945 SYSC_HAS_SIDLEMODE | SYSC_HAS_ENAWAKEUP | 1945 SYSC_HAS_SIDLEMODE | SYSC_HAS_ENAWAKEUP |
1946 SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE), 1946 SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE |
1947 SYSS_HAS_RESET_STATUS),
1947 .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART | 1948 .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
1948 MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART), 1949 MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART),
1949 .sysc_fields = &omap_hwmod_sysc_type1, 1950 .sysc_fields = &omap_hwmod_sysc_type1,
@@ -2021,15 +2022,7 @@ static struct omap_hwmod omap3xxx_usb_host_hs_hwmod = {
2021 * hence HWMOD_SWSUP_MSTANDBY 2022 * hence HWMOD_SWSUP_MSTANDBY
2022 */ 2023 */
2023 2024
2024 /* 2025 .flags = HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY,
2025 * During system boot; If the hwmod framework resets the module
2026 * the module will have smart idle settings; which can lead to deadlock
2027 * (above Errata Id:i660); so, dont reset the module during boot;
2028 * Use HWMOD_INIT_NO_RESET.
2029 */
2030
2031 .flags = HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY |
2032 HWMOD_INIT_NO_RESET,
2033}; 2026};
2034 2027
2035/* 2028/*
@@ -2172,7 +2165,7 @@ static struct omap_hwmod_class omap3xxx_gpmc_hwmod_class = {
2172}; 2165};
2173 2166
2174static struct omap_hwmod_irq_info omap3xxx_gpmc_irqs[] = { 2167static struct omap_hwmod_irq_info omap3xxx_gpmc_irqs[] = {
2175 { .irq = 20 }, 2168 { .irq = 20 + OMAP_INTC_START, },
2176 { .irq = -1 } 2169 { .irq = -1 }
2177}; 2170};
2178 2171
@@ -3006,7 +2999,7 @@ static struct omap_mmu_dev_attr mmu_isp_dev_attr = {
3006 2999
3007static struct omap_hwmod omap3xxx_mmu_isp_hwmod; 3000static struct omap_hwmod omap3xxx_mmu_isp_hwmod;
3008static struct omap_hwmod_irq_info omap3xxx_mmu_isp_irqs[] = { 3001static struct omap_hwmod_irq_info omap3xxx_mmu_isp_irqs[] = {
3009 { .irq = 24 }, 3002 { .irq = 24 + OMAP_INTC_START, },
3010 { .irq = -1 } 3003 { .irq = -1 }
3011}; 3004};
3012 3005
@@ -3048,7 +3041,7 @@ static struct omap_mmu_dev_attr mmu_iva_dev_attr = {
3048 3041
3049static struct omap_hwmod omap3xxx_mmu_iva_hwmod; 3042static struct omap_hwmod omap3xxx_mmu_iva_hwmod;
3050static struct omap_hwmod_irq_info omap3xxx_mmu_iva_irqs[] = { 3043static struct omap_hwmod_irq_info omap3xxx_mmu_iva_irqs[] = {
3051 { .irq = 28 }, 3044 { .irq = 28 + OMAP_INTC_START, },
3052 { .irq = -1 } 3045 { .irq = -1 }
3053}; 3046};
3054 3047
diff --git a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
index 1e5b12cb8246..3318cae96e7d 100644
--- a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
@@ -2937,7 +2937,7 @@ static struct omap_hwmod_class_sysconfig omap44xx_usb_host_hs_sysc = {
2937 .sysc_offs = 0x0010, 2937 .sysc_offs = 0x0010,
2938 .syss_offs = 0x0014, 2938 .syss_offs = 0x0014,
2939 .sysc_flags = (SYSC_HAS_MIDLEMODE | SYSC_HAS_SIDLEMODE | 2939 .sysc_flags = (SYSC_HAS_MIDLEMODE | SYSC_HAS_SIDLEMODE |
2940 SYSC_HAS_SOFTRESET), 2940 SYSC_HAS_SOFTRESET | SYSC_HAS_RESET_STATUS),
2941 .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART | 2941 .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
2942 SIDLE_SMART_WKUP | MSTANDBY_FORCE | MSTANDBY_NO | 2942 SIDLE_SMART_WKUP | MSTANDBY_FORCE | MSTANDBY_NO |
2943 MSTANDBY_SMART | MSTANDBY_SMART_WKUP), 2943 MSTANDBY_SMART | MSTANDBY_SMART_WKUP),
@@ -3001,15 +3001,7 @@ static struct omap_hwmod omap44xx_usb_host_hs_hwmod = {
3001 * hence HWMOD_SWSUP_MSTANDBY 3001 * hence HWMOD_SWSUP_MSTANDBY
3002 */ 3002 */
3003 3003
3004 /* 3004 .flags = HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY,
3005 * During system boot; If the hwmod framework resets the module
3006 * the module will have smart idle settings; which can lead to deadlock
3007 * (above Errata Id:i660); so, dont reset the module during boot;
3008 * Use HWMOD_INIT_NO_RESET.
3009 */
3010
3011 .flags = HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY |
3012 HWMOD_INIT_NO_RESET,
3013}; 3005};
3014 3006
3015/* 3007/*
diff --git a/arch/arm/mach-omap2/omap_hwmod_54xx_data.c b/arch/arm/mach-omap2/omap_hwmod_54xx_data.c
index 9e08d6994a0b..e297d6231c3a 100644
--- a/arch/arm/mach-omap2/omap_hwmod_54xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_54xx_data.c
@@ -1544,7 +1544,8 @@ static struct omap_hwmod_class_sysconfig omap54xx_usb_host_hs_sysc = {
1544 .rev_offs = 0x0000, 1544 .rev_offs = 0x0000,
1545 .sysc_offs = 0x0010, 1545 .sysc_offs = 0x0010,
1546 .sysc_flags = (SYSC_HAS_MIDLEMODE | SYSC_HAS_RESET_STATUS | 1546 .sysc_flags = (SYSC_HAS_MIDLEMODE | SYSC_HAS_RESET_STATUS |
1547 SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET), 1547 SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET |
1548 SYSC_HAS_RESET_STATUS),
1548 .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART | 1549 .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
1549 SIDLE_SMART_WKUP | MSTANDBY_FORCE | MSTANDBY_NO | 1550 SIDLE_SMART_WKUP | MSTANDBY_FORCE | MSTANDBY_NO |
1550 MSTANDBY_SMART | MSTANDBY_SMART_WKUP), 1551 MSTANDBY_SMART | MSTANDBY_SMART_WKUP),
@@ -1598,15 +1599,7 @@ static struct omap_hwmod omap54xx_usb_host_hs_hwmod = {
1598 * hence HWMOD_SWSUP_MSTANDBY 1599 * hence HWMOD_SWSUP_MSTANDBY
1599 */ 1600 */
1600 1601
1601 /* 1602 .flags = HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY,
1602 * During system boot; If the hwmod framework resets the module
1603 * the module will have smart idle settings; which can lead to deadlock
1604 * (above Errata Id:i660); so, dont reset the module during boot;
1605 * Use HWMOD_INIT_NO_RESET.
1606 */
1607
1608 .flags = HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY |
1609 HWMOD_INIT_NO_RESET,
1610 .main_clk = "l3init_60m_fclk", 1603 .main_clk = "l3init_60m_fclk",
1611 .prcm = { 1604 .prcm = {
1612 .omap4 = { 1605 .omap4 = {
diff --git a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
index db32d5380b11..18f333c440db 100644
--- a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
@@ -1637,7 +1637,7 @@ static struct omap_hwmod dra7xx_uart1_hwmod = {
1637 .class = &dra7xx_uart_hwmod_class, 1637 .class = &dra7xx_uart_hwmod_class,
1638 .clkdm_name = "l4per_clkdm", 1638 .clkdm_name = "l4per_clkdm",
1639 .main_clk = "uart1_gfclk_mux", 1639 .main_clk = "uart1_gfclk_mux",
1640 .flags = HWMOD_SWSUP_SIDLE_ACT, 1640 .flags = HWMOD_SWSUP_SIDLE_ACT | DEBUG_OMAP2UART1_FLAGS,
1641 .prcm = { 1641 .prcm = {
1642 .omap4 = { 1642 .omap4 = {
1643 .clkctrl_offs = DRA7XX_CM_L4PER_UART1_CLKCTRL_OFFSET, 1643 .clkctrl_offs = DRA7XX_CM_L4PER_UART1_CLKCTRL_OFFSET,
diff --git a/arch/arm/mach-omap2/pdata-quirks.c b/arch/arm/mach-omap2/pdata-quirks.c
index 10c71450cf63..39f020c982e8 100644
--- a/arch/arm/mach-omap2/pdata-quirks.c
+++ b/arch/arm/mach-omap2/pdata-quirks.c
@@ -139,6 +139,7 @@ struct of_dev_auxdata omap_auxdata_lookup[] __initdata = {
139 139
140static struct pdata_init pdata_quirks[] __initdata = { 140static struct pdata_init pdata_quirks[] __initdata = {
141#ifdef CONFIG_ARCH_OMAP3 141#ifdef CONFIG_ARCH_OMAP3
142 { "nokia,omap3-n900", hsmmc2_internal_input_clk, },
142 { "nokia,omap3-n9", hsmmc2_internal_input_clk, }, 143 { "nokia,omap3-n9", hsmmc2_internal_input_clk, },
143 { "nokia,omap3-n950", hsmmc2_internal_input_clk, }, 144 { "nokia,omap3-n950", hsmmc2_internal_input_clk, },
144 { "isee,omap3-igep0020", omap3_igep0020_legacy_init, }, 145 { "isee,omap3-igep0020", omap3_igep0020_legacy_init, },
diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c
index 93b80e5da8d4..1f3770a8a728 100644
--- a/arch/arm/mach-omap2/pm34xx.c
+++ b/arch/arm/mach-omap2/pm34xx.c
@@ -120,7 +120,7 @@ static void omap3_save_secure_ram_context(void)
120 * will hang the system. 120 * will hang the system.
121 */ 121 */
122 pwrdm_set_next_pwrst(mpu_pwrdm, PWRDM_POWER_ON); 122 pwrdm_set_next_pwrst(mpu_pwrdm, PWRDM_POWER_ON);
123 ret = _omap_save_secure_sram((u32 *) 123 ret = _omap_save_secure_sram((u32 *)(unsigned long)
124 __pa(omap3_secure_ram_storage)); 124 __pa(omap3_secure_ram_storage));
125 pwrdm_set_next_pwrst(mpu_pwrdm, mpu_next_state); 125 pwrdm_set_next_pwrst(mpu_pwrdm, mpu_next_state);
126 /* Following is for error tracking, it should not happen */ 126 /* Following is for error tracking, it should not happen */
diff --git a/arch/arm/mach-omap2/powerdomain.c b/arch/arm/mach-omap2/powerdomain.c
index e233dfcbc186..93a2a6e4260f 100644
--- a/arch/arm/mach-omap2/powerdomain.c
+++ b/arch/arm/mach-omap2/powerdomain.c
@@ -128,7 +128,8 @@ skip_voltdm:
128 for (i = 0; i < pwrdm->banks; i++) 128 for (i = 0; i < pwrdm->banks; i++)
129 pwrdm->ret_mem_off_counter[i] = 0; 129 pwrdm->ret_mem_off_counter[i] = 0;
130 130
131 arch_pwrdm->pwrdm_wait_transition(pwrdm); 131 if (arch_pwrdm && arch_pwrdm->pwrdm_wait_transition)
132 arch_pwrdm->pwrdm_wait_transition(pwrdm);
132 pwrdm->state = pwrdm_read_pwrst(pwrdm); 133 pwrdm->state = pwrdm_read_pwrst(pwrdm);
133 pwrdm->state_counter[pwrdm->state] = 1; 134 pwrdm->state_counter[pwrdm->state] = 1;
134 135
diff --git a/arch/arm/mach-omap2/prm44xx_54xx.h b/arch/arm/mach-omap2/prm44xx_54xx.h
index 7a976065e138..8d95aa543ef5 100644
--- a/arch/arm/mach-omap2/prm44xx_54xx.h
+++ b/arch/arm/mach-omap2/prm44xx_54xx.h
@@ -43,7 +43,7 @@ extern void omap4_prm_vcvp_write(u32 val, u8 offset);
43extern u32 omap4_prm_vcvp_rmw(u32 mask, u32 bits, u8 offset); 43extern u32 omap4_prm_vcvp_rmw(u32 mask, u32 bits, u8 offset);
44 44
45#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) || \ 45#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) || \
46 defined(CONFIG_SOC_DRA7XX) 46 defined(CONFIG_SOC_DRA7XX) || defined(CONFIG_SOC_AM43XX)
47void omap44xx_prm_reconfigure_io_chain(void); 47void omap44xx_prm_reconfigure_io_chain(void);
48#else 48#else
49static inline void omap44xx_prm_reconfigure_io_chain(void) 49static inline void omap44xx_prm_reconfigure_io_chain(void)
diff --git a/arch/arm/mach-pxa/include/mach/lubbock.h b/arch/arm/mach-pxa/include/mach/lubbock.h
index 2a086e8373eb..958cd6af9384 100644
--- a/arch/arm/mach-pxa/include/mach/lubbock.h
+++ b/arch/arm/mach-pxa/include/mach/lubbock.h
@@ -10,6 +10,8 @@
10 * published by the Free Software Foundation. 10 * published by the Free Software Foundation.
11 */ 11 */
12 12
13#include <mach/irqs.h>
14
13#define LUBBOCK_ETH_PHYS PXA_CS3_PHYS 15#define LUBBOCK_ETH_PHYS PXA_CS3_PHYS
14 16
15#define LUBBOCK_FPGA_PHYS PXA_CS2_PHYS 17#define LUBBOCK_FPGA_PHYS PXA_CS2_PHYS
diff --git a/arch/arm/mach-pxa/reset.c b/arch/arm/mach-pxa/reset.c
index 0d5dd646f61f..263b15249b5b 100644
--- a/arch/arm/mach-pxa/reset.c
+++ b/arch/arm/mach-pxa/reset.c
@@ -13,6 +13,7 @@
13 13
14#include <mach/regs-ost.h> 14#include <mach/regs-ost.h>
15#include <mach/reset.h> 15#include <mach/reset.h>
16#include <mach/smemc.h>
16 17
17unsigned int reset_status; 18unsigned int reset_status;
18EXPORT_SYMBOL(reset_status); 19EXPORT_SYMBOL(reset_status);
@@ -81,6 +82,12 @@ static void do_hw_reset(void)
81 writel_relaxed(OSSR_M3, OSSR); 82 writel_relaxed(OSSR_M3, OSSR);
82 /* ... in 100 ms */ 83 /* ... in 100 ms */
83 writel_relaxed(readl_relaxed(OSCR) + 368640, OSMR3); 84 writel_relaxed(readl_relaxed(OSCR) + 368640, OSMR3);
85 /*
 86 * SDRAM hangs on watchdog reset on Marvell PXA270 (erratum 71);
 87 * we put SDRAM into self-refresh to prevent that.
88 */
89 while (1)
90 writel_relaxed(MDREFR_SLFRSH, MDREFR);
84} 91}
85 92
86void pxa_restart(enum reboot_mode mode, const char *cmd) 93void pxa_restart(enum reboot_mode mode, const char *cmd)
@@ -104,4 +111,3 @@ void pxa_restart(enum reboot_mode mode, const char *cmd)
104 break; 111 break;
105 } 112 }
106} 113}
107
diff --git a/arch/arm/mach-pxa/tosa.c b/arch/arm/mach-pxa/tosa.c
index 0206b915a6f6..ef5557b807ed 100644
--- a/arch/arm/mach-pxa/tosa.c
+++ b/arch/arm/mach-pxa/tosa.c
@@ -425,57 +425,57 @@ static struct platform_device tosa_power_device = {
425 * Tosa Keyboard 425 * Tosa Keyboard
426 */ 426 */
427static const uint32_t tosakbd_keymap[] = { 427static const uint32_t tosakbd_keymap[] = {
428 KEY(0, 2, KEY_W), 428 KEY(0, 1, KEY_W),
429 KEY(0, 6, KEY_K), 429 KEY(0, 5, KEY_K),
430 KEY(0, 7, KEY_BACKSPACE), 430 KEY(0, 6, KEY_BACKSPACE),
431 KEY(0, 8, KEY_P), 431 KEY(0, 7, KEY_P),
432 KEY(1, 1, KEY_Q), 432 KEY(1, 0, KEY_Q),
433 KEY(1, 2, KEY_E), 433 KEY(1, 1, KEY_E),
434 KEY(1, 3, KEY_T), 434 KEY(1, 2, KEY_T),
435 KEY(1, 4, KEY_Y), 435 KEY(1, 3, KEY_Y),
436 KEY(1, 6, KEY_O), 436 KEY(1, 5, KEY_O),
437 KEY(1, 7, KEY_I), 437 KEY(1, 6, KEY_I),
438 KEY(1, 8, KEY_COMMA), 438 KEY(1, 7, KEY_COMMA),
439 KEY(2, 1, KEY_A), 439 KEY(2, 0, KEY_A),
440 KEY(2, 2, KEY_D), 440 KEY(2, 1, KEY_D),
441 KEY(2, 3, KEY_G), 441 KEY(2, 2, KEY_G),
442 KEY(2, 4, KEY_U), 442 KEY(2, 3, KEY_U),
443 KEY(2, 6, KEY_L), 443 KEY(2, 5, KEY_L),
444 KEY(2, 7, KEY_ENTER), 444 KEY(2, 6, KEY_ENTER),
445 KEY(2, 8, KEY_DOT), 445 KEY(2, 7, KEY_DOT),
446 KEY(3, 1, KEY_Z), 446 KEY(3, 0, KEY_Z),
447 KEY(3, 2, KEY_C), 447 KEY(3, 1, KEY_C),
448 KEY(3, 3, KEY_V), 448 KEY(3, 2, KEY_V),
449 KEY(3, 4, KEY_J), 449 KEY(3, 3, KEY_J),
450 KEY(3, 5, TOSA_KEY_ADDRESSBOOK), 450 KEY(3, 4, TOSA_KEY_ADDRESSBOOK),
451 KEY(3, 6, TOSA_KEY_CANCEL), 451 KEY(3, 5, TOSA_KEY_CANCEL),
452 KEY(3, 7, TOSA_KEY_CENTER), 452 KEY(3, 6, TOSA_KEY_CENTER),
453 KEY(3, 8, TOSA_KEY_OK), 453 KEY(3, 7, TOSA_KEY_OK),
454 KEY(3, 9, KEY_LEFTSHIFT), 454 KEY(3, 8, KEY_LEFTSHIFT),
455 KEY(4, 1, KEY_S), 455 KEY(4, 0, KEY_S),
456 KEY(4, 2, KEY_R), 456 KEY(4, 1, KEY_R),
457 KEY(4, 3, KEY_B), 457 KEY(4, 2, KEY_B),
458 KEY(4, 4, KEY_N), 458 KEY(4, 3, KEY_N),
459 KEY(4, 5, TOSA_KEY_CALENDAR), 459 KEY(4, 4, TOSA_KEY_CALENDAR),
460 KEY(4, 6, TOSA_KEY_HOMEPAGE), 460 KEY(4, 5, TOSA_KEY_HOMEPAGE),
461 KEY(4, 7, KEY_LEFTCTRL), 461 KEY(4, 6, KEY_LEFTCTRL),
462 KEY(4, 8, TOSA_KEY_LIGHT), 462 KEY(4, 7, TOSA_KEY_LIGHT),
463 KEY(4, 10, KEY_RIGHTSHIFT), 463 KEY(4, 9, KEY_RIGHTSHIFT),
464 KEY(5, 1, KEY_TAB), 464 KEY(5, 0, KEY_TAB),
465 KEY(5, 2, KEY_SLASH), 465 KEY(5, 1, KEY_SLASH),
466 KEY(5, 3, KEY_H), 466 KEY(5, 2, KEY_H),
467 KEY(5, 4, KEY_M), 467 KEY(5, 3, KEY_M),
468 KEY(5, 5, TOSA_KEY_MENU), 468 KEY(5, 4, TOSA_KEY_MENU),
469 KEY(5, 7, KEY_UP), 469 KEY(5, 6, KEY_UP),
470 KEY(5, 11, TOSA_KEY_FN), 470 KEY(5, 10, TOSA_KEY_FN),
471 KEY(6, 1, KEY_X), 471 KEY(6, 0, KEY_X),
472 KEY(6, 2, KEY_F), 472 KEY(6, 1, KEY_F),
473 KEY(6, 3, KEY_SPACE), 473 KEY(6, 2, KEY_SPACE),
474 KEY(6, 4, KEY_APOSTROPHE), 474 KEY(6, 3, KEY_APOSTROPHE),
475 KEY(6, 5, TOSA_KEY_MAIL), 475 KEY(6, 4, TOSA_KEY_MAIL),
476 KEY(6, 6, KEY_LEFT), 476 KEY(6, 5, KEY_LEFT),
477 KEY(6, 7, KEY_DOWN), 477 KEY(6, 6, KEY_DOWN),
478 KEY(6, 8, KEY_RIGHT), 478 KEY(6, 7, KEY_RIGHT),
479}; 479};
480 480
481static struct matrix_keymap_data tosakbd_keymap_data = { 481static struct matrix_keymap_data tosakbd_keymap_data = {
diff --git a/arch/arm/mach-s3c64xx/mach-s3c64xx-dt.c b/arch/arm/mach-s3c64xx/mach-s3c64xx-dt.c
index 7eb9a10fc1af..2fddf38192df 100644
--- a/arch/arm/mach-s3c64xx/mach-s3c64xx-dt.c
+++ b/arch/arm/mach-s3c64xx/mach-s3c64xx-dt.c
@@ -8,8 +8,6 @@
8 * published by the Free Software Foundation. 8 * published by the Free Software Foundation.
9*/ 9*/
10 10
11#include <linux/clk-provider.h>
12#include <linux/irqchip.h>
13#include <linux/of_platform.h> 11#include <linux/of_platform.h>
14 12
15#include <asm/mach/arch.h> 13#include <asm/mach/arch.h>
@@ -48,15 +46,9 @@ static void __init s3c64xx_dt_map_io(void)
48 panic("SoC is not S3C64xx!"); 46 panic("SoC is not S3C64xx!");
49} 47}
50 48
51static void __init s3c64xx_dt_init_irq(void)
52{
53 of_clk_init(NULL);
54 samsung_wdt_reset_of_init();
55 irqchip_init();
56};
57
58static void __init s3c64xx_dt_init_machine(void) 49static void __init s3c64xx_dt_init_machine(void)
59{ 50{
51 samsung_wdt_reset_of_init();
60 of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); 52 of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
61} 53}
62 54
@@ -79,7 +71,6 @@ DT_MACHINE_START(S3C6400_DT, "Samsung S3C64xx (Flattened Device Tree)")
79 /* Maintainer: Tomasz Figa <tomasz.figa@gmail.com> */ 71 /* Maintainer: Tomasz Figa <tomasz.figa@gmail.com> */
80 .dt_compat = s3c64xx_dt_compat, 72 .dt_compat = s3c64xx_dt_compat,
81 .map_io = s3c64xx_dt_map_io, 73 .map_io = s3c64xx_dt_map_io,
82 .init_irq = s3c64xx_dt_init_irq,
83 .init_machine = s3c64xx_dt_init_machine, 74 .init_machine = s3c64xx_dt_init_machine,
84 .restart = s3c64xx_dt_restart, 75 .restart = s3c64xx_dt_restart,
85MACHINE_END 76MACHINE_END
diff --git a/arch/arm/mach-shmobile/board-armadillo800eva.c b/arch/arm/mach-shmobile/board-armadillo800eva.c
index 958e3cbf0ac2..8ea87bd45c33 100644
--- a/arch/arm/mach-shmobile/board-armadillo800eva.c
+++ b/arch/arm/mach-shmobile/board-armadillo800eva.c
@@ -483,7 +483,7 @@ static struct platform_device lcdc0_device = {
483 .id = 0, 483 .id = 0,
484 .dev = { 484 .dev = {
485 .platform_data = &lcdc0_info, 485 .platform_data = &lcdc0_info,
486 .coherent_dma_mask = ~0, 486 .coherent_dma_mask = DMA_BIT_MASK(32),
487 }, 487 },
488}; 488};
489 489
@@ -580,7 +580,7 @@ static struct platform_device hdmi_lcdc_device = {
580 .id = 1, 580 .id = 1,
581 .dev = { 581 .dev = {
582 .platform_data = &hdmi_lcdc_info, 582 .platform_data = &hdmi_lcdc_info,
583 .coherent_dma_mask = ~0, 583 .coherent_dma_mask = DMA_BIT_MASK(32),
584 }, 584 },
585}; 585};
586 586
@@ -614,6 +614,11 @@ static struct regulator_consumer_supply fixed3v3_power_consumers[] = {
614 REGULATOR_SUPPLY("vqmmc", "sh_mmcif"), 614 REGULATOR_SUPPLY("vqmmc", "sh_mmcif"),
615}; 615};
616 616
 617/* Fixed 5.0V regulator used by LCD backlight */
618static struct regulator_consumer_supply fixed5v0_power_consumers[] = {
619 REGULATOR_SUPPLY("power", "pwm-backlight.0"),
620};
621
617/* Fixed 3.3V regulator to be used by SDHI0 */ 622/* Fixed 3.3V regulator to be used by SDHI0 */
618static struct regulator_consumer_supply vcc_sdhi0_consumers[] = { 623static struct regulator_consumer_supply vcc_sdhi0_consumers[] = {
619 REGULATOR_SUPPLY("vmmc", "sh_mobile_sdhi.0"), 624 REGULATOR_SUPPLY("vmmc", "sh_mobile_sdhi.0"),
@@ -1196,6 +1201,8 @@ static void __init eva_init(void)
1196 1201
1197 regulator_register_always_on(0, "fixed-3.3V", fixed3v3_power_consumers, 1202 regulator_register_always_on(0, "fixed-3.3V", fixed3v3_power_consumers,
1198 ARRAY_SIZE(fixed3v3_power_consumers), 3300000); 1203 ARRAY_SIZE(fixed3v3_power_consumers), 3300000);
1204 regulator_register_always_on(3, "fixed-5.0V", fixed5v0_power_consumers,
1205 ARRAY_SIZE(fixed5v0_power_consumers), 5000000);
1199 1206
1200 pinctrl_register_mappings(eva_pinctrl_map, ARRAY_SIZE(eva_pinctrl_map)); 1207 pinctrl_register_mappings(eva_pinctrl_map, ARRAY_SIZE(eva_pinctrl_map));
1201 pwm_add_table(pwm_lookup, ARRAY_SIZE(pwm_lookup)); 1208 pwm_add_table(pwm_lookup, ARRAY_SIZE(pwm_lookup));
diff --git a/arch/arm/mach-shmobile/board-bockw.c b/arch/arm/mach-shmobile/board-bockw.c
index 38611526fe9a..3c4995aebd22 100644
--- a/arch/arm/mach-shmobile/board-bockw.c
+++ b/arch/arm/mach-shmobile/board-bockw.c
@@ -679,7 +679,7 @@ static void __init bockw_init(void)
679 .id = i, 679 .id = i,
680 .data = &rsnd_card_info[i], 680 .data = &rsnd_card_info[i],
681 .size_data = sizeof(struct asoc_simple_card_info), 681 .size_data = sizeof(struct asoc_simple_card_info),
682 .dma_mask = ~0, 682 .dma_mask = DMA_BIT_MASK(32),
683 }; 683 };
684 684
685 platform_device_register_full(&cardinfo); 685 platform_device_register_full(&cardinfo);
diff --git a/arch/arm/mach-shmobile/board-kzm9g.c b/arch/arm/mach-shmobile/board-kzm9g.c
index fe689b7fdc9e..bc40b853ffd3 100644
--- a/arch/arm/mach-shmobile/board-kzm9g.c
+++ b/arch/arm/mach-shmobile/board-kzm9g.c
@@ -334,7 +334,7 @@ static struct platform_device lcdc_device = {
334 .resource = lcdc_resources, 334 .resource = lcdc_resources,
335 .dev = { 335 .dev = {
336 .platform_data = &lcdc_info, 336 .platform_data = &lcdc_info,
337 .coherent_dma_mask = ~0, 337 .coherent_dma_mask = DMA_BIT_MASK(32),
338 }, 338 },
339}; 339};
340 340
diff --git a/arch/arm/mach-shmobile/board-lager.c b/arch/arm/mach-shmobile/board-lager.c
index a8d3ce646fb9..e0406fd37390 100644
--- a/arch/arm/mach-shmobile/board-lager.c
+++ b/arch/arm/mach-shmobile/board-lager.c
@@ -245,7 +245,9 @@ static void __init lager_init(void)
245{ 245{
246 lager_add_standard_devices(); 246 lager_add_standard_devices();
247 247
248 phy_register_fixup_for_id("r8a7790-ether-ff:01", lager_ksz8041_fixup); 248 if (IS_ENABLED(CONFIG_PHYLIB))
249 phy_register_fixup_for_id("r8a7790-ether-ff:01",
250 lager_ksz8041_fixup);
249} 251}
250 252
251static const char * const lager_boards_compat_dt[] __initconst = { 253static const char * const lager_boards_compat_dt[] __initconst = {
diff --git a/arch/arm/mach-shmobile/board-mackerel.c b/arch/arm/mach-shmobile/board-mackerel.c
index af06753eb809..e721d2ccceae 100644
--- a/arch/arm/mach-shmobile/board-mackerel.c
+++ b/arch/arm/mach-shmobile/board-mackerel.c
@@ -409,7 +409,7 @@ static struct platform_device lcdc_device = {
409 .resource = lcdc_resources, 409 .resource = lcdc_resources,
410 .dev = { 410 .dev = {
411 .platform_data = &lcdc_info, 411 .platform_data = &lcdc_info,
412 .coherent_dma_mask = ~0, 412 .coherent_dma_mask = DMA_BIT_MASK(32),
413 }, 413 },
414}; 414};
415 415
@@ -499,7 +499,7 @@ static struct platform_device hdmi_lcdc_device = {
499 .id = 1, 499 .id = 1,
500 .dev = { 500 .dev = {
501 .platform_data = &hdmi_lcdc_info, 501 .platform_data = &hdmi_lcdc_info,
502 .coherent_dma_mask = ~0, 502 .coherent_dma_mask = DMA_BIT_MASK(32),
503 }, 503 },
504}; 504};
505 505
diff --git a/arch/arm/mach-socfpga/Kconfig b/arch/arm/mach-socfpga/Kconfig
index 037100a1563a..aee77f06f887 100644
--- a/arch/arm/mach-socfpga/Kconfig
+++ b/arch/arm/mach-socfpga/Kconfig
@@ -10,6 +10,7 @@ config ARCH_SOCFPGA
10 select GENERIC_CLOCKEVENTS 10 select GENERIC_CLOCKEVENTS
11 select GPIO_PL061 if GPIOLIB 11 select GPIO_PL061 if GPIOLIB
12 select HAVE_ARM_SCU 12 select HAVE_ARM_SCU
13 select HAVE_ARM_TWD if SMP
13 select HAVE_SMP 14 select HAVE_SMP
14 select MFD_SYSCON 15 select MFD_SYSCON
15 select SPARSE_IRQ 16 select SPARSE_IRQ
diff --git a/arch/arm/mach-tegra/fuse.c b/arch/arm/mach-tegra/fuse.c
index d4639c506622..3a9c1f1c219d 100644
--- a/arch/arm/mach-tegra/fuse.c
+++ b/arch/arm/mach-tegra/fuse.c
@@ -198,10 +198,12 @@ void __init tegra_init_fuse(void)
198 switch (tegra_chip_id) { 198 switch (tegra_chip_id) {
199 case TEGRA20: 199 case TEGRA20:
200 tegra20_fuse_init_randomness(); 200 tegra20_fuse_init_randomness();
201 break;
201 case TEGRA30: 202 case TEGRA30:
202 case TEGRA114: 203 case TEGRA114:
203 default: 204 default:
204 tegra30_fuse_init_randomness(); 205 tegra30_fuse_init_randomness();
206 break;
205 } 207 }
206 208
207 pr_info("Tegra Revision: %s SKU: %d CPU Process: %d Core Process: %d\n", 209 pr_info("Tegra Revision: %s SKU: %d CPU Process: %d Core Process: %d\n",
@@ -209,13 +211,3 @@ void __init tegra_init_fuse(void)
209 tegra_sku_id, tegra_cpu_process_id, 211 tegra_sku_id, tegra_cpu_process_id,
210 tegra_core_process_id); 212 tegra_core_process_id);
211} 213}
212
213unsigned long long tegra_chip_uid(void)
214{
215 unsigned long long lo, hi;
216
217 lo = tegra_fuse_readl(FUSE_UID_LOW);
218 hi = tegra_fuse_readl(FUSE_UID_HIGH);
219 return (hi << 32ull) | lo;
220}
221EXPORT_SYMBOL(tegra_chip_uid);
diff --git a/arch/arm/mach-ux500/cpu-db8500.c b/arch/arm/mach-ux500/cpu-db8500.c
index 2e85c1e72535..12c7e5c03ea4 100644
--- a/arch/arm/mach-ux500/cpu-db8500.c
+++ b/arch/arm/mach-ux500/cpu-db8500.c
@@ -140,6 +140,10 @@ static struct of_dev_auxdata u8500_auxdata_lookup[] __initdata = {
140 /* Requires call-back bindings. */ 140 /* Requires call-back bindings. */
141 OF_DEV_AUXDATA("arm,cortex-a9-pmu", 0, "arm-pmu", &db8500_pmu_platdata), 141 OF_DEV_AUXDATA("arm,cortex-a9-pmu", 0, "arm-pmu", &db8500_pmu_platdata),
142 /* Requires DMA bindings. */ 142 /* Requires DMA bindings. */
143 OF_DEV_AUXDATA("arm,pl18x", 0x80126000, "sdi0", &mop500_sdi0_data),
144 OF_DEV_AUXDATA("arm,pl18x", 0x80118000, "sdi1", &mop500_sdi1_data),
145 OF_DEV_AUXDATA("arm,pl18x", 0x80005000, "sdi2", &mop500_sdi2_data),
146 OF_DEV_AUXDATA("arm,pl18x", 0x80114000, "sdi4", &mop500_sdi4_data),
143 OF_DEV_AUXDATA("stericsson,ux500-msp-i2s", 0x80123000, 147 OF_DEV_AUXDATA("stericsson,ux500-msp-i2s", 0x80123000,
144 "ux500-msp-i2s.0", &msp0_platform_data), 148 "ux500-msp-i2s.0", &msp0_platform_data),
145 OF_DEV_AUXDATA("stericsson,ux500-msp-i2s", 0x80124000, 149 OF_DEV_AUXDATA("stericsson,ux500-msp-i2s", 0x80124000,
diff --git a/arch/arm/mach-vexpress/spc.c b/arch/arm/mach-vexpress/spc.c
index 033d34dcbd3f..c26ef5b92ca7 100644
--- a/arch/arm/mach-vexpress/spc.c
+++ b/arch/arm/mach-vexpress/spc.c
@@ -53,6 +53,11 @@
53#define A15_BX_ADDR0 0x68 53#define A15_BX_ADDR0 0x68
54#define A7_BX_ADDR0 0x78 54#define A7_BX_ADDR0 0x78
55 55
 56/* SPC CPU/cluster reset status */
57#define STANDBYWFI_STAT 0x3c
58#define STANDBYWFI_STAT_A15_CPU_MASK(cpu) (1 << (cpu))
59#define STANDBYWFI_STAT_A7_CPU_MASK(cpu) (1 << (3 + (cpu)))
60
56/* SPC system config interface registers */ 61/* SPC system config interface registers */
57#define SYSCFG_WDATA 0x70 62#define SYSCFG_WDATA 0x70
58#define SYSCFG_RDATA 0x74 63#define SYSCFG_RDATA 0x74
@@ -213,6 +218,41 @@ void ve_spc_powerdown(u32 cluster, bool enable)
213 writel_relaxed(enable, info->baseaddr + pwdrn_reg); 218 writel_relaxed(enable, info->baseaddr + pwdrn_reg);
214} 219}
215 220
221static u32 standbywfi_cpu_mask(u32 cpu, u32 cluster)
222{
223 return cluster_is_a15(cluster) ?
224 STANDBYWFI_STAT_A15_CPU_MASK(cpu)
225 : STANDBYWFI_STAT_A7_CPU_MASK(cpu);
226}
227
228/**
229 * ve_spc_cpu_in_wfi(u32 cpu, u32 cluster)
230 *
231 * @cpu: mpidr[7:0] bitfield describing CPU affinity level within cluster
232 * @cluster: mpidr[15:8] bitfield describing cluster affinity level
233 *
234 * @return: non-zero if and only if the specified CPU is in WFI
235 *
236 * Take care when interpreting the result of this function: a CPU might
237 * be in WFI temporarily due to idle, and is not necessarily safely
238 * parked.
239 */
240int ve_spc_cpu_in_wfi(u32 cpu, u32 cluster)
241{
242 int ret;
243 u32 mask = standbywfi_cpu_mask(cpu, cluster);
244
245 if (cluster >= MAX_CLUSTERS)
246 return 1;
247
248 ret = readl_relaxed(info->baseaddr + STANDBYWFI_STAT);
249
250 pr_debug("%s: PCFGREG[0x%X] = 0x%08X, mask = 0x%X\n",
251 __func__, STANDBYWFI_STAT, ret, mask);
252
253 return ret & mask;
254}
255
216static int ve_spc_get_performance(int cluster, u32 *freq) 256static int ve_spc_get_performance(int cluster, u32 *freq)
217{ 257{
218 struct ve_spc_opp *opps = info->opps[cluster]; 258 struct ve_spc_opp *opps = info->opps[cluster];
diff --git a/arch/arm/mach-vexpress/spc.h b/arch/arm/mach-vexpress/spc.h
index dbd44c3720f9..793d065243b9 100644
--- a/arch/arm/mach-vexpress/spc.h
+++ b/arch/arm/mach-vexpress/spc.h
@@ -20,5 +20,6 @@ void ve_spc_global_wakeup_irq(bool set);
20void ve_spc_cpu_wakeup_irq(u32 cluster, u32 cpu, bool set); 20void ve_spc_cpu_wakeup_irq(u32 cluster, u32 cpu, bool set);
21void ve_spc_set_resume_addr(u32 cluster, u32 cpu, u32 addr); 21void ve_spc_set_resume_addr(u32 cluster, u32 cpu, u32 addr);
22void ve_spc_powerdown(u32 cluster, bool enable); 22void ve_spc_powerdown(u32 cluster, bool enable);
23int ve_spc_cpu_in_wfi(u32 cpu, u32 cluster);
23 24
24#endif 25#endif
diff --git a/arch/arm/mach-vexpress/tc2_pm.c b/arch/arm/mach-vexpress/tc2_pm.c
index 05a364c5077a..29e7785a54bc 100644
--- a/arch/arm/mach-vexpress/tc2_pm.c
+++ b/arch/arm/mach-vexpress/tc2_pm.c
@@ -12,6 +12,7 @@
12 * published by the Free Software Foundation. 12 * published by the Free Software Foundation.
13 */ 13 */
14 14
15#include <linux/delay.h>
15#include <linux/init.h> 16#include <linux/init.h>
16#include <linux/io.h> 17#include <linux/io.h>
17#include <linux/kernel.h> 18#include <linux/kernel.h>
@@ -32,11 +33,17 @@
32#include "spc.h" 33#include "spc.h"
33 34
34/* SCC conf registers */ 35/* SCC conf registers */
36#define RESET_CTRL 0x018
37#define RESET_A15_NCORERESET(cpu) (1 << (2 + (cpu)))
38#define RESET_A7_NCORERESET(cpu) (1 << (16 + (cpu)))
39
35#define A15_CONF 0x400 40#define A15_CONF 0x400
36#define A7_CONF 0x500 41#define A7_CONF 0x500
37#define SYS_INFO 0x700 42#define SYS_INFO 0x700
38#define SPC_BASE 0xb00 43#define SPC_BASE 0xb00
39 44
45static void __iomem *scc;
46
40/* 47/*
41 * We can't use regular spinlocks. In the switcher case, it is possible 48 * We can't use regular spinlocks. In the switcher case, it is possible
42 * for an outbound CPU to call power_down() after its inbound counterpart 49 * for an outbound CPU to call power_down() after its inbound counterpart
@@ -190,6 +197,55 @@ static void tc2_pm_power_down(void)
190 tc2_pm_down(0); 197 tc2_pm_down(0);
191} 198}
192 199
200static int tc2_core_in_reset(unsigned int cpu, unsigned int cluster)
201{
202 u32 mask = cluster ?
203 RESET_A7_NCORERESET(cpu)
204 : RESET_A15_NCORERESET(cpu);
205
206 return !(readl_relaxed(scc + RESET_CTRL) & mask);
207}
208
209#define POLL_MSEC 10
210#define TIMEOUT_MSEC 1000
211
212static int tc2_pm_power_down_finish(unsigned int cpu, unsigned int cluster)
213{
214 unsigned tries;
215
216 pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
217 BUG_ON(cluster >= TC2_CLUSTERS || cpu >= TC2_MAX_CPUS_PER_CLUSTER);
218
219 for (tries = 0; tries < TIMEOUT_MSEC / POLL_MSEC; ++tries) {
220 /*
221 * Only examine the hardware state if the target CPU has
222 * caught up at least as far as tc2_pm_down():
223 */
224 if (ACCESS_ONCE(tc2_pm_use_count[cpu][cluster]) == 0) {
225 pr_debug("%s(cpu=%u, cluster=%u): RESET_CTRL = 0x%08X\n",
226 __func__, cpu, cluster,
227 readl_relaxed(scc + RESET_CTRL));
228
229 /*
230 * We need the CPU to reach WFI, but the power
231 * controller may put the cluster in reset and
232 * power it off as soon as that happens, before
233 * we have a chance to see STANDBYWFI.
234 *
235 * So we need to check for both conditions:
236 */
237 if (tc2_core_in_reset(cpu, cluster) ||
238 ve_spc_cpu_in_wfi(cpu, cluster))
239 return 0; /* success: the CPU is halted */
240 }
241
242 /* Otherwise, wait and retry: */
243 msleep(POLL_MSEC);
244 }
245
246 return -ETIMEDOUT; /* timeout */
247}
248
193static void tc2_pm_suspend(u64 residency) 249static void tc2_pm_suspend(u64 residency)
194{ 250{
195 unsigned int mpidr, cpu, cluster; 251 unsigned int mpidr, cpu, cluster;
@@ -232,10 +288,11 @@ static void tc2_pm_powered_up(void)
232} 288}
233 289
234static const struct mcpm_platform_ops tc2_pm_power_ops = { 290static const struct mcpm_platform_ops tc2_pm_power_ops = {
235 .power_up = tc2_pm_power_up, 291 .power_up = tc2_pm_power_up,
236 .power_down = tc2_pm_power_down, 292 .power_down = tc2_pm_power_down,
237 .suspend = tc2_pm_suspend, 293 .power_down_finish = tc2_pm_power_down_finish,
238 .powered_up = tc2_pm_powered_up, 294 .suspend = tc2_pm_suspend,
295 .powered_up = tc2_pm_powered_up,
239}; 296};
240 297
241static bool __init tc2_pm_usage_count_init(void) 298static bool __init tc2_pm_usage_count_init(void)
@@ -269,7 +326,6 @@ static void __naked tc2_pm_power_up_setup(unsigned int affinity_level)
269static int __init tc2_pm_init(void) 326static int __init tc2_pm_init(void)
270{ 327{
271 int ret, irq; 328 int ret, irq;
272 void __iomem *scc;
273 u32 a15_cluster_id, a7_cluster_id, sys_info; 329 u32 a15_cluster_id, a7_cluster_id, sys_info;
274 struct device_node *np; 330 struct device_node *np;
275 331
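
(For illustration, not from the patch: power_down_finish is the new callback wired into tc2_pm_power_ops above; 0 means the CPU reached reset or WFI, -ETIMEDOUT means it never got there within TIMEOUT_MSEC. A hypothetical caller sketch, with invented names and message text, of how a hot-unplug path could use it before tearing the core down.)

	/* hypothetical sketch only */
	static int wait_for_cpu_halt(const struct mcpm_platform_ops *ops,
				     unsigned int cpu, unsigned int cluster)
	{
		int ret = ops->power_down_finish(cpu, cluster);

		if (ret)
			pr_warn("CPU%u (cluster %u) did not reach reset/WFI: %d\n",
				cpu, cluster, ret);
		return ret;
	}
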
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 79f8b39801a8..f61a5707823a 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -9,6 +9,7 @@
9 * 9 *
10 * DMA uncached mapping support. 10 * DMA uncached mapping support.
11 */ 11 */
12#include <linux/bootmem.h>
12#include <linux/module.h> 13#include <linux/module.h>
13#include <linux/mm.h> 14#include <linux/mm.h>
14#include <linux/gfp.h> 15#include <linux/gfp.h>
@@ -157,6 +158,44 @@ struct dma_map_ops arm_coherent_dma_ops = {
157}; 158};
158EXPORT_SYMBOL(arm_coherent_dma_ops); 159EXPORT_SYMBOL(arm_coherent_dma_ops);
159 160
161static int __dma_supported(struct device *dev, u64 mask, bool warn)
162{
163 unsigned long max_dma_pfn;
164
165 /*
166 * If the mask allows for more memory than we can address,
167 * and we actually have that much memory, then we must
168 * indicate that DMA to this device is not supported.
169 */
170 if (sizeof(mask) != sizeof(dma_addr_t) &&
171 mask > (dma_addr_t)~0 &&
172 dma_to_pfn(dev, ~0) < max_pfn) {
173 if (warn) {
174 dev_warn(dev, "Coherent DMA mask %#llx is larger than dma_addr_t allows\n",
175 mask);
176 dev_warn(dev, "Driver did not use or check the return value from dma_set_coherent_mask()?\n");
177 }
178 return 0;
179 }
180
181 max_dma_pfn = min(max_pfn, arm_dma_pfn_limit);
182
183 /*
184 * Translate the device's DMA mask to a PFN limit. This
185 * PFN number includes the page which we can DMA to.
186 */
187 if (dma_to_pfn(dev, mask) < max_dma_pfn) {
188 if (warn)
189 dev_warn(dev, "Coherent DMA mask %#llx (pfn %#lx-%#lx) covers a smaller range of system memory than the DMA zone pfn 0x0-%#lx\n",
190 mask,
191 dma_to_pfn(dev, 0), dma_to_pfn(dev, mask) + 1,
192 max_dma_pfn + 1);
193 return 0;
194 }
195
196 return 1;
197}
198
160static u64 get_coherent_dma_mask(struct device *dev) 199static u64 get_coherent_dma_mask(struct device *dev)
161{ 200{
162 u64 mask = (u64)DMA_BIT_MASK(32); 201 u64 mask = (u64)DMA_BIT_MASK(32);
@@ -173,32 +212,8 @@ static u64 get_coherent_dma_mask(struct device *dev)
173 return 0; 212 return 0;
174 } 213 }
175 214
176 /* 215 if (!__dma_supported(dev, mask, true))
177 * If the mask allows for more memory than we can address,
178 * and we actually have that much memory, then fail the
179 * allocation.
180 */
181 if (sizeof(mask) != sizeof(dma_addr_t) &&
182 mask > (dma_addr_t)~0 &&
183 dma_to_pfn(dev, ~0) > arm_dma_pfn_limit) {
184 dev_warn(dev, "Coherent DMA mask %#llx is larger than dma_addr_t allows\n",
185 mask);
186 dev_warn(dev, "Driver did not use or check the return value from dma_set_coherent_mask()?\n");
187 return 0;
188 }
189
190 /*
191 * Now check that the mask, when translated to a PFN,
192 * fits within the allowable addresses which we can
193 * allocate.
194 */
195 if (dma_to_pfn(dev, mask) < arm_dma_pfn_limit) {
196 dev_warn(dev, "Coherent DMA mask %#llx (pfn %#lx-%#lx) covers a smaller range of system memory than the DMA zone pfn 0x0-%#lx\n",
197 mask,
198 dma_to_pfn(dev, 0), dma_to_pfn(dev, mask) + 1,
199 arm_dma_pfn_limit + 1);
200 return 0; 216 return 0;
201 }
202 } 217 }
203 218
204 return mask; 219 return mask;
@@ -1027,28 +1042,7 @@ void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
1027 */ 1042 */
1028int dma_supported(struct device *dev, u64 mask) 1043int dma_supported(struct device *dev, u64 mask)
1029{ 1044{
1030 unsigned long limit; 1045 return __dma_supported(dev, mask, false);
1031
1032 /*
1033 * If the mask allows for more memory than we can address,
1034 * and we actually have that much memory, then we must
1035 * indicate that DMA to this device is not supported.
1036 */
1037 if (sizeof(mask) != sizeof(dma_addr_t) &&
1038 mask > (dma_addr_t)~0 &&
1039 dma_to_pfn(dev, ~0) > arm_dma_pfn_limit)
1040 return 0;
1041
1042 /*
1043 * Translate the device's DMA mask to a PFN limit. This
1044 * PFN number includes the page which we can DMA to.
1045 */
1046 limit = dma_to_pfn(dev, mask);
1047
1048 if (limit < arm_dma_pfn_limit)
1049 return 0;
1050
1051 return 1;
1052} 1046}
1053EXPORT_SYMBOL(dma_supported); 1047EXPORT_SYMBOL(dma_supported);
1054 1048
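
(For illustration, not from the patch: after this refactor both the warning path used at coherent-allocation time and the silent dma_supported() check share __dma_supported(), which rejects any mask whose PFN range falls short of min(max_pfn, arm_dma_pfn_limit). A hedged driver-side sketch, with invented device and error handling, of the call that ends up in the silent path.)

	#include <linux/dma-mapping.h>
	#include <linux/platform_device.h>

	static int foo_probe(struct platform_device *pdev)
	{
		/* dma_set_coherent_mask() fails with -EIO when dma_supported(),
		 * i.e. __dma_supported(dev, mask, false), says the mask cannot
		 * be satisfied on this system. */
		int ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));

		if (ret)
			return ret;
		/* ... rest of probe ... */
		return 0;
	}
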
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index 6d5ba9afb16a..3387e60e4ea3 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -175,16 +175,16 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page)
175 unsigned long i; 175 unsigned long i;
176 if (cache_is_vipt_nonaliasing()) { 176 if (cache_is_vipt_nonaliasing()) {
177 for (i = 0; i < (1 << compound_order(page)); i++) { 177 for (i = 0; i < (1 << compound_order(page)); i++) {
178 void *addr = kmap_atomic(page); 178 void *addr = kmap_atomic(page + i);
179 __cpuc_flush_dcache_area(addr, PAGE_SIZE); 179 __cpuc_flush_dcache_area(addr, PAGE_SIZE);
180 kunmap_atomic(addr); 180 kunmap_atomic(addr);
181 } 181 }
182 } else { 182 } else {
183 for (i = 0; i < (1 << compound_order(page)); i++) { 183 for (i = 0; i < (1 << compound_order(page)); i++) {
184 void *addr = kmap_high_get(page); 184 void *addr = kmap_high_get(page + i);
185 if (addr) { 185 if (addr) {
186 __cpuc_flush_dcache_area(addr, PAGE_SIZE); 186 __cpuc_flush_dcache_area(addr, PAGE_SIZE);
187 kunmap_high(page); 187 kunmap_high(page + i);
188 } 188 }
189 } 189 }
190 } 190 }
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 3e8f106ee5fe..1f7b19a47060 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -229,7 +229,7 @@ void __init setup_dma_zone(const struct machine_desc *mdesc)
229#ifdef CONFIG_ZONE_DMA 229#ifdef CONFIG_ZONE_DMA
230 if (mdesc->dma_zone_size) { 230 if (mdesc->dma_zone_size) {
231 arm_dma_zone_size = mdesc->dma_zone_size; 231 arm_dma_zone_size = mdesc->dma_zone_size;
232 arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1; 232 arm_dma_limit = __pv_phys_offset + arm_dma_zone_size - 1;
233 } else 233 } else
234 arm_dma_limit = 0xffffffff; 234 arm_dma_limit = 0xffffffff;
235 arm_dma_pfn_limit = arm_dma_limit >> PAGE_SHIFT; 235 arm_dma_pfn_limit = arm_dma_limit >> PAGE_SHIFT;
diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
index d27158c38eb0..5e85ed371364 100644
--- a/arch/arm/mm/mmap.c
+++ b/arch/arm/mm/mmap.c
@@ -146,7 +146,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
146 146
147 info.flags = VM_UNMAPPED_AREA_TOPDOWN; 147 info.flags = VM_UNMAPPED_AREA_TOPDOWN;
148 info.length = len; 148 info.length = len;
149 info.low_limit = PAGE_SIZE; 149 info.low_limit = FIRST_USER_ADDRESS;
150 info.high_limit = mm->mmap_base; 150 info.high_limit = mm->mmap_base;
151 info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0; 151 info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
152 info.align_offset = pgoff << PAGE_SHIFT; 152 info.align_offset = pgoff << PAGE_SHIFT;
diff --git a/arch/arm/mm/pgd.c b/arch/arm/mm/pgd.c
index 0acb089d0f70..1046b373d1ae 100644
--- a/arch/arm/mm/pgd.c
+++ b/arch/arm/mm/pgd.c
@@ -87,7 +87,8 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
87 init_pud = pud_offset(init_pgd, 0); 87 init_pud = pud_offset(init_pgd, 0);
88 init_pmd = pmd_offset(init_pud, 0); 88 init_pmd = pmd_offset(init_pud, 0);
89 init_pte = pte_offset_map(init_pmd, 0); 89 init_pte = pte_offset_map(init_pmd, 0);
90 set_pte_ext(new_pte, *init_pte, 0); 90 set_pte_ext(new_pte + 0, init_pte[0], 0);
91 set_pte_ext(new_pte + 1, init_pte[1], 0);
91 pte_unmap(init_pte); 92 pte_unmap(init_pte);
92 pte_unmap(new_pte); 93 pte_unmap(new_pte);
93 } 94 }
diff --git a/arch/arm/plat-omap/include/plat/dmtimer.h b/arch/arm/plat-omap/include/plat/dmtimer.h
index fb92abb91628..2861b155485a 100644
--- a/arch/arm/plat-omap/include/plat/dmtimer.h
+++ b/arch/arm/plat-omap/include/plat/dmtimer.h
@@ -336,8 +336,11 @@ static inline void __omap_dm_timer_enable_posted(struct omap_dm_timer *timer)
336 if (timer->posted) 336 if (timer->posted)
337 return; 337 return;
338 338
339 if (timer->errata & OMAP_TIMER_ERRATA_I103_I767) 339 if (timer->errata & OMAP_TIMER_ERRATA_I103_I767) {
340 timer->posted = OMAP_TIMER_NONPOSTED;
341 __omap_dm_timer_write(timer, OMAP_TIMER_IF_CTRL_REG, 0, 0);
340 return; 342 return;
343 }
341 344
342 __omap_dm_timer_write(timer, OMAP_TIMER_IF_CTRL_REG, 345 __omap_dm_timer_write(timer, OMAP_TIMER_IF_CTRL_REG,
343 OMAP_TIMER_CTRL_POSTED, 0); 346 OMAP_TIMER_CTRL_POSTED, 0);
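
(For illustration, not from the patch: on parts hit by errata i103/i767 the hunk above now records OMAP_TIMER_NONPOSTED and explicitly writes 0 to the interface control register instead of returning with whatever the bootloader left there. A rough, hedged sketch of what posted mode costs and non-posted mode skips; the pend/WPSHIFT names mirror this header but are quoted from memory, so treat them as assumptions.)

	static inline void timer_write(struct omap_dm_timer *timer, u32 reg, u32 val)
	{
		if (timer->posted)			/* posted mode only */
			while (readl_relaxed(timer->pend) & (reg >> WPSHIFT))
				cpu_relax();		/* wait out the previous write */
		writel_relaxed(val, timer->func_base + (reg & 0xff));
	}
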
diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
index 83e4f959ee47..85501238b425 100644
--- a/arch/arm/xen/enlighten.c
+++ b/arch/arm/xen/enlighten.c
@@ -96,7 +96,7 @@ static int remap_pte_fn(pte_t *ptep, pgtable_t token, unsigned long addr,
96 struct remap_data *info = data; 96 struct remap_data *info = data;
97 struct page *page = info->pages[info->index++]; 97 struct page *page = info->pages[info->index++];
98 unsigned long pfn = page_to_pfn(page); 98 unsigned long pfn = page_to_pfn(page);
99 pte_t pte = pfn_pte(pfn, info->prot); 99 pte_t pte = pte_mkspecial(pfn_pte(pfn, info->prot));
100 100
101 if (map_foreign_page(pfn, info->fgmfn, info->domid)) 101 if (map_foreign_page(pfn, info->fgmfn, info->domid))
102 return -EFAULT; 102 return -EFAULT;
@@ -224,10 +224,10 @@ static int __init xen_guest_init(void)
224 } 224 }
225 if (of_address_to_resource(node, GRANT_TABLE_PHYSADDR, &res)) 225 if (of_address_to_resource(node, GRANT_TABLE_PHYSADDR, &res))
226 return 0; 226 return 0;
227 xen_hvm_resume_frames = res.start >> PAGE_SHIFT; 227 xen_hvm_resume_frames = res.start;
228 xen_events_irq = irq_of_parse_and_map(node, 0); 228 xen_events_irq = irq_of_parse_and_map(node, 0);
229 pr_info("Xen %s support found, events_irq=%d gnttab_frame_pfn=%lx\n", 229 pr_info("Xen %s support found, events_irq=%d gnttab_frame_pfn=%lx\n",
230 version, xen_events_irq, xen_hvm_resume_frames); 230 version, xen_events_irq, (xen_hvm_resume_frames >> PAGE_SHIFT));
231 xen_domain_type = XEN_HVM_DOMAIN; 231 xen_domain_type = XEN_HVM_DOMAIN;
232 232
233 xen_setup_features(); 233 xen_setup_features();
diff --git a/arch/arm/xen/p2m.c b/arch/arm/xen/p2m.c
index 23732cdff551..b31ee1b275b0 100644
--- a/arch/arm/xen/p2m.c
+++ b/arch/arm/xen/p2m.c
@@ -25,8 +25,9 @@ struct xen_p2m_entry {
25 struct rb_node rbnode_phys; 25 struct rb_node rbnode_phys;
26}; 26};
27 27
28rwlock_t p2m_lock; 28static rwlock_t p2m_lock;
29struct rb_root phys_to_mach = RB_ROOT; 29struct rb_root phys_to_mach = RB_ROOT;
30EXPORT_SYMBOL_GPL(phys_to_mach);
30static struct rb_root mach_to_phys = RB_ROOT; 31static struct rb_root mach_to_phys = RB_ROOT;
31 32
32static int xen_add_phys_to_mach_entry(struct xen_p2m_entry *new) 33static int xen_add_phys_to_mach_entry(struct xen_p2m_entry *new)
@@ -200,7 +201,7 @@ bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
200} 201}
201EXPORT_SYMBOL_GPL(__set_phys_to_machine); 202EXPORT_SYMBOL_GPL(__set_phys_to_machine);
202 203
203int p2m_init(void) 204static int p2m_init(void)
204{ 205{
205 rwlock_init(&p2m_lock); 206 rwlock_init(&p2m_lock);
206 return 0; 207 return 0;
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 88c8b6c1341a..6d4dd22ee4b7 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -159,8 +159,7 @@ config NR_CPUS
159 range 2 32 159 range 2 32
160 depends on SMP 160 depends on SMP
161 # These have to remain sorted largest to smallest 161 # These have to remain sorted largest to smallest
162 default "8" if ARCH_XGENE 162 default "8"
163 default "4"
164 163
165config HOTPLUG_CPU 164config HOTPLUG_CPU
166 bool "Support for hot-pluggable CPUs" 165 bool "Support for hot-pluggable CPUs"
diff --git a/arch/arm64/boot/dts/foundation-v8.dts b/arch/arm64/boot/dts/foundation-v8.dts
index 84fcc5018284..519c4b2c0687 100644
--- a/arch/arm64/boot/dts/foundation-v8.dts
+++ b/arch/arm64/boot/dts/foundation-v8.dts
@@ -6,6 +6,8 @@
6 6
7/dts-v1/; 7/dts-v1/;
8 8
9/memreserve/ 0x80000000 0x00010000;
10
9/ { 11/ {
10 model = "Foundation-v8A"; 12 model = "Foundation-v8A";
11 compatible = "arm,foundation-aarch64", "arm,vexpress"; 13 compatible = "arm,foundation-aarch64", "arm,vexpress";
diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
index 4cc813eddacb..572769727227 100644
--- a/arch/arm64/include/asm/io.h
+++ b/arch/arm64/include/asm/io.h
@@ -229,7 +229,7 @@ extern void __iomem *__ioremap(phys_addr_t phys_addr, size_t size, pgprot_t prot
229extern void __iounmap(volatile void __iomem *addr); 229extern void __iounmap(volatile void __iomem *addr);
230extern void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size); 230extern void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size);
231 231
232#define PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF | PTE_DIRTY) 232#define PROT_DEFAULT (pgprot_default | PTE_DIRTY)
233#define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRE)) 233#define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRE))
234#define PROT_NORMAL_NC (PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL_NC)) 234#define PROT_NORMAL_NC (PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL_NC))
235#define PROT_NORMAL (PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL)) 235#define PROT_NORMAL (PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL))
diff --git a/arch/arm64/include/asm/irqflags.h b/arch/arm64/include/asm/irqflags.h
index aa11943b8502..b2fcfbc51ecc 100644
--- a/arch/arm64/include/asm/irqflags.h
+++ b/arch/arm64/include/asm/irqflags.h
@@ -56,6 +56,9 @@ static inline void arch_local_irq_disable(void)
56#define local_fiq_enable() asm("msr daifclr, #1" : : : "memory") 56#define local_fiq_enable() asm("msr daifclr, #1" : : : "memory")
57#define local_fiq_disable() asm("msr daifset, #1" : : : "memory") 57#define local_fiq_disable() asm("msr daifset, #1" : : : "memory")
58 58
59#define local_async_enable() asm("msr daifclr, #4" : : : "memory")
60#define local_async_disable() asm("msr daifset, #4" : : : "memory")
61
59/* 62/*
60 * Save the current interrupt enable state. 63 * Save the current interrupt enable state.
61 */ 64 */
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index 755f86143320..b1d2e26c3c88 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -43,7 +43,7 @@
43 * Section 43 * Section
44 */ 44 */
45#define PMD_SECT_VALID (_AT(pmdval_t, 1) << 0) 45#define PMD_SECT_VALID (_AT(pmdval_t, 1) << 0)
46#define PMD_SECT_PROT_NONE (_AT(pmdval_t, 1) << 2) 46#define PMD_SECT_PROT_NONE (_AT(pmdval_t, 1) << 58)
47#define PMD_SECT_USER (_AT(pmdval_t, 1) << 6) /* AP[1] */ 47#define PMD_SECT_USER (_AT(pmdval_t, 1) << 6) /* AP[1] */
48#define PMD_SECT_RDONLY (_AT(pmdval_t, 1) << 7) /* AP[2] */ 48#define PMD_SECT_RDONLY (_AT(pmdval_t, 1) << 7) /* AP[2] */
49#define PMD_SECT_S (_AT(pmdval_t, 3) << 8) 49#define PMD_SECT_S (_AT(pmdval_t, 3) << 8)
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 17bd3af0a117..7f2b60affbb4 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -25,10 +25,11 @@
25 * Software defined PTE bits definition. 25 * Software defined PTE bits definition.
26 */ 26 */
27#define PTE_VALID (_AT(pteval_t, 1) << 0) 27#define PTE_VALID (_AT(pteval_t, 1) << 0)
28#define PTE_PROT_NONE (_AT(pteval_t, 1) << 2) /* only when !PTE_VALID */ 28#define PTE_FILE (_AT(pteval_t, 1) << 2) /* only when !pte_present() */
29#define PTE_FILE (_AT(pteval_t, 1) << 3) /* only when !pte_present() */
30#define PTE_DIRTY (_AT(pteval_t, 1) << 55) 29#define PTE_DIRTY (_AT(pteval_t, 1) << 55)
31#define PTE_SPECIAL (_AT(pteval_t, 1) << 56) 30#define PTE_SPECIAL (_AT(pteval_t, 1) << 56)
31 /* bit 57 for PMD_SECT_SPLITTING */
32#define PTE_PROT_NONE (_AT(pteval_t, 1) << 58) /* only when !PTE_VALID */
32 33
33/* 34/*
34 * VMALLOC and SPARSEMEM_VMEMMAP ranges. 35 * VMALLOC and SPARSEMEM_VMEMMAP ranges.
@@ -254,7 +255,7 @@ static inline int has_transparent_hugepage(void)
254#define pgprot_noncached(prot) \ 255#define pgprot_noncached(prot) \
255 __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE)) 256 __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE))
256#define pgprot_writecombine(prot) \ 257#define pgprot_writecombine(prot) \
257 __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_GRE)) 258 __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC))
258#define pgprot_dmacoherent(prot) \ 259#define pgprot_dmacoherent(prot) \
259 __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC)) 260 __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC))
260#define __HAVE_PHYS_MEM_ACCESS_PROT 261#define __HAVE_PHYS_MEM_ACCESS_PROT
@@ -357,18 +358,20 @@ extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
357 358
358/* 359/*
359 * Encode and decode a swap entry: 360 * Encode and decode a swap entry:
360 * bits 0, 2: present (must both be zero) 361 * bits 0-1: present (must be zero)
361 * bit 3: PTE_FILE 362 * bit 2: PTE_FILE
362 * bits 4-8: swap type 363 * bits 3-8: swap type
363 * bits 9-63: swap offset 364 * bits 9-57: swap offset
364 */ 365 */
365#define __SWP_TYPE_SHIFT 4 366#define __SWP_TYPE_SHIFT 3
366#define __SWP_TYPE_BITS 6 367#define __SWP_TYPE_BITS 6
368#define __SWP_OFFSET_BITS 49
367#define __SWP_TYPE_MASK ((1 << __SWP_TYPE_BITS) - 1) 369#define __SWP_TYPE_MASK ((1 << __SWP_TYPE_BITS) - 1)
368#define __SWP_OFFSET_SHIFT (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT) 370#define __SWP_OFFSET_SHIFT (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)
371#define __SWP_OFFSET_MASK ((1UL << __SWP_OFFSET_BITS) - 1)
369 372
370#define __swp_type(x) (((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK) 373#define __swp_type(x) (((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
371#define __swp_offset(x) ((x).val >> __SWP_OFFSET_SHIFT) 374#define __swp_offset(x) (((x).val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK)
372#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) }) 375#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })
373 376
374#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) 377#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
@@ -382,15 +385,15 @@ extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
382 385
383/* 386/*
384 * Encode and decode a file entry: 387 * Encode and decode a file entry:
385 * bits 0, 2: present (must both be zero) 388 * bits 0-1: present (must be zero)
386 * bit 3: PTE_FILE 389 * bit 2: PTE_FILE
387 * bits 4-63: file offset / PAGE_SIZE 390 * bits 3-57: file offset / PAGE_SIZE
388 */ 391 */
389#define pte_file(pte) (pte_val(pte) & PTE_FILE) 392#define pte_file(pte) (pte_val(pte) & PTE_FILE)
390#define pte_to_pgoff(x) (pte_val(x) >> 4) 393#define pte_to_pgoff(x) (pte_val(x) >> 3)
391#define pgoff_to_pte(x) __pte(((x) << 4) | PTE_FILE) 394#define pgoff_to_pte(x) __pte(((x) << 3) | PTE_FILE)
392 395
393#define PTE_FILE_MAX_BITS 60 396#define PTE_FILE_MAX_BITS 55
394 397
395extern int kern_addr_valid(unsigned long addr); 398extern int kern_addr_valid(unsigned long addr);
396 399
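
With PTE_PROT_NONE moved up to bit 58 and PTE_FILE down to bit 2, the swap offset shrinks to bits 9-57 (49 bits) and gains an explicit mask. A minimal userspace sketch of the resulting encode/decode, using simplified stand-in names for the __SWP_* constants in the hunk above:

    /* Sketch (not from the patch): demo of the new arm64 swap-entry packing,
     * assuming type in bits 3-8 and offset in bits 9-57 as defined above. */
    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define SWP_TYPE_SHIFT   3
    #define SWP_TYPE_BITS    6
    #define SWP_OFFSET_BITS  49
    #define SWP_TYPE_MASK    ((1UL << SWP_TYPE_BITS) - 1)
    #define SWP_OFFSET_SHIFT (SWP_TYPE_BITS + SWP_TYPE_SHIFT)
    #define SWP_OFFSET_MASK  ((1UL << SWP_OFFSET_BITS) - 1)

    static uint64_t swp_entry(unsigned type, uint64_t offset)
    {
            return ((uint64_t)type << SWP_TYPE_SHIFT) | (offset << SWP_OFFSET_SHIFT);
    }

    int main(void)
    {
            uint64_t e = swp_entry(5, 0x12345678);

            /* bits 0-1 stay zero, so the entry never looks like a valid PTE */
            assert((e & 0x3) == 0);
            assert(((e >> SWP_TYPE_SHIFT) & SWP_TYPE_MASK) == 5);
            assert(((e >> SWP_OFFSET_SHIFT) & SWP_OFFSET_MASK) == 0x12345678);
            printf("entry = %#llx\n", (unsigned long long)e);
            return 0;
    }
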
diff --git a/arch/arm64/include/asm/xen/page-coherent.h b/arch/arm64/include/asm/xen/page-coherent.h
index 2820f1a6eebe..dde3fc9c49f0 100644
--- a/arch/arm64/include/asm/xen/page-coherent.h
+++ b/arch/arm64/include/asm/xen/page-coherent.h
@@ -23,25 +23,21 @@ static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
23 unsigned long offset, size_t size, enum dma_data_direction dir, 23 unsigned long offset, size_t size, enum dma_data_direction dir,
24 struct dma_attrs *attrs) 24 struct dma_attrs *attrs)
25{ 25{
26 __generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
27} 26}
28 27
29static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle, 28static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
30 size_t size, enum dma_data_direction dir, 29 size_t size, enum dma_data_direction dir,
31 struct dma_attrs *attrs) 30 struct dma_attrs *attrs)
32{ 31{
33 __generic_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs);
34} 32}
35 33
36static inline void xen_dma_sync_single_for_cpu(struct device *hwdev, 34static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
37 dma_addr_t handle, size_t size, enum dma_data_direction dir) 35 dma_addr_t handle, size_t size, enum dma_data_direction dir)
38{ 36{
39 __generic_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir);
40} 37}
41 38
42static inline void xen_dma_sync_single_for_device(struct device *hwdev, 39static inline void xen_dma_sync_single_for_device(struct device *hwdev,
43 dma_addr_t handle, size_t size, enum dma_data_direction dir) 40 dma_addr_t handle, size_t size, enum dma_data_direction dir)
44{ 41{
45 __generic_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir);
46} 42}
47#endif /* _ASM_ARM64_XEN_PAGE_COHERENT_H */ 43#endif /* _ASM_ARM64_XEN_PAGE_COHERENT_H */
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
index 6a0a9b132d7a..4ae68579031d 100644
--- a/arch/arm64/kernel/debug-monitors.c
+++ b/arch/arm64/kernel/debug-monitors.c
@@ -248,7 +248,8 @@ static int brk_handler(unsigned long addr, unsigned int esr,
248int aarch32_break_handler(struct pt_regs *regs) 248int aarch32_break_handler(struct pt_regs *regs)
249{ 249{
250 siginfo_t info; 250 siginfo_t info;
251 unsigned int instr; 251 u32 arm_instr;
252 u16 thumb_instr;
252 bool bp = false; 253 bool bp = false;
253 void __user *pc = (void __user *)instruction_pointer(regs); 254 void __user *pc = (void __user *)instruction_pointer(regs);
254 255
@@ -257,18 +258,21 @@ int aarch32_break_handler(struct pt_regs *regs)
257 258
258 if (compat_thumb_mode(regs)) { 259 if (compat_thumb_mode(regs)) {
259 /* get 16-bit Thumb instruction */ 260 /* get 16-bit Thumb instruction */
260 get_user(instr, (u16 __user *)pc); 261 get_user(thumb_instr, (u16 __user *)pc);
261 if (instr == AARCH32_BREAK_THUMB2_LO) { 262 thumb_instr = le16_to_cpu(thumb_instr);
263 if (thumb_instr == AARCH32_BREAK_THUMB2_LO) {
262 /* get second half of 32-bit Thumb-2 instruction */ 264 /* get second half of 32-bit Thumb-2 instruction */
263 get_user(instr, (u16 __user *)(pc + 2)); 265 get_user(thumb_instr, (u16 __user *)(pc + 2));
264 bp = instr == AARCH32_BREAK_THUMB2_HI; 266 thumb_instr = le16_to_cpu(thumb_instr);
267 bp = thumb_instr == AARCH32_BREAK_THUMB2_HI;
265 } else { 268 } else {
266 bp = instr == AARCH32_BREAK_THUMB; 269 bp = thumb_instr == AARCH32_BREAK_THUMB;
267 } 270 }
268 } else { 271 } else {
269 /* 32-bit ARM instruction */ 272 /* 32-bit ARM instruction */
270 get_user(instr, (u32 __user *)pc); 273 get_user(arm_instr, (u32 __user *)pc);
271 bp = (instr & ~0xf0000000) == AARCH32_BREAK_ARM; 274 arm_instr = le32_to_cpu(arm_instr);
275 bp = (arm_instr & ~0xf0000000) == AARCH32_BREAK_ARM;
272 } 276 }
273 277
274 if (!bp) 278 if (!bp)
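
The explicit u16/u32 temporaries plus le16_to_cpu()/le32_to_cpu() matter because AArch32 instructions are stored little-endian in memory even when the kernel itself runs big-endian, so the raw load has to be byte-swapped before being compared with the break opcodes. A small userspace analogue of the same pattern, using glibc's <endian.h>; the opcode value below is made up for the demo:

    #include <endian.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define BREAK_THUMB 0xde01   /* hypothetical opcode value, demo only */

    int main(void)
    {
            /* instruction bytes as they sit in memory: always little-endian */
            uint8_t text[2] = { 0x01, 0xde };
            uint16_t raw;

            memcpy(&raw, text, sizeof(raw));        /* like get_user() */
            printf("raw=%#x decoded=%#x\n", raw, le16toh(raw));
            /* on a big-endian host raw is 0x01de; only le16toh() yields 0xde01 */
            return le16toh(raw) == BREAK_THUMB ? 0 : 1;
    }
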
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index e1166145ca29..4d2c6f3f0c41 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -309,15 +309,12 @@ el1_irq:
309#ifdef CONFIG_TRACE_IRQFLAGS 309#ifdef CONFIG_TRACE_IRQFLAGS
310 bl trace_hardirqs_off 310 bl trace_hardirqs_off
311#endif 311#endif
312#ifdef CONFIG_PREEMPT 312
313 get_thread_info tsk
314 ldr w24, [tsk, #TI_PREEMPT] // get preempt count
315 add w0, w24, #1 // increment it
316 str w0, [tsk, #TI_PREEMPT]
317#endif
318 irq_handler 313 irq_handler
314
319#ifdef CONFIG_PREEMPT 315#ifdef CONFIG_PREEMPT
320 str w24, [tsk, #TI_PREEMPT] // restore preempt count 316 get_thread_info tsk
317 ldr w24, [tsk, #TI_PREEMPT] // restore preempt count
321 cbnz w24, 1f // preempt count != 0 318 cbnz w24, 1f // preempt count != 0
322 ldr x0, [tsk, #TI_FLAGS] // get flags 319 ldr x0, [tsk, #TI_FLAGS] // get flags
323 tbz x0, #TIF_NEED_RESCHED, 1f // needs rescheduling? 320 tbz x0, #TIF_NEED_RESCHED, 1f // needs rescheduling?
@@ -507,22 +504,10 @@ el0_irq_naked:
507#ifdef CONFIG_TRACE_IRQFLAGS 504#ifdef CONFIG_TRACE_IRQFLAGS
508 bl trace_hardirqs_off 505 bl trace_hardirqs_off
509#endif 506#endif
510 get_thread_info tsk 507
511#ifdef CONFIG_PREEMPT
512 ldr w24, [tsk, #TI_PREEMPT] // get preempt count
513 add w23, w24, #1 // increment it
514 str w23, [tsk, #TI_PREEMPT]
515#endif
516 irq_handler 508 irq_handler
517#ifdef CONFIG_PREEMPT 509 get_thread_info tsk
518 ldr w0, [tsk, #TI_PREEMPT] 510
519 str w24, [tsk, #TI_PREEMPT]
520 cmp w0, w23
521 b.eq 1f
522 mov x1, #0
523 str x1, [x1] // BUG
5241:
525#endif
526#ifdef CONFIG_TRACE_IRQFLAGS 511#ifdef CONFIG_TRACE_IRQFLAGS
527 bl trace_hardirqs_on 512 bl trace_hardirqs_on
528#endif 513#endif
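
The hand-rolled TI_PREEMPT increment/restore around irq_handler is dropped; the generic IRQ entry path already accounts for hardirq context, so only the rescheduling check still needs the thread_info pointer afterwards. Roughly what that generic path does (a simplified sketch, not the exact kernel source):

    /* irq_handler ends up in handle_IRQ(), which brackets the handler: */
    void handle_IRQ(unsigned int irq, struct pt_regs *regs)
    {
            irq_enter();              /* preempt_count += HARDIRQ_OFFSET */
            generic_handle_irq(irq);  /* run the handler */
            irq_exit();               /* undo it, run pending softirqs */
    }
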
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 7009387348b7..c68cca5c3523 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -282,8 +282,9 @@ ENDPROC(secondary_holding_pen)
282 * be used where CPUs are brought online dynamically by the kernel. 282 * be used where CPUs are brought online dynamically by the kernel.
283 */ 283 */
284ENTRY(secondary_entry) 284ENTRY(secondary_entry)
285 bl __calc_phys_offset // x2=phys offset
286 bl el2_setup // Drop to EL1 285 bl el2_setup // Drop to EL1
286 bl __calc_phys_offset // x24=PHYS_OFFSET, x28=PHYS_OFFSET-PAGE_OFFSET
287 bl set_cpu_boot_mode_flag
287 b secondary_startup 288 b secondary_startup
288ENDPROC(secondary_entry) 289ENDPROC(secondary_entry)
289 290
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index fecdbf7de82e..6a8928bba03c 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -214,31 +214,29 @@ static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type,
214{ 214{
215 int err, len, type, disabled = !ctrl.enabled; 215 int err, len, type, disabled = !ctrl.enabled;
216 216
217 if (disabled) { 217 attr->disabled = disabled;
218 len = 0; 218 if (disabled)
219 type = HW_BREAKPOINT_EMPTY; 219 return 0;
220 } else { 220
221 err = arch_bp_generic_fields(ctrl, &len, &type); 221 err = arch_bp_generic_fields(ctrl, &len, &type);
222 if (err) 222 if (err)
223 return err; 223 return err;
224 224
225 switch (note_type) { 225 switch (note_type) {
226 case NT_ARM_HW_BREAK: 226 case NT_ARM_HW_BREAK:
227 if ((type & HW_BREAKPOINT_X) != type) 227 if ((type & HW_BREAKPOINT_X) != type)
228 return -EINVAL;
229 break;
230 case NT_ARM_HW_WATCH:
231 if ((type & HW_BREAKPOINT_RW) != type)
232 return -EINVAL;
233 break;
234 default:
235 return -EINVAL; 228 return -EINVAL;
236 } 229 break;
230 case NT_ARM_HW_WATCH:
231 if ((type & HW_BREAKPOINT_RW) != type)
232 return -EINVAL;
233 break;
234 default:
235 return -EINVAL;
237 } 236 }
238 237
239 attr->bp_len = len; 238 attr->bp_len = len;
240 attr->bp_type = type; 239 attr->bp_type = type;
241 attr->disabled = disabled;
242 240
243 return 0; 241 return 0;
244} 242}
@@ -636,28 +634,27 @@ static int compat_gpr_get(struct task_struct *target,
636 634
637 for (i = 0; i < num_regs; ++i) { 635 for (i = 0; i < num_regs; ++i) {
638 unsigned int idx = start + i; 636 unsigned int idx = start + i;
639 void *reg; 637 compat_ulong_t reg;
640 638
641 switch (idx) { 639 switch (idx) {
642 case 15: 640 case 15:
643 reg = (void *)&task_pt_regs(target)->pc; 641 reg = task_pt_regs(target)->pc;
644 break; 642 break;
645 case 16: 643 case 16:
646 reg = (void *)&task_pt_regs(target)->pstate; 644 reg = task_pt_regs(target)->pstate;
647 break; 645 break;
648 case 17: 646 case 17:
649 reg = (void *)&task_pt_regs(target)->orig_x0; 647 reg = task_pt_regs(target)->orig_x0;
650 break; 648 break;
651 default: 649 default:
652 reg = (void *)&task_pt_regs(target)->regs[idx]; 650 reg = task_pt_regs(target)->regs[idx];
653 } 651 }
654 652
655 ret = copy_to_user(ubuf, reg, sizeof(compat_ulong_t)); 653 ret = copy_to_user(ubuf, &reg, sizeof(reg));
656
657 if (ret) 654 if (ret)
658 break; 655 break;
659 else 656
660 ubuf += sizeof(compat_ulong_t); 657 ubuf += sizeof(reg);
661 } 658 }
662 659
663 return ret; 660 return ret;
@@ -685,28 +682,28 @@ static int compat_gpr_set(struct task_struct *target,
685 682
686 for (i = 0; i < num_regs; ++i) { 683 for (i = 0; i < num_regs; ++i) {
687 unsigned int idx = start + i; 684 unsigned int idx = start + i;
688 void *reg; 685 compat_ulong_t reg;
686
687 ret = copy_from_user(&reg, ubuf, sizeof(reg));
688 if (ret)
689 return ret;
690
691 ubuf += sizeof(reg);
689 692
690 switch (idx) { 693 switch (idx) {
691 case 15: 694 case 15:
692 reg = (void *)&newregs.pc; 695 newregs.pc = reg;
693 break; 696 break;
694 case 16: 697 case 16:
695 reg = (void *)&newregs.pstate; 698 newregs.pstate = reg;
696 break; 699 break;
697 case 17: 700 case 17:
698 reg = (void *)&newregs.orig_x0; 701 newregs.orig_x0 = reg;
699 break; 702 break;
700 default: 703 default:
701 reg = (void *)&newregs.regs[idx]; 704 newregs.regs[idx] = reg;
702 } 705 }
703 706
704 ret = copy_from_user(reg, ubuf, sizeof(compat_ulong_t));
705
706 if (ret)
707 goto out;
708 else
709 ubuf += sizeof(compat_ulong_t);
710 } 707 }
711 708
712 if (valid_user_regs(&newregs.user_regs)) 709 if (valid_user_regs(&newregs.user_regs))
@@ -714,7 +711,6 @@ static int compat_gpr_set(struct task_struct *target,
714 else 711 else
715 ret = -EINVAL; 712 ret = -EINVAL;
716 713
717out:
718 return ret; 714 return ret;
719} 715}
720 716
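
Copying each register through a local compat_ulong_t, instead of pointing copy_to_user()/copy_from_user() at 4 bytes of a 64-bit pt_regs slot, keeps the code correct on big-endian kernels, where the first 4 bytes of that slot hold the high half. A userspace illustration of the difference:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            uint64_t x19 = 0x00000000deadbeefULL;   /* a 64-bit pt_regs slot */
            uint32_t wrong, right = (uint32_t)x19;  /* truncation is endian-safe */

            memcpy(&wrong, &x19, sizeof(wrong));    /* first 4 bytes in memory */
            printf("by-pointer: %#x, by-value: %#x\n", wrong, right);
            /* little-endian: both print 0xdeadbeef; big-endian: by-pointer is 0 */
            return 0;
    }
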
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 0bc5e4cbc017..bd9bbd0e44ed 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -205,6 +205,11 @@ u64 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = INVALID_HWID };
205 205
206void __init setup_arch(char **cmdline_p) 206void __init setup_arch(char **cmdline_p)
207{ 207{
208 /*
209 * Unmask asynchronous aborts early to catch possible system errors.
210 */
211 local_async_enable();
212
208 setup_processor(); 213 setup_processor();
209 214
210 setup_machine_fdt(__fdt_pointer); 215 setup_machine_fdt(__fdt_pointer);
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index a5aeefab03c3..a0c2ca602cf8 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -160,6 +160,7 @@ asmlinkage void secondary_start_kernel(void)
160 160
161 local_irq_enable(); 161 local_irq_enable();
162 local_fiq_enable(); 162 local_fiq_enable();
163 local_async_enable();
163 164
164 /* 165 /*
165 * OK, it's off to the idle thread for us 166 * OK, it's off to the idle thread for us
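
Both the boot CPU (in setup_arch()) and the secondaries now unmask asynchronous aborts as early as possible, so an SError raised during early system setup is reported instead of sitting pending. local_async_enable() is presumably the trivial DAIF helper sketched below; the definition is an assumption, not part of this hunk:

    static inline void local_async_enable(void)
    {
            asm volatile("msr daifclr, #4" : : : "memory"); /* clear PSTATE.A */
    }
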
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 421b99fd635d..0f7fec52c7f8 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -111,12 +111,12 @@ ENTRY(__cpu_setup)
111 bl __flush_dcache_all 111 bl __flush_dcache_all
112 mov lr, x28 112 mov lr, x28
113 ic iallu // I+BTB cache invalidate 113 ic iallu // I+BTB cache invalidate
114 tlbi vmalle1is // invalidate I + D TLBs
114 dsb sy 115 dsb sy
115 116
116 mov x0, #3 << 20 117 mov x0, #3 << 20
117 msr cpacr_el1, x0 // Enable FP/ASIMD 118 msr cpacr_el1, x0 // Enable FP/ASIMD
118 msr mdscr_el1, xzr // Reset mdscr_el1 119 msr mdscr_el1, xzr // Reset mdscr_el1
119 tlbi vmalle1is // invalidate I + D TLBs
120 /* 120 /*
121 * Memory region attributes for LPAE: 121 * Memory region attributes for LPAE:
122 * 122 *
diff --git a/arch/avr32/boards/favr-32/setup.c b/arch/avr32/boards/favr-32/setup.c
index 7b1f2cd85400..1f121497b517 100644
--- a/arch/avr32/boards/favr-32/setup.c
+++ b/arch/avr32/boards/favr-32/setup.c
@@ -298,8 +298,10 @@ static int __init set_abdac_rate(struct platform_device *pdev)
298 */ 298 */
299 retval = clk_round_rate(pll1, 299 retval = clk_round_rate(pll1,
300 CONFIG_BOARD_FAVR32_ABDAC_RATE * 256 * 16); 300 CONFIG_BOARD_FAVR32_ABDAC_RATE * 256 * 16);
301 if (retval < 0) 301 if (retval <= 0) {
302 retval = -EINVAL;
302 goto out_abdac; 303 goto out_abdac;
304 }
303 305
304 retval = clk_set_rate(pll1, retval); 306 retval = clk_set_rate(pll1, retval);
305 if (retval != 0) 307 if (retval != 0)
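
clk_round_rate() returns a long that can be zero (no usable rate) as well as a negative errno, so the error path now propagates -EINVAL instead of reusing the bogus return value as a rate. The corrected call pattern, wrapped in a hypothetical helper name for illustration:

    static int set_abdac_pll_rate(struct clk *pll1, unsigned long target_rate)
    {
            long rounded = clk_round_rate(pll1, target_rate);

            if (rounded <= 0)               /* 0: no rate found, <0: errno */
                    return -EINVAL;

            return clk_set_rate(pll1, rounded);
    }
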
diff --git a/arch/avr32/configs/atngw100_defconfig b/arch/avr32/configs/atngw100_defconfig
index d5aff36ade92..4733e38e7ae6 100644
--- a/arch/avr32/configs/atngw100_defconfig
+++ b/arch/avr32/configs/atngw100_defconfig
@@ -59,7 +59,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
59# CONFIG_PREVENT_FIRMWARE_BUILD is not set 59# CONFIG_PREVENT_FIRMWARE_BUILD is not set
60# CONFIG_FW_LOADER is not set 60# CONFIG_FW_LOADER is not set
61CONFIG_MTD=y 61CONFIG_MTD=y
62CONFIG_MTD_PARTITIONS=y
63CONFIG_MTD_CMDLINE_PARTS=y 62CONFIG_MTD_CMDLINE_PARTS=y
64CONFIG_MTD_CHAR=y 63CONFIG_MTD_CHAR=y
65CONFIG_MTD_BLOCK=y 64CONFIG_MTD_BLOCK=y
diff --git a/arch/avr32/configs/atngw100_evklcd100_defconfig b/arch/avr32/configs/atngw100_evklcd100_defconfig
index 4abcf435d599..1be0ee31bd91 100644
--- a/arch/avr32/configs/atngw100_evklcd100_defconfig
+++ b/arch/avr32/configs/atngw100_evklcd100_defconfig
@@ -61,7 +61,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
61# CONFIG_PREVENT_FIRMWARE_BUILD is not set 61# CONFIG_PREVENT_FIRMWARE_BUILD is not set
62# CONFIG_FW_LOADER is not set 62# CONFIG_FW_LOADER is not set
63CONFIG_MTD=y 63CONFIG_MTD=y
64CONFIG_MTD_PARTITIONS=y
65CONFIG_MTD_CMDLINE_PARTS=y 64CONFIG_MTD_CMDLINE_PARTS=y
66CONFIG_MTD_CHAR=y 65CONFIG_MTD_CHAR=y
67CONFIG_MTD_BLOCK=y 66CONFIG_MTD_BLOCK=y
diff --git a/arch/avr32/configs/atngw100_evklcd101_defconfig b/arch/avr32/configs/atngw100_evklcd101_defconfig
index 18f3fa0470ff..796e536f7bc4 100644
--- a/arch/avr32/configs/atngw100_evklcd101_defconfig
+++ b/arch/avr32/configs/atngw100_evklcd101_defconfig
@@ -60,7 +60,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
60# CONFIG_PREVENT_FIRMWARE_BUILD is not set 60# CONFIG_PREVENT_FIRMWARE_BUILD is not set
61# CONFIG_FW_LOADER is not set 61# CONFIG_FW_LOADER is not set
62CONFIG_MTD=y 62CONFIG_MTD=y
63CONFIG_MTD_PARTITIONS=y
64CONFIG_MTD_CMDLINE_PARTS=y 63CONFIG_MTD_CMDLINE_PARTS=y
65CONFIG_MTD_CHAR=y 64CONFIG_MTD_CHAR=y
66CONFIG_MTD_BLOCK=y 65CONFIG_MTD_BLOCK=y
diff --git a/arch/avr32/configs/atngw100_mrmt_defconfig b/arch/avr32/configs/atngw100_mrmt_defconfig
index 06e389cfcd12..9a57da44eb6f 100644
--- a/arch/avr32/configs/atngw100_mrmt_defconfig
+++ b/arch/avr32/configs/atngw100_mrmt_defconfig
@@ -48,7 +48,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
48# CONFIG_PREVENT_FIRMWARE_BUILD is not set 48# CONFIG_PREVENT_FIRMWARE_BUILD is not set
49# CONFIG_FW_LOADER is not set 49# CONFIG_FW_LOADER is not set
50CONFIG_MTD=y 50CONFIG_MTD=y
51CONFIG_MTD_PARTITIONS=y
52CONFIG_MTD_CMDLINE_PARTS=y 51CONFIG_MTD_CMDLINE_PARTS=y
53CONFIG_MTD_CHAR=y 52CONFIG_MTD_CHAR=y
54CONFIG_MTD_BLOCK=y 53CONFIG_MTD_BLOCK=y
diff --git a/arch/avr32/configs/atngw100mkii_defconfig b/arch/avr32/configs/atngw100mkii_defconfig
index 2518a1368d7c..97fe1b399b06 100644
--- a/arch/avr32/configs/atngw100mkii_defconfig
+++ b/arch/avr32/configs/atngw100mkii_defconfig
@@ -59,7 +59,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
59# CONFIG_PREVENT_FIRMWARE_BUILD is not set 59# CONFIG_PREVENT_FIRMWARE_BUILD is not set
60# CONFIG_FW_LOADER is not set 60# CONFIG_FW_LOADER is not set
61CONFIG_MTD=y 61CONFIG_MTD=y
62CONFIG_MTD_PARTITIONS=y
63CONFIG_MTD_CMDLINE_PARTS=y 62CONFIG_MTD_CMDLINE_PARTS=y
64CONFIG_MTD_CHAR=y 63CONFIG_MTD_CHAR=y
65CONFIG_MTD_BLOCK=y 64CONFIG_MTD_BLOCK=y
diff --git a/arch/avr32/configs/atngw100mkii_evklcd100_defconfig b/arch/avr32/configs/atngw100mkii_evklcd100_defconfig
index 245ef6bd0fa6..a176d24467e9 100644
--- a/arch/avr32/configs/atngw100mkii_evklcd100_defconfig
+++ b/arch/avr32/configs/atngw100mkii_evklcd100_defconfig
@@ -62,7 +62,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
62# CONFIG_PREVENT_FIRMWARE_BUILD is not set 62# CONFIG_PREVENT_FIRMWARE_BUILD is not set
63# CONFIG_FW_LOADER is not set 63# CONFIG_FW_LOADER is not set
64CONFIG_MTD=y 64CONFIG_MTD=y
65CONFIG_MTD_PARTITIONS=y
66CONFIG_MTD_CMDLINE_PARTS=y 65CONFIG_MTD_CMDLINE_PARTS=y
67CONFIG_MTD_CHAR=y 66CONFIG_MTD_CHAR=y
68CONFIG_MTD_BLOCK=y 67CONFIG_MTD_BLOCK=y
diff --git a/arch/avr32/configs/atngw100mkii_evklcd101_defconfig b/arch/avr32/configs/atngw100mkii_evklcd101_defconfig
index fa6cbac6e418..d1bf6dcfc47d 100644
--- a/arch/avr32/configs/atngw100mkii_evklcd101_defconfig
+++ b/arch/avr32/configs/atngw100mkii_evklcd101_defconfig
@@ -61,7 +61,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
61# CONFIG_PREVENT_FIRMWARE_BUILD is not set 61# CONFIG_PREVENT_FIRMWARE_BUILD is not set
62# CONFIG_FW_LOADER is not set 62# CONFIG_FW_LOADER is not set
63CONFIG_MTD=y 63CONFIG_MTD=y
64CONFIG_MTD_PARTITIONS=y
65CONFIG_MTD_CMDLINE_PARTS=y 64CONFIG_MTD_CMDLINE_PARTS=y
66CONFIG_MTD_CHAR=y 65CONFIG_MTD_CHAR=y
67CONFIG_MTD_BLOCK=y 66CONFIG_MTD_BLOCK=y
diff --git a/arch/avr32/configs/atstk1002_defconfig b/arch/avr32/configs/atstk1002_defconfig
index bbd5131021a5..2813dd2b9138 100644
--- a/arch/avr32/configs/atstk1002_defconfig
+++ b/arch/avr32/configs/atstk1002_defconfig
@@ -53,7 +53,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
53# CONFIG_PREVENT_FIRMWARE_BUILD is not set 53# CONFIG_PREVENT_FIRMWARE_BUILD is not set
54# CONFIG_FW_LOADER is not set 54# CONFIG_FW_LOADER is not set
55CONFIG_MTD=y 55CONFIG_MTD=y
56CONFIG_MTD_PARTITIONS=y
57CONFIG_MTD_CMDLINE_PARTS=y 56CONFIG_MTD_CMDLINE_PARTS=y
58CONFIG_MTD_CHAR=y 57CONFIG_MTD_CHAR=y
59CONFIG_MTD_BLOCK=y 58CONFIG_MTD_BLOCK=y
diff --git a/arch/avr32/configs/atstk1003_defconfig b/arch/avr32/configs/atstk1003_defconfig
index c1cd726f9012..f8ff3a3baad4 100644
--- a/arch/avr32/configs/atstk1003_defconfig
+++ b/arch/avr32/configs/atstk1003_defconfig
@@ -42,7 +42,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
42# CONFIG_PREVENT_FIRMWARE_BUILD is not set 42# CONFIG_PREVENT_FIRMWARE_BUILD is not set
43# CONFIG_FW_LOADER is not set 43# CONFIG_FW_LOADER is not set
44CONFIG_MTD=y 44CONFIG_MTD=y
45CONFIG_MTD_PARTITIONS=y
46CONFIG_MTD_CMDLINE_PARTS=y 45CONFIG_MTD_CMDLINE_PARTS=y
47CONFIG_MTD_CHAR=y 46CONFIG_MTD_CHAR=y
48CONFIG_MTD_BLOCK=y 47CONFIG_MTD_BLOCK=y
diff --git a/arch/avr32/configs/atstk1004_defconfig b/arch/avr32/configs/atstk1004_defconfig
index 754ae56b2767..992228e54e38 100644
--- a/arch/avr32/configs/atstk1004_defconfig
+++ b/arch/avr32/configs/atstk1004_defconfig
@@ -42,7 +42,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
42# CONFIG_PREVENT_FIRMWARE_BUILD is not set 42# CONFIG_PREVENT_FIRMWARE_BUILD is not set
43# CONFIG_FW_LOADER is not set 43# CONFIG_FW_LOADER is not set
44CONFIG_MTD=y 44CONFIG_MTD=y
45CONFIG_MTD_PARTITIONS=y
46CONFIG_MTD_CMDLINE_PARTS=y 45CONFIG_MTD_CMDLINE_PARTS=y
47CONFIG_MTD_CHAR=y 46CONFIG_MTD_CHAR=y
48CONFIG_MTD_BLOCK=y 47CONFIG_MTD_BLOCK=y
diff --git a/arch/avr32/configs/atstk1006_defconfig b/arch/avr32/configs/atstk1006_defconfig
index 58589d8cc0ac..b8e698b0d1fa 100644
--- a/arch/avr32/configs/atstk1006_defconfig
+++ b/arch/avr32/configs/atstk1006_defconfig
@@ -54,7 +54,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
54# CONFIG_PREVENT_FIRMWARE_BUILD is not set 54# CONFIG_PREVENT_FIRMWARE_BUILD is not set
55# CONFIG_FW_LOADER is not set 55# CONFIG_FW_LOADER is not set
56CONFIG_MTD=y 56CONFIG_MTD=y
57CONFIG_MTD_PARTITIONS=y
58CONFIG_MTD_CMDLINE_PARTS=y 57CONFIG_MTD_CMDLINE_PARTS=y
59CONFIG_MTD_CHAR=y 58CONFIG_MTD_CHAR=y
60CONFIG_MTD_BLOCK=y 59CONFIG_MTD_BLOCK=y
diff --git a/arch/avr32/configs/favr-32_defconfig b/arch/avr32/configs/favr-32_defconfig
index c90fbf6d35bc..07bed3f7eb5e 100644
--- a/arch/avr32/configs/favr-32_defconfig
+++ b/arch/avr32/configs/favr-32_defconfig
@@ -58,7 +58,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
58# CONFIG_PREVENT_FIRMWARE_BUILD is not set 58# CONFIG_PREVENT_FIRMWARE_BUILD is not set
59# CONFIG_FW_LOADER is not set 59# CONFIG_FW_LOADER is not set
60CONFIG_MTD=y 60CONFIG_MTD=y
61CONFIG_MTD_PARTITIONS=y
62CONFIG_MTD_CMDLINE_PARTS=y 61CONFIG_MTD_CMDLINE_PARTS=y
63CONFIG_MTD_CHAR=y 62CONFIG_MTD_CHAR=y
64CONFIG_MTD_BLOCK=y 63CONFIG_MTD_BLOCK=y
diff --git a/arch/avr32/configs/hammerhead_defconfig b/arch/avr32/configs/hammerhead_defconfig
index ba7c31e269cb..18db853386c8 100644
--- a/arch/avr32/configs/hammerhead_defconfig
+++ b/arch/avr32/configs/hammerhead_defconfig
@@ -58,7 +58,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
58# CONFIG_PREVENT_FIRMWARE_BUILD is not set 58# CONFIG_PREVENT_FIRMWARE_BUILD is not set
59# CONFIG_FW_LOADER is not set 59# CONFIG_FW_LOADER is not set
60CONFIG_MTD=y 60CONFIG_MTD=y
61CONFIG_MTD_PARTITIONS=y
62CONFIG_MTD_CMDLINE_PARTS=y 61CONFIG_MTD_CMDLINE_PARTS=y
63CONFIG_MTD_CHAR=y 62CONFIG_MTD_CHAR=y
64CONFIG_MTD_BLOCK=y 63CONFIG_MTD_BLOCK=y
diff --git a/arch/avr32/configs/merisc_defconfig b/arch/avr32/configs/merisc_defconfig
index 65de4431108c..91df6b2986be 100644
--- a/arch/avr32/configs/merisc_defconfig
+++ b/arch/avr32/configs/merisc_defconfig
@@ -46,7 +46,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
46# CONFIG_FW_LOADER is not set 46# CONFIG_FW_LOADER is not set
47CONFIG_MTD=y 47CONFIG_MTD=y
48CONFIG_MTD_CONCAT=y 48CONFIG_MTD_CONCAT=y
49CONFIG_MTD_PARTITIONS=y
50CONFIG_MTD_CHAR=y 49CONFIG_MTD_CHAR=y
51CONFIG_MTD_BLOCK=y 50CONFIG_MTD_BLOCK=y
52CONFIG_MTD_CFI=y 51CONFIG_MTD_CFI=y
diff --git a/arch/avr32/configs/mimc200_defconfig b/arch/avr32/configs/mimc200_defconfig
index 0a8bfdc420e0..d630e089dd32 100644
--- a/arch/avr32/configs/mimc200_defconfig
+++ b/arch/avr32/configs/mimc200_defconfig
@@ -49,7 +49,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
49# CONFIG_PREVENT_FIRMWARE_BUILD is not set 49# CONFIG_PREVENT_FIRMWARE_BUILD is not set
50# CONFIG_FW_LOADER is not set 50# CONFIG_FW_LOADER is not set
51CONFIG_MTD=y 51CONFIG_MTD=y
52CONFIG_MTD_PARTITIONS=y
53CONFIG_MTD_CMDLINE_PARTS=y 52CONFIG_MTD_CMDLINE_PARTS=y
54CONFIG_MTD_CHAR=y 53CONFIG_MTD_CHAR=y
55CONFIG_MTD_BLOCK=y 54CONFIG_MTD_BLOCK=y
diff --git a/arch/avr32/kernel/time.c b/arch/avr32/kernel/time.c
index 12f828ad5058..d0f771be9e96 100644
--- a/arch/avr32/kernel/time.c
+++ b/arch/avr32/kernel/time.c
@@ -59,7 +59,7 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id)
59static struct irqaction timer_irqaction = { 59static struct irqaction timer_irqaction = {
60 .handler = timer_interrupt, 60 .handler = timer_interrupt,
61 /* Oprofile uses the same irq as the timer, so allow it to be shared */ 61 /* Oprofile uses the same irq as the timer, so allow it to be shared */
62 .flags = IRQF_TIMER | IRQF_DISABLED | IRQF_SHARED, 62 .flags = IRQF_TIMER | IRQF_SHARED,
63 .name = "avr32_comparator", 63 .name = "avr32_comparator",
64}; 64};
65 65
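
Dropping IRQF_DISABLED is purely cosmetic: genirq has run handlers with interrupts disabled for a long time, so the flag was already a no-op. For illustration, the same registration expressed with request_irq() (the file itself keeps using setup_irq() with a static irqaction; timer_dev is a made-up token):

    /* dev_id must be non-NULL for a shared IRQ; any unique pointer works */
    err = request_irq(irq, timer_interrupt, IRQF_TIMER | IRQF_SHARED,
                      "avr32_comparator", &timer_dev);
    if (err)
            pr_err("failed to request comparator IRQ: %d\n", err);
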
diff --git a/arch/avr32/mach-at32ap/pm.c b/arch/avr32/mach-at32ap/pm.c
index 32d680eb6f48..db190842b80c 100644
--- a/arch/avr32/mach-at32ap/pm.c
+++ b/arch/avr32/mach-at32ap/pm.c
@@ -181,7 +181,7 @@ static const struct platform_suspend_ops avr32_pm_ops = {
181 .enter = avr32_pm_enter, 181 .enter = avr32_pm_enter,
182}; 182};
183 183
184static unsigned long avr32_pm_offset(void *symbol) 184static unsigned long __init avr32_pm_offset(void *symbol)
185{ 185{
186 extern u8 pm_exception[]; 186 extern u8 pm_exception[];
187 187
diff --git a/arch/parisc/configs/c3000_defconfig b/arch/parisc/configs/c3000_defconfig
index ec1b014952b6..acacd348df89 100644
--- a/arch/parisc/configs/c3000_defconfig
+++ b/arch/parisc/configs/c3000_defconfig
@@ -50,7 +50,7 @@ CONFIG_BLK_DEV_CRYPTOLOOP=m
50CONFIG_IDE=y 50CONFIG_IDE=y
51CONFIG_BLK_DEV_IDECD=y 51CONFIG_BLK_DEV_IDECD=y
52CONFIG_BLK_DEV_NS87415=y 52CONFIG_BLK_DEV_NS87415=y
53CONFIG_BLK_DEV_SIIMAGE=m 53CONFIG_PATA_SIL680=m
54CONFIG_SCSI=y 54CONFIG_SCSI=y
55CONFIG_BLK_DEV_SD=y 55CONFIG_BLK_DEV_SD=y
56CONFIG_CHR_DEV_ST=y 56CONFIG_CHR_DEV_ST=y
diff --git a/arch/parisc/configs/c8000_defconfig b/arch/parisc/configs/c8000_defconfig
index e1c8d2015c89..8249ac9d9cfc 100644
--- a/arch/parisc/configs/c8000_defconfig
+++ b/arch/parisc/configs/c8000_defconfig
@@ -20,7 +20,6 @@ CONFIG_MODULE_FORCE_UNLOAD=y
20CONFIG_MODVERSIONS=y 20CONFIG_MODVERSIONS=y
21CONFIG_BLK_DEV_INTEGRITY=y 21CONFIG_BLK_DEV_INTEGRITY=y
22CONFIG_PA8X00=y 22CONFIG_PA8X00=y
23CONFIG_MLONGCALLS=y
24CONFIG_64BIT=y 23CONFIG_64BIT=y
25CONFIG_SMP=y 24CONFIG_SMP=y
26CONFIG_PREEMPT=y 25CONFIG_PREEMPT=y
@@ -81,8 +80,6 @@ CONFIG_IDE=y
81CONFIG_BLK_DEV_IDECD=y 80CONFIG_BLK_DEV_IDECD=y
82CONFIG_BLK_DEV_PLATFORM=y 81CONFIG_BLK_DEV_PLATFORM=y
83CONFIG_BLK_DEV_GENERIC=y 82CONFIG_BLK_DEV_GENERIC=y
84CONFIG_BLK_DEV_SIIMAGE=y
85CONFIG_SCSI=y
86CONFIG_BLK_DEV_SD=y 83CONFIG_BLK_DEV_SD=y
87CONFIG_CHR_DEV_ST=m 84CONFIG_CHR_DEV_ST=m
88CONFIG_BLK_DEV_SR=m 85CONFIG_BLK_DEV_SR=m
@@ -94,6 +91,8 @@ CONFIG_SCSI_FC_ATTRS=y
94CONFIG_SCSI_SAS_LIBSAS=m 91CONFIG_SCSI_SAS_LIBSAS=m
95CONFIG_ISCSI_TCP=m 92CONFIG_ISCSI_TCP=m
96CONFIG_ISCSI_BOOT_SYSFS=m 93CONFIG_ISCSI_BOOT_SYSFS=m
94CONFIG_ATA=y
95CONFIG_PATA_SIL680=y
97CONFIG_FUSION=y 96CONFIG_FUSION=y
98CONFIG_FUSION_SPI=y 97CONFIG_FUSION_SPI=y
99CONFIG_FUSION_SAS=y 98CONFIG_FUSION_SAS=y
@@ -114,9 +113,8 @@ CONFIG_INPUT_FF_MEMLESS=m
114# CONFIG_KEYBOARD_ATKBD is not set 113# CONFIG_KEYBOARD_ATKBD is not set
115# CONFIG_KEYBOARD_HIL_OLD is not set 114# CONFIG_KEYBOARD_HIL_OLD is not set
116# CONFIG_KEYBOARD_HIL is not set 115# CONFIG_KEYBOARD_HIL is not set
117CONFIG_MOUSE_PS2=m 116# CONFIG_MOUSE_PS2 is not set
118CONFIG_INPUT_MISC=y 117CONFIG_INPUT_MISC=y
119CONFIG_INPUT_CM109=m
120CONFIG_SERIO_SERPORT=m 118CONFIG_SERIO_SERPORT=m
121CONFIG_SERIO_PARKBD=m 119CONFIG_SERIO_PARKBD=m
122CONFIG_SERIO_GSCPS2=m 120CONFIG_SERIO_GSCPS2=m
@@ -167,34 +165,6 @@ CONFIG_SND_VERBOSE_PRINTK=y
167CONFIG_SND_AD1889=m 165CONFIG_SND_AD1889=m
168# CONFIG_SND_USB is not set 166# CONFIG_SND_USB is not set
169# CONFIG_SND_GSC is not set 167# CONFIG_SND_GSC is not set
170CONFIG_HID_A4TECH=m
171CONFIG_HID_APPLE=m
172CONFIG_HID_BELKIN=m
173CONFIG_HID_CHERRY=m
174CONFIG_HID_CHICONY=m
175CONFIG_HID_CYPRESS=m
176CONFIG_HID_DRAGONRISE=m
177CONFIG_HID_EZKEY=m
178CONFIG_HID_KYE=m
179CONFIG_HID_GYRATION=m
180CONFIG_HID_TWINHAN=m
181CONFIG_HID_KENSINGTON=m
182CONFIG_HID_LOGITECH=m
183CONFIG_HID_LOGITECH_DJ=m
184CONFIG_HID_MICROSOFT=m
185CONFIG_HID_MONTEREY=m
186CONFIG_HID_NTRIG=m
187CONFIG_HID_ORTEK=m
188CONFIG_HID_PANTHERLORD=m
189CONFIG_HID_PETALYNX=m
190CONFIG_HID_SAMSUNG=m
191CONFIG_HID_SUNPLUS=m
192CONFIG_HID_GREENASIA=m
193CONFIG_HID_SMARTJOYPLUS=m
194CONFIG_HID_TOPSEED=m
195CONFIG_HID_THRUSTMASTER=m
196CONFIG_HID_ZEROPLUS=m
197CONFIG_USB_HID=m
198CONFIG_USB=y 168CONFIG_USB=y
199CONFIG_USB_OHCI_HCD=y 169CONFIG_USB_OHCI_HCD=y
200CONFIG_USB_STORAGE=y 170CONFIG_USB_STORAGE=y
diff --git a/arch/parisc/configs/generic-64bit_defconfig b/arch/parisc/configs/generic-64bit_defconfig
index 5874cebee077..28c1b5de044e 100644
--- a/arch/parisc/configs/generic-64bit_defconfig
+++ b/arch/parisc/configs/generic-64bit_defconfig
@@ -24,7 +24,6 @@ CONFIG_MODVERSIONS=y
24CONFIG_BLK_DEV_INTEGRITY=y 24CONFIG_BLK_DEV_INTEGRITY=y
25# CONFIG_IOSCHED_DEADLINE is not set 25# CONFIG_IOSCHED_DEADLINE is not set
26CONFIG_PA8X00=y 26CONFIG_PA8X00=y
27CONFIG_MLONGCALLS=y
28CONFIG_64BIT=y 27CONFIG_64BIT=y
29CONFIG_SMP=y 28CONFIG_SMP=y
30# CONFIG_COMPACTION is not set 29# CONFIG_COMPACTION is not set
@@ -68,7 +67,6 @@ CONFIG_IDE_GD=m
68CONFIG_IDE_GD_ATAPI=y 67CONFIG_IDE_GD_ATAPI=y
69CONFIG_BLK_DEV_IDECD=m 68CONFIG_BLK_DEV_IDECD=m
70CONFIG_BLK_DEV_NS87415=y 69CONFIG_BLK_DEV_NS87415=y
71CONFIG_BLK_DEV_SIIMAGE=y
72# CONFIG_SCSI_PROC_FS is not set 70# CONFIG_SCSI_PROC_FS is not set
73CONFIG_BLK_DEV_SD=y 71CONFIG_BLK_DEV_SD=y
74CONFIG_BLK_DEV_SR=y 72CONFIG_BLK_DEV_SR=y
@@ -82,6 +80,7 @@ CONFIG_SCSI_ZALON=y
82CONFIG_SCSI_QLA_ISCSI=m 80CONFIG_SCSI_QLA_ISCSI=m
83CONFIG_SCSI_DH=y 81CONFIG_SCSI_DH=y
84CONFIG_ATA=y 82CONFIG_ATA=y
83CONFIG_PATA_SIL680=y
85CONFIG_ATA_GENERIC=y 84CONFIG_ATA_GENERIC=y
86CONFIG_MD=y 85CONFIG_MD=y
87CONFIG_MD_LINEAR=m 86CONFIG_MD_LINEAR=m
@@ -162,7 +161,7 @@ CONFIG_SLIP_MODE_SLIP6=y
162CONFIG_INPUT_EVDEV=y 161CONFIG_INPUT_EVDEV=y
163# CONFIG_KEYBOARD_HIL_OLD is not set 162# CONFIG_KEYBOARD_HIL_OLD is not set
164# CONFIG_KEYBOARD_HIL is not set 163# CONFIG_KEYBOARD_HIL is not set
165# CONFIG_INPUT_MOUSE is not set 164# CONFIG_MOUSE_PS2 is not set
166CONFIG_INPUT_MISC=y 165CONFIG_INPUT_MISC=y
167CONFIG_SERIO_SERPORT=m 166CONFIG_SERIO_SERPORT=m
168# CONFIG_HP_SDC is not set 167# CONFIG_HP_SDC is not set
@@ -216,32 +215,7 @@ CONFIG_BACKLIGHT_LCD_SUPPORT=y
216CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y 215CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
217CONFIG_LOGO=y 216CONFIG_LOGO=y
218# CONFIG_LOGO_LINUX_MONO is not set 217# CONFIG_LOGO_LINUX_MONO is not set
219CONFIG_HID=m
220CONFIG_HIDRAW=y 218CONFIG_HIDRAW=y
221CONFIG_HID_DRAGONRISE=m
222CONFIG_DRAGONRISE_FF=y
223CONFIG_HID_KYE=m
224CONFIG_HID_GYRATION=m
225CONFIG_HID_TWINHAN=m
226CONFIG_LOGITECH_FF=y
227CONFIG_LOGIRUMBLEPAD2_FF=y
228CONFIG_HID_NTRIG=m
229CONFIG_HID_PANTHERLORD=m
230CONFIG_PANTHERLORD_FF=y
231CONFIG_HID_PETALYNX=m
232CONFIG_HID_SAMSUNG=m
233CONFIG_HID_SONY=m
234CONFIG_HID_SUNPLUS=m
235CONFIG_HID_GREENASIA=m
236CONFIG_GREENASIA_FF=y
237CONFIG_HID_SMARTJOYPLUS=m
238CONFIG_SMARTJOYPLUS_FF=y
239CONFIG_HID_TOPSEED=m
240CONFIG_HID_THRUSTMASTER=m
241CONFIG_THRUSTMASTER_FF=y
242CONFIG_HID_ZEROPLUS=m
243CONFIG_ZEROPLUS_FF=y
244CONFIG_USB_HID=m
245CONFIG_HID_PID=y 219CONFIG_HID_PID=y
246CONFIG_USB_HIDDEV=y 220CONFIG_USB_HIDDEV=y
247CONFIG_USB=y 221CONFIG_USB=y
@@ -251,13 +225,8 @@ CONFIG_USB_DYNAMIC_MINORS=y
251CONFIG_USB_MON=m 225CONFIG_USB_MON=m
252CONFIG_USB_WUSB_CBAF=m 226CONFIG_USB_WUSB_CBAF=m
253CONFIG_USB_XHCI_HCD=m 227CONFIG_USB_XHCI_HCD=m
254CONFIG_USB_EHCI_HCD=m 228CONFIG_USB_EHCI_HCD=y
255CONFIG_USB_OHCI_HCD=m 229CONFIG_USB_OHCI_HCD=y
256CONFIG_USB_R8A66597_HCD=m
257CONFIG_USB_ACM=m
258CONFIG_USB_PRINTER=m
259CONFIG_USB_WDM=m
260CONFIG_USB_TMC=m
261CONFIG_NEW_LEDS=y 230CONFIG_NEW_LEDS=y
262CONFIG_LEDS_CLASS=y 231CONFIG_LEDS_CLASS=y
263CONFIG_LEDS_TRIGGERS=y 232CONFIG_LEDS_TRIGGERS=y
diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h
index f0e2784e7cca..2f9b751878ba 100644
--- a/arch/parisc/include/asm/cacheflush.h
+++ b/arch/parisc/include/asm/cacheflush.h
@@ -125,42 +125,38 @@ flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vma
125void mark_rodata_ro(void); 125void mark_rodata_ro(void);
126#endif 126#endif
127 127
128#ifdef CONFIG_PA8X00
129/* Only pa8800, pa8900 needs this */
130
131#include <asm/kmap_types.h> 128#include <asm/kmap_types.h>
132 129
133#define ARCH_HAS_KMAP 130#define ARCH_HAS_KMAP
134 131
135void kunmap_parisc(void *addr);
136
137static inline void *kmap(struct page *page) 132static inline void *kmap(struct page *page)
138{ 133{
139 might_sleep(); 134 might_sleep();
135 flush_dcache_page(page);
140 return page_address(page); 136 return page_address(page);
141} 137}
142 138
143static inline void kunmap(struct page *page) 139static inline void kunmap(struct page *page)
144{ 140{
145 kunmap_parisc(page_address(page)); 141 flush_kernel_dcache_page_addr(page_address(page));
146} 142}
147 143
148static inline void *kmap_atomic(struct page *page) 144static inline void *kmap_atomic(struct page *page)
149{ 145{
150 pagefault_disable(); 146 pagefault_disable();
147 flush_dcache_page(page);
151 return page_address(page); 148 return page_address(page);
152} 149}
153 150
154static inline void __kunmap_atomic(void *addr) 151static inline void __kunmap_atomic(void *addr)
155{ 152{
156 kunmap_parisc(addr); 153 flush_kernel_dcache_page_addr(addr);
157 pagefault_enable(); 154 pagefault_enable();
158} 155}
159 156
160#define kmap_atomic_prot(page, prot) kmap_atomic(page) 157#define kmap_atomic_prot(page, prot) kmap_atomic(page)
161#define kmap_atomic_pfn(pfn) kmap_atomic(pfn_to_page(pfn)) 158#define kmap_atomic_pfn(pfn) kmap_atomic(pfn_to_page(pfn))
162#define kmap_atomic_to_page(ptr) virt_to_page(ptr) 159#define kmap_atomic_to_page(ptr) virt_to_page(ptr)
163#endif
164 160
165#endif /* _PARISC_CACHEFLUSH_H */ 161#endif /* _PARISC_CACHEFLUSH_H */
166 162
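
kmap()/kmap_atomic() now flush the page unconditionally instead of only on PA8x00: with parisc's virtually indexed caches, the kernel alias and a user alias of the same page can hold different cache lines, so data has to be flushed around every kernel-side access. A sketch of the pattern these flushes protect (copy_to_page() is a made-up helper name):

    static void copy_to_page(struct page *page, const void *buf, size_t len)
    {
            void *dst = kmap_atomic(page);  /* flushes the D-cache first */

            memcpy(dst, buf, len);          /* write via the kernel alias */
            kunmap_atomic(dst);             /* write back before user aliases read */
    }
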
diff --git a/arch/parisc/include/asm/page.h b/arch/parisc/include/asm/page.h
index b7adb2ac049c..c53fc63149e8 100644
--- a/arch/parisc/include/asm/page.h
+++ b/arch/parisc/include/asm/page.h
@@ -28,9 +28,8 @@ struct page;
28 28
29void clear_page_asm(void *page); 29void clear_page_asm(void *page);
30void copy_page_asm(void *to, void *from); 30void copy_page_asm(void *to, void *from);
31void clear_user_page(void *vto, unsigned long vaddr, struct page *pg); 31#define clear_user_page(vto, vaddr, page) clear_page_asm(vto)
32void copy_user_page(void *vto, void *vfrom, unsigned long vaddr, 32#define copy_user_page(vto, vfrom, vaddr, page) copy_page_asm(vto, vfrom)
33 struct page *pg);
34 33
35/* #define CONFIG_PARISC_TMPALIAS */ 34/* #define CONFIG_PARISC_TMPALIAS */
36 35
diff --git a/arch/parisc/include/asm/serial.h b/arch/parisc/include/asm/serial.h
index d7e3cc60dbc3..77e9b67c87ee 100644
--- a/arch/parisc/include/asm/serial.h
+++ b/arch/parisc/include/asm/serial.h
@@ -6,5 +6,3 @@
6 * This is used for 16550-compatible UARTs 6 * This is used for 16550-compatible UARTs
7 */ 7 */
8#define BASE_BAUD ( 1843200 / 16 ) 8#define BASE_BAUD ( 1843200 / 16 )
9
10#define SERIAL_PORT_DFNS
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index c035673209f7..a72545554a31 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -388,41 +388,6 @@ void flush_kernel_dcache_page_addr(void *addr)
388} 388}
389EXPORT_SYMBOL(flush_kernel_dcache_page_addr); 389EXPORT_SYMBOL(flush_kernel_dcache_page_addr);
390 390
391void clear_user_page(void *vto, unsigned long vaddr, struct page *page)
392{
393 clear_page_asm(vto);
394 if (!parisc_requires_coherency())
395 flush_kernel_dcache_page_asm(vto);
396}
397EXPORT_SYMBOL(clear_user_page);
398
399void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
400 struct page *pg)
401{
402 /* Copy using kernel mapping. No coherency is needed
403 (all in kmap/kunmap) on machines that don't support
404 non-equivalent aliasing. However, the `from' page
405 needs to be flushed before it can be accessed through
406 the kernel mapping. */
407 preempt_disable();
408 flush_dcache_page_asm(__pa(vfrom), vaddr);
409 preempt_enable();
410 copy_page_asm(vto, vfrom);
411 if (!parisc_requires_coherency())
412 flush_kernel_dcache_page_asm(vto);
413}
414EXPORT_SYMBOL(copy_user_page);
415
416#ifdef CONFIG_PA8X00
417
418void kunmap_parisc(void *addr)
419{
420 if (parisc_requires_coherency())
421 flush_kernel_dcache_page_addr(addr);
422}
423EXPORT_SYMBOL(kunmap_parisc);
424#endif
425
426void purge_tlb_entries(struct mm_struct *mm, unsigned long addr) 391void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
427{ 392{
428 unsigned long flags; 393 unsigned long flags;
diff --git a/arch/parisc/kernel/hardware.c b/arch/parisc/kernel/hardware.c
index 06cb3992907e..608716f8496b 100644
--- a/arch/parisc/kernel/hardware.c
+++ b/arch/parisc/kernel/hardware.c
@@ -36,6 +36,9 @@
36 * HP PARISC Hardware Database 36 * HP PARISC Hardware Database
37 * Access to this database is only possible during bootup 37 * Access to this database is only possible during bootup
38 * so don't reference this table after starting the init process 38 * so don't reference this table after starting the init process
39 *
40 * NOTE: Product names which are listed here and ends with a '?'
41 * are guessed. If you know the correct name, please let us know.
39 */ 42 */
40 43
41static struct hp_hardware hp_hardware_list[] = { 44static struct hp_hardware hp_hardware_list[] = {
@@ -222,7 +225,7 @@ static struct hp_hardware hp_hardware_list[] = {
222 {HPHW_NPROC,0x5DD,0x4,0x81,"Duet W2"}, 225 {HPHW_NPROC,0x5DD,0x4,0x81,"Duet W2"},
223 {HPHW_NPROC,0x5DE,0x4,0x81,"Piccolo W+"}, 226 {HPHW_NPROC,0x5DE,0x4,0x81,"Piccolo W+"},
224 {HPHW_NPROC,0x5DF,0x4,0x81,"Cantata W2"}, 227 {HPHW_NPROC,0x5DF,0x4,0x81,"Cantata W2"},
225 {HPHW_NPROC,0x5DF,0x0,0x00,"Marcato W+? (rp5470)"}, 228 {HPHW_NPROC,0x5DF,0x0,0x00,"Marcato W+ (rp5470)?"},
226 {HPHW_NPROC,0x5E0,0x4,0x91,"Cantata DC- W2"}, 229 {HPHW_NPROC,0x5E0,0x4,0x91,"Cantata DC- W2"},
227 {HPHW_NPROC,0x5E1,0x4,0x91,"Crescendo DC- W2"}, 230 {HPHW_NPROC,0x5E1,0x4,0x91,"Crescendo DC- W2"},
228 {HPHW_NPROC,0x5E2,0x4,0x91,"Crescendo 650 W2"}, 231 {HPHW_NPROC,0x5E2,0x4,0x91,"Crescendo 650 W2"},
@@ -276,9 +279,11 @@ static struct hp_hardware hp_hardware_list[] = {
276 {HPHW_NPROC,0x888,0x4,0x91,"Storm Peak Fast DC-"}, 279 {HPHW_NPROC,0x888,0x4,0x91,"Storm Peak Fast DC-"},
277 {HPHW_NPROC,0x889,0x4,0x91,"Storm Peak Fast"}, 280 {HPHW_NPROC,0x889,0x4,0x91,"Storm Peak Fast"},
278 {HPHW_NPROC,0x88A,0x4,0x91,"Crestone Peak Slow"}, 281 {HPHW_NPROC,0x88A,0x4,0x91,"Crestone Peak Slow"},
282 {HPHW_NPROC,0x88B,0x4,0x91,"Crestone Peak Fast?"},
279 {HPHW_NPROC,0x88C,0x4,0x91,"Orca Mako+"}, 283 {HPHW_NPROC,0x88C,0x4,0x91,"Orca Mako+"},
280 {HPHW_NPROC,0x88D,0x4,0x91,"Rainier/Medel Mako+ Slow"}, 284 {HPHW_NPROC,0x88D,0x4,0x91,"Rainier/Medel Mako+ Slow"},
281 {HPHW_NPROC,0x88E,0x4,0x91,"Rainier/Medel Mako+ Fast"}, 285 {HPHW_NPROC,0x88E,0x4,0x91,"Rainier/Medel Mako+ Fast"},
286 {HPHW_NPROC,0x892,0x4,0x91,"Mt. Hamilton Slow Mako+?"},
282 {HPHW_NPROC,0x894,0x4,0x91,"Mt. Hamilton Fast Mako+"}, 287 {HPHW_NPROC,0x894,0x4,0x91,"Mt. Hamilton Fast Mako+"},
283 {HPHW_NPROC,0x895,0x4,0x91,"Storm Peak Slow Mako+"}, 288 {HPHW_NPROC,0x895,0x4,0x91,"Storm Peak Slow Mako+"},
284 {HPHW_NPROC,0x896,0x4,0x91,"Storm Peak Fast Mako+"}, 289 {HPHW_NPROC,0x896,0x4,0x91,"Storm Peak Fast Mako+"},
diff --git a/arch/parisc/kernel/head.S b/arch/parisc/kernel/head.S
index d2d58258aea6..d4dc588c0dc1 100644
--- a/arch/parisc/kernel/head.S
+++ b/arch/parisc/kernel/head.S
@@ -41,9 +41,7 @@ END(boot_args)
41 .import fault_vector_11,code /* IVA parisc 1.1 32 bit */ 41 .import fault_vector_11,code /* IVA parisc 1.1 32 bit */
42 .import $global$ /* forward declaration */ 42 .import $global$ /* forward declaration */
43#endif /*!CONFIG_64BIT*/ 43#endif /*!CONFIG_64BIT*/
44 .export _stext,data /* Kernel want it this way! */ 44ENTRY(parisc_kernel_start)
45_stext:
46ENTRY(stext)
47 .proc 45 .proc
48 .callinfo 46 .callinfo
49 47
@@ -347,7 +345,7 @@ smp_slave_stext:
347 .procend 345 .procend
348#endif /* CONFIG_SMP */ 346#endif /* CONFIG_SMP */
349 347
350ENDPROC(stext) 348ENDPROC(parisc_kernel_start)
351 349
352#ifndef CONFIG_64BIT 350#ifndef CONFIG_64BIT
353 .section .data..read_mostly 351 .section .data..read_mostly
diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
index 5dfd248e3f1a..0d3a9d4927b5 100644
--- a/arch/parisc/kernel/sys_parisc.c
+++ b/arch/parisc/kernel/sys_parisc.c
@@ -61,8 +61,15 @@ static int get_offset(struct address_space *mapping)
61 return (unsigned long) mapping >> 8; 61 return (unsigned long) mapping >> 8;
62} 62}
63 63
64static unsigned long get_shared_area(struct address_space *mapping, 64static unsigned long shared_align_offset(struct file *filp, unsigned long pgoff)
65 unsigned long addr, unsigned long len, unsigned long pgoff) 65{
66 struct address_space *mapping = filp ? filp->f_mapping : NULL;
67
68 return (get_offset(mapping) + pgoff) << PAGE_SHIFT;
69}
70
71static unsigned long get_shared_area(struct file *filp, unsigned long addr,
72 unsigned long len, unsigned long pgoff)
66{ 73{
67 struct vm_unmapped_area_info info; 74 struct vm_unmapped_area_info info;
68 75
@@ -71,7 +78,7 @@ static unsigned long get_shared_area(struct address_space *mapping,
71 info.low_limit = PAGE_ALIGN(addr); 78 info.low_limit = PAGE_ALIGN(addr);
72 info.high_limit = TASK_SIZE; 79 info.high_limit = TASK_SIZE;
73 info.align_mask = PAGE_MASK & (SHMLBA - 1); 80 info.align_mask = PAGE_MASK & (SHMLBA - 1);
74 info.align_offset = (get_offset(mapping) + pgoff) << PAGE_SHIFT; 81 info.align_offset = shared_align_offset(filp, pgoff);
75 return vm_unmapped_area(&info); 82 return vm_unmapped_area(&info);
76} 83}
77 84
@@ -82,20 +89,18 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
82 return -ENOMEM; 89 return -ENOMEM;
83 if (flags & MAP_FIXED) { 90 if (flags & MAP_FIXED) {
84 if ((flags & MAP_SHARED) && 91 if ((flags & MAP_SHARED) &&
85 (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)) 92 (addr - shared_align_offset(filp, pgoff)) & (SHMLBA - 1))
86 return -EINVAL; 93 return -EINVAL;
87 return addr; 94 return addr;
88 } 95 }
89 if (!addr) 96 if (!addr)
90 addr = TASK_UNMAPPED_BASE; 97 addr = TASK_UNMAPPED_BASE;
91 98
92 if (filp) { 99 if (filp || (flags & MAP_SHARED))
93 addr = get_shared_area(filp->f_mapping, addr, len, pgoff); 100 addr = get_shared_area(filp, addr, len, pgoff);
94 } else if(flags & MAP_SHARED) { 101 else
95 addr = get_shared_area(NULL, addr, len, pgoff);
96 } else {
97 addr = get_unshared_area(addr, len); 102 addr = get_unshared_area(addr, len);
98 } 103
99 return addr; 104 return addr;
100} 105}
101 106
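
Folding the file offset into a single shared_align_offset() helper means the MAP_FIXED check and the address search apply the same congruence rule: shared mappings of the same object must land at virtual addresses equal modulo SHMLBA once the page offset is accounted for, or the virtually indexed cache sees incoherent aliases. A simplified userspace sketch of just the arithmetic; the SHMLBA value is a placeholder and the per-mapping hash from get_offset() is omitted:

    #include <stdio.h>

    #define SHMLBA     0x4000UL   /* placeholder; the real value is per-arch */
    #define PAGE_SHIFT 12

    static int congruent(unsigned long addr, unsigned long align_offset)
    {
            return ((addr - align_offset) & (SHMLBA - 1)) == 0;
    }

    int main(void)
    {
            unsigned long off = 3UL << PAGE_SHIFT;  /* pgoff folded in */

            /* 0x40003000 is acceptable for this offset, 0x40002000 is not */
            printf("%d %d\n", congruent(0x40003000UL, off),
                              congruent(0x40002000UL, off));
            return 0;
    }
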
diff --git a/arch/parisc/kernel/unwind.c b/arch/parisc/kernel/unwind.c
index 76ed62ed785b..ddd988b267a9 100644
--- a/arch/parisc/kernel/unwind.c
+++ b/arch/parisc/kernel/unwind.c
@@ -168,7 +168,7 @@ void unwind_table_remove(struct unwind_table *table)
168} 168}
169 169
170/* Called from setup_arch to import the kernel unwind info */ 170/* Called from setup_arch to import the kernel unwind info */
171int unwind_init(void) 171int __init unwind_init(void)
172{ 172{
173 long start, stop; 173 long start, stop;
174 register unsigned long gp __asm__ ("r27"); 174 register unsigned long gp __asm__ ("r27");
@@ -233,7 +233,6 @@ static void unwind_frame_regs(struct unwind_frame_info *info)
233 e = find_unwind_entry(info->ip); 233 e = find_unwind_entry(info->ip);
234 if (e == NULL) { 234 if (e == NULL) {
235 unsigned long sp; 235 unsigned long sp;
236 extern char _stext[], _etext[];
237 236
238 dbg("Cannot find unwind entry for 0x%lx; forced unwinding\n", info->ip); 237 dbg("Cannot find unwind entry for 0x%lx; forced unwinding\n", info->ip);
239 238
@@ -281,8 +280,7 @@ static void unwind_frame_regs(struct unwind_frame_info *info)
281 break; 280 break;
282 info->prev_ip = tmp; 281 info->prev_ip = tmp;
283 sp = info->prev_sp; 282 sp = info->prev_sp;
284 } while (info->prev_ip < (unsigned long)_stext || 283 } while (!kernel_text_address(info->prev_ip));
285 info->prev_ip > (unsigned long)_etext);
286 284
287 info->rp = 0; 285 info->rp = 0;
288 286
@@ -435,9 +433,8 @@ unsigned long return_address(unsigned int level)
435 do { 433 do {
436 if (unwind_once(&info) < 0 || info.ip == 0) 434 if (unwind_once(&info) < 0 || info.ip == 0)
437 return 0; 435 return 0;
438 if (!__kernel_text_address(info.ip)) { 436 if (!kernel_text_address(info.ip))
439 return 0; 437 return 0;
440 }
441 } while (info.ip && level--); 438 } while (info.ip && level--);
442 439
443 return info.ip; 440 return info.ip;
diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S
index 4bb095a2f6fc..0dacc5ca555a 100644
--- a/arch/parisc/kernel/vmlinux.lds.S
+++ b/arch/parisc/kernel/vmlinux.lds.S
@@ -6,24 +6,19 @@
6 * Copyright (C) 2000 Michael Ang <mang with subcarrier.org> 6 * Copyright (C) 2000 Michael Ang <mang with subcarrier.org>
7 * Copyright (C) 2002 Randolph Chung <tausq with parisc-linux.org> 7 * Copyright (C) 2002 Randolph Chung <tausq with parisc-linux.org>
8 * Copyright (C) 2003 James Bottomley <jejb with parisc-linux.org> 8 * Copyright (C) 2003 James Bottomley <jejb with parisc-linux.org>
9 * Copyright (C) 2006 Helge Deller <deller@gmx.de> 9 * Copyright (C) 2006-2013 Helge Deller <deller@gmx.de>
10 * 10 */
11 * 11
12 * This program is free software; you can redistribute it and/or modify 12/*
13 * it under the terms of the GNU General Public License as published by 13 * Put page table entries (swapper_pg_dir) as the first thing in .bss. This
14 * the Free Software Foundation; either version 2 of the License, or 14 * will ensure that it has .bss alignment (PAGE_SIZE).
15 * (at your option) any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
25 */ 15 */
16#define BSS_FIRST_SECTIONS *(.data..vm0.pmd) \
17 *(.data..vm0.pgd) \
18 *(.data..vm0.pte)
19
26#include <asm-generic/vmlinux.lds.h> 20#include <asm-generic/vmlinux.lds.h>
21
27/* needed for the processor specific cache alignment size */ 22/* needed for the processor specific cache alignment size */
28#include <asm/cache.h> 23#include <asm/cache.h>
29#include <asm/page.h> 24#include <asm/page.h>
@@ -39,7 +34,7 @@ OUTPUT_FORMAT("elf64-hppa-linux")
39OUTPUT_ARCH(hppa:hppa2.0w) 34OUTPUT_ARCH(hppa:hppa2.0w)
40#endif 35#endif
41 36
42ENTRY(_stext) 37ENTRY(parisc_kernel_start)
43#ifndef CONFIG_64BIT 38#ifndef CONFIG_64BIT
44jiffies = jiffies_64 + 4; 39jiffies = jiffies_64 + 4;
45#else 40#else
@@ -49,11 +44,29 @@ SECTIONS
49{ 44{
50 . = KERNEL_BINARY_TEXT_START; 45 . = KERNEL_BINARY_TEXT_START;
51 46
47 __init_begin = .;
48 HEAD_TEXT_SECTION
49 INIT_TEXT_SECTION(8)
50
51 . = ALIGN(PAGE_SIZE);
52 INIT_DATA_SECTION(PAGE_SIZE)
53 /* we have to discard exit text and such at runtime, not link time */
54 .exit.text :
55 {
56 EXIT_TEXT
57 }
58 .exit.data :
59 {
60 EXIT_DATA
61 }
62 PERCPU_SECTION(8)
63 . = ALIGN(PAGE_SIZE);
64 __init_end = .;
65 /* freed after init ends here */
66
52 _text = .; /* Text and read-only data */ 67 _text = .; /* Text and read-only data */
53 .head ALIGN(16) : { 68 _stext = .;
54 HEAD_TEXT 69 .text ALIGN(PAGE_SIZE) : {
55 } = 0
56 .text ALIGN(16) : {
57 TEXT_TEXT 70 TEXT_TEXT
58 SCHED_TEXT 71 SCHED_TEXT
59 LOCK_TEXT 72 LOCK_TEXT
@@ -68,21 +81,28 @@ SECTIONS
68 *(.lock.text) /* out-of-line lock text */ 81 *(.lock.text) /* out-of-line lock text */
69 *(.gnu.warning) 82 *(.gnu.warning)
70 } 83 }
71 /* End of text section */ 84 . = ALIGN(PAGE_SIZE);
72 _etext = .; 85 _etext = .;
86 /* End of text section */
73 87
74 /* Start of data section */ 88 /* Start of data section */
75 _sdata = .; 89 _sdata = .;
76 90
77 RODATA 91 RO_DATA_SECTION(8)
78 92
79 /* writeable */ 93#ifdef CONFIG_64BIT
80 /* Make sure this is page aligned so 94 . = ALIGN(16);
81 * that we can properly leave these 95 /* Linkage tables */
82 * as writable 96 .opd : {
83 */ 97 *(.opd)
84 . = ALIGN(PAGE_SIZE); 98 } PROVIDE (__gp = .);
85 data_start = .; 99 .plt : {
100 *(.plt)
101 }
102 .dlt : {
103 *(.dlt)
104 }
105#endif
86 106
87 /* unwind info */ 107 /* unwind info */
88 .PARISC.unwind : { 108 .PARISC.unwind : {
@@ -91,7 +111,15 @@ SECTIONS
91 __stop___unwind = .; 111 __stop___unwind = .;
92 } 112 }
93 113
94 EXCEPTION_TABLE(16) 114 /* writeable */
115 /* Make sure this is page aligned so
116 * that we can properly leave these
117 * as writable
118 */
119 . = ALIGN(PAGE_SIZE);
120 data_start = .;
121
122 EXCEPTION_TABLE(8)
95 NOTES 123 NOTES
96 124
97 /* Data */ 125 /* Data */
@@ -107,54 +135,8 @@ SECTIONS
107 _edata = .; 135 _edata = .;
108 136
109 /* BSS */ 137 /* BSS */
110 __bss_start = .; 138 BSS_SECTION(PAGE_SIZE, PAGE_SIZE, 8)
111 /* page table entries need to be PAGE_SIZE aligned */
112 . = ALIGN(PAGE_SIZE);
113 .data..vmpages : {
114 *(.data..vm0.pmd)
115 *(.data..vm0.pgd)
116 *(.data..vm0.pte)
117 }
118 .bss : {
119 *(.bss)
120 *(COMMON)
121 }
122 __bss_stop = .;
123
124#ifdef CONFIG_64BIT
125 . = ALIGN(16);
126 /* Linkage tables */
127 .opd : {
128 *(.opd)
129 } PROVIDE (__gp = .);
130 .plt : {
131 *(.plt)
132 }
133 .dlt : {
134 *(.dlt)
135 }
136#endif
137 139
138 /* reserve space for interrupt stack by aligning __init* to 16k */
139 . = ALIGN(16384);
140 __init_begin = .;
141 INIT_TEXT_SECTION(16384)
142 . = ALIGN(PAGE_SIZE);
143 INIT_DATA_SECTION(16)
144 /* we have to discard exit text and such at runtime, not link time */
145 .exit.text :
146 {
147 EXIT_TEXT
148 }
149 .exit.data :
150 {
151 EXIT_DATA
152 }
153
154 PERCPU_SECTION(L1_CACHE_BYTES)
155 . = ALIGN(PAGE_SIZE);
156 __init_end = .;
157 /* freed after init ends here */
158 _end = . ; 140 _end = . ;
159 141
160 STABS_DEBUG 142 STABS_DEBUG
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
index b0f96c0e6316..96f8168cf4ec 100644
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -32,6 +32,7 @@
32#include <asm/sections.h> 32#include <asm/sections.h>
33 33
34extern int data_start; 34extern int data_start;
35extern void parisc_kernel_start(void); /* Kernel entry point in head.S */
35 36
36#if PT_NLEVELS == 3 37#if PT_NLEVELS == 3
37/* NOTE: This layout exactly conforms to the hybrid L2/L3 page table layout 38/* NOTE: This layout exactly conforms to the hybrid L2/L3 page table layout
@@ -324,8 +325,9 @@ static void __init setup_bootmem(void)
324 reserve_bootmem_node(NODE_DATA(0), 0UL, 325 reserve_bootmem_node(NODE_DATA(0), 0UL,
325 (unsigned long)(PAGE0->mem_free + 326 (unsigned long)(PAGE0->mem_free +
326 PDC_CONSOLE_IO_IODC_SIZE), BOOTMEM_DEFAULT); 327 PDC_CONSOLE_IO_IODC_SIZE), BOOTMEM_DEFAULT);
327 reserve_bootmem_node(NODE_DATA(0), __pa((unsigned long)_text), 328 reserve_bootmem_node(NODE_DATA(0), __pa(KERNEL_BINARY_TEXT_START),
328 (unsigned long)(_end - _text), BOOTMEM_DEFAULT); 329 (unsigned long)(_end - KERNEL_BINARY_TEXT_START),
330 BOOTMEM_DEFAULT);
329 reserve_bootmem_node(NODE_DATA(0), (bootmap_start_pfn << PAGE_SHIFT), 331 reserve_bootmem_node(NODE_DATA(0), (bootmap_start_pfn << PAGE_SHIFT),
330 ((bootmap_pfn - bootmap_start_pfn) << PAGE_SHIFT), 332 ((bootmap_pfn - bootmap_start_pfn) << PAGE_SHIFT),
331 BOOTMEM_DEFAULT); 333 BOOTMEM_DEFAULT);
@@ -378,6 +380,17 @@ static void __init setup_bootmem(void)
378 request_resource(&sysram_resources[0], &pdcdata_resource); 380 request_resource(&sysram_resources[0], &pdcdata_resource);
379} 381}
380 382
383static int __init parisc_text_address(unsigned long vaddr)
384{
385 static unsigned long head_ptr __initdata;
386
387 if (!head_ptr)
388 head_ptr = PAGE_MASK & (unsigned long)
389 dereference_function_descriptor(&parisc_kernel_start);
390
391 return core_kernel_text(vaddr) || vaddr == head_ptr;
392}
393
381static void __init map_pages(unsigned long start_vaddr, 394static void __init map_pages(unsigned long start_vaddr,
382 unsigned long start_paddr, unsigned long size, 395 unsigned long start_paddr, unsigned long size,
383 pgprot_t pgprot, int force) 396 pgprot_t pgprot, int force)
@@ -466,7 +479,7 @@ static void __init map_pages(unsigned long start_vaddr,
466 */ 479 */
467 if (force) 480 if (force)
468 pte = __mk_pte(address, pgprot); 481 pte = __mk_pte(address, pgprot);
469 else if (core_kernel_text(vaddr) && 482 else if (parisc_text_address(vaddr) &&
470 address != fv_addr) 483 address != fv_addr)
471 pte = __mk_pte(address, PAGE_KERNEL_EXEC); 484 pte = __mk_pte(address, PAGE_KERNEL_EXEC);
472 else 485 else
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index 8a2463670a5b..0f4344e6fbca 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -75,8 +75,10 @@ LDEMULATION := lppc
75GNUTARGET := powerpcle 75GNUTARGET := powerpcle
76MULTIPLEWORD := -mno-multiple 76MULTIPLEWORD := -mno-multiple
77else 77else
78ifeq ($(call cc-option-yn,-mbig-endian),y)
78override CC += -mbig-endian 79override CC += -mbig-endian
79override AS += -mbig-endian 80override AS += -mbig-endian
81endif
80override LD += -EB 82override LD += -EB
81LDEMULATION := ppc 83LDEMULATION := ppc
82GNUTARGET := powerpc 84GNUTARGET := powerpc
@@ -128,7 +130,12 @@ CFLAGS-$(CONFIG_POWER5_CPU) += $(call cc-option,-mcpu=power5)
128CFLAGS-$(CONFIG_POWER6_CPU) += $(call cc-option,-mcpu=power6) 130CFLAGS-$(CONFIG_POWER6_CPU) += $(call cc-option,-mcpu=power6)
129CFLAGS-$(CONFIG_POWER7_CPU) += $(call cc-option,-mcpu=power7) 131CFLAGS-$(CONFIG_POWER7_CPU) += $(call cc-option,-mcpu=power7)
130 132
133# Altivec option not allowed with e500mc64 in GCC.
134ifeq ($(CONFIG_ALTIVEC),y)
135E5500_CPU := -mcpu=powerpc64
136else
131E5500_CPU := $(call cc-option,-mcpu=e500mc64,-mcpu=powerpc64) 137E5500_CPU := $(call cc-option,-mcpu=e500mc64,-mcpu=powerpc64)
138endif
132CFLAGS-$(CONFIG_E5500_CPU) += $(E5500_CPU) 139CFLAGS-$(CONFIG_E5500_CPU) += $(E5500_CPU)
133CFLAGS-$(CONFIG_E6500_CPU) += $(call cc-option,-mcpu=e6500,$(E5500_CPU)) 140CFLAGS-$(CONFIG_E6500_CPU) += $(call cc-option,-mcpu=e6500,$(E5500_CPU))
134 141
diff --git a/arch/powerpc/boot/dts/mpc5121.dtsi b/arch/powerpc/boot/dts/mpc5121.dtsi
index bd14c00e5146..2d7cb04ac962 100644
--- a/arch/powerpc/boot/dts/mpc5121.dtsi
+++ b/arch/powerpc/boot/dts/mpc5121.dtsi
@@ -77,7 +77,6 @@
77 compatible = "fsl,mpc5121-immr"; 77 compatible = "fsl,mpc5121-immr";
78 #address-cells = <1>; 78 #address-cells = <1>;
79 #size-cells = <1>; 79 #size-cells = <1>;
80 #interrupt-cells = <2>;
81 ranges = <0x0 0x80000000 0x400000>; 80 ranges = <0x0 0x80000000 0x400000>;
82 reg = <0x80000000 0x400000>; 81 reg = <0x80000000 0x400000>;
83 bus-frequency = <66000000>; /* 66 MHz ips bus */ 82 bus-frequency = <66000000>; /* 66 MHz ips bus */
diff --git a/arch/powerpc/boot/dts/mpc5125twr.dts b/arch/powerpc/boot/dts/mpc5125twr.dts
index 4177b62240c2..a618dfc13e4c 100644
--- a/arch/powerpc/boot/dts/mpc5125twr.dts
+++ b/arch/powerpc/boot/dts/mpc5125twr.dts
@@ -58,7 +58,6 @@
58 compatible = "fsl,mpc5121-immr"; 58 compatible = "fsl,mpc5121-immr";
59 #address-cells = <1>; 59 #address-cells = <1>;
60 #size-cells = <1>; 60 #size-cells = <1>;
61 #interrupt-cells = <2>;
62 ranges = <0x0 0x80000000 0x400000>; 61 ranges = <0x0 0x80000000 0x400000>;
63 reg = <0x80000000 0x400000>; 62 reg = <0x80000000 0x400000>;
64 bus-frequency = <66000000>; // 66 MHz ips bus 63 bus-frequency = <66000000>; // 66 MHz ips bus
@@ -189,6 +188,10 @@
189 reg = <0xA000 0x1000>; 188 reg = <0xA000 0x1000>;
190 }; 189 };
191 190
191 // disable USB1 port
192 // TODO:
193 // correct pinmux config and fix USB3320 ulpi dependency
194 // before re-enabling it
192 usb@3000 { 195 usb@3000 {
193 compatible = "fsl,mpc5121-usb2-dr"; 196 compatible = "fsl,mpc5121-usb2-dr";
194 reg = <0x3000 0x400>; 197 reg = <0x3000 0x400>;
@@ -197,6 +200,7 @@
197 interrupts = <43 0x8>; 200 interrupts = <43 0x8>;
198 dr_mode = "host"; 201 dr_mode = "host";
199 phy_type = "ulpi"; 202 phy_type = "ulpi";
203 status = "disabled";
200 }; 204 };
201 205
202 // 5125 PSCs are not 52xx or 5121 PSC compatible 206 // 5125 PSCs are not 52xx or 5121 PSC compatible
diff --git a/arch/powerpc/boot/dts/xcalibur1501.dts b/arch/powerpc/boot/dts/xcalibur1501.dts
index cc00f4ddd9a7..c409cbafb126 100644
--- a/arch/powerpc/boot/dts/xcalibur1501.dts
+++ b/arch/powerpc/boot/dts/xcalibur1501.dts
@@ -637,14 +637,14 @@
637 tlu@2f000 { 637 tlu@2f000 {
638 compatible = "fsl,mpc8572-tlu", "fsl_tlu"; 638 compatible = "fsl,mpc8572-tlu", "fsl_tlu";
639 reg = <0x2f000 0x1000>; 639 reg = <0x2f000 0x1000>;
640 interupts = <61 2 >; 640 interrupts = <61 2>;
641 interrupt-parent = <&mpic>; 641 interrupt-parent = <&mpic>;
642 }; 642 };
643 643
644 tlu@15000 { 644 tlu@15000 {
645 compatible = "fsl,mpc8572-tlu", "fsl_tlu"; 645 compatible = "fsl,mpc8572-tlu", "fsl_tlu";
646 reg = <0x15000 0x1000>; 646 reg = <0x15000 0x1000>;
647 interupts = <75 2>; 647 interrupts = <75 2>;
648 interrupt-parent = <&mpic>; 648 interrupt-parent = <&mpic>;
649 }; 649 };
650 }; 650 };
diff --git a/arch/powerpc/boot/dts/xpedite5301.dts b/arch/powerpc/boot/dts/xpedite5301.dts
index 53c1c6a9752f..04cb410da48b 100644
--- a/arch/powerpc/boot/dts/xpedite5301.dts
+++ b/arch/powerpc/boot/dts/xpedite5301.dts
@@ -547,14 +547,14 @@
547 tlu@2f000 { 547 tlu@2f000 {
548 compatible = "fsl,mpc8572-tlu", "fsl_tlu"; 548 compatible = "fsl,mpc8572-tlu", "fsl_tlu";
549 reg = <0x2f000 0x1000>; 549 reg = <0x2f000 0x1000>;
550 interupts = <61 2 >; 550 interrupts = <61 2>;
551 interrupt-parent = <&mpic>; 551 interrupt-parent = <&mpic>;
552 }; 552 };
553 553
554 tlu@15000 { 554 tlu@15000 {
555 compatible = "fsl,mpc8572-tlu", "fsl_tlu"; 555 compatible = "fsl,mpc8572-tlu", "fsl_tlu";
556 reg = <0x15000 0x1000>; 556 reg = <0x15000 0x1000>;
557 interupts = <75 2>; 557 interrupts = <75 2>;
558 interrupt-parent = <&mpic>; 558 interrupt-parent = <&mpic>;
559 }; 559 };
560 }; 560 };
diff --git a/arch/powerpc/boot/dts/xpedite5330.dts b/arch/powerpc/boot/dts/xpedite5330.dts
index 215225983150..73f8620f1ce7 100644
--- a/arch/powerpc/boot/dts/xpedite5330.dts
+++ b/arch/powerpc/boot/dts/xpedite5330.dts
@@ -583,14 +583,14 @@
583 tlu@2f000 { 583 tlu@2f000 {
584 compatible = "fsl,mpc8572-tlu", "fsl_tlu"; 584 compatible = "fsl,mpc8572-tlu", "fsl_tlu";
585 reg = <0x2f000 0x1000>; 585 reg = <0x2f000 0x1000>;
586 interupts = <61 2 >; 586 interrupts = <61 2>;
587 interrupt-parent = <&mpic>; 587 interrupt-parent = <&mpic>;
588 }; 588 };
589 589
590 tlu@15000 { 590 tlu@15000 {
591 compatible = "fsl,mpc8572-tlu", "fsl_tlu"; 591 compatible = "fsl,mpc8572-tlu", "fsl_tlu";
592 reg = <0x15000 0x1000>; 592 reg = <0x15000 0x1000>;
593 interupts = <75 2>; 593 interrupts = <75 2>;
594 interrupt-parent = <&mpic>; 594 interrupt-parent = <&mpic>;
595 }; 595 };
596 }; 596 };
diff --git a/arch/powerpc/boot/dts/xpedite5370.dts b/arch/powerpc/boot/dts/xpedite5370.dts
index 11dbda10d756..cd0ea2b99362 100644
--- a/arch/powerpc/boot/dts/xpedite5370.dts
+++ b/arch/powerpc/boot/dts/xpedite5370.dts
@@ -545,14 +545,14 @@
545 tlu@2f000 { 545 tlu@2f000 {
546 compatible = "fsl,mpc8572-tlu", "fsl_tlu"; 546 compatible = "fsl,mpc8572-tlu", "fsl_tlu";
547 reg = <0x2f000 0x1000>; 547 reg = <0x2f000 0x1000>;
548 interupts = <61 2 >; 548 interrupts = <61 2>;
549 interrupt-parent = <&mpic>; 549 interrupt-parent = <&mpic>;
550 }; 550 };
551 551
552 tlu@15000 { 552 tlu@15000 {
553 compatible = "fsl,mpc8572-tlu", "fsl_tlu"; 553 compatible = "fsl,mpc8572-tlu", "fsl_tlu";
554 reg = <0x15000 0x1000>; 554 reg = <0x15000 0x1000>;
555 interupts = <75 2>; 555 interrupts = <75 2>;
556 interrupt-parent = <&mpic>; 556 interrupt-parent = <&mpic>;
557 }; 557 };
558 }; 558 };
diff --git a/arch/powerpc/boot/util.S b/arch/powerpc/boot/util.S
index 5143228e3e5f..6636b1d7821b 100644
--- a/arch/powerpc/boot/util.S
+++ b/arch/powerpc/boot/util.S
@@ -71,18 +71,32 @@ udelay:
71 add r4,r4,r5 71 add r4,r4,r5
72 addi r4,r4,-1 72 addi r4,r4,-1
73 divw r4,r4,r5 /* BUS ticks */ 73 divw r4,r4,r5 /* BUS ticks */
74#ifdef CONFIG_8xx
751: mftbu r5
76 mftb r6
77 mftbu r7
78#else
741: mfspr r5, SPRN_TBRU 791: mfspr r5, SPRN_TBRU
75 mfspr r6, SPRN_TBRL 80 mfspr r6, SPRN_TBRL
76 mfspr r7, SPRN_TBRU 81 mfspr r7, SPRN_TBRU
82#endif
77 cmpw 0,r5,r7 83 cmpw 0,r5,r7
78 bne 1b /* Get [synced] base time */ 84 bne 1b /* Get [synced] base time */
79 addc r9,r6,r4 /* Compute end time */ 85 addc r9,r6,r4 /* Compute end time */
80 addze r8,r5 86 addze r8,r5
87#ifdef CONFIG_8xx
882: mftbu r5
89#else
812: mfspr r5, SPRN_TBRU 902: mfspr r5, SPRN_TBRU
91#endif
82 cmpw 0,r5,r8 92 cmpw 0,r5,r8
83 blt 2b 93 blt 2b
84 bgt 3f 94 bgt 3f
95#ifdef CONFIG_8xx
96 mftb r6
97#else
85 mfspr r6, SPRN_TBRL 98 mfspr r6, SPRN_TBRL
99#endif
86 cmpw 0,r6,r9 100 cmpw 0,r6,r9
87 blt 2b 101 blt 2b
883: blr 1023: blr
diff --git a/arch/powerpc/configs/52xx/cm5200_defconfig b/arch/powerpc/configs/52xx/cm5200_defconfig
index 69b57daf402e..0b88c7b30bb9 100644
--- a/arch/powerpc/configs/52xx/cm5200_defconfig
+++ b/arch/powerpc/configs/52xx/cm5200_defconfig
@@ -12,7 +12,6 @@ CONFIG_EXPERT=y
12CONFIG_PPC_MPC52xx=y 12CONFIG_PPC_MPC52xx=y
13CONFIG_PPC_MPC5200_SIMPLE=y 13CONFIG_PPC_MPC5200_SIMPLE=y
14# CONFIG_PPC_PMAC is not set 14# CONFIG_PPC_PMAC is not set
15CONFIG_PPC_BESTCOMM=y
16CONFIG_SPARSE_IRQ=y 15CONFIG_SPARSE_IRQ=y
17CONFIG_PM=y 16CONFIG_PM=y
18# CONFIG_PCI is not set 17# CONFIG_PCI is not set
@@ -71,6 +70,8 @@ CONFIG_USB_DEVICEFS=y
71CONFIG_USB_OHCI_HCD=y 70CONFIG_USB_OHCI_HCD=y
72CONFIG_USB_OHCI_HCD_PPC_OF_BE=y 71CONFIG_USB_OHCI_HCD_PPC_OF_BE=y
73CONFIG_USB_STORAGE=y 72CONFIG_USB_STORAGE=y
73CONFIG_DMADEVICES=y
74CONFIG_PPC_BESTCOMM=y
74CONFIG_EXT2_FS=y 75CONFIG_EXT2_FS=y
75CONFIG_EXT3_FS=y 76CONFIG_EXT3_FS=y
76# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set 77# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
diff --git a/arch/powerpc/configs/52xx/lite5200b_defconfig b/arch/powerpc/configs/52xx/lite5200b_defconfig
index f3638ae0a627..104a332e79ab 100644
--- a/arch/powerpc/configs/52xx/lite5200b_defconfig
+++ b/arch/powerpc/configs/52xx/lite5200b_defconfig
@@ -15,7 +15,6 @@ CONFIG_PPC_MPC52xx=y
15CONFIG_PPC_MPC5200_SIMPLE=y 15CONFIG_PPC_MPC5200_SIMPLE=y
16CONFIG_PPC_LITE5200=y 16CONFIG_PPC_LITE5200=y
17# CONFIG_PPC_PMAC is not set 17# CONFIG_PPC_PMAC is not set
18CONFIG_PPC_BESTCOMM=y
19CONFIG_NO_HZ=y 18CONFIG_NO_HZ=y
20CONFIG_HIGH_RES_TIMERS=y 19CONFIG_HIGH_RES_TIMERS=y
21CONFIG_SPARSE_IRQ=y 20CONFIG_SPARSE_IRQ=y
@@ -59,6 +58,8 @@ CONFIG_I2C_CHARDEV=y
59CONFIG_I2C_MPC=y 58CONFIG_I2C_MPC=y
60# CONFIG_HWMON is not set 59# CONFIG_HWMON is not set
61CONFIG_VIDEO_OUTPUT_CONTROL=m 60CONFIG_VIDEO_OUTPUT_CONTROL=m
61CONFIG_DMADEVICES=y
62CONFIG_PPC_BESTCOMM=y
62CONFIG_EXT2_FS=y 63CONFIG_EXT2_FS=y
63CONFIG_EXT3_FS=y 64CONFIG_EXT3_FS=y
64# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set 65# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
diff --git a/arch/powerpc/configs/52xx/motionpro_defconfig b/arch/powerpc/configs/52xx/motionpro_defconfig
index 0c7de9620ea6..0d13ad7e4478 100644
--- a/arch/powerpc/configs/52xx/motionpro_defconfig
+++ b/arch/powerpc/configs/52xx/motionpro_defconfig
@@ -12,7 +12,6 @@ CONFIG_EXPERT=y
12CONFIG_PPC_MPC52xx=y 12CONFIG_PPC_MPC52xx=y
13CONFIG_PPC_MPC5200_SIMPLE=y 13CONFIG_PPC_MPC5200_SIMPLE=y
14# CONFIG_PPC_PMAC is not set 14# CONFIG_PPC_PMAC is not set
15CONFIG_PPC_BESTCOMM=y
16CONFIG_SPARSE_IRQ=y 15CONFIG_SPARSE_IRQ=y
17CONFIG_PM=y 16CONFIG_PM=y
18# CONFIG_PCI is not set 17# CONFIG_PCI is not set
@@ -84,6 +83,8 @@ CONFIG_LEDS_TRIGGERS=y
84CONFIG_LEDS_TRIGGER_TIMER=y 83CONFIG_LEDS_TRIGGER_TIMER=y
85CONFIG_RTC_CLASS=y 84CONFIG_RTC_CLASS=y
86CONFIG_RTC_DRV_DS1307=y 85CONFIG_RTC_DRV_DS1307=y
86CONFIG_DMADEVICES=y
87CONFIG_PPC_BESTCOMM=y
87CONFIG_EXT2_FS=y 88CONFIG_EXT2_FS=y
88CONFIG_EXT3_FS=y 89CONFIG_EXT3_FS=y
89# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set 90# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
diff --git a/arch/powerpc/configs/52xx/pcm030_defconfig b/arch/powerpc/configs/52xx/pcm030_defconfig
index 22e719575c60..430aa182fa1c 100644
--- a/arch/powerpc/configs/52xx/pcm030_defconfig
+++ b/arch/powerpc/configs/52xx/pcm030_defconfig
@@ -21,7 +21,6 @@ CONFIG_MODULE_UNLOAD=y
21CONFIG_PPC_MPC52xx=y 21CONFIG_PPC_MPC52xx=y
22CONFIG_PPC_MPC5200_SIMPLE=y 22CONFIG_PPC_MPC5200_SIMPLE=y
23# CONFIG_PPC_PMAC is not set 23# CONFIG_PPC_PMAC is not set
24CONFIG_PPC_BESTCOMM=y
25CONFIG_NO_HZ=y 24CONFIG_NO_HZ=y
26CONFIG_HIGH_RES_TIMERS=y 25CONFIG_HIGH_RES_TIMERS=y
27CONFIG_HZ_100=y 26CONFIG_HZ_100=y
@@ -87,6 +86,8 @@ CONFIG_USB_OHCI_HCD_PPC_OF_BE=y
87CONFIG_USB_STORAGE=m 86CONFIG_USB_STORAGE=m
88CONFIG_RTC_CLASS=y 87CONFIG_RTC_CLASS=y
89CONFIG_RTC_DRV_PCF8563=m 88CONFIG_RTC_DRV_PCF8563=m
89CONFIG_DMADEVICES=y
90CONFIG_PPC_BESTCOMM=y
90CONFIG_EXT2_FS=m 91CONFIG_EXT2_FS=m
91CONFIG_EXT3_FS=m 92CONFIG_EXT3_FS=m
92# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set 93# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
diff --git a/arch/powerpc/configs/52xx/tqm5200_defconfig b/arch/powerpc/configs/52xx/tqm5200_defconfig
index 716a37be16e3..7af4c5bb7c63 100644
--- a/arch/powerpc/configs/52xx/tqm5200_defconfig
+++ b/arch/powerpc/configs/52xx/tqm5200_defconfig
@@ -17,7 +17,6 @@ CONFIG_PPC_MPC52xx=y
17CONFIG_PPC_MPC5200_SIMPLE=y 17CONFIG_PPC_MPC5200_SIMPLE=y
18CONFIG_PPC_MPC5200_BUGFIX=y 18CONFIG_PPC_MPC5200_BUGFIX=y
19# CONFIG_PPC_PMAC is not set 19# CONFIG_PPC_PMAC is not set
20CONFIG_PPC_BESTCOMM=y
21CONFIG_PM=y 20CONFIG_PM=y
22# CONFIG_PCI is not set 21# CONFIG_PCI is not set
23CONFIG_NET=y 22CONFIG_NET=y
@@ -86,6 +85,8 @@ CONFIG_USB_STORAGE=y
86CONFIG_RTC_CLASS=y 85CONFIG_RTC_CLASS=y
87CONFIG_RTC_DRV_DS1307=y 86CONFIG_RTC_DRV_DS1307=y
88CONFIG_RTC_DRV_DS1374=y 87CONFIG_RTC_DRV_DS1374=y
88CONFIG_DMADEVICES=y
89CONFIG_PPC_BESTCOMM=y
89CONFIG_EXT2_FS=y 90CONFIG_EXT2_FS=y
90CONFIG_EXT3_FS=y 91CONFIG_EXT3_FS=y
91# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set 92# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
diff --git a/arch/powerpc/configs/mpc5200_defconfig b/arch/powerpc/configs/mpc5200_defconfig
index 6640a35bebb7..8b682d1cf4d6 100644
--- a/arch/powerpc/configs/mpc5200_defconfig
+++ b/arch/powerpc/configs/mpc5200_defconfig
@@ -15,7 +15,6 @@ CONFIG_PPC_MEDIA5200=y
15CONFIG_PPC_MPC5200_BUGFIX=y 15CONFIG_PPC_MPC5200_BUGFIX=y
16CONFIG_PPC_MPC5200_LPBFIFO=m 16CONFIG_PPC_MPC5200_LPBFIFO=m
17# CONFIG_PPC_PMAC is not set 17# CONFIG_PPC_PMAC is not set
18CONFIG_PPC_BESTCOMM=y
19CONFIG_SIMPLE_GPIO=y 18CONFIG_SIMPLE_GPIO=y
20CONFIG_NO_HZ=y 19CONFIG_NO_HZ=y
21CONFIG_HIGH_RES_TIMERS=y 20CONFIG_HIGH_RES_TIMERS=y
@@ -125,6 +124,8 @@ CONFIG_RTC_CLASS=y
125CONFIG_RTC_DRV_DS1307=y 124CONFIG_RTC_DRV_DS1307=y
126CONFIG_RTC_DRV_DS1374=y 125CONFIG_RTC_DRV_DS1374=y
127CONFIG_RTC_DRV_PCF8563=m 126CONFIG_RTC_DRV_PCF8563=m
127CONFIG_DMADEVICES=y
128CONFIG_PPC_BESTCOMM=y
128CONFIG_EXT2_FS=y 129CONFIG_EXT2_FS=y
129CONFIG_EXT3_FS=y 130CONFIG_EXT3_FS=y
130# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set 131# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
diff --git a/arch/powerpc/configs/pasemi_defconfig b/arch/powerpc/configs/pasemi_defconfig
index bd8a6f71944f..cec044a3ff69 100644
--- a/arch/powerpc/configs/pasemi_defconfig
+++ b/arch/powerpc/configs/pasemi_defconfig
@@ -2,7 +2,6 @@ CONFIG_PPC64=y
2CONFIG_ALTIVEC=y 2CONFIG_ALTIVEC=y
3CONFIG_SMP=y 3CONFIG_SMP=y
4CONFIG_NR_CPUS=2 4CONFIG_NR_CPUS=2
5CONFIG_EXPERIMENTAL=y
6CONFIG_SYSVIPC=y 5CONFIG_SYSVIPC=y
7CONFIG_NO_HZ=y 6CONFIG_NO_HZ=y
8CONFIG_HIGH_RES_TIMERS=y 7CONFIG_HIGH_RES_TIMERS=y
@@ -45,8 +44,9 @@ CONFIG_INET_AH=y
45CONFIG_INET_ESP=y 44CONFIG_INET_ESP=y
46# CONFIG_IPV6 is not set 45# CONFIG_IPV6 is not set
47CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 46CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
47CONFIG_DEVTMPFS=y
48CONFIG_DEVTMPFS_MOUNT=y
48CONFIG_MTD=y 49CONFIG_MTD=y
49CONFIG_MTD_CHAR=y
50CONFIG_MTD_BLOCK=y 50CONFIG_MTD_BLOCK=y
51CONFIG_MTD_SLRAM=y 51CONFIG_MTD_SLRAM=y
52CONFIG_MTD_PHRAM=y 52CONFIG_MTD_PHRAM=y
@@ -88,7 +88,6 @@ CONFIG_BLK_DEV_DM=y
88CONFIG_DM_CRYPT=y 88CONFIG_DM_CRYPT=y
89CONFIG_NETDEVICES=y 89CONFIG_NETDEVICES=y
90CONFIG_DUMMY=y 90CONFIG_DUMMY=y
91CONFIG_MII=y
92CONFIG_TIGON3=y 91CONFIG_TIGON3=y
93CONFIG_E1000=y 92CONFIG_E1000=y
94CONFIG_PASEMI_MAC=y 93CONFIG_PASEMI_MAC=y
@@ -174,8 +173,8 @@ CONFIG_NLS_CODEPAGE_437=y
174CONFIG_NLS_ISO8859_1=y 173CONFIG_NLS_ISO8859_1=y
175CONFIG_CRC_CCITT=y 174CONFIG_CRC_CCITT=y
176CONFIG_PRINTK_TIME=y 175CONFIG_PRINTK_TIME=y
177CONFIG_MAGIC_SYSRQ=y
178CONFIG_DEBUG_FS=y 176CONFIG_DEBUG_FS=y
177CONFIG_MAGIC_SYSRQ=y
179CONFIG_DEBUG_KERNEL=y 178CONFIG_DEBUG_KERNEL=y
180CONFIG_DETECT_HUNG_TASK=y 179CONFIG_DETECT_HUNG_TASK=y
181# CONFIG_SCHED_DEBUG is not set 180# CONFIG_SCHED_DEBUG is not set
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index 894662a5d4d5..243ce69ad685 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -284,7 +284,7 @@ do_kvm_##n: \
284 subi r1,r1,INT_FRAME_SIZE; /* alloc frame on kernel stack */ \ 284 subi r1,r1,INT_FRAME_SIZE; /* alloc frame on kernel stack */ \
285 beq- 1f; \ 285 beq- 1f; \
286 ld r1,PACAKSAVE(r13); /* kernel stack to use */ \ 286 ld r1,PACAKSAVE(r13); /* kernel stack to use */ \
2871: cmpdi cr1,r1,0; /* check if r1 is in userspace */ \ 2871: cmpdi cr1,r1,-INT_FRAME_SIZE; /* check if r1 is in userspace */ \
288 blt+ cr1,3f; /* abort if it is */ \ 288 blt+ cr1,3f; /* abort if it is */ \
289 li r1,(n); /* will be reloaded later */ \ 289 li r1,(n); /* will be reloaded later */ \
290 sth r1,PACA_TRAP_SAVE(r13); \ 290 sth r1,PACA_TRAP_SAVE(r13); \
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index 4a594b76674d..bc23b1ba7980 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -192,6 +192,10 @@ extern void kvmppc_load_up_vsx(void);
192extern u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst); 192extern u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst);
193extern ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst); 193extern ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst);
194extern int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd); 194extern int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd);
195extern void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu,
196 struct kvm_vcpu *vcpu);
197extern void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
198 struct kvmppc_book3s_shadow_vcpu *svcpu);
195 199
196static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu) 200static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu)
197{ 201{
diff --git a/arch/powerpc/include/asm/kvm_book3s_asm.h b/arch/powerpc/include/asm/kvm_book3s_asm.h
index 0bd9348a4db9..192917d2239c 100644
--- a/arch/powerpc/include/asm/kvm_book3s_asm.h
+++ b/arch/powerpc/include/asm/kvm_book3s_asm.h
@@ -79,6 +79,7 @@ struct kvmppc_host_state {
79 ulong vmhandler; 79 ulong vmhandler;
80 ulong scratch0; 80 ulong scratch0;
81 ulong scratch1; 81 ulong scratch1;
82 ulong scratch2;
82 u8 in_guest; 83 u8 in_guest;
83 u8 restore_hid5; 84 u8 restore_hid5;
84 u8 napping; 85 u8 napping;
@@ -106,6 +107,7 @@ struct kvmppc_host_state {
106}; 107};
107 108
108struct kvmppc_book3s_shadow_vcpu { 109struct kvmppc_book3s_shadow_vcpu {
110 bool in_use;
109 ulong gpr[14]; 111 ulong gpr[14];
110 u32 cr; 112 u32 cr;
111 u32 xer; 113 u32 xer;
diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h
index 033c06be1d84..7bdcf340016c 100644
--- a/arch/powerpc/include/asm/opal.h
+++ b/arch/powerpc/include/asm/opal.h
@@ -720,13 +720,13 @@ int64_t opal_pci_next_error(uint64_t phb_id, uint64_t *first_frozen_pe,
720int64_t opal_pci_poll(uint64_t phb_id); 720int64_t opal_pci_poll(uint64_t phb_id);
721int64_t opal_return_cpu(void); 721int64_t opal_return_cpu(void);
722 722
723int64_t opal_xscom_read(uint32_t gcid, uint32_t pcb_addr, uint64_t *val); 723int64_t opal_xscom_read(uint32_t gcid, uint32_t pcb_addr, __be64 *val);
724int64_t opal_xscom_write(uint32_t gcid, uint32_t pcb_addr, uint64_t val); 724int64_t opal_xscom_write(uint32_t gcid, uint32_t pcb_addr, uint64_t val);
725 725
726int64_t opal_lpc_write(uint32_t chip_id, enum OpalLPCAddressType addr_type, 726int64_t opal_lpc_write(uint32_t chip_id, enum OpalLPCAddressType addr_type,
727 uint32_t addr, uint32_t data, uint32_t sz); 727 uint32_t addr, uint32_t data, uint32_t sz);
728int64_t opal_lpc_read(uint32_t chip_id, enum OpalLPCAddressType addr_type, 728int64_t opal_lpc_read(uint32_t chip_id, enum OpalLPCAddressType addr_type,
729 uint32_t addr, uint32_t *data, uint32_t sz); 729 uint32_t addr, __be32 *data, uint32_t sz);
730int64_t opal_validate_flash(uint64_t buffer, uint32_t *size, uint32_t *result); 730int64_t opal_validate_flash(uint64_t buffer, uint32_t *size, uint32_t *result);
731int64_t opal_manage_flash(uint8_t op); 731int64_t opal_manage_flash(uint8_t op);
732int64_t opal_update_flash(uint64_t blk_list); 732int64_t opal_update_flash(uint64_t blk_list);
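
Note: the prototype change above only annotates what OPAL already does, namely return the value in big-endian byte order. A hedged sketch of how a caller is expected to consume it; the wrapper name and the -EIO mapping are illustrative, not the actual opal-xscom code:

    #include <linux/errno.h>
    #include <asm/opal.h>

    /* Illustrative wrapper: convert the big-endian value OPAL wrote into
     * native byte order before handing it to the rest of the kernel. */
    static int xscom_read_native(uint32_t gcid, uint32_t pcb_addr, u64 *out)
    {
            __be64 raw;
            int64_t rc = opal_xscom_read(gcid, pcb_addr, &raw);

            if (rc != OPAL_SUCCESS)
                    return -EIO;
            *out = be64_to_cpu(raw);
            return 0;
    }
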
diff --git a/arch/powerpc/include/asm/pgalloc-32.h b/arch/powerpc/include/asm/pgalloc-32.h
index 27b2386f738a..842846c1b711 100644
--- a/arch/powerpc/include/asm/pgalloc-32.h
+++ b/arch/powerpc/include/asm/pgalloc-32.h
@@ -84,10 +84,8 @@ static inline void pgtable_free_tlb(struct mmu_gather *tlb,
84static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table, 84static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
85 unsigned long address) 85 unsigned long address)
86{ 86{
87 struct page *page = page_address(table);
88
89 tlb_flush_pgtable(tlb, address); 87 tlb_flush_pgtable(tlb, address);
90 pgtable_page_dtor(page); 88 pgtable_page_dtor(table);
91 pgtable_free_tlb(tlb, page, 0); 89 pgtable_free_tlb(tlb, page_address(table), 0);
92} 90}
93#endif /* _ASM_POWERPC_PGALLOC_32_H */ 91#endif /* _ASM_POWERPC_PGALLOC_32_H */
diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
index 16cb92d215d2..4b0be20fcbfd 100644
--- a/arch/powerpc/include/asm/pgalloc-64.h
+++ b/arch/powerpc/include/asm/pgalloc-64.h
@@ -16,6 +16,7 @@ struct vmemmap_backing {
16 unsigned long phys; 16 unsigned long phys;
17 unsigned long virt_addr; 17 unsigned long virt_addr;
18}; 18};
19extern struct vmemmap_backing *vmemmap_list;
19 20
20/* 21/*
21 * Functions that deal with pagetables that could be at any level of 22 * Functions that deal with pagetables that could be at any level of
@@ -147,11 +148,9 @@ static inline void pgtable_free_tlb(struct mmu_gather *tlb,
147static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table, 148static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
148 unsigned long address) 149 unsigned long address)
149{ 150{
150 struct page *page = page_address(table);
151
152 tlb_flush_pgtable(tlb, address); 151 tlb_flush_pgtable(tlb, address);
153 pgtable_page_dtor(page); 152 pgtable_page_dtor(table);
154 pgtable_free_tlb(tlb, page, 0); 153 pgtable_free_tlb(tlb, page_address(table), 0);
155} 154}
156 155
157#else /* if CONFIG_PPC_64K_PAGES */ 156#else /* if CONFIG_PPC_64K_PAGES */
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
index 3c1acc31a092..f595b98079ee 100644
--- a/arch/powerpc/include/asm/ppc_asm.h
+++ b/arch/powerpc/include/asm/ppc_asm.h
@@ -366,6 +366,8 @@ BEGIN_FTR_SECTION_NESTED(96); \
366 cmpwi dest,0; \ 366 cmpwi dest,0; \
367 beq- 90b; \ 367 beq- 90b; \
368END_FTR_SECTION_NESTED(CPU_FTR_CELL_TB_BUG, CPU_FTR_CELL_TB_BUG, 96) 368END_FTR_SECTION_NESTED(CPU_FTR_CELL_TB_BUG, CPU_FTR_CELL_TB_BUG, 96)
369#elif defined(CONFIG_8xx)
370#define MFTB(dest) mftb dest
369#else 371#else
370#define MFTB(dest) mfspr dest, SPRN_TBRL 372#define MFTB(dest) mfspr dest, SPRN_TBRL
371#endif 373#endif
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 5c45787d551e..fa8388ed94c5 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -1174,12 +1174,19 @@
1174 1174
1175#else /* __powerpc64__ */ 1175#else /* __powerpc64__ */
1176 1176
1177#if defined(CONFIG_8xx)
1178#define mftbl() ({unsigned long rval; \
1179 asm volatile("mftbl %0" : "=r" (rval)); rval;})
1180#define mftbu() ({unsigned long rval; \
1181 asm volatile("mftbu %0" : "=r" (rval)); rval;})
1182#else
1177#define mftbl() ({unsigned long rval; \ 1183#define mftbl() ({unsigned long rval; \
1178 asm volatile("mfspr %0, %1" : "=r" (rval) : \ 1184 asm volatile("mfspr %0, %1" : "=r" (rval) : \
1179 "i" (SPRN_TBRL)); rval;}) 1185 "i" (SPRN_TBRL)); rval;})
1180#define mftbu() ({unsigned long rval; \ 1186#define mftbu() ({unsigned long rval; \
1181 asm volatile("mfspr %0, %1" : "=r" (rval) : \ 1187 asm volatile("mfspr %0, %1" : "=r" (rval) : \
1182 "i" (SPRN_TBRU)); rval;}) 1188 "i" (SPRN_TBRU)); rval;})
1189#endif
1183#endif /* !__powerpc64__ */ 1190#endif /* !__powerpc64__ */
1184 1191
1185#define mttbl(v) asm volatile("mttbl %0":: "r"(v)) 1192#define mttbl(v) asm volatile("mttbl %0":: "r"(v))
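
Note: on 32-bit parts the timebase is read as two halves, which is why boot/util.S above and the 32-bit VDSO below loop over the upper half. A sketch of that loop written with the mftbu()/mftbl() accessors defined here; read_tb64() is a made-up name, the retry pattern itself is the standard one:

    /* Sketch: re-read the upper half until it is stable around the
     * lower-half read, so a carry between the two reads is not missed. */
    static inline unsigned long long read_tb64(void)
    {
            unsigned long hi, lo, chk;

            do {
                    hi  = mftbu();
                    lo  = mftbl();
                    chk = mftbu();
            } while (hi != chk);

            return ((unsigned long long)hi << 32) | lo;
    }
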
diff --git a/arch/powerpc/include/asm/switch_to.h b/arch/powerpc/include/asm/switch_to.h
index 9ee12610af02..aace90547614 100644
--- a/arch/powerpc/include/asm/switch_to.h
+++ b/arch/powerpc/include/asm/switch_to.h
@@ -35,7 +35,7 @@ extern void giveup_vsx(struct task_struct *);
35extern void enable_kernel_spe(void); 35extern void enable_kernel_spe(void);
36extern void giveup_spe(struct task_struct *); 36extern void giveup_spe(struct task_struct *);
37extern void load_up_spe(struct task_struct *); 37extern void load_up_spe(struct task_struct *);
38extern void switch_booke_debug_regs(struct thread_struct *new_thread); 38extern void switch_booke_debug_regs(struct debug_reg *new_debug);
39 39
40#ifndef CONFIG_SMP 40#ifndef CONFIG_SMP
41extern void discard_lazy_cpu_state(void); 41extern void discard_lazy_cpu_state(void);
diff --git a/arch/powerpc/include/asm/timex.h b/arch/powerpc/include/asm/timex.h
index 18908caa1f3b..2cf846edb3fc 100644
--- a/arch/powerpc/include/asm/timex.h
+++ b/arch/powerpc/include/asm/timex.h
@@ -29,7 +29,11 @@ static inline cycles_t get_cycles(void)
29 ret = 0; 29 ret = 0;
30 30
31 __asm__ __volatile__( 31 __asm__ __volatile__(
32#ifdef CONFIG_8xx
33 "97: mftb %0\n"
34#else
32 "97: mfspr %0, %2\n" 35 "97: mfspr %0, %2\n"
36#endif
33 "99:\n" 37 "99:\n"
34 ".section __ftr_fixup,\"a\"\n" 38 ".section __ftr_fixup,\"a\"\n"
35 ".align 2\n" 39 ".align 2\n"
@@ -41,7 +45,11 @@ static inline cycles_t get_cycles(void)
41 " .long 0\n" 45 " .long 0\n"
42 " .long 0\n" 46 " .long 0\n"
43 ".previous" 47 ".previous"
48#ifdef CONFIG_8xx
49 : "=r" (ret) : "i" (CPU_FTR_601));
50#else
44 : "=r" (ret) : "i" (CPU_FTR_601), "i" (SPRN_TBRL)); 51 : "=r" (ret) : "i" (CPU_FTR_601), "i" (SPRN_TBRL));
52#endif
45 return ret; 53 return ret;
46#endif 54#endif
47} 55}
diff --git a/arch/powerpc/include/asm/unaligned.h b/arch/powerpc/include/asm/unaligned.h
index 5f1b1e3c2137..8296381ae432 100644
--- a/arch/powerpc/include/asm/unaligned.h
+++ b/arch/powerpc/include/asm/unaligned.h
@@ -4,13 +4,18 @@
4#ifdef __KERNEL__ 4#ifdef __KERNEL__
5 5
6/* 6/*
7 * The PowerPC can do unaligned accesses itself in big endian mode. 7 * The PowerPC can do unaligned accesses itself based on its endian mode.
8 */ 8 */
9#include <linux/unaligned/access_ok.h> 9#include <linux/unaligned/access_ok.h>
10#include <linux/unaligned/generic.h> 10#include <linux/unaligned/generic.h>
11 11
12#ifdef __LITTLE_ENDIAN__
13#define get_unaligned __get_unaligned_le
14#define put_unaligned __put_unaligned_le
15#else
12#define get_unaligned __get_unaligned_be 16#define get_unaligned __get_unaligned_be
13#define put_unaligned __put_unaligned_be 17#define put_unaligned __put_unaligned_be
18#endif
14 19
15#endif /* __KERNEL__ */ 20#endif /* __KERNEL__ */
16#endif /* _ASM_POWERPC_UNALIGNED_H */ 21#endif /* _ASM_POWERPC_UNALIGNED_H */
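
Note: get_unaligned()/put_unaligned() without a size suffix follow CPU byte order, which is what the #ifdef above switches. A minimal sketch of a caller, assuming a hypothetical 32-bit length field at an odd offset; code that needs a fixed wire order should use get_unaligned_le32()/get_unaligned_be32() instead:

    #include <asm/unaligned.h>

    /* Hypothetical helper: buf + 3 is not 4-byte aligned, so a plain
     * pointer dereference is not safe in every configuration. */
    static u32 frame_len(const u8 *buf)
    {
            return get_unaligned((const u32 *)(buf + 3));
    }
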
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 2ea5cc033ec8..d3de01066f7d 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -576,6 +576,7 @@ int main(void)
576 HSTATE_FIELD(HSTATE_VMHANDLER, vmhandler); 576 HSTATE_FIELD(HSTATE_VMHANDLER, vmhandler);
577 HSTATE_FIELD(HSTATE_SCRATCH0, scratch0); 577 HSTATE_FIELD(HSTATE_SCRATCH0, scratch0);
578 HSTATE_FIELD(HSTATE_SCRATCH1, scratch1); 578 HSTATE_FIELD(HSTATE_SCRATCH1, scratch1);
579 HSTATE_FIELD(HSTATE_SCRATCH2, scratch2);
579 HSTATE_FIELD(HSTATE_IN_GUEST, in_guest); 580 HSTATE_FIELD(HSTATE_IN_GUEST, in_guest);
580 HSTATE_FIELD(HSTATE_RESTORE_HID5, restore_hid5); 581 HSTATE_FIELD(HSTATE_RESTORE_HID5, restore_hid5);
581 HSTATE_FIELD(HSTATE_NAPPING, napping); 582 HSTATE_FIELD(HSTATE_NAPPING, napping);
diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c
index 779a78c26435..11c1d069d920 100644
--- a/arch/powerpc/kernel/crash_dump.c
+++ b/arch/powerpc/kernel/crash_dump.c
@@ -124,15 +124,15 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
124void crash_free_reserved_phys_range(unsigned long begin, unsigned long end) 124void crash_free_reserved_phys_range(unsigned long begin, unsigned long end)
125{ 125{
126 unsigned long addr; 126 unsigned long addr;
127 const u32 *basep, *sizep; 127 const __be32 *basep, *sizep;
128 unsigned int rtas_start = 0, rtas_end = 0; 128 unsigned int rtas_start = 0, rtas_end = 0;
129 129
130 basep = of_get_property(rtas.dev, "linux,rtas-base", NULL); 130 basep = of_get_property(rtas.dev, "linux,rtas-base", NULL);
131 sizep = of_get_property(rtas.dev, "rtas-size", NULL); 131 sizep = of_get_property(rtas.dev, "rtas-size", NULL);
132 132
133 if (basep && sizep) { 133 if (basep && sizep) {
134 rtas_start = *basep; 134 rtas_start = be32_to_cpup(basep);
135 rtas_end = *basep + *sizep; 135 rtas_end = rtas_start + be32_to_cpup(sizep);
136 } 136 }
137 137
138 for (addr = begin; addr < end; addr += PAGE_SIZE) { 138 for (addr = begin; addr < end; addr += PAGE_SIZE) {
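
Note: device tree cells are always stored big-endian, so on a little-endian kernel a plain dereference of the property pointer returns byte-swapped data; be32_to_cpup() is the accessor that is correct either way. A condensed sketch of the idiom, with an illustrative helper name; the same pattern shows up again in the setup-common.c and smp.c hunks further down:

    #include <linux/of.h>

    /* Sketch: read one 32-bit cell from a property, byte-swapping as needed. */
    static unsigned long rtas_base_from_dt(struct device_node *rtas_node)
    {
            const __be32 *basep =
                    of_get_property(rtas_node, "linux,rtas-base", NULL);

            return basep ? be32_to_cpup(basep) : 0;
    }
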
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 2ae41aba4053..4f0946de2d5c 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -80,6 +80,7 @@ END_FTR_SECTION(0, 1)
80 * of the function that the cpu should jump to to continue 80 * of the function that the cpu should jump to to continue
81 * initialization. 81 * initialization.
82 */ 82 */
83 .balign 8
83 .globl __secondary_hold_spinloop 84 .globl __secondary_hold_spinloop
84__secondary_hold_spinloop: 85__secondary_hold_spinloop:
85 .llong 0x0 86 .llong 0x0
@@ -470,6 +471,7 @@ _STATIC(__after_prom_start)
470 mtctr r8 471 mtctr r8
471 bctr 472 bctr
472 473
474.balign 8
473p_end: .llong _end - _stext 475p_end: .llong _end - _stext
474 476
4754: /* Now copy the rest of the kernel up to _end */ 4774: /* Now copy the rest of the kernel up to _end */
diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c
index e1ec57e87b3b..75d4f7340da8 100644
--- a/arch/powerpc/kernel/machine_kexec.c
+++ b/arch/powerpc/kernel/machine_kexec.c
@@ -18,6 +18,7 @@
18#include <linux/ftrace.h> 18#include <linux/ftrace.h>
19 19
20#include <asm/machdep.h> 20#include <asm/machdep.h>
21#include <asm/pgalloc.h>
21#include <asm/prom.h> 22#include <asm/prom.h>
22#include <asm/sections.h> 23#include <asm/sections.h>
23 24
@@ -75,6 +76,17 @@ void arch_crash_save_vmcoreinfo(void)
75#ifndef CONFIG_NEED_MULTIPLE_NODES 76#ifndef CONFIG_NEED_MULTIPLE_NODES
76 VMCOREINFO_SYMBOL(contig_page_data); 77 VMCOREINFO_SYMBOL(contig_page_data);
77#endif 78#endif
79#if defined(CONFIG_PPC64) && defined(CONFIG_SPARSEMEM_VMEMMAP)
80 VMCOREINFO_SYMBOL(vmemmap_list);
81 VMCOREINFO_SYMBOL(mmu_vmemmap_psize);
82 VMCOREINFO_SYMBOL(mmu_psize_defs);
83 VMCOREINFO_STRUCT_SIZE(vmemmap_backing);
84 VMCOREINFO_OFFSET(vmemmap_backing, list);
85 VMCOREINFO_OFFSET(vmemmap_backing, phys);
86 VMCOREINFO_OFFSET(vmemmap_backing, virt_addr);
87 VMCOREINFO_STRUCT_SIZE(mmu_psize_def);
88 VMCOREINFO_OFFSET(mmu_psize_def, shift);
89#endif
78} 90}
79 91
80/* 92/*
@@ -136,7 +148,7 @@ void __init reserve_crashkernel(void)
136 * a small SLB (128MB) since the crash kernel needs to place 148 * a small SLB (128MB) since the crash kernel needs to place
137 * itself and some stacks to be in the first segment. 149 * itself and some stacks to be in the first segment.
138 */ 150 */
139 crashk_res.start = min(0x80000000ULL, (ppc64_rma_size / 2)); 151 crashk_res.start = min(0x8000000ULL, (ppc64_rma_size / 2));
140#else 152#else
141 crashk_res.start = KDUMP_KERNELBASE; 153 crashk_res.start = KDUMP_KERNELBASE;
142#endif 154#endif
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index e59caf874d05..64bf8db12b15 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -246,8 +246,8 @@ _GLOBAL(__bswapdi2)
246 or r3,r7,r9 246 or r3,r7,r9
247 blr 247 blr
248 248
249#if defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE)
250 249
250#ifdef CONFIG_PPC_EARLY_DEBUG_BOOTX
251_GLOBAL(rmci_on) 251_GLOBAL(rmci_on)
252 sync 252 sync
253 isync 253 isync
@@ -277,6 +277,9 @@ _GLOBAL(rmci_off)
277 isync 277 isync
278 sync 278 sync
279 blr 279 blr
280#endif /* CONFIG_PPC_EARLY_DEBUG_BOOTX */
281
282#if defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE)
280 283
281/* 284/*
282 * Do an IO access in real mode 285 * Do an IO access in real mode
diff --git a/arch/powerpc/kernel/nvram_64.c b/arch/powerpc/kernel/nvram_64.c
index fd82c289ab1c..28b898e68185 100644
--- a/arch/powerpc/kernel/nvram_64.c
+++ b/arch/powerpc/kernel/nvram_64.c
@@ -210,7 +210,7 @@ static void __init nvram_print_partitions(char * label)
210 printk(KERN_WARNING "--------%s---------\n", label); 210 printk(KERN_WARNING "--------%s---------\n", label);
211 printk(KERN_WARNING "indx\t\tsig\tchks\tlen\tname\n"); 211 printk(KERN_WARNING "indx\t\tsig\tchks\tlen\tname\n");
212 list_for_each_entry(tmp_part, &nvram_partitions, partition) { 212 list_for_each_entry(tmp_part, &nvram_partitions, partition) {
213 printk(KERN_WARNING "%4d \t%02x\t%02x\t%d\t%12s\n", 213 printk(KERN_WARNING "%4d \t%02x\t%02x\t%d\t%12.12s\n",
214 tmp_part->index, tmp_part->header.signature, 214 tmp_part->index, tmp_part->header.signature,
215 tmp_part->header.checksum, tmp_part->header.length, 215 tmp_part->header.checksum, tmp_part->header.length,
216 tmp_part->header.name); 216 tmp_part->header.name);
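
Note: the nvram partition name is a fixed 12-byte field with no guaranteed NUL terminator, so "%12s" can read past the header while "%12.12s" caps the output at 12 bytes. A small user-space demonstration of the precision handling; plain printf behaves the same way printk does here:

    #include <stdio.h>

    int main(void)
    {
            /* 12 bytes, deliberately not NUL terminated */
            char name[12] = { 'c','o','m','m','o','n',' ','c','o','n','f','g' };

            /* "%12.12s" prints at most 12 bytes and stays in bounds;
             * "%12s" would keep reading until it happened to find a NUL. */
            printf("%12.12s\n", name);
            return 0;
    }
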
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 3386d8ab7eb0..4a96556fd2d4 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -339,7 +339,7 @@ static void set_debug_reg_defaults(struct thread_struct *thread)
339#endif 339#endif
340} 340}
341 341
342static void prime_debug_regs(struct thread_struct *thread) 342static void prime_debug_regs(struct debug_reg *debug)
343{ 343{
344 /* 344 /*
345 * We could have inherited MSR_DE from userspace, since 345 * We could have inherited MSR_DE from userspace, since
@@ -348,22 +348,22 @@ static void prime_debug_regs(struct thread_struct *thread)
348 */ 348 */
349 mtmsr(mfmsr() & ~MSR_DE); 349 mtmsr(mfmsr() & ~MSR_DE);
350 350
351 mtspr(SPRN_IAC1, thread->debug.iac1); 351 mtspr(SPRN_IAC1, debug->iac1);
352 mtspr(SPRN_IAC2, thread->debug.iac2); 352 mtspr(SPRN_IAC2, debug->iac2);
353#if CONFIG_PPC_ADV_DEBUG_IACS > 2 353#if CONFIG_PPC_ADV_DEBUG_IACS > 2
354 mtspr(SPRN_IAC3, thread->debug.iac3); 354 mtspr(SPRN_IAC3, debug->iac3);
355 mtspr(SPRN_IAC4, thread->debug.iac4); 355 mtspr(SPRN_IAC4, debug->iac4);
356#endif 356#endif
357 mtspr(SPRN_DAC1, thread->debug.dac1); 357 mtspr(SPRN_DAC1, debug->dac1);
358 mtspr(SPRN_DAC2, thread->debug.dac2); 358 mtspr(SPRN_DAC2, debug->dac2);
359#if CONFIG_PPC_ADV_DEBUG_DVCS > 0 359#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
360 mtspr(SPRN_DVC1, thread->debug.dvc1); 360 mtspr(SPRN_DVC1, debug->dvc1);
361 mtspr(SPRN_DVC2, thread->debug.dvc2); 361 mtspr(SPRN_DVC2, debug->dvc2);
362#endif 362#endif
363 mtspr(SPRN_DBCR0, thread->debug.dbcr0); 363 mtspr(SPRN_DBCR0, debug->dbcr0);
364 mtspr(SPRN_DBCR1, thread->debug.dbcr1); 364 mtspr(SPRN_DBCR1, debug->dbcr1);
365#ifdef CONFIG_BOOKE 365#ifdef CONFIG_BOOKE
366 mtspr(SPRN_DBCR2, thread->debug.dbcr2); 366 mtspr(SPRN_DBCR2, debug->dbcr2);
367#endif 367#endif
368} 368}
369/* 369/*
@@ -371,11 +371,11 @@ static void prime_debug_regs(struct thread_struct *thread)
371 * debug registers, set the debug registers from the values 371 * debug registers, set the debug registers from the values
372 * stored in the new thread. 372 * stored in the new thread.
373 */ 373 */
374void switch_booke_debug_regs(struct thread_struct *new_thread) 374void switch_booke_debug_regs(struct debug_reg *new_debug)
375{ 375{
376 if ((current->thread.debug.dbcr0 & DBCR0_IDM) 376 if ((current->thread.debug.dbcr0 & DBCR0_IDM)
377 || (new_thread->debug.dbcr0 & DBCR0_IDM)) 377 || (new_debug->dbcr0 & DBCR0_IDM))
378 prime_debug_regs(new_thread); 378 prime_debug_regs(new_debug);
379} 379}
380EXPORT_SYMBOL_GPL(switch_booke_debug_regs); 380EXPORT_SYMBOL_GPL(switch_booke_debug_regs);
381#else /* !CONFIG_PPC_ADV_DEBUG_REGS */ 381#else /* !CONFIG_PPC_ADV_DEBUG_REGS */
@@ -683,7 +683,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
683#endif /* CONFIG_SMP */ 683#endif /* CONFIG_SMP */
684 684
685#ifdef CONFIG_PPC_ADV_DEBUG_REGS 685#ifdef CONFIG_PPC_ADV_DEBUG_REGS
686 switch_booke_debug_regs(&new->thread); 686 switch_booke_debug_regs(&new->thread.debug);
687#else 687#else
688/* 688/*
689 * For PPC_BOOK3S_64, we use the hw-breakpoint interfaces that would 689 * For PPC_BOOK3S_64, we use the hw-breakpoint interfaces that would
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index 75fb40498b41..2e3d2bf536c5 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -1555,7 +1555,7 @@ long arch_ptrace(struct task_struct *child, long request,
1555 1555
1556 flush_fp_to_thread(child); 1556 flush_fp_to_thread(child);
1557 if (fpidx < (PT_FPSCR - PT_FPR0)) 1557 if (fpidx < (PT_FPSCR - PT_FPR0))
1558 memcpy(&tmp, &child->thread.fp_state.fpr, 1558 memcpy(&tmp, &child->thread.TS_FPR(fpidx),
1559 sizeof(long)); 1559 sizeof(long));
1560 else 1560 else
1561 tmp = child->thread.fp_state.fpscr; 1561 tmp = child->thread.fp_state.fpscr;
@@ -1588,7 +1588,7 @@ long arch_ptrace(struct task_struct *child, long request,
1588 1588
1589 flush_fp_to_thread(child); 1589 flush_fp_to_thread(child);
1590 if (fpidx < (PT_FPSCR - PT_FPR0)) 1590 if (fpidx < (PT_FPSCR - PT_FPR0))
1591 memcpy(&child->thread.fp_state.fpr, &data, 1591 memcpy(&child->thread.TS_FPR(fpidx), &data,
1592 sizeof(long)); 1592 sizeof(long));
1593 else 1593 else
1594 child->thread.fp_state.fpscr = data; 1594 child->thread.fp_state.fpscr = data;
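
Note: the ptrace fix above matters because &child->thread.fp_state.fpr is the address of the whole array, i.e. element 0, so every PEEK/POKE of a floating point register used to hit FPR0 regardless of fpidx. A tiny stand-alone demonstration of the difference:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            long fpr[32] = { 111, 222, 333 };
            long tmp;
            int fpidx = 2;

            memcpy(&tmp, &fpr, sizeof(long));        /* old pattern: element 0 */
            printf("old: %ld\n", tmp);               /* 111 */

            memcpy(&tmp, &fpr[fpidx], sizeof(long)); /* indexed, as TS_FPR() is */
            printf("new: %ld\n", tmp);               /* 333 */
            return 0;
    }
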
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index febc80445d25..bc76cc6b419c 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -479,7 +479,7 @@ void __init smp_setup_cpu_maps(void)
479 if (machine_is(pseries) && firmware_has_feature(FW_FEATURE_LPAR) && 479 if (machine_is(pseries) && firmware_has_feature(FW_FEATURE_LPAR) &&
480 (dn = of_find_node_by_path("/rtas"))) { 480 (dn = of_find_node_by_path("/rtas"))) {
481 int num_addr_cell, num_size_cell, maxcpus; 481 int num_addr_cell, num_size_cell, maxcpus;
482 const unsigned int *ireg; 482 const __be32 *ireg;
483 483
484 num_addr_cell = of_n_addr_cells(dn); 484 num_addr_cell = of_n_addr_cells(dn);
485 num_size_cell = of_n_size_cells(dn); 485 num_size_cell = of_n_size_cells(dn);
@@ -489,7 +489,7 @@ void __init smp_setup_cpu_maps(void)
489 if (!ireg) 489 if (!ireg)
490 goto out; 490 goto out;
491 491
492 maxcpus = ireg[num_addr_cell + num_size_cell]; 492 maxcpus = be32_to_cpup(ireg + num_addr_cell + num_size_cell);
493 493
494 /* Double maxcpus for processors which have SMT capability */ 494 /* Double maxcpus for processors which have SMT capability */
495 if (cpu_has_feature(CPU_FTR_SMT)) 495 if (cpu_has_feature(CPU_FTR_SMT))
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index 1844298f5ea4..68027bfa5f8e 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -445,6 +445,12 @@ static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
445#endif /* CONFIG_ALTIVEC */ 445#endif /* CONFIG_ALTIVEC */
446 if (copy_fpr_to_user(&frame->mc_fregs, current)) 446 if (copy_fpr_to_user(&frame->mc_fregs, current))
447 return 1; 447 return 1;
448
449 /*
450 * Clear the MSR VSX bit to indicate there is no valid state attached
451 * to this context, except in the specific case below where we set it.
452 */
453 msr &= ~MSR_VSX;
448#ifdef CONFIG_VSX 454#ifdef CONFIG_VSX
449 /* 455 /*
450 * Copy VSR 0-31 upper half from thread_struct to local 456 * Copy VSR 0-31 upper half from thread_struct to local
@@ -457,15 +463,7 @@ static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
457 if (copy_vsx_to_user(&frame->mc_vsregs, current)) 463 if (copy_vsx_to_user(&frame->mc_vsregs, current))
458 return 1; 464 return 1;
459 msr |= MSR_VSX; 465 msr |= MSR_VSX;
460 } else if (!ctx_has_vsx_region) 466 }
461 /*
462 * With a small context structure we can't hold the VSX
463 * registers, hence clear the MSR value to indicate the state
464 * was not saved.
465 */
466 msr &= ~MSR_VSX;
467
468
469#endif /* CONFIG_VSX */ 467#endif /* CONFIG_VSX */
470#ifdef CONFIG_SPE 468#ifdef CONFIG_SPE
471 /* save spe registers */ 469 /* save spe registers */
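
Note: this hunk and the matching one in signal_64.c below adopt the same convention: assume no VSX state in the signal frame, and only set MSR_VSX after the registers have actually been copied out. A condensed sketch of that convention; the helper does not exist in the kernel, the field and function names follow the surrounding code:

    /* Sketch of the save-path convention, not the literal kernel function. */
    static int save_vsx_state(struct mcontext __user *frame, unsigned long *msr)
    {
            *msr &= ~MSR_VSX;            /* default: no VSX state in this frame */
    #ifdef CONFIG_VSX
            if (current->thread.used_vsr) {
                    if (copy_vsx_to_user(&frame->mc_vsregs, current))
                            return 1;
                    *msr |= MSR_VSX;     /* frame really carries VSX state */
            }
    #endif
            return 0;
    }
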
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index e66f67b8b9e6..42991045349f 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -122,6 +122,12 @@ static long setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
122 flush_fp_to_thread(current); 122 flush_fp_to_thread(current);
123 /* copy fpr regs and fpscr */ 123 /* copy fpr regs and fpscr */
124 err |= copy_fpr_to_user(&sc->fp_regs, current); 124 err |= copy_fpr_to_user(&sc->fp_regs, current);
125
126 /*
127 * Clear the MSR VSX bit to indicate there is no valid state attached
128 * to this context, except in the specific case below where we set it.
129 */
130 msr &= ~MSR_VSX;
125#ifdef CONFIG_VSX 131#ifdef CONFIG_VSX
126 /* 132 /*
127 * Copy VSX low doubleword to local buffer for formatting, 133 * Copy VSX low doubleword to local buffer for formatting,
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index a3b64f3bf9a2..c1cf4a1522d9 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -580,7 +580,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
580int cpu_to_core_id(int cpu) 580int cpu_to_core_id(int cpu)
581{ 581{
582 struct device_node *np; 582 struct device_node *np;
583 const int *reg; 583 const __be32 *reg;
584 int id = -1; 584 int id = -1;
585 585
586 np = of_get_cpu_node(cpu, NULL); 586 np = of_get_cpu_node(cpu, NULL);
@@ -591,7 +591,7 @@ int cpu_to_core_id(int cpu)
591 if (!reg) 591 if (!reg)
592 goto out; 592 goto out;
593 593
594 id = *reg; 594 id = be32_to_cpup(reg);
595out: 595out:
596 of_node_put(np); 596 of_node_put(np);
597 return id; 597 return id;
diff --git a/arch/powerpc/kernel/vdso32/gettimeofday.S b/arch/powerpc/kernel/vdso32/gettimeofday.S
index 6b1f2a6d5517..6b2b69616e77 100644
--- a/arch/powerpc/kernel/vdso32/gettimeofday.S
+++ b/arch/powerpc/kernel/vdso32/gettimeofday.S
@@ -232,9 +232,15 @@ __do_get_tspec:
232 lwz r6,(CFG_TB_ORIG_STAMP+4)(r9) 232 lwz r6,(CFG_TB_ORIG_STAMP+4)(r9)
233 233
234 /* Get a stable TB value */ 234 /* Get a stable TB value */
235#ifdef CONFIG_8xx
2362: mftbu r3
237 mftbl r4
238 mftbu r0
239#else
2352: mfspr r3, SPRN_TBRU 2402: mfspr r3, SPRN_TBRU
236 mfspr r4, SPRN_TBRL 241 mfspr r4, SPRN_TBRL
237 mfspr r0, SPRN_TBRU 242 mfspr r0, SPRN_TBRU
243#endif
238 cmplw cr0,r3,r0 244 cmplw cr0,r3,r0
239 bne- 2b 245 bne- 2b
240 246
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index f3ff587a8b7d..c5d148434c08 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -469,11 +469,14 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
469 slb_v = vcpu->kvm->arch.vrma_slb_v; 469 slb_v = vcpu->kvm->arch.vrma_slb_v;
470 } 470 }
471 471
472 preempt_disable();
472 /* Find the HPTE in the hash table */ 473 /* Find the HPTE in the hash table */
473 index = kvmppc_hv_find_lock_hpte(kvm, eaddr, slb_v, 474 index = kvmppc_hv_find_lock_hpte(kvm, eaddr, slb_v,
474 HPTE_V_VALID | HPTE_V_ABSENT); 475 HPTE_V_VALID | HPTE_V_ABSENT);
475 if (index < 0) 476 if (index < 0) {
477 preempt_enable();
476 return -ENOENT; 478 return -ENOENT;
479 }
477 hptep = (unsigned long *)(kvm->arch.hpt_virt + (index << 4)); 480 hptep = (unsigned long *)(kvm->arch.hpt_virt + (index << 4));
478 v = hptep[0] & ~HPTE_V_HVLOCK; 481 v = hptep[0] & ~HPTE_V_HVLOCK;
479 gr = kvm->arch.revmap[index].guest_rpte; 482 gr = kvm->arch.revmap[index].guest_rpte;
@@ -481,6 +484,7 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
481 /* Unlock the HPTE */ 484 /* Unlock the HPTE */
482 asm volatile("lwsync" : : : "memory"); 485 asm volatile("lwsync" : : : "memory");
483 hptep[0] = v; 486 hptep[0] = v;
487 preempt_enable();
484 488
485 gpte->eaddr = eaddr; 489 gpte->eaddr = eaddr;
486 gpte->vpage = ((v & HPTE_V_AVPN) << 4) | ((eaddr >> 12) & 0xfff); 490 gpte->vpage = ((v & HPTE_V_AVPN) << 4) | ((eaddr >> 12) & 0xfff);
@@ -665,6 +669,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
665 return -EFAULT; 669 return -EFAULT;
666 } else { 670 } else {
667 page = pages[0]; 671 page = pages[0];
672 pfn = page_to_pfn(page);
668 if (PageHuge(page)) { 673 if (PageHuge(page)) {
669 page = compound_head(page); 674 page = compound_head(page);
670 pte_size <<= compound_order(page); 675 pte_size <<= compound_order(page);
@@ -689,7 +694,6 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
689 } 694 }
690 rcu_read_unlock_sched(); 695 rcu_read_unlock_sched();
691 } 696 }
692 pfn = page_to_pfn(page);
693 } 697 }
694 698
695 ret = -EFAULT; 699 ret = -EFAULT;
@@ -707,8 +711,14 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
707 r = (r & ~(HPTE_R_W|HPTE_R_I|HPTE_R_G)) | HPTE_R_M; 711 r = (r & ~(HPTE_R_W|HPTE_R_I|HPTE_R_G)) | HPTE_R_M;
708 } 712 }
709 713
710 /* Set the HPTE to point to pfn */ 714 /*
711 r = (r & ~(HPTE_R_PP0 - pte_size)) | (pfn << PAGE_SHIFT); 715 * Set the HPTE to point to pfn.
716 * Since the pfn is at PAGE_SIZE granularity, make sure we
717 * don't mask out lower-order bits if psize < PAGE_SIZE.
718 */
719 if (psize < PAGE_SIZE)
720 psize = PAGE_SIZE;
721 r = (r & ~(HPTE_R_PP0 - psize)) | ((pfn << PAGE_SHIFT) & ~(psize - 1));
712 if (hpte_is_writable(r) && !write_ok) 722 if (hpte_is_writable(r) && !write_ok)
713 r = hpte_make_readonly(r); 723 r = hpte_make_readonly(r);
714 ret = RESUME_GUEST; 724 ret = RESUME_GUEST;
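
Note: the first hunk in this file brackets kvmppc_hv_find_lock_hpte() with preempt_disable()/preempt_enable(); as the comment added to book3s_hv_rm_mmu.c below explains, the caller effectively holds HPTE_V_HVLOCK and must not be scheduled away while it does. A minimal sketch of the calling convention; locked_lookup() is a made-up wrapper name:

    #include <linux/preempt.h>

    /* Sketch: stay on this CPU for the whole lock/use/unlock sequence so the
     * HPTE lock bit is never held by a task that has been preempted. */
    static long locked_lookup(struct kvm *kvm, gva_t eaddr, unsigned long slb_v)
    {
            long index;

            preempt_disable();
            index = kvmppc_hv_find_lock_hpte(kvm, eaddr, slb_v,
                                             HPTE_V_VALID | HPTE_V_ABSENT);
            if (index < 0) {
                    preempt_enable();
                    return index;
            }
            /* ... read the HPTE, then drop HPTE_V_HVLOCK ... */
            preempt_enable();
            return index;
    }
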
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 072287f1c3bc..b51d5db78068 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -131,8 +131,9 @@ static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu)
131static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu) 131static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu)
132{ 132{
133 struct kvmppc_vcore *vc = vcpu->arch.vcore; 133 struct kvmppc_vcore *vc = vcpu->arch.vcore;
134 unsigned long flags;
134 135
135 spin_lock(&vcpu->arch.tbacct_lock); 136 spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
136 if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE && 137 if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE &&
137 vc->preempt_tb != TB_NIL) { 138 vc->preempt_tb != TB_NIL) {
138 vc->stolen_tb += mftb() - vc->preempt_tb; 139 vc->stolen_tb += mftb() - vc->preempt_tb;
@@ -143,19 +144,20 @@ static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu)
143 vcpu->arch.busy_stolen += mftb() - vcpu->arch.busy_preempt; 144 vcpu->arch.busy_stolen += mftb() - vcpu->arch.busy_preempt;
144 vcpu->arch.busy_preempt = TB_NIL; 145 vcpu->arch.busy_preempt = TB_NIL;
145 } 146 }
146 spin_unlock(&vcpu->arch.tbacct_lock); 147 spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
147} 148}
148 149
149static void kvmppc_core_vcpu_put_hv(struct kvm_vcpu *vcpu) 150static void kvmppc_core_vcpu_put_hv(struct kvm_vcpu *vcpu)
150{ 151{
151 struct kvmppc_vcore *vc = vcpu->arch.vcore; 152 struct kvmppc_vcore *vc = vcpu->arch.vcore;
153 unsigned long flags;
152 154
153 spin_lock(&vcpu->arch.tbacct_lock); 155 spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
154 if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE) 156 if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE)
155 vc->preempt_tb = mftb(); 157 vc->preempt_tb = mftb();
156 if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST) 158 if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST)
157 vcpu->arch.busy_preempt = mftb(); 159 vcpu->arch.busy_preempt = mftb();
158 spin_unlock(&vcpu->arch.tbacct_lock); 160 spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
159} 161}
160 162
161static void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr) 163static void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr)
@@ -486,11 +488,11 @@ static u64 vcore_stolen_time(struct kvmppc_vcore *vc, u64 now)
486 */ 488 */
487 if (vc->vcore_state != VCORE_INACTIVE && 489 if (vc->vcore_state != VCORE_INACTIVE &&
488 vc->runner->arch.run_task != current) { 490 vc->runner->arch.run_task != current) {
489 spin_lock(&vc->runner->arch.tbacct_lock); 491 spin_lock_irq(&vc->runner->arch.tbacct_lock);
490 p = vc->stolen_tb; 492 p = vc->stolen_tb;
491 if (vc->preempt_tb != TB_NIL) 493 if (vc->preempt_tb != TB_NIL)
492 p += now - vc->preempt_tb; 494 p += now - vc->preempt_tb;
493 spin_unlock(&vc->runner->arch.tbacct_lock); 495 spin_unlock_irq(&vc->runner->arch.tbacct_lock);
494 } else { 496 } else {
495 p = vc->stolen_tb; 497 p = vc->stolen_tb;
496 } 498 }
@@ -512,10 +514,10 @@ static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu,
512 core_stolen = vcore_stolen_time(vc, now); 514 core_stolen = vcore_stolen_time(vc, now);
513 stolen = core_stolen - vcpu->arch.stolen_logged; 515 stolen = core_stolen - vcpu->arch.stolen_logged;
514 vcpu->arch.stolen_logged = core_stolen; 516 vcpu->arch.stolen_logged = core_stolen;
515 spin_lock(&vcpu->arch.tbacct_lock); 517 spin_lock_irq(&vcpu->arch.tbacct_lock);
516 stolen += vcpu->arch.busy_stolen; 518 stolen += vcpu->arch.busy_stolen;
517 vcpu->arch.busy_stolen = 0; 519 vcpu->arch.busy_stolen = 0;
518 spin_unlock(&vcpu->arch.tbacct_lock); 520 spin_unlock_irq(&vcpu->arch.tbacct_lock);
519 if (!dt || !vpa) 521 if (!dt || !vpa)
520 return; 522 return;
521 memset(dt, 0, sizeof(struct dtl_entry)); 523 memset(dt, 0, sizeof(struct dtl_entry));
@@ -589,7 +591,9 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
589 if (list_empty(&vcpu->kvm->arch.rtas_tokens)) 591 if (list_empty(&vcpu->kvm->arch.rtas_tokens))
590 return RESUME_HOST; 592 return RESUME_HOST;
591 593
594 idx = srcu_read_lock(&vcpu->kvm->srcu);
592 rc = kvmppc_rtas_hcall(vcpu); 595 rc = kvmppc_rtas_hcall(vcpu);
596 srcu_read_unlock(&vcpu->kvm->srcu, idx);
593 597
594 if (rc == -ENOENT) 598 if (rc == -ENOENT)
595 return RESUME_HOST; 599 return RESUME_HOST;
@@ -1115,13 +1119,13 @@ static void kvmppc_remove_runnable(struct kvmppc_vcore *vc,
1115 1119
1116 if (vcpu->arch.state != KVMPPC_VCPU_RUNNABLE) 1120 if (vcpu->arch.state != KVMPPC_VCPU_RUNNABLE)
1117 return; 1121 return;
1118 spin_lock(&vcpu->arch.tbacct_lock); 1122 spin_lock_irq(&vcpu->arch.tbacct_lock);
1119 now = mftb(); 1123 now = mftb();
1120 vcpu->arch.busy_stolen += vcore_stolen_time(vc, now) - 1124 vcpu->arch.busy_stolen += vcore_stolen_time(vc, now) -
1121 vcpu->arch.stolen_logged; 1125 vcpu->arch.stolen_logged;
1122 vcpu->arch.busy_preempt = now; 1126 vcpu->arch.busy_preempt = now;
1123 vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST; 1127 vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
1124 spin_unlock(&vcpu->arch.tbacct_lock); 1128 spin_unlock_irq(&vcpu->arch.tbacct_lock);
1125 --vc->n_runnable; 1129 --vc->n_runnable;
1126 list_del(&vcpu->arch.run_list); 1130 list_del(&vcpu->arch.run_list);
1127} 1131}
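
Note: the tbacct_lock conversions above all follow one rule: once a spinlock is taken somewhere with interrupts already off, every acquisition has to be interrupt safe, otherwise an interrupt arriving while the lock is held can deadlock on it (and lockdep will complain first). A generic sketch of the pattern, deliberately not tied to the KVM structures:

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(acct_lock);
    static u64 stolen_total;

    /* Sketch: irqsave/irqrestore makes this safe regardless of whether the
     * caller runs in process context or with interrupts already disabled. */
    static void account_stolen(u64 delta)
    {
            unsigned long flags;

            spin_lock_irqsave(&acct_lock, flags);
            stolen_total += delta;
            spin_unlock_irqrestore(&acct_lock, flags);
    }
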
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index 9c515440ad1a..8689e2e30857 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -225,6 +225,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
225 is_io = pa & (HPTE_R_I | HPTE_R_W); 225 is_io = pa & (HPTE_R_I | HPTE_R_W);
226 pte_size = PAGE_SIZE << (pa & KVMPPC_PAGE_ORDER_MASK); 226 pte_size = PAGE_SIZE << (pa & KVMPPC_PAGE_ORDER_MASK);
227 pa &= PAGE_MASK; 227 pa &= PAGE_MASK;
228 pa |= gpa & ~PAGE_MASK;
228 } else { 229 } else {
229 /* Translate to host virtual address */ 230 /* Translate to host virtual address */
230 hva = __gfn_to_hva_memslot(memslot, gfn); 231 hva = __gfn_to_hva_memslot(memslot, gfn);
@@ -238,13 +239,13 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
238 ptel = hpte_make_readonly(ptel); 239 ptel = hpte_make_readonly(ptel);
239 is_io = hpte_cache_bits(pte_val(pte)); 240 is_io = hpte_cache_bits(pte_val(pte));
240 pa = pte_pfn(pte) << PAGE_SHIFT; 241 pa = pte_pfn(pte) << PAGE_SHIFT;
242 pa |= hva & (pte_size - 1);
243 pa |= gpa & ~PAGE_MASK;
241 } 244 }
242 } 245 }
243 246
244 if (pte_size < psize) 247 if (pte_size < psize)
245 return H_PARAMETER; 248 return H_PARAMETER;
246 if (pa && pte_size > psize)
247 pa |= gpa & (pte_size - 1);
248 249
249 ptel &= ~(HPTE_R_PP0 - psize); 250 ptel &= ~(HPTE_R_PP0 - psize);
250 ptel |= pa; 251 ptel |= pa;
@@ -749,6 +750,10 @@ static int slb_base_page_shift[4] = {
749 20, /* 1M, unsupported */ 750 20, /* 1M, unsupported */
750}; 751};
751 752
753/* When called from virtmode, this func should be protected by
754 * preempt_disable(), otherwise, the holding of HPTE_V_HVLOCK
755 * can trigger deadlock issue.
756 */
752long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v, 757long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
753 unsigned long valid) 758 unsigned long valid)
754{ 759{
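
Note: the H_ENTER hunk above changes how the host real address is composed: instead of only OR-ing in guest offset bits when the backing page is larger than the requested page size, the low-order bits of the guest physical address are now kept unconditionally, so a sub-page mapping no longer loses its offset. A tiny stand-alone illustration of the composition, using example addresses and 4 KiB frames:

    #include <stdio.h>

    int main(void)
    {
            unsigned long page_mask = ~0xfffUL;      /* 4 KiB frames */
            unsigned long gpa = 0x12345678;          /* guest physical address */
            unsigned long pa  = 0xabcde000;          /* host frame address */

            /* keep the in-page offset of the guest address */
            unsigned long host = (pa & page_mask) | (gpa & ~page_mask);

            printf("%#lx\n", host);                  /* prints 0xabcde678 */
            return 0;
    }
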
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index bc8de75b1925..be4fa04a37c9 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -153,7 +153,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
153 153
15413: b machine_check_fwnmi 15413: b machine_check_fwnmi
155 155
156
157/* 156/*
158 * We come in here when wakened from nap mode on a secondary hw thread. 157 * We come in here when wakened from nap mode on a secondary hw thread.
159 * Relocation is off and most register values are lost. 158 * Relocation is off and most register values are lost.
@@ -224,6 +223,11 @@ kvm_start_guest:
224 /* Clear our vcpu pointer so we don't come back in early */ 223 /* Clear our vcpu pointer so we don't come back in early */
225 li r0, 0 224 li r0, 0
226 std r0, HSTATE_KVM_VCPU(r13) 225 std r0, HSTATE_KVM_VCPU(r13)
226 /*
227 * Make sure we clear HSTATE_KVM_VCPU(r13) before incrementing
228 * the nap_count, because once the increment to nap_count is
229 * visible we could be given another vcpu.
230 */
227 lwsync 231 lwsync
228 /* Clear any pending IPI - we're an offline thread */ 232 /* Clear any pending IPI - we're an offline thread */
229 ld r5, HSTATE_XICS_PHYS(r13) 233 ld r5, HSTATE_XICS_PHYS(r13)
@@ -241,7 +245,6 @@ kvm_start_guest:
241 /* increment the nap count and then go to nap mode */ 245 /* increment the nap count and then go to nap mode */
242 ld r4, HSTATE_KVM_VCORE(r13) 246 ld r4, HSTATE_KVM_VCORE(r13)
243 addi r4, r4, VCORE_NAP_COUNT 247 addi r4, r4, VCORE_NAP_COUNT
244 lwsync /* make previous updates visible */
24551: lwarx r3, 0, r4 24851: lwarx r3, 0, r4
246 addi r3, r3, 1 249 addi r3, r3, 1
247 stwcx. r3, 0, r4 250 stwcx. r3, 0, r4
@@ -751,15 +754,14 @@ kvmppc_interrupt_hv:
751 * guest CR, R12 saved in shadow VCPU SCRATCH1/0 754 * guest CR, R12 saved in shadow VCPU SCRATCH1/0
752 * guest R13 saved in SPRN_SCRATCH0 755 * guest R13 saved in SPRN_SCRATCH0
753 */ 756 */
754 /* abuse host_r2 as third scratch area; we get r2 from PACATOC(r13) */ 757 std r9, HSTATE_SCRATCH2(r13)
755 std r9, HSTATE_HOST_R2(r13)
756 758
757 lbz r9, HSTATE_IN_GUEST(r13) 759 lbz r9, HSTATE_IN_GUEST(r13)
758 cmpwi r9, KVM_GUEST_MODE_HOST_HV 760 cmpwi r9, KVM_GUEST_MODE_HOST_HV
759 beq kvmppc_bad_host_intr 761 beq kvmppc_bad_host_intr
760#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE 762#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
761 cmpwi r9, KVM_GUEST_MODE_GUEST 763 cmpwi r9, KVM_GUEST_MODE_GUEST
762 ld r9, HSTATE_HOST_R2(r13) 764 ld r9, HSTATE_SCRATCH2(r13)
763 beq kvmppc_interrupt_pr 765 beq kvmppc_interrupt_pr
764#endif 766#endif
765 /* We're now back in the host but in guest MMU context */ 767 /* We're now back in the host but in guest MMU context */
@@ -779,7 +781,7 @@ kvmppc_interrupt_hv:
779 std r6, VCPU_GPR(R6)(r9) 781 std r6, VCPU_GPR(R6)(r9)
780 std r7, VCPU_GPR(R7)(r9) 782 std r7, VCPU_GPR(R7)(r9)
781 std r8, VCPU_GPR(R8)(r9) 783 std r8, VCPU_GPR(R8)(r9)
782 ld r0, HSTATE_HOST_R2(r13) 784 ld r0, HSTATE_SCRATCH2(r13)
783 std r0, VCPU_GPR(R9)(r9) 785 std r0, VCPU_GPR(R9)(r9)
784 std r10, VCPU_GPR(R10)(r9) 786 std r10, VCPU_GPR(R10)(r9)
785 std r11, VCPU_GPR(R11)(r9) 787 std r11, VCPU_GPR(R11)(r9)
@@ -990,14 +992,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
990 */ 992 */
991 /* Increment the threads-exiting-guest count in the 0xff00 993 /* Increment the threads-exiting-guest count in the 0xff00
992 bits of vcore->entry_exit_count */ 994 bits of vcore->entry_exit_count */
993 lwsync
994 ld r5,HSTATE_KVM_VCORE(r13) 995 ld r5,HSTATE_KVM_VCORE(r13)
995 addi r6,r5,VCORE_ENTRY_EXIT 996 addi r6,r5,VCORE_ENTRY_EXIT
99641: lwarx r3,0,r6 99741: lwarx r3,0,r6
997 addi r0,r3,0x100 998 addi r0,r3,0x100
998 stwcx. r0,0,r6 999 stwcx. r0,0,r6
999 bne 41b 1000 bne 41b
1000 lwsync 1001 isync /* order stwcx. vs. reading napping_threads */
1001 1002
1002 /* 1003 /*
1003 * At this point we have an interrupt that we have to pass 1004 * At this point we have an interrupt that we have to pass
@@ -1030,6 +1031,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
1030 sld r0,r0,r4 1031 sld r0,r0,r4
1031 andc. r3,r3,r0 /* no sense IPI'ing ourselves */ 1032 andc. r3,r3,r0 /* no sense IPI'ing ourselves */
1032 beq 43f 1033 beq 43f
1034 /* Order entry/exit update vs. IPIs */
1035 sync
1033 mulli r4,r4,PACA_SIZE /* get paca for thread 0 */ 1036 mulli r4,r4,PACA_SIZE /* get paca for thread 0 */
1034 subf r6,r4,r13 1037 subf r6,r4,r13
103542: andi. r0,r3,1 103842: andi. r0,r3,1
@@ -1638,10 +1641,10 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
1638 bge kvm_cede_exit 1641 bge kvm_cede_exit
1639 stwcx. r4,0,r6 1642 stwcx. r4,0,r6
1640 bne 31b 1643 bne 31b
1644 /* order napping_threads update vs testing entry_exit_count */
1645 isync
1641 li r0,1 1646 li r0,1
1642 stb r0,HSTATE_NAPPING(r13) 1647 stb r0,HSTATE_NAPPING(r13)
1643 /* order napping_threads update vs testing entry_exit_count */
1644 lwsync
1645 mr r4,r3 1648 mr r4,r3
1646 lwz r7,VCORE_ENTRY_EXIT(r5) 1649 lwz r7,VCORE_ENTRY_EXIT(r5)
1647 cmpwi r7,0x100 1650 cmpwi r7,0x100
diff --git a/arch/powerpc/kvm/book3s_interrupts.S b/arch/powerpc/kvm/book3s_interrupts.S
index f4dd041c14ea..f779450cb07c 100644
--- a/arch/powerpc/kvm/book3s_interrupts.S
+++ b/arch/powerpc/kvm/book3s_interrupts.S
@@ -129,29 +129,32 @@ kvm_start_lightweight:
129 * R12 = exit handler id 129 * R12 = exit handler id
130 * R13 = PACA 130 * R13 = PACA
131 * SVCPU.* = guest * 131 * SVCPU.* = guest *
132 * MSR.EE = 1
132 * 133 *
133 */ 134 */
134 135
136 PPC_LL r3, GPR4(r1) /* vcpu pointer */
137
138 /*
139 * kvmppc_copy_from_svcpu can clobber volatile registers, save
140 * the exit handler id to the vcpu and restore it from there later.
141 */
142 stw r12, VCPU_TRAP(r3)
143
135 /* Transfer reg values from shadow vcpu back to vcpu struct */ 144 /* Transfer reg values from shadow vcpu back to vcpu struct */
136 /* On 64-bit, interrupts are still off at this point */ 145 /* On 64-bit, interrupts are still off at this point */
137 PPC_LL r3, GPR4(r1) /* vcpu pointer */ 146
138 GET_SHADOW_VCPU(r4) 147 GET_SHADOW_VCPU(r4)
139 bl FUNC(kvmppc_copy_from_svcpu) 148 bl FUNC(kvmppc_copy_from_svcpu)
140 nop 149 nop
141 150
142#ifdef CONFIG_PPC_BOOK3S_64 151#ifdef CONFIG_PPC_BOOK3S_64
143 /* Re-enable interrupts */
144 ld r3, HSTATE_HOST_MSR(r13)
145 ori r3, r3, MSR_EE
146 MTMSR_EERI(r3)
147
148 /* 152 /*
149 * Reload kernel SPRG3 value. 153 * Reload kernel SPRG3 value.
150 * No need to save guest value as usermode can't modify SPRG3. 154 * No need to save guest value as usermode can't modify SPRG3.
151 */ 155 */
152 ld r3, PACA_SPRG3(r13) 156 ld r3, PACA_SPRG3(r13)
153 mtspr SPRN_SPRG3, r3 157 mtspr SPRN_SPRG3, r3
154
155#endif /* CONFIG_PPC_BOOK3S_64 */ 158#endif /* CONFIG_PPC_BOOK3S_64 */
156 159
157 /* R7 = vcpu */ 160 /* R7 = vcpu */
@@ -177,7 +180,7 @@ kvm_start_lightweight:
177 PPC_STL r31, VCPU_GPR(R31)(r7) 180 PPC_STL r31, VCPU_GPR(R31)(r7)
178 181
179 /* Pass the exit number as 3rd argument to kvmppc_handle_exit */ 182 /* Pass the exit number as 3rd argument to kvmppc_handle_exit */
180 mr r5, r12 183 lwz r5, VCPU_TRAP(r7)
181 184
182 /* Restore r3 (kvm_run) and r4 (vcpu) */ 185 /* Restore r3 (kvm_run) and r4 (vcpu) */
183 REST_2GPRS(3, r1) 186 REST_2GPRS(3, r1)
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index fe14ca3dd171..5b9e9063cfaf 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -66,6 +66,7 @@ static void kvmppc_core_vcpu_load_pr(struct kvm_vcpu *vcpu, int cpu)
66 struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); 66 struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
67 memcpy(svcpu->slb, to_book3s(vcpu)->slb_shadow, sizeof(svcpu->slb)); 67 memcpy(svcpu->slb, to_book3s(vcpu)->slb_shadow, sizeof(svcpu->slb));
68 svcpu->slb_max = to_book3s(vcpu)->slb_shadow_max; 68 svcpu->slb_max = to_book3s(vcpu)->slb_shadow_max;
69 svcpu->in_use = 0;
69 svcpu_put(svcpu); 70 svcpu_put(svcpu);
70#endif 71#endif
71 vcpu->cpu = smp_processor_id(); 72 vcpu->cpu = smp_processor_id();
@@ -78,6 +79,9 @@ static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu)
78{ 79{
79#ifdef CONFIG_PPC_BOOK3S_64 80#ifdef CONFIG_PPC_BOOK3S_64
80 struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); 81 struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
82 if (svcpu->in_use) {
83 kvmppc_copy_from_svcpu(vcpu, svcpu);
84 }
81 memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb)); 85 memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb));
82 to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max; 86 to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max;
83 svcpu_put(svcpu); 87 svcpu_put(svcpu);
@@ -110,12 +114,26 @@ void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu,
110 svcpu->ctr = vcpu->arch.ctr; 114 svcpu->ctr = vcpu->arch.ctr;
111 svcpu->lr = vcpu->arch.lr; 115 svcpu->lr = vcpu->arch.lr;
112 svcpu->pc = vcpu->arch.pc; 116 svcpu->pc = vcpu->arch.pc;
117 svcpu->in_use = true;
113} 118}
114 119
115/* Copy data touched by real-mode code from shadow vcpu back to vcpu */ 120/* Copy data touched by real-mode code from shadow vcpu back to vcpu */
116void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu, 121void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
117 struct kvmppc_book3s_shadow_vcpu *svcpu) 122 struct kvmppc_book3s_shadow_vcpu *svcpu)
118{ 123{
124 /*
125 * vcpu_put would just call us again because in_use hasn't
126 * been updated yet.
127 */
128 preempt_disable();
129
130 /*
131 * Maybe we were already preempted and synced the svcpu from
132 * our preempt notifiers. Don't bother touching this svcpu then.
133 */
134 if (!svcpu->in_use)
135 goto out;
136
119 vcpu->arch.gpr[0] = svcpu->gpr[0]; 137 vcpu->arch.gpr[0] = svcpu->gpr[0];
120 vcpu->arch.gpr[1] = svcpu->gpr[1]; 138 vcpu->arch.gpr[1] = svcpu->gpr[1];
121 vcpu->arch.gpr[2] = svcpu->gpr[2]; 139 vcpu->arch.gpr[2] = svcpu->gpr[2];
@@ -139,6 +157,10 @@ void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
139 vcpu->arch.fault_dar = svcpu->fault_dar; 157 vcpu->arch.fault_dar = svcpu->fault_dar;
140 vcpu->arch.fault_dsisr = svcpu->fault_dsisr; 158 vcpu->arch.fault_dsisr = svcpu->fault_dsisr;
141 vcpu->arch.last_inst = svcpu->last_inst; 159 vcpu->arch.last_inst = svcpu->last_inst;
160 svcpu->in_use = false;
161
162out:
163 preempt_enable();
142} 164}
143 165
144static int kvmppc_core_check_requests_pr(struct kvm_vcpu *vcpu) 166static int kvmppc_core_check_requests_pr(struct kvm_vcpu *vcpu)
diff --git a/arch/powerpc/kvm/book3s_rmhandlers.S b/arch/powerpc/kvm/book3s_rmhandlers.S
index a38c4c9edab8..c3c5231adade 100644
--- a/arch/powerpc/kvm/book3s_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_rmhandlers.S
@@ -153,15 +153,11 @@ _GLOBAL(kvmppc_entry_trampoline)
153 153
154 li r6, MSR_IR | MSR_DR 154 li r6, MSR_IR | MSR_DR
155 andc r6, r5, r6 /* Clear DR and IR in MSR value */ 155 andc r6, r5, r6 /* Clear DR and IR in MSR value */
156#ifdef CONFIG_PPC_BOOK3S_32
157 /* 156 /*
158 * Set EE in HOST_MSR so that it's enabled when we get into our 157 * Set EE in HOST_MSR so that it's enabled when we get into our
159 * C exit handler function. On 64-bit we delay enabling 158 * C exit handler function.
160 * interrupts until we have finished transferring stuff
161 * to or from the PACA.
162 */ 159 */
163 ori r5, r5, MSR_EE 160 ori r5, r5, MSR_EE
164#endif
165 mtsrr0 r7 161 mtsrr0 r7
166 mtsrr1 r6 162 mtsrr1 r6
167 RFI 163 RFI
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 53e65a210b9a..0591e05db74b 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -681,7 +681,7 @@ int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
681int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) 681int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
682{ 682{
683 int ret, s; 683 int ret, s;
684 struct thread_struct thread; 684 struct debug_reg debug;
685#ifdef CONFIG_PPC_FPU 685#ifdef CONFIG_PPC_FPU
686 struct thread_fp_state fp; 686 struct thread_fp_state fp;
687 int fpexc_mode; 687 int fpexc_mode;
@@ -723,9 +723,9 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
723#endif 723#endif
724 724
725 /* Switch to guest debug context */ 725 /* Switch to guest debug context */
726 thread.debug = vcpu->arch.shadow_dbg_reg; 726 debug = vcpu->arch.shadow_dbg_reg;
727 switch_booke_debug_regs(&thread); 727 switch_booke_debug_regs(&debug);
728 thread.debug = current->thread.debug; 728 debug = current->thread.debug;
729 current->thread.debug = vcpu->arch.shadow_dbg_reg; 729 current->thread.debug = vcpu->arch.shadow_dbg_reg;
730 730
731 kvmppc_fix_ee_before_entry(); 731 kvmppc_fix_ee_before_entry();
@@ -736,8 +736,8 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
736 We also get here with interrupts enabled. */ 736 We also get here with interrupts enabled. */
737 737
738 /* Switch back to user space debug context */ 738 /* Switch back to user space debug context */
739 switch_booke_debug_regs(&thread); 739 switch_booke_debug_regs(&debug);
740 current->thread.debug = thread.debug; 740 current->thread.debug = debug;
741 741
742#ifdef CONFIG_PPC_FPU 742#ifdef CONFIG_PPC_FPU
743 kvmppc_save_guest_fp(vcpu); 743 kvmppc_save_guest_fp(vcpu);
diff --git a/arch/powerpc/lib/copyuser_64.S b/arch/powerpc/lib/copyuser_64.S
index d73a59014900..596a285c0755 100644
--- a/arch/powerpc/lib/copyuser_64.S
+++ b/arch/powerpc/lib/copyuser_64.S
@@ -9,6 +9,14 @@
9#include <asm/processor.h> 9#include <asm/processor.h>
10#include <asm/ppc_asm.h> 10#include <asm/ppc_asm.h>
11 11
12#ifdef __BIG_ENDIAN__
13#define sLd sld /* Shift towards low-numbered address. */
14#define sHd srd /* Shift towards high-numbered address. */
15#else
16#define sLd srd /* Shift towards low-numbered address. */
17#define sHd sld /* Shift towards high-numbered address. */
18#endif
19
12 .align 7 20 .align 7
13_GLOBAL(__copy_tofrom_user) 21_GLOBAL(__copy_tofrom_user)
14BEGIN_FTR_SECTION 22BEGIN_FTR_SECTION
@@ -118,10 +126,10 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
118 126
11924: ld r9,0(r4) /* 3+2n loads, 2+2n stores */ 12724: ld r9,0(r4) /* 3+2n loads, 2+2n stores */
12025: ld r0,8(r4) 12825: ld r0,8(r4)
121 sld r6,r9,r10 129 sLd r6,r9,r10
12226: ldu r9,16(r4) 13026: ldu r9,16(r4)
123 srd r7,r0,r11 131 sHd r7,r0,r11
124 sld r8,r0,r10 132 sLd r8,r0,r10
125 or r7,r7,r6 133 or r7,r7,r6
126 blt cr6,79f 134 blt cr6,79f
12727: ld r0,8(r4) 13527: ld r0,8(r4)
@@ -129,35 +137,35 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
129 137
13028: ld r0,0(r4) /* 4+2n loads, 3+2n stores */ 13828: ld r0,0(r4) /* 4+2n loads, 3+2n stores */
13129: ldu r9,8(r4) 13929: ldu r9,8(r4)
132 sld r8,r0,r10 140 sLd r8,r0,r10
133 addi r3,r3,-8 141 addi r3,r3,-8
134 blt cr6,5f 142 blt cr6,5f
13530: ld r0,8(r4) 14330: ld r0,8(r4)
136 srd r12,r9,r11 144 sHd r12,r9,r11
137 sld r6,r9,r10 145 sLd r6,r9,r10
13831: ldu r9,16(r4) 14631: ldu r9,16(r4)
139 or r12,r8,r12 147 or r12,r8,r12
140 srd r7,r0,r11 148 sHd r7,r0,r11
141 sld r8,r0,r10 149 sLd r8,r0,r10
142 addi r3,r3,16 150 addi r3,r3,16
143 beq cr6,78f 151 beq cr6,78f
144 152
1451: or r7,r7,r6 1531: or r7,r7,r6
14632: ld r0,8(r4) 15432: ld r0,8(r4)
14776: std r12,8(r3) 15576: std r12,8(r3)
1482: srd r12,r9,r11 1562: sHd r12,r9,r11
149 sld r6,r9,r10 157 sLd r6,r9,r10
15033: ldu r9,16(r4) 15833: ldu r9,16(r4)
151 or r12,r8,r12 159 or r12,r8,r12
15277: stdu r7,16(r3) 16077: stdu r7,16(r3)
153 srd r7,r0,r11 161 sHd r7,r0,r11
154 sld r8,r0,r10 162 sLd r8,r0,r10
155 bdnz 1b 163 bdnz 1b
156 164
15778: std r12,8(r3) 16578: std r12,8(r3)
158 or r7,r7,r6 166 or r7,r7,r6
15979: std r7,16(r3) 16779: std r7,16(r3)
1605: srd r12,r9,r11 1685: sHd r12,r9,r11
161 or r12,r8,r12 169 or r12,r8,r12
16280: std r12,24(r3) 17080: std r12,24(r3)
163 bne 6f 171 bne 6f
@@ -165,23 +173,38 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
165 blr 173 blr
1666: cmpwi cr1,r5,8 1746: cmpwi cr1,r5,8
167 addi r3,r3,32 175 addi r3,r3,32
168 sld r9,r9,r10 176 sLd r9,r9,r10
169 ble cr1,7f 177 ble cr1,7f
17034: ld r0,8(r4) 17834: ld r0,8(r4)
171 srd r7,r0,r11 179 sHd r7,r0,r11
172 or r9,r7,r9 180 or r9,r7,r9
1737: 1817:
174 bf cr7*4+1,1f 182 bf cr7*4+1,1f
183#ifdef __BIG_ENDIAN__
175 rotldi r9,r9,32 184 rotldi r9,r9,32
185#endif
17694: stw r9,0(r3) 18694: stw r9,0(r3)
187#ifdef __LITTLE_ENDIAN__
188 rotrdi r9,r9,32
189#endif
177 addi r3,r3,4 190 addi r3,r3,4
1781: bf cr7*4+2,2f 1911: bf cr7*4+2,2f
192#ifdef __BIG_ENDIAN__
179 rotldi r9,r9,16 193 rotldi r9,r9,16
194#endif
18095: sth r9,0(r3) 19595: sth r9,0(r3)
196#ifdef __LITTLE_ENDIAN__
197 rotrdi r9,r9,16
198#endif
181 addi r3,r3,2 199 addi r3,r3,2
1822: bf cr7*4+3,3f 2002: bf cr7*4+3,3f
201#ifdef __BIG_ENDIAN__
183 rotldi r9,r9,8 202 rotldi r9,r9,8
203#endif
18496: stb r9,0(r3) 20496: stb r9,0(r3)
205#ifdef __LITTLE_ENDIAN__
206 rotrdi r9,r9,8
207#endif
1853: li r3,0 2083: li r3,0
186 blr 209 blr
187 210
diff --git a/arch/powerpc/mm/hugetlbpage-book3e.c b/arch/powerpc/mm/hugetlbpage-book3e.c
index 3bc700655fc8..74551b5e41e5 100644
--- a/arch/powerpc/mm/hugetlbpage-book3e.c
+++ b/arch/powerpc/mm/hugetlbpage-book3e.c
@@ -117,6 +117,5 @@ void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
117 struct hstate *hstate = hstate_file(vma->vm_file); 117 struct hstate *hstate = hstate_file(vma->vm_file);
118 unsigned long tsize = huge_page_shift(hstate) - 10; 118 unsigned long tsize = huge_page_shift(hstate) - 10;
119 119
120 __flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr, tsize, 0); 120 __flush_tlb_page(vma->vm_mm, vmaddr, tsize, 0);
121
122} 121}
diff --git a/arch/powerpc/mm/tlb_nohash.c b/arch/powerpc/mm/tlb_nohash.c
index 41cd68dee681..358d74303138 100644
--- a/arch/powerpc/mm/tlb_nohash.c
+++ b/arch/powerpc/mm/tlb_nohash.c
@@ -305,7 +305,7 @@ void __flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
305void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr) 305void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
306{ 306{
307#ifdef CONFIG_HUGETLB_PAGE 307#ifdef CONFIG_HUGETLB_PAGE
308 if (is_vm_hugetlb_page(vma)) 308 if (vma && is_vm_hugetlb_page(vma))
309 flush_hugetlb_page(vma, vmaddr); 309 flush_hugetlb_page(vma, vmaddr);
310#endif 310#endif
311 311
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index 132f8726a257..bca2465a9c34 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -404,13 +404,27 @@ config PPC_DOORBELL
404 404
405endmenu 405endmenu
406 406
407config CPU_LITTLE_ENDIAN 407choice
408 bool "Build little endian kernel" 408 prompt "Endianness selection"
409 default n 409 default CPU_BIG_ENDIAN
410 help 410 help
411 This option selects whether a big endian or little endian kernel will 411 This option selects whether a big endian or little endian kernel will
412 be built. 412 be built.
413 413
414config CPU_BIG_ENDIAN
415 bool "Build big endian kernel"
416 help
417 Build a big endian kernel.
418
419 If unsure, select this option.
420
421config CPU_LITTLE_ENDIAN
422 bool "Build little endian kernel"
423 help
424 Build a little endian kernel.
425
414 Note that if cross compiling a little endian kernel, 426 Note that if cross compiling a little endian kernel,
415 CROSS_COMPILE must point to a toolchain capable of targeting 427 CROSS_COMPILE must point to a toolchain capable of targeting
416 little endian powerpc. 428 little endian powerpc.
429
430endchoice
diff --git a/arch/powerpc/platforms/powernv/eeh-ioda.c b/arch/powerpc/platforms/powernv/eeh-ioda.c
index 02245cee7818..d7ddcee7feb8 100644
--- a/arch/powerpc/platforms/powernv/eeh-ioda.c
+++ b/arch/powerpc/platforms/powernv/eeh-ioda.c
@@ -36,7 +36,6 @@
36#include "powernv.h" 36#include "powernv.h"
37#include "pci.h" 37#include "pci.h"
38 38
39static char *hub_diag = NULL;
40static int ioda_eeh_nb_init = 0; 39static int ioda_eeh_nb_init = 0;
41 40
42static int ioda_eeh_event(struct notifier_block *nb, 41static int ioda_eeh_event(struct notifier_block *nb,
@@ -140,15 +139,6 @@ static int ioda_eeh_post_init(struct pci_controller *hose)
140 ioda_eeh_nb_init = 1; 139 ioda_eeh_nb_init = 1;
141 } 140 }
142 141
143 /* We needn't HUB diag-data on PHB3 */
144 if (phb->type == PNV_PHB_IODA1 && !hub_diag) {
145 hub_diag = (char *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
146 if (!hub_diag) {
147 pr_err("%s: Out of memory !\n", __func__);
148 return -ENOMEM;
149 }
150 }
151
152#ifdef CONFIG_DEBUG_FS 142#ifdef CONFIG_DEBUG_FS
153 if (phb->dbgfs) { 143 if (phb->dbgfs) {
154 debugfs_create_file("err_injct_outbound", 0600, 144 debugfs_create_file("err_injct_outbound", 0600,
@@ -633,11 +623,10 @@ static void ioda_eeh_hub_diag_common(struct OpalIoP7IOCErrorData *data)
633static void ioda_eeh_hub_diag(struct pci_controller *hose) 623static void ioda_eeh_hub_diag(struct pci_controller *hose)
634{ 624{
635 struct pnv_phb *phb = hose->private_data; 625 struct pnv_phb *phb = hose->private_data;
636 struct OpalIoP7IOCErrorData *data; 626 struct OpalIoP7IOCErrorData *data = &phb->diag.hub_diag;
637 long rc; 627 long rc;
638 628
639 data = (struct OpalIoP7IOCErrorData *)ioda_eeh_hub_diag; 629 rc = opal_pci_get_hub_diag_data(phb->hub_id, data, sizeof(*data));
640 rc = opal_pci_get_hub_diag_data(phb->hub_id, data, PAGE_SIZE);
641 if (rc != OPAL_SUCCESS) { 630 if (rc != OPAL_SUCCESS) {
642 pr_warning("%s: Failed to get HUB#%llx diag-data (%ld)\n", 631 pr_warning("%s: Failed to get HUB#%llx diag-data (%ld)\n",
643 __func__, phb->hub_id, rc); 632 __func__, phb->hub_id, rc);
@@ -820,14 +809,15 @@ static void ioda_eeh_phb_diag(struct pci_controller *hose)
820 struct OpalIoPhbErrorCommon *common; 809 struct OpalIoPhbErrorCommon *common;
821 long rc; 810 long rc;
822 811
823 common = (struct OpalIoPhbErrorCommon *)phb->diag.blob; 812 rc = opal_pci_get_phb_diag_data2(phb->opal_id, phb->diag.blob,
824 rc = opal_pci_get_phb_diag_data2(phb->opal_id, common, PAGE_SIZE); 813 PNV_PCI_DIAG_BUF_SIZE);
825 if (rc != OPAL_SUCCESS) { 814 if (rc != OPAL_SUCCESS) {
826 pr_warning("%s: Failed to get diag-data for PHB#%x (%ld)\n", 815 pr_warning("%s: Failed to get diag-data for PHB#%x (%ld)\n",
827 __func__, hose->global_number, rc); 816 __func__, hose->global_number, rc);
828 return; 817 return;
829 } 818 }
830 819
820 common = (struct OpalIoPhbErrorCommon *)phb->diag.blob;
831 switch (common->ioType) { 821 switch (common->ioType) {
832 case OPAL_PHB_ERROR_DATA_TYPE_P7IOC: 822 case OPAL_PHB_ERROR_DATA_TYPE_P7IOC:
833 ioda_eeh_p7ioc_phb_diag(hose, common); 823 ioda_eeh_p7ioc_phb_diag(hose, common);
diff --git a/arch/powerpc/platforms/powernv/opal-lpc.c b/arch/powerpc/platforms/powernv/opal-lpc.c
index e7e59e4f9892..79d83cad3d67 100644
--- a/arch/powerpc/platforms/powernv/opal-lpc.c
+++ b/arch/powerpc/platforms/powernv/opal-lpc.c
@@ -24,25 +24,25 @@ static int opal_lpc_chip_id = -1;
24static u8 opal_lpc_inb(unsigned long port) 24static u8 opal_lpc_inb(unsigned long port)
25{ 25{
26 int64_t rc; 26 int64_t rc;
27 uint32_t data; 27 __be32 data;
28 28
29 if (opal_lpc_chip_id < 0 || port > 0xffff) 29 if (opal_lpc_chip_id < 0 || port > 0xffff)
30 return 0xff; 30 return 0xff;
31 rc = opal_lpc_read(opal_lpc_chip_id, OPAL_LPC_IO, port, &data, 1); 31 rc = opal_lpc_read(opal_lpc_chip_id, OPAL_LPC_IO, port, &data, 1);
32 return rc ? 0xff : data; 32 return rc ? 0xff : be32_to_cpu(data);
33} 33}
34 34
35static __le16 __opal_lpc_inw(unsigned long port) 35static __le16 __opal_lpc_inw(unsigned long port)
36{ 36{
37 int64_t rc; 37 int64_t rc;
38 uint32_t data; 38 __be32 data;
39 39
40 if (opal_lpc_chip_id < 0 || port > 0xfffe) 40 if (opal_lpc_chip_id < 0 || port > 0xfffe)
41 return 0xffff; 41 return 0xffff;
42 if (port & 1) 42 if (port & 1)
43 return (__le16)opal_lpc_inb(port) << 8 | opal_lpc_inb(port + 1); 43 return (__le16)opal_lpc_inb(port) << 8 | opal_lpc_inb(port + 1);
44 rc = opal_lpc_read(opal_lpc_chip_id, OPAL_LPC_IO, port, &data, 2); 44 rc = opal_lpc_read(opal_lpc_chip_id, OPAL_LPC_IO, port, &data, 2);
45 return rc ? 0xffff : data; 45 return rc ? 0xffff : be32_to_cpu(data);
46} 46}
47static u16 opal_lpc_inw(unsigned long port) 47static u16 opal_lpc_inw(unsigned long port)
48{ 48{
@@ -52,7 +52,7 @@ static u16 opal_lpc_inw(unsigned long port)
52static __le32 __opal_lpc_inl(unsigned long port) 52static __le32 __opal_lpc_inl(unsigned long port)
53{ 53{
54 int64_t rc; 54 int64_t rc;
55 uint32_t data; 55 __be32 data;
56 56
57 if (opal_lpc_chip_id < 0 || port > 0xfffc) 57 if (opal_lpc_chip_id < 0 || port > 0xfffc)
58 return 0xffffffff; 58 return 0xffffffff;
@@ -62,7 +62,7 @@ static __le32 __opal_lpc_inl(unsigned long port)
62 (__le32)opal_lpc_inb(port + 2) << 8 | 62 (__le32)opal_lpc_inb(port + 2) << 8 |
63 opal_lpc_inb(port + 3); 63 opal_lpc_inb(port + 3);
64 rc = opal_lpc_read(opal_lpc_chip_id, OPAL_LPC_IO, port, &data, 4); 64 rc = opal_lpc_read(opal_lpc_chip_id, OPAL_LPC_IO, port, &data, 4);
65 return rc ? 0xffffffff : data; 65 return rc ? 0xffffffff : be32_to_cpu(data);
66} 66}
67 67
68static u32 opal_lpc_inl(unsigned long port) 68static u32 opal_lpc_inl(unsigned long port)
diff --git a/arch/powerpc/platforms/powernv/opal-xscom.c b/arch/powerpc/platforms/powernv/opal-xscom.c
index 4d99a8fd55ac..4fbf276ac99e 100644
--- a/arch/powerpc/platforms/powernv/opal-xscom.c
+++ b/arch/powerpc/platforms/powernv/opal-xscom.c
@@ -96,9 +96,11 @@ static int opal_scom_read(scom_map_t map, u64 reg, u64 *value)
96{ 96{
97 struct opal_scom_map *m = map; 97 struct opal_scom_map *m = map;
98 int64_t rc; 98 int64_t rc;
99 __be64 v;
99 100
100 reg = opal_scom_unmangle(reg); 101 reg = opal_scom_unmangle(reg);
101 rc = opal_xscom_read(m->chip, m->addr + reg, (uint64_t *)__pa(value)); 102 rc = opal_xscom_read(m->chip, m->addr + reg, (__be64 *)__pa(&v));
103 *value = be64_to_cpu(v);
102 return opal_xscom_err_xlate(rc); 104 return opal_xscom_err_xlate(rc);
103} 105}
104 106
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index 084cdfa40682..2c6d173842b2 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -720,6 +720,7 @@ static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
720 tbl->it_type = TCE_PCI_SWINV_CREATE | TCE_PCI_SWINV_FREE; 720 tbl->it_type = TCE_PCI_SWINV_CREATE | TCE_PCI_SWINV_FREE;
721 } 721 }
722 iommu_init_table(tbl, phb->hose->node); 722 iommu_init_table(tbl, phb->hose->node);
723 iommu_register_group(tbl, pci_domain_nr(pe->pbus), pe->pe_number);
723 724
724 if (pe->pdev) 725 if (pe->pdev)
725 set_iommu_table_base(&pe->pdev->dev, tbl); 726 set_iommu_table_base(&pe->pdev->dev, tbl);
diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h
index 911c24ef033e..1ed8d5f40f5a 100644
--- a/arch/powerpc/platforms/powernv/pci.h
+++ b/arch/powerpc/platforms/powernv/pci.h
@@ -172,11 +172,13 @@ struct pnv_phb {
172 } ioda; 172 } ioda;
173 }; 173 };
174 174
175 /* PHB status structure */ 175 /* PHB and hub status structure */
176 union { 176 union {
177 unsigned char blob[PNV_PCI_DIAG_BUF_SIZE]; 177 unsigned char blob[PNV_PCI_DIAG_BUF_SIZE];
178 struct OpalIoP7IOCPhbErrorData p7ioc; 178 struct OpalIoP7IOCPhbErrorData p7ioc;
179 struct OpalIoP7IOCErrorData hub_diag;
179 } diag; 180 } diag;
181
180}; 182};
181 183
182extern struct pci_ops pnv_pci_ops; 184extern struct pci_ops pnv_pci_ops;
diff --git a/arch/powerpc/platforms/pseries/lparcfg.c b/arch/powerpc/platforms/pseries/lparcfg.c
index e738007eae64..c9fecf09b8fa 100644
--- a/arch/powerpc/platforms/pseries/lparcfg.c
+++ b/arch/powerpc/platforms/pseries/lparcfg.c
@@ -157,7 +157,7 @@ static void parse_ppp_data(struct seq_file *m)
157{ 157{
158 struct hvcall_ppp_data ppp_data; 158 struct hvcall_ppp_data ppp_data;
159 struct device_node *root; 159 struct device_node *root;
160 const int *perf_level; 160 const __be32 *perf_level;
161 int rc; 161 int rc;
162 162
163 rc = h_get_ppp(&ppp_data); 163 rc = h_get_ppp(&ppp_data);
@@ -201,7 +201,7 @@ static void parse_ppp_data(struct seq_file *m)
201 perf_level = of_get_property(root, 201 perf_level = of_get_property(root,
202 "ibm,partition-performance-parameters-level", 202 "ibm,partition-performance-parameters-level",
203 NULL); 203 NULL);
204 if (perf_level && (*perf_level >= 1)) { 204 if (perf_level && (be32_to_cpup(perf_level) >= 1)) {
205 seq_printf(m, 205 seq_printf(m,
206 "physical_procs_allocated_to_virtualization=%d\n", 206 "physical_procs_allocated_to_virtualization=%d\n",
207 ppp_data.phys_platform_procs); 207 ppp_data.phys_platform_procs);
@@ -435,7 +435,7 @@ static int pseries_lparcfg_data(struct seq_file *m, void *v)
435 int partition_potential_processors; 435 int partition_potential_processors;
436 int partition_active_processors; 436 int partition_active_processors;
437 struct device_node *rtas_node; 437 struct device_node *rtas_node;
438 const int *lrdrp = NULL; 438 const __be32 *lrdrp = NULL;
439 439
440 rtas_node = of_find_node_by_path("/rtas"); 440 rtas_node = of_find_node_by_path("/rtas");
441 if (rtas_node) 441 if (rtas_node)
@@ -444,7 +444,7 @@ static int pseries_lparcfg_data(struct seq_file *m, void *v)
444 if (lrdrp == NULL) { 444 if (lrdrp == NULL) {
445 partition_potential_processors = vdso_data->processorCount; 445 partition_potential_processors = vdso_data->processorCount;
446 } else { 446 } else {
447 partition_potential_processors = *(lrdrp + 4); 447 partition_potential_processors = be32_to_cpup(lrdrp + 4);
448 } 448 }
449 of_node_put(rtas_node); 449 of_node_put(rtas_node);
450 450
@@ -654,7 +654,7 @@ static int lparcfg_data(struct seq_file *m, void *v)
654 const char *model = ""; 654 const char *model = "";
655 const char *system_id = ""; 655 const char *system_id = "";
656 const char *tmp; 656 const char *tmp;
657 const unsigned int *lp_index_ptr; 657 const __be32 *lp_index_ptr;
658 unsigned int lp_index = 0; 658 unsigned int lp_index = 0;
659 659
660 seq_printf(m, "%s %s\n", MODULE_NAME, MODULE_VERS); 660 seq_printf(m, "%s %s\n", MODULE_NAME, MODULE_VERS);
@@ -670,7 +670,7 @@ static int lparcfg_data(struct seq_file *m, void *v)
670 lp_index_ptr = of_get_property(rootdn, "ibm,partition-no", 670 lp_index_ptr = of_get_property(rootdn, "ibm,partition-no",
671 NULL); 671 NULL);
672 if (lp_index_ptr) 672 if (lp_index_ptr)
673 lp_index = *lp_index_ptr; 673 lp_index = be32_to_cpup(lp_index_ptr);
674 of_node_put(rootdn); 674 of_node_put(rootdn);
675 } 675 }
676 seq_printf(m, "serial_number=%s\n", system_id); 676 seq_printf(m, "serial_number=%s\n", system_id);
diff --git a/arch/powerpc/platforms/pseries/msi.c b/arch/powerpc/platforms/pseries/msi.c
index 6d2f0abce6fa..0c882e83c4ce 100644
--- a/arch/powerpc/platforms/pseries/msi.c
+++ b/arch/powerpc/platforms/pseries/msi.c
@@ -130,7 +130,8 @@ static int check_req(struct pci_dev *pdev, int nvec, char *prop_name)
130{ 130{
131 struct device_node *dn; 131 struct device_node *dn;
132 struct pci_dn *pdn; 132 struct pci_dn *pdn;
133 const u32 *req_msi; 133 const __be32 *p;
134 u32 req_msi;
134 135
135 pdn = pci_get_pdn(pdev); 136 pdn = pci_get_pdn(pdev);
136 if (!pdn) 137 if (!pdn)
@@ -138,19 +139,20 @@ static int check_req(struct pci_dev *pdev, int nvec, char *prop_name)
138 139
139 dn = pdn->node; 140 dn = pdn->node;
140 141
141 req_msi = of_get_property(dn, prop_name, NULL); 142 p = of_get_property(dn, prop_name, NULL);
142 if (!req_msi) { 143 if (!p) {
143 pr_debug("rtas_msi: No %s on %s\n", prop_name, dn->full_name); 144 pr_debug("rtas_msi: No %s on %s\n", prop_name, dn->full_name);
144 return -ENOENT; 145 return -ENOENT;
145 } 146 }
146 147
147 if (*req_msi < nvec) { 148 req_msi = be32_to_cpup(p);
149 if (req_msi < nvec) {
148 pr_debug("rtas_msi: %s requests < %d MSIs\n", prop_name, nvec); 150 pr_debug("rtas_msi: %s requests < %d MSIs\n", prop_name, nvec);
149 151
150 if (*req_msi == 0) /* Be paranoid */ 152 if (req_msi == 0) /* Be paranoid */
151 return -ENOSPC; 153 return -ENOSPC;
152 154
153 return *req_msi; 155 return req_msi;
154 } 156 }
155 157
156 return 0; 158 return 0;
@@ -171,7 +173,7 @@ static int check_req_msix(struct pci_dev *pdev, int nvec)
171static struct device_node *find_pe_total_msi(struct pci_dev *dev, int *total) 173static struct device_node *find_pe_total_msi(struct pci_dev *dev, int *total)
172{ 174{
173 struct device_node *dn; 175 struct device_node *dn;
174 const u32 *p; 176 const __be32 *p;
175 177
176 dn = of_node_get(pci_device_to_OF_node(dev)); 178 dn = of_node_get(pci_device_to_OF_node(dev));
177 while (dn) { 179 while (dn) {
@@ -179,7 +181,7 @@ static struct device_node *find_pe_total_msi(struct pci_dev *dev, int *total)
179 if (p) { 181 if (p) {
180 pr_debug("rtas_msi: found prop on dn %s\n", 182 pr_debug("rtas_msi: found prop on dn %s\n",
181 dn->full_name); 183 dn->full_name);
182 *total = *p; 184 *total = be32_to_cpup(p);
183 return dn; 185 return dn;
184 } 186 }
185 187
@@ -232,13 +234,13 @@ struct msi_counts {
232static void *count_non_bridge_devices(struct device_node *dn, void *data) 234static void *count_non_bridge_devices(struct device_node *dn, void *data)
233{ 235{
234 struct msi_counts *counts = data; 236 struct msi_counts *counts = data;
235 const u32 *p; 237 const __be32 *p;
236 u32 class; 238 u32 class;
237 239
238 pr_debug("rtas_msi: counting %s\n", dn->full_name); 240 pr_debug("rtas_msi: counting %s\n", dn->full_name);
239 241
240 p = of_get_property(dn, "class-code", NULL); 242 p = of_get_property(dn, "class-code", NULL);
241 class = p ? *p : 0; 243 class = p ? be32_to_cpup(p) : 0;
242 244
243 if ((class >> 8) != PCI_CLASS_BRIDGE_PCI) 245 if ((class >> 8) != PCI_CLASS_BRIDGE_PCI)
244 counts->num_devices++; 246 counts->num_devices++;
@@ -249,7 +251,7 @@ static void *count_non_bridge_devices(struct device_node *dn, void *data)
249static void *count_spare_msis(struct device_node *dn, void *data) 251static void *count_spare_msis(struct device_node *dn, void *data)
250{ 252{
251 struct msi_counts *counts = data; 253 struct msi_counts *counts = data;
252 const u32 *p; 254 const __be32 *p;
253 int req; 255 int req;
254 256
255 if (dn == counts->requestor) 257 if (dn == counts->requestor)
@@ -260,11 +262,11 @@ static void *count_spare_msis(struct device_node *dn, void *data)
260 req = 0; 262 req = 0;
261 p = of_get_property(dn, "ibm,req#msi", NULL); 263 p = of_get_property(dn, "ibm,req#msi", NULL);
262 if (p) 264 if (p)
263 req = *p; 265 req = be32_to_cpup(p);
264 266
265 p = of_get_property(dn, "ibm,req#msi-x", NULL); 267 p = of_get_property(dn, "ibm,req#msi-x", NULL);
266 if (p) 268 if (p)
267 req = max(req, (int)*p); 269 req = max(req, (int)be32_to_cpup(p));
268 } 270 }
269 271
270 if (req < counts->quota) 272 if (req < counts->quota)
diff --git a/arch/powerpc/platforms/pseries/nvram.c b/arch/powerpc/platforms/pseries/nvram.c
index 7bfaf58d4664..d7096f2f7751 100644
--- a/arch/powerpc/platforms/pseries/nvram.c
+++ b/arch/powerpc/platforms/pseries/nvram.c
@@ -43,8 +43,8 @@ static char nvram_buf[NVRW_CNT]; /* assume this is in the first 4GB */
43static DEFINE_SPINLOCK(nvram_lock); 43static DEFINE_SPINLOCK(nvram_lock);
44 44
45struct err_log_info { 45struct err_log_info {
46 int error_type; 46 __be32 error_type;
47 unsigned int seq_num; 47 __be32 seq_num;
48}; 48};
49 49
50struct nvram_os_partition { 50struct nvram_os_partition {
@@ -79,9 +79,9 @@ static const char *pseries_nvram_os_partitions[] = {
79}; 79};
80 80
81struct oops_log_info { 81struct oops_log_info {
82 u16 version; 82 __be16 version;
83 u16 report_length; 83 __be16 report_length;
84 u64 timestamp; 84 __be64 timestamp;
85} __attribute__((packed)); 85} __attribute__((packed));
86 86
87static void oops_to_nvram(struct kmsg_dumper *dumper, 87static void oops_to_nvram(struct kmsg_dumper *dumper,
@@ -291,8 +291,8 @@ int nvram_write_os_partition(struct nvram_os_partition *part, char * buff,
291 length = part->size; 291 length = part->size;
292 } 292 }
293 293
294 info.error_type = err_type; 294 info.error_type = cpu_to_be32(err_type);
295 info.seq_num = error_log_cnt; 295 info.seq_num = cpu_to_be32(error_log_cnt);
296 296
297 tmp_index = part->index; 297 tmp_index = part->index;
298 298
@@ -364,8 +364,8 @@ int nvram_read_partition(struct nvram_os_partition *part, char *buff,
364 } 364 }
365 365
366 if (part->os_partition) { 366 if (part->os_partition) {
367 *error_log_cnt = info.seq_num; 367 *error_log_cnt = be32_to_cpu(info.seq_num);
368 *err_type = info.error_type; 368 *err_type = be32_to_cpu(info.error_type);
369 } 369 }
370 370
371 return 0; 371 return 0;
@@ -529,9 +529,9 @@ static int zip_oops(size_t text_len)
529 pr_err("nvram: logging uncompressed oops/panic report\n"); 529 pr_err("nvram: logging uncompressed oops/panic report\n");
530 return -1; 530 return -1;
531 } 531 }
532 oops_hdr->version = OOPS_HDR_VERSION; 532 oops_hdr->version = cpu_to_be16(OOPS_HDR_VERSION);
533 oops_hdr->report_length = (u16) zipped_len; 533 oops_hdr->report_length = cpu_to_be16(zipped_len);
534 oops_hdr->timestamp = get_seconds(); 534 oops_hdr->timestamp = cpu_to_be64(get_seconds());
535 return 0; 535 return 0;
536} 536}
537 537
@@ -574,9 +574,9 @@ static int nvram_pstore_write(enum pstore_type_id type,
574 clobbering_unread_rtas_event()) 574 clobbering_unread_rtas_event())
575 return -1; 575 return -1;
576 576
577 oops_hdr->version = OOPS_HDR_VERSION; 577 oops_hdr->version = cpu_to_be16(OOPS_HDR_VERSION);
578 oops_hdr->report_length = (u16) size; 578 oops_hdr->report_length = cpu_to_be16(size);
579 oops_hdr->timestamp = get_seconds(); 579 oops_hdr->timestamp = cpu_to_be64(get_seconds());
580 580
581 if (compressed) 581 if (compressed)
582 err_type = ERR_TYPE_KERNEL_PANIC_GZ; 582 err_type = ERR_TYPE_KERNEL_PANIC_GZ;
@@ -670,16 +670,16 @@ static ssize_t nvram_pstore_read(u64 *id, enum pstore_type_id *type,
670 size_t length, hdr_size; 670 size_t length, hdr_size;
671 671
672 oops_hdr = (struct oops_log_info *)buff; 672 oops_hdr = (struct oops_log_info *)buff;
673 if (oops_hdr->version < OOPS_HDR_VERSION) { 673 if (be16_to_cpu(oops_hdr->version) < OOPS_HDR_VERSION) {
674 /* Old format oops header had 2-byte record size */ 674 /* Old format oops header had 2-byte record size */
675 hdr_size = sizeof(u16); 675 hdr_size = sizeof(u16);
676 length = oops_hdr->version; 676 length = be16_to_cpu(oops_hdr->version);
677 time->tv_sec = 0; 677 time->tv_sec = 0;
678 time->tv_nsec = 0; 678 time->tv_nsec = 0;
679 } else { 679 } else {
680 hdr_size = sizeof(*oops_hdr); 680 hdr_size = sizeof(*oops_hdr);
681 length = oops_hdr->report_length; 681 length = be16_to_cpu(oops_hdr->report_length);
682 time->tv_sec = oops_hdr->timestamp; 682 time->tv_sec = be64_to_cpu(oops_hdr->timestamp);
683 time->tv_nsec = 0; 683 time->tv_nsec = 0;
684 } 684 }
685 *buf = kmalloc(length, GFP_KERNEL); 685 *buf = kmalloc(length, GFP_KERNEL);
@@ -889,13 +889,13 @@ static void oops_to_nvram(struct kmsg_dumper *dumper,
889 kmsg_dump_get_buffer(dumper, false, 889 kmsg_dump_get_buffer(dumper, false,
890 oops_data, oops_data_sz, &text_len); 890 oops_data, oops_data_sz, &text_len);
891 err_type = ERR_TYPE_KERNEL_PANIC; 891 err_type = ERR_TYPE_KERNEL_PANIC;
892 oops_hdr->version = OOPS_HDR_VERSION; 892 oops_hdr->version = cpu_to_be16(OOPS_HDR_VERSION);
893 oops_hdr->report_length = (u16) text_len; 893 oops_hdr->report_length = cpu_to_be16(text_len);
894 oops_hdr->timestamp = get_seconds(); 894 oops_hdr->timestamp = cpu_to_be64(get_seconds());
895 } 895 }
896 896
897 (void) nvram_write_os_partition(&oops_log_partition, oops_buf, 897 (void) nvram_write_os_partition(&oops_log_partition, oops_buf,
898 (int) (sizeof(*oops_hdr) + oops_hdr->report_length), err_type, 898 (int) (sizeof(*oops_hdr) + text_len), err_type,
899 ++oops_count); 899 ++oops_count);
900 900
901 spin_unlock_irqrestore(&lock, flags); 901 spin_unlock_irqrestore(&lock, flags);
diff --git a/arch/powerpc/platforms/pseries/pci.c b/arch/powerpc/platforms/pseries/pci.c
index 5f93856cdf47..70670a2d9cf2 100644
--- a/arch/powerpc/platforms/pseries/pci.c
+++ b/arch/powerpc/platforms/pseries/pci.c
@@ -113,7 +113,7 @@ int pseries_root_bridge_prepare(struct pci_host_bridge *bridge)
113{ 113{
114 struct device_node *dn, *pdn; 114 struct device_node *dn, *pdn;
115 struct pci_bus *bus; 115 struct pci_bus *bus;
116 const uint32_t *pcie_link_speed_stats; 116 const __be32 *pcie_link_speed_stats;
117 117
118 bus = bridge->bus; 118 bus = bridge->bus;
119 119
@@ -122,7 +122,7 @@ int pseries_root_bridge_prepare(struct pci_host_bridge *bridge)
122 return 0; 122 return 0;
123 123
124 for (pdn = dn; pdn != NULL; pdn = of_get_next_parent(pdn)) { 124 for (pdn = dn; pdn != NULL; pdn = of_get_next_parent(pdn)) {
125 pcie_link_speed_stats = (const uint32_t *) of_get_property(pdn, 125 pcie_link_speed_stats = of_get_property(pdn,
126 "ibm,pcie-link-speed-stats", NULL); 126 "ibm,pcie-link-speed-stats", NULL);
127 if (pcie_link_speed_stats) 127 if (pcie_link_speed_stats)
128 break; 128 break;
@@ -135,7 +135,7 @@ int pseries_root_bridge_prepare(struct pci_host_bridge *bridge)
135 return 0; 135 return 0;
136 } 136 }
137 137
138 switch (pcie_link_speed_stats[0]) { 138 switch (be32_to_cpup(pcie_link_speed_stats)) {
139 case 0x01: 139 case 0x01:
140 bus->max_bus_speed = PCIE_SPEED_2_5GT; 140 bus->max_bus_speed = PCIE_SPEED_2_5GT;
141 break; 141 break;
@@ -147,7 +147,7 @@ int pseries_root_bridge_prepare(struct pci_host_bridge *bridge)
147 break; 147 break;
148 } 148 }
149 149
150 switch (pcie_link_speed_stats[1]) { 150 switch (be32_to_cpup(pcie_link_speed_stats)) {
151 case 0x01: 151 case 0x01:
152 bus->cur_bus_speed = PCIE_SPEED_2_5GT; 152 bus->cur_bus_speed = PCIE_SPEED_2_5GT;
153 break; 153 break;
diff --git a/arch/powerpc/sysdev/ppc4xx_ocm.c b/arch/powerpc/sysdev/ppc4xx_ocm.c
index b7c43453236d..85d9e37f5ccb 100644
--- a/arch/powerpc/sysdev/ppc4xx_ocm.c
+++ b/arch/powerpc/sysdev/ppc4xx_ocm.c
@@ -339,7 +339,7 @@ void *ppc4xx_ocm_alloc(phys_addr_t *phys, int size, int align,
339 if (IS_ERR_VALUE(offset)) 339 if (IS_ERR_VALUE(offset))
340 continue; 340 continue;
341 341
342 ocm_blk = kzalloc(sizeof(struct ocm_block *), GFP_KERNEL); 342 ocm_blk = kzalloc(sizeof(struct ocm_block), GFP_KERNEL);
343 if (!ocm_blk) { 343 if (!ocm_blk) {
344 printk(KERN_ERR "PPC4XX OCM: could not allocate ocm block"); 344 printk(KERN_ERR "PPC4XX OCM: could not allocate ocm block");
345 rh_free(ocm_reg->rh, offset); 345 rh_free(ocm_reg->rh, offset);
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 314fced4fc14..e9f312532526 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -101,7 +101,7 @@ config S390
101 select GENERIC_CPU_DEVICES if !SMP 101 select GENERIC_CPU_DEVICES if !SMP
102 select GENERIC_FIND_FIRST_BIT 102 select GENERIC_FIND_FIRST_BIT
103 select GENERIC_SMP_IDLE_THREAD 103 select GENERIC_SMP_IDLE_THREAD
104 select GENERIC_TIME_VSYSCALL_OLD 104 select GENERIC_TIME_VSYSCALL
105 select HAVE_ALIGNED_STRUCT_PAGE if SLUB 105 select HAVE_ALIGNED_STRUCT_PAGE if SLUB
106 select HAVE_ARCH_JUMP_LABEL if !MARCH_G5 106 select HAVE_ARCH_JUMP_LABEL if !MARCH_G5
107 select HAVE_ARCH_SECCOMP_FILTER 107 select HAVE_ARCH_SECCOMP_FILTER
@@ -135,7 +135,6 @@ config S390
135 select HAVE_SYSCALL_TRACEPOINTS 135 select HAVE_SYSCALL_TRACEPOINTS
136 select HAVE_UID16 if 32BIT 136 select HAVE_UID16 if 32BIT
137 select HAVE_VIRT_CPU_ACCOUNTING 137 select HAVE_VIRT_CPU_ACCOUNTING
138 select INIT_ALL_POSSIBLE
139 select KTIME_SCALAR if 32BIT 138 select KTIME_SCALAR if 32BIT
140 select MODULES_USE_ELF_RELA 139 select MODULES_USE_ELF_RELA
141 select OLD_SIGACTION 140 select OLD_SIGACTION
@@ -347,14 +346,14 @@ config SMP
347 Even if you don't know what to do here, say Y. 346 Even if you don't know what to do here, say Y.
348 347
349config NR_CPUS 348config NR_CPUS
350 int "Maximum number of CPUs (2-64)" 349 int "Maximum number of CPUs (2-256)"
351 range 2 64 350 range 2 256
352 depends on SMP 351 depends on SMP
353 default "32" if !64BIT 352 default "32" if !64BIT
354 default "64" if 64BIT 353 default "64" if 64BIT
355 help 354 help
356 This allows you to specify the maximum number of CPUs which this 355 This allows you to specify the maximum number of CPUs which this
357 kernel will support. The maximum supported value is 64 and the 356 kernel will support. The maximum supported value is 256 and the
358 minimum value which makes sense is 2. 357 minimum value which makes sense is 2.
359 358
360 This is purely to save memory - each supported CPU adds 359 This is purely to save memory - each supported CPU adds
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index 46cae138ece2..b3feabd39f31 100644
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -35,7 +35,6 @@ static u8 *ctrblk;
35static char keylen_flag; 35static char keylen_flag;
36 36
37struct s390_aes_ctx { 37struct s390_aes_ctx {
38 u8 iv[AES_BLOCK_SIZE];
39 u8 key[AES_MAX_KEY_SIZE]; 38 u8 key[AES_MAX_KEY_SIZE];
40 long enc; 39 long enc;
41 long dec; 40 long dec;
@@ -56,8 +55,7 @@ struct pcc_param {
56 55
57struct s390_xts_ctx { 56struct s390_xts_ctx {
58 u8 key[32]; 57 u8 key[32];
59 u8 xts_param[16]; 58 u8 pcc_key[32];
60 struct pcc_param pcc;
61 long enc; 59 long enc;
62 long dec; 60 long dec;
63 int key_len; 61 int key_len;
@@ -441,30 +439,36 @@ static int cbc_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
441 return aes_set_key(tfm, in_key, key_len); 439 return aes_set_key(tfm, in_key, key_len);
442} 440}
443 441
444static int cbc_aes_crypt(struct blkcipher_desc *desc, long func, void *param, 442static int cbc_aes_crypt(struct blkcipher_desc *desc, long func,
445 struct blkcipher_walk *walk) 443 struct blkcipher_walk *walk)
446{ 444{
445 struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
447 int ret = blkcipher_walk_virt(desc, walk); 446 int ret = blkcipher_walk_virt(desc, walk);
448 unsigned int nbytes = walk->nbytes; 447 unsigned int nbytes = walk->nbytes;
448 struct {
449 u8 iv[AES_BLOCK_SIZE];
450 u8 key[AES_MAX_KEY_SIZE];
451 } param;
449 452
450 if (!nbytes) 453 if (!nbytes)
451 goto out; 454 goto out;
452 455
453 memcpy(param, walk->iv, AES_BLOCK_SIZE); 456 memcpy(param.iv, walk->iv, AES_BLOCK_SIZE);
457 memcpy(param.key, sctx->key, sctx->key_len);
454 do { 458 do {
455 /* only use complete blocks */ 459 /* only use complete blocks */
456 unsigned int n = nbytes & ~(AES_BLOCK_SIZE - 1); 460 unsigned int n = nbytes & ~(AES_BLOCK_SIZE - 1);
457 u8 *out = walk->dst.virt.addr; 461 u8 *out = walk->dst.virt.addr;
458 u8 *in = walk->src.virt.addr; 462 u8 *in = walk->src.virt.addr;
459 463
460 ret = crypt_s390_kmc(func, param, out, in, n); 464 ret = crypt_s390_kmc(func, &param, out, in, n);
461 if (ret < 0 || ret != n) 465 if (ret < 0 || ret != n)
462 return -EIO; 466 return -EIO;
463 467
464 nbytes &= AES_BLOCK_SIZE - 1; 468 nbytes &= AES_BLOCK_SIZE - 1;
465 ret = blkcipher_walk_done(desc, walk, nbytes); 469 ret = blkcipher_walk_done(desc, walk, nbytes);
466 } while ((nbytes = walk->nbytes)); 470 } while ((nbytes = walk->nbytes));
467 memcpy(walk->iv, param, AES_BLOCK_SIZE); 471 memcpy(walk->iv, param.iv, AES_BLOCK_SIZE);
468 472
469out: 473out:
470 return ret; 474 return ret;
@@ -481,7 +485,7 @@ static int cbc_aes_encrypt(struct blkcipher_desc *desc,
481 return fallback_blk_enc(desc, dst, src, nbytes); 485 return fallback_blk_enc(desc, dst, src, nbytes);
482 486
483 blkcipher_walk_init(&walk, dst, src, nbytes); 487 blkcipher_walk_init(&walk, dst, src, nbytes);
484 return cbc_aes_crypt(desc, sctx->enc, sctx->iv, &walk); 488 return cbc_aes_crypt(desc, sctx->enc, &walk);
485} 489}
486 490
487static int cbc_aes_decrypt(struct blkcipher_desc *desc, 491static int cbc_aes_decrypt(struct blkcipher_desc *desc,
@@ -495,7 +499,7 @@ static int cbc_aes_decrypt(struct blkcipher_desc *desc,
495 return fallback_blk_dec(desc, dst, src, nbytes); 499 return fallback_blk_dec(desc, dst, src, nbytes);
496 500
497 blkcipher_walk_init(&walk, dst, src, nbytes); 501 blkcipher_walk_init(&walk, dst, src, nbytes);
498 return cbc_aes_crypt(desc, sctx->dec, sctx->iv, &walk); 502 return cbc_aes_crypt(desc, sctx->dec, &walk);
499} 503}
500 504
501static struct crypto_alg cbc_aes_alg = { 505static struct crypto_alg cbc_aes_alg = {
@@ -586,7 +590,7 @@ static int xts_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
586 xts_ctx->enc = KM_XTS_128_ENCRYPT; 590 xts_ctx->enc = KM_XTS_128_ENCRYPT;
587 xts_ctx->dec = KM_XTS_128_DECRYPT; 591 xts_ctx->dec = KM_XTS_128_DECRYPT;
588 memcpy(xts_ctx->key + 16, in_key, 16); 592 memcpy(xts_ctx->key + 16, in_key, 16);
589 memcpy(xts_ctx->pcc.key + 16, in_key + 16, 16); 593 memcpy(xts_ctx->pcc_key + 16, in_key + 16, 16);
590 break; 594 break;
591 case 48: 595 case 48:
592 xts_ctx->enc = 0; 596 xts_ctx->enc = 0;
@@ -597,7 +601,7 @@ static int xts_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
597 xts_ctx->enc = KM_XTS_256_ENCRYPT; 601 xts_ctx->enc = KM_XTS_256_ENCRYPT;
598 xts_ctx->dec = KM_XTS_256_DECRYPT; 602 xts_ctx->dec = KM_XTS_256_DECRYPT;
599 memcpy(xts_ctx->key, in_key, 32); 603 memcpy(xts_ctx->key, in_key, 32);
600 memcpy(xts_ctx->pcc.key, in_key + 32, 32); 604 memcpy(xts_ctx->pcc_key, in_key + 32, 32);
601 break; 605 break;
602 default: 606 default:
603 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; 607 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
@@ -616,29 +620,33 @@ static int xts_aes_crypt(struct blkcipher_desc *desc, long func,
616 unsigned int nbytes = walk->nbytes; 620 unsigned int nbytes = walk->nbytes;
617 unsigned int n; 621 unsigned int n;
618 u8 *in, *out; 622 u8 *in, *out;
619 void *param; 623 struct pcc_param pcc_param;
624 struct {
625 u8 key[32];
626 u8 init[16];
627 } xts_param;
620 628
621 if (!nbytes) 629 if (!nbytes)
622 goto out; 630 goto out;
623 631
624 memset(xts_ctx->pcc.block, 0, sizeof(xts_ctx->pcc.block)); 632 memset(pcc_param.block, 0, sizeof(pcc_param.block));
625 memset(xts_ctx->pcc.bit, 0, sizeof(xts_ctx->pcc.bit)); 633 memset(pcc_param.bit, 0, sizeof(pcc_param.bit));
626 memset(xts_ctx->pcc.xts, 0, sizeof(xts_ctx->pcc.xts)); 634 memset(pcc_param.xts, 0, sizeof(pcc_param.xts));
627 memcpy(xts_ctx->pcc.tweak, walk->iv, sizeof(xts_ctx->pcc.tweak)); 635 memcpy(pcc_param.tweak, walk->iv, sizeof(pcc_param.tweak));
628 param = xts_ctx->pcc.key + offset; 636 memcpy(pcc_param.key, xts_ctx->pcc_key, 32);
629 ret = crypt_s390_pcc(func, param); 637 ret = crypt_s390_pcc(func, &pcc_param.key[offset]);
630 if (ret < 0) 638 if (ret < 0)
631 return -EIO; 639 return -EIO;
632 640
633 memcpy(xts_ctx->xts_param, xts_ctx->pcc.xts, 16); 641 memcpy(xts_param.key, xts_ctx->key, 32);
634 param = xts_ctx->key + offset; 642 memcpy(xts_param.init, pcc_param.xts, 16);
635 do { 643 do {
636 /* only use complete blocks */ 644 /* only use complete blocks */
637 n = nbytes & ~(AES_BLOCK_SIZE - 1); 645 n = nbytes & ~(AES_BLOCK_SIZE - 1);
638 out = walk->dst.virt.addr; 646 out = walk->dst.virt.addr;
639 in = walk->src.virt.addr; 647 in = walk->src.virt.addr;
640 648
641 ret = crypt_s390_km(func, param, out, in, n); 649 ret = crypt_s390_km(func, &xts_param.key[offset], out, in, n);
642 if (ret < 0 || ret != n) 650 if (ret < 0 || ret != n)
643 return -EIO; 651 return -EIO;
644 652
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
index 316c8503a3b4..114258eeaacd 100644
--- a/arch/s390/include/asm/page.h
+++ b/arch/s390/include/asm/page.h
@@ -48,33 +48,21 @@ static inline void clear_page(void *page)
48 : "memory", "cc"); 48 : "memory", "cc");
49} 49}
50 50
51/*
52 * copy_page uses the mvcl instruction with 0xb0 padding byte in order to
53 * bypass caches when copying a page. Especially when copying huge pages
54 * this keeps L1 and L2 data caches alive.
55 */
51static inline void copy_page(void *to, void *from) 56static inline void copy_page(void *to, void *from)
52{ 57{
53 if (MACHINE_HAS_MVPG) { 58 register void *reg2 asm ("2") = to;
54 register unsigned long reg0 asm ("0") = 0; 59 register unsigned long reg3 asm ("3") = 0x1000;
55 asm volatile( 60 register void *reg4 asm ("4") = from;
56 " mvpg %0,%1" 61 register unsigned long reg5 asm ("5") = 0xb0001000;
57 : : "a" (to), "a" (from), "d" (reg0) 62 asm volatile(
58 : "memory", "cc"); 63 " mvcl 2,4"
59 } else 64 : "+d" (reg2), "+d" (reg3), "+d" (reg4), "+d" (reg5)
60 asm volatile( 65 : : "memory", "cc");
61 " mvc 0(256,%0),0(%1)\n"
62 " mvc 256(256,%0),256(%1)\n"
63 " mvc 512(256,%0),512(%1)\n"
64 " mvc 768(256,%0),768(%1)\n"
65 " mvc 1024(256,%0),1024(%1)\n"
66 " mvc 1280(256,%0),1280(%1)\n"
67 " mvc 1536(256,%0),1536(%1)\n"
68 " mvc 1792(256,%0),1792(%1)\n"
69 " mvc 2048(256,%0),2048(%1)\n"
70 " mvc 2304(256,%0),2304(%1)\n"
71 " mvc 2560(256,%0),2560(%1)\n"
72 " mvc 2816(256,%0),2816(%1)\n"
73 " mvc 3072(256,%0),3072(%1)\n"
74 " mvc 3328(256,%0),3328(%1)\n"
75 " mvc 3584(256,%0),3584(%1)\n"
76 " mvc 3840(256,%0),3840(%1)\n"
77 : : "a" (to), "a" (from) : "memory");
78} 66}
79 67
80#define clear_user_page(page, vaddr, pg) clear_page(page) 68#define clear_user_page(page, vaddr, pg) clear_page(page)
diff --git a/arch/s390/include/asm/sclp.h b/arch/s390/include/asm/sclp.h
index 30ef748bc161..2f390956c7c1 100644
--- a/arch/s390/include/asm/sclp.h
+++ b/arch/s390/include/asm/sclp.h
@@ -8,6 +8,7 @@
8 8
9#include <linux/types.h> 9#include <linux/types.h>
10#include <asm/chpid.h> 10#include <asm/chpid.h>
11#include <asm/cpu.h>
11 12
12#define SCLP_CHP_INFO_MASK_SIZE 32 13#define SCLP_CHP_INFO_MASK_SIZE 32
13 14
@@ -37,7 +38,7 @@ struct sclp_cpu_info {
37 unsigned int standby; 38 unsigned int standby;
38 unsigned int combined; 39 unsigned int combined;
39 int has_cpu_type; 40 int has_cpu_type;
40 struct sclp_cpu_entry cpu[255]; 41 struct sclp_cpu_entry cpu[MAX_CPU_ADDRESS + 1];
41}; 42};
42 43
43int sclp_get_cpu_info(struct sclp_cpu_info *info); 44int sclp_get_cpu_info(struct sclp_cpu_info *info);
diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h
index ac9bed8e103f..160779394096 100644
--- a/arch/s390/include/asm/smp.h
+++ b/arch/s390/include/asm/smp.h
@@ -31,6 +31,7 @@ extern void smp_yield(void);
31extern void smp_stop_cpu(void); 31extern void smp_stop_cpu(void);
32extern void smp_cpu_set_polarization(int cpu, int val); 32extern void smp_cpu_set_polarization(int cpu, int val);
33extern int smp_cpu_get_polarization(int cpu); 33extern int smp_cpu_get_polarization(int cpu);
34extern void smp_fill_possible_mask(void);
34 35
35#else /* CONFIG_SMP */ 36#else /* CONFIG_SMP */
36 37
@@ -50,6 +51,7 @@ static inline int smp_vcpu_scheduled(int cpu) { return 1; }
50static inline void smp_yield_cpu(int cpu) { } 51static inline void smp_yield_cpu(int cpu) { }
51static inline void smp_yield(void) { } 52static inline void smp_yield(void) { }
52static inline void smp_stop_cpu(void) { } 53static inline void smp_stop_cpu(void) { }
54static inline void smp_fill_possible_mask(void) { }
53 55
54#endif /* CONFIG_SMP */ 56#endif /* CONFIG_SMP */
55 57
diff --git a/arch/s390/include/asm/vdso.h b/arch/s390/include/asm/vdso.h
index a73eb2e1e918..bc9746a7d47c 100644
--- a/arch/s390/include/asm/vdso.h
+++ b/arch/s390/include/asm/vdso.h
@@ -26,8 +26,9 @@ struct vdso_data {
26 __u64 wtom_clock_nsec; /* 0x28 */ 26 __u64 wtom_clock_nsec; /* 0x28 */
27 __u32 tz_minuteswest; /* Minutes west of Greenwich 0x30 */ 27 __u32 tz_minuteswest; /* Minutes west of Greenwich 0x30 */
28 __u32 tz_dsttime; /* Type of dst correction 0x34 */ 28 __u32 tz_dsttime; /* Type of dst correction 0x34 */
29 __u32 ectg_available; 29 __u32 ectg_available; /* ECTG instruction present 0x38 */
30 __u32 ntp_mult; /* NTP adjusted multiplier 0x3C */ 30 __u32 tk_mult; /* Mult. used for xtime_nsec 0x3c */
31 __u32 tk_shift; /* Shift used for xtime_nsec 0x40 */
31}; 32};
32 33
33struct vdso_per_cpu_data { 34struct vdso_per_cpu_data {
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index 2416138ebd3e..e4c99a183651 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -65,12 +65,14 @@ int main(void)
65 DEFINE(__VDSO_WTOM_NSEC, offsetof(struct vdso_data, wtom_clock_nsec)); 65 DEFINE(__VDSO_WTOM_NSEC, offsetof(struct vdso_data, wtom_clock_nsec));
66 DEFINE(__VDSO_TIMEZONE, offsetof(struct vdso_data, tz_minuteswest)); 66 DEFINE(__VDSO_TIMEZONE, offsetof(struct vdso_data, tz_minuteswest));
67 DEFINE(__VDSO_ECTG_OK, offsetof(struct vdso_data, ectg_available)); 67 DEFINE(__VDSO_ECTG_OK, offsetof(struct vdso_data, ectg_available));
68 DEFINE(__VDSO_NTP_MULT, offsetof(struct vdso_data, ntp_mult)); 68 DEFINE(__VDSO_TK_MULT, offsetof(struct vdso_data, tk_mult));
69 DEFINE(__VDSO_TK_SHIFT, offsetof(struct vdso_data, tk_shift));
69 DEFINE(__VDSO_ECTG_BASE, offsetof(struct vdso_per_cpu_data, ectg_timer_base)); 70 DEFINE(__VDSO_ECTG_BASE, offsetof(struct vdso_per_cpu_data, ectg_timer_base));
70 DEFINE(__VDSO_ECTG_USER, offsetof(struct vdso_per_cpu_data, ectg_user_time)); 71 DEFINE(__VDSO_ECTG_USER, offsetof(struct vdso_per_cpu_data, ectg_user_time));
71 /* constants used by the vdso */ 72 /* constants used by the vdso */
72 DEFINE(__CLOCK_REALTIME, CLOCK_REALTIME); 73 DEFINE(__CLOCK_REALTIME, CLOCK_REALTIME);
73 DEFINE(__CLOCK_MONOTONIC, CLOCK_MONOTONIC); 74 DEFINE(__CLOCK_MONOTONIC, CLOCK_MONOTONIC);
75 DEFINE(__CLOCK_THREAD_CPUTIME_ID, CLOCK_THREAD_CPUTIME_ID);
74 DEFINE(__CLOCK_REALTIME_RES, MONOTONIC_RES_NSEC); 76 DEFINE(__CLOCK_REALTIME_RES, MONOTONIC_RES_NSEC);
75 BLANK(); 77 BLANK();
76 /* idle data offsets */ 78 /* idle data offsets */
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
index 6e2442978409..95e7ba0fbb7e 100644
--- a/arch/s390/kernel/compat_signal.c
+++ b/arch/s390/kernel/compat_signal.c
@@ -194,7 +194,7 @@ static int restore_sigregs32(struct pt_regs *regs,_sigregs32 __user *sregs)
194 return -EINVAL; 194 return -EINVAL;
195 195
196 /* Use regs->psw.mask instead of PSW_USER_BITS to preserve PER bit. */ 196 /* Use regs->psw.mask instead of PSW_USER_BITS to preserve PER bit. */
197 regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) | 197 regs->psw.mask = (regs->psw.mask & ~(PSW_MASK_USER | PSW_MASK_RI)) |
198 (__u64)(user_sregs.regs.psw.mask & PSW32_MASK_USER) << 32 | 198 (__u64)(user_sregs.regs.psw.mask & PSW32_MASK_USER) << 32 |
199 (__u64)(user_sregs.regs.psw.mask & PSW32_MASK_RI) << 32 | 199 (__u64)(user_sregs.regs.psw.mask & PSW32_MASK_RI) << 32 |
200 (__u64)(user_sregs.regs.psw.addr & PSW32_ADDR_AMODE); 200 (__u64)(user_sregs.regs.psw.addr & PSW32_ADDR_AMODE);
diff --git a/arch/s390/kernel/pgm_check.S b/arch/s390/kernel/pgm_check.S
index 4a460c44e17e..813ec7260878 100644
--- a/arch/s390/kernel/pgm_check.S
+++ b/arch/s390/kernel/pgm_check.S
@@ -78,7 +78,7 @@ PGM_CHECK_DEFAULT /* 34 */
78PGM_CHECK_DEFAULT /* 35 */ 78PGM_CHECK_DEFAULT /* 35 */
79PGM_CHECK_DEFAULT /* 36 */ 79PGM_CHECK_DEFAULT /* 36 */
80PGM_CHECK_DEFAULT /* 37 */ 80PGM_CHECK_DEFAULT /* 37 */
81PGM_CHECK_DEFAULT /* 38 */ 81PGM_CHECK_64BIT(do_dat_exception) /* 38 */
82PGM_CHECK_64BIT(do_dat_exception) /* 39 */ 82PGM_CHECK_64BIT(do_dat_exception) /* 39 */
83PGM_CHECK_64BIT(do_dat_exception) /* 3a */ 83PGM_CHECK_64BIT(do_dat_exception) /* 3a */
84PGM_CHECK_64BIT(do_dat_exception) /* 3b */ 84PGM_CHECK_64BIT(do_dat_exception) /* 3b */
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 4444875266ee..0f3d44ecbfc6 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -1023,6 +1023,7 @@ void __init setup_arch(char **cmdline_p)
1023 setup_vmcoreinfo(); 1023 setup_vmcoreinfo();
1024 setup_lowcore(); 1024 setup_lowcore();
1025 1025
1026 smp_fill_possible_mask();
1026 cpu_init(); 1027 cpu_init();
1027 s390_init_cpu_topology(); 1028 s390_init_cpu_topology();
1028 1029
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index fb535874a246..d8fd508ccd1e 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -94,7 +94,7 @@ static int restore_sigregs(struct pt_regs *regs, _sigregs __user *sregs)
94 return -EINVAL; 94 return -EINVAL;
95 95
96 /* Use regs->psw.mask instead of PSW_USER_BITS to preserve PER bit. */ 96 /* Use regs->psw.mask instead of PSW_USER_BITS to preserve PER bit. */
97 regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) | 97 regs->psw.mask = (regs->psw.mask & ~(PSW_MASK_USER | PSW_MASK_RI)) |
98 (user_sregs.regs.psw.mask & (PSW_MASK_USER | PSW_MASK_RI)); 98 (user_sregs.regs.psw.mask & (PSW_MASK_USER | PSW_MASK_RI));
99 /* Check for invalid user address space control. */ 99 /* Check for invalid user address space control. */
100 if ((regs->psw.mask & PSW_MASK_ASC) == PSW_ASC_HOME) 100 if ((regs->psw.mask & PSW_MASK_ASC) == PSW_ASC_HOME)
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index dc4a53465060..958704798f4a 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -721,18 +721,14 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
721 return 0; 721 return 0;
722} 722}
723 723
724static int __init setup_possible_cpus(char *s) 724static unsigned int setup_possible_cpus __initdata;
725{
726 int max, cpu;
727 725
728 if (kstrtoint(s, 0, &max) < 0) 726static int __init _setup_possible_cpus(char *s)
729 return 0; 727{
730 init_cpu_possible(cpumask_of(0)); 728 get_option(&s, &setup_possible_cpus);
731 for (cpu = 1; cpu < max && cpu < nr_cpu_ids; cpu++)
732 set_cpu_possible(cpu, true);
733 return 0; 729 return 0;
734} 730}
735early_param("possible_cpus", setup_possible_cpus); 731early_param("possible_cpus", _setup_possible_cpus);
736 732
737#ifdef CONFIG_HOTPLUG_CPU 733#ifdef CONFIG_HOTPLUG_CPU
738 734
@@ -775,6 +771,17 @@ void __noreturn cpu_die(void)
775 771
776#endif /* CONFIG_HOTPLUG_CPU */ 772#endif /* CONFIG_HOTPLUG_CPU */
777 773
774void __init smp_fill_possible_mask(void)
775{
776 unsigned int possible, cpu;
777
778 possible = setup_possible_cpus;
779 if (!possible)
780 possible = MACHINE_IS_VM ? 64 : nr_cpu_ids;
781 for (cpu = 0; cpu < possible && cpu < nr_cpu_ids; cpu++)
782 set_cpu_possible(cpu, true);
783}
784
778void __init smp_prepare_cpus(unsigned int max_cpus) 785void __init smp_prepare_cpus(unsigned int max_cpus)
779{ 786{
780 /* request the 0x1201 emergency signal external interrupt */ 787 /* request the 0x1201 emergency signal external interrupt */
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 064c3082ab33..dd95f1631621 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -108,20 +108,10 @@ static void fixup_clock_comparator(unsigned long long delta)
108 set_clock_comparator(S390_lowcore.clock_comparator); 108 set_clock_comparator(S390_lowcore.clock_comparator);
109} 109}
110 110
111static int s390_next_ktime(ktime_t expires, 111static int s390_next_event(unsigned long delta,
112 struct clock_event_device *evt) 112 struct clock_event_device *evt)
113{ 113{
114 struct timespec ts; 114 S390_lowcore.clock_comparator = get_tod_clock() + delta;
115 u64 nsecs;
116
117 ts.tv_sec = ts.tv_nsec = 0;
118 monotonic_to_bootbased(&ts);
119 nsecs = ktime_to_ns(ktime_add(timespec_to_ktime(ts), expires));
120 do_div(nsecs, 125);
121 S390_lowcore.clock_comparator = sched_clock_base_cc + (nsecs << 9);
122 /* Program the maximum value if we have an overflow (== year 2042) */
123 if (unlikely(S390_lowcore.clock_comparator < sched_clock_base_cc))
124 S390_lowcore.clock_comparator = -1ULL;
125 set_clock_comparator(S390_lowcore.clock_comparator); 115 set_clock_comparator(S390_lowcore.clock_comparator);
126 return 0; 116 return 0;
127} 117}
@@ -146,15 +136,14 @@ void init_cpu_timer(void)
146 cpu = smp_processor_id(); 136 cpu = smp_processor_id();
147 cd = &per_cpu(comparators, cpu); 137 cd = &per_cpu(comparators, cpu);
148 cd->name = "comparator"; 138 cd->name = "comparator";
149 cd->features = CLOCK_EVT_FEAT_ONESHOT | 139 cd->features = CLOCK_EVT_FEAT_ONESHOT;
150 CLOCK_EVT_FEAT_KTIME;
151 cd->mult = 16777; 140 cd->mult = 16777;
152 cd->shift = 12; 141 cd->shift = 12;
153 cd->min_delta_ns = 1; 142 cd->min_delta_ns = 1;
154 cd->max_delta_ns = LONG_MAX; 143 cd->max_delta_ns = LONG_MAX;
155 cd->rating = 400; 144 cd->rating = 400;
156 cd->cpumask = cpumask_of(cpu); 145 cd->cpumask = cpumask_of(cpu);
157 cd->set_next_ktime = s390_next_ktime; 146 cd->set_next_event = s390_next_event;
158 cd->set_mode = s390_set_mode; 147 cd->set_mode = s390_set_mode;
159 148
160 clockevents_register_device(cd); 149 clockevents_register_device(cd);
@@ -221,21 +210,30 @@ struct clocksource * __init clocksource_default_clock(void)
221 return &clocksource_tod; 210 return &clocksource_tod;
222} 211}
223 212
224void update_vsyscall_old(struct timespec *wall_time, struct timespec *wtm, 213void update_vsyscall(struct timekeeper *tk)
225 struct clocksource *clock, u32 mult)
226{ 214{
227 if (clock != &clocksource_tod) 215 u64 nsecps;
216
217 if (tk->clock != &clocksource_tod)
228 return; 218 return;
229 219
230 /* Make userspace gettimeofday spin until we're done. */ 220 /* Make userspace gettimeofday spin until we're done. */
231 ++vdso_data->tb_update_count; 221 ++vdso_data->tb_update_count;
232 smp_wmb(); 222 smp_wmb();
233 vdso_data->xtime_tod_stamp = clock->cycle_last; 223 vdso_data->xtime_tod_stamp = tk->clock->cycle_last;
234 vdso_data->xtime_clock_sec = wall_time->tv_sec; 224 vdso_data->xtime_clock_sec = tk->xtime_sec;
235 vdso_data->xtime_clock_nsec = wall_time->tv_nsec; 225 vdso_data->xtime_clock_nsec = tk->xtime_nsec;
236 vdso_data->wtom_clock_sec = wtm->tv_sec; 226 vdso_data->wtom_clock_sec =
237 vdso_data->wtom_clock_nsec = wtm->tv_nsec; 227 tk->xtime_sec + tk->wall_to_monotonic.tv_sec;
238 vdso_data->ntp_mult = mult; 228 vdso_data->wtom_clock_nsec = tk->xtime_nsec +
229 + (tk->wall_to_monotonic.tv_nsec << tk->shift);
230 nsecps = (u64) NSEC_PER_SEC << tk->shift;
231 while (vdso_data->wtom_clock_nsec >= nsecps) {
232 vdso_data->wtom_clock_nsec -= nsecps;
233 vdso_data->wtom_clock_sec++;
234 }
235 vdso_data->tk_mult = tk->mult;
236 vdso_data->tk_shift = tk->shift;
239 smp_wmb(); 237 smp_wmb();
240 ++vdso_data->tb_update_count; 238 ++vdso_data->tb_update_count;
241} 239}
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c
index a84476f2a9bb..613649096783 100644
--- a/arch/s390/kernel/vdso.c
+++ b/arch/s390/kernel/vdso.c
@@ -125,7 +125,7 @@ int vdso_alloc_per_cpu(struct _lowcore *lowcore)
125 psal[i] = 0x80000000; 125 psal[i] = 0x80000000;
126 126
127 lowcore->paste[4] = (u32)(addr_t) psal; 127 lowcore->paste[4] = (u32)(addr_t) psal;
128 psal[0] = 0x20000000; 128 psal[0] = 0x02000000;
129 psal[2] = (u32)(addr_t) aste; 129 psal[2] = (u32)(addr_t) aste;
130 *(unsigned long *) (aste + 2) = segment_table + 130 *(unsigned long *) (aste + 2) = segment_table +
131 _ASCE_TABLE_LENGTH + _ASCE_USER_BITS + _ASCE_TYPE_SEGMENT; 131 _ASCE_TABLE_LENGTH + _ASCE_USER_BITS + _ASCE_TYPE_SEGMENT;
diff --git a/arch/s390/kernel/vdso32/clock_gettime.S b/arch/s390/kernel/vdso32/clock_gettime.S
index b2224e0b974c..65fc3979c2f1 100644
--- a/arch/s390/kernel/vdso32/clock_gettime.S
+++ b/arch/s390/kernel/vdso32/clock_gettime.S
@@ -38,25 +38,21 @@ __kernel_clock_gettime:
38 sl %r1,__VDSO_XTIME_STAMP+4(%r5) 38 sl %r1,__VDSO_XTIME_STAMP+4(%r5)
39 brc 3,2f 39 brc 3,2f
40 ahi %r0,-1 40 ahi %r0,-1
412: ms %r0,__VDSO_NTP_MULT(%r5) /* cyc2ns(clock,cycle_delta) */ 412: ms %r0,__VDSO_TK_MULT(%r5) /* * tk->mult */
42 lr %r2,%r0 42 lr %r2,%r0
43 l %r0,__VDSO_NTP_MULT(%r5) 43 l %r0,__VDSO_TK_MULT(%r5)
44 ltr %r1,%r1 44 ltr %r1,%r1
45 mr %r0,%r0 45 mr %r0,%r0
46 jnm 3f 46 jnm 3f
47 a %r0,__VDSO_NTP_MULT(%r5) 47 a %r0,__VDSO_TK_MULT(%r5)
483: alr %r0,%r2 483: alr %r0,%r2
49 srdl %r0,12 49 al %r0,__VDSO_WTOM_NSEC(%r5)
50 al %r0,__VDSO_XTIME_NSEC(%r5) /* + xtime */
51 al %r1,__VDSO_XTIME_NSEC+4(%r5)
52 brc 12,4f
53 ahi %r0,1
544: l %r2,__VDSO_XTIME_SEC+4(%r5)
55 al %r0,__VDSO_WTOM_NSEC(%r5) /* + wall_to_monotonic */
56 al %r1,__VDSO_WTOM_NSEC+4(%r5) 50 al %r1,__VDSO_WTOM_NSEC+4(%r5)
57 brc 12,5f 51 brc 12,5f
58 ahi %r0,1 52 ahi %r0,1
595: al %r2,__VDSO_WTOM_SEC+4(%r5) 535: l %r2,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */
54 srdl %r0,0(%r2) /* >> tk->shift */
55 l %r2,__VDSO_WTOM_SEC+4(%r5)
60 cl %r4,__VDSO_UPD_COUNT+4(%r5) /* check update counter */ 56 cl %r4,__VDSO_UPD_COUNT+4(%r5) /* check update counter */
61 jne 1b 57 jne 1b
62 basr %r5,0 58 basr %r5,0
@@ -86,20 +82,21 @@ __kernel_clock_gettime:
86 sl %r1,__VDSO_XTIME_STAMP+4(%r5) 82 sl %r1,__VDSO_XTIME_STAMP+4(%r5)
87 brc 3,12f 83 brc 3,12f
88 ahi %r0,-1 84 ahi %r0,-1
8912: ms %r0,__VDSO_NTP_MULT(%r5) /* cyc2ns(clock,cycle_delta) */ 8512: ms %r0,__VDSO_TK_MULT(%r5) /* * tk->mult */
90 lr %r2,%r0 86 lr %r2,%r0
91 l %r0,__VDSO_NTP_MULT(%r5) 87 l %r0,__VDSO_TK_MULT(%r5)
92 ltr %r1,%r1 88 ltr %r1,%r1
93 mr %r0,%r0 89 mr %r0,%r0
94 jnm 13f 90 jnm 13f
95 a %r0,__VDSO_NTP_MULT(%r5) 91 a %r0,__VDSO_TK_MULT(%r5)
9613: alr %r0,%r2 9213: alr %r0,%r2
97 srdl %r0,12 93 al %r0,__VDSO_XTIME_NSEC(%r5) /* + tk->xtime_nsec */
98 al %r0,__VDSO_XTIME_NSEC(%r5) /* + xtime */
99 al %r1,__VDSO_XTIME_NSEC+4(%r5) 94 al %r1,__VDSO_XTIME_NSEC+4(%r5)
100 brc 12,14f 95 brc 12,14f
101 ahi %r0,1 96 ahi %r0,1
10214: l %r2,__VDSO_XTIME_SEC+4(%r5) 9714: l %r2,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */
98 srdl %r0,0(%r2) /* >> tk->shift */
99 l %r2,__VDSO_XTIME_SEC+4(%r5)
103 cl %r4,__VDSO_UPD_COUNT+4(%r5) /* check update counter */ 100 cl %r4,__VDSO_UPD_COUNT+4(%r5) /* check update counter */
104 jne 11b 101 jne 11b
105 basr %r5,0 102 basr %r5,0
diff --git a/arch/s390/kernel/vdso32/gettimeofday.S b/arch/s390/kernel/vdso32/gettimeofday.S
index 2d3633175e3b..fd621a950f7c 100644
--- a/arch/s390/kernel/vdso32/gettimeofday.S
+++ b/arch/s390/kernel/vdso32/gettimeofday.S
@@ -35,15 +35,14 @@ __kernel_gettimeofday:
35 sl %r1,__VDSO_XTIME_STAMP+4(%r5) 35 sl %r1,__VDSO_XTIME_STAMP+4(%r5)
36 brc 3,3f 36 brc 3,3f
37 ahi %r0,-1 37 ahi %r0,-1
383: ms %r0,__VDSO_NTP_MULT(%r5) /* cyc2ns(clock,cycle_delta) */ 383: ms %r0,__VDSO_TK_MULT(%r5) /* * tk->mult */
39 st %r0,24(%r15) 39 st %r0,24(%r15)
40 l %r0,__VDSO_NTP_MULT(%r5) 40 l %r0,__VDSO_TK_MULT(%r5)
41 ltr %r1,%r1 41 ltr %r1,%r1
42 mr %r0,%r0 42 mr %r0,%r0
43 jnm 4f 43 jnm 4f
44 a %r0,__VDSO_NTP_MULT(%r5) 44 a %r0,__VDSO_TK_MULT(%r5)
454: al %r0,24(%r15) 454: al %r0,24(%r15)
46 srdl %r0,12
47 al %r0,__VDSO_XTIME_NSEC(%r5) /* + xtime */ 46 al %r0,__VDSO_XTIME_NSEC(%r5) /* + xtime */
48 al %r1,__VDSO_XTIME_NSEC+4(%r5) 47 al %r1,__VDSO_XTIME_NSEC+4(%r5)
49 brc 12,5f 48 brc 12,5f
@@ -51,6 +50,8 @@ __kernel_gettimeofday:
515: mvc 24(4,%r15),__VDSO_XTIME_SEC+4(%r5) 505: mvc 24(4,%r15),__VDSO_XTIME_SEC+4(%r5)
52 cl %r4,__VDSO_UPD_COUNT+4(%r5) /* check update counter */ 51 cl %r4,__VDSO_UPD_COUNT+4(%r5) /* check update counter */
53 jne 1b 52 jne 1b
53 l %r4,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */
54 srdl %r0,0(%r4) /* >> tk->shift */
54 l %r4,24(%r15) /* get tv_sec from stack */ 55 l %r4,24(%r15) /* get tv_sec from stack */
55 basr %r5,0 56 basr %r5,0
566: ltr %r0,%r0 576: ltr %r0,%r0
diff --git a/arch/s390/kernel/vdso64/clock_getres.S b/arch/s390/kernel/vdso64/clock_getres.S
index 176e1f75f9aa..34deba7c7ed1 100644
--- a/arch/s390/kernel/vdso64/clock_getres.S
+++ b/arch/s390/kernel/vdso64/clock_getres.S
@@ -23,7 +23,9 @@ __kernel_clock_getres:
23 je 0f 23 je 0f
24 cghi %r2,__CLOCK_MONOTONIC 24 cghi %r2,__CLOCK_MONOTONIC
25 je 0f 25 je 0f
26 cghi %r2,-2 /* CLOCK_THREAD_CPUTIME_ID for this thread */ 26 cghi %r2,__CLOCK_THREAD_CPUTIME_ID
27 je 0f
28 cghi %r2,-2 /* Per-thread CPUCLOCK with PID=0, VIRT=1 */
27 jne 2f 29 jne 2f
28 larl %r5,_vdso_data 30 larl %r5,_vdso_data
29 icm %r0,15,__LC_ECTG_OK(%r5) 31 icm %r0,15,__LC_ECTG_OK(%r5)
diff --git a/arch/s390/kernel/vdso64/clock_gettime.S b/arch/s390/kernel/vdso64/clock_gettime.S
index d46c95ed5f19..91940ed33a4a 100644
--- a/arch/s390/kernel/vdso64/clock_gettime.S
+++ b/arch/s390/kernel/vdso64/clock_gettime.S
@@ -22,7 +22,9 @@ __kernel_clock_gettime:
22 larl %r5,_vdso_data 22 larl %r5,_vdso_data
23 cghi %r2,__CLOCK_REALTIME 23 cghi %r2,__CLOCK_REALTIME
24 je 4f 24 je 4f
25 cghi %r2,-2 /* CLOCK_THREAD_CPUTIME_ID for this thread */ 25 cghi %r2,__CLOCK_THREAD_CPUTIME_ID
26 je 9f
27 cghi %r2,-2 /* Per-thread CPUCLOCK with PID=0, VIRT=1 */
26 je 9f 28 je 9f
27 cghi %r2,__CLOCK_MONOTONIC 29 cghi %r2,__CLOCK_MONOTONIC
28 jne 12f 30 jne 12f
@@ -34,14 +36,13 @@ __kernel_clock_gettime:
34 tmll %r4,0x0001 /* pending update ? loop */ 36 tmll %r4,0x0001 /* pending update ? loop */
35 jnz 0b 37 jnz 0b
36 stck 48(%r15) /* Store TOD clock */ 38 stck 48(%r15) /* Store TOD clock */
39 lgf %r2,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */
40 lg %r0,__VDSO_WTOM_SEC(%r5)
37 lg %r1,48(%r15) 41 lg %r1,48(%r15)
38 sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */ 42 sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
39 msgf %r1,__VDSO_NTP_MULT(%r5) /* * NTP adjustment */ 43 msgf %r1,__VDSO_TK_MULT(%r5) /* * tk->mult */
40 srlg %r1,%r1,12 /* cyc2ns(clock,cycle_delta) */ 44 alg %r1,__VDSO_WTOM_NSEC(%r5)
41 alg %r1,__VDSO_XTIME_NSEC(%r5) /* + xtime */ 45 srlg %r1,%r1,0(%r2) /* >> tk->shift */
42 lg %r0,__VDSO_XTIME_SEC(%r5)
43 alg %r1,__VDSO_WTOM_NSEC(%r5) /* + wall_to_monotonic */
44 alg %r0,__VDSO_WTOM_SEC(%r5)
45 clg %r4,__VDSO_UPD_COUNT(%r5) /* check update counter */ 46 clg %r4,__VDSO_UPD_COUNT(%r5) /* check update counter */
46 jne 0b 47 jne 0b
47 larl %r5,13f 48 larl %r5,13f
@@ -62,12 +63,13 @@ __kernel_clock_gettime:
62 tmll %r4,0x0001 /* pending update ? loop */ 63 tmll %r4,0x0001 /* pending update ? loop */
63 jnz 5b 64 jnz 5b
64 stck 48(%r15) /* Store TOD clock */ 65 stck 48(%r15) /* Store TOD clock */
66 lgf %r2,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */
65 lg %r1,48(%r15) 67 lg %r1,48(%r15)
66 sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */ 68 sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
67 msgf %r1,__VDSO_NTP_MULT(%r5) /* * NTP adjustment */ 69 msgf %r1,__VDSO_TK_MULT(%r5) /* * tk->mult */
68 srlg %r1,%r1,12 /* cyc2ns(clock,cycle_delta) */ 70 alg %r1,__VDSO_XTIME_NSEC(%r5) /* + tk->xtime_nsec */
69 alg %r1,__VDSO_XTIME_NSEC(%r5) /* + xtime */ 71 srlg %r1,%r1,0(%r2) /* >> tk->shift */
70 lg %r0,__VDSO_XTIME_SEC(%r5) 72 lg %r0,__VDSO_XTIME_SEC(%r5) /* tk->xtime_sec */
71 clg %r4,__VDSO_UPD_COUNT(%r5) /* check update counter */ 73 clg %r4,__VDSO_UPD_COUNT(%r5) /* check update counter */
72 jne 5b 74 jne 5b
73 larl %r5,13f 75 larl %r5,13f
diff --git a/arch/s390/kernel/vdso64/gettimeofday.S b/arch/s390/kernel/vdso64/gettimeofday.S
index 36ee674722ec..d0860d1d0ccc 100644
--- a/arch/s390/kernel/vdso64/gettimeofday.S
+++ b/arch/s390/kernel/vdso64/gettimeofday.S
@@ -31,12 +31,13 @@ __kernel_gettimeofday:
31 stck 48(%r15) /* Store TOD clock */ 31 stck 48(%r15) /* Store TOD clock */
32 lg %r1,48(%r15) 32 lg %r1,48(%r15)
33 sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */ 33 sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
34 msgf %r1,__VDSO_NTP_MULT(%r5) /* * NTP adjustment */ 34 msgf %r1,__VDSO_TK_MULT(%r5) /* * tk->mult */
35 srlg %r1,%r1,12 /* cyc2ns(clock,cycle_delta) */ 35 alg %r1,__VDSO_XTIME_NSEC(%r5) /* + tk->xtime_nsec */
36 alg %r1,__VDSO_XTIME_NSEC(%r5) /* + xtime.tv_nsec */ 36 lg %r0,__VDSO_XTIME_SEC(%r5) /* tk->xtime_sec */
37 lg %r0,__VDSO_XTIME_SEC(%r5) /* xtime.tv_sec */
38 clg %r4,__VDSO_UPD_COUNT(%r5) /* check update counter */ 37 clg %r4,__VDSO_UPD_COUNT(%r5) /* check update counter */
39 jne 0b 38 jne 0b
39 lgf %r5,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */
40 srlg %r1,%r1,0(%r5) /* >> tk->shift */
40 larl %r5,5f 41 larl %r5,5f
412: clg %r1,0(%r5) 422: clg %r1,0(%r5)
42 jl 3f 43 jl 3f
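
Aside: the s390 timekeeping changes above (update_vsyscall() plus the 32-bit and 64-bit vdso paths) all replace the old NTP-adjusted ">> 12" conversion with the timekeeper's own mult/shift pair; the timekeeper also keeps xtime_nsec left-shifted by tk->shift. A minimal user-space sketch of that arithmetic, with made-up constants (cycle_last, tk_mult, tk_shift, xtime_sec/nsec and the TOD sample are illustrative only, not real kernel data):

/* sketch only: same multiply/add/shift the new vdso performs */
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

int main(void)
{
	/* as published by update_vsyscall(); xtime_nsec stays shifted left */
	uint64_t cycle_last = 1000000;
	uint64_t tk_mult    = 16777216;
	uint64_t tk_shift   = 12;
	uint64_t xtime_sec  = 1386000000;
	uint64_t xtime_nsec = 500ULL << tk_shift;

	uint64_t tod = 1004096;		/* current TOD clock sample */

	/* (cycles since cycle_last) * mult, plus shifted nanoseconds,
	 * then a single right shift by tk_shift */
	uint64_t ns  = ((tod - cycle_last) * tk_mult + xtime_nsec) >> tk_shift;
	uint64_t sec = xtime_sec;

	while (ns >= NSEC_PER_SEC) {	/* carry whole seconds, as done for wtom_clock_nsec */
		ns -= NSEC_PER_SEC;
		sec++;
	}
	printf("%llu.%09llu\n", (unsigned long long)sec, (unsigned long long)ns);
	return 0;
}
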
diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c
index 97e03caf7825..dbdab3e7a1a6 100644
--- a/arch/s390/lib/uaccess_pt.c
+++ b/arch/s390/lib/uaccess_pt.c
@@ -78,11 +78,14 @@ static size_t copy_in_kernel(size_t count, void __user *to,
78 * contains the (negative) exception code. 78 * contains the (negative) exception code.
79 */ 79 */
80#ifdef CONFIG_64BIT 80#ifdef CONFIG_64BIT
81
81static unsigned long follow_table(struct mm_struct *mm, 82static unsigned long follow_table(struct mm_struct *mm,
82 unsigned long address, int write) 83 unsigned long address, int write)
83{ 84{
84 unsigned long *table = (unsigned long *)__pa(mm->pgd); 85 unsigned long *table = (unsigned long *)__pa(mm->pgd);
85 86
87 if (unlikely(address > mm->context.asce_limit - 1))
88 return -0x38UL;
86 switch (mm->context.asce_bits & _ASCE_TYPE_MASK) { 89 switch (mm->context.asce_bits & _ASCE_TYPE_MASK) {
87 case _ASCE_TYPE_REGION1: 90 case _ASCE_TYPE_REGION1:
88 table = table + ((address >> 53) & 0x7ff); 91 table = table + ((address >> 53) & 0x7ff);
diff --git a/arch/s390/pci/pci_event.c b/arch/s390/pci/pci_event.c
index 800f064b0da7..069607209a30 100644
--- a/arch/s390/pci/pci_event.c
+++ b/arch/s390/pci/pci_event.c
@@ -75,6 +75,7 @@ void zpci_event_availability(void *data)
75 if (!zdev || zdev->state == ZPCI_FN_STATE_CONFIGURED) 75 if (!zdev || zdev->state == ZPCI_FN_STATE_CONFIGURED)
76 break; 76 break;
77 zdev->state = ZPCI_FN_STATE_CONFIGURED; 77 zdev->state = ZPCI_FN_STATE_CONFIGURED;
78 zdev->fh = ccdf->fh;
78 ret = zpci_enable_device(zdev); 79 ret = zpci_enable_device(zdev);
79 if (ret) 80 if (ret)
80 break; 81 break;
@@ -101,6 +102,7 @@ void zpci_event_availability(void *data)
101 if (pdev) 102 if (pdev)
102 pci_stop_and_remove_bus_device(pdev); 103 pci_stop_and_remove_bus_device(pdev);
103 104
105 zdev->fh = ccdf->fh;
104 zpci_disable_device(zdev); 106 zpci_disable_device(zdev);
105 zdev->state = ZPCI_FN_STATE_STANDBY; 107 zdev->state = ZPCI_FN_STATE_STANDBY;
106 break; 108 break;
diff --git a/arch/sh/kernel/sh_ksyms_32.c b/arch/sh/kernel/sh_ksyms_32.c
index 2a0a596ebf67..d77f2f6c7ff0 100644
--- a/arch/sh/kernel/sh_ksyms_32.c
+++ b/arch/sh/kernel/sh_ksyms_32.c
@@ -20,6 +20,11 @@ EXPORT_SYMBOL(csum_partial_copy_generic);
20EXPORT_SYMBOL(copy_page); 20EXPORT_SYMBOL(copy_page);
21EXPORT_SYMBOL(__clear_user); 21EXPORT_SYMBOL(__clear_user);
22EXPORT_SYMBOL(empty_zero_page); 22EXPORT_SYMBOL(empty_zero_page);
23#ifdef CONFIG_FLATMEM
24/* need in pfn_valid macro */
25EXPORT_SYMBOL(min_low_pfn);
26EXPORT_SYMBOL(max_low_pfn);
27#endif
23 28
24#define DECLARE_EXPORT(name) \ 29#define DECLARE_EXPORT(name) \
25 extern void name(void);EXPORT_SYMBOL(name) 30 extern void name(void);EXPORT_SYMBOL(name)
diff --git a/arch/sh/lib/Makefile b/arch/sh/lib/Makefile
index 7b95f29e3174..3baff31e58cf 100644
--- a/arch/sh/lib/Makefile
+++ b/arch/sh/lib/Makefile
@@ -6,7 +6,7 @@ lib-y = delay.o memmove.o memchr.o \
6 checksum.o strlen.o div64.o div64-generic.o 6 checksum.o strlen.o div64.o div64-generic.o
7 7
8# Extracted from libgcc 8# Extracted from libgcc
9lib-y += movmem.o ashldi3.o ashrdi3.o lshrdi3.o \ 9obj-y += movmem.o ashldi3.o ashrdi3.o lshrdi3.o \
10 ashlsi3.o ashrsi3.o ashiftrt.o lshrsi3.o \ 10 ashlsi3.o ashrsi3.o ashiftrt.o lshrsi3.o \
11 udiv_qrnnd.o 11 udiv_qrnnd.o
12 12
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index 8358dc144959..0f9e94537eee 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -619,7 +619,7 @@ static inline unsigned long pte_present(pte_t pte)
619} 619}
620 620
621#define pte_accessible pte_accessible 621#define pte_accessible pte_accessible
622static inline unsigned long pte_accessible(pte_t a) 622static inline unsigned long pte_accessible(struct mm_struct *mm, pte_t a)
623{ 623{
624 return pte_val(a) & _PAGE_VALID; 624 return pte_val(a) & _PAGE_VALID;
625} 625}
@@ -847,7 +847,7 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
847 * SUN4V NOTE: _PAGE_VALID is the same value in both the SUN4U 847 * SUN4V NOTE: _PAGE_VALID is the same value in both the SUN4U
848 * and SUN4V pte layout, so this inline test is fine. 848 * and SUN4V pte layout, so this inline test is fine.
849 */ 849 */
850 if (likely(mm != &init_mm) && pte_accessible(orig)) 850 if (likely(mm != &init_mm) && pte_accessible(mm, orig))
851 tlb_batch_add(mm, addr, ptep, orig, fullmm); 851 tlb_batch_add(mm, addr, ptep, orig, fullmm);
852} 852}
853 853
diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
index e562d3caee57..ad7e178337f1 100644
--- a/arch/sparc/include/asm/uaccess_64.h
+++ b/arch/sparc/include/asm/uaccess_64.h
@@ -262,8 +262,8 @@ extern unsigned long __must_check __clear_user(void __user *, unsigned long);
262extern __must_check long strlen_user(const char __user *str); 262extern __must_check long strlen_user(const char __user *str);
263extern __must_check long strnlen_user(const char __user *str, long n); 263extern __must_check long strnlen_user(const char __user *str, long n);
264 264
265#define __copy_to_user_inatomic ___copy_to_user 265#define __copy_to_user_inatomic __copy_to_user
266#define __copy_from_user_inatomic ___copy_from_user 266#define __copy_from_user_inatomic __copy_from_user
267 267
268struct pt_regs; 268struct pt_regs;
269extern unsigned long compute_effective_address(struct pt_regs *, 269extern unsigned long compute_effective_address(struct pt_regs *,
diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c
index 070ed141aac7..76663b019eb5 100644
--- a/arch/sparc/kernel/iommu.c
+++ b/arch/sparc/kernel/iommu.c
@@ -854,7 +854,7 @@ int dma_supported(struct device *dev, u64 device_mask)
854 return 1; 854 return 1;
855 855
856#ifdef CONFIG_PCI 856#ifdef CONFIG_PCI
857 if (dev->bus == &pci_bus_type) 857 if (dev_is_pci(dev))
858 return pci64_dma_supported(to_pci_dev(dev), device_mask); 858 return pci64_dma_supported(to_pci_dev(dev), device_mask);
859#endif 859#endif
860 860
diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c
index 2096468de9b2..e7e215dfa866 100644
--- a/arch/sparc/kernel/ioport.c
+++ b/arch/sparc/kernel/ioport.c
@@ -666,10 +666,9 @@ EXPORT_SYMBOL(dma_ops);
666 */ 666 */
667int dma_supported(struct device *dev, u64 mask) 667int dma_supported(struct device *dev, u64 mask)
668{ 668{
669#ifdef CONFIG_PCI 669 if (dev_is_pci(dev))
670 if (dev->bus == &pci_bus_type)
671 return 1; 670 return 1;
672#endif 671
673 return 0; 672 return 0;
674} 673}
675EXPORT_SYMBOL(dma_supported); 674EXPORT_SYMBOL(dma_supported);
diff --git a/arch/sparc/kernel/kgdb_64.c b/arch/sparc/kernel/kgdb_64.c
index 60b19f50c80a..b45fe3fb4d2c 100644
--- a/arch/sparc/kernel/kgdb_64.c
+++ b/arch/sparc/kernel/kgdb_64.c
@@ -6,6 +6,7 @@
6#include <linux/kgdb.h> 6#include <linux/kgdb.h>
7#include <linux/kdebug.h> 7#include <linux/kdebug.h>
8#include <linux/ftrace.h> 8#include <linux/ftrace.h>
9#include <linux/context_tracking.h>
9 10
10#include <asm/cacheflush.h> 11#include <asm/cacheflush.h>
11#include <asm/kdebug.h> 12#include <asm/kdebug.h>
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index b66a5338231e..b085311dcd0e 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -123,11 +123,12 @@ void smp_callin(void)
123 rmb(); 123 rmb();
124 124
125 set_cpu_online(cpuid, true); 125 set_cpu_online(cpuid, true);
126 local_irq_enable();
127 126
128 /* idle thread is expected to have preempt disabled */ 127 /* idle thread is expected to have preempt disabled */
129 preempt_disable(); 128 preempt_disable();
130 129
130 local_irq_enable();
131
131 cpu_startup_entry(CPUHP_ONLINE); 132 cpu_startup_entry(CPUHP_ONLINE);
132} 133}
133 134
diff --git a/arch/um/Makefile b/arch/um/Makefile
index 48d92bbe62e9..36e658a4291c 100644
--- a/arch/um/Makefile
+++ b/arch/um/Makefile
@@ -33,12 +33,11 @@ MODE_INCLUDE += -I$(srctree)/$(ARCH_DIR)/include/shared/skas
33 33
34HEADER_ARCH := $(SUBARCH) 34HEADER_ARCH := $(SUBARCH)
35 35
36# Additional ARCH settings for x86 36ifneq ($(filter $(SUBARCH),x86 x86_64 i386),)
37ifeq ($(SUBARCH),i386) 37 HEADER_ARCH := x86
38 HEADER_ARCH := x86
39endif 38endif
40ifeq ($(SUBARCH),x86_64) 39
41 HEADER_ARCH := x86 40ifdef CONFIG_64BIT
42 KBUILD_CFLAGS += -mcmodel=large 41 KBUILD_CFLAGS += -mcmodel=large
43endif 42endif
44 43
diff --git a/arch/um/kernel/sysrq.c b/arch/um/kernel/sysrq.c
index 4d6fdf68edf3..799d7e413bf5 100644
--- a/arch/um/kernel/sysrq.c
+++ b/arch/um/kernel/sysrq.c
@@ -19,7 +19,7 @@ struct stack_frame {
19 unsigned long return_address; 19 unsigned long return_address;
20}; 20};
21 21
22static void print_stack_trace(unsigned long *sp, unsigned long bp) 22static void do_stack_trace(unsigned long *sp, unsigned long bp)
23{ 23{
24 int reliable; 24 int reliable;
25 unsigned long addr; 25 unsigned long addr;
@@ -94,5 +94,5 @@ void show_stack(struct task_struct *task, unsigned long *stack)
94 } 94 }
95 printk(KERN_CONT "\n"); 95 printk(KERN_CONT "\n");
96 96
97 print_stack_trace(sp, bp); 97 do_stack_trace(sp, bp);
98} 98}
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index e903c71f7e69..0952ecd60eca 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -26,6 +26,7 @@ config X86
26 select HAVE_AOUT if X86_32 26 select HAVE_AOUT if X86_32
27 select HAVE_UNSTABLE_SCHED_CLOCK 27 select HAVE_UNSTABLE_SCHED_CLOCK
28 select ARCH_SUPPORTS_NUMA_BALANCING 28 select ARCH_SUPPORTS_NUMA_BALANCING
29 select ARCH_SUPPORTS_INT128 if X86_64
29 select ARCH_WANTS_PROT_NUMA_PROT_NONE 30 select ARCH_WANTS_PROT_NUMA_PROT_NONE
30 select HAVE_IDE 31 select HAVE_IDE
31 select HAVE_OPROFILE 32 select HAVE_OPROFILE
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 41250fb33985..57d021507120 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -31,6 +31,9 @@ ifeq ($(CONFIG_X86_32),y)
31 31
32 KBUILD_CFLAGS += -msoft-float -mregparm=3 -freg-struct-return 32 KBUILD_CFLAGS += -msoft-float -mregparm=3 -freg-struct-return
33 33
34 # Don't autogenerate MMX or SSE instructions
35 KBUILD_CFLAGS += -mno-mmx -mno-sse
36
34 # Never want PIC in a 32-bit kernel, prevent breakage with GCC built 37 # Never want PIC in a 32-bit kernel, prevent breakage with GCC built
35 # with nonstandard options 38 # with nonstandard options
36 KBUILD_CFLAGS += -fno-pic 39 KBUILD_CFLAGS += -fno-pic
@@ -57,8 +60,11 @@ else
57 KBUILD_AFLAGS += -m64 60 KBUILD_AFLAGS += -m64
58 KBUILD_CFLAGS += -m64 61 KBUILD_CFLAGS += -m64
59 62
63 # Don't autogenerate MMX or SSE instructions
64 KBUILD_CFLAGS += -mno-mmx -mno-sse
65
60 # Use -mpreferred-stack-boundary=3 if supported. 66 # Use -mpreferred-stack-boundary=3 if supported.
61 KBUILD_CFLAGS += $(call cc-option,-mno-sse -mpreferred-stack-boundary=3) 67 KBUILD_CFLAGS += $(call cc-option,-mpreferred-stack-boundary=3)
62 68
63 # FIXME - should be integrated in Makefile.cpu (Makefile_32.cpu) 69 # FIXME - should be integrated in Makefile.cpu (Makefile_32.cpu)
64 cflags-$(CONFIG_MK8) += $(call cc-option,-march=k8) 70 cflags-$(CONFIG_MK8) += $(call cc-option,-march=k8)
diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
index dce69a256896..d9c11956fce0 100644
--- a/arch/x86/boot/Makefile
+++ b/arch/x86/boot/Makefile
@@ -53,18 +53,18 @@ $(obj)/cpustr.h: $(obj)/mkcpustr FORCE
53 53
54# How to compile the 16-bit code. Note we always compile for -march=i386, 54# How to compile the 16-bit code. Note we always compile for -march=i386,
55# that way we can complain to the user if the CPU is insufficient. 55# that way we can complain to the user if the CPU is insufficient.
56KBUILD_CFLAGS := $(USERINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \ 56KBUILD_CFLAGS := $(USERINCLUDE) -m32 -g -Os -D_SETUP -D__KERNEL__ \
57 -DDISABLE_BRANCH_PROFILING \ 57 -DDISABLE_BRANCH_PROFILING \
58 -Wall -Wstrict-prototypes \ 58 -Wall -Wstrict-prototypes \
59 -march=i386 -mregparm=3 \ 59 -march=i386 -mregparm=3 \
60 -include $(srctree)/$(src)/code16gcc.h \ 60 -include $(srctree)/$(src)/code16gcc.h \
61 -fno-strict-aliasing -fomit-frame-pointer -fno-pic \ 61 -fno-strict-aliasing -fomit-frame-pointer -fno-pic \
62 -mno-mmx -mno-sse \
62 $(call cc-option, -ffreestanding) \ 63 $(call cc-option, -ffreestanding) \
63 $(call cc-option, -fno-toplevel-reorder,\ 64 $(call cc-option, -fno-toplevel-reorder,\
64 $(call cc-option, -fno-unit-at-a-time)) \ 65 $(call cc-option, -fno-unit-at-a-time)) \
65 $(call cc-option, -fno-stack-protector) \ 66 $(call cc-option, -fno-stack-protector) \
66 $(call cc-option, -mpreferred-stack-boundary=2) 67 $(call cc-option, -mpreferred-stack-boundary=2)
67KBUILD_CFLAGS += $(call cc-option, -m32)
68KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__ 68KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
69GCOV_PROFILE := n 69GCOV_PROFILE := n
70 70
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index dcd90df10ab4..c8a6792e7842 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -13,6 +13,7 @@ KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
13cflags-$(CONFIG_X86_32) := -march=i386 13cflags-$(CONFIG_X86_32) := -march=i386
14cflags-$(CONFIG_X86_64) := -mcmodel=small 14cflags-$(CONFIG_X86_64) := -mcmodel=small
15KBUILD_CFLAGS += $(cflags-y) 15KBUILD_CFLAGS += $(cflags-y)
16KBUILD_CFLAGS += -mno-mmx -mno-sse
16KBUILD_CFLAGS += $(call cc-option,-ffreestanding) 17KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
17KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector) 18KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
18 19
diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile
index 7d6ba9db1be9..e0fc24db234a 100644
--- a/arch/x86/crypto/Makefile
+++ b/arch/x86/crypto/Makefile
@@ -3,8 +3,9 @@
3# 3#
4 4
5avx_supported := $(call as-instr,vpxor %xmm0$(comma)%xmm0$(comma)%xmm0,yes,no) 5avx_supported := $(call as-instr,vpxor %xmm0$(comma)%xmm0$(comma)%xmm0,yes,no)
6avx2_supported := $(call as-instr,vpgatherdd %ymm0$(comma)(%eax$(comma)%ymm1\
7 $(comma)4)$(comma)%ymm2,yes,no)
6 8
7obj-$(CONFIG_CRYPTO_ABLK_HELPER_X86) += ablk_helper.o
8obj-$(CONFIG_CRYPTO_GLUE_HELPER_X86) += glue_helper.o 9obj-$(CONFIG_CRYPTO_GLUE_HELPER_X86) += glue_helper.o
9 10
10obj-$(CONFIG_CRYPTO_AES_586) += aes-i586.o 11obj-$(CONFIG_CRYPTO_AES_586) += aes-i586.o
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index f80e668785c0..835488b745ee 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -34,7 +34,7 @@
34#include <asm/cpu_device_id.h> 34#include <asm/cpu_device_id.h>
35#include <asm/i387.h> 35#include <asm/i387.h>
36#include <asm/crypto/aes.h> 36#include <asm/crypto/aes.h>
37#include <asm/crypto/ablk_helper.h> 37#include <crypto/ablk_helper.h>
38#include <crypto/scatterwalk.h> 38#include <crypto/scatterwalk.h>
39#include <crypto/internal/aead.h> 39#include <crypto/internal/aead.h>
40#include <linux/workqueue.h> 40#include <linux/workqueue.h>
diff --git a/arch/x86/crypto/camellia_aesni_avx2_glue.c b/arch/x86/crypto/camellia_aesni_avx2_glue.c
index 414fe5d7946b..4209a76fcdaa 100644
--- a/arch/x86/crypto/camellia_aesni_avx2_glue.c
+++ b/arch/x86/crypto/camellia_aesni_avx2_glue.c
@@ -14,6 +14,7 @@
14#include <linux/types.h> 14#include <linux/types.h>
15#include <linux/crypto.h> 15#include <linux/crypto.h>
16#include <linux/err.h> 16#include <linux/err.h>
17#include <crypto/ablk_helper.h>
17#include <crypto/algapi.h> 18#include <crypto/algapi.h>
18#include <crypto/ctr.h> 19#include <crypto/ctr.h>
19#include <crypto/lrw.h> 20#include <crypto/lrw.h>
@@ -21,7 +22,6 @@
21#include <asm/xcr.h> 22#include <asm/xcr.h>
22#include <asm/xsave.h> 23#include <asm/xsave.h>
23#include <asm/crypto/camellia.h> 24#include <asm/crypto/camellia.h>
24#include <asm/crypto/ablk_helper.h>
25#include <asm/crypto/glue_helper.h> 25#include <asm/crypto/glue_helper.h>
26 26
27#define CAMELLIA_AESNI_PARALLEL_BLOCKS 16 27#define CAMELLIA_AESNI_PARALLEL_BLOCKS 16
diff --git a/arch/x86/crypto/camellia_aesni_avx_glue.c b/arch/x86/crypto/camellia_aesni_avx_glue.c
index 37fd0c0a81ea..87a041a10f4a 100644
--- a/arch/x86/crypto/camellia_aesni_avx_glue.c
+++ b/arch/x86/crypto/camellia_aesni_avx_glue.c
@@ -14,6 +14,7 @@
14#include <linux/types.h> 14#include <linux/types.h>
15#include <linux/crypto.h> 15#include <linux/crypto.h>
16#include <linux/err.h> 16#include <linux/err.h>
17#include <crypto/ablk_helper.h>
17#include <crypto/algapi.h> 18#include <crypto/algapi.h>
18#include <crypto/ctr.h> 19#include <crypto/ctr.h>
19#include <crypto/lrw.h> 20#include <crypto/lrw.h>
@@ -21,7 +22,6 @@
21#include <asm/xcr.h> 22#include <asm/xcr.h>
22#include <asm/xsave.h> 23#include <asm/xsave.h>
23#include <asm/crypto/camellia.h> 24#include <asm/crypto/camellia.h>
24#include <asm/crypto/ablk_helper.h>
25#include <asm/crypto/glue_helper.h> 25#include <asm/crypto/glue_helper.h>
26 26
27#define CAMELLIA_AESNI_PARALLEL_BLOCKS 16 27#define CAMELLIA_AESNI_PARALLEL_BLOCKS 16
diff --git a/arch/x86/crypto/cast5_avx_glue.c b/arch/x86/crypto/cast5_avx_glue.c
index c6631813dc11..e6a3700489b9 100644
--- a/arch/x86/crypto/cast5_avx_glue.c
+++ b/arch/x86/crypto/cast5_avx_glue.c
@@ -26,13 +26,13 @@
26#include <linux/types.h> 26#include <linux/types.h>
27#include <linux/crypto.h> 27#include <linux/crypto.h>
28#include <linux/err.h> 28#include <linux/err.h>
29#include <crypto/ablk_helper.h>
29#include <crypto/algapi.h> 30#include <crypto/algapi.h>
30#include <crypto/cast5.h> 31#include <crypto/cast5.h>
31#include <crypto/cryptd.h> 32#include <crypto/cryptd.h>
32#include <crypto/ctr.h> 33#include <crypto/ctr.h>
33#include <asm/xcr.h> 34#include <asm/xcr.h>
34#include <asm/xsave.h> 35#include <asm/xsave.h>
35#include <asm/crypto/ablk_helper.h>
36#include <asm/crypto/glue_helper.h> 36#include <asm/crypto/glue_helper.h>
37 37
38#define CAST5_PARALLEL_BLOCKS 16 38#define CAST5_PARALLEL_BLOCKS 16
diff --git a/arch/x86/crypto/cast6_avx_glue.c b/arch/x86/crypto/cast6_avx_glue.c
index 8d0dfb86a559..09f3677393e4 100644
--- a/arch/x86/crypto/cast6_avx_glue.c
+++ b/arch/x86/crypto/cast6_avx_glue.c
@@ -28,6 +28,7 @@
28#include <linux/types.h> 28#include <linux/types.h>
29#include <linux/crypto.h> 29#include <linux/crypto.h>
30#include <linux/err.h> 30#include <linux/err.h>
31#include <crypto/ablk_helper.h>
31#include <crypto/algapi.h> 32#include <crypto/algapi.h>
32#include <crypto/cast6.h> 33#include <crypto/cast6.h>
33#include <crypto/cryptd.h> 34#include <crypto/cryptd.h>
@@ -37,7 +38,6 @@
37#include <crypto/xts.h> 38#include <crypto/xts.h>
38#include <asm/xcr.h> 39#include <asm/xcr.h>
39#include <asm/xsave.h> 40#include <asm/xsave.h>
40#include <asm/crypto/ablk_helper.h>
41#include <asm/crypto/glue_helper.h> 41#include <asm/crypto/glue_helper.h>
42 42
43#define CAST6_PARALLEL_BLOCKS 8 43#define CAST6_PARALLEL_BLOCKS 8
diff --git a/arch/x86/crypto/serpent_avx2_glue.c b/arch/x86/crypto/serpent_avx2_glue.c
index 23aabc6c20a5..2fae489b1524 100644
--- a/arch/x86/crypto/serpent_avx2_glue.c
+++ b/arch/x86/crypto/serpent_avx2_glue.c
@@ -14,6 +14,7 @@
14#include <linux/types.h> 14#include <linux/types.h>
15#include <linux/crypto.h> 15#include <linux/crypto.h>
16#include <linux/err.h> 16#include <linux/err.h>
17#include <crypto/ablk_helper.h>
17#include <crypto/algapi.h> 18#include <crypto/algapi.h>
18#include <crypto/ctr.h> 19#include <crypto/ctr.h>
19#include <crypto/lrw.h> 20#include <crypto/lrw.h>
@@ -22,7 +23,6 @@
22#include <asm/xcr.h> 23#include <asm/xcr.h>
23#include <asm/xsave.h> 24#include <asm/xsave.h>
24#include <asm/crypto/serpent-avx.h> 25#include <asm/crypto/serpent-avx.h>
25#include <asm/crypto/ablk_helper.h>
26#include <asm/crypto/glue_helper.h> 26#include <asm/crypto/glue_helper.h>
27 27
28#define SERPENT_AVX2_PARALLEL_BLOCKS 16 28#define SERPENT_AVX2_PARALLEL_BLOCKS 16
diff --git a/arch/x86/crypto/serpent_avx_glue.c b/arch/x86/crypto/serpent_avx_glue.c
index 9ae83cf8d21e..ff4870870972 100644
--- a/arch/x86/crypto/serpent_avx_glue.c
+++ b/arch/x86/crypto/serpent_avx_glue.c
@@ -28,6 +28,7 @@
28#include <linux/types.h> 28#include <linux/types.h>
29#include <linux/crypto.h> 29#include <linux/crypto.h>
30#include <linux/err.h> 30#include <linux/err.h>
31#include <crypto/ablk_helper.h>
31#include <crypto/algapi.h> 32#include <crypto/algapi.h>
32#include <crypto/serpent.h> 33#include <crypto/serpent.h>
33#include <crypto/cryptd.h> 34#include <crypto/cryptd.h>
@@ -38,7 +39,6 @@
38#include <asm/xcr.h> 39#include <asm/xcr.h>
39#include <asm/xsave.h> 40#include <asm/xsave.h>
40#include <asm/crypto/serpent-avx.h> 41#include <asm/crypto/serpent-avx.h>
41#include <asm/crypto/ablk_helper.h>
42#include <asm/crypto/glue_helper.h> 42#include <asm/crypto/glue_helper.h>
43 43
44/* 8-way parallel cipher functions */ 44/* 8-way parallel cipher functions */
diff --git a/arch/x86/crypto/serpent_sse2_glue.c b/arch/x86/crypto/serpent_sse2_glue.c
index 97a356ece24d..8c95f8637306 100644
--- a/arch/x86/crypto/serpent_sse2_glue.c
+++ b/arch/x86/crypto/serpent_sse2_glue.c
@@ -34,6 +34,7 @@
34#include <linux/types.h> 34#include <linux/types.h>
35#include <linux/crypto.h> 35#include <linux/crypto.h>
36#include <linux/err.h> 36#include <linux/err.h>
37#include <crypto/ablk_helper.h>
37#include <crypto/algapi.h> 38#include <crypto/algapi.h>
38#include <crypto/serpent.h> 39#include <crypto/serpent.h>
39#include <crypto/cryptd.h> 40#include <crypto/cryptd.h>
@@ -42,7 +43,6 @@
42#include <crypto/lrw.h> 43#include <crypto/lrw.h>
43#include <crypto/xts.h> 44#include <crypto/xts.h>
44#include <asm/crypto/serpent-sse2.h> 45#include <asm/crypto/serpent-sse2.h>
45#include <asm/crypto/ablk_helper.h>
46#include <asm/crypto/glue_helper.h> 46#include <asm/crypto/glue_helper.h>
47 47
48static void serpent_decrypt_cbc_xway(void *ctx, u128 *dst, const u128 *src) 48static void serpent_decrypt_cbc_xway(void *ctx, u128 *dst, const u128 *src)
diff --git a/arch/x86/crypto/sha256_ssse3_glue.c b/arch/x86/crypto/sha256_ssse3_glue.c
index 50226c4b86ed..f248546da1ca 100644
--- a/arch/x86/crypto/sha256_ssse3_glue.c
+++ b/arch/x86/crypto/sha256_ssse3_glue.c
@@ -281,7 +281,7 @@ static int __init sha256_ssse3_mod_init(void)
281 /* allow AVX to override SSSE3, it's a little faster */ 281 /* allow AVX to override SSSE3, it's a little faster */
282 if (avx_usable()) { 282 if (avx_usable()) {
283#ifdef CONFIG_AS_AVX2 283#ifdef CONFIG_AS_AVX2
284 if (boot_cpu_has(X86_FEATURE_AVX2)) 284 if (boot_cpu_has(X86_FEATURE_AVX2) && boot_cpu_has(X86_FEATURE_BMI2))
285 sha256_transform_asm = sha256_transform_rorx; 285 sha256_transform_asm = sha256_transform_rorx;
286 else 286 else
287#endif 287#endif
@@ -319,4 +319,4 @@ MODULE_LICENSE("GPL");
319MODULE_DESCRIPTION("SHA256 Secure Hash Algorithm, Supplemental SSE3 accelerated"); 319MODULE_DESCRIPTION("SHA256 Secure Hash Algorithm, Supplemental SSE3 accelerated");
320 320
321MODULE_ALIAS("sha256"); 321MODULE_ALIAS("sha256");
322MODULE_ALIAS("sha384"); 322MODULE_ALIAS("sha224");
diff --git a/arch/x86/crypto/twofish_avx_glue.c b/arch/x86/crypto/twofish_avx_glue.c
index a62ba541884e..4e3c665be129 100644
--- a/arch/x86/crypto/twofish_avx_glue.c
+++ b/arch/x86/crypto/twofish_avx_glue.c
@@ -28,6 +28,7 @@
28#include <linux/types.h> 28#include <linux/types.h>
29#include <linux/crypto.h> 29#include <linux/crypto.h>
30#include <linux/err.h> 30#include <linux/err.h>
31#include <crypto/ablk_helper.h>
31#include <crypto/algapi.h> 32#include <crypto/algapi.h>
32#include <crypto/twofish.h> 33#include <crypto/twofish.h>
33#include <crypto/cryptd.h> 34#include <crypto/cryptd.h>
@@ -39,7 +40,6 @@
39#include <asm/xcr.h> 40#include <asm/xcr.h>
40#include <asm/xsave.h> 41#include <asm/xsave.h>
41#include <asm/crypto/twofish.h> 42#include <asm/crypto/twofish.h>
42#include <asm/crypto/ablk_helper.h>
43#include <asm/crypto/glue_helper.h> 43#include <asm/crypto/glue_helper.h>
44#include <crypto/scatterwalk.h> 44#include <crypto/scatterwalk.h>
45#include <linux/workqueue.h> 45#include <linux/workqueue.h>
diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
index da31c8b8a92d..b17f4f48ecd7 100644
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
@@ -77,7 +77,7 @@ static inline void atomic_sub(int i, atomic_t *v)
77 */ 77 */
78static inline int atomic_sub_and_test(int i, atomic_t *v) 78static inline int atomic_sub_and_test(int i, atomic_t *v)
79{ 79{
80 GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, i, "%0", "e"); 80 GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", "e");
81} 81}
82 82
83/** 83/**
@@ -141,7 +141,7 @@ static inline int atomic_inc_and_test(atomic_t *v)
141 */ 141 */
142static inline int atomic_add_negative(int i, atomic_t *v) 142static inline int atomic_add_negative(int i, atomic_t *v)
143{ 143{
144 GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, i, "%0", "s"); 144 GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", "s");
145} 145}
146 146
147/** 147/**
diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
index 3f065c985aee..46e9052bbd28 100644
--- a/arch/x86/include/asm/atomic64_64.h
+++ b/arch/x86/include/asm/atomic64_64.h
@@ -72,7 +72,7 @@ static inline void atomic64_sub(long i, atomic64_t *v)
72 */ 72 */
73static inline int atomic64_sub_and_test(long i, atomic64_t *v) 73static inline int atomic64_sub_and_test(long i, atomic64_t *v)
74{ 74{
75 GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, i, "%0", "e"); 75 GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, "er", i, "%0", "e");
76} 76}
77 77
78/** 78/**
@@ -138,7 +138,7 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
138 */ 138 */
139static inline int atomic64_add_negative(long i, atomic64_t *v) 139static inline int atomic64_add_negative(long i, atomic64_t *v)
140{ 140{
141 GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, i, "%0", "s"); 141 GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, "er", i, "%0", "s");
142} 142}
143 143
144/** 144/**
diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
index 6d76d0935989..9fc1af74dc83 100644
--- a/arch/x86/include/asm/bitops.h
+++ b/arch/x86/include/asm/bitops.h
@@ -205,7 +205,7 @@ static inline void change_bit(long nr, volatile unsigned long *addr)
205 */ 205 */
206static inline int test_and_set_bit(long nr, volatile unsigned long *addr) 206static inline int test_and_set_bit(long nr, volatile unsigned long *addr)
207{ 207{
208 GEN_BINARY_RMWcc(LOCK_PREFIX "bts", *addr, nr, "%0", "c"); 208 GEN_BINARY_RMWcc(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", "c");
209} 209}
210 210
211/** 211/**
@@ -251,7 +251,7 @@ static inline int __test_and_set_bit(long nr, volatile unsigned long *addr)
251 */ 251 */
252static inline int test_and_clear_bit(long nr, volatile unsigned long *addr) 252static inline int test_and_clear_bit(long nr, volatile unsigned long *addr)
253{ 253{
254 GEN_BINARY_RMWcc(LOCK_PREFIX "btr", *addr, nr, "%0", "c"); 254 GEN_BINARY_RMWcc(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", "c");
255} 255}
256 256
257/** 257/**
@@ -304,7 +304,7 @@ static inline int __test_and_change_bit(long nr, volatile unsigned long *addr)
304 */ 304 */
305static inline int test_and_change_bit(long nr, volatile unsigned long *addr) 305static inline int test_and_change_bit(long nr, volatile unsigned long *addr)
306{ 306{
307 GEN_BINARY_RMWcc(LOCK_PREFIX "btc", *addr, nr, "%0", "c"); 307 GEN_BINARY_RMWcc(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", "c");
308} 308}
309 309
310static __always_inline int constant_test_bit(long nr, const volatile unsigned long *addr) 310static __always_inline int constant_test_bit(long nr, const volatile unsigned long *addr)
diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
index 5b23e605e707..4ad6560847b1 100644
--- a/arch/x86/include/asm/local.h
+++ b/arch/x86/include/asm/local.h
@@ -52,7 +52,7 @@ static inline void local_sub(long i, local_t *l)
52 */ 52 */
53static inline int local_sub_and_test(long i, local_t *l) 53static inline int local_sub_and_test(long i, local_t *l)
54{ 54{
55 GEN_BINARY_RMWcc(_ASM_SUB, l->a.counter, i, "%0", "e"); 55 GEN_BINARY_RMWcc(_ASM_SUB, l->a.counter, "er", i, "%0", "e");
56} 56}
57 57
58/** 58/**
@@ -92,7 +92,7 @@ static inline int local_inc_and_test(local_t *l)
92 */ 92 */
93static inline int local_add_negative(long i, local_t *l) 93static inline int local_add_negative(long i, local_t *l)
94{ 94{
95 GEN_BINARY_RMWcc(_ASM_ADD, l->a.counter, i, "%0", "s"); 95 GEN_BINARY_RMWcc(_ASM_ADD, l->a.counter, "er", i, "%0", "s");
96} 96}
97 97
98/** 98/**
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 3d1999458709..bbc8b12fa443 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -452,9 +452,16 @@ static inline int pte_present(pte_t a)
452} 452}
453 453
454#define pte_accessible pte_accessible 454#define pte_accessible pte_accessible
455static inline int pte_accessible(pte_t a) 455static inline bool pte_accessible(struct mm_struct *mm, pte_t a)
456{ 456{
457 return pte_flags(a) & _PAGE_PRESENT; 457 if (pte_flags(a) & _PAGE_PRESENT)
458 return true;
459
460 if ((pte_flags(a) & (_PAGE_PROTNONE | _PAGE_NUMA)) &&
461 mm_tlb_flush_pending(mm))
462 return true;
463
464 return false;
458} 465}
459 466
460static inline int pte_hidden(pte_t pte) 467static inline int pte_hidden(pte_t pte)
diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
index 8729723636fd..c8b051933b1b 100644
--- a/arch/x86/include/asm/preempt.h
+++ b/arch/x86/include/asm/preempt.h
@@ -8,6 +8,12 @@
8DECLARE_PER_CPU(int, __preempt_count); 8DECLARE_PER_CPU(int, __preempt_count);
9 9
10/* 10/*
11 * We use the PREEMPT_NEED_RESCHED bit as an inverted NEED_RESCHED such
12 * that a decrement hitting 0 means we can and should reschedule.
13 */
14#define PREEMPT_ENABLED (0 + PREEMPT_NEED_RESCHED)
15
16/*
11 * We mask the PREEMPT_NEED_RESCHED bit so as not to confuse all current users 17 * We mask the PREEMPT_NEED_RESCHED bit so as not to confuse all current users
12 * that think a non-zero value indicates we cannot preempt. 18 * that think a non-zero value indicates we cannot preempt.
13 */ 19 */
@@ -74,6 +80,11 @@ static __always_inline void __preempt_count_sub(int val)
74 __this_cpu_add_4(__preempt_count, -val); 80 __this_cpu_add_4(__preempt_count, -val);
75} 81}
76 82
83/*
84 * Because we keep PREEMPT_NEED_RESCHED set when we do _not_ need to reschedule
85 * a decrement which hits zero means we have no preempt_count and should
86 * reschedule.
87 */
77static __always_inline bool __preempt_count_dec_and_test(void) 88static __always_inline bool __preempt_count_dec_and_test(void)
78{ 89{
79 GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), "e"); 90 GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), "e");
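
Aside: the comments added to preempt.h above describe the inverted NEED_RESCHED convention — the bit stays set while no reschedule is needed, so "decrement and test for zero" covers both conditions in one instruction. A standalone sketch of the bit trick (the 0x80000000 bit value is an assumption for illustration; only the inversion logic matters here):

#include <stdbool.h>
#include <stdio.h>

#define PREEMPT_NEED_RESCHED	0x80000000u	/* assumed bit position */
#define PREEMPT_ENABLED		(0 + PREEMPT_NEED_RESCHED)

/* one level of preempt_disable(), no reschedule requested yet */
static unsigned int my_preempt_count = PREEMPT_ENABLED + 1;

static void my_set_need_resched(void)
{
	my_preempt_count &= ~PREEMPT_NEED_RESCHED;	/* clearing the bit means "resched needed" */
}

static bool my_dec_and_test(void)
{
	return --my_preempt_count == 0;	/* zero only if count is 0 AND resched is needed */
}

int main(void)
{
	printf("no resched pending: %d\n", my_dec_and_test());	/* prints 0 */
	my_preempt_count++;					/* preempt_disable() again */
	my_set_need_resched();
	printf("resched pending:    %d\n", my_dec_and_test());	/* prints 1 */
	return 0;
}
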
diff --git a/arch/x86/include/asm/rmwcc.h b/arch/x86/include/asm/rmwcc.h
index 1ff990f1de8e..8f7866a5b9a4 100644
--- a/arch/x86/include/asm/rmwcc.h
+++ b/arch/x86/include/asm/rmwcc.h
@@ -16,8 +16,8 @@ cc_label: \
16#define GEN_UNARY_RMWcc(op, var, arg0, cc) \ 16#define GEN_UNARY_RMWcc(op, var, arg0, cc) \
17 __GEN_RMWcc(op " " arg0, var, cc) 17 __GEN_RMWcc(op " " arg0, var, cc)
18 18
19#define GEN_BINARY_RMWcc(op, var, val, arg0, cc) \ 19#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \
20 __GEN_RMWcc(op " %1, " arg0, var, cc, "er" (val)) 20 __GEN_RMWcc(op " %1, " arg0, var, cc, vcon (val))
21 21
22#else /* !CC_HAVE_ASM_GOTO */ 22#else /* !CC_HAVE_ASM_GOTO */
23 23
@@ -33,8 +33,8 @@ do { \
33#define GEN_UNARY_RMWcc(op, var, arg0, cc) \ 33#define GEN_UNARY_RMWcc(op, var, arg0, cc) \
34 __GEN_RMWcc(op " " arg0, var, cc) 34 __GEN_RMWcc(op " " arg0, var, cc)
35 35
36#define GEN_BINARY_RMWcc(op, var, val, arg0, cc) \ 36#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \
37 __GEN_RMWcc(op " %2, " arg0, var, cc, "er" (val)) 37 __GEN_RMWcc(op " %2, " arg0, var, cc, vcon (val))
38 38
39#endif /* CC_HAVE_ASM_GOTO */ 39#endif /* CC_HAVE_ASM_GOTO */
40 40
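
Aside: the rmwcc.h change above lets each caller supply the operand constraint ("er" for the add/sub immediates, "Ir" for bit numbers in bitops) instead of hard-coding "er". A toy, self-contained stand-in for the non-asm-goto variant (x86-64, gcc/clang only; names are hypothetical, not the kernel macro):

#include <stdbool.h>
#include <stdio.h>

#define MY_BINARY_RMWcc(op, var, vcon, val, cc)				\
({									\
	bool __ret;							\
	asm volatile (op " %2, %0; set" cc " %1"			\
		      : "+m" (var), "=qm" (__ret)			\
		      : vcon (val) : "memory");				\
	__ret;								\
})

static long counter = 5;

int main(void)
{
	/* like atomic64_sub_and_test(5, &counter): "er" allows an immediate
	 * or register operand; the bit-test instructions would pass "Ir" */
	bool zero = MY_BINARY_RMWcc("subq", counter, "er", 5L, "e");

	printf("zero=%d counter=%ld\n", zero, counter);	/* zero=1 counter=0 */
	return 0;
}
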
diff --git a/arch/x86/include/asm/simd.h b/arch/x86/include/asm/simd.h
new file mode 100644
index 000000000000..ee80b92f0096
--- /dev/null
+++ b/arch/x86/include/asm/simd.h
@@ -0,0 +1,11 @@
1
2#include <asm/i387.h>
3
4/*
5 * may_use_simd - whether it is allowable at this time to issue SIMD
6 * instructions or access the SIMD register file
7 */
8static __must_check inline bool may_use_simd(void)
9{
10 return irq_fpu_usable();
11}
diff --git a/arch/x86/include/asm/trace/irq_vectors.h b/arch/x86/include/asm/trace/irq_vectors.h
index 2874df24e7a4..4cab890007a7 100644
--- a/arch/x86/include/asm/trace/irq_vectors.h
+++ b/arch/x86/include/asm/trace/irq_vectors.h
@@ -72,6 +72,17 @@ DEFINE_IRQ_VECTOR_EVENT(x86_platform_ipi);
72DEFINE_IRQ_VECTOR_EVENT(irq_work); 72DEFINE_IRQ_VECTOR_EVENT(irq_work);
73 73
74/* 74/*
75 * We must dis-allow sampling irq_work_exit() because perf event sampling
76 * itself can cause irq_work, which would lead to an infinite loop;
77 *
78 * 1) irq_work_exit happens
79 * 2) generates perf sample
80 * 3) generates irq_work
81 * 4) goto 1
82 */
83TRACE_EVENT_PERF_PERM(irq_work_exit, is_sampling_event(p_event) ? -EPERM : 0);
84
85/*
75 * call_function - called when entering/exiting a call function interrupt 86 * call_function - called when entering/exiting a call function interrupt
76 * vector handler 87 * vector handler
77 */ 88 */
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index dc1ec0dff939..ea04b342c026 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -387,7 +387,8 @@ static void init_intel(struct cpuinfo_x86 *c)
387 set_cpu_cap(c, X86_FEATURE_PEBS); 387 set_cpu_cap(c, X86_FEATURE_PEBS);
388 } 388 }
389 389
390 if (c->x86 == 6 && c->x86_model == 29 && cpu_has_clflush) 390 if (c->x86 == 6 && cpu_has_clflush &&
391 (c->x86_model == 29 || c->x86_model == 46 || c->x86_model == 47))
391 set_cpu_cap(c, X86_FEATURE_CLFLUSH_MONITOR); 392 set_cpu_cap(c, X86_FEATURE_CLFLUSH_MONITOR);
392 393
393#ifdef CONFIG_X86_64 394#ifdef CONFIG_X86_64
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index fd00bb29425d..c1a861829d81 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -262,11 +262,20 @@ struct cpu_hw_events {
262 __EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK, \ 262 __EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK, \
263 HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST_HSW) 263 HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST_HSW)
264 264
265#define EVENT_CONSTRAINT_END \ 265/*
266 EVENT_CONSTRAINT(0, 0, 0) 266 * We define the end marker as having a weight of -1
267 * to enable blacklisting of events using a counter bitmask
268 * of zero and thus a weight of zero.
269 * The end marker has a weight that cannot possibly be
270 * obtained from counting the bits in the bitmask.
271 */
272#define EVENT_CONSTRAINT_END { .weight = -1 }
267 273
274/*
275 * Check for end marker with weight == -1
276 */
268#define for_each_event_constraint(e, c) \ 277#define for_each_event_constraint(e, c) \
269 for ((e) = (c); (e)->weight; (e)++) 278 for ((e) = (c); (e)->weight != -1; (e)++)
270 279
271/* 280/*
272 * Extra registers for specific events. 281 * Extra registers for specific events.
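
Aside: the EVENT_CONSTRAINT_END change above moves the sentinel from weight 0 to weight -1 so that real entries with an empty counter bitmask (weight 0, i.e. blacklisted events) no longer terminate the walk. A small standalone sketch of that sentinel convention (struct and names are illustrative, not the perf code):

#include <stdio.h>

struct constraint { int weight; const char *name; };

#define CONSTRAINT_END { .weight = -1 }

static struct constraint table[] = {
	{ 2, "cycles" },
	{ 0, "blacklisted" },	/* weight 0 no longer ends the table */
	CONSTRAINT_END,
};

int main(void)
{
	for (struct constraint *c = table; c->weight != -1; c++)
		printf("%s (weight %d)\n", c->name, c->weight);
	return 0;
}
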
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index da3c599584a3..c752cb43e52f 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -558,6 +558,17 @@ void native_machine_shutdown(void)
558{ 558{
559 /* Stop the cpus and apics */ 559 /* Stop the cpus and apics */
560#ifdef CONFIG_X86_IO_APIC 560#ifdef CONFIG_X86_IO_APIC
561 /*
562 * Disabling IO APIC before local APIC is a workaround for
563 * erratum AVR31 in "Intel Atom Processor C2000 Product Family
564 * Specification Update". In this situation, interrupts that target
565 * a Logical Processor whose Local APIC is either in the process of
566 * being hardware disabled or software disabled are neither delivered
567 * nor discarded. When this erratum occurs, the processor may hang.
568 *
569 * Even without the erratum, it still makes sense to quiet IO APIC
570 * before disabling Local APIC.
571 */
561 disable_IO_APIC(); 572 disable_IO_APIC();
562#endif 573#endif
563 574
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 5439117d5c4c..1673940cf9c3 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -143,6 +143,8 @@ static inline int kvm_apic_id(struct kvm_lapic *apic)
143 return (kvm_apic_get_reg(apic, APIC_ID) >> 24) & 0xff; 143 return (kvm_apic_get_reg(apic, APIC_ID) >> 24) & 0xff;
144} 144}
145 145
146#define KVM_X2APIC_CID_BITS 0
147
146static void recalculate_apic_map(struct kvm *kvm) 148static void recalculate_apic_map(struct kvm *kvm)
147{ 149{
148 struct kvm_apic_map *new, *old = NULL; 150 struct kvm_apic_map *new, *old = NULL;
@@ -180,7 +182,8 @@ static void recalculate_apic_map(struct kvm *kvm)
180 if (apic_x2apic_mode(apic)) { 182 if (apic_x2apic_mode(apic)) {
181 new->ldr_bits = 32; 183 new->ldr_bits = 32;
182 new->cid_shift = 16; 184 new->cid_shift = 16;
183 new->cid_mask = new->lid_mask = 0xffff; 185 new->cid_mask = (1 << KVM_X2APIC_CID_BITS) - 1;
186 new->lid_mask = 0xffff;
184 } else if (kvm_apic_sw_enabled(apic) && 187 } else if (kvm_apic_sw_enabled(apic) &&
185 !new->cid_mask /* flat mode */ && 188 !new->cid_mask /* flat mode */ &&
186 kvm_apic_get_reg(apic, APIC_DFR) == APIC_DFR_CLUSTER) { 189 kvm_apic_get_reg(apic, APIC_DFR) == APIC_DFR_CLUSTER) {
@@ -841,7 +844,8 @@ static u32 apic_get_tmcct(struct kvm_lapic *apic)
841 ASSERT(apic != NULL); 844 ASSERT(apic != NULL);
842 845
843 /* if initial count is 0, current count should also be 0 */ 846 /* if initial count is 0, current count should also be 0 */
844 if (kvm_apic_get_reg(apic, APIC_TMICT) == 0) 847 if (kvm_apic_get_reg(apic, APIC_TMICT) == 0 ||
848 apic->lapic_timer.period == 0)
845 return 0; 849 return 0;
846 850
847 remaining = hrtimer_get_remaining(&apic->lapic_timer.timer); 851 remaining = hrtimer_get_remaining(&apic->lapic_timer.timer);
@@ -1346,6 +1350,10 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
1346 return; 1350 return;
1347 } 1351 }
1348 1352
1353 if (!kvm_vcpu_is_bsp(apic->vcpu))
1354 value &= ~MSR_IA32_APICBASE_BSP;
1355 vcpu->arch.apic_base = value;
1356
1349 /* update jump label if enable bit changes */ 1357 /* update jump label if enable bit changes */
1350 if ((vcpu->arch.apic_base ^ value) & MSR_IA32_APICBASE_ENABLE) { 1358 if ((vcpu->arch.apic_base ^ value) & MSR_IA32_APICBASE_ENABLE) {
1351 if (value & MSR_IA32_APICBASE_ENABLE) 1359 if (value & MSR_IA32_APICBASE_ENABLE)
@@ -1355,10 +1363,6 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
1355 recalculate_apic_map(vcpu->kvm); 1363 recalculate_apic_map(vcpu->kvm);
1356 } 1364 }
1357 1365
1358 if (!kvm_vcpu_is_bsp(apic->vcpu))
1359 value &= ~MSR_IA32_APICBASE_BSP;
1360
1361 vcpu->arch.apic_base = value;
1362 if ((old_value ^ value) & X2APIC_ENABLE) { 1366 if ((old_value ^ value) & X2APIC_ENABLE) {
1363 if (value & X2APIC_ENABLE) { 1367 if (value & X2APIC_ENABLE) {
1364 u32 id = kvm_apic_id(apic); 1368 u32 id = kvm_apic_id(apic);
@@ -1691,7 +1695,6 @@ static void apic_sync_pv_eoi_from_guest(struct kvm_vcpu *vcpu,
1691void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu) 1695void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu)
1692{ 1696{
1693 u32 data; 1697 u32 data;
1694 void *vapic;
1695 1698
1696 if (test_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention)) 1699 if (test_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention))
1697 apic_sync_pv_eoi_from_guest(vcpu, vcpu->arch.apic); 1700 apic_sync_pv_eoi_from_guest(vcpu, vcpu->arch.apic);
@@ -1699,9 +1702,8 @@ void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu)
1699 if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention)) 1702 if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))
1700 return; 1703 return;
1701 1704
1702 vapic = kmap_atomic(vcpu->arch.apic->vapic_page); 1705 kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
1703 data = *(u32 *)(vapic + offset_in_page(vcpu->arch.apic->vapic_addr)); 1706 sizeof(u32));
1704 kunmap_atomic(vapic);
1705 1707
1706 apic_set_tpr(vcpu->arch.apic, data & 0xff); 1708 apic_set_tpr(vcpu->arch.apic, data & 0xff);
1707} 1709}
@@ -1737,7 +1739,6 @@ void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu)
1737 u32 data, tpr; 1739 u32 data, tpr;
1738 int max_irr, max_isr; 1740 int max_irr, max_isr;
1739 struct kvm_lapic *apic = vcpu->arch.apic; 1741 struct kvm_lapic *apic = vcpu->arch.apic;
1740 void *vapic;
1741 1742
1742 apic_sync_pv_eoi_to_guest(vcpu, apic); 1743 apic_sync_pv_eoi_to_guest(vcpu, apic);
1743 1744
@@ -1753,18 +1754,24 @@ void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu)
1753 max_isr = 0; 1754 max_isr = 0;
1754 data = (tpr & 0xff) | ((max_isr & 0xf0) << 8) | (max_irr << 24); 1755 data = (tpr & 0xff) | ((max_isr & 0xf0) << 8) | (max_irr << 24);
1755 1756
1756 vapic = kmap_atomic(vcpu->arch.apic->vapic_page); 1757 kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
1757 *(u32 *)(vapic + offset_in_page(vcpu->arch.apic->vapic_addr)) = data; 1758 sizeof(u32));
1758 kunmap_atomic(vapic);
1759} 1759}
1760 1760
1761void kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr) 1761int kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr)
1762{ 1762{
1763 vcpu->arch.apic->vapic_addr = vapic_addr; 1763 if (vapic_addr) {
1764 if (vapic_addr) 1764 if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
1765 &vcpu->arch.apic->vapic_cache,
1766 vapic_addr, sizeof(u32)))
1767 return -EINVAL;
1765 __set_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention); 1768 __set_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);
1766 else 1769 } else {
1767 __clear_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention); 1770 __clear_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);
1771 }
1772
1773 vcpu->arch.apic->vapic_addr = vapic_addr;
1774 return 0;
1768} 1775}
1769 1776
1770int kvm_x2apic_msr_write(struct kvm_vcpu *vcpu, u32 msr, u64 data) 1777int kvm_x2apic_msr_write(struct kvm_vcpu *vcpu, u32 msr, u64 data)
diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
index c730ac9fe801..c8b0d0d2da5c 100644
--- a/arch/x86/kvm/lapic.h
+++ b/arch/x86/kvm/lapic.h
@@ -34,7 +34,7 @@ struct kvm_lapic {
34 */ 34 */
35 void *regs; 35 void *regs;
36 gpa_t vapic_addr; 36 gpa_t vapic_addr;
37 struct page *vapic_page; 37 struct gfn_to_hva_cache vapic_cache;
38 unsigned long pending_events; 38 unsigned long pending_events;
39 unsigned int sipi_vector; 39 unsigned int sipi_vector;
40}; 40};
@@ -76,7 +76,7 @@ void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu, u64 data);
76void kvm_apic_write_nodecode(struct kvm_vcpu *vcpu, u32 offset); 76void kvm_apic_write_nodecode(struct kvm_vcpu *vcpu, u32 offset);
77void kvm_apic_set_eoi_accelerated(struct kvm_vcpu *vcpu, int vector); 77void kvm_apic_set_eoi_accelerated(struct kvm_vcpu *vcpu, int vector);
78 78
79void kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr); 79int kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr);
80void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu); 80void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu);
81void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu); 81void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu);
82 82
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index b2fe1c252f35..da7837e1349d 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -8283,8 +8283,7 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
8283 vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK); 8283 vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
8284 kvm_set_cr4(vcpu, vmcs12->host_cr4); 8284 kvm_set_cr4(vcpu, vmcs12->host_cr4);
8285 8285
8286 if (nested_cpu_has_ept(vmcs12)) 8286 nested_ept_uninit_mmu_context(vcpu);
8287 nested_ept_uninit_mmu_context(vcpu);
8288 8287
8289 kvm_set_cr3(vcpu, vmcs12->host_cr3); 8288 kvm_set_cr3(vcpu, vmcs12->host_cr3);
8290 kvm_mmu_reset_context(vcpu); 8289 kvm_mmu_reset_context(vcpu);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 21ef1ba184ae..5d004da1e35d 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3214,8 +3214,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
3214 r = -EFAULT; 3214 r = -EFAULT;
3215 if (copy_from_user(&va, argp, sizeof va)) 3215 if (copy_from_user(&va, argp, sizeof va))
3216 goto out; 3216 goto out;
3217 r = 0; 3217 r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
3218 kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
3219 break; 3218 break;
3220 } 3219 }
3221 case KVM_X86_SETUP_MCE: { 3220 case KVM_X86_SETUP_MCE: {
@@ -5739,36 +5738,6 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu)
5739 !kvm_event_needs_reinjection(vcpu); 5738 !kvm_event_needs_reinjection(vcpu);
5740} 5739}
5741 5740
5742static int vapic_enter(struct kvm_vcpu *vcpu)
5743{
5744 struct kvm_lapic *apic = vcpu->arch.apic;
5745 struct page *page;
5746
5747 if (!apic || !apic->vapic_addr)
5748 return 0;
5749
5750 page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
5751 if (is_error_page(page))
5752 return -EFAULT;
5753
5754 vcpu->arch.apic->vapic_page = page;
5755 return 0;
5756}
5757
5758static void vapic_exit(struct kvm_vcpu *vcpu)
5759{
5760 struct kvm_lapic *apic = vcpu->arch.apic;
5761 int idx;
5762
5763 if (!apic || !apic->vapic_addr)
5764 return;
5765
5766 idx = srcu_read_lock(&vcpu->kvm->srcu);
5767 kvm_release_page_dirty(apic->vapic_page);
5768 mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
5769 srcu_read_unlock(&vcpu->kvm->srcu, idx);
5770}
5771
5772static void update_cr8_intercept(struct kvm_vcpu *vcpu) 5741static void update_cr8_intercept(struct kvm_vcpu *vcpu)
5773{ 5742{
5774 int max_irr, tpr; 5743 int max_irr, tpr;
@@ -6069,11 +6038,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
6069 struct kvm *kvm = vcpu->kvm; 6038 struct kvm *kvm = vcpu->kvm;
6070 6039
6071 vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); 6040 vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
6072 r = vapic_enter(vcpu);
6073 if (r) {
6074 srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
6075 return r;
6076 }
6077 6041
6078 r = 1; 6042 r = 1;
6079 while (r > 0) { 6043 while (r > 0) {
@@ -6132,8 +6096,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
6132 6096
6133 srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); 6097 srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
6134 6098
6135 vapic_exit(vcpu);
6136
6137 return r; 6099 return r;
6138} 6100}
6139 6101
diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
index dd74e46828c0..0596e8e0cc19 100644
--- a/arch/x86/mm/gup.c
+++ b/arch/x86/mm/gup.c
@@ -83,6 +83,12 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
83 pte_t pte = gup_get_pte(ptep); 83 pte_t pte = gup_get_pte(ptep);
84 struct page *page; 84 struct page *page;
85 85
86 /* Similar to the PMD case, NUMA hinting must take slow path */
87 if (pte_numa(pte)) {
88 pte_unmap(ptep);
89 return 0;
90 }
91
86 if ((pte_flags(pte) & (mask | _PAGE_SPECIAL)) != mask) { 92 if ((pte_flags(pte) & (mask | _PAGE_SPECIAL)) != mask) {
87 pte_unmap(ptep); 93 pte_unmap(ptep);
88 return 0; 94 return 0;
@@ -167,6 +173,13 @@ static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
167 if (pmd_none(pmd) || pmd_trans_splitting(pmd)) 173 if (pmd_none(pmd) || pmd_trans_splitting(pmd))
168 return 0; 174 return 0;
169 if (unlikely(pmd_large(pmd))) { 175 if (unlikely(pmd_large(pmd))) {
176 /*
177 * NUMA hinting faults need to be handled in the GUP
178 * slowpath for accounting purposes and so that they
179 * can be serialised against THP migration.
180 */
181 if (pmd_numa(pmd))
182 return 0;
170 if (!gup_huge_pmd(pmd, addr, next, write, pages, nr)) 183 if (!gup_huge_pmd(pmd, addr, next, write, pages, nr))
171 return 0; 184 return 0;
172 } else { 185 } else {
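
For reference, a minimal sketch of the fast-path bail-out pattern the two gup.c hunks above introduce: when a pte or pmd carries the NUMA-hinting bit, the lockless walker backs out and returns 0 so the caller falls through to the slow path, where the hinting fault can be accounted and serialised against THP migration. The wrapper name example_gup_check_pte() is illustrative only; pte_numa() and pte_unmap() are the helpers used in the hunk itself.

	/* Illustrative only: mirror of the pte check added to gup_pte_range(). */
	static inline int example_gup_check_pte(pte_t pte, pte_t *ptep)
	{
		/* NUMA hinting faults must take the slow path */
		if (pte_numa(pte)) {
			pte_unmap(ptep);
			return 0;	/* 0 tells the caller to fall back */
		}
		return 1;		/* safe to continue the lockless walk */
	}
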
diff --git a/arch/x86/platform/efi/early_printk.c b/arch/x86/platform/efi/early_printk.c
index 6599a0027b76..81b506d5befd 100644
--- a/arch/x86/platform/efi/early_printk.c
+++ b/arch/x86/platform/efi/early_printk.c
@@ -142,7 +142,7 @@ early_efi_write(struct console *con, const char *str, unsigned int num)
142 efi_y += font->height; 142 efi_y += font->height;
143 } 143 }
144 144
145 if (efi_y + font->height >= si->lfb_height) { 145 if (efi_y + font->height > si->lfb_height) {
146 u32 i; 146 u32 i;
147 147
148 efi_y -= font->height; 148 efi_y -= font->height;
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index 92c02344a060..cceb813044ef 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -690,13 +690,6 @@ void __init efi_init(void)
690 690
691 set_bit(EFI_MEMMAP, &x86_efi_facility); 691 set_bit(EFI_MEMMAP, &x86_efi_facility);
692 692
693#ifdef CONFIG_X86_32
694 if (efi_is_native()) {
695 x86_platform.get_wallclock = efi_get_time;
696 x86_platform.set_wallclock = efi_set_rtc_mmss;
697 }
698#endif
699
700#if EFI_DEBUG 693#if EFI_DEBUG
701 print_efi_memmap(); 694 print_efi_memmap();
702#endif 695#endif
diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
index 0f92173a12b6..efe4d7220397 100644
--- a/arch/x86/platform/uv/tlb_uv.c
+++ b/arch/x86/platform/uv/tlb_uv.c
@@ -1070,12 +1070,13 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
1070 unsigned long status; 1070 unsigned long status;
1071 1071
1072 bcp = &per_cpu(bau_control, cpu); 1072 bcp = &per_cpu(bau_control, cpu);
1073 stat = bcp->statp;
1074 stat->s_enters++;
1075 1073
1076 if (bcp->nobau) 1074 if (bcp->nobau)
1077 return cpumask; 1075 return cpumask;
1078 1076
1077 stat = bcp->statp;
1078 stat->s_enters++;
1079
1079 if (bcp->busy) { 1080 if (bcp->busy) {
1080 descriptor_status = 1081 descriptor_status =
1081 read_lmmr(UVH_LB_BAU_SB_ACTIVATION_STATUS_0); 1082 read_lmmr(UVH_LB_BAU_SB_ACTIVATION_STATUS_0);
diff --git a/arch/x86/realmode/rm/Makefile b/arch/x86/realmode/rm/Makefile
index 88692871823f..9cac82588cbc 100644
--- a/arch/x86/realmode/rm/Makefile
+++ b/arch/x86/realmode/rm/Makefile
@@ -73,9 +73,10 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -m32 -g -Os -D_SETUP -D__KERNEL__ -D_WAKEUP \
73 -march=i386 -mregparm=3 \ 73 -march=i386 -mregparm=3 \
74 -include $(srctree)/$(src)/../../boot/code16gcc.h \ 74 -include $(srctree)/$(src)/../../boot/code16gcc.h \
75 -fno-strict-aliasing -fomit-frame-pointer -fno-pic \ 75 -fno-strict-aliasing -fomit-frame-pointer -fno-pic \
76 -mno-mmx -mno-sse \
76 $(call cc-option, -ffreestanding) \ 77 $(call cc-option, -ffreestanding) \
77 $(call cc-option, -fno-toplevel-reorder,\ 78 $(call cc-option, -fno-toplevel-reorder,\
78 $(call cc-option, -fno-unit-at-a-time)) \ 79 $(call cc-option, -fno-unit-at-a-time)) \
79 $(call cc-option, -fno-stack-protector) \ 80 $(call cc-option, -fno-stack-protector) \
80 $(call cc-option, -mpreferred-stack-boundary=2) 81 $(call cc-option, -mpreferred-stack-boundary=2)
81KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__ 82KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h
index 1610b22edf09..86154eab9523 100644
--- a/block/blk-cgroup.h
+++ b/block/blk-cgroup.h
@@ -435,9 +435,9 @@ static inline uint64_t blkg_stat_read(struct blkg_stat *stat)
435 uint64_t v; 435 uint64_t v;
436 436
437 do { 437 do {
438 start = u64_stats_fetch_begin(&stat->syncp); 438 start = u64_stats_fetch_begin_bh(&stat->syncp);
439 v = stat->cnt; 439 v = stat->cnt;
440 } while (u64_stats_fetch_retry(&stat->syncp, start)); 440 } while (u64_stats_fetch_retry_bh(&stat->syncp, start));
441 441
442 return v; 442 return v;
443} 443}
@@ -508,9 +508,9 @@ static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat)
508 struct blkg_rwstat tmp; 508 struct blkg_rwstat tmp;
509 509
510 do { 510 do {
511 start = u64_stats_fetch_begin(&rwstat->syncp); 511 start = u64_stats_fetch_begin_bh(&rwstat->syncp);
512 tmp = *rwstat; 512 tmp = *rwstat;
513 } while (u64_stats_fetch_retry(&rwstat->syncp, start)); 513 } while (u64_stats_fetch_retry_bh(&rwstat->syncp, start));
514 514
515 return tmp; 515 return tmp;
516} 516}
diff --git a/block/blk-flush.c b/block/blk-flush.c
index 331e627301ea..fb6f3c0ffa49 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -502,15 +502,6 @@ void blk_abort_flushes(struct request_queue *q)
502 } 502 }
503} 503}
504 504
505static void bio_end_flush(struct bio *bio, int err)
506{
507 if (err)
508 clear_bit(BIO_UPTODATE, &bio->bi_flags);
509 if (bio->bi_private)
510 complete(bio->bi_private);
511 bio_put(bio);
512}
513
514/** 505/**
515 * blkdev_issue_flush - queue a flush 506 * blkdev_issue_flush - queue a flush
516 * @bdev: blockdev to issue flush for 507 * @bdev: blockdev to issue flush for
@@ -526,7 +517,6 @@ static void bio_end_flush(struct bio *bio, int err)
526int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask, 517int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
527 sector_t *error_sector) 518 sector_t *error_sector)
528{ 519{
529 DECLARE_COMPLETION_ONSTACK(wait);
530 struct request_queue *q; 520 struct request_queue *q;
531 struct bio *bio; 521 struct bio *bio;
532 int ret = 0; 522 int ret = 0;
@@ -548,13 +538,9 @@ int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
548 return -ENXIO; 538 return -ENXIO;
549 539
550 bio = bio_alloc(gfp_mask, 0); 540 bio = bio_alloc(gfp_mask, 0);
551 bio->bi_end_io = bio_end_flush;
552 bio->bi_bdev = bdev; 541 bio->bi_bdev = bdev;
553 bio->bi_private = &wait;
554 542
555 bio_get(bio); 543 ret = submit_bio_wait(WRITE_FLUSH, bio);
556 submit_bio(WRITE_FLUSH, bio);
557 wait_for_completion_io(&wait);
558 544
559 /* 545 /*
560 * The driver must store the error location in ->bi_sector, if 546 * The driver must store the error location in ->bi_sector, if
@@ -564,9 +550,6 @@ int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
564 if (error_sector) 550 if (error_sector)
565 *error_sector = bio->bi_sector; 551 *error_sector = bio->bi_sector;
566 552
567 if (!bio_flagged(bio, BIO_UPTODATE))
568 ret = -EIO;
569
570 bio_put(bio); 553 bio_put(bio);
571 return ret; 554 return ret;
572} 555}
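
For reference, a minimal sketch of the pattern the blk-flush.c hunks above switch to: rather than open-coding a completion and a private bi_end_io handler, the caller allocates an empty bio and lets submit_bio_wait() sleep until the flush finishes and return the error code directly. The wrapper name example_issue_flush() is illustrative only; bio_alloc(), submit_bio_wait() and bio_put() are the calls used in the hunk.

	/* Illustrative only: issue an empty flush to @bdev and wait for it. */
	static int example_issue_flush(struct block_device *bdev, gfp_t gfp_mask)
	{
		struct bio *bio;
		int ret;

		bio = bio_alloc(gfp_mask, 0);	/* zero data pages, flush only */
		if (!bio)
			return -ENOMEM;
		bio->bi_bdev = bdev;

		/* Sleeps until completion; returns 0 or a negative error. */
		ret = submit_bio_wait(WRITE_FLUSH, bio);

		bio_put(bio);
		return ret;
	}
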
diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c
index ba6cf8e9aa0a..b91ce75bd35d 100644
--- a/block/blk-mq-sysfs.c
+++ b/block/blk-mq-sysfs.c
@@ -335,9 +335,22 @@ static struct kobj_type blk_mq_hw_ktype = {
335void blk_mq_unregister_disk(struct gendisk *disk) 335void blk_mq_unregister_disk(struct gendisk *disk)
336{ 336{
337 struct request_queue *q = disk->queue; 337 struct request_queue *q = disk->queue;
338 struct blk_mq_hw_ctx *hctx;
339 struct blk_mq_ctx *ctx;
340 int i, j;
341
342 queue_for_each_hw_ctx(q, hctx, i) {
343 hctx_for_each_ctx(hctx, ctx, j) {
344 kobject_del(&ctx->kobj);
345 kobject_put(&ctx->kobj);
346 }
347 kobject_del(&hctx->kobj);
348 kobject_put(&hctx->kobj);
349 }
338 350
339 kobject_uevent(&q->mq_kobj, KOBJ_REMOVE); 351 kobject_uevent(&q->mq_kobj, KOBJ_REMOVE);
340 kobject_del(&q->mq_kobj); 352 kobject_del(&q->mq_kobj);
353 kobject_put(&q->mq_kobj);
341 354
342 kobject_put(&disk_to_dev(disk)->kobj); 355 kobject_put(&disk_to_dev(disk)->kobj);
343} 356}
diff --git a/block/blk-mq.c b/block/blk-mq.c
index cdc629cf075b..c79126e11030 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -202,10 +202,12 @@ static struct request *blk_mq_alloc_request_pinned(struct request_queue *q,
202 if (rq) { 202 if (rq) {
203 blk_mq_rq_ctx_init(q, ctx, rq, rw); 203 blk_mq_rq_ctx_init(q, ctx, rq, rw);
204 break; 204 break;
205 } else if (!(gfp & __GFP_WAIT)) 205 }
206 break;
207 206
208 blk_mq_put_ctx(ctx); 207 blk_mq_put_ctx(ctx);
208 if (!(gfp & __GFP_WAIT))
209 break;
210
209 __blk_mq_run_hw_queue(hctx); 211 __blk_mq_run_hw_queue(hctx);
210 blk_mq_wait_for_tags(hctx->tags); 212 blk_mq_wait_for_tags(hctx->tags);
211 } while (1); 213 } while (1);
@@ -222,7 +224,8 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
222 return NULL; 224 return NULL;
223 225
224 rq = blk_mq_alloc_request_pinned(q, rw, gfp, reserved); 226 rq = blk_mq_alloc_request_pinned(q, rw, gfp, reserved);
225 blk_mq_put_ctx(rq->mq_ctx); 227 if (rq)
228 blk_mq_put_ctx(rq->mq_ctx);
226 return rq; 229 return rq;
227} 230}
228 231
@@ -235,7 +238,8 @@ struct request *blk_mq_alloc_reserved_request(struct request_queue *q, int rw,
235 return NULL; 238 return NULL;
236 239
237 rq = blk_mq_alloc_request_pinned(q, rw, gfp, true); 240 rq = blk_mq_alloc_request_pinned(q, rw, gfp, true);
238 blk_mq_put_ctx(rq->mq_ctx); 241 if (rq)
242 blk_mq_put_ctx(rq->mq_ctx);
239 return rq; 243 return rq;
240} 244}
241EXPORT_SYMBOL(blk_mq_alloc_reserved_request); 245EXPORT_SYMBOL(blk_mq_alloc_reserved_request);
@@ -308,12 +312,12 @@ void blk_mq_complete_request(struct request *rq, int error)
308 312
309 blk_account_io_completion(rq, bytes); 313 blk_account_io_completion(rq, bytes);
310 314
315 blk_account_io_done(rq);
316
311 if (rq->end_io) 317 if (rq->end_io)
312 rq->end_io(rq, error); 318 rq->end_io(rq, error);
313 else 319 else
314 blk_mq_free_request(rq); 320 blk_mq_free_request(rq);
315
316 blk_account_io_done(rq);
317} 321}
318 322
319void __blk_mq_end_io(struct request *rq, int error) 323void __blk_mq_end_io(struct request *rq, int error)
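
For reference, a minimal sketch of what the NULL checks added above mean for callers: when the gfp mask does not allow sleeping, blk_mq_alloc_request() can legitimately come back empty (no tag available), and the fixed code now returns NULL instead of dereferencing rq->mq_ctx. The wrapper example_try_get_rq() and the GFP_ATOMIC choice are assumptions for illustration.

	/* Illustrative only: non-blocking allocation must tolerate NULL. */
	static struct request *example_try_get_rq(struct request_queue *q, int rw)
	{
		struct request *rq;

		rq = blk_mq_alloc_request(q, rw, GFP_ATOMIC, false);
		if (!rq)
			return NULL;	/* no free tag and we may not sleep */

		return rq;
	}
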
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 4ae5734fb473..7bcb70d216e1 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -174,9 +174,8 @@ config CRYPTO_TEST
174 help 174 help
175 Quick & dirty crypto test module. 175 Quick & dirty crypto test module.
176 176
177config CRYPTO_ABLK_HELPER_X86 177config CRYPTO_ABLK_HELPER
178 tristate 178 tristate
179 depends on X86
180 select CRYPTO_CRYPTD 179 select CRYPTO_CRYPTD
181 180
182config CRYPTO_GLUE_HELPER_X86 181config CRYPTO_GLUE_HELPER_X86
@@ -695,7 +694,7 @@ config CRYPTO_AES_NI_INTEL
695 select CRYPTO_AES_X86_64 if 64BIT 694 select CRYPTO_AES_X86_64 if 64BIT
696 select CRYPTO_AES_586 if !64BIT 695 select CRYPTO_AES_586 if !64BIT
697 select CRYPTO_CRYPTD 696 select CRYPTO_CRYPTD
698 select CRYPTO_ABLK_HELPER_X86 697 select CRYPTO_ABLK_HELPER
699 select CRYPTO_ALGAPI 698 select CRYPTO_ALGAPI
700 select CRYPTO_GLUE_HELPER_X86 if 64BIT 699 select CRYPTO_GLUE_HELPER_X86 if 64BIT
701 select CRYPTO_LRW 700 select CRYPTO_LRW
@@ -895,7 +894,7 @@ config CRYPTO_CAMELLIA_AESNI_AVX_X86_64
895 depends on CRYPTO 894 depends on CRYPTO
896 select CRYPTO_ALGAPI 895 select CRYPTO_ALGAPI
897 select CRYPTO_CRYPTD 896 select CRYPTO_CRYPTD
898 select CRYPTO_ABLK_HELPER_X86 897 select CRYPTO_ABLK_HELPER
899 select CRYPTO_GLUE_HELPER_X86 898 select CRYPTO_GLUE_HELPER_X86
900 select CRYPTO_CAMELLIA_X86_64 899 select CRYPTO_CAMELLIA_X86_64
901 select CRYPTO_LRW 900 select CRYPTO_LRW
@@ -917,7 +916,7 @@ config CRYPTO_CAMELLIA_AESNI_AVX2_X86_64
917 depends on CRYPTO 916 depends on CRYPTO
918 select CRYPTO_ALGAPI 917 select CRYPTO_ALGAPI
919 select CRYPTO_CRYPTD 918 select CRYPTO_CRYPTD
920 select CRYPTO_ABLK_HELPER_X86 919 select CRYPTO_ABLK_HELPER
921 select CRYPTO_GLUE_HELPER_X86 920 select CRYPTO_GLUE_HELPER_X86
922 select CRYPTO_CAMELLIA_X86_64 921 select CRYPTO_CAMELLIA_X86_64
923 select CRYPTO_CAMELLIA_AESNI_AVX_X86_64 922 select CRYPTO_CAMELLIA_AESNI_AVX_X86_64
@@ -969,7 +968,7 @@ config CRYPTO_CAST5_AVX_X86_64
969 depends on X86 && 64BIT 968 depends on X86 && 64BIT
970 select CRYPTO_ALGAPI 969 select CRYPTO_ALGAPI
971 select CRYPTO_CRYPTD 970 select CRYPTO_CRYPTD
972 select CRYPTO_ABLK_HELPER_X86 971 select CRYPTO_ABLK_HELPER
973 select CRYPTO_CAST_COMMON 972 select CRYPTO_CAST_COMMON
974 select CRYPTO_CAST5 973 select CRYPTO_CAST5
975 help 974 help
@@ -992,7 +991,7 @@ config CRYPTO_CAST6_AVX_X86_64
992 depends on X86 && 64BIT 991 depends on X86 && 64BIT
993 select CRYPTO_ALGAPI 992 select CRYPTO_ALGAPI
994 select CRYPTO_CRYPTD 993 select CRYPTO_CRYPTD
995 select CRYPTO_ABLK_HELPER_X86 994 select CRYPTO_ABLK_HELPER
996 select CRYPTO_GLUE_HELPER_X86 995 select CRYPTO_GLUE_HELPER_X86
997 select CRYPTO_CAST_COMMON 996 select CRYPTO_CAST_COMMON
998 select CRYPTO_CAST6 997 select CRYPTO_CAST6
@@ -1110,7 +1109,7 @@ config CRYPTO_SERPENT_SSE2_X86_64
1110 depends on X86 && 64BIT 1109 depends on X86 && 64BIT
1111 select CRYPTO_ALGAPI 1110 select CRYPTO_ALGAPI
1112 select CRYPTO_CRYPTD 1111 select CRYPTO_CRYPTD
1113 select CRYPTO_ABLK_HELPER_X86 1112 select CRYPTO_ABLK_HELPER
1114 select CRYPTO_GLUE_HELPER_X86 1113 select CRYPTO_GLUE_HELPER_X86
1115 select CRYPTO_SERPENT 1114 select CRYPTO_SERPENT
1116 select CRYPTO_LRW 1115 select CRYPTO_LRW
@@ -1132,7 +1131,7 @@ config CRYPTO_SERPENT_SSE2_586
1132 depends on X86 && !64BIT 1131 depends on X86 && !64BIT
1133 select CRYPTO_ALGAPI 1132 select CRYPTO_ALGAPI
1134 select CRYPTO_CRYPTD 1133 select CRYPTO_CRYPTD
1135 select CRYPTO_ABLK_HELPER_X86 1134 select CRYPTO_ABLK_HELPER
1136 select CRYPTO_GLUE_HELPER_X86 1135 select CRYPTO_GLUE_HELPER_X86
1137 select CRYPTO_SERPENT 1136 select CRYPTO_SERPENT
1138 select CRYPTO_LRW 1137 select CRYPTO_LRW
@@ -1154,7 +1153,7 @@ config CRYPTO_SERPENT_AVX_X86_64
1154 depends on X86 && 64BIT 1153 depends on X86 && 64BIT
1155 select CRYPTO_ALGAPI 1154 select CRYPTO_ALGAPI
1156 select CRYPTO_CRYPTD 1155 select CRYPTO_CRYPTD
1157 select CRYPTO_ABLK_HELPER_X86 1156 select CRYPTO_ABLK_HELPER
1158 select CRYPTO_GLUE_HELPER_X86 1157 select CRYPTO_GLUE_HELPER_X86
1159 select CRYPTO_SERPENT 1158 select CRYPTO_SERPENT
1160 select CRYPTO_LRW 1159 select CRYPTO_LRW
@@ -1176,7 +1175,7 @@ config CRYPTO_SERPENT_AVX2_X86_64
1176 depends on X86 && 64BIT 1175 depends on X86 && 64BIT
1177 select CRYPTO_ALGAPI 1176 select CRYPTO_ALGAPI
1178 select CRYPTO_CRYPTD 1177 select CRYPTO_CRYPTD
1179 select CRYPTO_ABLK_HELPER_X86 1178 select CRYPTO_ABLK_HELPER
1180 select CRYPTO_GLUE_HELPER_X86 1179 select CRYPTO_GLUE_HELPER_X86
1181 select CRYPTO_SERPENT 1180 select CRYPTO_SERPENT
1182 select CRYPTO_SERPENT_AVX_X86_64 1181 select CRYPTO_SERPENT_AVX_X86_64
@@ -1292,7 +1291,7 @@ config CRYPTO_TWOFISH_AVX_X86_64
1292 depends on X86 && 64BIT 1291 depends on X86 && 64BIT
1293 select CRYPTO_ALGAPI 1292 select CRYPTO_ALGAPI
1294 select CRYPTO_CRYPTD 1293 select CRYPTO_CRYPTD
1295 select CRYPTO_ABLK_HELPER_X86 1294 select CRYPTO_ABLK_HELPER
1296 select CRYPTO_GLUE_HELPER_X86 1295 select CRYPTO_GLUE_HELPER_X86
1297 select CRYPTO_TWOFISH_COMMON 1296 select CRYPTO_TWOFISH_COMMON
1298 select CRYPTO_TWOFISH_X86_64 1297 select CRYPTO_TWOFISH_X86_64
diff --git a/crypto/Makefile b/crypto/Makefile
index b3a7e807e08b..989c510da8cc 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -2,8 +2,13 @@
2# Cryptographic API 2# Cryptographic API
3# 3#
4 4
5# memneq MUST be built with -Os or -O0 to prevent early-return optimizations
6# that will defeat memneq's actual purpose to prevent timing attacks.
7CFLAGS_REMOVE_memneq.o := -O1 -O2 -O3
8CFLAGS_memneq.o := -Os
9
5obj-$(CONFIG_CRYPTO) += crypto.o 10obj-$(CONFIG_CRYPTO) += crypto.o
6crypto-y := api.o cipher.o compress.o 11crypto-y := api.o cipher.o compress.o memneq.o
7 12
8obj-$(CONFIG_CRYPTO_WORKQUEUE) += crypto_wq.o 13obj-$(CONFIG_CRYPTO_WORKQUEUE) += crypto_wq.o
9 14
@@ -105,3 +110,4 @@ obj-$(CONFIG_XOR_BLOCKS) += xor.o
105obj-$(CONFIG_ASYNC_CORE) += async_tx/ 110obj-$(CONFIG_ASYNC_CORE) += async_tx/
106obj-$(CONFIG_ASYMMETRIC_KEY_TYPE) += asymmetric_keys/ 111obj-$(CONFIG_ASYMMETRIC_KEY_TYPE) += asymmetric_keys/
107obj-$(CONFIG_CRYPTO_HASH_INFO) += hash_info.o 112obj-$(CONFIG_CRYPTO_HASH_INFO) += hash_info.o
113obj-$(CONFIG_CRYPTO_ABLK_HELPER) += ablk_helper.o
diff --git a/arch/x86/crypto/ablk_helper.c b/crypto/ablk_helper.c
index 43282fe04a8b..ffe7278d4bd8 100644
--- a/arch/x86/crypto/ablk_helper.c
+++ b/crypto/ablk_helper.c
@@ -28,10 +28,11 @@
28#include <linux/crypto.h> 28#include <linux/crypto.h>
29#include <linux/init.h> 29#include <linux/init.h>
30#include <linux/module.h> 30#include <linux/module.h>
31#include <linux/hardirq.h>
31#include <crypto/algapi.h> 32#include <crypto/algapi.h>
32#include <crypto/cryptd.h> 33#include <crypto/cryptd.h>
33#include <asm/i387.h> 34#include <crypto/ablk_helper.h>
34#include <asm/crypto/ablk_helper.h> 35#include <asm/simd.h>
35 36
36int ablk_set_key(struct crypto_ablkcipher *tfm, const u8 *key, 37int ablk_set_key(struct crypto_ablkcipher *tfm, const u8 *key,
37 unsigned int key_len) 38 unsigned int key_len)
@@ -70,11 +71,11 @@ int ablk_encrypt(struct ablkcipher_request *req)
70 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); 71 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
71 struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm); 72 struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm);
72 73
73 if (!irq_fpu_usable()) { 74 if (!may_use_simd()) {
74 struct ablkcipher_request *cryptd_req = 75 struct ablkcipher_request *cryptd_req =
75 ablkcipher_request_ctx(req); 76 ablkcipher_request_ctx(req);
76 77
77 memcpy(cryptd_req, req, sizeof(*req)); 78 *cryptd_req = *req;
78 ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base); 79 ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
79 80
80 return crypto_ablkcipher_encrypt(cryptd_req); 81 return crypto_ablkcipher_encrypt(cryptd_req);
@@ -89,11 +90,11 @@ int ablk_decrypt(struct ablkcipher_request *req)
89 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); 90 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
90 struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm); 91 struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm);
91 92
92 if (!irq_fpu_usable()) { 93 if (!may_use_simd()) {
93 struct ablkcipher_request *cryptd_req = 94 struct ablkcipher_request *cryptd_req =
94 ablkcipher_request_ctx(req); 95 ablkcipher_request_ctx(req);
95 96
96 memcpy(cryptd_req, req, sizeof(*req)); 97 *cryptd_req = *req;
97 ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base); 98 ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
98 99
99 return crypto_ablkcipher_decrypt(cryptd_req); 100 return crypto_ablkcipher_decrypt(cryptd_req);
diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c
index 7d4a8d28277e..40886c489903 100644
--- a/crypto/ablkcipher.c
+++ b/crypto/ablkcipher.c
@@ -16,9 +16,7 @@
16#include <crypto/internal/skcipher.h> 16#include <crypto/internal/skcipher.h>
17#include <linux/cpumask.h> 17#include <linux/cpumask.h>
18#include <linux/err.h> 18#include <linux/err.h>
19#include <linux/init.h>
20#include <linux/kernel.h> 19#include <linux/kernel.h>
21#include <linux/module.h>
22#include <linux/rtnetlink.h> 20#include <linux/rtnetlink.h>
23#include <linux/sched.h> 21#include <linux/sched.h>
24#include <linux/slab.h> 22#include <linux/slab.h>
@@ -30,8 +28,6 @@
30 28
31#include "internal.h" 29#include "internal.h"
32 30
33static const char *skcipher_default_geniv __read_mostly;
34
35struct ablkcipher_buffer { 31struct ablkcipher_buffer {
36 struct list_head entry; 32 struct list_head entry;
37 struct scatter_walk dst; 33 struct scatter_walk dst;
@@ -527,8 +523,7 @@ const char *crypto_default_geniv(const struct crypto_alg *alg)
527 alg->cra_blocksize) 523 alg->cra_blocksize)
528 return "chainiv"; 524 return "chainiv";
529 525
530 return alg->cra_flags & CRYPTO_ALG_ASYNC ? 526 return "eseqiv";
531 "eseqiv" : skcipher_default_geniv;
532} 527}
533 528
534static int crypto_givcipher_default(struct crypto_alg *alg, u32 type, u32 mask) 529static int crypto_givcipher_default(struct crypto_alg *alg, u32 type, u32 mask)
@@ -709,17 +704,3 @@ err:
709 return ERR_PTR(err); 704 return ERR_PTR(err);
710} 705}
711EXPORT_SYMBOL_GPL(crypto_alloc_ablkcipher); 706EXPORT_SYMBOL_GPL(crypto_alloc_ablkcipher);
712
713static int __init skcipher_module_init(void)
714{
715 skcipher_default_geniv = num_possible_cpus() > 1 ?
716 "eseqiv" : "chainiv";
717 return 0;
718}
719
720static void skcipher_module_exit(void)
721{
722}
723
724module_init(skcipher_module_init);
725module_exit(skcipher_module_exit);
diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c
index ef5356cd280a..850246206b12 100644
--- a/crypto/algif_hash.c
+++ b/crypto/algif_hash.c
@@ -114,6 +114,9 @@ static ssize_t hash_sendpage(struct socket *sock, struct page *page,
114 struct hash_ctx *ctx = ask->private; 114 struct hash_ctx *ctx = ask->private;
115 int err; 115 int err;
116 116
117 if (flags & MSG_SENDPAGE_NOTLAST)
118 flags |= MSG_MORE;
119
117 lock_sock(sk); 120 lock_sock(sk);
118 sg_init_table(ctx->sgl.sg, 1); 121 sg_init_table(ctx->sgl.sg, 1);
119 sg_set_page(ctx->sgl.sg, page, size, offset); 122 sg_set_page(ctx->sgl.sg, page, size, offset);
diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
index 6a6dfc062d2a..a19c027b29bd 100644
--- a/crypto/algif_skcipher.c
+++ b/crypto/algif_skcipher.c
@@ -378,6 +378,9 @@ static ssize_t skcipher_sendpage(struct socket *sock, struct page *page,
378 struct skcipher_sg_list *sgl; 378 struct skcipher_sg_list *sgl;
379 int err = -EINVAL; 379 int err = -EINVAL;
380 380
381 if (flags & MSG_SENDPAGE_NOTLAST)
382 flags |= MSG_MORE;
383
381 lock_sock(sk); 384 lock_sock(sk);
382 if (!ctx->more && ctx->used) 385 if (!ctx->more && ctx->used)
383 goto unlock; 386 goto unlock;
diff --git a/crypto/ansi_cprng.c b/crypto/ansi_cprng.c
index c0bb3778f1ae..666f1962a160 100644
--- a/crypto/ansi_cprng.c
+++ b/crypto/ansi_cprng.c
@@ -230,11 +230,11 @@ remainder:
230 */ 230 */
231 if (byte_count < DEFAULT_BLK_SZ) { 231 if (byte_count < DEFAULT_BLK_SZ) {
232empty_rbuf: 232empty_rbuf:
233 for (; ctx->rand_data_valid < DEFAULT_BLK_SZ; 233 while (ctx->rand_data_valid < DEFAULT_BLK_SZ) {
234 ctx->rand_data_valid++) {
235 *ptr = ctx->rand_data[ctx->rand_data_valid]; 234 *ptr = ctx->rand_data[ctx->rand_data_valid];
236 ptr++; 235 ptr++;
237 byte_count--; 236 byte_count--;
237 ctx->rand_data_valid++;
238 if (byte_count == 0) 238 if (byte_count == 0)
239 goto done; 239 goto done;
240 } 240 }
diff --git a/crypto/asymmetric_keys/rsa.c b/crypto/asymmetric_keys/rsa.c
index 90a17f59ba28..459cf97a75e2 100644
--- a/crypto/asymmetric_keys/rsa.c
+++ b/crypto/asymmetric_keys/rsa.c
@@ -13,6 +13,7 @@
13#include <linux/module.h> 13#include <linux/module.h>
14#include <linux/kernel.h> 14#include <linux/kernel.h>
15#include <linux/slab.h> 15#include <linux/slab.h>
16#include <crypto/algapi.h>
16#include "public_key.h" 17#include "public_key.h"
17 18
18MODULE_LICENSE("GPL"); 19MODULE_LICENSE("GPL");
@@ -189,12 +190,12 @@ static int RSA_verify(const u8 *H, const u8 *EM, size_t k, size_t hash_size,
189 } 190 }
190 } 191 }
191 192
192 if (memcmp(asn1_template, EM + T_offset, asn1_size) != 0) { 193 if (crypto_memneq(asn1_template, EM + T_offset, asn1_size) != 0) {
193 kleave(" = -EBADMSG [EM[T] ASN.1 mismatch]"); 194 kleave(" = -EBADMSG [EM[T] ASN.1 mismatch]");
194 return -EBADMSG; 195 return -EBADMSG;
195 } 196 }
196 197
197 if (memcmp(H, EM + T_offset + asn1_size, hash_size) != 0) { 198 if (crypto_memneq(H, EM + T_offset + asn1_size, hash_size) != 0) {
198 kleave(" = -EKEYREJECTED [EM[T] hash mismatch]"); 199 kleave(" = -EKEYREJECTED [EM[T] hash mismatch]");
199 return -EKEYREJECTED; 200 return -EKEYREJECTED;
200 } 201 }
diff --git a/crypto/asymmetric_keys/x509_public_key.c b/crypto/asymmetric_keys/x509_public_key.c
index f83300b6e8c1..382ef0d2ff2e 100644
--- a/crypto/asymmetric_keys/x509_public_key.c
+++ b/crypto/asymmetric_keys/x509_public_key.c
@@ -18,60 +18,12 @@
18#include <linux/asn1_decoder.h> 18#include <linux/asn1_decoder.h>
19#include <keys/asymmetric-subtype.h> 19#include <keys/asymmetric-subtype.h>
20#include <keys/asymmetric-parser.h> 20#include <keys/asymmetric-parser.h>
21#include <keys/system_keyring.h>
22#include <crypto/hash.h> 21#include <crypto/hash.h>
23#include "asymmetric_keys.h" 22#include "asymmetric_keys.h"
24#include "public_key.h" 23#include "public_key.h"
25#include "x509_parser.h" 24#include "x509_parser.h"
26 25
27/* 26/*
28 * Find a key in the given keyring by issuer and authority.
29 */
30static struct key *x509_request_asymmetric_key(
31 struct key *keyring,
32 const char *signer, size_t signer_len,
33 const char *authority, size_t auth_len)
34{
35 key_ref_t key;
36 char *id;
37
38 /* Construct an identifier. */
39 id = kmalloc(signer_len + 2 + auth_len + 1, GFP_KERNEL);
40 if (!id)
41 return ERR_PTR(-ENOMEM);
42
43 memcpy(id, signer, signer_len);
44 id[signer_len + 0] = ':';
45 id[signer_len + 1] = ' ';
46 memcpy(id + signer_len + 2, authority, auth_len);
47 id[signer_len + 2 + auth_len] = 0;
48
49 pr_debug("Look up: \"%s\"\n", id);
50
51 key = keyring_search(make_key_ref(keyring, 1),
52 &key_type_asymmetric, id);
53 if (IS_ERR(key))
54 pr_debug("Request for module key '%s' err %ld\n",
55 id, PTR_ERR(key));
56 kfree(id);
57
58 if (IS_ERR(key)) {
59 switch (PTR_ERR(key)) {
60 /* Hide some search errors */
61 case -EACCES:
62 case -ENOTDIR:
63 case -EAGAIN:
64 return ERR_PTR(-ENOKEY);
65 default:
66 return ERR_CAST(key);
67 }
68 }
69
70 pr_devel("<==%s() = 0 [%x]\n", __func__, key_serial(key_ref_to_ptr(key)));
71 return key_ref_to_ptr(key);
72}
73
74/*
75 * Set up the signature parameters in an X.509 certificate. This involves 27 * Set up the signature parameters in an X.509 certificate. This involves
76 * digesting the signed data and extracting the signature. 28 * digesting the signed data and extracting the signature.
77 */ 29 */
@@ -151,33 +103,6 @@ int x509_check_signature(const struct public_key *pub,
151EXPORT_SYMBOL_GPL(x509_check_signature); 103EXPORT_SYMBOL_GPL(x509_check_signature);
152 104
153/* 105/*
154 * Check the new certificate against the ones in the trust keyring. If one of
155 * those is the signing key and validates the new certificate, then mark the
156 * new certificate as being trusted.
157 *
158 * Return 0 if the new certificate was successfully validated, 1 if we couldn't
159 * find a matching parent certificate in the trusted list and an error if there
160 * is a matching certificate but the signature check fails.
161 */
162static int x509_validate_trust(struct x509_certificate *cert,
163 struct key *trust_keyring)
164{
165 const struct public_key *pk;
166 struct key *key;
167 int ret = 1;
168
169 key = x509_request_asymmetric_key(trust_keyring,
170 cert->issuer, strlen(cert->issuer),
171 cert->authority,
172 strlen(cert->authority));
173 if (!IS_ERR(key)) {
174 pk = key->payload.data;
175 ret = x509_check_signature(pk, cert);
176 }
177 return ret;
178}
179
180/*
181 * Attempt to parse a data blob for a key as an X509 certificate. 106 * Attempt to parse a data blob for a key as an X509 certificate.
182 */ 107 */
183static int x509_key_preparse(struct key_preparsed_payload *prep) 108static int x509_key_preparse(struct key_preparsed_payload *prep)
@@ -230,13 +155,9 @@ static int x509_key_preparse(struct key_preparsed_payload *prep)
230 /* Check the signature on the key if it appears to be self-signed */ 155 /* Check the signature on the key if it appears to be self-signed */
231 if (!cert->authority || 156 if (!cert->authority ||
232 strcmp(cert->fingerprint, cert->authority) == 0) { 157 strcmp(cert->fingerprint, cert->authority) == 0) {
233 ret = x509_check_signature(cert->pub, cert); /* self-signed */ 158 ret = x509_check_signature(cert->pub, cert);
234 if (ret < 0) 159 if (ret < 0)
235 goto error_free_cert; 160 goto error_free_cert;
236 } else {
237 ret = x509_validate_trust(cert, system_trusted_keyring);
238 if (!ret)
239 prep->trusted = 1;
240 } 161 }
241 162
242 /* Propose a description */ 163 /* Propose a description */
diff --git a/crypto/authenc.c b/crypto/authenc.c
index ffce19de05cf..e1223559d5df 100644
--- a/crypto/authenc.c
+++ b/crypto/authenc.c
@@ -52,40 +52,52 @@ static void authenc_request_complete(struct aead_request *req, int err)
52 aead_request_complete(req, err); 52 aead_request_complete(req, err);
53} 53}
54 54
55static int crypto_authenc_setkey(struct crypto_aead *authenc, const u8 *key, 55int crypto_authenc_extractkeys(struct crypto_authenc_keys *keys, const u8 *key,
56 unsigned int keylen) 56 unsigned int keylen)
57{ 57{
58 unsigned int authkeylen; 58 struct rtattr *rta = (struct rtattr *)key;
59 unsigned int enckeylen;
60 struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
61 struct crypto_ahash *auth = ctx->auth;
62 struct crypto_ablkcipher *enc = ctx->enc;
63 struct rtattr *rta = (void *)key;
64 struct crypto_authenc_key_param *param; 59 struct crypto_authenc_key_param *param;
65 int err = -EINVAL;
66 60
67 if (!RTA_OK(rta, keylen)) 61 if (!RTA_OK(rta, keylen))
68 goto badkey; 62 return -EINVAL;
69 if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM) 63 if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
70 goto badkey; 64 return -EINVAL;
71 if (RTA_PAYLOAD(rta) < sizeof(*param)) 65 if (RTA_PAYLOAD(rta) < sizeof(*param))
72 goto badkey; 66 return -EINVAL;
73 67
74 param = RTA_DATA(rta); 68 param = RTA_DATA(rta);
75 enckeylen = be32_to_cpu(param->enckeylen); 69 keys->enckeylen = be32_to_cpu(param->enckeylen);
76 70
77 key += RTA_ALIGN(rta->rta_len); 71 key += RTA_ALIGN(rta->rta_len);
78 keylen -= RTA_ALIGN(rta->rta_len); 72 keylen -= RTA_ALIGN(rta->rta_len);
79 73
80 if (keylen < enckeylen) 74 if (keylen < keys->enckeylen)
81 goto badkey; 75 return -EINVAL;
82 76
83 authkeylen = keylen - enckeylen; 77 keys->authkeylen = keylen - keys->enckeylen;
78 keys->authkey = key;
79 keys->enckey = key + keys->authkeylen;
80
81 return 0;
82}
83EXPORT_SYMBOL_GPL(crypto_authenc_extractkeys);
84
85static int crypto_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
86 unsigned int keylen)
87{
88 struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
89 struct crypto_ahash *auth = ctx->auth;
90 struct crypto_ablkcipher *enc = ctx->enc;
91 struct crypto_authenc_keys keys;
92 int err = -EINVAL;
93
94 if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
95 goto badkey;
84 96
85 crypto_ahash_clear_flags(auth, CRYPTO_TFM_REQ_MASK); 97 crypto_ahash_clear_flags(auth, CRYPTO_TFM_REQ_MASK);
86 crypto_ahash_set_flags(auth, crypto_aead_get_flags(authenc) & 98 crypto_ahash_set_flags(auth, crypto_aead_get_flags(authenc) &
87 CRYPTO_TFM_REQ_MASK); 99 CRYPTO_TFM_REQ_MASK);
88 err = crypto_ahash_setkey(auth, key, authkeylen); 100 err = crypto_ahash_setkey(auth, keys.authkey, keys.authkeylen);
89 crypto_aead_set_flags(authenc, crypto_ahash_get_flags(auth) & 101 crypto_aead_set_flags(authenc, crypto_ahash_get_flags(auth) &
90 CRYPTO_TFM_RES_MASK); 102 CRYPTO_TFM_RES_MASK);
91 103
@@ -95,7 +107,7 @@ static int crypto_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
95 crypto_ablkcipher_clear_flags(enc, CRYPTO_TFM_REQ_MASK); 107 crypto_ablkcipher_clear_flags(enc, CRYPTO_TFM_REQ_MASK);
96 crypto_ablkcipher_set_flags(enc, crypto_aead_get_flags(authenc) & 108 crypto_ablkcipher_set_flags(enc, crypto_aead_get_flags(authenc) &
97 CRYPTO_TFM_REQ_MASK); 109 CRYPTO_TFM_REQ_MASK);
98 err = crypto_ablkcipher_setkey(enc, key + authkeylen, enckeylen); 110 err = crypto_ablkcipher_setkey(enc, keys.enckey, keys.enckeylen);
99 crypto_aead_set_flags(authenc, crypto_ablkcipher_get_flags(enc) & 111 crypto_aead_set_flags(authenc, crypto_ablkcipher_get_flags(enc) &
100 CRYPTO_TFM_RES_MASK); 112 CRYPTO_TFM_RES_MASK);
101 113
@@ -188,7 +200,7 @@ static void authenc_verify_ahash_update_done(struct crypto_async_request *areq,
188 scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen, 200 scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
189 authsize, 0); 201 authsize, 0);
190 202
191 err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG : 0; 203 err = crypto_memneq(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
192 if (err) 204 if (err)
193 goto out; 205 goto out;
194 206
@@ -227,7 +239,7 @@ static void authenc_verify_ahash_done(struct crypto_async_request *areq,
227 scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen, 239 scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
228 authsize, 0); 240 authsize, 0);
229 241
230 err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG : 0; 242 err = crypto_memneq(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
231 if (err) 243 if (err)
232 goto out; 244 goto out;
233 245
@@ -368,9 +380,10 @@ static void crypto_authenc_encrypt_done(struct crypto_async_request *req,
368 if (!err) { 380 if (!err) {
369 struct crypto_aead *authenc = crypto_aead_reqtfm(areq); 381 struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
370 struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc); 382 struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
371 struct ablkcipher_request *abreq = aead_request_ctx(areq); 383 struct authenc_request_ctx *areq_ctx = aead_request_ctx(areq);
372 u8 *iv = (u8 *)(abreq + 1) + 384 struct ablkcipher_request *abreq = (void *)(areq_ctx->tail
373 crypto_ablkcipher_reqsize(ctx->enc); 385 + ctx->reqoff);
386 u8 *iv = (u8 *)abreq - crypto_ablkcipher_ivsize(ctx->enc);
374 387
375 err = crypto_authenc_genicv(areq, iv, 0); 388 err = crypto_authenc_genicv(areq, iv, 0);
376 } 389 }
@@ -462,7 +475,7 @@ static int crypto_authenc_verify(struct aead_request *req,
462 ihash = ohash + authsize; 475 ihash = ohash + authsize;
463 scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen, 476 scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
464 authsize, 0); 477 authsize, 0);
465 return memcmp(ihash, ohash, authsize) ? -EBADMSG : 0; 478 return crypto_memneq(ihash, ohash, authsize) ? -EBADMSG : 0;
466} 479}
467 480
468static int crypto_authenc_iverify(struct aead_request *req, u8 *iv, 481static int crypto_authenc_iverify(struct aead_request *req, u8 *iv,
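
For reference, a minimal sketch of how a setkey implementation consumes the crypto_authenc_extractkeys() helper factored out above. The keys structure fields (authkey, authkeylen, enckey, enckeylen) are the ones filled in by the helper; the surrounding function name example_authenc_setkey() is illustrative only.

	/* Illustrative only: split an rtattr-encoded authenc key into parts. */
	static int example_authenc_setkey(const u8 *key, unsigned int keylen)
	{
		struct crypto_authenc_keys keys;

		if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
			return -EINVAL;

		/*
		 * keys.authkey / keys.authkeylen go to the hash transform,
		 * keys.enckey / keys.enckeylen go to the cipher transform,
		 * exactly as crypto_authenc_setkey() does above.
		 */
		return 0;
	}
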
diff --git a/crypto/authencesn.c b/crypto/authencesn.c
index ab53762fc309..4be0dd4373a9 100644
--- a/crypto/authencesn.c
+++ b/crypto/authencesn.c
@@ -59,37 +59,19 @@ static void authenc_esn_request_complete(struct aead_request *req, int err)
59static int crypto_authenc_esn_setkey(struct crypto_aead *authenc_esn, const u8 *key, 59static int crypto_authenc_esn_setkey(struct crypto_aead *authenc_esn, const u8 *key,
60 unsigned int keylen) 60 unsigned int keylen)
61{ 61{
62 unsigned int authkeylen;
63 unsigned int enckeylen;
64 struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn); 62 struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
65 struct crypto_ahash *auth = ctx->auth; 63 struct crypto_ahash *auth = ctx->auth;
66 struct crypto_ablkcipher *enc = ctx->enc; 64 struct crypto_ablkcipher *enc = ctx->enc;
67 struct rtattr *rta = (void *)key; 65 struct crypto_authenc_keys keys;
68 struct crypto_authenc_key_param *param;
69 int err = -EINVAL; 66 int err = -EINVAL;
70 67
71 if (!RTA_OK(rta, keylen)) 68 if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
72 goto badkey; 69 goto badkey;
73 if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
74 goto badkey;
75 if (RTA_PAYLOAD(rta) < sizeof(*param))
76 goto badkey;
77
78 param = RTA_DATA(rta);
79 enckeylen = be32_to_cpu(param->enckeylen);
80
81 key += RTA_ALIGN(rta->rta_len);
82 keylen -= RTA_ALIGN(rta->rta_len);
83
84 if (keylen < enckeylen)
85 goto badkey;
86
87 authkeylen = keylen - enckeylen;
88 70
89 crypto_ahash_clear_flags(auth, CRYPTO_TFM_REQ_MASK); 71 crypto_ahash_clear_flags(auth, CRYPTO_TFM_REQ_MASK);
90 crypto_ahash_set_flags(auth, crypto_aead_get_flags(authenc_esn) & 72 crypto_ahash_set_flags(auth, crypto_aead_get_flags(authenc_esn) &
91 CRYPTO_TFM_REQ_MASK); 73 CRYPTO_TFM_REQ_MASK);
92 err = crypto_ahash_setkey(auth, key, authkeylen); 74 err = crypto_ahash_setkey(auth, keys.authkey, keys.authkeylen);
93 crypto_aead_set_flags(authenc_esn, crypto_ahash_get_flags(auth) & 75 crypto_aead_set_flags(authenc_esn, crypto_ahash_get_flags(auth) &
94 CRYPTO_TFM_RES_MASK); 76 CRYPTO_TFM_RES_MASK);
95 77
@@ -99,7 +81,7 @@ static int crypto_authenc_esn_setkey(struct crypto_aead *authenc_esn, const u8 *
99 crypto_ablkcipher_clear_flags(enc, CRYPTO_TFM_REQ_MASK); 81 crypto_ablkcipher_clear_flags(enc, CRYPTO_TFM_REQ_MASK);
100 crypto_ablkcipher_set_flags(enc, crypto_aead_get_flags(authenc_esn) & 82 crypto_ablkcipher_set_flags(enc, crypto_aead_get_flags(authenc_esn) &
101 CRYPTO_TFM_REQ_MASK); 83 CRYPTO_TFM_REQ_MASK);
102 err = crypto_ablkcipher_setkey(enc, key + authkeylen, enckeylen); 84 err = crypto_ablkcipher_setkey(enc, keys.enckey, keys.enckeylen);
103 crypto_aead_set_flags(authenc_esn, crypto_ablkcipher_get_flags(enc) & 85 crypto_aead_set_flags(authenc_esn, crypto_ablkcipher_get_flags(enc) &
104 CRYPTO_TFM_RES_MASK); 86 CRYPTO_TFM_RES_MASK);
105 87
@@ -247,7 +229,7 @@ static void authenc_esn_verify_ahash_update_done(struct crypto_async_request *ar
247 scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen, 229 scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
248 authsize, 0); 230 authsize, 0);
249 231
250 err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG : 0; 232 err = crypto_memneq(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
251 if (err) 233 if (err)
252 goto out; 234 goto out;
253 235
@@ -296,7 +278,7 @@ static void authenc_esn_verify_ahash_update_done2(struct crypto_async_request *a
296 scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen, 278 scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
297 authsize, 0); 279 authsize, 0);
298 280
299 err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG : 0; 281 err = crypto_memneq(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
300 if (err) 282 if (err)
301 goto out; 283 goto out;
302 284
@@ -336,7 +318,7 @@ static void authenc_esn_verify_ahash_done(struct crypto_async_request *areq,
336 scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen, 318 scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
337 authsize, 0); 319 authsize, 0);
338 320
339 err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG : 0; 321 err = crypto_memneq(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
340 if (err) 322 if (err)
341 goto out; 323 goto out;
342 324
@@ -568,7 +550,7 @@ static int crypto_authenc_esn_verify(struct aead_request *req)
568 ihash = ohash + authsize; 550 ihash = ohash + authsize;
569 scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen, 551 scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
570 authsize, 0); 552 authsize, 0);
571 return memcmp(ihash, ohash, authsize) ? -EBADMSG : 0; 553 return crypto_memneq(ihash, ohash, authsize) ? -EBADMSG : 0;
572} 554}
573 555
574static int crypto_authenc_esn_iverify(struct aead_request *req, u8 *iv, 556static int crypto_authenc_esn_iverify(struct aead_request *req, u8 *iv,
diff --git a/crypto/ccm.c b/crypto/ccm.c
index 499c91717d93..1df84217f7c9 100644
--- a/crypto/ccm.c
+++ b/crypto/ccm.c
@@ -271,7 +271,8 @@ static int crypto_ccm_auth(struct aead_request *req, struct scatterlist *plain,
271 } 271 }
272 272
273 /* compute plaintext into mac */ 273 /* compute plaintext into mac */
274 get_data_to_compute(cipher, pctx, plain, cryptlen); 274 if (cryptlen)
275 get_data_to_compute(cipher, pctx, plain, cryptlen);
275 276
276out: 277out:
277 return err; 278 return err;
@@ -363,7 +364,7 @@ static void crypto_ccm_decrypt_done(struct crypto_async_request *areq,
363 364
364 if (!err) { 365 if (!err) {
365 err = crypto_ccm_auth(req, req->dst, cryptlen); 366 err = crypto_ccm_auth(req, req->dst, cryptlen);
366 if (!err && memcmp(pctx->auth_tag, pctx->odata, authsize)) 367 if (!err && crypto_memneq(pctx->auth_tag, pctx->odata, authsize))
367 err = -EBADMSG; 368 err = -EBADMSG;
368 } 369 }
369 aead_request_complete(req, err); 370 aead_request_complete(req, err);
@@ -422,7 +423,7 @@ static int crypto_ccm_decrypt(struct aead_request *req)
422 return err; 423 return err;
423 424
424 /* verify */ 425 /* verify */
425 if (memcmp(authtag, odata, authsize)) 426 if (crypto_memneq(authtag, odata, authsize))
426 return -EBADMSG; 427 return -EBADMSG;
427 428
428 return err; 429 return err;
diff --git a/crypto/gcm.c b/crypto/gcm.c
index 43e1fb05ea54..b4f017939004 100644
--- a/crypto/gcm.c
+++ b/crypto/gcm.c
@@ -582,7 +582,7 @@ static int crypto_gcm_verify(struct aead_request *req,
582 582
583 crypto_xor(auth_tag, iauth_tag, 16); 583 crypto_xor(auth_tag, iauth_tag, 16);
584 scatterwalk_map_and_copy(iauth_tag, req->src, cryptlen, authsize, 0); 584 scatterwalk_map_and_copy(iauth_tag, req->src, cryptlen, authsize, 0);
585 return memcmp(iauth_tag, auth_tag, authsize) ? -EBADMSG : 0; 585 return crypto_memneq(iauth_tag, auth_tag, authsize) ? -EBADMSG : 0;
586} 586}
587 587
588static void gcm_decrypt_done(struct crypto_async_request *areq, int err) 588static void gcm_decrypt_done(struct crypto_async_request *areq, int err)
diff --git a/crypto/memneq.c b/crypto/memneq.c
new file mode 100644
index 000000000000..cd0162221c14
--- /dev/null
+++ b/crypto/memneq.c
@@ -0,0 +1,138 @@
1/*
2 * Constant-time equality testing of memory regions.
3 *
4 * Authors:
5 *
6 * James Yonan <james@openvpn.net>
7 * Daniel Borkmann <dborkman@redhat.com>
8 *
9 * This file is provided under a dual BSD/GPLv2 license. When using or
10 * redistributing this file, you may do so under either license.
11 *
12 * GPL LICENSE SUMMARY
13 *
14 * Copyright(c) 2013 OpenVPN Technologies, Inc. All rights reserved.
15 *
16 * This program is free software; you can redistribute it and/or modify
17 * it under the terms of version 2 of the GNU General Public License as
18 * published by the Free Software Foundation.
19 *
20 * This program is distributed in the hope that it will be useful, but
21 * WITHOUT ANY WARRANTY; without even the implied warranty of
22 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
23 * General Public License for more details.
24 *
25 * You should have received a copy of the GNU General Public License
26 * along with this program; if not, write to the Free Software
27 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
28 * The full GNU General Public License is included in this distribution
29 * in the file called LICENSE.GPL.
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2013 OpenVPN Technologies, Inc. All rights reserved.
34 *
35 * Redistribution and use in source and binary forms, with or without
36 * modification, are permitted provided that the following conditions
37 * are met:
38 *
39 * * Redistributions of source code must retain the above copyright
40 * notice, this list of conditions and the following disclaimer.
41 * * Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in
43 * the documentation and/or other materials provided with the
44 * distribution.
45 * * Neither the name of OpenVPN Technologies nor the names of its
46 * contributors may be used to endorse or promote products derived
47 * from this software without specific prior written permission.
48 *
49 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
50 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
51 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
52 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
53 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
54 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
55 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
56 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
57 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
58 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
59 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
60 */
61
62#include <crypto/algapi.h>
63
64#ifndef __HAVE_ARCH_CRYPTO_MEMNEQ
65
66/* Generic path for arbitrary size */
67static inline unsigned long
68__crypto_memneq_generic(const void *a, const void *b, size_t size)
69{
70 unsigned long neq = 0;
71
72#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
73 while (size >= sizeof(unsigned long)) {
74 neq |= *(unsigned long *)a ^ *(unsigned long *)b;
75 a += sizeof(unsigned long);
76 b += sizeof(unsigned long);
77 size -= sizeof(unsigned long);
78 }
79#endif /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */
80 while (size > 0) {
81 neq |= *(unsigned char *)a ^ *(unsigned char *)b;
82 a += 1;
83 b += 1;
84 size -= 1;
85 }
86 return neq;
87}
88
89/* Loop-free fast-path for frequently used 16-byte size */
90static inline unsigned long __crypto_memneq_16(const void *a, const void *b)
91{
92#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
93 if (sizeof(unsigned long) == 8)
94 return ((*(unsigned long *)(a) ^ *(unsigned long *)(b))
95 | (*(unsigned long *)(a+8) ^ *(unsigned long *)(b+8)));
96 else if (sizeof(unsigned int) == 4)
97 return ((*(unsigned int *)(a) ^ *(unsigned int *)(b))
98 | (*(unsigned int *)(a+4) ^ *(unsigned int *)(b+4))
99 | (*(unsigned int *)(a+8) ^ *(unsigned int *)(b+8))
100 | (*(unsigned int *)(a+12) ^ *(unsigned int *)(b+12)));
101 else
102#endif /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */
103 return ((*(unsigned char *)(a) ^ *(unsigned char *)(b))
104 | (*(unsigned char *)(a+1) ^ *(unsigned char *)(b+1))
105 | (*(unsigned char *)(a+2) ^ *(unsigned char *)(b+2))
106 | (*(unsigned char *)(a+3) ^ *(unsigned char *)(b+3))
107 | (*(unsigned char *)(a+4) ^ *(unsigned char *)(b+4))
108 | (*(unsigned char *)(a+5) ^ *(unsigned char *)(b+5))
109 | (*(unsigned char *)(a+6) ^ *(unsigned char *)(b+6))
110 | (*(unsigned char *)(a+7) ^ *(unsigned char *)(b+7))
111 | (*(unsigned char *)(a+8) ^ *(unsigned char *)(b+8))
112 | (*(unsigned char *)(a+9) ^ *(unsigned char *)(b+9))
113 | (*(unsigned char *)(a+10) ^ *(unsigned char *)(b+10))
114 | (*(unsigned char *)(a+11) ^ *(unsigned char *)(b+11))
115 | (*(unsigned char *)(a+12) ^ *(unsigned char *)(b+12))
116 | (*(unsigned char *)(a+13) ^ *(unsigned char *)(b+13))
117 | (*(unsigned char *)(a+14) ^ *(unsigned char *)(b+14))
118 | (*(unsigned char *)(a+15) ^ *(unsigned char *)(b+15)));
119}
120
121/* Compare two areas of memory without leaking timing information,
122 * and with special optimizations for common sizes. Users should
123 * not call this function directly, but should instead use
124 * crypto_memneq defined in crypto/algapi.h.
125 */
126noinline unsigned long __crypto_memneq(const void *a, const void *b,
127 size_t size)
128{
129 switch (size) {
130 case 16:
131 return __crypto_memneq_16(a, b);
132 default:
133 return __crypto_memneq_generic(a, b, size);
134 }
135}
136EXPORT_SYMBOL(__crypto_memneq);
137
138#endif /* __HAVE_ARCH_CRYPTO_MEMNEQ */
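
For reference, a minimal sketch of the intended call pattern for the new primitive: users include crypto/algapi.h (which wraps __crypto_memneq() as crypto_memneq()) and replace memcmp() on secret data, converting the non-zero "not equal" result into -EBADMSG as the authenc, ccm and gcm hunks do. The helper name example_verify_tag() is illustrative only.

	#include <crypto/algapi.h>

	/* Illustrative only: constant-time authentication tag comparison. */
	static int example_verify_tag(const u8 *calculated, const u8 *received,
				      unsigned int authsize)
	{
		/*
		 * crypto_memneq() takes the same time whether the buffers
		 * differ in the first byte or the last, unlike memcmp().
		 */
		return crypto_memneq(calculated, received, authsize) ? -EBADMSG : 0;
	}
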
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index 1ab8258fcf56..001f07cdb828 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -1242,6 +1242,10 @@ static int do_test(int m)
1242 ret += tcrypt_test("cmac(des3_ede)"); 1242 ret += tcrypt_test("cmac(des3_ede)");
1243 break; 1243 break;
1244 1244
1245 case 155:
1246 ret += tcrypt_test("authenc(hmac(sha1),cbc(aes))");
1247 break;
1248
1245 case 200: 1249 case 200:
1246 test_cipher_speed("ecb(aes)", ENCRYPT, sec, NULL, 0, 1250 test_cipher_speed("ecb(aes)", ENCRYPT, sec, NULL, 0,
1247 speed_template_16_24_32); 1251 speed_template_16_24_32);
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index 432afc03e7c3..77955507f6f1 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -503,16 +503,16 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
503 goto out; 503 goto out;
504 } 504 }
505 505
506 sg_init_one(&sg[0], input,
507 template[i].ilen + (enc ? authsize : 0));
508
509 if (diff_dst) { 506 if (diff_dst) {
510 output = xoutbuf[0]; 507 output = xoutbuf[0];
511 output += align_offset; 508 output += align_offset;
509 sg_init_one(&sg[0], input, template[i].ilen);
512 sg_init_one(&sgout[0], output, 510 sg_init_one(&sgout[0], output,
511 template[i].rlen);
512 } else {
513 sg_init_one(&sg[0], input,
513 template[i].ilen + 514 template[i].ilen +
514 (enc ? authsize : 0)); 515 (enc ? authsize : 0));
515 } else {
516 output = input; 516 output = input;
517 } 517 }
518 518
@@ -612,12 +612,6 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
612 memcpy(q, template[i].input + temp, 612 memcpy(q, template[i].input + temp,
613 template[i].tap[k]); 613 template[i].tap[k]);
614 614
615 n = template[i].tap[k];
616 if (k == template[i].np - 1 && enc)
617 n += authsize;
618 if (offset_in_page(q) + n < PAGE_SIZE)
619 q[n] = 0;
620
621 sg_set_buf(&sg[k], q, template[i].tap[k]); 615 sg_set_buf(&sg[k], q, template[i].tap[k]);
622 616
623 if (diff_dst) { 617 if (diff_dst) {
@@ -625,13 +619,17 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
625 offset_in_page(IDX[k]); 619 offset_in_page(IDX[k]);
626 620
627 memset(q, 0, template[i].tap[k]); 621 memset(q, 0, template[i].tap[k]);
628 if (offset_in_page(q) + n < PAGE_SIZE)
629 q[n] = 0;
630 622
631 sg_set_buf(&sgout[k], q, 623 sg_set_buf(&sgout[k], q,
632 template[i].tap[k]); 624 template[i].tap[k]);
633 } 625 }
634 626
627 n = template[i].tap[k];
628 if (k == template[i].np - 1 && enc)
629 n += authsize;
630 if (offset_in_page(q) + n < PAGE_SIZE)
631 q[n] = 0;
632
635 temp += template[i].tap[k]; 633 temp += template[i].tap[k];
636 } 634 }
637 635
@@ -650,10 +648,10 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
650 goto out; 648 goto out;
651 } 649 }
652 650
653 sg[k - 1].length += authsize;
654
655 if (diff_dst) 651 if (diff_dst)
656 sgout[k - 1].length += authsize; 652 sgout[k - 1].length += authsize;
653 else
654 sg[k - 1].length += authsize;
657 } 655 }
658 656
659 sg_init_table(asg, template[i].anp); 657 sg_init_table(asg, template[i].anp);
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 5d9248526d78..4770de5707b9 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -348,7 +348,6 @@ source "drivers/acpi/apei/Kconfig"
348config ACPI_EXTLOG 348config ACPI_EXTLOG
349 tristate "Extended Error Log support" 349 tristate "Extended Error Log support"
350 depends on X86_MCE && X86_LOCAL_APIC 350 depends on X86_MCE && X86_LOCAL_APIC
351 select EFI
352 select UEFI_CPER 351 select UEFI_CPER
353 default n 352 default n
354 help 353 help
diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c
index 8711e3797165..3c2e4aa529c4 100644
--- a/drivers/acpi/ac.c
+++ b/drivers/acpi/ac.c
@@ -207,7 +207,7 @@ static int acpi_ac_probe(struct platform_device *pdev)
207 goto end; 207 goto end;
208 208
209 result = acpi_install_notify_handler(ACPI_HANDLE(&pdev->dev), 209 result = acpi_install_notify_handler(ACPI_HANDLE(&pdev->dev),
210 ACPI_DEVICE_NOTIFY, acpi_ac_notify_handler, ac); 210 ACPI_ALL_NOTIFY, acpi_ac_notify_handler, ac);
211 if (result) { 211 if (result) {
212 power_supply_unregister(&ac->charger); 212 power_supply_unregister(&ac->charger);
213 goto end; 213 goto end;
@@ -255,7 +255,7 @@ static int acpi_ac_remove(struct platform_device *pdev)
255 return -EINVAL; 255 return -EINVAL;
256 256
257 acpi_remove_notify_handler(ACPI_HANDLE(&pdev->dev), 257 acpi_remove_notify_handler(ACPI_HANDLE(&pdev->dev),
258 ACPI_DEVICE_NOTIFY, acpi_ac_notify_handler); 258 ACPI_ALL_NOTIFY, acpi_ac_notify_handler);
259 259
260 ac = platform_get_drvdata(pdev); 260 ac = platform_get_drvdata(pdev);
261 if (ac->charger.dev) 261 if (ac->charger.dev)
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index 6745fe137b9e..e60390597372 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -162,6 +162,7 @@ static const struct acpi_device_id acpi_lpss_device_ids[] = {
162 { "80860F14", (unsigned long)&byt_sdio_dev_desc }, 162 { "80860F14", (unsigned long)&byt_sdio_dev_desc },
163 { "80860F41", (unsigned long)&byt_i2c_dev_desc }, 163 { "80860F41", (unsigned long)&byt_i2c_dev_desc },
164 { "INT33B2", }, 164 { "INT33B2", },
165 { "INT33FC", },
165 166
166 { "INT3430", (unsigned long)&lpt_dev_desc }, 167 { "INT3430", (unsigned long)&lpt_dev_desc },
167 { "INT3431", (unsigned long)&lpt_dev_desc }, 168 { "INT3431", (unsigned long)&lpt_dev_desc },
diff --git a/drivers/acpi/acpica/acresrc.h b/drivers/acpi/acpica/acresrc.h
index f691d0e4d9fa..ff97430455cb 100644
--- a/drivers/acpi/acpica/acresrc.h
+++ b/drivers/acpi/acpica/acresrc.h
@@ -184,7 +184,7 @@ acpi_rs_create_resource_list(union acpi_operand_object *aml_buffer,
184 struct acpi_buffer *output_buffer); 184 struct acpi_buffer *output_buffer);
185 185
186acpi_status 186acpi_status
187acpi_rs_create_aml_resources(struct acpi_resource *linked_list_buffer, 187acpi_rs_create_aml_resources(struct acpi_buffer *resource_list,
188 struct acpi_buffer *output_buffer); 188 struct acpi_buffer *output_buffer);
189 189
190acpi_status 190acpi_status
@@ -227,8 +227,8 @@ acpi_rs_get_list_length(u8 * aml_buffer,
227 u32 aml_buffer_length, acpi_size * size_needed); 227 u32 aml_buffer_length, acpi_size * size_needed);
228 228
229acpi_status 229acpi_status
230acpi_rs_get_aml_length(struct acpi_resource *linked_list_buffer, 230acpi_rs_get_aml_length(struct acpi_resource *resource_list,
231 acpi_size * size_needed); 231 acpi_size resource_list_size, acpi_size * size_needed);
232 232
233acpi_status 233acpi_status
234acpi_rs_get_pci_routing_table_length(union acpi_operand_object *package_object, 234acpi_rs_get_pci_routing_table_length(union acpi_operand_object *package_object,
diff --git a/drivers/acpi/acpica/nsalloc.c b/drivers/acpi/acpica/nsalloc.c
index 243737363fb8..fd1ff54cda19 100644
--- a/drivers/acpi/acpica/nsalloc.c
+++ b/drivers/acpi/acpica/nsalloc.c
@@ -106,6 +106,7 @@ struct acpi_namespace_node *acpi_ns_create_node(u32 name)
106void acpi_ns_delete_node(struct acpi_namespace_node *node) 106void acpi_ns_delete_node(struct acpi_namespace_node *node)
107{ 107{
108 union acpi_operand_object *obj_desc; 108 union acpi_operand_object *obj_desc;
109 union acpi_operand_object *next_desc;
109 110
110 ACPI_FUNCTION_NAME(ns_delete_node); 111 ACPI_FUNCTION_NAME(ns_delete_node);
111 112
@@ -114,12 +115,13 @@ void acpi_ns_delete_node(struct acpi_namespace_node *node)
114 acpi_ns_detach_object(node); 115 acpi_ns_detach_object(node);
115 116
116 /* 117 /*
117 * Delete an attached data object if present (an object that was created 118 * Delete an attached data object list if present (objects that were
118 * and attached via acpi_attach_data). Note: After any normal object is 119 * attached via acpi_attach_data). Note: After any normal object is
119 * detached above, the only possible remaining object is a data object. 120 * detached above, the only possible remaining object(s) are data
121 * objects, in a linked list.
120 */ 122 */
121 obj_desc = node->object; 123 obj_desc = node->object;
122 if (obj_desc && (obj_desc->common.type == ACPI_TYPE_LOCAL_DATA)) { 124 while (obj_desc && (obj_desc->common.type == ACPI_TYPE_LOCAL_DATA)) {
123 125
124 /* Invoke the attached data deletion handler if present */ 126 /* Invoke the attached data deletion handler if present */
125 127
@@ -127,7 +129,15 @@ void acpi_ns_delete_node(struct acpi_namespace_node *node)
127 obj_desc->data.handler(node, obj_desc->data.pointer); 129 obj_desc->data.handler(node, obj_desc->data.pointer);
128 } 130 }
129 131
132 next_desc = obj_desc->common.next_object;
130 acpi_ut_remove_reference(obj_desc); 133 acpi_ut_remove_reference(obj_desc);
134 obj_desc = next_desc;
135 }
136
137 /* Special case for the statically allocated root node */
138
139 if (node == acpi_gbl_root_node) {
140 return;
131 } 141 }
132 142
133 /* Now we can delete the node */ 143 /* Now we can delete the node */
diff --git a/drivers/acpi/acpica/nsutils.c b/drivers/acpi/acpica/nsutils.c
index cc2fea94c5f0..4a0665b6bcc1 100644
--- a/drivers/acpi/acpica/nsutils.c
+++ b/drivers/acpi/acpica/nsutils.c
@@ -593,24 +593,26 @@ struct acpi_namespace_node *acpi_ns_validate_handle(acpi_handle handle)
593 593
594void acpi_ns_terminate(void) 594void acpi_ns_terminate(void)
595{ 595{
596 union acpi_operand_object *obj_desc; 596 acpi_status status;
597 597
598 ACPI_FUNCTION_TRACE(ns_terminate); 598 ACPI_FUNCTION_TRACE(ns_terminate);
599 599
600 /* 600 /*
601 * 1) Free the entire namespace -- all nodes and objects 601 * Free the entire namespace -- all nodes and all objects
602 * 602 * attached to the nodes
603 * Delete all object descriptors attached to namepsace nodes
604 */ 603 */
605 acpi_ns_delete_namespace_subtree(acpi_gbl_root_node); 604 acpi_ns_delete_namespace_subtree(acpi_gbl_root_node);
606 605
607 /* Detach any objects attached to the root */ 606 /* Delete any objects attached to the root node */
608 607
609 obj_desc = acpi_ns_get_attached_object(acpi_gbl_root_node); 608 status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
610 if (obj_desc) { 609 if (ACPI_FAILURE(status)) {
611 acpi_ns_detach_object(acpi_gbl_root_node); 610 return_VOID;
612 } 611 }
613 612
613 acpi_ns_delete_node(acpi_gbl_root_node);
614 (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
615
614 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Namespace freed\n")); 616 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Namespace freed\n"));
615 return_VOID; 617 return_VOID;
616} 618}
diff --git a/drivers/acpi/acpica/rscalc.c b/drivers/acpi/acpica/rscalc.c
index b62a0f4f4f9b..b60c9cf82862 100644
--- a/drivers/acpi/acpica/rscalc.c
+++ b/drivers/acpi/acpica/rscalc.c
@@ -174,6 +174,7 @@ acpi_rs_stream_option_length(u32 resource_length,
174 * FUNCTION: acpi_rs_get_aml_length 174 * FUNCTION: acpi_rs_get_aml_length
175 * 175 *
176 * PARAMETERS: resource - Pointer to the resource linked list 176 * PARAMETERS: resource - Pointer to the resource linked list
177 * resource_list_size - Size of the resource linked list
177 * size_needed - Where the required size is returned 178 * size_needed - Where the required size is returned
178 * 179 *
179 * RETURN: Status 180 * RETURN: Status
@@ -185,16 +186,20 @@ acpi_rs_stream_option_length(u32 resource_length,
185 ******************************************************************************/ 186 ******************************************************************************/
186 187
187acpi_status 188acpi_status
188acpi_rs_get_aml_length(struct acpi_resource * resource, acpi_size * size_needed) 189acpi_rs_get_aml_length(struct acpi_resource *resource,
190 acpi_size resource_list_size, acpi_size * size_needed)
189{ 191{
190 acpi_size aml_size_needed = 0; 192 acpi_size aml_size_needed = 0;
193 struct acpi_resource *resource_end;
191 acpi_rs_length total_size; 194 acpi_rs_length total_size;
192 195
193 ACPI_FUNCTION_TRACE(rs_get_aml_length); 196 ACPI_FUNCTION_TRACE(rs_get_aml_length);
194 197
195 /* Traverse entire list of internal resource descriptors */ 198 /* Traverse entire list of internal resource descriptors */
196 199
197 while (resource) { 200 resource_end =
201 ACPI_ADD_PTR(struct acpi_resource, resource, resource_list_size);
202 while (resource < resource_end) {
198 203
199 /* Validate the descriptor type */ 204 /* Validate the descriptor type */
200 205
diff --git a/drivers/acpi/acpica/rscreate.c b/drivers/acpi/acpica/rscreate.c
index 65f3e1c5b598..3a2ace93e62c 100644
--- a/drivers/acpi/acpica/rscreate.c
+++ b/drivers/acpi/acpica/rscreate.c
@@ -418,22 +418,21 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object,
418 * 418 *
419 * FUNCTION: acpi_rs_create_aml_resources 419 * FUNCTION: acpi_rs_create_aml_resources
420 * 420 *
421 * PARAMETERS: linked_list_buffer - Pointer to the resource linked list 421 * PARAMETERS: resource_list - Pointer to the resource list buffer
422 * output_buffer - Pointer to the user's buffer 422 * output_buffer - Where the AML buffer is returned
423 * 423 *
424 * RETURN: Status AE_OK if okay, else a valid acpi_status code. 424 * RETURN: Status AE_OK if okay, else a valid acpi_status code.
425 * If the output_buffer is too small, the error will be 425 * If the output_buffer is too small, the error will be
426 * AE_BUFFER_OVERFLOW and output_buffer->Length will point 426 * AE_BUFFER_OVERFLOW and output_buffer->Length will point
427 * to the size buffer needed. 427 * to the size buffer needed.
428 * 428 *
429 * DESCRIPTION: Takes the linked list of device resources and 429 * DESCRIPTION: Converts a list of device resources to an AML bytestream
430 * creates a bytestream to be used as input for the 430 * to be used as input for the _SRS control method.
431 * _SRS control method.
432 * 431 *
433 ******************************************************************************/ 432 ******************************************************************************/
434 433
435acpi_status 434acpi_status
436acpi_rs_create_aml_resources(struct acpi_resource *linked_list_buffer, 435acpi_rs_create_aml_resources(struct acpi_buffer *resource_list,
437 struct acpi_buffer *output_buffer) 436 struct acpi_buffer *output_buffer)
438{ 437{
439 acpi_status status; 438 acpi_status status;
@@ -441,16 +440,16 @@ acpi_rs_create_aml_resources(struct acpi_resource *linked_list_buffer,
441 440
442 ACPI_FUNCTION_TRACE(rs_create_aml_resources); 441 ACPI_FUNCTION_TRACE(rs_create_aml_resources);
443 442
444 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "LinkedListBuffer = %p\n", 443 /* Params already validated, no need to re-validate here */
445 linked_list_buffer));
446 444
447 /* 445 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "ResourceList Buffer = %p\n",
448 * Params already validated, so we don't re-validate here 446 resource_list->pointer));
449 * 447
450 * Pass the linked_list_buffer into a module that calculates 448 /* Get the buffer size needed for the AML byte stream */
451 * the buffer size needed for the byte stream. 449
452 */ 450 status = acpi_rs_get_aml_length(resource_list->pointer,
453 status = acpi_rs_get_aml_length(linked_list_buffer, &aml_size_needed); 451 resource_list->length,
452 &aml_size_needed);
454 453
455 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "AmlSizeNeeded=%X, %s\n", 454 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "AmlSizeNeeded=%X, %s\n",
456 (u32)aml_size_needed, acpi_format_exception(status))); 455 (u32)aml_size_needed, acpi_format_exception(status)));
@@ -467,10 +466,9 @@ acpi_rs_create_aml_resources(struct acpi_resource *linked_list_buffer,
467 466
468 /* Do the conversion */ 467 /* Do the conversion */
469 468
470 status = 469 status = acpi_rs_convert_resources_to_aml(resource_list->pointer,
471 acpi_rs_convert_resources_to_aml(linked_list_buffer, 470 aml_size_needed,
472 aml_size_needed, 471 output_buffer->pointer);
473 output_buffer->pointer);
474 if (ACPI_FAILURE(status)) { 472 if (ACPI_FAILURE(status)) {
475 return_ACPI_STATUS(status); 473 return_ACPI_STATUS(status);
476 } 474 }
diff --git a/drivers/acpi/acpica/rsutils.c b/drivers/acpi/acpica/rsutils.c
index aef303d56d86..14a7982c9961 100644
--- a/drivers/acpi/acpica/rsutils.c
+++ b/drivers/acpi/acpica/rsutils.c
@@ -753,7 +753,7 @@ acpi_rs_set_srs_method_data(struct acpi_namespace_node *node,
753 * Convert the linked list into a byte stream 753 * Convert the linked list into a byte stream
754 */ 754 */
755 buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER; 755 buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER;
756 status = acpi_rs_create_aml_resources(in_buffer->pointer, &buffer); 756 status = acpi_rs_create_aml_resources(in_buffer, &buffer);
757 if (ACPI_FAILURE(status)) { 757 if (ACPI_FAILURE(status)) {
758 goto cleanup; 758 goto cleanup;
759 } 759 }
diff --git a/drivers/acpi/acpica/utdebug.c b/drivers/acpi/acpica/utdebug.c
index 1a67b3944b3b..03ae8affe48f 100644
--- a/drivers/acpi/acpica/utdebug.c
+++ b/drivers/acpi/acpica/utdebug.c
@@ -185,6 +185,7 @@ acpi_debug_print(u32 requested_debug_level,
185 } 185 }
186 186
187 acpi_gbl_prev_thread_id = thread_id; 187 acpi_gbl_prev_thread_id = thread_id;
188 acpi_gbl_nesting_level = 0;
188 } 189 }
189 190
190 /* 191 /*
@@ -193,13 +194,21 @@ acpi_debug_print(u32 requested_debug_level,
193 */ 194 */
194 acpi_os_printf("%9s-%04ld ", module_name, line_number); 195 acpi_os_printf("%9s-%04ld ", module_name, line_number);
195 196
197#ifdef ACPI_EXEC_APP
198 /*
199 * For acpi_exec only, emit the thread ID and nesting level.
 200 * Note: nesting level is really only useful during single-threaded
201 * execution. Otherwise, multiple threads will keep resetting the
202 * level.
203 */
196 if (ACPI_LV_THREADS & acpi_dbg_level) { 204 if (ACPI_LV_THREADS & acpi_dbg_level) {
197 acpi_os_printf("[%u] ", (u32)thread_id); 205 acpi_os_printf("[%u] ", (u32)thread_id);
198 } 206 }
199 207
200 acpi_os_printf("[%02ld] %-22.22s: ", 208 acpi_os_printf("[%02ld] ", acpi_gbl_nesting_level);
201 acpi_gbl_nesting_level, 209#endif
202 acpi_ut_trim_function_name(function_name)); 210
211 acpi_os_printf("%-22.22s: ", acpi_ut_trim_function_name(function_name));
203 212
204 va_start(args, format); 213 va_start(args, format);
205 acpi_os_vprintf(format, args); 214 acpi_os_vprintf(format, args);
@@ -420,7 +429,9 @@ acpi_ut_exit(u32 line_number,
420 component_id, "%s\n", acpi_gbl_fn_exit_str); 429 component_id, "%s\n", acpi_gbl_fn_exit_str);
421 } 430 }
422 431
423 acpi_gbl_nesting_level--; 432 if (acpi_gbl_nesting_level) {
433 acpi_gbl_nesting_level--;
434 }
424} 435}
425 436
426ACPI_EXPORT_SYMBOL(acpi_ut_exit) 437ACPI_EXPORT_SYMBOL(acpi_ut_exit)
@@ -467,7 +478,9 @@ acpi_ut_status_exit(u32 line_number,
467 } 478 }
468 } 479 }
469 480
470 acpi_gbl_nesting_level--; 481 if (acpi_gbl_nesting_level) {
482 acpi_gbl_nesting_level--;
483 }
471} 484}
472 485
473ACPI_EXPORT_SYMBOL(acpi_ut_status_exit) 486ACPI_EXPORT_SYMBOL(acpi_ut_status_exit)
@@ -504,7 +517,9 @@ acpi_ut_value_exit(u32 line_number,
504 ACPI_FORMAT_UINT64(value)); 517 ACPI_FORMAT_UINT64(value));
505 } 518 }
506 519
507 acpi_gbl_nesting_level--; 520 if (acpi_gbl_nesting_level) {
521 acpi_gbl_nesting_level--;
522 }
508} 523}
509 524
510ACPI_EXPORT_SYMBOL(acpi_ut_value_exit) 525ACPI_EXPORT_SYMBOL(acpi_ut_value_exit)
@@ -540,7 +555,9 @@ acpi_ut_ptr_exit(u32 line_number,
540 ptr); 555 ptr);
541 } 556 }
542 557
543 acpi_gbl_nesting_level--; 558 if (acpi_gbl_nesting_level) {
559 acpi_gbl_nesting_level--;
560 }
544} 561}
545 562
546#endif 563#endif
diff --git a/drivers/acpi/apei/Kconfig b/drivers/acpi/apei/Kconfig
index 786294bb682c..3650b2183227 100644
--- a/drivers/acpi/apei/Kconfig
+++ b/drivers/acpi/apei/Kconfig
@@ -2,7 +2,6 @@ config ACPI_APEI
2 bool "ACPI Platform Error Interface (APEI)" 2 bool "ACPI Platform Error Interface (APEI)"
3 select MISC_FILESYSTEMS 3 select MISC_FILESYSTEMS
4 select PSTORE 4 select PSTORE
5 select EFI
6 select UEFI_CPER 5 select UEFI_CPER
7 depends on X86 6 depends on X86
8 help 7 help
diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c
index 26311f23c824..cb1d557fc22c 100644
--- a/drivers/acpi/apei/erst.c
+++ b/drivers/acpi/apei/erst.c
@@ -942,6 +942,7 @@ static int erst_clearer(enum pstore_type_id type, u64 id, int count,
942static struct pstore_info erst_info = { 942static struct pstore_info erst_info = {
943 .owner = THIS_MODULE, 943 .owner = THIS_MODULE,
944 .name = "erst", 944 .name = "erst",
945 .flags = PSTORE_FLAGS_FRAGILE,
945 .open = erst_open_pstore, 946 .open = erst_open_pstore,
946 .close = erst_close_pstore, 947 .close = erst_close_pstore,
947 .read = erst_reader, 948 .read = erst_reader,
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index fbf1aceda8b8..5876a49dfd38 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -62,6 +62,7 @@ MODULE_AUTHOR("Alexey Starikovskiy <astarikovskiy@suse.de>");
62MODULE_DESCRIPTION("ACPI Battery Driver"); 62MODULE_DESCRIPTION("ACPI Battery Driver");
63MODULE_LICENSE("GPL"); 63MODULE_LICENSE("GPL");
64 64
65static int battery_bix_broken_package;
65static unsigned int cache_time = 1000; 66static unsigned int cache_time = 1000;
66module_param(cache_time, uint, 0644); 67module_param(cache_time, uint, 0644);
67MODULE_PARM_DESC(cache_time, "cache time in milliseconds"); 68MODULE_PARM_DESC(cache_time, "cache time in milliseconds");
@@ -416,7 +417,12 @@ static int acpi_battery_get_info(struct acpi_battery *battery)
416 ACPI_EXCEPTION((AE_INFO, status, "Evaluating %s", name)); 417 ACPI_EXCEPTION((AE_INFO, status, "Evaluating %s", name));
417 return -ENODEV; 418 return -ENODEV;
418 } 419 }
419 if (test_bit(ACPI_BATTERY_XINFO_PRESENT, &battery->flags)) 420
421 if (battery_bix_broken_package)
422 result = extract_package(battery, buffer.pointer,
423 extended_info_offsets + 1,
424 ARRAY_SIZE(extended_info_offsets) - 1);
425 else if (test_bit(ACPI_BATTERY_XINFO_PRESENT, &battery->flags))
420 result = extract_package(battery, buffer.pointer, 426 result = extract_package(battery, buffer.pointer,
421 extended_info_offsets, 427 extended_info_offsets,
422 ARRAY_SIZE(extended_info_offsets)); 428 ARRAY_SIZE(extended_info_offsets));
@@ -754,6 +760,17 @@ static int battery_notify(struct notifier_block *nb,
754 return 0; 760 return 0;
755} 761}
756 762
763static struct dmi_system_id bat_dmi_table[] = {
764 {
765 .ident = "NEC LZ750/LS",
766 .matches = {
767 DMI_MATCH(DMI_SYS_VENDOR, "NEC"),
768 DMI_MATCH(DMI_PRODUCT_NAME, "PC-LZ750LS"),
769 },
770 },
771 {},
772};
773
757static int acpi_battery_add(struct acpi_device *device) 774static int acpi_battery_add(struct acpi_device *device)
758{ 775{
759 int result = 0; 776 int result = 0;
@@ -846,6 +863,9 @@ static void __init acpi_battery_init_async(void *unused, async_cookie_t cookie)
846{ 863{
847 if (acpi_disabled) 864 if (acpi_disabled)
848 return; 865 return;
866
867 if (dmi_check_system(bat_dmi_table))
868 battery_bix_broken_package = 1;
849 acpi_bus_register_driver(&acpi_battery_driver); 869 acpi_bus_register_driver(&acpi_battery_driver);
850} 870}
851 871
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index bba9b72e25f8..0710004055c8 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -156,6 +156,16 @@ int acpi_bus_get_private_data(acpi_handle handle, void **data)
156} 156}
157EXPORT_SYMBOL(acpi_bus_get_private_data); 157EXPORT_SYMBOL(acpi_bus_get_private_data);
158 158
159void acpi_bus_no_hotplug(acpi_handle handle)
160{
161 struct acpi_device *adev = NULL;
162
163 acpi_bus_get_device(handle, &adev);
164 if (adev)
165 adev->flags.no_hotplug = true;
166}
167EXPORT_SYMBOL_GPL(acpi_bus_no_hotplug);
168
159static void acpi_print_osc_error(acpi_handle handle, 169static void acpi_print_osc_error(acpi_handle handle,
160 struct acpi_osc_context *context, char *error) 170 struct acpi_osc_context *context, char *error)
161{ 171{
diff --git a/drivers/acpi/nvs.c b/drivers/acpi/nvs.c
index 266bc58ce0ce..386a9fe497b4 100644
--- a/drivers/acpi/nvs.c
+++ b/drivers/acpi/nvs.c
@@ -13,7 +13,6 @@
13#include <linux/slab.h> 13#include <linux/slab.h>
14#include <linux/acpi.h> 14#include <linux/acpi.h>
15#include <linux/acpi_io.h> 15#include <linux/acpi_io.h>
16#include <acpi/acpiosxf.h>
17 16
18/* ACPI NVS regions, APEI may use it */ 17/* ACPI NVS regions, APEI may use it */
19 18
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index 0703bff5e60e..20360e480bd8 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -65,6 +65,9 @@ static struct acpi_scan_handler pci_root_handler = {
65 .ids = root_device_ids, 65 .ids = root_device_ids,
66 .attach = acpi_pci_root_add, 66 .attach = acpi_pci_root_add,
67 .detach = acpi_pci_root_remove, 67 .detach = acpi_pci_root_remove,
68 .hotplug = {
69 .ignore = true,
70 },
68}; 71};
69 72
70static DEFINE_MUTEX(osc_lock); 73static DEFINE_MUTEX(osc_lock);
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 15daa21fcd05..fd39459926b1 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -1772,7 +1772,7 @@ static void acpi_scan_init_hotplug(acpi_handle handle, int type)
1772 */ 1772 */
1773 list_for_each_entry(hwid, &pnp.ids, list) { 1773 list_for_each_entry(hwid, &pnp.ids, list) {
1774 handler = acpi_scan_match_handler(hwid->id, NULL); 1774 handler = acpi_scan_match_handler(hwid->id, NULL);
1775 if (handler) { 1775 if (handler && !handler->hotplug.ignore) {
1776 acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY, 1776 acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
1777 acpi_hotplug_notify_cb, handler); 1777 acpi_hotplug_notify_cb, handler);
1778 break; 1778 break;
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 14df30580e15..721e949e606e 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -525,7 +525,7 @@ static int acpi_suspend_enter(suspend_state_t pm_state)
525 * generate wakeup events. 525 * generate wakeup events.
526 */ 526 */
527 if (ACPI_SUCCESS(status) && (acpi_state == ACPI_STATE_S3)) { 527 if (ACPI_SUCCESS(status) && (acpi_state == ACPI_STATE_S3)) {
528 acpi_event_status pwr_btn_status; 528 acpi_event_status pwr_btn_status = ACPI_EVENT_FLAG_DISABLED;
529 529
530 acpi_get_event_status(ACPI_EVENT_POWER_BUTTON, &pwr_btn_status); 530 acpi_get_event_status(ACPI_EVENT_POWER_BUTTON, &pwr_btn_status);
531 531
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
index db5293650f62..6dbc3ca45223 100644
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
@@ -309,7 +309,7 @@ static void acpi_table_attr_init(struct acpi_table_attr *table_attr,
309 sprintf(table_attr->name + ACPI_NAME_SIZE, "%d", 309 sprintf(table_attr->name + ACPI_NAME_SIZE, "%d",
310 table_attr->instance); 310 table_attr->instance);
311 311
312 table_attr->attr.size = 0; 312 table_attr->attr.size = table_header->length;
313 table_attr->attr.read = acpi_table_show; 313 table_attr->attr.read = acpi_table_show;
314 table_attr->attr.attr.name = table_attr->name; 314 table_attr->attr.attr.name = table_attr->name;
315 table_attr->attr.attr.mode = 0400; 315 table_attr->attr.attr.mode = 0400;
@@ -354,8 +354,9 @@ static int acpi_tables_sysfs_init(void)
354{ 354{
355 struct acpi_table_attr *table_attr; 355 struct acpi_table_attr *table_attr;
356 struct acpi_table_header *table_header = NULL; 356 struct acpi_table_header *table_header = NULL;
357 int table_index = 0; 357 int table_index;
358 int result; 358 acpi_status status;
359 int ret;
359 360
360 tables_kobj = kobject_create_and_add("tables", acpi_kobj); 361 tables_kobj = kobject_create_and_add("tables", acpi_kobj);
361 if (!tables_kobj) 362 if (!tables_kobj)
@@ -365,33 +366,34 @@ static int acpi_tables_sysfs_init(void)
365 if (!dynamic_tables_kobj) 366 if (!dynamic_tables_kobj)
366 goto err_dynamic_tables; 367 goto err_dynamic_tables;
367 368
368 do { 369 for (table_index = 0;; table_index++) {
369 result = acpi_get_table_by_index(table_index, &table_header); 370 status = acpi_get_table_by_index(table_index, &table_header);
370 if (!result) { 371
371 table_index++; 372 if (status == AE_BAD_PARAMETER)
372 table_attr = NULL; 373 break;
373 table_attr = 374
374 kzalloc(sizeof(struct acpi_table_attr), GFP_KERNEL); 375 if (ACPI_FAILURE(status))
375 if (!table_attr) 376 continue;
376 return -ENOMEM; 377
377 378 table_attr = NULL;
378 acpi_table_attr_init(table_attr, table_header); 379 table_attr = kzalloc(sizeof(*table_attr), GFP_KERNEL);
379 result = 380 if (!table_attr)
380 sysfs_create_bin_file(tables_kobj, 381 return -ENOMEM;
381 &table_attr->attr); 382
382 if (result) { 383 acpi_table_attr_init(table_attr, table_header);
383 kfree(table_attr); 384 ret = sysfs_create_bin_file(tables_kobj, &table_attr->attr);
384 return result; 385 if (ret) {
385 } else 386 kfree(table_attr);
386 list_add_tail(&table_attr->node, 387 return ret;
387 &acpi_table_attr_list);
388 } 388 }
389 } while (!result); 389 list_add_tail(&table_attr->node, &acpi_table_attr_list);
390 }
391
390 kobject_uevent(tables_kobj, KOBJ_ADD); 392 kobject_uevent(tables_kobj, KOBJ_ADD);
391 kobject_uevent(dynamic_tables_kobj, KOBJ_ADD); 393 kobject_uevent(dynamic_tables_kobj, KOBJ_ADD);
392 result = acpi_install_table_handler(acpi_sysfs_table_handler, NULL); 394 status = acpi_install_table_handler(acpi_sysfs_table_handler, NULL);
393 395
394 return result == AE_OK ? 0 : -EINVAL; 396 return ACPI_FAILURE(status) ? -EINVAL : 0;
395err_dynamic_tables: 397err_dynamic_tables:
396 kobject_put(tables_kobj); 398 kobject_put(tables_kobj);
397err: 399err:
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index e2903d03180e..e3a92a6da39a 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -427,6 +427,9 @@ static const struct pci_device_id ahci_pci_tbl[] = {
427 .driver_data = board_ahci_yes_fbs }, /* 88se9128 */ 427 .driver_data = board_ahci_yes_fbs }, /* 88se9128 */
428 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9125), 428 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9125),
429 .driver_data = board_ahci_yes_fbs }, /* 88se9125 */ 429 .driver_data = board_ahci_yes_fbs }, /* 88se9125 */
430 { PCI_DEVICE_SUB(PCI_VENDOR_ID_MARVELL_EXT, 0x9178,
431 PCI_VENDOR_ID_MARVELL_EXT, 0x9170),
432 .driver_data = board_ahci_yes_fbs }, /* 88se9170 */
430 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x917a), 433 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x917a),
431 .driver_data = board_ahci_yes_fbs }, /* 88se9172 */ 434 .driver_data = board_ahci_yes_fbs }, /* 88se9172 */
432 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9172), 435 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9172),
@@ -435,6 +438,8 @@ static const struct pci_device_id ahci_pci_tbl[] = {
435 .driver_data = board_ahci_yes_fbs }, /* 88se9172 on some Gigabyte */ 438 .driver_data = board_ahci_yes_fbs }, /* 88se9172 on some Gigabyte */
436 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x91a3), 439 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x91a3),
437 .driver_data = board_ahci_yes_fbs }, 440 .driver_data = board_ahci_yes_fbs },
441 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9230),
442 .driver_data = board_ahci_yes_fbs },
438 443
439 /* Promise */ 444 /* Promise */
440 { PCI_VDEVICE(PROMISE, 0x3f20), board_ahci }, /* PDC42819 */ 445 { PCI_VDEVICE(PROMISE, 0x3f20), board_ahci }, /* PDC42819 */
@@ -1236,15 +1241,6 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1236 if (rc) 1241 if (rc)
1237 return rc; 1242 return rc;
1238 1243
1239 /* AHCI controllers often implement SFF compatible interface.
1240 * Grab all PCI BARs just in case.
1241 */
1242 rc = pcim_iomap_regions_request_all(pdev, 1 << ahci_pci_bar, DRV_NAME);
1243 if (rc == -EBUSY)
1244 pcim_pin_device(pdev);
1245 if (rc)
1246 return rc;
1247
1248 if (pdev->vendor == PCI_VENDOR_ID_INTEL && 1244 if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
1249 (pdev->device == 0x2652 || pdev->device == 0x2653)) { 1245 (pdev->device == 0x2652 || pdev->device == 0x2653)) {
1250 u8 map; 1246 u8 map;
@@ -1261,6 +1257,15 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1261 } 1257 }
1262 } 1258 }
1263 1259
1260 /* AHCI controllers often implement SFF compatible interface.
1261 * Grab all PCI BARs just in case.
1262 */
1263 rc = pcim_iomap_regions_request_all(pdev, 1 << ahci_pci_bar, DRV_NAME);
1264 if (rc == -EBUSY)
1265 pcim_pin_device(pdev);
1266 if (rc)
1267 return rc;
1268
1264 hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL); 1269 hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
1265 if (!hpriv) 1270 if (!hpriv)
1266 return -ENOMEM; 1271 return -ENOMEM;
diff --git a/drivers/ata/ahci_imx.c b/drivers/ata/ahci_imx.c
index ae2d73fe321e..3e23e9941dad 100644
--- a/drivers/ata/ahci_imx.c
+++ b/drivers/ata/ahci_imx.c
@@ -113,7 +113,7 @@ static int imx6q_sata_init(struct device *dev, void __iomem *mmio)
113 /* 113 /*
 114 * set PHY Parameters, two steps to configure the GPR13, 114 * set PHY Parameters, two steps to configure the GPR13,
115 * one write for rest of parameters, mask of first write 115 * one write for rest of parameters, mask of first write
116 * is 0x07fffffd, and the other one write for setting 116 * is 0x07ffffff, and the other one write for setting
117 * the mpll_clk_en. 117 * the mpll_clk_en.
118 */ 118 */
119 regmap_update_bits(imxpriv->gpr, 0x34, IMX6Q_GPR13_SATA_RX_EQ_VAL_MASK 119 regmap_update_bits(imxpriv->gpr, 0x34, IMX6Q_GPR13_SATA_RX_EQ_VAL_MASK
@@ -124,6 +124,7 @@ static int imx6q_sata_init(struct device *dev, void __iomem *mmio)
124 | IMX6Q_GPR13_SATA_TX_ATTEN_MASK 124 | IMX6Q_GPR13_SATA_TX_ATTEN_MASK
125 | IMX6Q_GPR13_SATA_TX_BOOST_MASK 125 | IMX6Q_GPR13_SATA_TX_BOOST_MASK
126 | IMX6Q_GPR13_SATA_TX_LVL_MASK 126 | IMX6Q_GPR13_SATA_TX_LVL_MASK
127 | IMX6Q_GPR13_SATA_MPLL_CLK_EN
127 | IMX6Q_GPR13_SATA_TX_EDGE_RATE 128 | IMX6Q_GPR13_SATA_TX_EDGE_RATE
128 , IMX6Q_GPR13_SATA_RX_EQ_VAL_3_0_DB 129 , IMX6Q_GPR13_SATA_RX_EQ_VAL_3_0_DB
129 | IMX6Q_GPR13_SATA_RX_LOS_LVL_SATA2M 130 | IMX6Q_GPR13_SATA_RX_LOS_LVL_SATA2M
diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c
index f9554318504f..4b231baceb09 100644
--- a/drivers/ata/ahci_platform.c
+++ b/drivers/ata/ahci_platform.c
@@ -329,6 +329,7 @@ static SIMPLE_DEV_PM_OPS(ahci_pm_ops, ahci_suspend, ahci_resume);
329static const struct of_device_id ahci_of_match[] = { 329static const struct of_device_id ahci_of_match[] = {
330 { .compatible = "snps,spear-ahci", }, 330 { .compatible = "snps,spear-ahci", },
331 { .compatible = "snps,exynos5440-ahci", }, 331 { .compatible = "snps,exynos5440-ahci", },
332 { .compatible = "ibm,476gtr-ahci", },
332 {}, 333 {},
333}; 334};
334MODULE_DEVICE_TABLE(of, ahci_of_match); 335MODULE_DEVICE_TABLE(of, ahci_of_match);
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 81a94a3919db..1393a5890ed5 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -2149,9 +2149,16 @@ static int ata_dev_config_ncq(struct ata_device *dev,
2149 "failed to get NCQ Send/Recv Log Emask 0x%x\n", 2149 "failed to get NCQ Send/Recv Log Emask 0x%x\n",
2150 err_mask); 2150 err_mask);
2151 } else { 2151 } else {
2152 u8 *cmds = dev->ncq_send_recv_cmds;
2153
2152 dev->flags |= ATA_DFLAG_NCQ_SEND_RECV; 2154 dev->flags |= ATA_DFLAG_NCQ_SEND_RECV;
2153 memcpy(dev->ncq_send_recv_cmds, ap->sector_buf, 2155 memcpy(cmds, ap->sector_buf, ATA_LOG_NCQ_SEND_RECV_SIZE);
2154 ATA_LOG_NCQ_SEND_RECV_SIZE); 2156
2157 if (dev->horkage & ATA_HORKAGE_NO_NCQ_TRIM) {
2158 ata_dev_dbg(dev, "disabling queued TRIM support\n");
2159 cmds[ATA_LOG_NCQ_SEND_RECV_DSM_OFFSET] &=
2160 ~ATA_LOG_NCQ_SEND_RECV_DSM_TRIM;
2161 }
2155 } 2162 }
2156 } 2163 }
2157 2164
@@ -4156,6 +4163,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
4156 { "ST3320[68]13AS", "SD1[5-9]", ATA_HORKAGE_NONCQ | 4163 { "ST3320[68]13AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
4157 ATA_HORKAGE_FIRMWARE_WARN }, 4164 ATA_HORKAGE_FIRMWARE_WARN },
4158 4165
 4166 /* Seagate Momentus SpinPoint M8 seem to have FPDMA_AA issues */
4167 { "ST1000LM024 HN-M101MBB", "2AR10001", ATA_HORKAGE_BROKEN_FPDMA_AA },
4168
4159 /* Blacklist entries taken from Silicon Image 3124/3132 4169 /* Blacklist entries taken from Silicon Image 3124/3132
4160 Windows driver .inf file - also several Linux problem reports */ 4170 Windows driver .inf file - also several Linux problem reports */
4161 { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, }, 4171 { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, },
@@ -4202,6 +4212,10 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
4202 { "PIONEER DVD-RW DVR-212D", NULL, ATA_HORKAGE_NOSETXFER }, 4212 { "PIONEER DVD-RW DVR-212D", NULL, ATA_HORKAGE_NOSETXFER },
4203 { "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER }, 4213 { "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER },
4204 4214
4215 /* devices that don't properly handle queued TRIM commands */
4216 { "Micron_M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
4217 { "Crucial_CT???M500SSD1", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
4218
4205 /* End Marker */ 4219 /* End Marker */
4206 { } 4220 { }
4207}; 4221};
@@ -6304,10 +6318,9 @@ static void ata_port_detach(struct ata_port *ap)
6304 for (i = 0; i < SATA_PMP_MAX_PORTS; i++) 6318 for (i = 0; i < SATA_PMP_MAX_PORTS; i++)
6305 ata_tlink_delete(&ap->pmp_link[i]); 6319 ata_tlink_delete(&ap->pmp_link[i]);
6306 } 6320 }
6307 ata_tport_delete(ap);
6308
6309 /* remove the associated SCSI host */ 6321 /* remove the associated SCSI host */
6310 scsi_remove_host(ap->scsi_host); 6322 scsi_remove_host(ap->scsi_host);
6323 ata_tport_delete(ap);
6311} 6324}
6312 6325
6313/** 6326/**
@@ -6520,6 +6533,7 @@ static int __init ata_parse_force_one(char **cur,
6520 { "norst", .lflags = ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST }, 6533 { "norst", .lflags = ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST },
6521 { "rstonce", .lflags = ATA_LFLAG_RST_ONCE }, 6534 { "rstonce", .lflags = ATA_LFLAG_RST_ONCE },
6522 { "atapi_dmadir", .horkage_on = ATA_HORKAGE_ATAPI_DMADIR }, 6535 { "atapi_dmadir", .horkage_on = ATA_HORKAGE_ATAPI_DMADIR },
6536 { "disable", .horkage_on = ATA_HORKAGE_DISABLE },
6523 }; 6537 };
6524 char *start = *cur, *p = *cur; 6538 char *start = *cur, *p = *cur;
6525 char *id, *val, *endp; 6539 char *id, *val, *endp;
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index db6dfcfa3e2e..377eb889f555 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -3625,6 +3625,7 @@ int ata_scsi_add_hosts(struct ata_host *host, struct scsi_host_template *sht)
3625 shost->max_lun = 1; 3625 shost->max_lun = 1;
3626 shost->max_channel = 1; 3626 shost->max_channel = 1;
3627 shost->max_cmd_len = 16; 3627 shost->max_cmd_len = 16;
3628 shost->no_write_same = 1;
3628 3629
3629 /* Schedule policy is determined by ->qc_defer() 3630 /* Schedule policy is determined by ->qc_defer()
3630 * callback and it needs to see every deferred qc. 3631 * callback and it needs to see every deferred qc.
@@ -3871,6 +3872,27 @@ void ata_scsi_hotplug(struct work_struct *work)
3871 return; 3872 return;
3872 } 3873 }
3873 3874
3875 /*
3876 * XXX - UGLY HACK
3877 *
3878 * The block layer suspend/resume path is fundamentally broken due
 3879 * to freezable kthreads and workqueues and may deadlock if a block
3880 * device gets removed while resume is in progress. I don't know
3881 * what the solution is short of removing freezable kthreads and
3882 * workqueues altogether.
3883 *
3884 * The following is an ugly hack to avoid kicking off device
3885 * removal while freezer is active. This is a joke but does avoid
3886 * this particular deadlock scenario.
3887 *
3888 * https://bugzilla.kernel.org/show_bug.cgi?id=62801
3889 * http://marc.info/?l=linux-kernel&m=138695698516487
3890 */
3891#ifdef CONFIG_FREEZER
3892 while (pm_freezing)
3893 msleep(10);
3894#endif
3895
3874 DPRINTK("ENTER\n"); 3896 DPRINTK("ENTER\n");
3875 mutex_lock(&ap->scsi_scan_mutex); 3897 mutex_lock(&ap->scsi_scan_mutex);
3876 3898
diff --git a/drivers/ata/libata-zpodd.c b/drivers/ata/libata-zpodd.c
index 68f9e3293e9c..88949c6d55dd 100644
--- a/drivers/ata/libata-zpodd.c
+++ b/drivers/ata/libata-zpodd.c
@@ -88,15 +88,13 @@ static enum odd_mech_type zpodd_get_mech_type(struct ata_device *dev)
88static bool odd_can_poweroff(struct ata_device *ata_dev) 88static bool odd_can_poweroff(struct ata_device *ata_dev)
89{ 89{
90 acpi_handle handle; 90 acpi_handle handle;
91 acpi_status status;
92 struct acpi_device *acpi_dev; 91 struct acpi_device *acpi_dev;
93 92
94 handle = ata_dev_acpi_handle(ata_dev); 93 handle = ata_dev_acpi_handle(ata_dev);
95 if (!handle) 94 if (!handle)
96 return false; 95 return false;
97 96
98 status = acpi_bus_get_device(handle, &acpi_dev); 97 if (acpi_bus_get_device(handle, &acpi_dev))
99 if (ACPI_FAILURE(status))
100 return false; 98 return false;
101 99
102 return acpi_device_can_poweroff(acpi_dev); 100 return acpi_device_can_poweroff(acpi_dev);
diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
index e88690ebfd82..73492dd4a4bc 100644
--- a/drivers/ata/pata_arasan_cf.c
+++ b/drivers/ata/pata_arasan_cf.c
@@ -319,6 +319,7 @@ static int cf_init(struct arasan_cf_dev *acdev)
319 ret = clk_set_rate(acdev->clk, 166000000); 319 ret = clk_set_rate(acdev->clk, 166000000);
320 if (ret) { 320 if (ret) {
321 dev_warn(acdev->host->dev, "clock set rate failed"); 321 dev_warn(acdev->host->dev, "clock set rate failed");
322 clk_disable_unprepare(acdev->clk);
322 return ret; 323 return ret;
323 } 324 }
324 325
diff --git a/drivers/ata/sata_sis.c b/drivers/ata/sata_sis.c
index fe3ca0989b14..1ad2f62d34b9 100644
--- a/drivers/ata/sata_sis.c
+++ b/drivers/ata/sata_sis.c
@@ -83,6 +83,10 @@ static struct pci_driver sis_pci_driver = {
83 .id_table = sis_pci_tbl, 83 .id_table = sis_pci_tbl,
84 .probe = sis_init_one, 84 .probe = sis_init_one,
85 .remove = ata_pci_remove_one, 85 .remove = ata_pci_remove_one,
86#ifdef CONFIG_PM
87 .suspend = ata_pci_device_suspend,
88 .resume = ata_pci_device_resume,
89#endif
86}; 90};
87 91
88static struct scsi_host_template sis_sht = { 92static struct scsi_host_template sis_sht = {
diff --git a/drivers/base/regmap/regmap-mmio.c b/drivers/base/regmap/regmap-mmio.c
index 98745dd77e8c..81f977510775 100644
--- a/drivers/base/regmap/regmap-mmio.c
+++ b/drivers/base/regmap/regmap-mmio.c
@@ -40,7 +40,7 @@ static int regmap_mmio_gather_write(void *context,
40 40
41 BUG_ON(reg_size != 4); 41 BUG_ON(reg_size != 4);
42 42
43 if (ctx->clk) { 43 if (!IS_ERR(ctx->clk)) {
44 ret = clk_enable(ctx->clk); 44 ret = clk_enable(ctx->clk);
45 if (ret < 0) 45 if (ret < 0)
46 return ret; 46 return ret;
@@ -73,7 +73,7 @@ static int regmap_mmio_gather_write(void *context,
73 offset += ctx->val_bytes; 73 offset += ctx->val_bytes;
74 } 74 }
75 75
76 if (ctx->clk) 76 if (!IS_ERR(ctx->clk))
77 clk_disable(ctx->clk); 77 clk_disable(ctx->clk);
78 78
79 return 0; 79 return 0;
@@ -96,7 +96,7 @@ static int regmap_mmio_read(void *context,
96 96
97 BUG_ON(reg_size != 4); 97 BUG_ON(reg_size != 4);
98 98
99 if (ctx->clk) { 99 if (!IS_ERR(ctx->clk)) {
100 ret = clk_enable(ctx->clk); 100 ret = clk_enable(ctx->clk);
101 if (ret < 0) 101 if (ret < 0)
102 return ret; 102 return ret;
@@ -129,7 +129,7 @@ static int regmap_mmio_read(void *context,
129 offset += ctx->val_bytes; 129 offset += ctx->val_bytes;
130 } 130 }
131 131
132 if (ctx->clk) 132 if (!IS_ERR(ctx->clk))
133 clk_disable(ctx->clk); 133 clk_disable(ctx->clk);
134 134
135 return 0; 135 return 0;
@@ -139,7 +139,7 @@ static void regmap_mmio_free_context(void *context)
139{ 139{
140 struct regmap_mmio_context *ctx = context; 140 struct regmap_mmio_context *ctx = context;
141 141
142 if (ctx->clk) { 142 if (!IS_ERR(ctx->clk)) {
143 clk_unprepare(ctx->clk); 143 clk_unprepare(ctx->clk);
144 clk_put(ctx->clk); 144 clk_put(ctx->clk);
145 } 145 }
@@ -209,6 +209,7 @@ static struct regmap_mmio_context *regmap_mmio_gen_context(struct device *dev,
209 209
210 ctx->regs = regs; 210 ctx->regs = regs;
211 ctx->val_bytes = config->val_bits / 8; 211 ctx->val_bytes = config->val_bits / 8;
212 ctx->clk = ERR_PTR(-ENODEV);
212 213
213 if (clk_id == NULL) 214 if (clk_id == NULL)
214 return ctx; 215 return ctx;
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 9c021d9cace0..c2e002100949 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -1549,7 +1549,7 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
1549 val + (i * val_bytes), 1549 val + (i * val_bytes),
1550 val_bytes); 1550 val_bytes);
1551 if (ret != 0) 1551 if (ret != 0)
1552 return ret; 1552 goto out;
1553 } 1553 }
1554 } else { 1554 } else {
1555 ret = _regmap_raw_write(map, reg, wval, val_bytes * val_count); 1555 ret = _regmap_raw_write(map, reg, wval, val_bytes * val_count);
@@ -1743,7 +1743,7 @@ static int _regmap_read(struct regmap *map, unsigned int reg,
1743/** 1743/**
1744 * regmap_read(): Read a value from a single register 1744 * regmap_read(): Read a value from a single register
1745 * 1745 *
1746 * @map: Register map to write to 1746 * @map: Register map to read from
1747 * @reg: Register to be read from 1747 * @reg: Register to be read from
1748 * @val: Pointer to store read value 1748 * @val: Pointer to store read value
1749 * 1749 *
@@ -1770,7 +1770,7 @@ EXPORT_SYMBOL_GPL(regmap_read);
1770/** 1770/**
1771 * regmap_raw_read(): Read raw data from the device 1771 * regmap_raw_read(): Read raw data from the device
1772 * 1772 *
1773 * @map: Register map to write to 1773 * @map: Register map to read from
1774 * @reg: First register to be read from 1774 * @reg: First register to be read from
1775 * @val: Pointer to store read value 1775 * @val: Pointer to store read value
1776 * @val_len: Size of data to read 1776 * @val_len: Size of data to read
@@ -1882,7 +1882,7 @@ EXPORT_SYMBOL_GPL(regmap_fields_read);
1882/** 1882/**
1883 * regmap_bulk_read(): Read multiple registers from the device 1883 * regmap_bulk_read(): Read multiple registers from the device
1884 * 1884 *
1885 * @map: Register map to write to 1885 * @map: Register map to read from
1886 * @reg: First register to be read from 1886 * @reg: First register to be read from
1887 * @val: Pointer to store read value, in native register size for device 1887 * @val: Pointer to store read value, in native register size for device
1888 * @val_count: Number of registers to read 1888 * @val_count: Number of registers to read
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index ea192ec029c4..83a598ebb65a 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -1,4 +1,5 @@
1#include <linux/module.h> 1#include <linux/module.h>
2
2#include <linux/moduleparam.h> 3#include <linux/moduleparam.h>
3#include <linux/sched.h> 4#include <linux/sched.h>
4#include <linux/fs.h> 5#include <linux/fs.h>
@@ -65,7 +66,7 @@ enum {
65 NULL_Q_MQ = 2, 66 NULL_Q_MQ = 2,
66}; 67};
67 68
68static int submit_queues = 1; 69static int submit_queues;
69module_param(submit_queues, int, S_IRUGO); 70module_param(submit_queues, int, S_IRUGO);
70MODULE_PARM_DESC(submit_queues, "Number of submission queues"); 71MODULE_PARM_DESC(submit_queues, "Number of submission queues");
71 72
@@ -101,9 +102,9 @@ static int hw_queue_depth = 64;
101module_param(hw_queue_depth, int, S_IRUGO); 102module_param(hw_queue_depth, int, S_IRUGO);
102MODULE_PARM_DESC(hw_queue_depth, "Queue depth for each hardware queue. Default: 64"); 103MODULE_PARM_DESC(hw_queue_depth, "Queue depth for each hardware queue. Default: 64");
103 104
104static bool use_per_node_hctx = true; 105static bool use_per_node_hctx = false;
105module_param(use_per_node_hctx, bool, S_IRUGO); 106module_param(use_per_node_hctx, bool, S_IRUGO);
106MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: true"); 107MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: false");
107 108
108static void put_tag(struct nullb_queue *nq, unsigned int tag) 109static void put_tag(struct nullb_queue *nq, unsigned int tag)
109{ 110{
@@ -346,8 +347,37 @@ static int null_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq)
346 347
347static struct blk_mq_hw_ctx *null_alloc_hctx(struct blk_mq_reg *reg, unsigned int hctx_index) 348static struct blk_mq_hw_ctx *null_alloc_hctx(struct blk_mq_reg *reg, unsigned int hctx_index)
348{ 349{
349 return kzalloc_node(sizeof(struct blk_mq_hw_ctx), GFP_KERNEL, 350 int b_size = DIV_ROUND_UP(reg->nr_hw_queues, nr_online_nodes);
350 hctx_index); 351 int tip = (reg->nr_hw_queues % nr_online_nodes);
352 int node = 0, i, n;
353
354 /*
 355 * Split submit queues evenly wrt the number of nodes. If uneven,
356 * fill the first buckets with one extra, until the rest is filled with
357 * no extra.
358 */
359 for (i = 0, n = 1; i < hctx_index; i++, n++) {
360 if (n % b_size == 0) {
361 n = 0;
362 node++;
363
364 tip--;
365 if (!tip)
366 b_size = reg->nr_hw_queues / nr_online_nodes;
367 }
368 }
369
370 /*
371 * A node might not be online, therefore map the relative node id to the
372 * real node id.
373 */
374 for_each_online_node(n) {
375 if (!node)
376 break;
377 node--;
378 }
379
380 return kzalloc_node(sizeof(struct blk_mq_hw_ctx), GFP_KERNEL, n);
351} 381}
352 382
353static void null_free_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_index) 383static void null_free_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_index)
@@ -355,16 +385,24 @@ static void null_free_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_index)
355 kfree(hctx); 385 kfree(hctx);
356} 386}
357 387
388static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
389{
390 BUG_ON(!nullb);
391 BUG_ON(!nq);
392
393 init_waitqueue_head(&nq->wait);
394 nq->queue_depth = nullb->queue_depth;
395}
396
358static int null_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, 397static int null_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
359 unsigned int index) 398 unsigned int index)
360{ 399{
361 struct nullb *nullb = data; 400 struct nullb *nullb = data;
362 struct nullb_queue *nq = &nullb->queues[index]; 401 struct nullb_queue *nq = &nullb->queues[index];
363 402
364 init_waitqueue_head(&nq->wait);
365 nq->queue_depth = nullb->queue_depth;
366 nullb->nr_queues++;
367 hctx->driver_data = nq; 403 hctx->driver_data = nq;
404 null_init_queue(nullb, nq);
405 nullb->nr_queues++;
368 406
369 return 0; 407 return 0;
370} 408}
@@ -387,10 +425,7 @@ static void null_del_dev(struct nullb *nullb)
387 list_del_init(&nullb->list); 425 list_del_init(&nullb->list);
388 426
389 del_gendisk(nullb->disk); 427 del_gendisk(nullb->disk);
390 if (queue_mode == NULL_Q_MQ) 428 blk_cleanup_queue(nullb->q);
391 blk_mq_free_queue(nullb->q);
392 else
393 blk_cleanup_queue(nullb->q);
394 put_disk(nullb->disk); 429 put_disk(nullb->disk);
395 kfree(nullb); 430 kfree(nullb);
396} 431}
@@ -417,13 +452,13 @@ static int setup_commands(struct nullb_queue *nq)
417 452
418 nq->cmds = kzalloc(nq->queue_depth * sizeof(*cmd), GFP_KERNEL); 453 nq->cmds = kzalloc(nq->queue_depth * sizeof(*cmd), GFP_KERNEL);
419 if (!nq->cmds) 454 if (!nq->cmds)
420 return 1; 455 return -ENOMEM;
421 456
422 tag_size = ALIGN(nq->queue_depth, BITS_PER_LONG) / BITS_PER_LONG; 457 tag_size = ALIGN(nq->queue_depth, BITS_PER_LONG) / BITS_PER_LONG;
423 nq->tag_map = kzalloc(tag_size * sizeof(unsigned long), GFP_KERNEL); 458 nq->tag_map = kzalloc(tag_size * sizeof(unsigned long), GFP_KERNEL);
424 if (!nq->tag_map) { 459 if (!nq->tag_map) {
425 kfree(nq->cmds); 460 kfree(nq->cmds);
426 return 1; 461 return -ENOMEM;
427 } 462 }
428 463
429 for (i = 0; i < nq->queue_depth; i++) { 464 for (i = 0; i < nq->queue_depth; i++) {
@@ -454,33 +489,37 @@ static void cleanup_queues(struct nullb *nullb)
454 489
455static int setup_queues(struct nullb *nullb) 490static int setup_queues(struct nullb *nullb)
456{ 491{
457 struct nullb_queue *nq; 492 nullb->queues = kzalloc(submit_queues * sizeof(struct nullb_queue),
458 int i; 493 GFP_KERNEL);
459
460 nullb->queues = kzalloc(submit_queues * sizeof(*nq), GFP_KERNEL);
461 if (!nullb->queues) 494 if (!nullb->queues)
462 return 1; 495 return -ENOMEM;
463 496
464 nullb->nr_queues = 0; 497 nullb->nr_queues = 0;
465 nullb->queue_depth = hw_queue_depth; 498 nullb->queue_depth = hw_queue_depth;
466 499
467 if (queue_mode == NULL_Q_MQ) 500 return 0;
468 return 0; 501}
502
503static int init_driver_queues(struct nullb *nullb)
504{
505 struct nullb_queue *nq;
506 int i, ret = 0;
469 507
470 for (i = 0; i < submit_queues; i++) { 508 for (i = 0; i < submit_queues; i++) {
471 nq = &nullb->queues[i]; 509 nq = &nullb->queues[i];
472 init_waitqueue_head(&nq->wait); 510
473 nq->queue_depth = hw_queue_depth; 511 null_init_queue(nullb, nq);
474 if (setup_commands(nq)) 512
475 break; 513 ret = setup_commands(nq);
514 if (ret)
515 goto err_queue;
476 nullb->nr_queues++; 516 nullb->nr_queues++;
477 } 517 }
478 518
479 if (i == submit_queues) 519 return 0;
480 return 0; 520err_queue:
481
482 cleanup_queues(nullb); 521 cleanup_queues(nullb);
483 return 1; 522 return ret;
484} 523}
485 524
486static int null_add_dev(void) 525static int null_add_dev(void)
@@ -495,34 +534,36 @@ static int null_add_dev(void)
495 534
496 spin_lock_init(&nullb->lock); 535 spin_lock_init(&nullb->lock);
497 536
537 if (queue_mode == NULL_Q_MQ && use_per_node_hctx)
538 submit_queues = nr_online_nodes;
539
498 if (setup_queues(nullb)) 540 if (setup_queues(nullb))
499 goto err; 541 goto err;
500 542
501 if (queue_mode == NULL_Q_MQ) { 543 if (queue_mode == NULL_Q_MQ) {
502 null_mq_reg.numa_node = home_node; 544 null_mq_reg.numa_node = home_node;
503 null_mq_reg.queue_depth = hw_queue_depth; 545 null_mq_reg.queue_depth = hw_queue_depth;
546 null_mq_reg.nr_hw_queues = submit_queues;
504 547
505 if (use_per_node_hctx) { 548 if (use_per_node_hctx) {
506 null_mq_reg.ops->alloc_hctx = null_alloc_hctx; 549 null_mq_reg.ops->alloc_hctx = null_alloc_hctx;
507 null_mq_reg.ops->free_hctx = null_free_hctx; 550 null_mq_reg.ops->free_hctx = null_free_hctx;
508
509 null_mq_reg.nr_hw_queues = nr_online_nodes;
510 } else { 551 } else {
511 null_mq_reg.ops->alloc_hctx = blk_mq_alloc_single_hw_queue; 552 null_mq_reg.ops->alloc_hctx = blk_mq_alloc_single_hw_queue;
512 null_mq_reg.ops->free_hctx = blk_mq_free_single_hw_queue; 553 null_mq_reg.ops->free_hctx = blk_mq_free_single_hw_queue;
513
514 null_mq_reg.nr_hw_queues = submit_queues;
515 } 554 }
516 555
517 nullb->q = blk_mq_init_queue(&null_mq_reg, nullb); 556 nullb->q = blk_mq_init_queue(&null_mq_reg, nullb);
518 } else if (queue_mode == NULL_Q_BIO) { 557 } else if (queue_mode == NULL_Q_BIO) {
519 nullb->q = blk_alloc_queue_node(GFP_KERNEL, home_node); 558 nullb->q = blk_alloc_queue_node(GFP_KERNEL, home_node);
520 blk_queue_make_request(nullb->q, null_queue_bio); 559 blk_queue_make_request(nullb->q, null_queue_bio);
560 init_driver_queues(nullb);
521 } else { 561 } else {
522 nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock, home_node); 562 nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock, home_node);
523 blk_queue_prep_rq(nullb->q, null_rq_prep_fn); 563 blk_queue_prep_rq(nullb->q, null_rq_prep_fn);
524 if (nullb->q) 564 if (nullb->q)
525 blk_queue_softirq_done(nullb->q, null_softirq_done_fn); 565 blk_queue_softirq_done(nullb->q, null_softirq_done_fn);
566 init_driver_queues(nullb);
526 } 567 }
527 568
528 if (!nullb->q) 569 if (!nullb->q)
@@ -534,10 +575,7 @@ static int null_add_dev(void)
534 disk = nullb->disk = alloc_disk_node(1, home_node); 575 disk = nullb->disk = alloc_disk_node(1, home_node);
535 if (!disk) { 576 if (!disk) {
536queue_fail: 577queue_fail:
537 if (queue_mode == NULL_Q_MQ) 578 blk_cleanup_queue(nullb->q);
538 blk_mq_free_queue(nullb->q);
539 else
540 blk_cleanup_queue(nullb->q);
541 cleanup_queues(nullb); 579 cleanup_queues(nullb);
542err: 580err:
543 kfree(nullb); 581 kfree(nullb);
@@ -579,7 +617,13 @@ static int __init null_init(void)
579 } 617 }
580#endif 618#endif
581 619
582 if (submit_queues > nr_cpu_ids) 620 if (queue_mode == NULL_Q_MQ && use_per_node_hctx) {
621 if (submit_queues < nr_online_nodes) {
622 pr_warn("null_blk: submit_queues param is set to %u.",
623 nr_online_nodes);
624 submit_queues = nr_online_nodes;
625 }
626 } else if (submit_queues > nr_cpu_ids)
583 submit_queues = nr_cpu_ids; 627 submit_queues = nr_cpu_ids;
584 else if (!submit_queues) 628 else if (!submit_queues)
585 submit_queues = 1; 629 submit_queues = 1;
diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c
index 9199c93be926..eb6e1e0e8db2 100644
--- a/drivers/block/skd_main.c
+++ b/drivers/block/skd_main.c
@@ -5269,7 +5269,7 @@ const char *skd_skdev_state_to_str(enum skd_drvr_state state)
5269 } 5269 }
5270} 5270}
5271 5271
5272const char *skd_skmsg_state_to_str(enum skd_fit_msg_state state) 5272static const char *skd_skmsg_state_to_str(enum skd_fit_msg_state state)
5273{ 5273{
5274 switch (state) { 5274 switch (state) {
5275 case SKD_MSG_STATE_IDLE: 5275 case SKD_MSG_STATE_IDLE:
@@ -5281,7 +5281,7 @@ const char *skd_skmsg_state_to_str(enum skd_fit_msg_state state)
5281 } 5281 }
5282} 5282}
5283 5283
5284const char *skd_skreq_state_to_str(enum skd_req_state state) 5284static const char *skd_skreq_state_to_str(enum skd_req_state state)
5285{ 5285{
5286 switch (state) { 5286 switch (state) {
5287 case SKD_REQ_STATE_IDLE: 5287 case SKD_REQ_STATE_IDLE:
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 432db1b59b00..c4a4c9006288 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -489,7 +489,7 @@ static int blkif_queue_request(struct request *req)
489 489
490 if ((ring_req->operation == BLKIF_OP_INDIRECT) && 490 if ((ring_req->operation == BLKIF_OP_INDIRECT) &&
491 (i % SEGS_PER_INDIRECT_FRAME == 0)) { 491 (i % SEGS_PER_INDIRECT_FRAME == 0)) {
492 unsigned long pfn; 492 unsigned long uninitialized_var(pfn);
493 493
494 if (segments) 494 if (segments)
495 kunmap_atomic(segments); 495 kunmap_atomic(segments);
@@ -2011,6 +2011,10 @@ static void blkif_release(struct gendisk *disk, fmode_t mode)
2011 2011
2012 bdev = bdget_disk(disk, 0); 2012 bdev = bdget_disk(disk, 0);
2013 2013
2014 if (!bdev) {
2015 WARN(1, "Block device %s yanked out from us!\n", disk->disk_name);
2016 goto out_mutex;
2017 }
2014 if (bdev->bd_openers) 2018 if (bdev->bd_openers)
2015 goto out; 2019 goto out;
2016 2020
@@ -2041,6 +2045,7 @@ static void blkif_release(struct gendisk *disk, fmode_t mode)
2041 2045
2042out: 2046out:
2043 bdput(bdev); 2047 bdput(bdev);
2048out_mutex:
2044 mutex_unlock(&blkfront_mutex); 2049 mutex_unlock(&blkfront_mutex);
2045} 2050}
2046 2051
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index 6bfc1bb318f6..dceb85f8d9a8 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -87,6 +87,7 @@ static const struct usb_device_id ath3k_table[] = {
87 { USB_DEVICE(0x0CF3, 0xE004) }, 87 { USB_DEVICE(0x0CF3, 0xE004) },
88 { USB_DEVICE(0x0CF3, 0xE005) }, 88 { USB_DEVICE(0x0CF3, 0xE005) },
89 { USB_DEVICE(0x0930, 0x0219) }, 89 { USB_DEVICE(0x0930, 0x0219) },
90 { USB_DEVICE(0x0930, 0x0220) },
90 { USB_DEVICE(0x0489, 0xe057) }, 91 { USB_DEVICE(0x0489, 0xe057) },
91 { USB_DEVICE(0x13d3, 0x3393) }, 92 { USB_DEVICE(0x13d3, 0x3393) },
92 { USB_DEVICE(0x0489, 0xe04e) }, 93 { USB_DEVICE(0x0489, 0xe04e) },
@@ -129,6 +130,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
129 { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 }, 130 { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
130 { USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 }, 131 { USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 },
131 { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 }, 132 { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
133 { USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
132 { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 }, 134 { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
133 { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 }, 135 { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
134 { USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 }, 136 { USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 },
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index c0ff34f2d2df..3980fd18f6ea 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -154,6 +154,7 @@ static const struct usb_device_id blacklist_table[] = {
154 { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 }, 154 { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
155 { USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 }, 155 { USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 },
156 { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 }, 156 { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
157 { USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
157 { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 }, 158 { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
158 { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 }, 159 { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
159 { USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 }, 160 { USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 },
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index c206de2951f2..2f2b08457c67 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -165,6 +165,19 @@ config HW_RANDOM_OMAP
165 165
166 If unsure, say Y. 166 If unsure, say Y.
167 167
168config HW_RANDOM_OMAP3_ROM
169 tristate "OMAP3 ROM Random Number Generator support"
170 depends on HW_RANDOM && ARCH_OMAP3
171 default HW_RANDOM
172 ---help---
173 This driver provides kernel-side support for the Random Number
174 Generator hardware found on OMAP34xx processors.
175
176 To compile this driver as a module, choose M here: the
177 module will be called omap3-rom-rng.
178
179 If unsure, say Y.
180
168config HW_RANDOM_OCTEON 181config HW_RANDOM_OCTEON
169 tristate "Octeon Random Number Generator support" 182 tristate "Octeon Random Number Generator support"
170 depends on HW_RANDOM && CAVIUM_OCTEON_SOC 183 depends on HW_RANDOM && CAVIUM_OCTEON_SOC
@@ -327,3 +340,15 @@ config HW_RANDOM_TPM
327 module will be called tpm-rng. 340 module will be called tpm-rng.
328 341
329 If unsure, say Y. 342 If unsure, say Y.
343
344config HW_RANDOM_MSM
345 tristate "Qualcomm MSM Random Number Generator support"
346 depends on HW_RANDOM && ARCH_MSM
347 ---help---
348 This driver provides kernel-side support for the Random Number
349 Generator hardware found on Qualcomm MSM SoCs.
350
 351 To compile this driver as a module, choose M here: the
352 module will be called msm-rng.
353
354 If unsure, say Y.
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile
index d7d2435ff7fa..3ae7755a52e7 100644
--- a/drivers/char/hw_random/Makefile
+++ b/drivers/char/hw_random/Makefile
@@ -15,6 +15,7 @@ n2-rng-y := n2-drv.o n2-asm.o
15obj-$(CONFIG_HW_RANDOM_VIA) += via-rng.o 15obj-$(CONFIG_HW_RANDOM_VIA) += via-rng.o
16obj-$(CONFIG_HW_RANDOM_IXP4XX) += ixp4xx-rng.o 16obj-$(CONFIG_HW_RANDOM_IXP4XX) += ixp4xx-rng.o
17obj-$(CONFIG_HW_RANDOM_OMAP) += omap-rng.o 17obj-$(CONFIG_HW_RANDOM_OMAP) += omap-rng.o
18obj-$(CONFIG_HW_RANDOM_OMAP3_ROM) += omap3-rom-rng.o
18obj-$(CONFIG_HW_RANDOM_PASEMI) += pasemi-rng.o 19obj-$(CONFIG_HW_RANDOM_PASEMI) += pasemi-rng.o
19obj-$(CONFIG_HW_RANDOM_VIRTIO) += virtio-rng.o 20obj-$(CONFIG_HW_RANDOM_VIRTIO) += virtio-rng.o
20obj-$(CONFIG_HW_RANDOM_TX4939) += tx4939-rng.o 21obj-$(CONFIG_HW_RANDOM_TX4939) += tx4939-rng.o
@@ -28,3 +29,4 @@ obj-$(CONFIG_HW_RANDOM_POWERNV) += powernv-rng.o
28obj-$(CONFIG_HW_RANDOM_EXYNOS) += exynos-rng.o 29obj-$(CONFIG_HW_RANDOM_EXYNOS) += exynos-rng.o
29obj-$(CONFIG_HW_RANDOM_TPM) += tpm-rng.o 30obj-$(CONFIG_HW_RANDOM_TPM) += tpm-rng.o
30obj-$(CONFIG_HW_RANDOM_BCM2835) += bcm2835-rng.o 31obj-$(CONFIG_HW_RANDOM_BCM2835) += bcm2835-rng.o
32obj-$(CONFIG_HW_RANDOM_MSM) += msm-rng.o
diff --git a/drivers/char/hw_random/msm-rng.c b/drivers/char/hw_random/msm-rng.c
new file mode 100644
index 000000000000..148521e51dc6
--- /dev/null
+++ b/drivers/char/hw_random/msm-rng.c
@@ -0,0 +1,197 @@
1/*
2 * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 */
14#include <linux/clk.h>
15#include <linux/err.h>
16#include <linux/hw_random.h>
17#include <linux/io.h>
18#include <linux/module.h>
19#include <linux/of.h>
20#include <linux/platform_device.h>
21
22/* Device specific register offsets */
23#define PRNG_DATA_OUT 0x0000
24#define PRNG_STATUS 0x0004
25#define PRNG_LFSR_CFG 0x0100
26#define PRNG_CONFIG 0x0104
27
28/* Device specific register masks and config values */
29#define PRNG_LFSR_CFG_MASK 0x0000ffff
30#define PRNG_LFSR_CFG_CLOCKS 0x0000dddd
31#define PRNG_CONFIG_HW_ENABLE BIT(1)
32#define PRNG_STATUS_DATA_AVAIL BIT(0)
33
34#define MAX_HW_FIFO_DEPTH 16
35#define MAX_HW_FIFO_SIZE (MAX_HW_FIFO_DEPTH * 4)
36#define WORD_SZ 4
37
38struct msm_rng {
39 void __iomem *base;
40 struct clk *clk;
41 struct hwrng hwrng;
42};
43
44#define to_msm_rng(p) container_of(p, struct msm_rng, hwrng)
45
46static int msm_rng_enable(struct hwrng *hwrng, int enable)
47{
48 struct msm_rng *rng = to_msm_rng(hwrng);
49 u32 val;
50 int ret;
51
52 ret = clk_prepare_enable(rng->clk);
53 if (ret)
54 return ret;
55
56 if (enable) {
57 /* Enable PRNG only if it is not already enabled */
58 val = readl_relaxed(rng->base + PRNG_CONFIG);
59 if (val & PRNG_CONFIG_HW_ENABLE)
60 goto already_enabled;
61
62 val = readl_relaxed(rng->base + PRNG_LFSR_CFG);
63 val &= ~PRNG_LFSR_CFG_MASK;
64 val |= PRNG_LFSR_CFG_CLOCKS;
65 writel(val, rng->base + PRNG_LFSR_CFG);
66
67 val = readl_relaxed(rng->base + PRNG_CONFIG);
68 val |= PRNG_CONFIG_HW_ENABLE;
69 writel(val, rng->base + PRNG_CONFIG);
70 } else {
71 val = readl_relaxed(rng->base + PRNG_CONFIG);
72 val &= ~PRNG_CONFIG_HW_ENABLE;
73 writel(val, rng->base + PRNG_CONFIG);
74 }
75
76already_enabled:
77 clk_disable_unprepare(rng->clk);
78 return 0;
79}
80
81static int msm_rng_read(struct hwrng *hwrng, void *data, size_t max, bool wait)
82{
83 struct msm_rng *rng = to_msm_rng(hwrng);
84 size_t currsize = 0;
85 u32 *retdata = data;
86 size_t maxsize;
87 int ret;
88 u32 val;
89
90 /* calculate max size bytes to transfer back to caller */
91 maxsize = min_t(size_t, MAX_HW_FIFO_SIZE, max);
92
93 /* no room for word data */
94 if (maxsize < WORD_SZ)
95 return 0;
96
97 ret = clk_prepare_enable(rng->clk);
98 if (ret)
99 return ret;
100
101 /* read random data from hardware */
102 do {
103 val = readl_relaxed(rng->base + PRNG_STATUS);
104 if (!(val & PRNG_STATUS_DATA_AVAIL))
105 break;
106
107 val = readl_relaxed(rng->base + PRNG_DATA_OUT);
108 if (!val)
109 break;
110
111 *retdata++ = val;
112 currsize += WORD_SZ;
113
114 /* make sure we stay on 32bit boundary */
115 if ((maxsize - currsize) < WORD_SZ)
116 break;
117 } while (currsize < maxsize);
118
119 clk_disable_unprepare(rng->clk);
120
121 return currsize;
122}
123
124static int msm_rng_init(struct hwrng *hwrng)
125{
126 return msm_rng_enable(hwrng, 1);
127}
128
129static void msm_rng_cleanup(struct hwrng *hwrng)
130{
131 msm_rng_enable(hwrng, 0);
132}
133
134static int msm_rng_probe(struct platform_device *pdev)
135{
136 struct resource *res;
137 struct msm_rng *rng;
138 int ret;
139
140 rng = devm_kzalloc(&pdev->dev, sizeof(*rng), GFP_KERNEL);
141 if (!rng)
142 return -ENOMEM;
143
144 platform_set_drvdata(pdev, rng);
145
146 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
147 rng->base = devm_ioremap_resource(&pdev->dev, res);
148 if (IS_ERR(rng->base))
149 return PTR_ERR(rng->base);
150
151 rng->clk = devm_clk_get(&pdev->dev, "core");
152 if (IS_ERR(rng->clk))
153 return PTR_ERR(rng->clk);
154
155 rng->hwrng.name = KBUILD_MODNAME,
156 rng->hwrng.init = msm_rng_init,
157 rng->hwrng.cleanup = msm_rng_cleanup,
158 rng->hwrng.read = msm_rng_read,
159
160 ret = hwrng_register(&rng->hwrng);
161 if (ret) {
162 dev_err(&pdev->dev, "failed to register hwrng\n");
163 return ret;
164 }
165
166 return 0;
167}
168
169static int msm_rng_remove(struct platform_device *pdev)
170{
171 struct msm_rng *rng = platform_get_drvdata(pdev);
172
173 hwrng_unregister(&rng->hwrng);
174 return 0;
175}
176
177static const struct of_device_id msm_rng_of_match[] = {
178 { .compatible = "qcom,prng", },
179 {}
180};
181MODULE_DEVICE_TABLE(of, msm_rng_of_match);
182
183static struct platform_driver msm_rng_driver = {
184 .probe = msm_rng_probe,
185 .remove = msm_rng_remove,
186 .driver = {
187 .name = KBUILD_MODNAME,
188 .owner = THIS_MODULE,
189 .of_match_table = of_match_ptr(msm_rng_of_match),
190 }
191};
192module_platform_driver(msm_rng_driver);
193
194MODULE_ALIAS("platform:" KBUILD_MODNAME);
195MODULE_AUTHOR("The Linux Foundation");
196MODULE_DESCRIPTION("Qualcomm MSM random number generator driver");
197MODULE_LICENSE("GPL v2");
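For context, the msm-rng driver above plugs into the generic hw_random core: a back-end fills in a struct hwrng and calls hwrng_register(), and the core then invokes ->read() for up to "max" bytes, expecting the number of bytes actually produced or a negative errno. A minimal sketch of that shape follows (illustrative only; all demo_* names are placeholders, not part of the patch):

#include <linux/hw_random.h>
#include <linux/module.h>
#include <linux/string.h>

static int demo_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
	/* a real back-end copies hardware entropy into "data" here */
	memset(data, 0, max);		/* placeholder bytes only */
	return max;			/* number of bytes produced */
}

static struct hwrng demo_rng = {
	.name = "demo-rng",
	.read = demo_rng_read,
};

static int __init demo_rng_init(void)
{
	return hwrng_register(&demo_rng);
}
module_init(demo_rng_init);

static void __exit demo_rng_exit(void)
{
	hwrng_unregister(&demo_rng);
}
module_exit(demo_rng_exit);

MODULE_LICENSE("GPL");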
diff --git a/drivers/char/hw_random/omap3-rom-rng.c b/drivers/char/hw_random/omap3-rom-rng.c
new file mode 100644
index 000000000000..c853e9e68573
--- /dev/null
+++ b/drivers/char/hw_random/omap3-rom-rng.c
@@ -0,0 +1,141 @@
1/*
2 * omap3-rom-rng.c - RNG driver for TI OMAP3 CPU family
3 *
4 * Copyright (C) 2009 Nokia Corporation
5 * Author: Juha Yrjola <juha.yrjola@solidboot.com>
6 *
7 * Copyright (C) 2013 Pali Rohár <pali.rohar@gmail.com>
8 *
9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any
11 * warranty of any kind, whether express or implied.
12 */
13
14#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15
16#include <linux/module.h>
17#include <linux/init.h>
18#include <linux/random.h>
19#include <linux/hw_random.h>
20#include <linux/timer.h>
21#include <linux/clk.h>
22#include <linux/err.h>
23#include <linux/platform_device.h>
24
25#define RNG_RESET 0x01
26#define RNG_GEN_PRNG_HW_INIT 0x02
27#define RNG_GEN_HW 0x08
28
29/* param1: ptr, param2: count, param3: flag */
30static u32 (*omap3_rom_rng_call)(u32, u32, u32);
31
32static struct timer_list idle_timer;
33static int rng_idle;
34static struct clk *rng_clk;
35
36static void omap3_rom_rng_idle(unsigned long data)
37{
38 int r;
39
40 r = omap3_rom_rng_call(0, 0, RNG_RESET);
41 if (r != 0) {
42 pr_err("reset failed: %d\n", r);
43 return;
44 }
45 clk_disable_unprepare(rng_clk);
46 rng_idle = 1;
47}
48
49static int omap3_rom_rng_get_random(void *buf, unsigned int count)
50{
51 u32 r;
52 u32 ptr;
53
54 del_timer_sync(&idle_timer);
55 if (rng_idle) {
56 clk_prepare_enable(rng_clk);
57 r = omap3_rom_rng_call(0, 0, RNG_GEN_PRNG_HW_INIT);
58 if (r != 0) {
59 clk_disable_unprepare(rng_clk);
60 pr_err("HW init failed: %d\n", r);
61 return -EIO;
62 }
63 rng_idle = 0;
64 }
65
66 ptr = virt_to_phys(buf);
67 r = omap3_rom_rng_call(ptr, count, RNG_GEN_HW);
68 mod_timer(&idle_timer, jiffies + msecs_to_jiffies(500));
69 if (r != 0)
70 return -EINVAL;
71 return 0;
72}
73
74static int omap3_rom_rng_data_present(struct hwrng *rng, int wait)
75{
76 return 1;
77}
78
79static int omap3_rom_rng_data_read(struct hwrng *rng, u32 *data)
80{
81 int r;
82
83 r = omap3_rom_rng_get_random(data, 4);
84 if (r < 0)
85 return r;
86 return 4;
87}
88
89static struct hwrng omap3_rom_rng_ops = {
90 .name = "omap3-rom",
91 .data_present = omap3_rom_rng_data_present,
92 .data_read = omap3_rom_rng_data_read,
93};
94
95static int omap3_rom_rng_probe(struct platform_device *pdev)
96{
97 pr_info("initializing\n");
98
99 omap3_rom_rng_call = pdev->dev.platform_data;
100 if (!omap3_rom_rng_call) {
101 pr_err("omap3_rom_rng_call is NULL\n");
102 return -EINVAL;
103 }
104
105 setup_timer(&idle_timer, omap3_rom_rng_idle, 0);
106 rng_clk = clk_get(&pdev->dev, "ick");
107 if (IS_ERR(rng_clk)) {
108 pr_err("unable to get RNG clock\n");
109 return PTR_ERR(rng_clk);
110 }
111
112 /* Leave the RNG in reset state. */
113 clk_prepare_enable(rng_clk);
114 omap3_rom_rng_idle(0);
115
116 return hwrng_register(&omap3_rom_rng_ops);
117}
118
119static int omap3_rom_rng_remove(struct platform_device *pdev)
120{
121 hwrng_unregister(&omap3_rom_rng_ops);
122 clk_disable_unprepare(rng_clk);
123 clk_put(rng_clk);
124 return 0;
125}
126
127static struct platform_driver omap3_rom_rng_driver = {
128 .driver = {
129 .name = "omap3-rom-rng",
130 .owner = THIS_MODULE,
131 },
132 .probe = omap3_rom_rng_probe,
133 .remove = omap3_rom_rng_remove,
134};
135
136module_platform_driver(omap3_rom_rng_driver);
137
138MODULE_ALIAS("platform:omap3-rom-rng");
139MODULE_AUTHOR("Juha Yrjola");
140MODULE_AUTHOR("Pali Rohár <pali.rohar@gmail.com>");
141MODULE_LICENSE("GPL");
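The omap3-rom-rng driver above receives the secure-ROM entry point through platform data rather than discovering it itself, so a board file has to register a matching platform device. A hypothetical sketch of that glue (board_secure_rng_call stands in for the real secure-monitor trampoline; none of these names come from the patch):

#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/types.h>

/* hypothetical wrapper around the OMAP3 secure ROM RNG service */
static u32 board_secure_rng_call(u32 ptr, u32 count, u32 flag)
{
	return 0;	/* would issue the secure monitor call here */
}

static struct platform_device board_rom_rng_device = {
	.name	= "omap3-rom-rng",
	.id	= -1,
	.dev	= {
		.platform_data = board_secure_rng_call,
	},
};

/* called from the board's init_machine hook */
static void __init board_rng_init(void)
{
	platform_device_register(&board_rom_rng_device);
}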
diff --git a/drivers/char/hw_random/pseries-rng.c b/drivers/char/hw_random/pseries-rng.c
index b761459a3436..ab7ffdec0ec3 100644
--- a/drivers/char/hw_random/pseries-rng.c
+++ b/drivers/char/hw_random/pseries-rng.c
@@ -24,7 +24,6 @@
24#include <linux/hw_random.h> 24#include <linux/hw_random.h>
25#include <asm/vio.h> 25#include <asm/vio.h>
26 26
27#define MODULE_NAME "pseries-rng"
28 27
29static int pseries_rng_data_read(struct hwrng *rng, u32 *data) 28static int pseries_rng_data_read(struct hwrng *rng, u32 *data)
30{ 29{
@@ -55,7 +54,7 @@ static unsigned long pseries_rng_get_desired_dma(struct vio_dev *vdev)
55}; 54};
56 55
57static struct hwrng pseries_rng = { 56static struct hwrng pseries_rng = {
58 .name = MODULE_NAME, 57 .name = KBUILD_MODNAME,
59 .data_read = pseries_rng_data_read, 58 .data_read = pseries_rng_data_read,
60}; 59};
61 60
@@ -78,7 +77,7 @@ static struct vio_device_id pseries_rng_driver_ids[] = {
78MODULE_DEVICE_TABLE(vio, pseries_rng_driver_ids); 77MODULE_DEVICE_TABLE(vio, pseries_rng_driver_ids);
79 78
80static struct vio_driver pseries_rng_driver = { 79static struct vio_driver pseries_rng_driver = {
81 .name = MODULE_NAME, 80 .name = KBUILD_MODNAME,
82 .probe = pseries_rng_probe, 81 .probe = pseries_rng_probe,
83 .remove = pseries_rng_remove, 82 .remove = pseries_rng_remove,
84 .get_desired_dma = pseries_rng_get_desired_dma, 83 .get_desired_dma = pseries_rng_get_desired_dma,
diff --git a/drivers/char/hw_random/via-rng.c b/drivers/char/hw_random/via-rng.c
index e737772ad69a..de5a6dcfb3e2 100644
--- a/drivers/char/hw_random/via-rng.c
+++ b/drivers/char/hw_random/via-rng.c
@@ -221,7 +221,7 @@ static void __exit mod_exit(void)
221module_init(mod_init); 221module_init(mod_init);
222module_exit(mod_exit); 222module_exit(mod_exit);
223 223
224static struct x86_cpu_id via_rng_cpu_id[] = { 224static struct x86_cpu_id __maybe_unused via_rng_cpu_id[] = {
225 X86_FEATURE_MATCH(X86_FEATURE_XSTORE), 225 X86_FEATURE_MATCH(X86_FEATURE_XSTORE),
226 {} 226 {}
227}; 227};
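The __maybe_unused added to via_rng_cpu_id follows the usual pattern for CPU-feature module-alias tables: the array may only ever be referenced through MODULE_DEVICE_TABLE(), which expands to nothing in built-in (non-modular) builds, so the attribute silences the resulting "defined but not used" warning. A self-contained sketch of that pattern, under the assumption that this is the motivation here (demo_cpu_id is a placeholder name):

#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <asm/cpu_device_id.h>
#include <asm/cpufeature.h>

static struct x86_cpu_id __maybe_unused demo_cpu_id[] = {
	X86_FEATURE_MATCH(X86_FEATURE_XSTORE),	/* any CPU advertising XSTORE */
	{}
};
MODULE_DEVICE_TABLE(x86cpu, demo_cpu_id);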
diff --git a/drivers/char/i8k.c b/drivers/char/i8k.c
index 40cc0cf2ded6..e6939e13e338 100644
--- a/drivers/char/i8k.c
+++ b/drivers/char/i8k.c
@@ -664,6 +664,13 @@ static struct dmi_system_id __initdata i8k_dmi_table[] = {
664 DMI_MATCH(DMI_PRODUCT_NAME, "Vostro"), 664 DMI_MATCH(DMI_PRODUCT_NAME, "Vostro"),
665 }, 665 },
666 }, 666 },
667 {
668 .ident = "Dell XPS421",
669 .matches = {
670 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
671 DMI_MATCH(DMI_PRODUCT_NAME, "XPS L421X"),
672 },
673 },
667 { } 674 { }
668}; 675};
669 676
diff --git a/drivers/char/tpm/tpm_ppi.c b/drivers/char/tpm/tpm_ppi.c
index 8e562dc65601..e1f3337a0cf9 100644
--- a/drivers/char/tpm/tpm_ppi.c
+++ b/drivers/char/tpm/tpm_ppi.c
@@ -27,15 +27,18 @@ static char *tpm_device_name = "TPM";
27static acpi_status ppi_callback(acpi_handle handle, u32 level, void *context, 27static acpi_status ppi_callback(acpi_handle handle, u32 level, void *context,
28 void **return_value) 28 void **return_value)
29{ 29{
30 acpi_status status; 30 acpi_status status = AE_OK;
31 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; 31 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
32 status = acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer); 32
33 if (strstr(buffer.pointer, context) != NULL) { 33 if (ACPI_SUCCESS(acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer))) {
34 *return_value = handle; 34 if (strstr(buffer.pointer, context) != NULL) {
35 *return_value = handle;
36 status = AE_CTRL_TERMINATE;
37 }
35 kfree(buffer.pointer); 38 kfree(buffer.pointer);
36 return AE_CTRL_TERMINATE;
37 } 39 }
38 return AE_OK; 40
41 return status;
39} 42}
40 43
41static inline void ppi_assign_params(union acpi_object params[4], 44static inline void ppi_assign_params(union acpi_object params[4],
diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c
index 8d3009e44fba..5543b7df8e16 100644
--- a/drivers/clk/clk-divider.c
+++ b/drivers/clk/clk-divider.c
@@ -87,7 +87,7 @@ static unsigned int _get_table_val(const struct clk_div_table *table,
87 return 0; 87 return 0;
88} 88}
89 89
90static unsigned int _get_val(struct clk_divider *divider, u8 div) 90static unsigned int _get_val(struct clk_divider *divider, unsigned int div)
91{ 91{
92 if (divider->flags & CLK_DIVIDER_ONE_BASED) 92 if (divider->flags & CLK_DIVIDER_ONE_BASED)
93 return div; 93 return div;
diff --git a/drivers/clk/clk-s2mps11.c b/drivers/clk/clk-s2mps11.c
index 7be41e676a64..00a3abe103a5 100644
--- a/drivers/clk/clk-s2mps11.c
+++ b/drivers/clk/clk-s2mps11.c
@@ -60,7 +60,7 @@ static int s2mps11_clk_prepare(struct clk_hw *hw)
60 struct s2mps11_clk *s2mps11 = to_s2mps11_clk(hw); 60 struct s2mps11_clk *s2mps11 = to_s2mps11_clk(hw);
61 int ret; 61 int ret;
62 62
63 ret = regmap_update_bits(s2mps11->iodev->regmap, 63 ret = regmap_update_bits(s2mps11->iodev->regmap_pmic,
64 S2MPS11_REG_RTC_CTRL, 64 S2MPS11_REG_RTC_CTRL,
65 s2mps11->mask, s2mps11->mask); 65 s2mps11->mask, s2mps11->mask);
66 if (!ret) 66 if (!ret)
@@ -74,7 +74,7 @@ static void s2mps11_clk_unprepare(struct clk_hw *hw)
74 struct s2mps11_clk *s2mps11 = to_s2mps11_clk(hw); 74 struct s2mps11_clk *s2mps11 = to_s2mps11_clk(hw);
75 int ret; 75 int ret;
76 76
77 ret = regmap_update_bits(s2mps11->iodev->regmap, S2MPS11_REG_RTC_CTRL, 77 ret = regmap_update_bits(s2mps11->iodev->regmap_pmic, S2MPS11_REG_RTC_CTRL,
78 s2mps11->mask, ~s2mps11->mask); 78 s2mps11->mask, ~s2mps11->mask);
79 79
80 if (!ret) 80 if (!ret)
@@ -174,7 +174,7 @@ static int s2mps11_clk_probe(struct platform_device *pdev)
174 s2mps11_clk->hw.init = &s2mps11_clks_init[i]; 174 s2mps11_clk->hw.init = &s2mps11_clks_init[i];
175 s2mps11_clk->mask = 1 << i; 175 s2mps11_clk->mask = 1 << i;
176 176
177 ret = regmap_read(s2mps11_clk->iodev->regmap, 177 ret = regmap_read(s2mps11_clk->iodev->regmap_pmic,
178 S2MPS11_REG_RTC_CTRL, &val); 178 S2MPS11_REG_RTC_CTRL, &val);
179 if (ret < 0) 179 if (ret < 0)
180 goto err_reg; 180 goto err_reg;
diff --git a/drivers/clk/samsung/clk-exynos-audss.c b/drivers/clk/samsung/clk-exynos-audss.c
index 39b40aaede2b..68e515d093d8 100644
--- a/drivers/clk/samsung/clk-exynos-audss.c
+++ b/drivers/clk/samsung/clk-exynos-audss.c
@@ -26,17 +26,17 @@ static struct clk_onecell_data clk_data;
26#define ASS_CLK_DIV 0x4 26#define ASS_CLK_DIV 0x4
27#define ASS_CLK_GATE 0x8 27#define ASS_CLK_GATE 0x8
28 28
29/* list of all parent clock list */
30static const char *mout_audss_p[] = { "fin_pll", "fout_epll" };
31static const char *mout_i2s_p[] = { "mout_audss", "cdclk0", "sclk_audio0" };
32
33#ifdef CONFIG_PM_SLEEP
29static unsigned long reg_save[][2] = { 34static unsigned long reg_save[][2] = {
30 {ASS_CLK_SRC, 0}, 35 {ASS_CLK_SRC, 0},
31 {ASS_CLK_DIV, 0}, 36 {ASS_CLK_DIV, 0},
32 {ASS_CLK_GATE, 0}, 37 {ASS_CLK_GATE, 0},
33}; 38};
34 39
35/* list of all parent clock list */
36static const char *mout_audss_p[] = { "fin_pll", "fout_epll" };
37static const char *mout_i2s_p[] = { "mout_audss", "cdclk0", "sclk_audio0" };
38
39#ifdef CONFIG_PM_SLEEP
40static int exynos_audss_clk_suspend(void) 40static int exynos_audss_clk_suspend(void)
41{ 41{
42 int i; 42 int i;
diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c
index ad5ff50c5f28..1a7c1b929c69 100644
--- a/drivers/clk/samsung/clk-exynos4.c
+++ b/drivers/clk/samsung/clk-exynos4.c
@@ -39,7 +39,7 @@
39#define SRC_TOP1 0xc214 39#define SRC_TOP1 0xc214
40#define SRC_CAM 0xc220 40#define SRC_CAM 0xc220
41#define SRC_TV 0xc224 41#define SRC_TV 0xc224
42#define SRC_MFC 0xcc28 42#define SRC_MFC 0xc228
43#define SRC_G3D 0xc22c 43#define SRC_G3D 0xc22c
44#define E4210_SRC_IMAGE 0xc230 44#define E4210_SRC_IMAGE 0xc230
45#define SRC_LCD0 0xc234 45#define SRC_LCD0 0xc234
diff --git a/drivers/clk/samsung/clk-exynos5250.c b/drivers/clk/samsung/clk-exynos5250.c
index adf32343c9f9..e52359cf9b6f 100644
--- a/drivers/clk/samsung/clk-exynos5250.c
+++ b/drivers/clk/samsung/clk-exynos5250.c
@@ -25,6 +25,7 @@
25#define MPLL_LOCK 0x4000 25#define MPLL_LOCK 0x4000
26#define MPLL_CON0 0x4100 26#define MPLL_CON0 0x4100
27#define SRC_CORE1 0x4204 27#define SRC_CORE1 0x4204
28#define GATE_IP_ACP 0x8800
28#define CPLL_LOCK 0x10020 29#define CPLL_LOCK 0x10020
29#define EPLL_LOCK 0x10030 30#define EPLL_LOCK 0x10030
30#define VPLL_LOCK 0x10040 31#define VPLL_LOCK 0x10040
@@ -75,7 +76,6 @@
75#define SRC_CDREX 0x20200 76#define SRC_CDREX 0x20200
76#define PLL_DIV2_SEL 0x20a24 77#define PLL_DIV2_SEL 0x20a24
77#define GATE_IP_DISP1 0x10928 78#define GATE_IP_DISP1 0x10928
78#define GATE_IP_ACP 0x10000
79 79
80/* list of PLLs to be registered */ 80/* list of PLLs to be registered */
81enum exynos5250_plls { 81enum exynos5250_plls {
@@ -120,7 +120,8 @@ enum exynos5250_clks {
120 spi2, i2s1, i2s2, pcm1, pcm2, pwm, spdif, ac97, hsi2c0, hsi2c1, hsi2c2, 120 spi2, i2s1, i2s2, pcm1, pcm2, pwm, spdif, ac97, hsi2c0, hsi2c1, hsi2c2,
121 hsi2c3, chipid, sysreg, pmu, cmu_top, cmu_core, cmu_mem, tzpc0, tzpc1, 121 hsi2c3, chipid, sysreg, pmu, cmu_top, cmu_core, cmu_mem, tzpc0, tzpc1,
122 tzpc2, tzpc3, tzpc4, tzpc5, tzpc6, tzpc7, tzpc8, tzpc9, hdmi_cec, mct, 122 tzpc2, tzpc3, tzpc4, tzpc5, tzpc6, tzpc7, tzpc8, tzpc9, hdmi_cec, mct,
123 wdt, rtc, tmu, fimd1, mie1, dsim0, dp, mixer, hdmi, g2d, 123 wdt, rtc, tmu, fimd1, mie1, dsim0, dp, mixer, hdmi, g2d, mdma0,
124 smmu_mdma0,
124 125
125 /* mux clocks */ 126 /* mux clocks */
126 mout_hdmi = 1024, 127 mout_hdmi = 1024,
@@ -354,8 +355,8 @@ static struct samsung_gate_clock exynos5250_gate_clks[] __initdata = {
354 GATE(smmu_gscl2, "smmu_gscl2", "aclk266", GATE_IP_GSCL, 9, 0, 0), 355 GATE(smmu_gscl2, "smmu_gscl2", "aclk266", GATE_IP_GSCL, 9, 0, 0),
355 GATE(smmu_gscl3, "smmu_gscl3", "aclk266", GATE_IP_GSCL, 10, 0, 0), 356 GATE(smmu_gscl3, "smmu_gscl3", "aclk266", GATE_IP_GSCL, 10, 0, 0),
356 GATE(mfc, "mfc", "aclk333", GATE_IP_MFC, 0, 0, 0), 357 GATE(mfc, "mfc", "aclk333", GATE_IP_MFC, 0, 0, 0),
357 GATE(smmu_mfcl, "smmu_mfcl", "aclk333", GATE_IP_MFC, 1, 0, 0), 358 GATE(smmu_mfcl, "smmu_mfcl", "aclk333", GATE_IP_MFC, 2, 0, 0),
358 GATE(smmu_mfcr, "smmu_mfcr", "aclk333", GATE_IP_MFC, 2, 0, 0), 359 GATE(smmu_mfcr, "smmu_mfcr", "aclk333", GATE_IP_MFC, 1, 0, 0),
359 GATE(rotator, "rotator", "aclk266", GATE_IP_GEN, 1, 0, 0), 360 GATE(rotator, "rotator", "aclk266", GATE_IP_GEN, 1, 0, 0),
360 GATE(jpeg, "jpeg", "aclk166", GATE_IP_GEN, 2, 0, 0), 361 GATE(jpeg, "jpeg", "aclk166", GATE_IP_GEN, 2, 0, 0),
361 GATE(mdma1, "mdma1", "aclk266", GATE_IP_GEN, 4, 0, 0), 362 GATE(mdma1, "mdma1", "aclk266", GATE_IP_GEN, 4, 0, 0),
@@ -406,7 +407,8 @@ static struct samsung_gate_clock exynos5250_gate_clks[] __initdata = {
406 GATE(hsi2c2, "hsi2c2", "aclk66", GATE_IP_PERIC, 30, 0, 0), 407 GATE(hsi2c2, "hsi2c2", "aclk66", GATE_IP_PERIC, 30, 0, 0),
407 GATE(hsi2c3, "hsi2c3", "aclk66", GATE_IP_PERIC, 31, 0, 0), 408 GATE(hsi2c3, "hsi2c3", "aclk66", GATE_IP_PERIC, 31, 0, 0),
408 GATE(chipid, "chipid", "aclk66", GATE_IP_PERIS, 0, 0, 0), 409 GATE(chipid, "chipid", "aclk66", GATE_IP_PERIS, 0, 0, 0),
409 GATE(sysreg, "sysreg", "aclk66", GATE_IP_PERIS, 1, 0, 0), 410 GATE(sysreg, "sysreg", "aclk66",
411 GATE_IP_PERIS, 1, CLK_IGNORE_UNUSED, 0),
410 GATE(pmu, "pmu", "aclk66", GATE_IP_PERIS, 2, CLK_IGNORE_UNUSED, 0), 412 GATE(pmu, "pmu", "aclk66", GATE_IP_PERIS, 2, CLK_IGNORE_UNUSED, 0),
411 GATE(tzpc0, "tzpc0", "aclk66", GATE_IP_PERIS, 6, 0, 0), 413 GATE(tzpc0, "tzpc0", "aclk66", GATE_IP_PERIS, 6, 0, 0),
412 GATE(tzpc1, "tzpc1", "aclk66", GATE_IP_PERIS, 7, 0, 0), 414 GATE(tzpc1, "tzpc1", "aclk66", GATE_IP_PERIS, 7, 0, 0),
@@ -492,6 +494,8 @@ static struct samsung_gate_clock exynos5250_gate_clks[] __initdata = {
492 GATE(mixer, "mixer", "mout_aclk200_disp1", GATE_IP_DISP1, 5, 0, 0), 494 GATE(mixer, "mixer", "mout_aclk200_disp1", GATE_IP_DISP1, 5, 0, 0),
493 GATE(hdmi, "hdmi", "mout_aclk200_disp1", GATE_IP_DISP1, 6, 0, 0), 495 GATE(hdmi, "hdmi", "mout_aclk200_disp1", GATE_IP_DISP1, 6, 0, 0),
494 GATE(g2d, "g2d", "aclk200", GATE_IP_ACP, 3, 0, 0), 496 GATE(g2d, "g2d", "aclk200", GATE_IP_ACP, 3, 0, 0),
497 GATE(mdma0, "mdma0", "aclk266", GATE_IP_ACP, 1, 0, 0),
498 GATE(smmu_mdma0, "smmu_mdma0", "aclk266", GATE_IP_ACP, 5, 0, 0),
495}; 499};
496 500
497static struct samsung_pll_rate_table vpll_24mhz_tbl[] __initdata = { 501static struct samsung_pll_rate_table vpll_24mhz_tbl[] __initdata = {
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index bdb953e15d2a..634c4d6dd45a 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -75,6 +75,7 @@ config CLKSRC_DBX500_PRCMU_SCHED_CLOCK
75config CLKSRC_EFM32 75config CLKSRC_EFM32
76 bool "Clocksource for Energy Micro's EFM32 SoCs" if !ARCH_EFM32 76 bool "Clocksource for Energy Micro's EFM32 SoCs" if !ARCH_EFM32
77 depends on OF && ARM && (ARCH_EFM32 || COMPILE_TEST) 77 depends on OF && ARM && (ARCH_EFM32 || COMPILE_TEST)
78 select CLKSRC_MMIO
78 default ARCH_EFM32 79 default ARCH_EFM32
79 help 80 help
80 Support to use the timers of EFM32 SoCs as clock source and clock 81 Support to use the timers of EFM32 SoCs as clock source and clock
@@ -87,6 +88,7 @@ config ARM_ARCH_TIMER
87config ARM_ARCH_TIMER_EVTSTREAM 88config ARM_ARCH_TIMER_EVTSTREAM
88 bool "Support for ARM architected timer event stream generation" 89 bool "Support for ARM architected timer event stream generation"
89 default y if ARM_ARCH_TIMER 90 default y if ARM_ARCH_TIMER
91 depends on ARM_ARCH_TIMER
90 help 92 help
91 This option enables support for event stream generation based on 93 This option enables support for event stream generation based on
92 the ARM architected timer. It is used for waking up CPUs executing 94 the ARM architected timer. It is used for waking up CPUs executing
diff --git a/drivers/clocksource/clksrc-of.c b/drivers/clocksource/clksrc-of.c
index 35639cf4e5a2..b9ddd9e3a2f5 100644
--- a/drivers/clocksource/clksrc-of.c
+++ b/drivers/clocksource/clksrc-of.c
@@ -35,6 +35,5 @@ void __init clocksource_of_init(void)
35 35
36 init_func = match->data; 36 init_func = match->data;
37 init_func(np); 37 init_func(np);
38 of_node_put(np);
39 } 38 }
40} 39}
diff --git a/drivers/clocksource/dw_apb_timer_of.c b/drivers/clocksource/dw_apb_timer_of.c
index 45ba8aecc729..2a2ea2717f3a 100644
--- a/drivers/clocksource/dw_apb_timer_of.c
+++ b/drivers/clocksource/dw_apb_timer_of.c
@@ -108,12 +108,11 @@ static void __init add_clocksource(struct device_node *source_timer)
108 108
109static u64 read_sched_clock(void) 109static u64 read_sched_clock(void)
110{ 110{
111 return __raw_readl(sched_io_base); 111 return ~__raw_readl(sched_io_base);
112} 112}
113 113
114static const struct of_device_id sptimer_ids[] __initconst = { 114static const struct of_device_id sptimer_ids[] __initconst = {
115 { .compatible = "picochip,pc3x2-rtc" }, 115 { .compatible = "picochip,pc3x2-rtc" },
116 { .compatible = "snps,dw-apb-timer-sp" },
117 { /* Sentinel */ }, 116 { /* Sentinel */ },
118}; 117};
119 118
@@ -151,4 +150,6 @@ static void __init dw_apb_timer_init(struct device_node *timer)
151 num_called++; 150 num_called++;
152} 151}
153CLOCKSOURCE_OF_DECLARE(pc3x2_timer, "picochip,pc3x2-timer", dw_apb_timer_init); 152CLOCKSOURCE_OF_DECLARE(pc3x2_timer, "picochip,pc3x2-timer", dw_apb_timer_init);
154CLOCKSOURCE_OF_DECLARE(apb_timer, "snps,dw-apb-timer-osc", dw_apb_timer_init); 153CLOCKSOURCE_OF_DECLARE(apb_timer_osc, "snps,dw-apb-timer-osc", dw_apb_timer_init);
154CLOCKSOURCE_OF_DECLARE(apb_timer_sp, "snps,dw-apb-timer-sp", dw_apb_timer_init);
155CLOCKSOURCE_OF_DECLARE(apb_timer, "snps,dw-apb-timer", dw_apb_timer_init);
diff --git a/drivers/clocksource/sh_mtu2.c b/drivers/clocksource/sh_mtu2.c
index 4aac9ee0d0c0..3cf12834681e 100644
--- a/drivers/clocksource/sh_mtu2.c
+++ b/drivers/clocksource/sh_mtu2.c
@@ -313,8 +313,20 @@ static int sh_mtu2_setup(struct sh_mtu2_priv *p, struct platform_device *pdev)
313 goto err1; 313 goto err1;
314 } 314 }
315 315
316 return sh_mtu2_register(p, (char *)dev_name(&p->pdev->dev), 316 ret = clk_prepare(p->clk);
317 cfg->clockevent_rating); 317 if (ret < 0)
318 goto err2;
319
320 ret = sh_mtu2_register(p, (char *)dev_name(&p->pdev->dev),
321 cfg->clockevent_rating);
322 if (ret < 0)
323 goto err3;
324
325 return 0;
326 err3:
327 clk_unprepare(p->clk);
328 err2:
329 clk_put(p->clk);
318 err1: 330 err1:
319 iounmap(p->mapbase); 331 iounmap(p->mapbase);
320 err0: 332 err0:
diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c
index 78b8dae49628..63557cda0a7d 100644
--- a/drivers/clocksource/sh_tmu.c
+++ b/drivers/clocksource/sh_tmu.c
@@ -472,12 +472,26 @@ static int sh_tmu_setup(struct sh_tmu_priv *p, struct platform_device *pdev)
472 ret = PTR_ERR(p->clk); 472 ret = PTR_ERR(p->clk);
473 goto err1; 473 goto err1;
474 } 474 }
475
476 ret = clk_prepare(p->clk);
477 if (ret < 0)
478 goto err2;
479
475 p->cs_enabled = false; 480 p->cs_enabled = false;
476 p->enable_count = 0; 481 p->enable_count = 0;
477 482
478 return sh_tmu_register(p, (char *)dev_name(&p->pdev->dev), 483 ret = sh_tmu_register(p, (char *)dev_name(&p->pdev->dev),
479 cfg->clockevent_rating, 484 cfg->clockevent_rating,
480 cfg->clocksource_rating); 485 cfg->clocksource_rating);
486 if (ret < 0)
487 goto err3;
488
489 return 0;
490
491 err3:
492 clk_unprepare(p->clk);
493 err2:
494 clk_put(p->clk);
481 err1: 495 err1:
482 iounmap(p->mapbase); 496 iounmap(p->mapbase);
483 err0: 497 err0:
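Both the sh_mtu2 and sh_tmu hunks above replace a tail call with the explicit unwind idiom: each successfully acquired resource gets its own label, so a later failure releases everything in reverse order. The shape in isolation, as a rough sketch (demo_* names are placeholders; p->clk is assumed to have been clk_get()'d by an earlier step, mirroring the drivers above):

#include <linux/clk.h>
#include <linux/err.h>

struct demo_priv {
	struct clk *clk;
};

int demo_register(struct demo_priv *p);	/* placeholder for the real registration step */

static int demo_setup(struct demo_priv *p)
{
	int ret;

	ret = clk_prepare(p->clk);
	if (ret < 0)
		goto err_put;

	ret = demo_register(p);
	if (ret < 0)
		goto err_unprepare;

	return 0;

err_unprepare:
	clk_unprepare(p->clk);
err_put:
	clk_put(p->clk);	/* undoes the earlier clk_get() */
	return ret;
}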
diff --git a/drivers/clocksource/sun4i_timer.c b/drivers/clocksource/sun4i_timer.c
index 2fb4695a28d8..a4f6119aafd8 100644
--- a/drivers/clocksource/sun4i_timer.c
+++ b/drivers/clocksource/sun4i_timer.c
@@ -179,6 +179,9 @@ static void __init sun4i_timer_init(struct device_node *node)
179 writel(TIMER_CTL_CLK_SRC(TIMER_CTL_CLK_SRC_OSC24M), 179 writel(TIMER_CTL_CLK_SRC(TIMER_CTL_CLK_SRC_OSC24M),
180 timer_base + TIMER_CTL_REG(0)); 180 timer_base + TIMER_CTL_REG(0));
181 181
182 /* Make sure timer is stopped before playing with interrupts */
183 sun4i_clkevt_time_stop(0);
184
182 ret = setup_irq(irq, &sun4i_timer_irq); 185 ret = setup_irq(irq, &sun4i_timer_irq);
183 if (ret) 186 if (ret)
184 pr_warn("failed to setup irq %d\n", irq); 187 pr_warn("failed to setup irq %d\n", irq);
diff --git a/drivers/clocksource/time-armada-370-xp.c b/drivers/clocksource/time-armada-370-xp.c
index d8e47e502785..4e7f6802e840 100644
--- a/drivers/clocksource/time-armada-370-xp.c
+++ b/drivers/clocksource/time-armada-370-xp.c
@@ -256,11 +256,6 @@ static void __init armada_370_xp_timer_common_init(struct device_node *np)
256 ticks_per_jiffy = (timer_clk + HZ / 2) / HZ; 256 ticks_per_jiffy = (timer_clk + HZ / 2) / HZ;
257 257
258 /* 258 /*
259 * Set scale and timer for sched_clock.
260 */
261 sched_clock_register(armada_370_xp_read_sched_clock, 32, timer_clk);
262
263 /*
264 * Setup free-running clocksource timer (interrupts 259 * Setup free-running clocksource timer (interrupts
265 * disabled). 260 * disabled).
266 */ 261 */
@@ -270,6 +265,11 @@ static void __init armada_370_xp_timer_common_init(struct device_node *np)
270 timer_ctrl_clrset(0, TIMER0_EN | TIMER0_RELOAD_EN | 265 timer_ctrl_clrset(0, TIMER0_EN | TIMER0_RELOAD_EN |
271 TIMER0_DIV(TIMER_DIVIDER_SHIFT)); 266 TIMER0_DIV(TIMER_DIVIDER_SHIFT));
272 267
268 /*
269 * Set scale and timer for sched_clock.
270 */
271 sched_clock_register(armada_370_xp_read_sched_clock, 32, timer_clk);
272
273 clocksource_mmio_init(timer_base + TIMER0_VAL_OFF, 273 clocksource_mmio_init(timer_base + TIMER0_VAL_OFF,
274 "armada_370_xp_clocksource", 274 "armada_370_xp_clocksource",
275 timer_clk, 300, 32, clocksource_mmio_readl_down); 275 timer_clk, 300, 32, clocksource_mmio_readl_down);
diff --git a/drivers/cpufreq/at32ap-cpufreq.c b/drivers/cpufreq/at32ap-cpufreq.c
index 856ad80418ae..7c03dd84f66a 100644
--- a/drivers/cpufreq/at32ap-cpufreq.c
+++ b/drivers/cpufreq/at32ap-cpufreq.c
@@ -58,7 +58,7 @@ static int at32_set_target(struct cpufreq_policy *policy, unsigned int index)
58 return 0; 58 return 0;
59} 59}
60 60
61static int __init at32_cpufreq_driver_init(struct cpufreq_policy *policy) 61static int at32_cpufreq_driver_init(struct cpufreq_policy *policy)
62{ 62{
63 unsigned int frequency, rate, min_freq; 63 unsigned int frequency, rate, min_freq;
64 int retval, steps, i; 64 int retval, steps, i;
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 02d534da22dd..8d19f7c06010 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -828,14 +828,17 @@ static void cpufreq_init_policy(struct cpufreq_policy *policy)
828 int ret = 0; 828 int ret = 0;
829 829
830 memcpy(&new_policy, policy, sizeof(*policy)); 830 memcpy(&new_policy, policy, sizeof(*policy));
831
832 /* Use the default policy if its valid. */
833 if (cpufreq_driver->setpolicy)
834 cpufreq_parse_governor(policy->governor->name,
835 &new_policy.policy, NULL);
836
831 /* assure that the starting sequence is run in cpufreq_set_policy */ 837 /* assure that the starting sequence is run in cpufreq_set_policy */
832 policy->governor = NULL; 838 policy->governor = NULL;
833 839
834 /* set default policy */ 840 /* set default policy */
835 ret = cpufreq_set_policy(policy, &new_policy); 841 ret = cpufreq_set_policy(policy, &new_policy);
836 policy->user_policy.policy = policy->policy;
837 policy->user_policy.governor = policy->governor;
838
839 if (ret) { 842 if (ret) {
840 pr_debug("setting policy failed\n"); 843 pr_debug("setting policy failed\n");
841 if (cpufreq_driver->exit) 844 if (cpufreq_driver->exit)
@@ -845,8 +848,7 @@ static void cpufreq_init_policy(struct cpufreq_policy *policy)
845 848
846#ifdef CONFIG_HOTPLUG_CPU 849#ifdef CONFIG_HOTPLUG_CPU
847static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, 850static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
848 unsigned int cpu, struct device *dev, 851 unsigned int cpu, struct device *dev)
849 bool frozen)
850{ 852{
851 int ret = 0; 853 int ret = 0;
852 unsigned long flags; 854 unsigned long flags;
@@ -877,11 +879,7 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
877 } 879 }
878 } 880 }
879 881
880 /* Don't touch sysfs links during light-weight init */ 882 return sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
881 if (!frozen)
882 ret = sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
883
884 return ret;
885} 883}
886#endif 884#endif
887 885
@@ -926,6 +924,27 @@ err_free_policy:
926 return NULL; 924 return NULL;
927} 925}
928 926
927static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
928{
929 struct kobject *kobj;
930 struct completion *cmp;
931
932 down_read(&policy->rwsem);
933 kobj = &policy->kobj;
934 cmp = &policy->kobj_unregister;
935 up_read(&policy->rwsem);
936 kobject_put(kobj);
937
938 /*
939 * We need to make sure that the underlying kobj is
940 * actually not referenced anymore by anybody before we
941 * proceed with unloading.
942 */
943 pr_debug("waiting for dropping of refcount\n");
944 wait_for_completion(cmp);
945 pr_debug("wait complete\n");
946}
947
929static void cpufreq_policy_free(struct cpufreq_policy *policy) 948static void cpufreq_policy_free(struct cpufreq_policy *policy)
930{ 949{
931 free_cpumask_var(policy->related_cpus); 950 free_cpumask_var(policy->related_cpus);
@@ -986,7 +1005,7 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
986 list_for_each_entry(tpolicy, &cpufreq_policy_list, policy_list) { 1005 list_for_each_entry(tpolicy, &cpufreq_policy_list, policy_list) {
987 if (cpumask_test_cpu(cpu, tpolicy->related_cpus)) { 1006 if (cpumask_test_cpu(cpu, tpolicy->related_cpus)) {
988 read_unlock_irqrestore(&cpufreq_driver_lock, flags); 1007 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
989 ret = cpufreq_add_policy_cpu(tpolicy, cpu, dev, frozen); 1008 ret = cpufreq_add_policy_cpu(tpolicy, cpu, dev);
990 up_read(&cpufreq_rwsem); 1009 up_read(&cpufreq_rwsem);
991 return ret; 1010 return ret;
992 } 1011 }
@@ -994,15 +1013,17 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
994 read_unlock_irqrestore(&cpufreq_driver_lock, flags); 1013 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
995#endif 1014#endif
996 1015
997 if (frozen) 1016 /*
998 /* Restore the saved policy when doing light-weight init */ 1017 * Restore the saved policy when doing light-weight init and fall back
999 policy = cpufreq_policy_restore(cpu); 1018 * to the full init if that fails.
1000 else 1019 */
1020 policy = frozen ? cpufreq_policy_restore(cpu) : NULL;
1021 if (!policy) {
1022 frozen = false;
1001 policy = cpufreq_policy_alloc(); 1023 policy = cpufreq_policy_alloc();
1002 1024 if (!policy)
1003 if (!policy) 1025 goto nomem_out;
1004 goto nomem_out; 1026 }
1005
1006 1027
1007 /* 1028 /*
1008 * In the resume path, since we restore a saved policy, the assignment 1029 * In the resume path, since we restore a saved policy, the assignment
@@ -1047,8 +1068,10 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
1047 */ 1068 */
1048 cpumask_and(policy->cpus, policy->cpus, cpu_online_mask); 1069 cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
1049 1070
1050 policy->user_policy.min = policy->min; 1071 if (!frozen) {
1051 policy->user_policy.max = policy->max; 1072 policy->user_policy.min = policy->min;
1073 policy->user_policy.max = policy->max;
1074 }
1052 1075
1053 blocking_notifier_call_chain(&cpufreq_policy_notifier_list, 1076 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1054 CPUFREQ_START, policy); 1077 CPUFREQ_START, policy);
@@ -1079,6 +1102,11 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
1079 1102
1080 cpufreq_init_policy(policy); 1103 cpufreq_init_policy(policy);
1081 1104
1105 if (!frozen) {
1106 policy->user_policy.policy = policy->policy;
1107 policy->user_policy.governor = policy->governor;
1108 }
1109
1082 kobject_uevent(&policy->kobj, KOBJ_ADD); 1110 kobject_uevent(&policy->kobj, KOBJ_ADD);
1083 up_read(&cpufreq_rwsem); 1111 up_read(&cpufreq_rwsem);
1084 1112
@@ -1096,7 +1124,13 @@ err_get_freq:
1096 if (cpufreq_driver->exit) 1124 if (cpufreq_driver->exit)
1097 cpufreq_driver->exit(policy); 1125 cpufreq_driver->exit(policy);
1098err_set_policy_cpu: 1126err_set_policy_cpu:
1127 if (frozen) {
1128 /* Do not leave stale fallback data behind. */
1129 per_cpu(cpufreq_cpu_data_fallback, cpu) = NULL;
1130 cpufreq_policy_put_kobj(policy);
1131 }
1099 cpufreq_policy_free(policy); 1132 cpufreq_policy_free(policy);
1133
1100nomem_out: 1134nomem_out:
1101 up_read(&cpufreq_rwsem); 1135 up_read(&cpufreq_rwsem);
1102 1136
@@ -1118,7 +1152,7 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1118} 1152}
1119 1153
1120static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy, 1154static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
1121 unsigned int old_cpu, bool frozen) 1155 unsigned int old_cpu)
1122{ 1156{
1123 struct device *cpu_dev; 1157 struct device *cpu_dev;
1124 int ret; 1158 int ret;
@@ -1126,10 +1160,6 @@ static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
1126 /* first sibling now owns the new sysfs dir */ 1160 /* first sibling now owns the new sysfs dir */
1127 cpu_dev = get_cpu_device(cpumask_any_but(policy->cpus, old_cpu)); 1161 cpu_dev = get_cpu_device(cpumask_any_but(policy->cpus, old_cpu));
1128 1162
1129 /* Don't touch sysfs files during light-weight tear-down */
1130 if (frozen)
1131 return cpu_dev->id;
1132
1133 sysfs_remove_link(&cpu_dev->kobj, "cpufreq"); 1163 sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
1134 ret = kobject_move(&policy->kobj, &cpu_dev->kobj); 1164 ret = kobject_move(&policy->kobj, &cpu_dev->kobj);
1135 if (ret) { 1165 if (ret) {
@@ -1196,7 +1226,7 @@ static int __cpufreq_remove_dev_prepare(struct device *dev,
1196 if (!frozen) 1226 if (!frozen)
1197 sysfs_remove_link(&dev->kobj, "cpufreq"); 1227 sysfs_remove_link(&dev->kobj, "cpufreq");
1198 } else if (cpus > 1) { 1228 } else if (cpus > 1) {
1199 new_cpu = cpufreq_nominate_new_policy_cpu(policy, cpu, frozen); 1229 new_cpu = cpufreq_nominate_new_policy_cpu(policy, cpu);
1200 if (new_cpu >= 0) { 1230 if (new_cpu >= 0) {
1201 update_policy_cpu(policy, new_cpu); 1231 update_policy_cpu(policy, new_cpu);
1202 1232
@@ -1218,8 +1248,6 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
1218 int ret; 1248 int ret;
1219 unsigned long flags; 1249 unsigned long flags;
1220 struct cpufreq_policy *policy; 1250 struct cpufreq_policy *policy;
1221 struct kobject *kobj;
1222 struct completion *cmp;
1223 1251
1224 read_lock_irqsave(&cpufreq_driver_lock, flags); 1252 read_lock_irqsave(&cpufreq_driver_lock, flags);
1225 policy = per_cpu(cpufreq_cpu_data, cpu); 1253 policy = per_cpu(cpufreq_cpu_data, cpu);
@@ -1249,22 +1277,8 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
1249 } 1277 }
1250 } 1278 }
1251 1279
1252 if (!frozen) { 1280 if (!frozen)
1253 down_read(&policy->rwsem); 1281 cpufreq_policy_put_kobj(policy);
1254 kobj = &policy->kobj;
1255 cmp = &policy->kobj_unregister;
1256 up_read(&policy->rwsem);
1257 kobject_put(kobj);
1258
1259 /*
1260 * We need to make sure that the underlying kobj is
1261 * actually not referenced anymore by anybody before we
1262 * proceed with unloading.
1263 */
1264 pr_debug("waiting for dropping of refcount\n");
1265 wait_for_completion(cmp);
1266 pr_debug("wait complete\n");
1267 }
1268 1282
1269 /* 1283 /*
1270 * Perform the ->exit() even during light-weight tear-down, 1284 * Perform the ->exit() even during light-weight tear-down,
diff --git a/drivers/cpufreq/exynos4210-cpufreq.c b/drivers/cpufreq/exynos4210-cpufreq.c
index f2c75065ce19..dfd1643b0b2f 100644
--- a/drivers/cpufreq/exynos4210-cpufreq.c
+++ b/drivers/cpufreq/exynos4210-cpufreq.c
@@ -157,4 +157,3 @@ err_moutcore:
157 pr_debug("%s: failed initialization\n", __func__); 157 pr_debug("%s: failed initialization\n", __func__);
158 return -EINVAL; 158 return -EINVAL;
159} 159}
160EXPORT_SYMBOL(exynos4210_cpufreq_init);
diff --git a/drivers/cpufreq/exynos4x12-cpufreq.c b/drivers/cpufreq/exynos4x12-cpufreq.c
index 8683304ce62c..efad5e657f6f 100644
--- a/drivers/cpufreq/exynos4x12-cpufreq.c
+++ b/drivers/cpufreq/exynos4x12-cpufreq.c
@@ -211,4 +211,3 @@ err_moutcore:
211 pr_debug("%s: failed initialization\n", __func__); 211 pr_debug("%s: failed initialization\n", __func__);
212 return -EINVAL; 212 return -EINVAL;
213} 213}
214EXPORT_SYMBOL(exynos4x12_cpufreq_init);
diff --git a/drivers/cpufreq/exynos5250-cpufreq.c b/drivers/cpufreq/exynos5250-cpufreq.c
index 9fae466d7746..8feda86fe42c 100644
--- a/drivers/cpufreq/exynos5250-cpufreq.c
+++ b/drivers/cpufreq/exynos5250-cpufreq.c
@@ -236,4 +236,3 @@ err_moutcore:
236 pr_err("%s: failed initialization\n", __func__); 236 pr_err("%s: failed initialization\n", __func__);
237 return -EINVAL; 237 return -EINVAL;
238} 238}
239EXPORT_SYMBOL(exynos5250_cpufreq_init);
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 5f1cbae36961..d51f17ed691e 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -581,7 +581,8 @@ static void intel_pstate_timer_func(unsigned long __data)
581} 581}
582 582
583#define ICPU(model, policy) \ 583#define ICPU(model, policy) \
584 { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&policy } 584 { X86_VENDOR_INTEL, 6, model, X86_FEATURE_APERFMPERF,\
585 (unsigned long)&policy }
585 586
586static const struct x86_cpu_id intel_pstate_cpu_ids[] = { 587static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
587 ICPU(0x2a, core_params), 588 ICPU(0x2a, core_params),
@@ -614,6 +615,11 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
614 cpu = all_cpu_data[cpunum]; 615 cpu = all_cpu_data[cpunum];
615 616
616 intel_pstate_get_cpu_pstates(cpu); 617 intel_pstate_get_cpu_pstates(cpu);
618 if (!cpu->pstate.current_pstate) {
619 all_cpu_data[cpunum] = NULL;
620 kfree(cpu);
621 return -ENODATA;
622 }
617 623
618 cpu->cpu = cpunum; 624 cpu->cpu = cpunum;
619 625
diff --git a/drivers/cpufreq/tegra-cpufreq.c b/drivers/cpufreq/tegra-cpufreq.c
index f42df7ec03c5..b7309c37033d 100644
--- a/drivers/cpufreq/tegra-cpufreq.c
+++ b/drivers/cpufreq/tegra-cpufreq.c
@@ -142,10 +142,8 @@ static int tegra_target(struct cpufreq_policy *policy, unsigned int index)
142 142
143 mutex_lock(&tegra_cpu_lock); 143 mutex_lock(&tegra_cpu_lock);
144 144
145 if (is_suspended) { 145 if (is_suspended)
146 ret = -EBUSY;
147 goto out; 146 goto out;
148 }
149 147
150 freq = freq_table[index].frequency; 148 freq = freq_table[index].frequency;
151 149
diff --git a/drivers/cpuidle/cpuidle-calxeda.c b/drivers/cpuidle/cpuidle-calxeda.c
index 36795639df0d..6e51114057d0 100644
--- a/drivers/cpuidle/cpuidle-calxeda.c
+++ b/drivers/cpuidle/cpuidle-calxeda.c
@@ -65,7 +65,7 @@ static struct cpuidle_driver calxeda_idle_driver = {
65 .state_count = 2, 65 .state_count = 2,
66}; 66};
67 67
68static int __init calxeda_cpuidle_probe(struct platform_device *pdev) 68static int calxeda_cpuidle_probe(struct platform_device *pdev)
69{ 69{
70 return cpuidle_register(&calxeda_idle_driver, NULL); 70 return cpuidle_register(&calxeda_idle_driver, NULL);
71} 71}
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 2a991e468f78..a55e68f2cfc8 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -400,7 +400,7 @@ EXPORT_SYMBOL_GPL(cpuidle_register_device);
400 */ 400 */
401void cpuidle_unregister_device(struct cpuidle_device *dev) 401void cpuidle_unregister_device(struct cpuidle_device *dev)
402{ 402{
403 if (dev->registered == 0) 403 if (!dev || dev->registered == 0)
404 return; 404 return;
405 405
406 cpuidle_pause_and_lock(); 406 cpuidle_pause_and_lock();
diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig
index ca89f6b84b06..e7555ff4cafd 100644
--- a/drivers/crypto/caam/Kconfig
+++ b/drivers/crypto/caam/Kconfig
@@ -4,16 +4,29 @@ config CRYPTO_DEV_FSL_CAAM
4 help 4 help
5 Enables the driver module for Freescale's Cryptographic Accelerator 5 Enables the driver module for Freescale's Cryptographic Accelerator
6 and Assurance Module (CAAM), also known as the SEC version 4 (SEC4). 6 and Assurance Module (CAAM), also known as the SEC version 4 (SEC4).
7 This module adds a job ring operation interface, and configures h/w 7 This module creates job ring devices, and configures h/w
8 to operate as a DPAA component automatically, depending 8 to operate as a DPAA component automatically, depending
9 on h/w feature availability. 9 on h/w feature availability.
10 10
11 To compile this driver as a module, choose M here: the module 11 To compile this driver as a module, choose M here: the module
12 will be called caam. 12 will be called caam.
13 13
14config CRYPTO_DEV_FSL_CAAM_JR
15 tristate "Freescale CAAM Job Ring driver backend"
16 depends on CRYPTO_DEV_FSL_CAAM
17 default y
18 help
19 Enables the driver module for Job Rings which are part of
20 Freescale's Cryptographic Accelerator
21 and Assurance Module (CAAM). This module adds a job ring operation
22 interface.
23
24 To compile this driver as a module, choose M here: the module
25 will be called caam_jr.
26
14config CRYPTO_DEV_FSL_CAAM_RINGSIZE 27config CRYPTO_DEV_FSL_CAAM_RINGSIZE
15 int "Job Ring size" 28 int "Job Ring size"
16 depends on CRYPTO_DEV_FSL_CAAM 29 depends on CRYPTO_DEV_FSL_CAAM_JR
17 range 2 9 30 range 2 9
18 default "9" 31 default "9"
19 help 32 help
@@ -31,7 +44,7 @@ config CRYPTO_DEV_FSL_CAAM_RINGSIZE
31 44
32config CRYPTO_DEV_FSL_CAAM_INTC 45config CRYPTO_DEV_FSL_CAAM_INTC
33 bool "Job Ring interrupt coalescing" 46 bool "Job Ring interrupt coalescing"
34 depends on CRYPTO_DEV_FSL_CAAM 47 depends on CRYPTO_DEV_FSL_CAAM_JR
35 default n 48 default n
36 help 49 help
37 Enable the Job Ring's interrupt coalescing feature. 50 Enable the Job Ring's interrupt coalescing feature.
@@ -62,7 +75,7 @@ config CRYPTO_DEV_FSL_CAAM_INTC_TIME_THLD
62 75
63config CRYPTO_DEV_FSL_CAAM_CRYPTO_API 76config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
64 tristate "Register algorithm implementations with the Crypto API" 77 tristate "Register algorithm implementations with the Crypto API"
65 depends on CRYPTO_DEV_FSL_CAAM 78 depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR
66 default y 79 default y
67 select CRYPTO_ALGAPI 80 select CRYPTO_ALGAPI
68 select CRYPTO_AUTHENC 81 select CRYPTO_AUTHENC
@@ -76,7 +89,7 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
76 89
77config CRYPTO_DEV_FSL_CAAM_AHASH_API 90config CRYPTO_DEV_FSL_CAAM_AHASH_API
78 tristate "Register hash algorithm implementations with Crypto API" 91 tristate "Register hash algorithm implementations with Crypto API"
79 depends on CRYPTO_DEV_FSL_CAAM 92 depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR
80 default y 93 default y
81 select CRYPTO_HASH 94 select CRYPTO_HASH
82 help 95 help
@@ -88,7 +101,7 @@ config CRYPTO_DEV_FSL_CAAM_AHASH_API
88 101
89config CRYPTO_DEV_FSL_CAAM_RNG_API 102config CRYPTO_DEV_FSL_CAAM_RNG_API
90 tristate "Register caam device for hwrng API" 103 tristate "Register caam device for hwrng API"
91 depends on CRYPTO_DEV_FSL_CAAM 104 depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR
92 default y 105 default y
93 select CRYPTO_RNG 106 select CRYPTO_RNG
94 select HW_RANDOM 107 select HW_RANDOM
diff --git a/drivers/crypto/caam/Makefile b/drivers/crypto/caam/Makefile
index d56bd0ec65d8..550758a333e7 100644
--- a/drivers/crypto/caam/Makefile
+++ b/drivers/crypto/caam/Makefile
@@ -6,8 +6,10 @@ ifeq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_DEBUG), y)
6endif 6endif
7 7
8obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam.o 8obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam.o
9obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_JR) += caam_jr.o
9obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o 10obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o
10obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o 11obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o
11obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o 12obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o
12 13
13caam-objs := ctrl.o jr.o error.o key_gen.o 14caam-objs := ctrl.o
15caam_jr-objs := jr.o key_gen.o error.o
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 7c63b72ecd75..4cf5dec826e1 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -86,6 +86,7 @@
86#else 86#else
87#define debug(format, arg...) 87#define debug(format, arg...)
88#endif 88#endif
89static struct list_head alg_list;
89 90
90/* Set DK bit in class 1 operation if shared */ 91/* Set DK bit in class 1 operation if shared */
91static inline void append_dec_op1(u32 *desc, u32 type) 92static inline void append_dec_op1(u32 *desc, u32 type)
@@ -817,7 +818,7 @@ static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
817 ivsize, 1); 818 ivsize, 1);
818 print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ", 819 print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
819 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->dst), 820 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->dst),
820 req->cryptlen, 1); 821 req->cryptlen - ctx->authsize, 1);
821#endif 822#endif
822 823
823 if (err) { 824 if (err) {
@@ -971,12 +972,9 @@ static void init_aead_job(u32 *sh_desc, dma_addr_t ptr,
971 (edesc->src_nents ? : 1); 972 (edesc->src_nents ? : 1);
972 in_options = LDST_SGF; 973 in_options = LDST_SGF;
973 } 974 }
974 if (encrypt) 975
975 append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + 976 append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + req->cryptlen,
976 req->cryptlen - authsize, in_options); 977 in_options);
977 else
978 append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
979 req->cryptlen, in_options);
980 978
981 if (likely(req->src == req->dst)) { 979 if (likely(req->src == req->dst)) {
982 if (all_contig) { 980 if (all_contig) {
@@ -997,7 +995,8 @@ static void init_aead_job(u32 *sh_desc, dma_addr_t ptr,
997 } 995 }
998 } 996 }
999 if (encrypt) 997 if (encrypt)
1000 append_seq_out_ptr(desc, dst_dma, req->cryptlen, out_options); 998 append_seq_out_ptr(desc, dst_dma, req->cryptlen + authsize,
999 out_options);
1001 else 1000 else
1002 append_seq_out_ptr(desc, dst_dma, req->cryptlen - authsize, 1001 append_seq_out_ptr(desc, dst_dma, req->cryptlen - authsize,
1003 out_options); 1002 out_options);
@@ -1047,8 +1046,8 @@ static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr,
1047 sec4_sg_index += edesc->assoc_nents + 1 + edesc->src_nents; 1046 sec4_sg_index += edesc->assoc_nents + 1 + edesc->src_nents;
1048 in_options = LDST_SGF; 1047 in_options = LDST_SGF;
1049 } 1048 }
1050 append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + 1049 append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + req->cryptlen,
1051 req->cryptlen - authsize, in_options); 1050 in_options);
1052 1051
1053 if (contig & GIV_DST_CONTIG) { 1052 if (contig & GIV_DST_CONTIG) {
1054 dst_dma = edesc->iv_dma; 1053 dst_dma = edesc->iv_dma;
@@ -1065,7 +1064,8 @@ static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr,
1065 } 1064 }
1066 } 1065 }
1067 1066
1068 append_seq_out_ptr(desc, dst_dma, ivsize + req->cryptlen, out_options); 1067 append_seq_out_ptr(desc, dst_dma, ivsize + req->cryptlen + authsize,
1068 out_options);
1069} 1069}
1070 1070
1071/* 1071/*
@@ -1129,7 +1129,8 @@ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
1129 * allocate and map the aead extended descriptor 1129 * allocate and map the aead extended descriptor
1130 */ 1130 */
1131static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, 1131static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
1132 int desc_bytes, bool *all_contig_ptr) 1132 int desc_bytes, bool *all_contig_ptr,
1133 bool encrypt)
1133{ 1134{
1134 struct crypto_aead *aead = crypto_aead_reqtfm(req); 1135 struct crypto_aead *aead = crypto_aead_reqtfm(req);
1135 struct caam_ctx *ctx = crypto_aead_ctx(aead); 1136 struct caam_ctx *ctx = crypto_aead_ctx(aead);
@@ -1144,12 +1145,22 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
1144 bool assoc_chained = false, src_chained = false, dst_chained = false; 1145 bool assoc_chained = false, src_chained = false, dst_chained = false;
1145 int ivsize = crypto_aead_ivsize(aead); 1146 int ivsize = crypto_aead_ivsize(aead);
1146 int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes; 1147 int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
1148 unsigned int authsize = ctx->authsize;
1147 1149
1148 assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained); 1150 assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);
1149 src_nents = sg_count(req->src, req->cryptlen, &src_chained);
1150 1151
1151 if (unlikely(req->dst != req->src)) 1152 if (unlikely(req->dst != req->src)) {
1152 dst_nents = sg_count(req->dst, req->cryptlen, &dst_chained); 1153 src_nents = sg_count(req->src, req->cryptlen, &src_chained);
1154 dst_nents = sg_count(req->dst,
1155 req->cryptlen +
1156 (encrypt ? authsize : (-authsize)),
1157 &dst_chained);
1158 } else {
1159 src_nents = sg_count(req->src,
1160 req->cryptlen +
1161 (encrypt ? authsize : 0),
1162 &src_chained);
1163 }
1153 1164
1154 sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1, 1165 sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
1155 DMA_TO_DEVICE, assoc_chained); 1166 DMA_TO_DEVICE, assoc_chained);
@@ -1233,11 +1244,9 @@ static int aead_encrypt(struct aead_request *req)
1233 u32 *desc; 1244 u32 *desc;
1234 int ret = 0; 1245 int ret = 0;
1235 1246
1236 req->cryptlen += ctx->authsize;
1237
1238 /* allocate extended descriptor */ 1247 /* allocate extended descriptor */
1239 edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN * 1248 edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
1240 CAAM_CMD_SZ, &all_contig); 1249 CAAM_CMD_SZ, &all_contig, true);
1241 if (IS_ERR(edesc)) 1250 if (IS_ERR(edesc))
1242 return PTR_ERR(edesc); 1251 return PTR_ERR(edesc);
1243 1252
@@ -1274,7 +1283,7 @@ static int aead_decrypt(struct aead_request *req)
1274 1283
1275 /* allocate extended descriptor */ 1284 /* allocate extended descriptor */
1276 edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN * 1285 edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
1277 CAAM_CMD_SZ, &all_contig); 1286 CAAM_CMD_SZ, &all_contig, false);
1278 if (IS_ERR(edesc)) 1287 if (IS_ERR(edesc))
1279 return PTR_ERR(edesc); 1288 return PTR_ERR(edesc);
1280 1289
@@ -1331,7 +1340,8 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
1331 src_nents = sg_count(req->src, req->cryptlen, &src_chained); 1340 src_nents = sg_count(req->src, req->cryptlen, &src_chained);
1332 1341
1333 if (unlikely(req->dst != req->src)) 1342 if (unlikely(req->dst != req->src))
1334 dst_nents = sg_count(req->dst, req->cryptlen, &dst_chained); 1343 dst_nents = sg_count(req->dst, req->cryptlen + ctx->authsize,
1344 &dst_chained);
1335 1345
1336 sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1, 1346 sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
1337 DMA_TO_DEVICE, assoc_chained); 1347 DMA_TO_DEVICE, assoc_chained);
@@ -1425,8 +1435,6 @@ static int aead_givencrypt(struct aead_givcrypt_request *areq)
1425 u32 *desc; 1435 u32 *desc;
1426 int ret = 0; 1436 int ret = 0;
1427 1437
1428 req->cryptlen += ctx->authsize;
1429
1430 /* allocate extended descriptor */ 1438 /* allocate extended descriptor */
1431 edesc = aead_giv_edesc_alloc(areq, DESC_JOB_IO_LEN * 1439 edesc = aead_giv_edesc_alloc(areq, DESC_JOB_IO_LEN *
1432 CAAM_CMD_SZ, &contig); 1440 CAAM_CMD_SZ, &contig);
@@ -2057,7 +2065,6 @@ static struct caam_alg_template driver_algs[] = {
2057 2065
2058struct caam_crypto_alg { 2066struct caam_crypto_alg {
2059 struct list_head entry; 2067 struct list_head entry;
2060 struct device *ctrldev;
2061 int class1_alg_type; 2068 int class1_alg_type;
2062 int class2_alg_type; 2069 int class2_alg_type;
2063 int alg_op; 2070 int alg_op;
@@ -2070,14 +2077,12 @@ static int caam_cra_init(struct crypto_tfm *tfm)
2070 struct caam_crypto_alg *caam_alg = 2077 struct caam_crypto_alg *caam_alg =
2071 container_of(alg, struct caam_crypto_alg, crypto_alg); 2078 container_of(alg, struct caam_crypto_alg, crypto_alg);
2072 struct caam_ctx *ctx = crypto_tfm_ctx(tfm); 2079 struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
2073 struct caam_drv_private *priv = dev_get_drvdata(caam_alg->ctrldev);
2074 int tgt_jr = atomic_inc_return(&priv->tfm_count);
2075 2080
2076 /* 2081 ctx->jrdev = caam_jr_alloc();
2077 * distribute tfms across job rings to ensure in-order 2082 if (IS_ERR(ctx->jrdev)) {
2078 * crypto request processing per tfm 2083 pr_err("Job Ring Device allocation for transform failed\n");
2079 */ 2084 return PTR_ERR(ctx->jrdev);
2080 ctx->jrdev = priv->jrdev[(tgt_jr / 2) % priv->total_jobrs]; 2085 }
2081 2086
2082 /* copy descriptor header template value */ 2087 /* copy descriptor header template value */
2083 ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam_alg->class1_alg_type; 2088 ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam_alg->class1_alg_type;
@@ -2104,44 +2109,26 @@ static void caam_cra_exit(struct crypto_tfm *tfm)
2104 dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma, 2109 dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma,
2105 desc_bytes(ctx->sh_desc_givenc), 2110 desc_bytes(ctx->sh_desc_givenc),
2106 DMA_TO_DEVICE); 2111 DMA_TO_DEVICE);
2112
2113 caam_jr_free(ctx->jrdev);
2107} 2114}
2108 2115
2109static void __exit caam_algapi_exit(void) 2116static void __exit caam_algapi_exit(void)
2110{ 2117{
2111 2118
2112 struct device_node *dev_node;
2113 struct platform_device *pdev;
2114 struct device *ctrldev;
2115 struct caam_drv_private *priv;
2116 struct caam_crypto_alg *t_alg, *n; 2119 struct caam_crypto_alg *t_alg, *n;
2117 2120
2118 dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); 2121 if (!alg_list.next)
2119 if (!dev_node) {
2120 dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
2121 if (!dev_node)
2122 return;
2123 }
2124
2125 pdev = of_find_device_by_node(dev_node);
2126 if (!pdev)
2127 return;
2128
2129 ctrldev = &pdev->dev;
2130 of_node_put(dev_node);
2131 priv = dev_get_drvdata(ctrldev);
2132
2133 if (!priv->alg_list.next)
2134 return; 2122 return;
2135 2123
2136 list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) { 2124 list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
2137 crypto_unregister_alg(&t_alg->crypto_alg); 2125 crypto_unregister_alg(&t_alg->crypto_alg);
2138 list_del(&t_alg->entry); 2126 list_del(&t_alg->entry);
2139 kfree(t_alg); 2127 kfree(t_alg);
2140 } 2128 }
2141} 2129}
2142 2130
2143static struct caam_crypto_alg *caam_alg_alloc(struct device *ctrldev, 2131static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
2144 struct caam_alg_template
2145 *template) 2132 *template)
2146{ 2133{
2147 struct caam_crypto_alg *t_alg; 2134 struct caam_crypto_alg *t_alg;
@@ -2149,7 +2136,7 @@ static struct caam_crypto_alg *caam_alg_alloc(struct device *ctrldev,
2149 2136
2150 t_alg = kzalloc(sizeof(struct caam_crypto_alg), GFP_KERNEL); 2137 t_alg = kzalloc(sizeof(struct caam_crypto_alg), GFP_KERNEL);
2151 if (!t_alg) { 2138 if (!t_alg) {
2152 dev_err(ctrldev, "failed to allocate t_alg\n"); 2139 pr_err("failed to allocate t_alg\n");
2153 return ERR_PTR(-ENOMEM); 2140 return ERR_PTR(-ENOMEM);
2154 } 2141 }
2155 2142
@@ -2181,62 +2168,39 @@ static struct caam_crypto_alg *caam_alg_alloc(struct device *ctrldev,
2181 t_alg->class1_alg_type = template->class1_alg_type; 2168 t_alg->class1_alg_type = template->class1_alg_type;
2182 t_alg->class2_alg_type = template->class2_alg_type; 2169 t_alg->class2_alg_type = template->class2_alg_type;
2183 t_alg->alg_op = template->alg_op; 2170 t_alg->alg_op = template->alg_op;
2184 t_alg->ctrldev = ctrldev;
2185 2171
2186 return t_alg; 2172 return t_alg;
2187} 2173}
2188 2174
2189static int __init caam_algapi_init(void) 2175static int __init caam_algapi_init(void)
2190{ 2176{
2191 struct device_node *dev_node;
2192 struct platform_device *pdev;
2193 struct device *ctrldev;
2194 struct caam_drv_private *priv;
2195 int i = 0, err = 0; 2177 int i = 0, err = 0;
2196 2178
2197 dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); 2179 INIT_LIST_HEAD(&alg_list);
2198 if (!dev_node) {
2199 dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
2200 if (!dev_node)
2201 return -ENODEV;
2202 }
2203
2204 pdev = of_find_device_by_node(dev_node);
2205 if (!pdev)
2206 return -ENODEV;
2207
2208 ctrldev = &pdev->dev;
2209 priv = dev_get_drvdata(ctrldev);
2210 of_node_put(dev_node);
2211
2212 INIT_LIST_HEAD(&priv->alg_list);
2213
2214 atomic_set(&priv->tfm_count, -1);
2215 2180
2216 /* register crypto algorithms the device supports */ 2181 /* register crypto algorithms the device supports */
2217 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) { 2182 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
2218 /* TODO: check if h/w supports alg */ 2183 /* TODO: check if h/w supports alg */
2219 struct caam_crypto_alg *t_alg; 2184 struct caam_crypto_alg *t_alg;
2220 2185
2221 t_alg = caam_alg_alloc(ctrldev, &driver_algs[i]); 2186 t_alg = caam_alg_alloc(&driver_algs[i]);
2222 if (IS_ERR(t_alg)) { 2187 if (IS_ERR(t_alg)) {
2223 err = PTR_ERR(t_alg); 2188 err = PTR_ERR(t_alg);
2224 dev_warn(ctrldev, "%s alg allocation failed\n", 2189 pr_warn("%s alg allocation failed\n",
2225 driver_algs[i].driver_name); 2190 driver_algs[i].driver_name);
2226 continue; 2191 continue;
2227 } 2192 }
2228 2193
2229 err = crypto_register_alg(&t_alg->crypto_alg); 2194 err = crypto_register_alg(&t_alg->crypto_alg);
2230 if (err) { 2195 if (err) {
2231 dev_warn(ctrldev, "%s alg registration failed\n", 2196 pr_warn("%s alg registration failed\n",
2232 t_alg->crypto_alg.cra_driver_name); 2197 t_alg->crypto_alg.cra_driver_name);
2233 kfree(t_alg); 2198 kfree(t_alg);
2234 } else 2199 } else
2235 list_add_tail(&t_alg->entry, &priv->alg_list); 2200 list_add_tail(&t_alg->entry, &alg_list);
2236 } 2201 }
2237 if (!list_empty(&priv->alg_list)) 2202 if (!list_empty(&alg_list))
2238 dev_info(ctrldev, "%s algorithms registered in /proc/crypto\n", 2203 pr_info("caam algorithms registered in /proc/crypto\n");
2239 (char *)of_get_property(dev_node, "compatible", NULL));
2240 2204
2241 return err; 2205 return err;
2242} 2206}
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index e732bd962e98..0378328f47a7 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -94,6 +94,9 @@
94#define debug(format, arg...) 94#define debug(format, arg...)
95#endif 95#endif
96 96
97
98static struct list_head hash_list;
99
97/* ahash per-session context */ 100/* ahash per-session context */
98struct caam_hash_ctx { 101struct caam_hash_ctx {
99 struct device *jrdev; 102 struct device *jrdev;
@@ -1653,7 +1656,6 @@ static struct caam_hash_template driver_hash[] = {
1653 1656
1654struct caam_hash_alg { 1657struct caam_hash_alg {
1655 struct list_head entry; 1658 struct list_head entry;
1656 struct device *ctrldev;
1657 int alg_type; 1659 int alg_type;
1658 int alg_op; 1660 int alg_op;
1659 struct ahash_alg ahash_alg; 1661 struct ahash_alg ahash_alg;
@@ -1670,7 +1672,6 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
1670 struct caam_hash_alg *caam_hash = 1672 struct caam_hash_alg *caam_hash =
1671 container_of(alg, struct caam_hash_alg, ahash_alg); 1673 container_of(alg, struct caam_hash_alg, ahash_alg);
1672 struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm); 1674 struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
1673 struct caam_drv_private *priv = dev_get_drvdata(caam_hash->ctrldev);
1674 /* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */ 1675 /* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
1675 static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE, 1676 static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
1676 HASH_MSG_LEN + SHA1_DIGEST_SIZE, 1677 HASH_MSG_LEN + SHA1_DIGEST_SIZE,
@@ -1678,15 +1679,17 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
1678 HASH_MSG_LEN + SHA256_DIGEST_SIZE, 1679 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
1679 HASH_MSG_LEN + 64, 1680 HASH_MSG_LEN + 64,
1680 HASH_MSG_LEN + SHA512_DIGEST_SIZE }; 1681 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
1681 int tgt_jr = atomic_inc_return(&priv->tfm_count);
1682 int ret = 0; 1682 int ret = 0;
1683 1683
1684 /* 1684 /*
1685 * distribute tfms across job rings to ensure in-order 1685 * Get a Job ring from Job Ring driver to ensure in-order
1686 * crypto request processing per tfm 1686 * crypto request processing per tfm
1687 */ 1687 */
1688 ctx->jrdev = priv->jrdev[tgt_jr % priv->total_jobrs]; 1688 ctx->jrdev = caam_jr_alloc();
1689 1689 if (IS_ERR(ctx->jrdev)) {
1690 pr_err("Job Ring Device allocation for transform failed\n");
1691 return PTR_ERR(ctx->jrdev);
1692 }
1690 /* copy descriptor header template value */ 1693 /* copy descriptor header template value */
1691 ctx->alg_type = OP_TYPE_CLASS2_ALG | caam_hash->alg_type; 1694 ctx->alg_type = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
1692 ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_hash->alg_op; 1695 ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_hash->alg_op;
@@ -1729,35 +1732,18 @@ static void caam_hash_cra_exit(struct crypto_tfm *tfm)
1729 !dma_mapping_error(ctx->jrdev, ctx->sh_desc_finup_dma)) 1732 !dma_mapping_error(ctx->jrdev, ctx->sh_desc_finup_dma))
1730 dma_unmap_single(ctx->jrdev, ctx->sh_desc_finup_dma, 1733 dma_unmap_single(ctx->jrdev, ctx->sh_desc_finup_dma,
1731 desc_bytes(ctx->sh_desc_finup), DMA_TO_DEVICE); 1734 desc_bytes(ctx->sh_desc_finup), DMA_TO_DEVICE);
1735
1736 caam_jr_free(ctx->jrdev);
1732} 1737}
1733 1738
1734static void __exit caam_algapi_hash_exit(void) 1739static void __exit caam_algapi_hash_exit(void)
1735{ 1740{
1736 struct device_node *dev_node;
1737 struct platform_device *pdev;
1738 struct device *ctrldev;
1739 struct caam_drv_private *priv;
1740 struct caam_hash_alg *t_alg, *n; 1741 struct caam_hash_alg *t_alg, *n;
1741 1742
1742 dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); 1743 if (!hash_list.next)
1743 if (!dev_node) {
1744 dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
1745 if (!dev_node)
1746 return;
1747 }
1748
1749 pdev = of_find_device_by_node(dev_node);
1750 if (!pdev)
1751 return; 1744 return;
1752 1745
1753 ctrldev = &pdev->dev; 1746 list_for_each_entry_safe(t_alg, n, &hash_list, entry) {
1754 of_node_put(dev_node);
1755 priv = dev_get_drvdata(ctrldev);
1756
1757 if (!priv->hash_list.next)
1758 return;
1759
1760 list_for_each_entry_safe(t_alg, n, &priv->hash_list, entry) {
1761 crypto_unregister_ahash(&t_alg->ahash_alg); 1747 crypto_unregister_ahash(&t_alg->ahash_alg);
1762 list_del(&t_alg->entry); 1748 list_del(&t_alg->entry);
1763 kfree(t_alg); 1749 kfree(t_alg);
@@ -1765,7 +1751,7 @@ static void __exit caam_algapi_hash_exit(void)
1765} 1751}
1766 1752
1767static struct caam_hash_alg * 1753static struct caam_hash_alg *
1768caam_hash_alloc(struct device *ctrldev, struct caam_hash_template *template, 1754caam_hash_alloc(struct caam_hash_template *template,
1769 bool keyed) 1755 bool keyed)
1770{ 1756{
1771 struct caam_hash_alg *t_alg; 1757 struct caam_hash_alg *t_alg;
@@ -1774,7 +1760,7 @@ caam_hash_alloc(struct device *ctrldev, struct caam_hash_template *template,
1774 1760
1775 t_alg = kzalloc(sizeof(struct caam_hash_alg), GFP_KERNEL); 1761 t_alg = kzalloc(sizeof(struct caam_hash_alg), GFP_KERNEL);
1776 if (!t_alg) { 1762 if (!t_alg) {
1777 dev_err(ctrldev, "failed to allocate t_alg\n"); 1763 pr_err("failed to allocate t_alg\n");
1778 return ERR_PTR(-ENOMEM); 1764 return ERR_PTR(-ENOMEM);
1779 } 1765 }
1780 1766
@@ -1805,37 +1791,15 @@ caam_hash_alloc(struct device *ctrldev, struct caam_hash_template *template,
1805 1791
1806 t_alg->alg_type = template->alg_type; 1792 t_alg->alg_type = template->alg_type;
1807 t_alg->alg_op = template->alg_op; 1793 t_alg->alg_op = template->alg_op;
1808 t_alg->ctrldev = ctrldev;
1809 1794
1810 return t_alg; 1795 return t_alg;
1811} 1796}
1812 1797
1813static int __init caam_algapi_hash_init(void) 1798static int __init caam_algapi_hash_init(void)
1814{ 1799{
1815 struct device_node *dev_node;
1816 struct platform_device *pdev;
1817 struct device *ctrldev;
1818 struct caam_drv_private *priv;
1819 int i = 0, err = 0; 1800 int i = 0, err = 0;
1820 1801
1821 dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); 1802 INIT_LIST_HEAD(&hash_list);
1822 if (!dev_node) {
1823 dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
1824 if (!dev_node)
1825 return -ENODEV;
1826 }
1827
1828 pdev = of_find_device_by_node(dev_node);
1829 if (!pdev)
1830 return -ENODEV;
1831
1832 ctrldev = &pdev->dev;
1833 priv = dev_get_drvdata(ctrldev);
1834 of_node_put(dev_node);
1835
1836 INIT_LIST_HEAD(&priv->hash_list);
1837
1838 atomic_set(&priv->tfm_count, -1);
1839 1803
1840 /* register crypto algorithms the device supports */ 1804 /* register crypto algorithms the device supports */
1841 for (i = 0; i < ARRAY_SIZE(driver_hash); i++) { 1805 for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
@@ -1843,38 +1807,38 @@ static int __init caam_algapi_hash_init(void)
1843 struct caam_hash_alg *t_alg; 1807 struct caam_hash_alg *t_alg;
1844 1808
1845 /* register hmac version */ 1809 /* register hmac version */
1846 t_alg = caam_hash_alloc(ctrldev, &driver_hash[i], true); 1810 t_alg = caam_hash_alloc(&driver_hash[i], true);
1847 if (IS_ERR(t_alg)) { 1811 if (IS_ERR(t_alg)) {
1848 err = PTR_ERR(t_alg); 1812 err = PTR_ERR(t_alg);
1849 dev_warn(ctrldev, "%s alg allocation failed\n", 1813 pr_warn("%s alg allocation failed\n",
1850 driver_hash[i].driver_name); 1814 driver_hash[i].driver_name);
1851 continue; 1815 continue;
1852 } 1816 }
1853 1817
1854 err = crypto_register_ahash(&t_alg->ahash_alg); 1818 err = crypto_register_ahash(&t_alg->ahash_alg);
1855 if (err) { 1819 if (err) {
1856 dev_warn(ctrldev, "%s alg registration failed\n", 1820 pr_warn("%s alg registration failed\n",
1857 t_alg->ahash_alg.halg.base.cra_driver_name); 1821 t_alg->ahash_alg.halg.base.cra_driver_name);
1858 kfree(t_alg); 1822 kfree(t_alg);
1859 } else 1823 } else
1860 list_add_tail(&t_alg->entry, &priv->hash_list); 1824 list_add_tail(&t_alg->entry, &hash_list);
1861 1825
1862 /* register unkeyed version */ 1826 /* register unkeyed version */
1863 t_alg = caam_hash_alloc(ctrldev, &driver_hash[i], false); 1827 t_alg = caam_hash_alloc(&driver_hash[i], false);
1864 if (IS_ERR(t_alg)) { 1828 if (IS_ERR(t_alg)) {
1865 err = PTR_ERR(t_alg); 1829 err = PTR_ERR(t_alg);
1866 dev_warn(ctrldev, "%s alg allocation failed\n", 1830 pr_warn("%s alg allocation failed\n",
1867 driver_hash[i].driver_name); 1831 driver_hash[i].driver_name);
1868 continue; 1832 continue;
1869 } 1833 }
1870 1834
1871 err = crypto_register_ahash(&t_alg->ahash_alg); 1835 err = crypto_register_ahash(&t_alg->ahash_alg);
1872 if (err) { 1836 if (err) {
1873 dev_warn(ctrldev, "%s alg registration failed\n", 1837 pr_warn("%s alg registration failed\n",
1874 t_alg->ahash_alg.halg.base.cra_driver_name); 1838 t_alg->ahash_alg.halg.base.cra_driver_name);
1875 kfree(t_alg); 1839 kfree(t_alg);
1876 } else 1840 } else
1877 list_add_tail(&t_alg->entry, &priv->hash_list); 1841 list_add_tail(&t_alg->entry, &hash_list);
1878 } 1842 }
1879 1843
1880 return err; 1844 return err;
diff --git a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c
index d1939a9539c0..28486b19fc36 100644
--- a/drivers/crypto/caam/caamrng.c
+++ b/drivers/crypto/caam/caamrng.c
@@ -273,34 +273,23 @@ static struct hwrng caam_rng = {
273 273
274static void __exit caam_rng_exit(void) 274static void __exit caam_rng_exit(void)
275{ 275{
276 caam_jr_free(rng_ctx.jrdev);
276 hwrng_unregister(&caam_rng); 277 hwrng_unregister(&caam_rng);
277} 278}
278 279
279static int __init caam_rng_init(void) 280static int __init caam_rng_init(void)
280{ 281{
281 struct device_node *dev_node; 282 struct device *dev;
282 struct platform_device *pdev;
283 struct device *ctrldev;
284 struct caam_drv_private *priv;
285
286 dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
287 if (!dev_node) {
288 dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
289 if (!dev_node)
290 return -ENODEV;
291 }
292
293 pdev = of_find_device_by_node(dev_node);
294 if (!pdev)
295 return -ENODEV;
296 283
297 ctrldev = &pdev->dev; 284 dev = caam_jr_alloc();
298 priv = dev_get_drvdata(ctrldev); 285 if (IS_ERR(dev)) {
299 of_node_put(dev_node); 286 pr_err("Job Ring Device allocation for transform failed\n");
287 return PTR_ERR(dev);
288 }
300 289
301 caam_init_rng(&rng_ctx, priv->jrdev[0]); 290 caam_init_rng(&rng_ctx, dev);
302 291
303 dev_info(priv->jrdev[0], "registering rng-caam\n"); 292 dev_info(dev, "registering rng-caam\n");
304 return hwrng_register(&caam_rng); 293 return hwrng_register(&caam_rng);
305} 294}
306 295
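
The caamalg, caamhash and caamrng changes above all drop the walk of the controller's private jrdev[] array in favour of the caam_jr_alloc()/caam_jr_free() interface now exported by jr.c. Below is a minimal consumer sketch, not part of the patch, of how that interface is meant to be used; the callback and helper names are placeholders, and the completion-based wait is just one possible way to synchronize on the job.

	#include <linux/completion.h>
	#include <linux/device.h>
	#include <linux/err.h>
	#include "compat.h"
	#include "jr.h"

	/* Placeholder callback: the signature is fixed by caam_jr_enqueue(). */
	static void my_done_cb(struct device *jrdev, u32 *desc, u32 status,
			       void *areq)
	{
		if (status)
			dev_err(jrdev, "job failed: 0x%08x\n", status);
		complete(areq);		/* caller waits on a struct completion */
	}

	static int submit_one_job(u32 *desc, struct completion *done)
	{
		struct device *jrdev;
		int ret;

		jrdev = caam_jr_alloc();	/* least-loaded ring is chosen */
		if (IS_ERR(jrdev))
			return PTR_ERR(jrdev);

		ret = caam_jr_enqueue(jrdev, desc, my_done_cb, done);
		if (!ret)
			wait_for_completion(done);

		caam_jr_free(jrdev);		/* drops the ring's tfm_count */
		return ret;
	}

Every caam_jr_alloc() increments the chosen ring's tfm_count, which is exactly the metric the allocator in jr.c uses to balance later requests; the real crypto drivers hold the ring for the lifetime of a tfm rather than per job.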
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index bc6d820812b6..63fb1af2c431 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -16,82 +16,75 @@
16#include "error.h" 16#include "error.h"
17#include "ctrl.h" 17#include "ctrl.h"
18 18
19static int caam_remove(struct platform_device *pdev)
20{
21 struct device *ctrldev;
22 struct caam_drv_private *ctrlpriv;
23 struct caam_drv_private_jr *jrpriv;
24 struct caam_full __iomem *topregs;
25 int ring, ret = 0;
26
27 ctrldev = &pdev->dev;
28 ctrlpriv = dev_get_drvdata(ctrldev);
29 topregs = (struct caam_full __iomem *)ctrlpriv->ctrl;
30
31 /* shut down JobRs */
32 for (ring = 0; ring < ctrlpriv->total_jobrs; ring++) {
33 ret |= caam_jr_shutdown(ctrlpriv->jrdev[ring]);
34 jrpriv = dev_get_drvdata(ctrlpriv->jrdev[ring]);
35 irq_dispose_mapping(jrpriv->irq);
36 }
37
38 /* Shut down debug views */
39#ifdef CONFIG_DEBUG_FS
40 debugfs_remove_recursive(ctrlpriv->dfs_root);
41#endif
42
43 /* Unmap controller region */
44 iounmap(&topregs->ctrl);
45
46 kfree(ctrlpriv->jrdev);
47 kfree(ctrlpriv);
48
49 return ret;
50}
51
52/* 19/*
53 * Descriptor to instantiate RNG State Handle 0 in normal mode and 20 * Descriptor to instantiate RNG State Handle 0 in normal mode and
54 * load the JDKEK, TDKEK and TDSK registers 21 * load the JDKEK, TDKEK and TDSK registers
55 */ 22 */
56static void build_instantiation_desc(u32 *desc) 23static void build_instantiation_desc(u32 *desc, int handle, int do_sk)
57{ 24{
58 u32 *jump_cmd; 25 u32 *jump_cmd, op_flags;
59 26
60 init_job_desc(desc, 0); 27 init_job_desc(desc, 0);
61 28
29 op_flags = OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG |
30 (handle << OP_ALG_AAI_SHIFT) | OP_ALG_AS_INIT;
31
62 /* INIT RNG in non-test mode */ 32 /* INIT RNG in non-test mode */
63 append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG | 33 append_operation(desc, op_flags);
64 OP_ALG_AS_INIT); 34
35 if (!handle && do_sk) {
36 /*
37 * For SH0, Secure Keys must be generated as well
38 */
39
40 /* wait for done */
41 jump_cmd = append_jump(desc, JUMP_CLASS_CLASS1);
42 set_jump_tgt_here(desc, jump_cmd);
43
44 /*
45 * load 1 to clear written reg:
46 * resets the done interrrupt and returns the RNG to idle.
47 */
48 append_load_imm_u32(desc, 1, LDST_SRCDST_WORD_CLRW);
49
50 /* Initialize State Handle */
51 append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG |
52 OP_ALG_AAI_RNG4_SK);
53 }
65 54
66 /* wait for done */ 55 append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TYPE_HALT);
67 jump_cmd = append_jump(desc, JUMP_CLASS_CLASS1); 56}
68 set_jump_tgt_here(desc, jump_cmd);
69 57
70 /* 58/* Descriptor for deinstantiation of State Handle 0 of the RNG block. */
71 * load 1 to clear written reg: 59static void build_deinstantiation_desc(u32 *desc, int handle)
72 * resets the done interrupt and returns the RNG to idle. 60{
73 */ 61 init_job_desc(desc, 0);
74 append_load_imm_u32(desc, 1, LDST_SRCDST_WORD_CLRW);
75 62
76 /* generate secure keys (non-test) */ 63 /* Uninstantiate State Handle 0 */
77 append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG | 64 append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG |
78 OP_ALG_RNG4_SK); 65 (handle << OP_ALG_AAI_SHIFT) | OP_ALG_AS_INITFINAL);
66
67 append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TYPE_HALT);
79} 68}
80 69
81static int instantiate_rng(struct device *ctrldev) 70/*
71 * run_descriptor_deco0 - runs a descriptor on DECO0, under direct control of
72 * the software (no JR/QI used).
73 * @ctrldev - pointer to device
74 * @status - descriptor status, after being run
75 *
76 * Return: - 0 if no error occurred
77 * - -ENODEV if the DECO couldn't be acquired
78 * - -EAGAIN if an error occurred while executing the descriptor
79 */
80static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc,
81 u32 *status)
82{ 82{
83 struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev); 83 struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
84 struct caam_full __iomem *topregs; 84 struct caam_full __iomem *topregs;
85 unsigned int timeout = 100000; 85 unsigned int timeout = 100000;
86 u32 *desc; 86 u32 deco_dbg_reg, flags;
87 int i, ret = 0; 87 int i;
88
89 desc = kmalloc(CAAM_CMD_SZ * 6, GFP_KERNEL | GFP_DMA);
90 if (!desc) {
91 dev_err(ctrldev, "can't allocate RNG init descriptor memory\n");
92 return -ENOMEM;
93 }
94 build_instantiation_desc(desc);
95 88
96 /* Set the bit to request direct access to DECO0 */ 89 /* Set the bit to request direct access to DECO0 */
97 topregs = (struct caam_full __iomem *)ctrlpriv->ctrl; 90 topregs = (struct caam_full __iomem *)ctrlpriv->ctrl;
@@ -103,36 +96,219 @@ static int instantiate_rng(struct device *ctrldev)
103 96
104 if (!timeout) { 97 if (!timeout) {
105 dev_err(ctrldev, "failed to acquire DECO 0\n"); 98 dev_err(ctrldev, "failed to acquire DECO 0\n");
106 ret = -EIO; 99 clrbits32(&topregs->ctrl.deco_rq, DECORR_RQD0ENABLE);
107 goto out; 100 return -ENODEV;
108 } 101 }
109 102
110 for (i = 0; i < desc_len(desc); i++) 103 for (i = 0; i < desc_len(desc); i++)
111 topregs->deco.descbuf[i] = *(desc + i); 104 wr_reg32(&topregs->deco.descbuf[i], *(desc + i));
105
106 flags = DECO_JQCR_WHL;
107 /*
108 * If the descriptor length is longer than 4 words, then the
109 * FOUR bit in JRCTRL register must be set.
110 */
111 if (desc_len(desc) >= 4)
112 flags |= DECO_JQCR_FOUR;
112 113
113 wr_reg32(&topregs->deco.jr_ctl_hi, DECO_JQCR_WHL | DECO_JQCR_FOUR); 114 /* Instruct the DECO to execute it */
115 wr_reg32(&topregs->deco.jr_ctl_hi, flags);
114 116
115 timeout = 10000000; 117 timeout = 10000000;
116 while ((rd_reg32(&topregs->deco.desc_dbg) & DECO_DBG_VALID) && 118 do {
117 --timeout) 119 deco_dbg_reg = rd_reg32(&topregs->deco.desc_dbg);
120 /*
 121 * If an error occurred in the descriptor, then
122 * the DECO status field will be set to 0x0D
123 */
124 if ((deco_dbg_reg & DESC_DBG_DECO_STAT_MASK) ==
125 DESC_DBG_DECO_STAT_HOST_ERR)
126 break;
118 cpu_relax(); 127 cpu_relax();
128 } while ((deco_dbg_reg & DESC_DBG_DECO_STAT_VALID) && --timeout);
119 129
120 if (!timeout) { 130 *status = rd_reg32(&topregs->deco.op_status_hi) &
121 dev_err(ctrldev, "failed to instantiate RNG\n"); 131 DECO_OP_STATUS_HI_ERR_MASK;
122 ret = -EIO;
123 }
124 132
133 /* Mark the DECO as free */
125 clrbits32(&topregs->ctrl.deco_rq, DECORR_RQD0ENABLE); 134 clrbits32(&topregs->ctrl.deco_rq, DECORR_RQD0ENABLE);
126out: 135
136 if (!timeout)
137 return -EAGAIN;
138
139 return 0;
140}
141
142/*
143 * instantiate_rng - builds and executes a descriptor on DECO0,
144 * which initializes the RNG block.
145 * @ctrldev - pointer to device
146 * @state_handle_mask - bitmask containing the instantiation status
147 * for the RNG4 state handles which exist in
148 * the RNG4 block: 1 if it's been instantiated
149 * by an external entry, 0 otherwise.
150 * @gen_sk - generate data to be loaded into the JDKEK, TDKEK and TDSK;
151 * Caution: this can be done only once; if the keys need to be
152 * regenerated, a POR is required
153 *
154 * Return: - 0 if no error occurred
155 * - -ENOMEM if there isn't enough memory to allocate the descriptor
156 * - -ENODEV if DECO0 couldn't be acquired
157 * - -EAGAIN if an error occurred when executing the descriptor
 158 * e.g. there was an RNG hardware error due to not "good enough"
 159 * entropy being acquired.
160 */
161static int instantiate_rng(struct device *ctrldev, int state_handle_mask,
162 int gen_sk)
163{
164 struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
165 struct caam_full __iomem *topregs;
166 struct rng4tst __iomem *r4tst;
167 u32 *desc, status, rdsta_val;
168 int ret = 0, sh_idx;
169
170 topregs = (struct caam_full __iomem *)ctrlpriv->ctrl;
171 r4tst = &topregs->ctrl.r4tst[0];
172
173 desc = kmalloc(CAAM_CMD_SZ * 7, GFP_KERNEL);
174 if (!desc)
175 return -ENOMEM;
176
177 for (sh_idx = 0; sh_idx < RNG4_MAX_HANDLES; sh_idx++) {
178 /*
179 * If the corresponding bit is set, this state handle
180 * was initialized by somebody else, so it's left alone.
181 */
182 if ((1 << sh_idx) & state_handle_mask)
183 continue;
184
185 /* Create the descriptor for instantiating RNG State Handle */
186 build_instantiation_desc(desc, sh_idx, gen_sk);
187
188 /* Try to run it through DECO0 */
189 ret = run_descriptor_deco0(ctrldev, desc, &status);
190
191 /*
192 * If ret is not 0, or descriptor status is not 0, then
193 * something went wrong. No need to try the next state
194 * handle (if available), bail out here.
195 * Also, if for some reason, the State Handle didn't get
196 * instantiated although the descriptor has finished
197 * without any error (HW optimizations for later
198 * CAAM eras), then try again.
199 */
200 rdsta_val =
201 rd_reg32(&topregs->ctrl.r4tst[0].rdsta) & RDSTA_IFMASK;
202 if (status || !(rdsta_val & (1 << sh_idx)))
203 ret = -EAGAIN;
204 if (ret)
205 break;
206
207 dev_info(ctrldev, "Instantiated RNG4 SH%d\n", sh_idx);
208 /* Clear the contents before recreating the descriptor */
209 memset(desc, 0x00, CAAM_CMD_SZ * 7);
210 }
211
127 kfree(desc); 212 kfree(desc);
213
128 return ret; 214 return ret;
129} 215}
130 216
131/* 217/*
132 * By default, the TRNG runs for 200 clocks per sample; 218 * deinstantiate_rng - builds and executes a descriptor on DECO0,
133 * 1600 clocks per sample generates better entropy. 219 * which deinitializes the RNG block.
220 * @ctrldev - pointer to device
221 * @state_handle_mask - bitmask containing the instantiation status
222 * for the RNG4 state handles which exist in
223 * the RNG4 block: 1 if it's been instantiated
224 *
225 * Return: - 0 if no error occurred
226 * - -ENOMEM if there isn't enough memory to allocate the descriptor
227 * - -ENODEV if DECO0 couldn't be acquired
228 * - -EAGAIN if an error occurred when executing the descriptor
134 */ 229 */
135static void kick_trng(struct platform_device *pdev) 230static int deinstantiate_rng(struct device *ctrldev, int state_handle_mask)
231{
232 u32 *desc, status;
233 int sh_idx, ret = 0;
234
235 desc = kmalloc(CAAM_CMD_SZ * 3, GFP_KERNEL);
236 if (!desc)
237 return -ENOMEM;
238
239 for (sh_idx = 0; sh_idx < RNG4_MAX_HANDLES; sh_idx++) {
240 /*
241 * If the corresponding bit is set, then it means the state
242 * handle was initialized by us, and thus it needs to be
 243 * deinitialized as well
244 */
245 if ((1 << sh_idx) & state_handle_mask) {
246 /*
 247 * Create the descriptor for deinstantiating this state
248 * handle
249 */
250 build_deinstantiation_desc(desc, sh_idx);
251
252 /* Try to run it through DECO0 */
253 ret = run_descriptor_deco0(ctrldev, desc, &status);
254
255 if (ret || status) {
256 dev_err(ctrldev,
257 "Failed to deinstantiate RNG4 SH%d\n",
258 sh_idx);
259 break;
260 }
261 dev_info(ctrldev, "Deinstantiated RNG4 SH%d\n", sh_idx);
262 }
263 }
264
265 kfree(desc);
266
267 return ret;
268}
269
270static int caam_remove(struct platform_device *pdev)
271{
272 struct device *ctrldev;
273 struct caam_drv_private *ctrlpriv;
274 struct caam_full __iomem *topregs;
275 int ring, ret = 0;
276
277 ctrldev = &pdev->dev;
278 ctrlpriv = dev_get_drvdata(ctrldev);
279 topregs = (struct caam_full __iomem *)ctrlpriv->ctrl;
280
281 /* Remove platform devices for JobRs */
282 for (ring = 0; ring < ctrlpriv->total_jobrs; ring++) {
283 if (ctrlpriv->jrpdev[ring])
284 of_device_unregister(ctrlpriv->jrpdev[ring]);
285 }
286
287 /* De-initialize RNG state handles initialized by this driver. */
288 if (ctrlpriv->rng4_sh_init)
289 deinstantiate_rng(ctrldev, ctrlpriv->rng4_sh_init);
290
291 /* Shut down debug views */
292#ifdef CONFIG_DEBUG_FS
293 debugfs_remove_recursive(ctrlpriv->dfs_root);
294#endif
295
296 /* Unmap controller region */
297 iounmap(&topregs->ctrl);
298
299 kfree(ctrlpriv->jrpdev);
300 kfree(ctrlpriv);
301
302 return ret;
303}
304
305/*
306 * kick_trng - sets the various parameters for enabling the initialization
307 * of the RNG4 block in CAAM
308 * @pdev - pointer to the platform device
309 * @ent_delay - Defines the length (in system clocks) of each entropy sample.
310 */
311static void kick_trng(struct platform_device *pdev, int ent_delay)
136{ 312{
137 struct device *ctrldev = &pdev->dev; 313 struct device *ctrldev = &pdev->dev;
138 struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev); 314 struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
@@ -145,14 +321,31 @@ static void kick_trng(struct platform_device *pdev)
145 321
146 /* put RNG4 into program mode */ 322 /* put RNG4 into program mode */
147 setbits32(&r4tst->rtmctl, RTMCTL_PRGM); 323 setbits32(&r4tst->rtmctl, RTMCTL_PRGM);
148 /* 1600 clocks per sample */ 324
325 /*
326 * Performance-wise, it does not make sense to
327 * set the delay to a value that is lower
328 * than the last one that worked (i.e. the state handles
 329 * were instantiated properly). Thus, instead of wasting
330 * time trying to set the values controlling the sample
331 * frequency, the function simply returns.
332 */
333 val = (rd_reg32(&r4tst->rtsdctl) & RTSDCTL_ENT_DLY_MASK)
334 >> RTSDCTL_ENT_DLY_SHIFT;
335 if (ent_delay <= val) {
336 /* put RNG4 into run mode */
337 clrbits32(&r4tst->rtmctl, RTMCTL_PRGM);
338 return;
339 }
340
149 val = rd_reg32(&r4tst->rtsdctl); 341 val = rd_reg32(&r4tst->rtsdctl);
150 val = (val & ~RTSDCTL_ENT_DLY_MASK) | (1600 << RTSDCTL_ENT_DLY_SHIFT); 342 val = (val & ~RTSDCTL_ENT_DLY_MASK) |
343 (ent_delay << RTSDCTL_ENT_DLY_SHIFT);
151 wr_reg32(&r4tst->rtsdctl, val); 344 wr_reg32(&r4tst->rtsdctl, val);
152 /* min. freq. count */ 345 /* min. freq. count, equal to 1/4 of the entropy sample length */
153 wr_reg32(&r4tst->rtfrqmin, 400); 346 wr_reg32(&r4tst->rtfrqmin, ent_delay >> 2);
154 /* max. freq. count */ 347 /* max. freq. count, equal to 8 times the entropy sample length */
155 wr_reg32(&r4tst->rtfrqmax, 6400); 348 wr_reg32(&r4tst->rtfrqmax, ent_delay << 3);
156 /* put RNG4 into run mode */ 349 /* put RNG4 into run mode */
157 clrbits32(&r4tst->rtmctl, RTMCTL_PRGM); 350 clrbits32(&r4tst->rtmctl, RTMCTL_PRGM);
158} 351}
@@ -193,7 +386,7 @@ EXPORT_SYMBOL(caam_get_era);
193/* Probe routine for CAAM top (controller) level */ 386/* Probe routine for CAAM top (controller) level */
194static int caam_probe(struct platform_device *pdev) 387static int caam_probe(struct platform_device *pdev)
195{ 388{
196 int ret, ring, rspec; 389 int ret, ring, rspec, gen_sk, ent_delay = RTSDCTL_ENT_DLY_MIN;
197 u64 caam_id; 390 u64 caam_id;
198 struct device *dev; 391 struct device *dev;
199 struct device_node *nprop, *np; 392 struct device_node *nprop, *np;
@@ -258,8 +451,9 @@ static int caam_probe(struct platform_device *pdev)
258 rspec++; 451 rspec++;
259 } 452 }
260 453
261 ctrlpriv->jrdev = kzalloc(sizeof(struct device *) * rspec, GFP_KERNEL); 454 ctrlpriv->jrpdev = kzalloc(sizeof(struct platform_device *) * rspec,
262 if (ctrlpriv->jrdev == NULL) { 455 GFP_KERNEL);
456 if (ctrlpriv->jrpdev == NULL) {
263 iounmap(&topregs->ctrl); 457 iounmap(&topregs->ctrl);
264 return -ENOMEM; 458 return -ENOMEM;
265 } 459 }
@@ -267,13 +461,24 @@ static int caam_probe(struct platform_device *pdev)
267 ring = 0; 461 ring = 0;
268 ctrlpriv->total_jobrs = 0; 462 ctrlpriv->total_jobrs = 0;
269 for_each_compatible_node(np, NULL, "fsl,sec-v4.0-job-ring") { 463 for_each_compatible_node(np, NULL, "fsl,sec-v4.0-job-ring") {
270 caam_jr_probe(pdev, np, ring); 464 ctrlpriv->jrpdev[ring] =
465 of_platform_device_create(np, NULL, dev);
466 if (!ctrlpriv->jrpdev[ring]) {
467 pr_warn("JR%d Platform device creation error\n", ring);
468 continue;
469 }
271 ctrlpriv->total_jobrs++; 470 ctrlpriv->total_jobrs++;
272 ring++; 471 ring++;
273 } 472 }
274 if (!ring) { 473 if (!ring) {
275 for_each_compatible_node(np, NULL, "fsl,sec4.0-job-ring") { 474 for_each_compatible_node(np, NULL, "fsl,sec4.0-job-ring") {
276 caam_jr_probe(pdev, np, ring); 475 ctrlpriv->jrpdev[ring] =
476 of_platform_device_create(np, NULL, dev);
477 if (!ctrlpriv->jrpdev[ring]) {
478 pr_warn("JR%d Platform device creation error\n",
479 ring);
480 continue;
481 }
277 ctrlpriv->total_jobrs++; 482 ctrlpriv->total_jobrs++;
278 ring++; 483 ring++;
279 } 484 }
@@ -299,16 +504,55 @@ static int caam_probe(struct platform_device *pdev)
299 504
300 /* 505 /*
301 * If SEC has RNG version >= 4 and RNG state handle has not been 506 * If SEC has RNG version >= 4 and RNG state handle has not been
302 * already instantiated ,do RNG instantiation 507 * already instantiated, do RNG instantiation
303 */ 508 */
304 if ((cha_vid & CHA_ID_RNG_MASK) >> CHA_ID_RNG_SHIFT >= 4 && 509 if ((cha_vid & CHA_ID_RNG_MASK) >> CHA_ID_RNG_SHIFT >= 4) {
305 !(rd_reg32(&topregs->ctrl.r4tst[0].rdsta) & RDSTA_IF0)) { 510 ctrlpriv->rng4_sh_init =
306 kick_trng(pdev); 511 rd_reg32(&topregs->ctrl.r4tst[0].rdsta);
307 ret = instantiate_rng(dev); 512 /*
513 * If the secure keys (TDKEK, JDKEK, TDSK), were already
514 * generated, signal this to the function that is instantiating
515 * the state handles. An error would occur if RNG4 attempts
516 * to regenerate these keys before the next POR.
517 */
518 gen_sk = ctrlpriv->rng4_sh_init & RDSTA_SKVN ? 0 : 1;
519 ctrlpriv->rng4_sh_init &= RDSTA_IFMASK;
520 do {
521 int inst_handles =
522 rd_reg32(&topregs->ctrl.r4tst[0].rdsta) &
523 RDSTA_IFMASK;
524 /*
525 * If either SH were instantiated by somebody else
526 * (e.g. u-boot) then it is assumed that the entropy
527 * parameters are properly set and thus the function
528 * setting these (kick_trng(...)) is skipped.
529 * Also, if a handle was instantiated, do not change
530 * the TRNG parameters.
531 */
532 if (!(ctrlpriv->rng4_sh_init || inst_handles)) {
533 kick_trng(pdev, ent_delay);
534 ent_delay += 400;
535 }
536 /*
 537 * If instantiate_rng(...) fails, the loop will rerun
 538 * and the kick_trng(...) function will modify the
 539 * upper and lower limits of the entropy sampling
 540 * interval, leading to a successful initialization of
541 * the RNG.
542 */
543 ret = instantiate_rng(dev, inst_handles,
544 gen_sk);
545 } while ((ret == -EAGAIN) && (ent_delay < RTSDCTL_ENT_DLY_MAX));
308 if (ret) { 546 if (ret) {
547 dev_err(dev, "failed to instantiate RNG");
309 caam_remove(pdev); 548 caam_remove(pdev);
310 return ret; 549 return ret;
311 } 550 }
551 /*
552 * Set handles init'ed by this module as the complement of the
553 * already initialized ones
554 */
555 ctrlpriv->rng4_sh_init = ~ctrlpriv->rng4_sh_init & RDSTA_IFMASK;
312 556
313 /* Enable RDB bit so that RNG works faster */ 557 /* Enable RDB bit so that RNG works faster */
314 setbits32(&topregs->ctrl.scfgr, SCFGR_RDBENABLE); 558 setbits32(&topregs->ctrl.scfgr, SCFGR_RDBENABLE);
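
The probe path above now retries RNG4 instantiation with a growing entropy delay: ent_delay starts at RTSDCTL_ENT_DLY_MIN and grows by 400 on every -EAGAIN until RTSDCTL_ENT_DLY_MAX, and kick_trng() derives the frequency-count window from it (minimum = ent_delay / 4, maximum = ent_delay * 8). A tiny illustrative user-space snippet, not part of the patch, that just prints that schedule:

	#include <stdio.h>

	#define RTSDCTL_ENT_DLY_MIN	1200
	#define RTSDCTL_ENT_DLY_MAX	12800

	int main(void)
	{
		int ent_delay;

		/* same progression the probe loop walks on -EAGAIN */
		for (ent_delay = RTSDCTL_ENT_DLY_MIN;
		     ent_delay < RTSDCTL_ENT_DLY_MAX; ent_delay += 400)
			printf("ent_delay=%5d  rtfrqmin=%5d  rtfrqmax=%6d\n",
			       ent_delay, ent_delay >> 2, ent_delay << 3);
		return 0;
	}

So the first attempt samples entropy for 1200 clocks with a 300..9600 frequency window, and the last possible attempt uses 12400 clocks with a 3100..99200 window.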
diff --git a/drivers/crypto/caam/desc.h b/drivers/crypto/caam/desc.h
index 53b296f78b0d..7e4500f18df6 100644
--- a/drivers/crypto/caam/desc.h
+++ b/drivers/crypto/caam/desc.h
@@ -1155,8 +1155,15 @@ struct sec4_sg_entry {
1155 1155
1156/* randomizer AAI set */ 1156/* randomizer AAI set */
1157#define OP_ALG_AAI_RNG (0x00 << OP_ALG_AAI_SHIFT) 1157#define OP_ALG_AAI_RNG (0x00 << OP_ALG_AAI_SHIFT)
1158#define OP_ALG_AAI_RNG_NOZERO (0x10 << OP_ALG_AAI_SHIFT) 1158#define OP_ALG_AAI_RNG_NZB (0x10 << OP_ALG_AAI_SHIFT)
1159#define OP_ALG_AAI_RNG_ODD (0x20 << OP_ALG_AAI_SHIFT) 1159#define OP_ALG_AAI_RNG_OBP (0x20 << OP_ALG_AAI_SHIFT)
1160
1161/* RNG4 AAI set */
1162#define OP_ALG_AAI_RNG4_SH_0 (0x00 << OP_ALG_AAI_SHIFT)
1163#define OP_ALG_AAI_RNG4_SH_1 (0x01 << OP_ALG_AAI_SHIFT)
1164#define OP_ALG_AAI_RNG4_PS (0x40 << OP_ALG_AAI_SHIFT)
1165#define OP_ALG_AAI_RNG4_AI (0x80 << OP_ALG_AAI_SHIFT)
1166#define OP_ALG_AAI_RNG4_SK (0x100 << OP_ALG_AAI_SHIFT)
1160 1167
1161/* hmac/smac AAI set */ 1168/* hmac/smac AAI set */
1162#define OP_ALG_AAI_HASH (0x00 << OP_ALG_AAI_SHIFT) 1169#define OP_ALG_AAI_HASH (0x00 << OP_ALG_AAI_SHIFT)
@@ -1178,12 +1185,6 @@ struct sec4_sg_entry {
1178#define OP_ALG_AAI_GSM (0x10 << OP_ALG_AAI_SHIFT) 1185#define OP_ALG_AAI_GSM (0x10 << OP_ALG_AAI_SHIFT)
1179#define OP_ALG_AAI_EDGE (0x20 << OP_ALG_AAI_SHIFT) 1186#define OP_ALG_AAI_EDGE (0x20 << OP_ALG_AAI_SHIFT)
1180 1187
1181/* RNG4 set */
1182#define OP_ALG_RNG4_SHIFT 4
1183#define OP_ALG_RNG4_MASK (0x1f3 << OP_ALG_RNG4_SHIFT)
1184
1185#define OP_ALG_RNG4_SK (0x100 << OP_ALG_RNG4_SHIFT)
1186
1187#define OP_ALG_AS_SHIFT 2 1188#define OP_ALG_AS_SHIFT 2
1188#define OP_ALG_AS_MASK (0x3 << OP_ALG_AS_SHIFT) 1189#define OP_ALG_AS_MASK (0x3 << OP_ALG_AS_SHIFT)
1189#define OP_ALG_AS_UPDATE (0 << OP_ALG_AS_SHIFT) 1190#define OP_ALG_AS_UPDATE (0 << OP_ALG_AS_SHIFT)
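
With these defines, the operation words that ctrl.c composes for the RNG4 descriptors can be read straight off the AAI field: the state-handle index lands in the AAI bits (handles 0/1 correspond to OP_ALG_AAI_RNG4_SH_0/SH_1) and the secure-key pass uses OP_ALG_AAI_RNG4_SK. A short illustrative helper, not part of the patch, showing the same composition build_instantiation_desc() performs:

	/* Illustrative only: mirrors the operation words built in ctrl.c */
	static inline u32 rng4_init_op(int handle, bool gen_sk)
	{
		if (gen_sk)	/* SH0 secure-key generation pass */
			return OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG |
			       OP_ALG_AAI_RNG4_SK;

		return OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG |
		       (handle << OP_ALG_AAI_SHIFT) | OP_ALG_AS_INIT;
	}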
diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h
index 34c4b9f7fbfa..6d85fcc5bd0a 100644
--- a/drivers/crypto/caam/intern.h
+++ b/drivers/crypto/caam/intern.h
@@ -37,13 +37,16 @@ struct caam_jrentry_info {
37 37
38/* Private sub-storage for a single JobR */ 38/* Private sub-storage for a single JobR */
39struct caam_drv_private_jr { 39struct caam_drv_private_jr {
40 struct device *parentdev; /* points back to controller dev */ 40 struct list_head list_node; /* Job Ring device list */
41 struct platform_device *jr_pdev;/* points to platform device for JR */ 41 struct device *dev;
42 int ridx; 42 int ridx;
43 struct caam_job_ring __iomem *rregs; /* JobR's register space */ 43 struct caam_job_ring __iomem *rregs; /* JobR's register space */
44 struct tasklet_struct irqtask; 44 struct tasklet_struct irqtask;
45 int irq; /* One per queue */ 45 int irq; /* One per queue */
46 46
47 /* Number of scatterlist crypt transforms active on the JobR */
48 atomic_t tfm_count ____cacheline_aligned;
49
47 /* Job ring info */ 50 /* Job ring info */
48 int ringsize; /* Size of rings (assume input = output) */ 51 int ringsize; /* Size of rings (assume input = output) */
49 struct caam_jrentry_info *entinfo; /* Alloc'ed 1 per ring entry */ 52 struct caam_jrentry_info *entinfo; /* Alloc'ed 1 per ring entry */
@@ -63,7 +66,7 @@ struct caam_drv_private_jr {
63struct caam_drv_private { 66struct caam_drv_private {
64 67
65 struct device *dev; 68 struct device *dev;
66 struct device **jrdev; /* Alloc'ed array per sub-device */ 69 struct platform_device **jrpdev; /* Alloc'ed array per sub-device */
67 struct platform_device *pdev; 70 struct platform_device *pdev;
68 71
69 /* Physical-presence section */ 72 /* Physical-presence section */
@@ -80,12 +83,11 @@ struct caam_drv_private {
80 u8 qi_present; /* Nonzero if QI present in device */ 83 u8 qi_present; /* Nonzero if QI present in device */
81 int secvio_irq; /* Security violation interrupt number */ 84 int secvio_irq; /* Security violation interrupt number */
82 85
83 /* which jr allocated to scatterlist crypto */ 86#define RNG4_MAX_HANDLES 2
84 atomic_t tfm_count ____cacheline_aligned; 87 /* RNG4 block */
85 /* list of registered crypto algorithms (mk generic context handle?) */ 88 u32 rng4_sh_init; /* This bitmap shows which of the State
86 struct list_head alg_list; 89 Handles of the RNG4 block are initialized
87 /* list of registered hash algorithms (mk generic context handle?) */ 90 by this driver */
88 struct list_head hash_list;
89 91
90 /* 92 /*
91 * debugfs entries for developer view into driver/device 93 * debugfs entries for developer view into driver/device
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index bdb786d5a5e5..1d80bd3636c5 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -6,6 +6,7 @@
6 */ 6 */
7 7
8#include <linux/of_irq.h> 8#include <linux/of_irq.h>
9#include <linux/of_address.h>
9 10
10#include "compat.h" 11#include "compat.h"
11#include "regs.h" 12#include "regs.h"
@@ -13,6 +14,113 @@
13#include "desc.h" 14#include "desc.h"
14#include "intern.h" 15#include "intern.h"
15 16
17struct jr_driver_data {
18 /* List of Physical JobR's with the Driver */
19 struct list_head jr_list;
20 spinlock_t jr_alloc_lock; /* jr_list lock */
21} ____cacheline_aligned;
22
23static struct jr_driver_data driver_data;
24
25static int caam_reset_hw_jr(struct device *dev)
26{
27 struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
28 unsigned int timeout = 100000;
29
30 /*
31 * mask interrupts since we are going to poll
32 * for reset completion status
33 */
34 setbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK);
35
36 /* initiate flush (required prior to reset) */
37 wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET);
38 while (((rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_MASK) ==
39 JRINT_ERR_HALT_INPROGRESS) && --timeout)
40 cpu_relax();
41
42 if ((rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_MASK) !=
43 JRINT_ERR_HALT_COMPLETE || timeout == 0) {
44 dev_err(dev, "failed to flush job ring %d\n", jrp->ridx);
45 return -EIO;
46 }
47
48 /* initiate reset */
49 timeout = 100000;
50 wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET);
51 while ((rd_reg32(&jrp->rregs->jrcommand) & JRCR_RESET) && --timeout)
52 cpu_relax();
53
54 if (timeout == 0) {
55 dev_err(dev, "failed to reset job ring %d\n", jrp->ridx);
56 return -EIO;
57 }
58
59 /* unmask interrupts */
60 clrbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK);
61
62 return 0;
63}
64
65/*
66 * Shutdown JobR independent of platform property code
67 */
68int caam_jr_shutdown(struct device *dev)
69{
70 struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
71 dma_addr_t inpbusaddr, outbusaddr;
72 int ret;
73
74 ret = caam_reset_hw_jr(dev);
75
76 tasklet_kill(&jrp->irqtask);
77
78 /* Release interrupt */
79 free_irq(jrp->irq, dev);
80
81 /* Free rings */
82 inpbusaddr = rd_reg64(&jrp->rregs->inpring_base);
83 outbusaddr = rd_reg64(&jrp->rregs->outring_base);
84 dma_free_coherent(dev, sizeof(dma_addr_t) * JOBR_DEPTH,
85 jrp->inpring, inpbusaddr);
86 dma_free_coherent(dev, sizeof(struct jr_outentry) * JOBR_DEPTH,
87 jrp->outring, outbusaddr);
88 kfree(jrp->entinfo);
89
90 return ret;
91}
92
93static int caam_jr_remove(struct platform_device *pdev)
94{
95 int ret;
96 struct device *jrdev;
97 struct caam_drv_private_jr *jrpriv;
98
99 jrdev = &pdev->dev;
100 jrpriv = dev_get_drvdata(jrdev);
101
102 /*
103 * Return EBUSY if job ring already allocated.
104 */
105 if (atomic_read(&jrpriv->tfm_count)) {
106 dev_err(jrdev, "Device is busy\n");
107 return -EBUSY;
108 }
109
110 /* Remove the node from Physical JobR list maintained by driver */
111 spin_lock(&driver_data.jr_alloc_lock);
112 list_del(&jrpriv->list_node);
113 spin_unlock(&driver_data.jr_alloc_lock);
114
115 /* Release ring */
116 ret = caam_jr_shutdown(jrdev);
117 if (ret)
118 dev_err(jrdev, "Failed to shut down job ring\n");
119 irq_dispose_mapping(jrpriv->irq);
120
121 return ret;
122}
123
16/* Main per-ring interrupt handler */ 124/* Main per-ring interrupt handler */
17static irqreturn_t caam_jr_interrupt(int irq, void *st_dev) 125static irqreturn_t caam_jr_interrupt(int irq, void *st_dev)
18{ 126{
@@ -128,6 +236,59 @@ static void caam_jr_dequeue(unsigned long devarg)
128} 236}
129 237
130/** 238/**
239 * caam_jr_alloc() - Alloc a job ring for someone to use as needed.
240 *
 241 * returns : pointer to the newly allocated physical
 242 * JobR device if successful, ERR_PTR(-ENODEV) otherwise.
243 **/
244struct device *caam_jr_alloc(void)
245{
246 struct caam_drv_private_jr *jrpriv, *min_jrpriv = NULL;
247 struct device *dev = NULL;
248 int min_tfm_cnt = INT_MAX;
249 int tfm_cnt;
250
251 spin_lock(&driver_data.jr_alloc_lock);
252
253 if (list_empty(&driver_data.jr_list)) {
254 spin_unlock(&driver_data.jr_alloc_lock);
255 return ERR_PTR(-ENODEV);
256 }
257
258 list_for_each_entry(jrpriv, &driver_data.jr_list, list_node) {
259 tfm_cnt = atomic_read(&jrpriv->tfm_count);
260 if (tfm_cnt < min_tfm_cnt) {
261 min_tfm_cnt = tfm_cnt;
262 min_jrpriv = jrpriv;
263 }
264 if (!min_tfm_cnt)
265 break;
266 }
267
268 if (min_jrpriv) {
269 atomic_inc(&min_jrpriv->tfm_count);
270 dev = min_jrpriv->dev;
271 }
272 spin_unlock(&driver_data.jr_alloc_lock);
273
274 return dev;
275}
276EXPORT_SYMBOL(caam_jr_alloc);
277
278/**
279 * caam_jr_free() - Free the Job Ring
280 * @rdev - points to the dev that identifies the Job ring to
281 * be released.
282 **/
283void caam_jr_free(struct device *rdev)
284{
285 struct caam_drv_private_jr *jrpriv = dev_get_drvdata(rdev);
286
287 atomic_dec(&jrpriv->tfm_count);
288}
289EXPORT_SYMBOL(caam_jr_free);
290
291/**
131 * caam_jr_enqueue() - Enqueue a job descriptor head. Returns 0 if OK, 292 * caam_jr_enqueue() - Enqueue a job descriptor head. Returns 0 if OK,
132 * -EBUSY if the queue is full, -EIO if it cannot map the caller's 293 * -EBUSY if the queue is full, -EIO if it cannot map the caller's
133 * descriptor. 294 * descriptor.
@@ -207,46 +368,6 @@ int caam_jr_enqueue(struct device *dev, u32 *desc,
207} 368}
208EXPORT_SYMBOL(caam_jr_enqueue); 369EXPORT_SYMBOL(caam_jr_enqueue);
209 370
210static int caam_reset_hw_jr(struct device *dev)
211{
212 struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
213 unsigned int timeout = 100000;
214
215 /*
216 * mask interrupts since we are going to poll
217 * for reset completion status
218 */
219 setbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK);
220
221 /* initiate flush (required prior to reset) */
222 wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET);
223 while (((rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_MASK) ==
224 JRINT_ERR_HALT_INPROGRESS) && --timeout)
225 cpu_relax();
226
227 if ((rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_MASK) !=
228 JRINT_ERR_HALT_COMPLETE || timeout == 0) {
229 dev_err(dev, "failed to flush job ring %d\n", jrp->ridx);
230 return -EIO;
231 }
232
233 /* initiate reset */
234 timeout = 100000;
235 wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET);
236 while ((rd_reg32(&jrp->rregs->jrcommand) & JRCR_RESET) && --timeout)
237 cpu_relax();
238
239 if (timeout == 0) {
240 dev_err(dev, "failed to reset job ring %d\n", jrp->ridx);
241 return -EIO;
242 }
243
244 /* unmask interrupts */
245 clrbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK);
246
247 return 0;
248}
249
250/* 371/*
251 * Init JobR independent of platform property detection 372 * Init JobR independent of platform property detection
252 */ 373 */
@@ -262,7 +383,7 @@ static int caam_jr_init(struct device *dev)
262 383
263 /* Connect job ring interrupt handler. */ 384 /* Connect job ring interrupt handler. */
264 error = request_irq(jrp->irq, caam_jr_interrupt, IRQF_SHARED, 385 error = request_irq(jrp->irq, caam_jr_interrupt, IRQF_SHARED,
265 "caam-jobr", dev); 386 dev_name(dev), dev);
266 if (error) { 387 if (error) {
267 dev_err(dev, "can't connect JobR %d interrupt (%d)\n", 388 dev_err(dev, "can't connect JobR %d interrupt (%d)\n",
268 jrp->ridx, jrp->irq); 389 jrp->ridx, jrp->irq);
@@ -318,86 +439,43 @@ static int caam_jr_init(struct device *dev)
318 return 0; 439 return 0;
319} 440}
320 441
321/*
322 * Shutdown JobR independent of platform property code
323 */
324int caam_jr_shutdown(struct device *dev)
325{
326 struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
327 dma_addr_t inpbusaddr, outbusaddr;
328 int ret;
329
330 ret = caam_reset_hw_jr(dev);
331
332 tasklet_kill(&jrp->irqtask);
333
334 /* Release interrupt */
335 free_irq(jrp->irq, dev);
336
337 /* Free rings */
338 inpbusaddr = rd_reg64(&jrp->rregs->inpring_base);
339 outbusaddr = rd_reg64(&jrp->rregs->outring_base);
340 dma_free_coherent(dev, sizeof(dma_addr_t) * JOBR_DEPTH,
341 jrp->inpring, inpbusaddr);
342 dma_free_coherent(dev, sizeof(struct jr_outentry) * JOBR_DEPTH,
343 jrp->outring, outbusaddr);
344 kfree(jrp->entinfo);
345 of_device_unregister(jrp->jr_pdev);
346
347 return ret;
348}
349 442
350/* 443/*
351 * Probe routine for each detected JobR subsystem. It assumes that 444 * Probe routine for each detected JobR subsystem.
352 * property detection was picked up externally.
353 */ 445 */
354int caam_jr_probe(struct platform_device *pdev, struct device_node *np, 446static int caam_jr_probe(struct platform_device *pdev)
355 int ring)
356{ 447{
357 struct device *ctrldev, *jrdev; 448 struct device *jrdev;
358 struct platform_device *jr_pdev; 449 struct device_node *nprop;
359 struct caam_drv_private *ctrlpriv; 450 struct caam_job_ring __iomem *ctrl;
360 struct caam_drv_private_jr *jrpriv; 451 struct caam_drv_private_jr *jrpriv;
361 u32 *jroffset; 452 static int total_jobrs;
362 int error; 453 int error;
363 454
364 ctrldev = &pdev->dev; 455 jrdev = &pdev->dev;
365 ctrlpriv = dev_get_drvdata(ctrldev);
366
367 jrpriv = kmalloc(sizeof(struct caam_drv_private_jr), 456 jrpriv = kmalloc(sizeof(struct caam_drv_private_jr),
368 GFP_KERNEL); 457 GFP_KERNEL);
369 if (jrpriv == NULL) { 458 if (!jrpriv)
370 dev_err(ctrldev, "can't alloc private mem for job ring %d\n",
371 ring);
372 return -ENOMEM; 459 return -ENOMEM;
373 }
374 jrpriv->parentdev = ctrldev; /* point back to parent */
375 jrpriv->ridx = ring; /* save ring identity relative to detection */
376 460
377 /* 461 dev_set_drvdata(jrdev, jrpriv);
378 * Derive a pointer to the detected JobRs regs
379 * Driver has already iomapped the entire space, we just
380 * need to add in the offset to this JobR. Don't know if I
381 * like this long-term, but it'll run
382 */
383 jroffset = (u32 *)of_get_property(np, "reg", NULL);
384 jrpriv->rregs = (struct caam_job_ring __iomem *)((void *)ctrlpriv->ctrl
385 + *jroffset);
386 462
387 /* Build a local dev for each detected queue */ 463 /* save ring identity relative to detection */
388 jr_pdev = of_platform_device_create(np, NULL, ctrldev); 464 jrpriv->ridx = total_jobrs++;
389 if (jr_pdev == NULL) { 465
390 kfree(jrpriv); 466 nprop = pdev->dev.of_node;
391 return -EINVAL; 467 /* Get configuration properties from device tree */
468 /* First, get register page */
469 ctrl = of_iomap(nprop, 0);
470 if (!ctrl) {
471 dev_err(jrdev, "of_iomap() failed\n");
472 return -ENOMEM;
392 } 473 }
393 474
394 jrpriv->jr_pdev = jr_pdev; 475 jrpriv->rregs = (struct caam_job_ring __force *)ctrl;
395 jrdev = &jr_pdev->dev;
396 dev_set_drvdata(jrdev, jrpriv);
397 ctrlpriv->jrdev[ring] = jrdev;
398 476
399 if (sizeof(dma_addr_t) == sizeof(u64)) 477 if (sizeof(dma_addr_t) == sizeof(u64))
400 if (of_device_is_compatible(np, "fsl,sec-v5.0-job-ring")) 478 if (of_device_is_compatible(nprop, "fsl,sec-v5.0-job-ring"))
401 dma_set_mask(jrdev, DMA_BIT_MASK(40)); 479 dma_set_mask(jrdev, DMA_BIT_MASK(40));
402 else 480 else
403 dma_set_mask(jrdev, DMA_BIT_MASK(36)); 481 dma_set_mask(jrdev, DMA_BIT_MASK(36));
@@ -405,15 +483,61 @@ int caam_jr_probe(struct platform_device *pdev, struct device_node *np,
405 dma_set_mask(jrdev, DMA_BIT_MASK(32)); 483 dma_set_mask(jrdev, DMA_BIT_MASK(32));
406 484
407 /* Identify the interrupt */ 485 /* Identify the interrupt */
408 jrpriv->irq = irq_of_parse_and_map(np, 0); 486 jrpriv->irq = irq_of_parse_and_map(nprop, 0);
409 487
410 /* Now do the platform independent part */ 488 /* Now do the platform independent part */
411 error = caam_jr_init(jrdev); /* now turn on hardware */ 489 error = caam_jr_init(jrdev); /* now turn on hardware */
412 if (error) { 490 if (error) {
413 of_device_unregister(jr_pdev);
414 kfree(jrpriv); 491 kfree(jrpriv);
415 return error; 492 return error;
416 } 493 }
417 494
418 return error; 495 jrpriv->dev = jrdev;
496 spin_lock(&driver_data.jr_alloc_lock);
497 list_add_tail(&jrpriv->list_node, &driver_data.jr_list);
498 spin_unlock(&driver_data.jr_alloc_lock);
499
500 atomic_set(&jrpriv->tfm_count, 0);
501
502 return 0;
503}
504
505static struct of_device_id caam_jr_match[] = {
506 {
507 .compatible = "fsl,sec-v4.0-job-ring",
508 },
509 {
510 .compatible = "fsl,sec4.0-job-ring",
511 },
512 {},
513};
514MODULE_DEVICE_TABLE(of, caam_jr_match);
515
516static struct platform_driver caam_jr_driver = {
517 .driver = {
518 .name = "caam_jr",
519 .owner = THIS_MODULE,
520 .of_match_table = caam_jr_match,
521 },
522 .probe = caam_jr_probe,
523 .remove = caam_jr_remove,
524};
525
526static int __init jr_driver_init(void)
527{
528 spin_lock_init(&driver_data.jr_alloc_lock);
529 INIT_LIST_HEAD(&driver_data.jr_list);
530 return platform_driver_register(&caam_jr_driver);
531}
532
533static void __exit jr_driver_exit(void)
534{
535 platform_driver_unregister(&caam_jr_driver);
419} 536}
537
538module_init(jr_driver_init);
539module_exit(jr_driver_exit);
540
541MODULE_LICENSE("GPL");
542MODULE_DESCRIPTION("FSL CAAM JR request backend");
543MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");
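
A design note on the allocator: the old code distributed transforms round-robin with a single controller-wide counter ("tgt_jr % total_jobrs"), while caam_jr_alloc() above picks the ring with the fewest live transforms and caam_jr_free() hands the reference back. A toy user-space model of that policy, not kernel code:

	#include <stdio.h>

	#define NR_RINGS 4

	/* pick the ring with the fewest live transforms, as caam_jr_alloc() does */
	static int pick_ring(const int tfm_count[NR_RINGS])
	{
		int i, best = 0;

		for (i = 1; i < NR_RINGS; i++)
			if (tfm_count[i] < tfm_count[best])
				best = i;
		return best;
	}

	int main(void)
	{
		int load[NR_RINGS] = { 3, 0, 2, 1 };
		int i;

		for (i = 0; i < 4; i++) {
			int r = pick_ring(load);

			printf("tfm %d -> ring %d\n", i, r);
			load[r]++;	/* caam_jr_alloc() increments tfm_count */
		}
		return 0;
	}

The benefit is that rings released by caam_jr_free() become preferred again, so the load tracks live transforms instead of the historical allocation order.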
diff --git a/drivers/crypto/caam/jr.h b/drivers/crypto/caam/jr.h
index 9d8741a59037..97113a6d6c58 100644
--- a/drivers/crypto/caam/jr.h
+++ b/drivers/crypto/caam/jr.h
@@ -8,12 +8,11 @@
8#define JR_H 8#define JR_H
9 9
10/* Prototypes for backend-level services exposed to APIs */ 10/* Prototypes for backend-level services exposed to APIs */
11struct device *caam_jr_alloc(void);
12void caam_jr_free(struct device *rdev);
11int caam_jr_enqueue(struct device *dev, u32 *desc, 13int caam_jr_enqueue(struct device *dev, u32 *desc,
12 void (*cbk)(struct device *dev, u32 *desc, u32 status, 14 void (*cbk)(struct device *dev, u32 *desc, u32 status,
13 void *areq), 15 void *areq),
14 void *areq); 16 void *areq);
15 17
16extern int caam_jr_probe(struct platform_device *pdev, struct device_node *np,
17 int ring);
18extern int caam_jr_shutdown(struct device *dev);
19#endif /* JR_H */ 18#endif /* JR_H */
diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h
index 4455396918de..d50174f45b21 100644
--- a/drivers/crypto/caam/regs.h
+++ b/drivers/crypto/caam/regs.h
@@ -245,7 +245,7 @@ struct rngtst {
245 245
246/* RNG4 TRNG test registers */ 246/* RNG4 TRNG test registers */
247struct rng4tst { 247struct rng4tst {
248#define RTMCTL_PRGM 0x00010000 /* 1 -> program mode, 0 -> run mode */ 248#define RTMCTL_PRGM 0x00010000 /* 1 -> program mode, 0 -> run mode */
249 u32 rtmctl; /* misc. control register */ 249 u32 rtmctl; /* misc. control register */
250 u32 rtscmisc; /* statistical check misc. register */ 250 u32 rtscmisc; /* statistical check misc. register */
251 u32 rtpkrrng; /* poker range register */ 251 u32 rtpkrrng; /* poker range register */
@@ -255,6 +255,8 @@ struct rng4tst {
255 }; 255 };
256#define RTSDCTL_ENT_DLY_SHIFT 16 256#define RTSDCTL_ENT_DLY_SHIFT 16
257#define RTSDCTL_ENT_DLY_MASK (0xffff << RTSDCTL_ENT_DLY_SHIFT) 257#define RTSDCTL_ENT_DLY_MASK (0xffff << RTSDCTL_ENT_DLY_SHIFT)
258#define RTSDCTL_ENT_DLY_MIN 1200
259#define RTSDCTL_ENT_DLY_MAX 12800
258 u32 rtsdctl; /* seed control register */ 260 u32 rtsdctl; /* seed control register */
259 union { 261 union {
260 u32 rtsblim; /* PRGM=1: sparse bit limit register */ 262 u32 rtsblim; /* PRGM=1: sparse bit limit register */
@@ -266,7 +268,11 @@ struct rng4tst {
266 u32 rtfrqcnt; /* PRGM=0: freq. count register */ 268 u32 rtfrqcnt; /* PRGM=0: freq. count register */
267 }; 269 };
268 u32 rsvd1[40]; 270 u32 rsvd1[40];
271#define RDSTA_SKVT 0x80000000
272#define RDSTA_SKVN 0x40000000
269#define RDSTA_IF0 0x00000001 273#define RDSTA_IF0 0x00000001
274#define RDSTA_IF1 0x00000002
275#define RDSTA_IFMASK (RDSTA_IF1 | RDSTA_IF0)
270 u32 rdsta; 276 u32 rdsta;
271 u32 rsvd2[15]; 277 u32 rsvd2[15];
272}; 278};
@@ -692,6 +698,7 @@ struct caam_deco {
692 u32 jr_ctl_hi; /* CxJRR - JobR Control Register @800 */ 698 u32 jr_ctl_hi; /* CxJRR - JobR Control Register @800 */
693 u32 jr_ctl_lo; 699 u32 jr_ctl_lo;
694 u64 jr_descaddr; /* CxDADR - JobR Descriptor Address */ 700 u64 jr_descaddr; /* CxDADR - JobR Descriptor Address */
701#define DECO_OP_STATUS_HI_ERR_MASK 0xF00000FF
695 u32 op_status_hi; /* DxOPSTA - DECO Operation Status */ 702 u32 op_status_hi; /* DxOPSTA - DECO Operation Status */
696 u32 op_status_lo; 703 u32 op_status_lo;
697 u32 rsvd24[2]; 704 u32 rsvd24[2];
@@ -706,12 +713,13 @@ struct caam_deco {
706 u32 rsvd29[48]; 713 u32 rsvd29[48];
707 u32 descbuf[64]; /* DxDESB - Descriptor buffer */ 714 u32 descbuf[64]; /* DxDESB - Descriptor buffer */
708 u32 rscvd30[193]; 715 u32 rscvd30[193];
716#define DESC_DBG_DECO_STAT_HOST_ERR 0x00D00000
717#define DESC_DBG_DECO_STAT_VALID 0x80000000
718#define DESC_DBG_DECO_STAT_MASK 0x00F00000
709 u32 desc_dbg; /* DxDDR - DECO Debug Register */ 719 u32 desc_dbg; /* DxDDR - DECO Debug Register */
710 u32 rsvd31[126]; 720 u32 rsvd31[126];
711}; 721};
712 722
713/* DECO DBG Register Valid Bit*/
714#define DECO_DBG_VALID 0x80000000
715#define DECO_JQCR_WHL 0x20000000 723#define DECO_JQCR_WHL 0x20000000
716#define DECO_JQCR_FOUR 0x10000000 724#define DECO_JQCR_FOUR 0x10000000
717 725
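
The new rdsta bits give the controller probe everything it needs to decide what is left to do: RDSTA_IF0/IF1 say which state handles are already instantiated (possibly by u-boot), and RDSTA_SKVN says whether the JDKEK/TDKEK/TDSK secure keys have already been generated. A small illustrative helper, not part of the patch, decoding them the same way caam_probe() does:

	/* Illustrative only: decode rdsta as caam_probe() does */
	static inline void rng4_status(u32 rdsta, u32 *inst_handles, bool *gen_sk)
	{
		*inst_handles = rdsta & RDSTA_IFMASK;	/* SH0/SH1 already up */
		*gen_sk = !(rdsta & RDSTA_SKVN);	/* keys still to be generated */
	}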
diff --git a/drivers/crypto/caam/sg_sw_sec4.h b/drivers/crypto/caam/sg_sw_sec4.h
index e0037c8ee243..b12ff85f4241 100644
--- a/drivers/crypto/caam/sg_sw_sec4.h
+++ b/drivers/crypto/caam/sg_sw_sec4.h
@@ -117,6 +117,21 @@ static int dma_unmap_sg_chained(struct device *dev, struct scatterlist *sg,
117 return nents; 117 return nents;
118} 118}
119 119
120/* Map SG page in kernel virtual address space and copy */
121static inline void sg_map_copy(u8 *dest, struct scatterlist *sg,
122 int len, int offset)
123{
124 u8 *mapped_addr;
125
126 /*
127 * Page here can be user-space pinned using get_user_pages
128 * Same must be kmapped before use and kunmapped subsequently
129 */
130 mapped_addr = kmap_atomic(sg_page(sg));
131 memcpy(dest, mapped_addr + offset, len);
132 kunmap_atomic(mapped_addr);
133}
134
120/* Copy from len bytes of sg to dest, starting from beginning */ 135/* Copy from len bytes of sg to dest, starting from beginning */
121static inline void sg_copy(u8 *dest, struct scatterlist *sg, unsigned int len) 136static inline void sg_copy(u8 *dest, struct scatterlist *sg, unsigned int len)
122{ 137{
@@ -124,15 +139,15 @@ static inline void sg_copy(u8 *dest, struct scatterlist *sg, unsigned int len)
124 int cpy_index = 0, next_cpy_index = current_sg->length; 139 int cpy_index = 0, next_cpy_index = current_sg->length;
125 140
126 while (next_cpy_index < len) { 141 while (next_cpy_index < len) {
127 memcpy(dest + cpy_index, (u8 *) sg_virt(current_sg), 142 sg_map_copy(dest + cpy_index, current_sg, current_sg->length,
128 current_sg->length); 143 current_sg->offset);
129 current_sg = scatterwalk_sg_next(current_sg); 144 current_sg = scatterwalk_sg_next(current_sg);
130 cpy_index = next_cpy_index; 145 cpy_index = next_cpy_index;
131 next_cpy_index += current_sg->length; 146 next_cpy_index += current_sg->length;
132 } 147 }
133 if (cpy_index < len) 148 if (cpy_index < len)
134 memcpy(dest + cpy_index, (u8 *) sg_virt(current_sg), 149 sg_map_copy(dest + cpy_index, current_sg, len-cpy_index,
135 len - cpy_index); 150 current_sg->offset);
136} 151}
137 152
138/* Copy sg data, from to_skip to end, to dest */ 153/* Copy sg data, from to_skip to end, to dest */
@@ -140,7 +155,7 @@ static inline void sg_copy_part(u8 *dest, struct scatterlist *sg,
140 int to_skip, unsigned int end) 155 int to_skip, unsigned int end)
141{ 156{
142 struct scatterlist *current_sg = sg; 157 struct scatterlist *current_sg = sg;
143 int sg_index, cpy_index; 158 int sg_index, cpy_index, offset;
144 159
145 sg_index = current_sg->length; 160 sg_index = current_sg->length;
146 while (sg_index <= to_skip) { 161 while (sg_index <= to_skip) {
@@ -148,9 +163,10 @@ static inline void sg_copy_part(u8 *dest, struct scatterlist *sg,
148 sg_index += current_sg->length; 163 sg_index += current_sg->length;
149 } 164 }
150 cpy_index = sg_index - to_skip; 165 cpy_index = sg_index - to_skip;
151 memcpy(dest, (u8 *) sg_virt(current_sg) + 166 offset = current_sg->offset + current_sg->length - cpy_index;
152 current_sg->length - cpy_index, cpy_index); 167 sg_map_copy(dest, current_sg, cpy_index, offset);
153 current_sg = scatterwalk_sg_next(current_sg); 168 if (end - sg_index) {
154 if (end - sg_index) 169 current_sg = scatterwalk_sg_next(current_sg);
155 sg_copy(dest + cpy_index, current_sg, end - sg_index); 170 sg_copy(dest + cpy_index, current_sg, end - sg_index);
171 }
156} 172}
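
The sg_map_copy() helper above exists because sg_virt() is only valid for lowmem pages; pages pinned from user space with get_user_pages() may live in highmem, so each page is kmap_atomic()'d for the duration of the memcpy() and unmapped again. A minimal caller sketch, not part of the patch, that gathers a scatterlist into a bounce buffer using the reworked sg_copy():

	/* Sketch only: linearize nbytes of an sg list, highmem-safe */
	static int gather_sg(struct scatterlist *src, unsigned int nbytes, u8 **out)
	{
		u8 *buf = kmalloc(nbytes, GFP_KERNEL);

		if (!buf)
			return -ENOMEM;

		sg_copy(buf, src, nbytes);	/* uses sg_map_copy() per entry */
		*out = buf;
		return 0;
	}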
diff --git a/drivers/crypto/dcp.c b/drivers/crypto/dcp.c
index a8a7dd4b0d25..247ab8048f5b 100644
--- a/drivers/crypto/dcp.c
+++ b/drivers/crypto/dcp.c
@@ -733,12 +733,9 @@ static int dcp_probe(struct platform_device *pdev)
733 platform_set_drvdata(pdev, dev); 733 platform_set_drvdata(pdev, dev);
734 734
735 r = platform_get_resource(pdev, IORESOURCE_MEM, 0); 735 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
736 if (!r) { 736 dev->dcp_regs_base = devm_ioremap_resource(&pdev->dev, r);
737 dev_err(&pdev->dev, "failed to get IORESOURCE_MEM\n"); 737 if (IS_ERR(dev->dcp_regs_base))
738 return -ENXIO; 738 return PTR_ERR(dev->dcp_regs_base);
739 }
740 dev->dcp_regs_base = devm_ioremap(&pdev->dev, r->start,
741 resource_size(r));
742 739
743 dcp_set(dev, DCP_CTRL_SFRST, DCP_REG_CTRL); 740 dcp_set(dev, DCP_CTRL_SFRST, DCP_REG_CTRL);
744 udelay(10); 741 udelay(10);
@@ -762,7 +759,8 @@ static int dcp_probe(struct platform_device *pdev)
762 return -EIO; 759 return -EIO;
763 } 760 }
764 dev->dcp_vmi_irq = r->start; 761 dev->dcp_vmi_irq = r->start;
765 ret = request_irq(dev->dcp_vmi_irq, dcp_vmi_irq, 0, "dcp", dev); 762 ret = devm_request_irq(&pdev->dev, dev->dcp_vmi_irq, dcp_vmi_irq, 0,
763 "dcp", dev);
766 if (ret != 0) { 764 if (ret != 0) {
767 dev_err(&pdev->dev, "can't request_irq (0)\n"); 765 dev_err(&pdev->dev, "can't request_irq (0)\n");
768 return -EIO; 766 return -EIO;
@@ -771,15 +769,14 @@ static int dcp_probe(struct platform_device *pdev)
771 r = platform_get_resource(pdev, IORESOURCE_IRQ, 1); 769 r = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
772 if (!r) { 770 if (!r) {
773 dev_err(&pdev->dev, "can't get IRQ resource (1)\n"); 771 dev_err(&pdev->dev, "can't get IRQ resource (1)\n");
774 ret = -EIO; 772 return -EIO;
775 goto err_free_irq0;
776 } 773 }
777 dev->dcp_irq = r->start; 774 dev->dcp_irq = r->start;
778 ret = request_irq(dev->dcp_irq, dcp_irq, 0, "dcp", dev); 775 ret = devm_request_irq(&pdev->dev, dev->dcp_irq, dcp_irq, 0, "dcp",
776 dev);
779 if (ret != 0) { 777 if (ret != 0) {
780 dev_err(&pdev->dev, "can't request_irq (1)\n"); 778 dev_err(&pdev->dev, "can't request_irq (1)\n");
781 ret = -EIO; 779 return -EIO;
782 goto err_free_irq0;
783 } 780 }
784 781
785 dev->hw_pkg[0] = dma_alloc_coherent(&pdev->dev, 782 dev->hw_pkg[0] = dma_alloc_coherent(&pdev->dev,
@@ -788,8 +785,7 @@ static int dcp_probe(struct platform_device *pdev)
788 GFP_KERNEL); 785 GFP_KERNEL);
789 if (!dev->hw_pkg[0]) { 786 if (!dev->hw_pkg[0]) {
790 dev_err(&pdev->dev, "Could not allocate hw descriptors\n"); 787 dev_err(&pdev->dev, "Could not allocate hw descriptors\n");
791 ret = -ENOMEM; 788 return -ENOMEM;
792 goto err_free_irq1;
793 } 789 }
794 790
795 for (i = 1; i < DCP_MAX_PKG; i++) { 791 for (i = 1; i < DCP_MAX_PKG; i++) {
@@ -848,16 +844,14 @@ err_unregister:
848 for (j = 0; j < i; j++) 844 for (j = 0; j < i; j++)
849 crypto_unregister_alg(&algs[j]); 845 crypto_unregister_alg(&algs[j]);
850err_free_key_iv: 846err_free_key_iv:
847 tasklet_kill(&dev->done_task);
848 tasklet_kill(&dev->queue_task);
851 dma_free_coherent(&pdev->dev, 2 * AES_KEYSIZE_128, dev->payload_base, 849 dma_free_coherent(&pdev->dev, 2 * AES_KEYSIZE_128, dev->payload_base,
852 dev->payload_base_dma); 850 dev->payload_base_dma);
853err_free_hw_packet: 851err_free_hw_packet:
854 dma_free_coherent(&pdev->dev, DCP_MAX_PKG * 852 dma_free_coherent(&pdev->dev, DCP_MAX_PKG *
855 sizeof(struct dcp_hw_packet), dev->hw_pkg[0], 853 sizeof(struct dcp_hw_packet), dev->hw_pkg[0],
856 dev->hw_phys_pkg); 854 dev->hw_phys_pkg);
857err_free_irq1:
858 free_irq(dev->dcp_irq, dev);
859err_free_irq0:
860 free_irq(dev->dcp_vmi_irq, dev);
861 855
862 return ret; 856 return ret;
863} 857}
@@ -868,23 +862,20 @@ static int dcp_remove(struct platform_device *pdev)
868 int j; 862 int j;
869 dev = platform_get_drvdata(pdev); 863 dev = platform_get_drvdata(pdev);
870 864
871 dma_free_coherent(&pdev->dev, 865 misc_deregister(&dev->dcp_bootstream_misc);
872 DCP_MAX_PKG * sizeof(struct dcp_hw_packet),
873 dev->hw_pkg[0], dev->hw_phys_pkg);
874
875 dma_free_coherent(&pdev->dev, 2 * AES_KEYSIZE_128, dev->payload_base,
876 dev->payload_base_dma);
877 866
878 free_irq(dev->dcp_irq, dev); 867 for (j = 0; j < ARRAY_SIZE(algs); j++)
879 free_irq(dev->dcp_vmi_irq, dev); 868 crypto_unregister_alg(&algs[j]);
880 869
881 tasklet_kill(&dev->done_task); 870 tasklet_kill(&dev->done_task);
882 tasklet_kill(&dev->queue_task); 871 tasklet_kill(&dev->queue_task);
883 872
884 for (j = 0; j < ARRAY_SIZE(algs); j++) 873 dma_free_coherent(&pdev->dev, 2 * AES_KEYSIZE_128, dev->payload_base,
885 crypto_unregister_alg(&algs[j]); 874 dev->payload_base_dma);
886 875
887 misc_deregister(&dev->dcp_bootstream_misc); 876 dma_free_coherent(&pdev->dev,
877 DCP_MAX_PKG * sizeof(struct dcp_hw_packet),
878 dev->hw_pkg[0], dev->hw_phys_pkg);
888 879
889 return 0; 880 return 0;
890} 881}
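The dcp probe/remove rework above moves the driver onto managed resources: devm_ioremap_resource() validates and maps the MMIO region in one step, devm_request_irq() removes the need for the err_free_irq* unwind labels, and dcp_remove() now tears things down in the reverse order of probe. A minimal probe sketch of that pattern; foo_irq_handler and the foo_ prefix are illustrative, the calls themselves are the stock platform/devm API:

#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/io.h>

static irqreturn_t foo_irq_handler(int irq, void *data);

static int foo_probe(struct platform_device *pdev)
{
	struct resource *r;
	void __iomem *base;
	int irq, ret;

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, r);	/* also catches r == NULL */
	if (IS_ERR(base))
		return PTR_ERR(base);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	/* released automatically on detach; no goto/free_irq() unwinding */
	ret = devm_request_irq(&pdev->dev, irq, foo_irq_handler, 0,
			       dev_name(&pdev->dev), pdev);
	if (ret)
		return ret;

	return 0;
}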
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index 214357e12dc0..f757a0f428bd 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -1149,32 +1149,24 @@ static int aead_setkey(struct crypto_aead *tfm, const u8 *key,
1149 unsigned int keylen) 1149 unsigned int keylen)
1150{ 1150{
1151 struct ixp_ctx *ctx = crypto_aead_ctx(tfm); 1151 struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
1152 struct rtattr *rta = (struct rtattr *)key; 1152 struct crypto_authenc_keys keys;
1153 struct crypto_authenc_key_param *param;
1154 1153
1155 if (!RTA_OK(rta, keylen)) 1154 if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
1156 goto badkey; 1155 goto badkey;
1157 if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
1158 goto badkey;
1159 if (RTA_PAYLOAD(rta) < sizeof(*param))
1160 goto badkey;
1161
1162 param = RTA_DATA(rta);
1163 ctx->enckey_len = be32_to_cpu(param->enckeylen);
1164 1156
1165 key += RTA_ALIGN(rta->rta_len); 1157 if (keys.authkeylen > sizeof(ctx->authkey))
1166 keylen -= RTA_ALIGN(rta->rta_len); 1158 goto badkey;
1167 1159
1168 if (keylen < ctx->enckey_len) 1160 if (keys.enckeylen > sizeof(ctx->enckey))
1169 goto badkey; 1161 goto badkey;
1170 1162
1171 ctx->authkey_len = keylen - ctx->enckey_len; 1163 memcpy(ctx->authkey, keys.authkey, keys.authkeylen);
1172 memcpy(ctx->enckey, key + ctx->authkey_len, ctx->enckey_len); 1164 memcpy(ctx->enckey, keys.enckey, keys.enckeylen);
1173 memcpy(ctx->authkey, key, ctx->authkey_len); 1165 ctx->authkey_len = keys.authkeylen;
1166 ctx->enckey_len = keys.enckeylen;
1174 1167
1175 return aead_setup(tfm, crypto_aead_authsize(tfm)); 1168 return aead_setup(tfm, crypto_aead_authsize(tfm));
1176badkey: 1169badkey:
1177 ctx->enckey_len = 0;
1178 crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); 1170 crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
1179 return -EINVAL; 1171 return -EINVAL;
1180} 1172}
@@ -1418,14 +1410,12 @@ static const struct platform_device_info ixp_dev_info __initdata = {
1418static int __init ixp_module_init(void) 1410static int __init ixp_module_init(void)
1419{ 1411{
1420 int num = ARRAY_SIZE(ixp4xx_algos); 1412 int num = ARRAY_SIZE(ixp4xx_algos);
1421 int i, err ; 1413 int i, err;
1422 1414
1423 pdev = platform_device_register_full(&ixp_dev_info); 1415 pdev = platform_device_register_full(&ixp_dev_info);
1424 if (IS_ERR(pdev)) 1416 if (IS_ERR(pdev))
1425 return PTR_ERR(pdev); 1417 return PTR_ERR(pdev);
1426 1418
1427 dev = &pdev->dev;
1428
1429 spin_lock_init(&desc_lock); 1419 spin_lock_init(&desc_lock);
1430 spin_lock_init(&emerg_lock); 1420 spin_lock_init(&emerg_lock);
1431 1421
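ixp4xx (and picoxcell and talitos further down) drops its open-coded rtattr parsing of the authenc() key blob in favour of crypto_authenc_extractkeys(), which splits the blob into authentication and encryption keys and validates the embedded lengths. A hedged sketch of the resulting setkey shape; struct foo_ctx and its field sizes are illustrative, while the helper and struct crypto_authenc_keys are the real API:

#include <linux/crypto.h>
#include <crypto/authenc.h>

struct foo_ctx {
	u8 authkey[64];
	u8 enckey[32];
	unsigned int authkey_len;
	unsigned int enckey_len;
};

static int foo_aead_setkey(struct crypto_aead *tfm, const u8 *key,
			   unsigned int keylen)
{
	struct foo_ctx *ctx = crypto_aead_ctx(tfm);
	struct crypto_authenc_keys keys;

	if (crypto_authenc_extractkeys(&keys, key, keylen))
		goto badkey;

	/* bound both parts against the fixed-size buffers in the context */
	if (keys.authkeylen > sizeof(ctx->authkey) ||
	    keys.enckeylen > sizeof(ctx->enckey))
		goto badkey;

	memcpy(ctx->authkey, keys.authkey, keys.authkeylen);
	memcpy(ctx->enckey, keys.enckey, keys.enckeylen);
	ctx->authkey_len = keys.authkeylen;
	ctx->enckey_len = keys.enckeylen;
	return 0;

badkey:
	crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}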
diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c
index 3374a3ebe4c7..8d1e6f8e9e9c 100644
--- a/drivers/crypto/mv_cesa.c
+++ b/drivers/crypto/mv_cesa.c
@@ -907,7 +907,7 @@ static int mv_cra_hash_hmac_sha1_init(struct crypto_tfm *tfm)
907 return mv_cra_hash_init(tfm, "sha1", COP_HMAC_SHA1, SHA1_BLOCK_SIZE); 907 return mv_cra_hash_init(tfm, "sha1", COP_HMAC_SHA1, SHA1_BLOCK_SIZE);
908} 908}
909 909
910irqreturn_t crypto_int(int irq, void *priv) 910static irqreturn_t crypto_int(int irq, void *priv)
911{ 911{
912 u32 val; 912 u32 val;
913 913
@@ -928,7 +928,7 @@ irqreturn_t crypto_int(int irq, void *priv)
928 return IRQ_HANDLED; 928 return IRQ_HANDLED;
929} 929}
930 930
931struct crypto_alg mv_aes_alg_ecb = { 931static struct crypto_alg mv_aes_alg_ecb = {
932 .cra_name = "ecb(aes)", 932 .cra_name = "ecb(aes)",
933 .cra_driver_name = "mv-ecb-aes", 933 .cra_driver_name = "mv-ecb-aes",
934 .cra_priority = 300, 934 .cra_priority = 300,
@@ -951,7 +951,7 @@ struct crypto_alg mv_aes_alg_ecb = {
951 }, 951 },
952}; 952};
953 953
954struct crypto_alg mv_aes_alg_cbc = { 954static struct crypto_alg mv_aes_alg_cbc = {
955 .cra_name = "cbc(aes)", 955 .cra_name = "cbc(aes)",
956 .cra_driver_name = "mv-cbc-aes", 956 .cra_driver_name = "mv-cbc-aes",
957 .cra_priority = 300, 957 .cra_priority = 300,
@@ -975,7 +975,7 @@ struct crypto_alg mv_aes_alg_cbc = {
975 }, 975 },
976}; 976};
977 977
978struct ahash_alg mv_sha1_alg = { 978static struct ahash_alg mv_sha1_alg = {
979 .init = mv_hash_init, 979 .init = mv_hash_init,
980 .update = mv_hash_update, 980 .update = mv_hash_update,
981 .final = mv_hash_final, 981 .final = mv_hash_final,
@@ -999,7 +999,7 @@ struct ahash_alg mv_sha1_alg = {
999 } 999 }
1000}; 1000};
1001 1001
1002struct ahash_alg mv_hmac_sha1_alg = { 1002static struct ahash_alg mv_hmac_sha1_alg = {
1003 .init = mv_hash_init, 1003 .init = mv_hash_init,
1004 .update = mv_hash_update, 1004 .update = mv_hash_update,
1005 .final = mv_hash_final, 1005 .final = mv_hash_final,
@@ -1084,7 +1084,7 @@ static int mv_probe(struct platform_device *pdev)
1084 goto err_unmap_sram; 1084 goto err_unmap_sram;
1085 } 1085 }
1086 1086
1087 ret = request_irq(irq, crypto_int, IRQF_DISABLED, dev_name(&pdev->dev), 1087 ret = request_irq(irq, crypto_int, 0, dev_name(&pdev->dev),
1088 cp); 1088 cp);
1089 if (ret) 1089 if (ret)
1090 goto err_thread; 1090 goto err_thread;
@@ -1187,7 +1187,7 @@ static struct platform_driver marvell_crypto = {
1187 .driver = { 1187 .driver = {
1188 .owner = THIS_MODULE, 1188 .owner = THIS_MODULE,
1189 .name = "mv_crypto", 1189 .name = "mv_crypto",
1190 .of_match_table = of_match_ptr(mv_cesa_of_match_table), 1190 .of_match_table = mv_cesa_of_match_table,
1191 }, 1191 },
1192}; 1192};
1193MODULE_ALIAS("platform:mv_crypto"); 1193MODULE_ALIAS("platform:mv_crypto");
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index ce791c2f81f7..a9ccbf14096e 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -275,7 +275,7 @@ static int omap_aes_write_ctrl(struct omap_aes_dev *dd)
275 if (dd->flags & FLAGS_CBC) 275 if (dd->flags & FLAGS_CBC)
276 val |= AES_REG_CTRL_CBC; 276 val |= AES_REG_CTRL_CBC;
277 if (dd->flags & FLAGS_CTR) { 277 if (dd->flags & FLAGS_CTR) {
278 val |= AES_REG_CTRL_CTR | AES_REG_CTRL_CTR_WIDTH_32; 278 val |= AES_REG_CTRL_CTR | AES_REG_CTRL_CTR_WIDTH_128;
279 mask = AES_REG_CTRL_CTR | AES_REG_CTRL_CTR_WIDTH_MASK; 279 mask = AES_REG_CTRL_CTR | AES_REG_CTRL_CTR_WIDTH_MASK;
280 } 280 }
281 if (dd->flags & FLAGS_ENCRYPT) 281 if (dd->flags & FLAGS_ENCRYPT)
@@ -554,7 +554,7 @@ static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
554 return err; 554 return err;
555} 555}
556 556
557int omap_aes_check_aligned(struct scatterlist *sg) 557static int omap_aes_check_aligned(struct scatterlist *sg)
558{ 558{
559 while (sg) { 559 while (sg) {
560 if (!IS_ALIGNED(sg->offset, 4)) 560 if (!IS_ALIGNED(sg->offset, 4))
@@ -566,7 +566,7 @@ int omap_aes_check_aligned(struct scatterlist *sg)
566 return 0; 566 return 0;
567} 567}
568 568
569int omap_aes_copy_sgs(struct omap_aes_dev *dd) 569static int omap_aes_copy_sgs(struct omap_aes_dev *dd)
570{ 570{
571 void *buf_in, *buf_out; 571 void *buf_in, *buf_out;
572 int pages; 572 int pages;
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index e28104b4aab0..e45aaaf0db30 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -2033,3 +2033,4 @@ module_platform_driver(omap_sham_driver);
2033MODULE_DESCRIPTION("OMAP SHA1/MD5 hw acceleration support."); 2033MODULE_DESCRIPTION("OMAP SHA1/MD5 hw acceleration support.");
2034MODULE_LICENSE("GPL v2"); 2034MODULE_LICENSE("GPL v2");
2035MODULE_AUTHOR("Dmitry Kasatkin"); 2035MODULE_AUTHOR("Dmitry Kasatkin");
2036MODULE_ALIAS("platform:omap-sham");
diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c
index 888f7f4a6d3f..a6175ba6d238 100644
--- a/drivers/crypto/picoxcell_crypto.c
+++ b/drivers/crypto/picoxcell_crypto.c
@@ -495,45 +495,29 @@ static int spacc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
495{ 495{
496 struct spacc_aead_ctx *ctx = crypto_aead_ctx(tfm); 496 struct spacc_aead_ctx *ctx = crypto_aead_ctx(tfm);
497 struct spacc_alg *alg = to_spacc_alg(tfm->base.__crt_alg); 497 struct spacc_alg *alg = to_spacc_alg(tfm->base.__crt_alg);
498 struct rtattr *rta = (void *)key; 498 struct crypto_authenc_keys keys;
499 struct crypto_authenc_key_param *param;
500 unsigned int authkeylen, enckeylen;
501 int err = -EINVAL; 499 int err = -EINVAL;
502 500
503 if (!RTA_OK(rta, keylen)) 501 if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
504 goto badkey; 502 goto badkey;
505 503
506 if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM) 504 if (keys.enckeylen > AES_MAX_KEY_SIZE)
507 goto badkey; 505 goto badkey;
508 506
509 if (RTA_PAYLOAD(rta) < sizeof(*param)) 507 if (keys.authkeylen > sizeof(ctx->hash_ctx))
510 goto badkey;
511
512 param = RTA_DATA(rta);
513 enckeylen = be32_to_cpu(param->enckeylen);
514
515 key += RTA_ALIGN(rta->rta_len);
516 keylen -= RTA_ALIGN(rta->rta_len);
517
518 if (keylen < enckeylen)
519 goto badkey;
520
521 authkeylen = keylen - enckeylen;
522
523 if (enckeylen > AES_MAX_KEY_SIZE)
524 goto badkey; 508 goto badkey;
525 509
526 if ((alg->ctrl_default & SPACC_CRYPTO_ALG_MASK) == 510 if ((alg->ctrl_default & SPACC_CRYPTO_ALG_MASK) ==
527 SPA_CTRL_CIPH_ALG_AES) 511 SPA_CTRL_CIPH_ALG_AES)
528 err = spacc_aead_aes_setkey(tfm, key + authkeylen, enckeylen); 512 err = spacc_aead_aes_setkey(tfm, keys.enckey, keys.enckeylen);
529 else 513 else
530 err = spacc_aead_des_setkey(tfm, key + authkeylen, enckeylen); 514 err = spacc_aead_des_setkey(tfm, keys.enckey, keys.enckeylen);
531 515
532 if (err) 516 if (err)
533 goto badkey; 517 goto badkey;
534 518
535 memcpy(ctx->hash_ctx, key, authkeylen); 519 memcpy(ctx->hash_ctx, keys.authkey, keys.authkeylen);
536 ctx->hash_key_len = authkeylen; 520 ctx->hash_key_len = keys.authkeylen;
537 521
538 return 0; 522 return 0;
539 523
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
index d7bb8bac36e9..785a9ded7bdf 100644
--- a/drivers/crypto/sahara.c
+++ b/drivers/crypto/sahara.c
@@ -1058,7 +1058,7 @@ static struct platform_driver sahara_driver = {
1058 .driver = { 1058 .driver = {
1059 .name = SAHARA_NAME, 1059 .name = SAHARA_NAME,
1060 .owner = THIS_MODULE, 1060 .owner = THIS_MODULE,
1061 .of_match_table = of_match_ptr(sahara_dt_ids), 1061 .of_match_table = sahara_dt_ids,
1062 }, 1062 },
1063 .id_table = sahara_platform_ids, 1063 .id_table = sahara_platform_ids,
1064}; 1064};
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 6cd0e6038583..b44f4ddc565c 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -673,39 +673,20 @@ static int aead_setkey(struct crypto_aead *authenc,
673 const u8 *key, unsigned int keylen) 673 const u8 *key, unsigned int keylen)
674{ 674{
675 struct talitos_ctx *ctx = crypto_aead_ctx(authenc); 675 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
676 struct rtattr *rta = (void *)key; 676 struct crypto_authenc_keys keys;
677 struct crypto_authenc_key_param *param;
678 unsigned int authkeylen;
679 unsigned int enckeylen;
680
681 if (!RTA_OK(rta, keylen))
682 goto badkey;
683 677
684 if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM) 678 if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
685 goto badkey; 679 goto badkey;
686 680
687 if (RTA_PAYLOAD(rta) < sizeof(*param)) 681 if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
688 goto badkey; 682 goto badkey;
689 683
690 param = RTA_DATA(rta); 684 memcpy(ctx->key, keys.authkey, keys.authkeylen);
691 enckeylen = be32_to_cpu(param->enckeylen); 685 memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);
692
693 key += RTA_ALIGN(rta->rta_len);
694 keylen -= RTA_ALIGN(rta->rta_len);
695 686
696 if (keylen < enckeylen) 687 ctx->keylen = keys.authkeylen + keys.enckeylen;
697 goto badkey; 688 ctx->enckeylen = keys.enckeylen;
698 689 ctx->authkeylen = keys.authkeylen;
699 authkeylen = keylen - enckeylen;
700
701 if (keylen > TALITOS_MAX_KEY_SIZE)
702 goto badkey;
703
704 memcpy(&ctx->key, key, keylen);
705
706 ctx->keylen = keylen;
707 ctx->enckeylen = enckeylen;
708 ctx->authkeylen = authkeylen;
709 690
710 return 0; 691 return 0;
711 692
@@ -809,7 +790,7 @@ static void ipsec_esp_unmap(struct device *dev,
809 790
810 if (edesc->assoc_chained) 791 if (edesc->assoc_chained)
811 talitos_unmap_sg_chain(dev, areq->assoc, DMA_TO_DEVICE); 792 talitos_unmap_sg_chain(dev, areq->assoc, DMA_TO_DEVICE);
812 else 793 else if (areq->assoclen)
813 /* assoc_nents counts also for IV in non-contiguous cases */ 794 /* assoc_nents counts also for IV in non-contiguous cases */
814 dma_unmap_sg(dev, areq->assoc, 795 dma_unmap_sg(dev, areq->assoc,
815 edesc->assoc_nents ? edesc->assoc_nents - 1 : 1, 796 edesc->assoc_nents ? edesc->assoc_nents - 1 : 1,
@@ -992,7 +973,11 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
992 dma_sync_single_for_device(dev, edesc->dma_link_tbl, 973 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
993 edesc->dma_len, DMA_BIDIRECTIONAL); 974 edesc->dma_len, DMA_BIDIRECTIONAL);
994 } else { 975 } else {
995 to_talitos_ptr(&desc->ptr[1], sg_dma_address(areq->assoc)); 976 if (areq->assoclen)
977 to_talitos_ptr(&desc->ptr[1],
978 sg_dma_address(areq->assoc));
979 else
980 to_talitos_ptr(&desc->ptr[1], edesc->iv_dma);
996 desc->ptr[1].j_extent = 0; 981 desc->ptr[1].j_extent = 0;
997 } 982 }
998 983
@@ -1127,7 +1112,8 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
1127 unsigned int authsize, 1112 unsigned int authsize,
1128 unsigned int ivsize, 1113 unsigned int ivsize,
1129 int icv_stashing, 1114 int icv_stashing,
1130 u32 cryptoflags) 1115 u32 cryptoflags,
1116 bool encrypt)
1131{ 1117{
1132 struct talitos_edesc *edesc; 1118 struct talitos_edesc *edesc;
1133 int assoc_nents = 0, src_nents, dst_nents, alloc_len, dma_len; 1119 int assoc_nents = 0, src_nents, dst_nents, alloc_len, dma_len;
@@ -1141,10 +1127,10 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
1141 return ERR_PTR(-EINVAL); 1127 return ERR_PTR(-EINVAL);
1142 } 1128 }
1143 1129
1144 if (iv) 1130 if (ivsize)
1145 iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE); 1131 iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
1146 1132
1147 if (assoc) { 1133 if (assoclen) {
1148 /* 1134 /*
1149 * Currently it is assumed that iv is provided whenever assoc 1135 * Currently it is assumed that iv is provided whenever assoc
1150 * is. 1136 * is.
@@ -1160,19 +1146,17 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
1160 assoc_nents = assoc_nents ? assoc_nents + 1 : 2; 1146 assoc_nents = assoc_nents ? assoc_nents + 1 : 2;
1161 } 1147 }
1162 1148
1163 src_nents = sg_count(src, cryptlen + authsize, &src_chained); 1149 if (!dst || dst == src) {
1164 src_nents = (src_nents == 1) ? 0 : src_nents; 1150 src_nents = sg_count(src, cryptlen + authsize, &src_chained);
1165 1151 src_nents = (src_nents == 1) ? 0 : src_nents;
1166 if (!dst) { 1152 dst_nents = dst ? src_nents : 0;
1167 dst_nents = 0; 1153 } else { /* dst && dst != src*/
1168 } else { 1154 src_nents = sg_count(src, cryptlen + (encrypt ? 0 : authsize),
1169 if (dst == src) { 1155 &src_chained);
1170 dst_nents = src_nents; 1156 src_nents = (src_nents == 1) ? 0 : src_nents;
1171 } else { 1157 dst_nents = sg_count(dst, cryptlen + (encrypt ? authsize : 0),
1172 dst_nents = sg_count(dst, cryptlen + authsize, 1158 &dst_chained);
1173 &dst_chained); 1159 dst_nents = (dst_nents == 1) ? 0 : dst_nents;
1174 dst_nents = (dst_nents == 1) ? 0 : dst_nents;
1175 }
1176 } 1160 }
1177 1161
1178 /* 1162 /*
@@ -1192,9 +1176,16 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
1192 1176
1193 edesc = kmalloc(alloc_len, GFP_DMA | flags); 1177 edesc = kmalloc(alloc_len, GFP_DMA | flags);
1194 if (!edesc) { 1178 if (!edesc) {
1195 talitos_unmap_sg_chain(dev, assoc, DMA_TO_DEVICE); 1179 if (assoc_chained)
1180 talitos_unmap_sg_chain(dev, assoc, DMA_TO_DEVICE);
1181 else if (assoclen)
1182 dma_unmap_sg(dev, assoc,
1183 assoc_nents ? assoc_nents - 1 : 1,
1184 DMA_TO_DEVICE);
1185
1196 if (iv_dma) 1186 if (iv_dma)
1197 dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE); 1187 dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
1188
1198 dev_err(dev, "could not allocate edescriptor\n"); 1189 dev_err(dev, "could not allocate edescriptor\n");
1199 return ERR_PTR(-ENOMEM); 1190 return ERR_PTR(-ENOMEM);
1200 } 1191 }
@@ -1216,7 +1207,7 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
1216} 1207}
1217 1208
1218static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv, 1209static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
1219 int icv_stashing) 1210 int icv_stashing, bool encrypt)
1220{ 1211{
1221 struct crypto_aead *authenc = crypto_aead_reqtfm(areq); 1212 struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
1222 struct talitos_ctx *ctx = crypto_aead_ctx(authenc); 1213 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
@@ -1225,7 +1216,7 @@ static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
1225 return talitos_edesc_alloc(ctx->dev, areq->assoc, areq->src, areq->dst, 1216 return talitos_edesc_alloc(ctx->dev, areq->assoc, areq->src, areq->dst,
1226 iv, areq->assoclen, areq->cryptlen, 1217 iv, areq->assoclen, areq->cryptlen,
1227 ctx->authsize, ivsize, icv_stashing, 1218 ctx->authsize, ivsize, icv_stashing,
1228 areq->base.flags); 1219 areq->base.flags, encrypt);
1229} 1220}
1230 1221
1231static int aead_encrypt(struct aead_request *req) 1222static int aead_encrypt(struct aead_request *req)
@@ -1235,7 +1226,7 @@ static int aead_encrypt(struct aead_request *req)
1235 struct talitos_edesc *edesc; 1226 struct talitos_edesc *edesc;
1236 1227
1237 /* allocate extended descriptor */ 1228 /* allocate extended descriptor */
1238 edesc = aead_edesc_alloc(req, req->iv, 0); 1229 edesc = aead_edesc_alloc(req, req->iv, 0, true);
1239 if (IS_ERR(edesc)) 1230 if (IS_ERR(edesc))
1240 return PTR_ERR(edesc); 1231 return PTR_ERR(edesc);
1241 1232
@@ -1258,7 +1249,7 @@ static int aead_decrypt(struct aead_request *req)
1258 req->cryptlen -= authsize; 1249 req->cryptlen -= authsize;
1259 1250
1260 /* allocate extended descriptor */ 1251 /* allocate extended descriptor */
1261 edesc = aead_edesc_alloc(req, req->iv, 1); 1252 edesc = aead_edesc_alloc(req, req->iv, 1, false);
1262 if (IS_ERR(edesc)) 1253 if (IS_ERR(edesc))
1263 return PTR_ERR(edesc); 1254 return PTR_ERR(edesc);
1264 1255
@@ -1304,7 +1295,7 @@ static int aead_givencrypt(struct aead_givcrypt_request *req)
1304 struct talitos_edesc *edesc; 1295 struct talitos_edesc *edesc;
1305 1296
1306 /* allocate extended descriptor */ 1297 /* allocate extended descriptor */
1307 edesc = aead_edesc_alloc(areq, req->giv, 0); 1298 edesc = aead_edesc_alloc(areq, req->giv, 0, true);
1308 if (IS_ERR(edesc)) 1299 if (IS_ERR(edesc))
1309 return PTR_ERR(edesc); 1300 return PTR_ERR(edesc);
1310 1301
@@ -1460,7 +1451,7 @@ static int common_nonsnoop(struct talitos_edesc *edesc,
1460} 1451}
1461 1452
1462static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request * 1453static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *
1463 areq) 1454 areq, bool encrypt)
1464{ 1455{
1465 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq); 1456 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1466 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher); 1457 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
@@ -1468,7 +1459,7 @@ static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *
1468 1459
1469 return talitos_edesc_alloc(ctx->dev, NULL, areq->src, areq->dst, 1460 return talitos_edesc_alloc(ctx->dev, NULL, areq->src, areq->dst,
1470 areq->info, 0, areq->nbytes, 0, ivsize, 0, 1461 areq->info, 0, areq->nbytes, 0, ivsize, 0,
1471 areq->base.flags); 1462 areq->base.flags, encrypt);
1472} 1463}
1473 1464
1474static int ablkcipher_encrypt(struct ablkcipher_request *areq) 1465static int ablkcipher_encrypt(struct ablkcipher_request *areq)
@@ -1478,7 +1469,7 @@ static int ablkcipher_encrypt(struct ablkcipher_request *areq)
1478 struct talitos_edesc *edesc; 1469 struct talitos_edesc *edesc;
1479 1470
1480 /* allocate extended descriptor */ 1471 /* allocate extended descriptor */
1481 edesc = ablkcipher_edesc_alloc(areq); 1472 edesc = ablkcipher_edesc_alloc(areq, true);
1482 if (IS_ERR(edesc)) 1473 if (IS_ERR(edesc))
1483 return PTR_ERR(edesc); 1474 return PTR_ERR(edesc);
1484 1475
@@ -1495,7 +1486,7 @@ static int ablkcipher_decrypt(struct ablkcipher_request *areq)
1495 struct talitos_edesc *edesc; 1486 struct talitos_edesc *edesc;
1496 1487
1497 /* allocate extended descriptor */ 1488 /* allocate extended descriptor */
1498 edesc = ablkcipher_edesc_alloc(areq); 1489 edesc = ablkcipher_edesc_alloc(areq, false);
1499 if (IS_ERR(edesc)) 1490 if (IS_ERR(edesc))
1500 return PTR_ERR(edesc); 1491 return PTR_ERR(edesc);
1501 1492
@@ -1647,7 +1638,7 @@ static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
1647 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); 1638 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1648 1639
1649 return talitos_edesc_alloc(ctx->dev, NULL, req_ctx->psrc, NULL, NULL, 0, 1640 return talitos_edesc_alloc(ctx->dev, NULL, req_ctx->psrc, NULL, NULL, 0,
1650 nbytes, 0, 0, 0, areq->base.flags); 1641 nbytes, 0, 0, 0, areq->base.flags, false);
1651} 1642}
1652 1643
1653static int ahash_init(struct ahash_request *areq) 1644static int ahash_init(struct ahash_request *areq)
diff --git a/drivers/crypto/tegra-aes.c b/drivers/crypto/tegra-aes.c
index fa05e3c329bd..060eecc5dbc3 100644
--- a/drivers/crypto/tegra-aes.c
+++ b/drivers/crypto/tegra-aes.c
@@ -27,6 +27,8 @@
27 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 27 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
28 */ 28 */
29 29
30#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
31
30#include <linux/module.h> 32#include <linux/module.h>
31#include <linux/init.h> 33#include <linux/init.h>
32#include <linux/errno.h> 34#include <linux/errno.h>
@@ -199,8 +201,6 @@ static void aes_workqueue_handler(struct work_struct *work);
199static DECLARE_WORK(aes_work, aes_workqueue_handler); 201static DECLARE_WORK(aes_work, aes_workqueue_handler);
200static struct workqueue_struct *aes_wq; 202static struct workqueue_struct *aes_wq;
201 203
202extern unsigned long long tegra_chip_uid(void);
203
204static inline u32 aes_readl(struct tegra_aes_dev *dd, u32 offset) 204static inline u32 aes_readl(struct tegra_aes_dev *dd, u32 offset)
205{ 205{
206 return readl(dd->io_base + offset); 206 return readl(dd->io_base + offset);
@@ -713,13 +713,12 @@ static int tegra_aes_rng_reset(struct crypto_rng *tfm, u8 *seed,
713 struct tegra_aes_dev *dd = aes_dev; 713 struct tegra_aes_dev *dd = aes_dev;
714 struct tegra_aes_ctx *ctx = &rng_ctx; 714 struct tegra_aes_ctx *ctx = &rng_ctx;
715 struct tegra_aes_slot *key_slot; 715 struct tegra_aes_slot *key_slot;
716 struct timespec ts;
717 int ret = 0; 716 int ret = 0;
718 u64 nsec, tmp[2]; 717 u8 tmp[16]; /* 16 bytes = 128 bits of entropy */
719 u8 *dt; 718 u8 *dt;
720 719
721 if (!ctx || !dd) { 720 if (!ctx || !dd) {
722 dev_err(dd->dev, "ctx=0x%x, dd=0x%x\n", 721 pr_err("ctx=0x%x, dd=0x%x\n",
723 (unsigned int)ctx, (unsigned int)dd); 722 (unsigned int)ctx, (unsigned int)dd);
724 return -EINVAL; 723 return -EINVAL;
725 } 724 }
@@ -778,14 +777,8 @@ static int tegra_aes_rng_reset(struct crypto_rng *tfm, u8 *seed,
778 if (dd->ivlen >= (2 * DEFAULT_RNG_BLK_SZ + AES_KEYSIZE_128)) { 777 if (dd->ivlen >= (2 * DEFAULT_RNG_BLK_SZ + AES_KEYSIZE_128)) {
779 dt = dd->iv + DEFAULT_RNG_BLK_SZ + AES_KEYSIZE_128; 778 dt = dd->iv + DEFAULT_RNG_BLK_SZ + AES_KEYSIZE_128;
780 } else { 779 } else {
781 getnstimeofday(&ts); 780 get_random_bytes(tmp, sizeof(tmp));
782 nsec = timespec_to_ns(&ts); 781 dt = tmp;
783 do_div(nsec, 1000);
784 nsec ^= dd->ctr << 56;
785 dd->ctr++;
786 tmp[0] = nsec;
787 tmp[1] = tegra_chip_uid();
788 dt = (u8 *)tmp;
789 } 782 }
790 memcpy(dd->dt, dt, DEFAULT_RNG_BLK_SZ); 783 memcpy(dd->dt, dt, DEFAULT_RNG_BLK_SZ);
791 784
@@ -804,7 +797,7 @@ static int tegra_aes_cra_init(struct crypto_tfm *tfm)
804 return 0; 797 return 0;
805} 798}
806 799
807void tegra_aes_cra_exit(struct crypto_tfm *tfm) 800static void tegra_aes_cra_exit(struct crypto_tfm *tfm)
808{ 801{
809 struct tegra_aes_ctx *ctx = 802 struct tegra_aes_ctx *ctx =
810 crypto_ablkcipher_ctx((struct crypto_ablkcipher *)tfm); 803 crypto_ablkcipher_ctx((struct crypto_ablkcipher *)tfm);
@@ -924,7 +917,7 @@ static int tegra_aes_probe(struct platform_device *pdev)
924 } 917 }
925 918
926 /* Initialize the vde clock */ 919 /* Initialize the vde clock */
927 dd->aes_clk = clk_get(dev, "vde"); 920 dd->aes_clk = devm_clk_get(dev, "vde");
928 if (IS_ERR(dd->aes_clk)) { 921 if (IS_ERR(dd->aes_clk)) {
929 dev_err(dev, "iclock intialization failed.\n"); 922 dev_err(dev, "iclock intialization failed.\n");
930 err = -ENODEV; 923 err = -ENODEV;
@@ -1033,8 +1026,6 @@ out:
1033 if (dd->buf_out) 1026 if (dd->buf_out)
1034 dma_free_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES, 1027 dma_free_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES,
1035 dd->buf_out, dd->dma_buf_out); 1028 dd->buf_out, dd->dma_buf_out);
1036 if (!IS_ERR(dd->aes_clk))
1037 clk_put(dd->aes_clk);
1038 if (aes_wq) 1029 if (aes_wq)
1039 destroy_workqueue(aes_wq); 1030 destroy_workqueue(aes_wq);
1040 spin_lock(&list_lock); 1031 spin_lock(&list_lock);
@@ -1068,7 +1059,6 @@ static int tegra_aes_remove(struct platform_device *pdev)
1068 dd->buf_in, dd->dma_buf_in); 1059 dd->buf_in, dd->dma_buf_in);
1069 dma_free_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES, 1060 dma_free_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES,
1070 dd->buf_out, dd->dma_buf_out); 1061 dd->buf_out, dd->dma_buf_out);
1071 clk_put(dd->aes_clk);
1072 aes_dev = NULL; 1062 aes_dev = NULL;
1073 1063
1074 return 0; 1064 return 0;
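The tegra-aes change above stops seeding the RNG's DT value from a timestamp XORed with tegra_chip_uid() and instead pulls it from the kernel entropy pool. The substitution itself is small; a sketch with the same 16-byte buffer used in the patch:

#include <linux/random.h>

u8 dt_seed[16];	/* 16 bytes = 128 bits of entropy, as in the patch */

get_random_bytes(dt_seed, sizeof(dt_seed));	/* replaces timestamp ^ chip UID */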
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 446687cc2334..c823daaf9043 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -62,6 +62,7 @@ config INTEL_IOATDMA
62 tristate "Intel I/OAT DMA support" 62 tristate "Intel I/OAT DMA support"
63 depends on PCI && X86 63 depends on PCI && X86
64 select DMA_ENGINE 64 select DMA_ENGINE
65 select DMA_ENGINE_RAID
65 select DCA 66 select DCA
66 help 67 help
67 Enable support for the Intel(R) I/OAT DMA engine present 68 Enable support for the Intel(R) I/OAT DMA engine present
@@ -112,6 +113,7 @@ config MV_XOR
112 bool "Marvell XOR engine support" 113 bool "Marvell XOR engine support"
113 depends on PLAT_ORION 114 depends on PLAT_ORION
114 select DMA_ENGINE 115 select DMA_ENGINE
116 select DMA_ENGINE_RAID
115 select ASYNC_TX_ENABLE_CHANNEL_SWITCH 117 select ASYNC_TX_ENABLE_CHANNEL_SWITCH
116 ---help--- 118 ---help---
117 Enable support for the Marvell XOR engine. 119 Enable support for the Marvell XOR engine.
@@ -187,6 +189,7 @@ config AMCC_PPC440SPE_ADMA
187 tristate "AMCC PPC440SPe ADMA support" 189 tristate "AMCC PPC440SPe ADMA support"
188 depends on 440SPe || 440SP 190 depends on 440SPe || 440SP
189 select DMA_ENGINE 191 select DMA_ENGINE
192 select DMA_ENGINE_RAID
190 select ARCH_HAS_ASYNC_TX_FIND_CHANNEL 193 select ARCH_HAS_ASYNC_TX_FIND_CHANNEL
191 select ASYNC_TX_ENABLE_CHANNEL_SWITCH 194 select ASYNC_TX_ENABLE_CHANNEL_SWITCH
192 help 195 help
@@ -352,6 +355,7 @@ config NET_DMA
352 bool "Network: TCP receive copy offload" 355 bool "Network: TCP receive copy offload"
353 depends on DMA_ENGINE && NET 356 depends on DMA_ENGINE && NET
354 default (INTEL_IOATDMA || FSL_DMA) 357 default (INTEL_IOATDMA || FSL_DMA)
358 depends on BROKEN
355 help 359 help
356 This enables the use of DMA engines in the network stack to 360 This enables the use of DMA engines in the network stack to
357 offload receive copy-to-user operations, freeing CPU cycles. 361 offload receive copy-to-user operations, freeing CPU cycles.
@@ -377,4 +381,7 @@ config DMATEST
377 Simple DMA test client. Say N unless you're debugging a 381 Simple DMA test client. Say N unless you're debugging a
378 DMA Device driver. 382 DMA Device driver.
379 383
384config DMA_ENGINE_RAID
385 bool
386
380endif 387endif
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 16a2aa28f856..ec4ee5c1fe9d 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -1169,7 +1169,7 @@ static void pl08x_desc_free(struct virt_dma_desc *vd)
1169 struct pl08x_txd *txd = to_pl08x_txd(&vd->tx); 1169 struct pl08x_txd *txd = to_pl08x_txd(&vd->tx);
1170 struct pl08x_dma_chan *plchan = to_pl08x_chan(vd->tx.chan); 1170 struct pl08x_dma_chan *plchan = to_pl08x_chan(vd->tx.chan);
1171 1171
1172 dma_descriptor_unmap(txd); 1172 dma_descriptor_unmap(&vd->tx);
1173 if (!txd->done) 1173 if (!txd->done)
1174 pl08x_release_mux(plchan); 1174 pl08x_release_mux(plchan);
1175 1175
diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h
index f31d647acdfa..2787aba60c6b 100644
--- a/drivers/dma/at_hdmac_regs.h
+++ b/drivers/dma/at_hdmac_regs.h
@@ -347,10 +347,6 @@ static struct device *chan2dev(struct dma_chan *chan)
347{ 347{
348 return &chan->dev->device; 348 return &chan->dev->device;
349} 349}
350static struct device *chan2parent(struct dma_chan *chan)
351{
352 return chan->dev->device.parent;
353}
354 350
355#if defined(VERBOSE_DEBUG) 351#if defined(VERBOSE_DEBUG)
356static void vdbg_dump_regs(struct at_dma_chan *atchan) 352static void vdbg_dump_regs(struct at_dma_chan *atchan)
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index ea806bdc12ef..ef63b9058f3c 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -912,7 +912,7 @@ struct dmaengine_unmap_pool {
912#define __UNMAP_POOL(x) { .size = x, .name = "dmaengine-unmap-" __stringify(x) } 912#define __UNMAP_POOL(x) { .size = x, .name = "dmaengine-unmap-" __stringify(x) }
913static struct dmaengine_unmap_pool unmap_pool[] = { 913static struct dmaengine_unmap_pool unmap_pool[] = {
914 __UNMAP_POOL(2), 914 __UNMAP_POOL(2),
915 #if IS_ENABLED(CONFIG_ASYNC_TX_DMA) 915 #if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
916 __UNMAP_POOL(16), 916 __UNMAP_POOL(16),
917 __UNMAP_POOL(128), 917 __UNMAP_POOL(128),
918 __UNMAP_POOL(256), 918 __UNMAP_POOL(256),
@@ -1054,7 +1054,7 @@ dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
1054 dma_cookie_t cookie; 1054 dma_cookie_t cookie;
1055 unsigned long flags; 1055 unsigned long flags;
1056 1056
1057 unmap = dmaengine_get_unmap_data(dev->dev, 2, GFP_NOIO); 1057 unmap = dmaengine_get_unmap_data(dev->dev, 2, GFP_NOWAIT);
1058 if (!unmap) 1058 if (!unmap)
1059 return -ENOMEM; 1059 return -ENOMEM;
1060 1060
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index 20f9a3aaf926..9dfcaf5c1288 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -539,9 +539,9 @@ static int dmatest_func(void *data)
539 539
540 um->len = params->buf_size; 540 um->len = params->buf_size;
541 for (i = 0; i < src_cnt; i++) { 541 for (i = 0; i < src_cnt; i++) {
542 unsigned long buf = (unsigned long) thread->srcs[i]; 542 void *buf = thread->srcs[i];
543 struct page *pg = virt_to_page(buf); 543 struct page *pg = virt_to_page(buf);
544 unsigned pg_off = buf & ~PAGE_MASK; 544 unsigned pg_off = (unsigned long) buf & ~PAGE_MASK;
545 545
546 um->addr[i] = dma_map_page(dev->dev, pg, pg_off, 546 um->addr[i] = dma_map_page(dev->dev, pg, pg_off,
547 um->len, DMA_TO_DEVICE); 547 um->len, DMA_TO_DEVICE);
@@ -559,9 +559,9 @@ static int dmatest_func(void *data)
559 /* map with DMA_BIDIRECTIONAL to force writeback/invalidate */ 559 /* map with DMA_BIDIRECTIONAL to force writeback/invalidate */
560 dsts = &um->addr[src_cnt]; 560 dsts = &um->addr[src_cnt];
561 for (i = 0; i < dst_cnt; i++) { 561 for (i = 0; i < dst_cnt; i++) {
562 unsigned long buf = (unsigned long) thread->dsts[i]; 562 void *buf = thread->dsts[i];
563 struct page *pg = virt_to_page(buf); 563 struct page *pg = virt_to_page(buf);
564 unsigned pg_off = buf & ~PAGE_MASK; 564 unsigned pg_off = (unsigned long) buf & ~PAGE_MASK;
565 565
566 dsts[i] = dma_map_page(dev->dev, pg, pg_off, um->len, 566 dsts[i] = dma_map_page(dev->dev, pg, pg_off, um->len,
567 DMA_BIDIRECTIONAL); 567 DMA_BIDIRECTIONAL);
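The dmatest change above keeps the test buffer as a void * and only casts to unsigned long when masking off the page offset, rather than feeding an integer to virt_to_page(). The same computation can be written with offset_in_page(); a sketch, assuming a lowmem (kmalloc'd) buffer:

#include <linux/mm.h>
#include <linux/dma-mapping.h>

static dma_addr_t map_lowmem_buf(struct device *dev, void *buf, size_t len,
				 enum dma_data_direction dir)
{
	struct page *pg = virt_to_page(buf);
	unsigned int pg_off = offset_in_page(buf); /* == (unsigned long)buf & ~PAGE_MASK */

	return dma_map_page(dev, pg, pg_off, len, dir);
}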
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 7086a16a55f2..f157c6f76b32 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -86,11 +86,6 @@ static void set_desc_cnt(struct fsldma_chan *chan,
86 hw->count = CPU_TO_DMA(chan, count, 32); 86 hw->count = CPU_TO_DMA(chan, count, 32);
87} 87}
88 88
89static u32 get_desc_cnt(struct fsldma_chan *chan, struct fsl_desc_sw *desc)
90{
91 return DMA_TO_CPU(chan, desc->hw.count, 32);
92}
93
94static void set_desc_src(struct fsldma_chan *chan, 89static void set_desc_src(struct fsldma_chan *chan,
95 struct fsl_dma_ld_hw *hw, dma_addr_t src) 90 struct fsl_dma_ld_hw *hw, dma_addr_t src)
96{ 91{
@@ -101,16 +96,6 @@ static void set_desc_src(struct fsldma_chan *chan,
101 hw->src_addr = CPU_TO_DMA(chan, snoop_bits | src, 64); 96 hw->src_addr = CPU_TO_DMA(chan, snoop_bits | src, 64);
102} 97}
103 98
104static dma_addr_t get_desc_src(struct fsldma_chan *chan,
105 struct fsl_desc_sw *desc)
106{
107 u64 snoop_bits;
108
109 snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
110 ? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0;
111 return DMA_TO_CPU(chan, desc->hw.src_addr, 64) & ~snoop_bits;
112}
113
114static void set_desc_dst(struct fsldma_chan *chan, 99static void set_desc_dst(struct fsldma_chan *chan,
115 struct fsl_dma_ld_hw *hw, dma_addr_t dst) 100 struct fsl_dma_ld_hw *hw, dma_addr_t dst)
116{ 101{
@@ -121,16 +106,6 @@ static void set_desc_dst(struct fsldma_chan *chan,
121 hw->dst_addr = CPU_TO_DMA(chan, snoop_bits | dst, 64); 106 hw->dst_addr = CPU_TO_DMA(chan, snoop_bits | dst, 64);
122} 107}
123 108
124static dma_addr_t get_desc_dst(struct fsldma_chan *chan,
125 struct fsl_desc_sw *desc)
126{
127 u64 snoop_bits;
128
129 snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
130 ? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0;
131 return DMA_TO_CPU(chan, desc->hw.dst_addr, 64) & ~snoop_bits;
132}
133
134static void set_desc_next(struct fsldma_chan *chan, 109static void set_desc_next(struct fsldma_chan *chan,
135 struct fsl_dma_ld_hw *hw, dma_addr_t next) 110 struct fsl_dma_ld_hw *hw, dma_addr_t next)
136{ 111{
@@ -408,7 +383,7 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
408 struct fsl_desc_sw *desc = tx_to_fsl_desc(tx); 383 struct fsl_desc_sw *desc = tx_to_fsl_desc(tx);
409 struct fsl_desc_sw *child; 384 struct fsl_desc_sw *child;
410 unsigned long flags; 385 unsigned long flags;
411 dma_cookie_t cookie; 386 dma_cookie_t cookie = -EINVAL;
412 387
413 spin_lock_irqsave(&chan->desc_lock, flags); 388 spin_lock_irqsave(&chan->desc_lock, flags);
414 389
@@ -854,10 +829,6 @@ static void fsldma_cleanup_descriptor(struct fsldma_chan *chan,
854 struct fsl_desc_sw *desc) 829 struct fsl_desc_sw *desc)
855{ 830{
856 struct dma_async_tx_descriptor *txd = &desc->async_tx; 831 struct dma_async_tx_descriptor *txd = &desc->async_tx;
857 struct device *dev = chan->common.device->dev;
858 dma_addr_t src = get_desc_src(chan, desc);
859 dma_addr_t dst = get_desc_dst(chan, desc);
860 u32 len = get_desc_cnt(chan, desc);
861 832
862 /* Run the link descriptor callback function */ 833 /* Run the link descriptor callback function */
863 if (txd->callback) { 834 if (txd->callback) {
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 1a49c777607c..87529181efcc 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -817,7 +817,15 @@ int ioat_dma_self_test(struct ioatdma_device *device)
817 } 817 }
818 818
819 dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE); 819 dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
820 if (dma_mapping_error(dev, dma_src)) {
821 dev_err(dev, "mapping src buffer failed\n");
822 goto free_resources;
823 }
820 dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE); 824 dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
825 if (dma_mapping_error(dev, dma_dest)) {
826 dev_err(dev, "mapping dest buffer failed\n");
827 goto unmap_src;
828 }
821 flags = DMA_PREP_INTERRUPT; 829 flags = DMA_PREP_INTERRUPT;
822 tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src, 830 tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src,
823 IOAT_TEST_SIZE, flags); 831 IOAT_TEST_SIZE, flags);
@@ -855,8 +863,9 @@ int ioat_dma_self_test(struct ioatdma_device *device)
855 } 863 }
856 864
857unmap_dma: 865unmap_dma:
858 dma_unmap_single(dev, dma_src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
859 dma_unmap_single(dev, dma_dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE); 866 dma_unmap_single(dev, dma_dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
867unmap_src:
868 dma_unmap_single(dev, dma_src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
860free_resources: 869free_resources:
861 dma->device_free_chan_resources(dma_chan); 870 dma->device_free_chan_resources(dma_chan);
862out: 871out:
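The ioat self-test above now checks each streaming mapping with dma_mapping_error(), since mappings can fail under an IOMMU or when swiotlb bounce buffers run out, and unwinds them in reverse order on failure. A minimal sketch of the map/check/unwind shape (names illustrative):

#include <linux/dma-mapping.h>

static int foo_map_test_bufs(struct device *dev, void *src, void *dest,
			     size_t len, dma_addr_t *dma_src, dma_addr_t *dma_dest)
{
	*dma_src = dma_map_single(dev, src, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *dma_src))
		return -ENOMEM;

	*dma_dest = dma_map_single(dev, dest, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, *dma_dest)) {
		/* undo the first mapping before reporting the failure */
		dma_unmap_single(dev, *dma_src, len, DMA_TO_DEVICE);
		return -ENOMEM;
	}
	return 0;
}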
diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c
index dcb1e05149a7..8869500ab92b 100644
--- a/drivers/dma/mmp_pdma.c
+++ b/drivers/dma/mmp_pdma.c
@@ -1017,6 +1017,7 @@ static int mmp_pdma_probe(struct platform_device *op)
1017 } 1017 }
1018 } 1018 }
1019 1019
1020 platform_set_drvdata(op, pdev);
1020 dev_info(pdev->device.dev, "initialized %d channels\n", dma_channels); 1021 dev_info(pdev->device.dev, "initialized %d channels\n", dma_channels);
1021 return 0; 1022 return 0;
1022} 1023}
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 7807f0ef4e20..53fb0c8365b0 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -54,12 +54,6 @@ static void mv_desc_init(struct mv_xor_desc_slot *desc, unsigned long flags)
54 hw_desc->desc_command = (1 << 31); 54 hw_desc->desc_command = (1 << 31);
55} 55}
56 56
57static u32 mv_desc_get_dest_addr(struct mv_xor_desc_slot *desc)
58{
59 struct mv_xor_desc *hw_desc = desc->hw_desc;
60 return hw_desc->phy_dest_addr;
61}
62
63static void mv_desc_set_byte_count(struct mv_xor_desc_slot *desc, 57static void mv_desc_set_byte_count(struct mv_xor_desc_slot *desc,
64 u32 byte_count) 58 u32 byte_count)
65{ 59{
@@ -787,7 +781,6 @@ static void mv_xor_issue_pending(struct dma_chan *chan)
787/* 781/*
788 * Perform a transaction to verify the HW works. 782 * Perform a transaction to verify the HW works.
789 */ 783 */
790#define MV_XOR_TEST_SIZE 2000
791 784
792static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan) 785static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
793{ 786{
@@ -797,20 +790,21 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
797 struct dma_chan *dma_chan; 790 struct dma_chan *dma_chan;
798 dma_cookie_t cookie; 791 dma_cookie_t cookie;
799 struct dma_async_tx_descriptor *tx; 792 struct dma_async_tx_descriptor *tx;
793 struct dmaengine_unmap_data *unmap;
800 int err = 0; 794 int err = 0;
801 795
802 src = kmalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL); 796 src = kmalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL);
803 if (!src) 797 if (!src)
804 return -ENOMEM; 798 return -ENOMEM;
805 799
806 dest = kzalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL); 800 dest = kzalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL);
807 if (!dest) { 801 if (!dest) {
808 kfree(src); 802 kfree(src);
809 return -ENOMEM; 803 return -ENOMEM;
810 } 804 }
811 805
812 /* Fill in src buffer */ 806 /* Fill in src buffer */
813 for (i = 0; i < MV_XOR_TEST_SIZE; i++) 807 for (i = 0; i < PAGE_SIZE; i++)
814 ((u8 *) src)[i] = (u8)i; 808 ((u8 *) src)[i] = (u8)i;
815 809
816 dma_chan = &mv_chan->dmachan; 810 dma_chan = &mv_chan->dmachan;
@@ -819,14 +813,26 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
819 goto out; 813 goto out;
820 } 814 }
821 815
822 dest_dma = dma_map_single(dma_chan->device->dev, dest, 816 unmap = dmaengine_get_unmap_data(dma_chan->device->dev, 2, GFP_KERNEL);
823 MV_XOR_TEST_SIZE, DMA_FROM_DEVICE); 817 if (!unmap) {
818 err = -ENOMEM;
819 goto free_resources;
820 }
821
822 src_dma = dma_map_page(dma_chan->device->dev, virt_to_page(src), 0,
823 PAGE_SIZE, DMA_TO_DEVICE);
824 unmap->to_cnt = 1;
825 unmap->addr[0] = src_dma;
824 826
825 src_dma = dma_map_single(dma_chan->device->dev, src, 827 dest_dma = dma_map_page(dma_chan->device->dev, virt_to_page(dest), 0,
826 MV_XOR_TEST_SIZE, DMA_TO_DEVICE); 828 PAGE_SIZE, DMA_FROM_DEVICE);
829 unmap->from_cnt = 1;
830 unmap->addr[1] = dest_dma;
831
832 unmap->len = PAGE_SIZE;
827 833
828 tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma, 834 tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
829 MV_XOR_TEST_SIZE, 0); 835 PAGE_SIZE, 0);
830 cookie = mv_xor_tx_submit(tx); 836 cookie = mv_xor_tx_submit(tx);
831 mv_xor_issue_pending(dma_chan); 837 mv_xor_issue_pending(dma_chan);
832 async_tx_ack(tx); 838 async_tx_ack(tx);
@@ -841,8 +847,8 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
841 } 847 }
842 848
843 dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma, 849 dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
844 MV_XOR_TEST_SIZE, DMA_FROM_DEVICE); 850 PAGE_SIZE, DMA_FROM_DEVICE);
845 if (memcmp(src, dest, MV_XOR_TEST_SIZE)) { 851 if (memcmp(src, dest, PAGE_SIZE)) {
846 dev_err(dma_chan->device->dev, 852 dev_err(dma_chan->device->dev,
847 "Self-test copy failed compare, disabling\n"); 853 "Self-test copy failed compare, disabling\n");
848 err = -ENODEV; 854 err = -ENODEV;
@@ -850,6 +856,7 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
850 } 856 }
851 857
852free_resources: 858free_resources:
859 dmaengine_unmap_put(unmap);
853 mv_xor_free_chan_resources(dma_chan); 860 mv_xor_free_chan_resources(dma_chan);
854out: 861out:
855 kfree(src); 862 kfree(src);
@@ -867,13 +874,15 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
867 dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST]; 874 dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST];
868 dma_addr_t dest_dma; 875 dma_addr_t dest_dma;
869 struct dma_async_tx_descriptor *tx; 876 struct dma_async_tx_descriptor *tx;
877 struct dmaengine_unmap_data *unmap;
870 struct dma_chan *dma_chan; 878 struct dma_chan *dma_chan;
871 dma_cookie_t cookie; 879 dma_cookie_t cookie;
872 u8 cmp_byte = 0; 880 u8 cmp_byte = 0;
873 u32 cmp_word; 881 u32 cmp_word;
874 int err = 0; 882 int err = 0;
883 int src_count = MV_XOR_NUM_SRC_TEST;
875 884
876 for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) { 885 for (src_idx = 0; src_idx < src_count; src_idx++) {
877 xor_srcs[src_idx] = alloc_page(GFP_KERNEL); 886 xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
878 if (!xor_srcs[src_idx]) { 887 if (!xor_srcs[src_idx]) {
879 while (src_idx--) 888 while (src_idx--)
@@ -890,13 +899,13 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
890 } 899 }
891 900
892 /* Fill in src buffers */ 901 /* Fill in src buffers */
893 for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) { 902 for (src_idx = 0; src_idx < src_count; src_idx++) {
894 u8 *ptr = page_address(xor_srcs[src_idx]); 903 u8 *ptr = page_address(xor_srcs[src_idx]);
895 for (i = 0; i < PAGE_SIZE; i++) 904 for (i = 0; i < PAGE_SIZE; i++)
896 ptr[i] = (1 << src_idx); 905 ptr[i] = (1 << src_idx);
897 } 906 }
898 907
899 for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) 908 for (src_idx = 0; src_idx < src_count; src_idx++)
900 cmp_byte ^= (u8) (1 << src_idx); 909 cmp_byte ^= (u8) (1 << src_idx);
901 910
902 cmp_word = (cmp_byte << 24) | (cmp_byte << 16) | 911 cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
@@ -910,16 +919,29 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
910 goto out; 919 goto out;
911 } 920 }
912 921
922 unmap = dmaengine_get_unmap_data(dma_chan->device->dev, src_count + 1,
923 GFP_KERNEL);
924 if (!unmap) {
925 err = -ENOMEM;
926 goto free_resources;
927 }
928
913 /* test xor */ 929 /* test xor */
914 dest_dma = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE, 930 for (i = 0; i < src_count; i++) {
915 DMA_FROM_DEVICE); 931 unmap->addr[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
932 0, PAGE_SIZE, DMA_TO_DEVICE);
933 dma_srcs[i] = unmap->addr[i];
934 unmap->to_cnt++;
935 }
916 936
917 for (i = 0; i < MV_XOR_NUM_SRC_TEST; i++) 937 unmap->addr[src_count] = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
918 dma_srcs[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i], 938 DMA_FROM_DEVICE);
919 0, PAGE_SIZE, DMA_TO_DEVICE); 939 dest_dma = unmap->addr[src_count];
940 unmap->from_cnt = 1;
941 unmap->len = PAGE_SIZE;
920 942
921 tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs, 943 tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
922 MV_XOR_NUM_SRC_TEST, PAGE_SIZE, 0); 944 src_count, PAGE_SIZE, 0);
923 945
924 cookie = mv_xor_tx_submit(tx); 946 cookie = mv_xor_tx_submit(tx);
925 mv_xor_issue_pending(dma_chan); 947 mv_xor_issue_pending(dma_chan);
@@ -948,9 +970,10 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
948 } 970 }
949 971
950free_resources: 972free_resources:
973 dmaengine_unmap_put(unmap);
951 mv_xor_free_chan_resources(dma_chan); 974 mv_xor_free_chan_resources(dma_chan);
952out: 975out:
953 src_idx = MV_XOR_NUM_SRC_TEST; 976 src_idx = src_count;
954 while (src_idx--) 977 while (src_idx--)
955 __free_page(xor_srcs[src_idx]); 978 __free_page(xor_srcs[src_idx]);
956 __free_page(dest); 979 __free_page(dest);
@@ -1176,6 +1199,7 @@ static int mv_xor_probe(struct platform_device *pdev)
1176 int i = 0; 1199 int i = 0;
1177 1200
1178 for_each_child_of_node(pdev->dev.of_node, np) { 1201 for_each_child_of_node(pdev->dev.of_node, np) {
1202 struct mv_xor_chan *chan;
1179 dma_cap_mask_t cap_mask; 1203 dma_cap_mask_t cap_mask;
1180 int irq; 1204 int irq;
1181 1205
@@ -1193,21 +1217,21 @@ static int mv_xor_probe(struct platform_device *pdev)
1193 goto err_channel_add; 1217 goto err_channel_add;
1194 } 1218 }
1195 1219
1196 xordev->channels[i] = 1220 chan = mv_xor_channel_add(xordev, pdev, i,
1197 mv_xor_channel_add(xordev, pdev, i, 1221 cap_mask, irq);
1198 cap_mask, irq); 1222 if (IS_ERR(chan)) {
1199 if (IS_ERR(xordev->channels[i])) { 1223 ret = PTR_ERR(chan);
1200 ret = PTR_ERR(xordev->channels[i]);
1201 xordev->channels[i] = NULL;
1202 irq_dispose_mapping(irq); 1224 irq_dispose_mapping(irq);
1203 goto err_channel_add; 1225 goto err_channel_add;
1204 } 1226 }
1205 1227
1228 xordev->channels[i] = chan;
1206 i++; 1229 i++;
1207 } 1230 }
1208 } else if (pdata && pdata->channels) { 1231 } else if (pdata && pdata->channels) {
1209 for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) { 1232 for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
1210 struct mv_xor_channel_data *cd; 1233 struct mv_xor_channel_data *cd;
1234 struct mv_xor_chan *chan;
1211 int irq; 1235 int irq;
1212 1236
1213 cd = &pdata->channels[i]; 1237 cd = &pdata->channels[i];
@@ -1222,13 +1246,14 @@ static int mv_xor_probe(struct platform_device *pdev)
1222 goto err_channel_add; 1246 goto err_channel_add;
1223 } 1247 }
1224 1248
1225 xordev->channels[i] = 1249 chan = mv_xor_channel_add(xordev, pdev, i,
1226 mv_xor_channel_add(xordev, pdev, i, 1250 cd->cap_mask, irq);
1227 cd->cap_mask, irq); 1251 if (IS_ERR(chan)) {
1228 if (IS_ERR(xordev->channels[i])) { 1252 ret = PTR_ERR(chan);
1229 ret = PTR_ERR(xordev->channels[i]);
1230 goto err_channel_add; 1253 goto err_channel_add;
1231 } 1254 }
1255
1256 xordev->channels[i] = chan;
1232 } 1257 }
1233 } 1258 }
1234 1259
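The mv_xor self-tests above now track their test mappings through the dmaengine unmap-data API, so a single dmaengine_unmap_put() on the free_resources path releases every page regardless of where the test bailed out. A sketch of the pattern for one source and one destination page (dma_chan, src and dest as in the test above; field names as in linux/dmaengine.h):

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>

struct dmaengine_unmap_data *unmap;

unmap = dmaengine_get_unmap_data(dma_chan->device->dev, 2, GFP_KERNEL);
if (!unmap)
	return -ENOMEM;

unmap->addr[0] = dma_map_page(dma_chan->device->dev, virt_to_page(src), 0,
			      PAGE_SIZE, DMA_TO_DEVICE);
unmap->to_cnt = 1;
unmap->addr[1] = dma_map_page(dma_chan->device->dev, virt_to_page(dest), 0,
			      PAGE_SIZE, DMA_FROM_DEVICE);
unmap->from_cnt = 1;
unmap->len = PAGE_SIZE;

/* ... submit and wait for the copy ... */

dmaengine_unmap_put(unmap);	/* drops the reference and unmaps both pages */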
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index cdf0483b8f2d..536632f6479c 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -2492,12 +2492,9 @@ static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx)
2492 2492
2493static inline void _init_desc(struct dma_pl330_desc *desc) 2493static inline void _init_desc(struct dma_pl330_desc *desc)
2494{ 2494{
2495 desc->pchan = NULL;
2496 desc->req.x = &desc->px; 2495 desc->req.x = &desc->px;
2497 desc->req.token = desc; 2496 desc->req.token = desc;
2498 desc->rqcfg.swap = SWAP_NO; 2497 desc->rqcfg.swap = SWAP_NO;
2499 desc->rqcfg.privileged = 0;
2500 desc->rqcfg.insnaccess = 0;
2501 desc->rqcfg.scctl = SCCTRL0; 2498 desc->rqcfg.scctl = SCCTRL0;
2502 desc->rqcfg.dcctl = DCCTRL0; 2499 desc->rqcfg.dcctl = DCCTRL0;
2503 desc->req.cfg = &desc->rqcfg; 2500 desc->req.cfg = &desc->rqcfg;
@@ -2517,7 +2514,7 @@ static int add_desc(struct dma_pl330_dmac *pdmac, gfp_t flg, int count)
2517 if (!pdmac) 2514 if (!pdmac)
2518 return 0; 2515 return 0;
2519 2516
2520 desc = kmalloc(count * sizeof(*desc), flg); 2517 desc = kcalloc(count, sizeof(*desc), flg);
2521 if (!desc) 2518 if (!desc)
2522 return 0; 2519 return 0;
2523 2520
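pl330 now allocates its descriptor pool with kcalloc(), which zeroes the array and checks the count * size multiplication for overflow; that is also why _init_desc() no longer clears pchan, privileged and insnaccess by hand. Sketch:

#include <linux/slab.h>

/* zeroed allocation: fields that must start out as 0/NULL need no explicit init */
desc = kcalloc(count, sizeof(*desc), flg);
if (!desc)
	return 0;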
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c
index 8da48c6b2a38..8bba298535b0 100644
--- a/drivers/dma/ppc4xx/adma.c
+++ b/drivers/dma/ppc4xx/adma.c
@@ -533,29 +533,6 @@ static void ppc440spe_desc_init_memcpy(struct ppc440spe_adma_desc_slot *desc,
533} 533}
534 534
535/** 535/**
536 * ppc440spe_desc_init_memset - initialize the descriptor for MEMSET operation
537 */
538static void ppc440spe_desc_init_memset(struct ppc440spe_adma_desc_slot *desc,
539 int value, unsigned long flags)
540{
541 struct dma_cdb *hw_desc = desc->hw_desc;
542
543 memset(desc->hw_desc, 0, sizeof(struct dma_cdb));
544 desc->hw_next = NULL;
545 desc->src_cnt = 1;
546 desc->dst_cnt = 1;
547
548 if (flags & DMA_PREP_INTERRUPT)
549 set_bit(PPC440SPE_DESC_INT, &desc->flags);
550 else
551 clear_bit(PPC440SPE_DESC_INT, &desc->flags);
552
553 hw_desc->sg1u = hw_desc->sg1l = cpu_to_le32((u32)value);
554 hw_desc->sg3u = hw_desc->sg3l = cpu_to_le32((u32)value);
555 hw_desc->opc = DMA_CDB_OPC_DFILL128;
556}
557
558/**
559 * ppc440spe_desc_set_src_addr - set source address into the descriptor 536 * ppc440spe_desc_set_src_addr - set source address into the descriptor
560 */ 537 */
561static void ppc440spe_desc_set_src_addr(struct ppc440spe_adma_desc_slot *desc, 538static void ppc440spe_desc_set_src_addr(struct ppc440spe_adma_desc_slot *desc,
@@ -1504,8 +1481,6 @@ static dma_cookie_t ppc440spe_adma_run_tx_complete_actions(
1504 struct ppc440spe_adma_chan *chan, 1481 struct ppc440spe_adma_chan *chan,
1505 dma_cookie_t cookie) 1482 dma_cookie_t cookie)
1506{ 1483{
1507 int i;
1508
1509 BUG_ON(desc->async_tx.cookie < 0); 1484 BUG_ON(desc->async_tx.cookie < 0);
1510 if (desc->async_tx.cookie > 0) { 1485 if (desc->async_tx.cookie > 0) {
1511 cookie = desc->async_tx.cookie; 1486 cookie = desc->async_tx.cookie;
@@ -3898,7 +3873,7 @@ static void ppc440spe_adma_init_capabilities(struct ppc440spe_adma_device *adev)
3898 ppc440spe_adma_prep_dma_interrupt; 3873 ppc440spe_adma_prep_dma_interrupt;
3899 } 3874 }
3900 pr_info("%s: AMCC(R) PPC440SP(E) ADMA Engine: " 3875 pr_info("%s: AMCC(R) PPC440SP(E) ADMA Engine: "
3901 "( %s%s%s%s%s%s%s)\n", 3876 "( %s%s%s%s%s%s)\n",
3902 dev_name(adev->dev), 3877 dev_name(adev->dev),
3903 dma_has_cap(DMA_PQ, adev->common.cap_mask) ? "pq " : "", 3878 dma_has_cap(DMA_PQ, adev->common.cap_mask) ? "pq " : "",
3904 dma_has_cap(DMA_PQ_VAL, adev->common.cap_mask) ? "pq_val " : "", 3879 dma_has_cap(DMA_PQ_VAL, adev->common.cap_mask) ? "pq_val " : "",
diff --git a/drivers/dma/s3c24xx-dma.c b/drivers/dma/s3c24xx-dma.c
index 4cb127978636..4eddedb6eb7d 100644
--- a/drivers/dma/s3c24xx-dma.c
+++ b/drivers/dma/s3c24xx-dma.c
@@ -628,42 +628,13 @@ retry:
628 s3cchan->state = S3C24XX_DMA_CHAN_IDLE; 628 s3cchan->state = S3C24XX_DMA_CHAN_IDLE;
629} 629}
630 630
631static void s3c24xx_dma_unmap_buffers(struct s3c24xx_txd *txd)
632{
633 struct device *dev = txd->vd.tx.chan->device->dev;
634 struct s3c24xx_sg *dsg;
635
636 if (!(txd->vd.tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
637 if (txd->vd.tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE)
638 list_for_each_entry(dsg, &txd->dsg_list, node)
639 dma_unmap_single(dev, dsg->src_addr, dsg->len,
640 DMA_TO_DEVICE);
641 else {
642 list_for_each_entry(dsg, &txd->dsg_list, node)
643 dma_unmap_page(dev, dsg->src_addr, dsg->len,
644 DMA_TO_DEVICE);
645 }
646 }
647
648 if (!(txd->vd.tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
649 if (txd->vd.tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE)
650 list_for_each_entry(dsg, &txd->dsg_list, node)
651 dma_unmap_single(dev, dsg->dst_addr, dsg->len,
652 DMA_FROM_DEVICE);
653 else
654 list_for_each_entry(dsg, &txd->dsg_list, node)
655 dma_unmap_page(dev, dsg->dst_addr, dsg->len,
656 DMA_FROM_DEVICE);
657 }
658}
659
660static void s3c24xx_dma_desc_free(struct virt_dma_desc *vd) 631static void s3c24xx_dma_desc_free(struct virt_dma_desc *vd)
661{ 632{
662 struct s3c24xx_txd *txd = to_s3c24xx_txd(&vd->tx); 633 struct s3c24xx_txd *txd = to_s3c24xx_txd(&vd->tx);
663 struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(vd->tx.chan); 634 struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(vd->tx.chan);
664 635
665 if (!s3cchan->slave) 636 if (!s3cchan->slave)
666 s3c24xx_dma_unmap_buffers(txd); 637 dma_descriptor_unmap(&vd->tx);
667 638
668 s3c24xx_dma_free_txd(txd); 639 s3c24xx_dma_free_txd(txd);
669} 640}
@@ -795,7 +766,7 @@ static enum dma_status s3c24xx_dma_tx_status(struct dma_chan *chan,
795 766
796 spin_lock_irqsave(&s3cchan->vc.lock, flags); 767 spin_lock_irqsave(&s3cchan->vc.lock, flags);
797 ret = dma_cookie_status(chan, cookie, txstate); 768 ret = dma_cookie_status(chan, cookie, txstate);
798 if (ret == DMA_SUCCESS) { 769 if (ret == DMA_COMPLETE) {
799 spin_unlock_irqrestore(&s3cchan->vc.lock, flags); 770 spin_unlock_irqrestore(&s3cchan->vc.lock, flags);
800 return ret; 771 return ret;
801 } 772 }
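The s3c24xx-dma hunk above makes two core-API substitutions: the open-coded buffer unmap helper becomes dma_descriptor_unmap(&vd->tx), and the status check follows the DMA_SUCCESS to DMA_COMPLETE rename. A sketch of the resulting tx_status shape, assuming the driver-internal cookie helpers from drivers/dma/dmaengine.h:

static enum dma_status foo_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
				     struct dma_tx_state *txstate)
{
	enum dma_status ret = dma_cookie_status(chan, cookie, txstate);

	if (ret == DMA_COMPLETE)	/* was DMA_SUCCESS before the rename */
		return ret;

	/* otherwise compute the residue under the channel lock and report it */
	return ret;
}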
diff --git a/drivers/dma/sh/rcar-hpbdma.c b/drivers/dma/sh/rcar-hpbdma.c
index ebad84591a6e..3083d901a414 100644
--- a/drivers/dma/sh/rcar-hpbdma.c
+++ b/drivers/dma/sh/rcar-hpbdma.c
@@ -60,6 +60,7 @@
60#define HPB_DMAE_DSTPR_DMSTP BIT(0) 60#define HPB_DMAE_DSTPR_DMSTP BIT(0)
61 61
62/* DMA status register (DSTSR) bits */ 62/* DMA status register (DSTSR) bits */
63#define HPB_DMAE_DSTSR_DQSTS BIT(2)
63#define HPB_DMAE_DSTSR_DMSTS BIT(0) 64#define HPB_DMAE_DSTSR_DMSTS BIT(0)
64 65
65/* DMA common registers */ 66/* DMA common registers */
@@ -286,6 +287,9 @@ static void hpb_dmae_halt(struct shdma_chan *schan)
286 287
287 ch_reg_write(chan, HPB_DMAE_DCMDR_DQEND, HPB_DMAE_DCMDR); 288 ch_reg_write(chan, HPB_DMAE_DCMDR_DQEND, HPB_DMAE_DCMDR);
288 ch_reg_write(chan, HPB_DMAE_DSTPR_DMSTP, HPB_DMAE_DSTPR); 289 ch_reg_write(chan, HPB_DMAE_DSTPR_DMSTP, HPB_DMAE_DSTPR);
290
291 chan->plane_idx = 0;
292 chan->first_desc = true;
289} 293}
290 294
291static const struct hpb_dmae_slave_config * 295static const struct hpb_dmae_slave_config *
@@ -385,7 +389,10 @@ static bool hpb_dmae_channel_busy(struct shdma_chan *schan)
385 struct hpb_dmae_chan *chan = to_chan(schan); 389 struct hpb_dmae_chan *chan = to_chan(schan);
386 u32 dstsr = ch_reg_read(chan, HPB_DMAE_DSTSR); 390 u32 dstsr = ch_reg_read(chan, HPB_DMAE_DSTSR);
387 391
388 return (dstsr & HPB_DMAE_DSTSR_DMSTS) == HPB_DMAE_DSTSR_DMSTS; 392 if (chan->xfer_mode == XFER_DOUBLE)
393 return dstsr & HPB_DMAE_DSTSR_DQSTS;
394 else
395 return dstsr & HPB_DMAE_DSTSR_DMSTS;
389} 396}
390 397
391static int 398static int
@@ -510,6 +517,8 @@ static int hpb_dmae_chan_probe(struct hpb_dmae_device *hpbdev, int id)
510 } 517 }
511 518
512 schan = &new_hpb_chan->shdma_chan; 519 schan = &new_hpb_chan->shdma_chan;
520 schan->max_xfer_len = HPB_DMA_TCR_MAX;
521
513 shdma_chan_probe(sdev, schan, id); 522 shdma_chan_probe(sdev, schan, id);
514 523
515 if (pdev->id >= 0) 524 if (pdev->id >= 0)
diff --git a/drivers/dma/txx9dmac.c b/drivers/dma/txx9dmac.c
index bae6c29f5502..17686caf64d5 100644
--- a/drivers/dma/txx9dmac.c
+++ b/drivers/dma/txx9dmac.c
@@ -406,7 +406,6 @@ txx9dmac_descriptor_complete(struct txx9dmac_chan *dc,
406 dma_async_tx_callback callback; 406 dma_async_tx_callback callback;
407 void *param; 407 void *param;
408 struct dma_async_tx_descriptor *txd = &desc->txd; 408 struct dma_async_tx_descriptor *txd = &desc->txd;
409 struct txx9dmac_slave *ds = dc->chan.private;
410 409
411 dev_vdbg(chan2dev(&dc->chan), "descriptor %u %p complete\n", 410 dev_vdbg(chan2dev(&dc->chan), "descriptor %u %p complete\n",
412 txd->cookie, desc); 411 txd->cookie, desc);
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index 8472405c5586..d7f1b57bd3be 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -945,7 +945,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
945 u32 tad_offset; 945 u32 tad_offset;
946 u32 rir_way; 946 u32 rir_way;
947 u32 mb, kb; 947 u32 mb, kb;
948 u64 ch_addr, offset, limit, prv = 0; 948 u64 ch_addr, offset, limit = 0, prv = 0;
949 949
950 950
951 /* 951 /*
diff --git a/drivers/extcon/extcon-arizona.c b/drivers/extcon/extcon-arizona.c
index 3c55ec856e39..a287cece0593 100644
--- a/drivers/extcon/extcon-arizona.c
+++ b/drivers/extcon/extcon-arizona.c
@@ -1082,7 +1082,7 @@ static void arizona_micd_set_level(struct arizona *arizona, int index,
1082static int arizona_extcon_probe(struct platform_device *pdev) 1082static int arizona_extcon_probe(struct platform_device *pdev)
1083{ 1083{
1084 struct arizona *arizona = dev_get_drvdata(pdev->dev.parent); 1084 struct arizona *arizona = dev_get_drvdata(pdev->dev.parent);
1085 struct arizona_pdata *pdata; 1085 struct arizona_pdata *pdata = &arizona->pdata;
1086 struct arizona_extcon_info *info; 1086 struct arizona_extcon_info *info;
1087 unsigned int val; 1087 unsigned int val;
1088 int jack_irq_fall, jack_irq_rise; 1088 int jack_irq_fall, jack_irq_rise;
@@ -1091,8 +1091,6 @@ static int arizona_extcon_probe(struct platform_device *pdev)
1091 if (!arizona->dapm || !arizona->dapm->card) 1091 if (!arizona->dapm || !arizona->dapm->card)
1092 return -EPROBE_DEFER; 1092 return -EPROBE_DEFER;
1093 1093
1094 pdata = dev_get_platdata(arizona->dev);
1095
1096 info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL); 1094 info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
1097 if (!info) { 1095 if (!info) {
1098 dev_err(&pdev->dev, "Failed to allocate memory\n"); 1096 dev_err(&pdev->dev, "Failed to allocate memory\n");
diff --git a/drivers/extcon/extcon-class.c b/drivers/extcon/extcon-class.c
index 15443d3b6be1..76322330cbd7 100644
--- a/drivers/extcon/extcon-class.c
+++ b/drivers/extcon/extcon-class.c
@@ -792,6 +792,8 @@ void extcon_dev_unregister(struct extcon_dev *edev)
792 return; 792 return;
793 } 793 }
794 794
795 device_unregister(&edev->dev);
796
795 if (edev->mutually_exclusive && edev->max_supported) { 797 if (edev->mutually_exclusive && edev->max_supported) {
796 for (index = 0; edev->mutually_exclusive[index]; 798 for (index = 0; edev->mutually_exclusive[index];
797 index++) 799 index++)
@@ -812,7 +814,6 @@ void extcon_dev_unregister(struct extcon_dev *edev)
812 if (switch_class) 814 if (switch_class)
813 class_compat_remove_link(switch_class, &edev->dev, NULL); 815 class_compat_remove_link(switch_class, &edev->dev, NULL);
814#endif 816#endif
815 device_unregister(&edev->dev);
816 put_device(&edev->dev); 817 put_device(&edev->dev);
817} 818}
818EXPORT_SYMBOL_GPL(extcon_dev_unregister); 819EXPORT_SYMBOL_GPL(extcon_dev_unregister);
diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile
index 299fad6b5867..5373dc5b6011 100644
--- a/drivers/firmware/Makefile
+++ b/drivers/firmware/Makefile
@@ -14,3 +14,4 @@ obj-$(CONFIG_FIRMWARE_MEMMAP) += memmap.o
14 14
15obj-$(CONFIG_GOOGLE_FIRMWARE) += google/ 15obj-$(CONFIG_GOOGLE_FIRMWARE) += google/
16obj-$(CONFIG_EFI) += efi/ 16obj-$(CONFIG_EFI) += efi/
17obj-$(CONFIG_UEFI_CPER) += efi/
diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig
index 3150aa4874e8..6aecbc86ec94 100644
--- a/drivers/firmware/efi/Kconfig
+++ b/drivers/firmware/efi/Kconfig
@@ -36,7 +36,7 @@ config EFI_VARS_PSTORE_DEFAULT_DISABLE
36 backend for pstore by default. This setting can be overridden 36 backend for pstore by default. This setting can be overridden
37 using the efivars module's pstore_disable parameter. 37 using the efivars module's pstore_disable parameter.
38 38
39config UEFI_CPER
40 def_bool n
41
42endmenu 39endmenu
40
41config UEFI_CPER
42 bool
diff --git a/drivers/firmware/efi/Makefile b/drivers/firmware/efi/Makefile
index 9ba156d3c775..6c2a41ec21ba 100644
--- a/drivers/firmware/efi/Makefile
+++ b/drivers/firmware/efi/Makefile
@@ -1,7 +1,7 @@
1# 1#
2# Makefile for linux kernel 2# Makefile for linux kernel
3# 3#
4obj-y += efi.o vars.o 4obj-$(CONFIG_EFI) += efi.o vars.o
5obj-$(CONFIG_EFI_VARS) += efivars.o 5obj-$(CONFIG_EFI_VARS) += efivars.o
6obj-$(CONFIG_EFI_VARS_PSTORE) += efi-pstore.o 6obj-$(CONFIG_EFI_VARS_PSTORE) += efi-pstore.o
7obj-$(CONFIG_UEFI_CPER) += cper.o 7obj-$(CONFIG_UEFI_CPER) += cper.o
diff --git a/drivers/firmware/efi/efi-pstore.c b/drivers/firmware/efi/efi-pstore.c
index 5002d50e3781..4b9dc836dcf9 100644
--- a/drivers/firmware/efi/efi-pstore.c
+++ b/drivers/firmware/efi/efi-pstore.c
@@ -18,14 +18,12 @@ module_param_named(pstore_disable, efivars_pstore_disable, bool, 0644);
18 18
19static int efi_pstore_open(struct pstore_info *psi) 19static int efi_pstore_open(struct pstore_info *psi)
20{ 20{
21 efivar_entry_iter_begin();
22 psi->data = NULL; 21 psi->data = NULL;
23 return 0; 22 return 0;
24} 23}
25 24
26static int efi_pstore_close(struct pstore_info *psi) 25static int efi_pstore_close(struct pstore_info *psi)
27{ 26{
28 efivar_entry_iter_end();
29 psi->data = NULL; 27 psi->data = NULL;
30 return 0; 28 return 0;
31} 29}
@@ -39,6 +37,12 @@ struct pstore_read_data {
39 char **buf; 37 char **buf;
40}; 38};
41 39
40static inline u64 generic_id(unsigned long timestamp,
41 unsigned int part, int count)
42{
43 return (timestamp * 100 + part) * 1000 + count;
44}
45
42static int efi_pstore_read_func(struct efivar_entry *entry, void *data) 46static int efi_pstore_read_func(struct efivar_entry *entry, void *data)
43{ 47{
44 efi_guid_t vendor = LINUX_EFI_CRASH_GUID; 48 efi_guid_t vendor = LINUX_EFI_CRASH_GUID;
@@ -57,7 +61,7 @@ static int efi_pstore_read_func(struct efivar_entry *entry, void *data)
57 61
58 if (sscanf(name, "dump-type%u-%u-%d-%lu-%c", 62 if (sscanf(name, "dump-type%u-%u-%d-%lu-%c",
59 cb_data->type, &part, &cnt, &time, &data_type) == 5) { 63 cb_data->type, &part, &cnt, &time, &data_type) == 5) {
60 *cb_data->id = part; 64 *cb_data->id = generic_id(time, part, cnt);
61 *cb_data->count = cnt; 65 *cb_data->count = cnt;
62 cb_data->timespec->tv_sec = time; 66 cb_data->timespec->tv_sec = time;
63 cb_data->timespec->tv_nsec = 0; 67 cb_data->timespec->tv_nsec = 0;
@@ -67,7 +71,7 @@ static int efi_pstore_read_func(struct efivar_entry *entry, void *data)
67 *cb_data->compressed = false; 71 *cb_data->compressed = false;
68 } else if (sscanf(name, "dump-type%u-%u-%d-%lu", 72 } else if (sscanf(name, "dump-type%u-%u-%d-%lu",
69 cb_data->type, &part, &cnt, &time) == 4) { 73 cb_data->type, &part, &cnt, &time) == 4) {
70 *cb_data->id = part; 74 *cb_data->id = generic_id(time, part, cnt);
71 *cb_data->count = cnt; 75 *cb_data->count = cnt;
72 cb_data->timespec->tv_sec = time; 76 cb_data->timespec->tv_sec = time;
73 cb_data->timespec->tv_nsec = 0; 77 cb_data->timespec->tv_nsec = 0;
@@ -79,7 +83,7 @@ static int efi_pstore_read_func(struct efivar_entry *entry, void *data)
79 * which doesn't support holding 83 * which doesn't support holding
80 * multiple logs, remains. 84 * multiple logs, remains.
81 */ 85 */
82 *cb_data->id = part; 86 *cb_data->id = generic_id(time, part, 0);
83 *cb_data->count = 0; 87 *cb_data->count = 0;
84 cb_data->timespec->tv_sec = time; 88 cb_data->timespec->tv_sec = time;
85 cb_data->timespec->tv_nsec = 0; 89 cb_data->timespec->tv_nsec = 0;
@@ -91,19 +95,125 @@ static int efi_pstore_read_func(struct efivar_entry *entry, void *data)
91 __efivar_entry_get(entry, &entry->var.Attributes, 95 __efivar_entry_get(entry, &entry->var.Attributes,
92 &entry->var.DataSize, entry->var.Data); 96 &entry->var.DataSize, entry->var.Data);
93 size = entry->var.DataSize; 97 size = entry->var.DataSize;
98 memcpy(*cb_data->buf, entry->var.Data,
99 (size_t)min_t(unsigned long, EFIVARS_DATA_SIZE_MAX, size));
94 100
95 *cb_data->buf = kmemdup(entry->var.Data, size, GFP_KERNEL);
96 if (*cb_data->buf == NULL)
97 return -ENOMEM;
98 return size; 101 return size;
99} 102}
100 103
104/**
105 * efi_pstore_scan_sysfs_enter
106 * @entry: scanning entry
107 * @next: next entry
108 * @head: list head
109 */
110static void efi_pstore_scan_sysfs_enter(struct efivar_entry *pos,
111 struct efivar_entry *next,
112 struct list_head *head)
113{
114 pos->scanning = true;
115 if (&next->list != head)
116 next->scanning = true;
117}
118
119/**
120 * __efi_pstore_scan_sysfs_exit
121 * @entry: deleting entry
122 * @turn_off_scanning: Check if a scanning flag should be turned off
123 */
124static inline void __efi_pstore_scan_sysfs_exit(struct efivar_entry *entry,
125 bool turn_off_scanning)
126{
127 if (entry->deleting) {
128 list_del(&entry->list);
129 efivar_entry_iter_end();
130 efivar_unregister(entry);
131 efivar_entry_iter_begin();
132 } else if (turn_off_scanning)
133 entry->scanning = false;
134}
135
136/**
137 * efi_pstore_scan_sysfs_exit
138 * @pos: scanning entry
139 * @next: next entry
140 * @head: list head
141 * @stop: a flag checking if scanning will stop
142 */
143static void efi_pstore_scan_sysfs_exit(struct efivar_entry *pos,
144 struct efivar_entry *next,
145 struct list_head *head, bool stop)
146{
147 __efi_pstore_scan_sysfs_exit(pos, true);
148 if (stop)
149 __efi_pstore_scan_sysfs_exit(next, &next->list != head);
150}
151
152/**
153 * efi_pstore_sysfs_entry_iter
154 *
155 * @data: function-specific data to pass to callback
156 * @pos: entry to begin iterating from
157 *
158 * You MUST call efivar_enter_iter_begin() before this function, and
159 * efivar_entry_iter_end() afterwards.
160 *
161 * It is possible to begin iteration from an arbitrary entry within
162 * the list by passing @pos. @pos is updated on return to point to
163 * the next entry of the last one passed to efi_pstore_read_func().
164 * To begin iterating from the beginning of the list @pos must be %NULL.
165 */
166static int efi_pstore_sysfs_entry_iter(void *data, struct efivar_entry **pos)
167{
168 struct efivar_entry *entry, *n;
169 struct list_head *head = &efivar_sysfs_list;
170 int size = 0;
171
172 if (!*pos) {
173 list_for_each_entry_safe(entry, n, head, list) {
174 efi_pstore_scan_sysfs_enter(entry, n, head);
175
176 size = efi_pstore_read_func(entry, data);
177 efi_pstore_scan_sysfs_exit(entry, n, head, size < 0);
178 if (size)
179 break;
180 }
181 *pos = n;
182 return size;
183 }
184
185 list_for_each_entry_safe_from((*pos), n, head, list) {
186 efi_pstore_scan_sysfs_enter((*pos), n, head);
187
188 size = efi_pstore_read_func((*pos), data);
189 efi_pstore_scan_sysfs_exit((*pos), n, head, size < 0);
190 if (size)
191 break;
192 }
193 *pos = n;
194 return size;
195}
196
197/**
198 * efi_pstore_read
199 *
200 * This function returns a size of NVRAM entry logged via efi_pstore_write().
201 * The meaning and behavior of efi_pstore/pstore are as below.
202 *
203 * size > 0: Got data of an entry logged via efi_pstore_write() successfully,
204 * and pstore filesystem will continue reading subsequent entries.
205 * size == 0: Entry was not logged via efi_pstore_write(),
206 * and efi_pstore driver will continue reading subsequent entries.
207 * size < 0: Failed to get data of entry logging via efi_pstore_write(),
208 * and pstore will stop reading entry.
209 */
101static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type, 210static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type,
102 int *count, struct timespec *timespec, 211 int *count, struct timespec *timespec,
103 char **buf, bool *compressed, 212 char **buf, bool *compressed,
104 struct pstore_info *psi) 213 struct pstore_info *psi)
105{ 214{
106 struct pstore_read_data data; 215 struct pstore_read_data data;
216 ssize_t size;
107 217
108 data.id = id; 218 data.id = id;
109 data.type = type; 219 data.type = type;
@@ -112,8 +222,17 @@ static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type,
112 data.compressed = compressed; 222 data.compressed = compressed;
113 data.buf = buf; 223 data.buf = buf;
114 224
115 return __efivar_entry_iter(efi_pstore_read_func, &efivar_sysfs_list, &data, 225 *data.buf = kzalloc(EFIVARS_DATA_SIZE_MAX, GFP_KERNEL);
116 (struct efivar_entry **)&psi->data); 226 if (!*data.buf)
227 return -ENOMEM;
228
229 efivar_entry_iter_begin();
230 size = efi_pstore_sysfs_entry_iter(&data,
231 (struct efivar_entry **)&psi->data);
232 efivar_entry_iter_end();
233 if (size <= 0)
234 kfree(*data.buf);
235 return size;
117} 236}
118 237
119static int efi_pstore_write(enum pstore_type_id type, 238static int efi_pstore_write(enum pstore_type_id type,
@@ -184,9 +303,17 @@ static int efi_pstore_erase_func(struct efivar_entry *entry, void *data)
184 return 0; 303 return 0;
185 } 304 }
186 305
306 if (entry->scanning) {
307 /*
308 * Skip deletion because this entry will be deleted
309 * after scanning is completed.
310 */
311 entry->deleting = true;
312 } else
313 list_del(&entry->list);
314
187 /* found */ 315 /* found */
188 __efivar_entry_delete(entry); 316 __efivar_entry_delete(entry);
189 list_del(&entry->list);
190 317
191 return 1; 318 return 1;
192} 319}
@@ -199,14 +326,16 @@ static int efi_pstore_erase(enum pstore_type_id type, u64 id, int count,
199 char name[DUMP_NAME_LEN]; 326 char name[DUMP_NAME_LEN];
200 efi_char16_t efi_name[DUMP_NAME_LEN]; 327 efi_char16_t efi_name[DUMP_NAME_LEN];
201 int found, i; 328 int found, i;
329 unsigned int part;
202 330
203 sprintf(name, "dump-type%u-%u-%d-%lu", type, (unsigned int)id, count, 331 do_div(id, 1000);
204 time.tv_sec); 332 part = do_div(id, 100);
333 sprintf(name, "dump-type%u-%u-%d-%lu", type, part, count, time.tv_sec);
205 334
206 for (i = 0; i < DUMP_NAME_LEN; i++) 335 for (i = 0; i < DUMP_NAME_LEN; i++)
207 efi_name[i] = name[i]; 336 efi_name[i] = name[i];
208 337
209 edata.id = id; 338 edata.id = part;
210 edata.type = type; 339 edata.type = type;
211 edata.count = count; 340 edata.count = count;
212 edata.time = time; 341 edata.time = time;
@@ -214,10 +343,12 @@ static int efi_pstore_erase(enum pstore_type_id type, u64 id, int count,
214 343
215 efivar_entry_iter_begin(); 344 efivar_entry_iter_begin();
216 found = __efivar_entry_iter(efi_pstore_erase_func, &efivar_sysfs_list, &edata, &entry); 345 found = __efivar_entry_iter(efi_pstore_erase_func, &efivar_sysfs_list, &edata, &entry);
217 efivar_entry_iter_end();
218 346
219 if (found) 347 if (found && !entry->scanning) {
348 efivar_entry_iter_end();
220 efivar_unregister(entry); 349 efivar_unregister(entry);
350 } else
351 efivar_entry_iter_end();
221 352
222 return 0; 353 return 0;
223} 354}
@@ -225,6 +356,7 @@ static int efi_pstore_erase(enum pstore_type_id type, u64 id, int count,
225static struct pstore_info efi_pstore_info = { 356static struct pstore_info efi_pstore_info = {
226 .owner = THIS_MODULE, 357 .owner = THIS_MODULE,
227 .name = "efi", 358 .name = "efi",
359 .flags = PSTORE_FLAGS_FRAGILE,
228 .open = efi_pstore_open, 360 .open = efi_pstore_open,
229 .close = efi_pstore_close, 361 .close = efi_pstore_close,
230 .read = efi_pstore_read, 362 .read = efi_pstore_read,
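The id handed back to pstore is no longer just the part number: efi_pstore_read_func() now packs timestamp, part and count into a single 64-bit value via generic_id(), and efi_pstore_erase() recovers the part number again with its two do_div() calls. A minimal user-space sketch of that arithmetic, with made-up sample values:

#include <stdio.h>
#include <stdint.h>

/* Same packing as the new generic_id() helper in efi-pstore.c. */
static uint64_t generic_id(unsigned long timestamp, unsigned int part, int count)
{
	return ((uint64_t)timestamp * 100 + part) * 1000 + count;
}

int main(void)
{
	uint64_t id = generic_id(1386000000UL, 7, 3);	/* hypothetical record */

	/* efi_pstore_erase() mirrors this with do_div(): strip the count,
	 * then take the low two decimal digits as the part number. */
	uint64_t tmp = id / 1000;
	unsigned int part = (unsigned int)(tmp % 100);

	printf("id=%llu part=%u\n", (unsigned long long)id, part);
	return 0;
}

Rebuilding the "dump-type%u-%u-%d-%lu" variable name from that recovered part keeps erase compatible with the names written at dump time.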
diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c
index 933eb027d527..3dc248239197 100644
--- a/drivers/firmware/efi/efivars.c
+++ b/drivers/firmware/efi/efivars.c
@@ -383,12 +383,16 @@ static ssize_t efivar_delete(struct file *filp, struct kobject *kobj,
383 else if (__efivar_entry_delete(entry)) 383 else if (__efivar_entry_delete(entry))
384 err = -EIO; 384 err = -EIO;
385 385
386 efivar_entry_iter_end(); 386 if (err) {
387 387 efivar_entry_iter_end();
388 if (err)
389 return err; 388 return err;
389 }
390 390
391 efivar_unregister(entry); 391 if (!entry->scanning) {
392 efivar_entry_iter_end();
393 efivar_unregister(entry);
394 } else
395 efivar_entry_iter_end();
392 396
393 /* It's dead Jim.... */ 397 /* It's dead Jim.... */
394 return count; 398 return count;
diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c
index 391c67b182d9..b22659cccca4 100644
--- a/drivers/firmware/efi/vars.c
+++ b/drivers/firmware/efi/vars.c
@@ -683,8 +683,16 @@ struct efivar_entry *efivar_entry_find(efi_char16_t *name, efi_guid_t guid,
683 if (!found) 683 if (!found)
684 return NULL; 684 return NULL;
685 685
686 if (remove) 686 if (remove) {
687 list_del(&entry->list); 687 if (entry->scanning) {
688 /*
689 * The entry will be deleted
690 * after scanning is completed.
691 */
692 entry->deleting = true;
693 } else
694 list_del(&entry->list);
695 }
688 696
689 return entry; 697 return entry;
690} 698}
diff --git a/drivers/gpio/gpio-bcm-kona.c b/drivers/gpio/gpio-bcm-kona.c
index 72c927dc3be1..54c18c220a60 100644
--- a/drivers/gpio/gpio-bcm-kona.c
+++ b/drivers/gpio/gpio-bcm-kona.c
@@ -158,7 +158,7 @@ static int bcm_kona_gpio_get(struct gpio_chip *chip, unsigned gpio)
158 spin_unlock_irqrestore(&kona_gpio->lock, flags); 158 spin_unlock_irqrestore(&kona_gpio->lock, flags);
159 159
160 /* return the specified bit status */ 160 /* return the specified bit status */
161 return !!(val & bit); 161 return !!(val & BIT(bit));
162} 162}
163 163
164static int bcm_kona_gpio_direction_input(struct gpio_chip *chip, unsigned gpio) 164static int bcm_kona_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
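The one-character fix in bcm_kona_gpio_get() is easy to misread: the old code masked the register value with the GPIO number itself, while the new code masks it with the corresponding single-bit mask. A small user-space sketch of the difference, with BIT() redefined locally and an invented register value:

#include <stdio.h>
#include <stdint.h>

#define BIT(n)	(1u << (n))

/* Old behaviour: the line number is used directly as the mask. */
static int get_old(uint32_t val, unsigned int bit)
{
	return !!(val & bit);
}

/* Patched behaviour: the line number is converted to a one-bit mask first. */
static int get_new(uint32_t val, unsigned int bit)
{
	return !!(val & BIT(bit));
}

int main(void)
{
	uint32_t val = BIT(4);	/* only GPIO line 4 reads high */

	printf("old read of line 4: %d (wrong)\n", get_old(val, 4));
	printf("new read of line 4: %d\n", get_new(val, 4));
	return 0;
}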
diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c
index 8847adf392b7..84be70157ad6 100644
--- a/drivers/gpio/gpio-davinci.c
+++ b/drivers/gpio/gpio-davinci.c
@@ -327,7 +327,7 @@ static int gpio_to_irq_unbanked(struct gpio_chip *chip, unsigned offset)
327 * NOTE: we assume for now that only irqs in the first gpio_chip 327 * NOTE: we assume for now that only irqs in the first gpio_chip
328 * can provide direct-mapped IRQs to AINTC (up to 32 GPIOs). 328 * can provide direct-mapped IRQs to AINTC (up to 32 GPIOs).
329 */ 329 */
330 if (offset < d->irq_base) 330 if (offset < d->gpio_unbanked)
331 return d->gpio_irq + offset; 331 return d->gpio_irq + offset;
332 else 332 else
333 return -ENODEV; 333 return -ENODEV;
@@ -419,6 +419,8 @@ static int davinci_gpio_irq_setup(struct platform_device *pdev)
419 419
420 /* pass "bank 0" GPIO IRQs to AINTC */ 420 /* pass "bank 0" GPIO IRQs to AINTC */
421 chips[0].chip.to_irq = gpio_to_irq_unbanked; 421 chips[0].chip.to_irq = gpio_to_irq_unbanked;
422 chips[0].gpio_irq = bank_irq;
423 chips[0].gpio_unbanked = pdata->gpio_unbanked;
422 binten = BIT(0); 424 binten = BIT(0);
423 425
424 /* AINTC handles mask/unmask; GPIO handles triggering */ 426 /* AINTC handles mask/unmask; GPIO handles triggering */
diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c
index 914e859e3eda..d7d6d72eba33 100644
--- a/drivers/gpio/gpio-mpc8xxx.c
+++ b/drivers/gpio/gpio-mpc8xxx.c
@@ -70,10 +70,14 @@ static int mpc8572_gpio_get(struct gpio_chip *gc, unsigned int gpio)
70 u32 val; 70 u32 val;
71 struct of_mm_gpio_chip *mm = to_of_mm_gpio_chip(gc); 71 struct of_mm_gpio_chip *mm = to_of_mm_gpio_chip(gc);
72 struct mpc8xxx_gpio_chip *mpc8xxx_gc = to_mpc8xxx_gpio_chip(mm); 72 struct mpc8xxx_gpio_chip *mpc8xxx_gc = to_mpc8xxx_gpio_chip(mm);
73 u32 out_mask, out_shadow;
73 74
74 val = in_be32(mm->regs + GPIO_DAT) & ~in_be32(mm->regs + GPIO_DIR); 75 out_mask = in_be32(mm->regs + GPIO_DIR);
75 76
76 return (val | mpc8xxx_gc->data) & mpc8xxx_gpio2mask(gpio); 77 val = in_be32(mm->regs + GPIO_DAT) & ~out_mask;
78 out_shadow = mpc8xxx_gc->data & out_mask;
79
80 return (val | out_shadow) & mpc8xxx_gpio2mask(gpio);
77} 81}
78 82
79static int mpc8xxx_gpio_get(struct gpio_chip *gc, unsigned int gpio) 83static int mpc8xxx_gpio_get(struct gpio_chip *gc, unsigned int gpio)
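In the mpc8572 read path the patch stops the driver's shadow data from leaking into lines that are currently inputs: the shadow copy is now masked with the direction register before being OR-ed with the hardware DAT value. A self-contained sketch of old versus new behaviour, using made-up register contents:

#include <stdio.h>
#include <stdint.h>

/* Hypothetical snapshot: bit0 is an output, bit1 was switched back to an
 * input but the shadow copy still carries a stale 1 for it. */
static const uint32_t dat = 0x0;	/* both pins are actually low */
static const uint32_t dir = 0x1;	/* 1 = configured as output */
static const uint32_t shadow = 0x3;	/* driver's last-written data */

static uint32_t get_old(uint32_t mask)
{
	uint32_t val = dat & ~dir;

	return (val | shadow) & mask;		/* stale shadow bit leaks */
}

static uint32_t get_new(uint32_t mask)
{
	uint32_t out_mask = dir;
	uint32_t val = dat & ~out_mask;
	uint32_t out_shadow = shadow & out_mask; /* shadow for outputs only */

	return (val | out_shadow) & mask;
}

int main(void)
{
	printf("input line 1, old read: %d\n", !!get_old(1u << 1));
	printf("input line 1, new read: %d\n", !!get_new(1u << 1));
	return 0;
}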
diff --git a/drivers/gpio/gpio-msm-v2.c b/drivers/gpio/gpio-msm-v2.c
index f7a0cc4da950..2baf0ddf7e02 100644
--- a/drivers/gpio/gpio-msm-v2.c
+++ b/drivers/gpio/gpio-msm-v2.c
@@ -102,7 +102,7 @@ struct msm_gpio_dev {
102 DECLARE_BITMAP(wake_irqs, MAX_NR_GPIO); 102 DECLARE_BITMAP(wake_irqs, MAX_NR_GPIO);
103 DECLARE_BITMAP(dual_edge_irqs, MAX_NR_GPIO); 103 DECLARE_BITMAP(dual_edge_irqs, MAX_NR_GPIO);
104 struct irq_domain *domain; 104 struct irq_domain *domain;
105 unsigned int summary_irq; 105 int summary_irq;
106 void __iomem *msm_tlmm_base; 106 void __iomem *msm_tlmm_base;
107}; 107};
108 108
@@ -252,7 +252,7 @@ static void msm_gpio_irq_mask(struct irq_data *d)
252 252
253 spin_lock_irqsave(&tlmm_lock, irq_flags); 253 spin_lock_irqsave(&tlmm_lock, irq_flags);
254 writel(TARGET_PROC_NONE, GPIO_INTR_CFG_SU(gpio)); 254 writel(TARGET_PROC_NONE, GPIO_INTR_CFG_SU(gpio));
255 clear_gpio_bits(INTR_RAW_STATUS_EN | INTR_ENABLE, GPIO_INTR_CFG(gpio)); 255 clear_gpio_bits(BIT(INTR_RAW_STATUS_EN) | BIT(INTR_ENABLE), GPIO_INTR_CFG(gpio));
256 __clear_bit(gpio, msm_gpio.enabled_irqs); 256 __clear_bit(gpio, msm_gpio.enabled_irqs);
257 spin_unlock_irqrestore(&tlmm_lock, irq_flags); 257 spin_unlock_irqrestore(&tlmm_lock, irq_flags);
258} 258}
@@ -264,7 +264,7 @@ static void msm_gpio_irq_unmask(struct irq_data *d)
264 264
265 spin_lock_irqsave(&tlmm_lock, irq_flags); 265 spin_lock_irqsave(&tlmm_lock, irq_flags);
266 __set_bit(gpio, msm_gpio.enabled_irqs); 266 __set_bit(gpio, msm_gpio.enabled_irqs);
267 set_gpio_bits(INTR_RAW_STATUS_EN | INTR_ENABLE, GPIO_INTR_CFG(gpio)); 267 set_gpio_bits(BIT(INTR_RAW_STATUS_EN) | BIT(INTR_ENABLE), GPIO_INTR_CFG(gpio));
268 writel(TARGET_PROC_SCORPION, GPIO_INTR_CFG_SU(gpio)); 268 writel(TARGET_PROC_SCORPION, GPIO_INTR_CFG_SU(gpio));
269 spin_unlock_irqrestore(&tlmm_lock, irq_flags); 269 spin_unlock_irqrestore(&tlmm_lock, irq_flags);
270} 270}
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
index 3c3321f94053..db3129043e63 100644
--- a/drivers/gpio/gpio-mvebu.c
+++ b/drivers/gpio/gpio-mvebu.c
@@ -79,7 +79,7 @@ struct mvebu_gpio_chip {
79 spinlock_t lock; 79 spinlock_t lock;
80 void __iomem *membase; 80 void __iomem *membase;
81 void __iomem *percpu_membase; 81 void __iomem *percpu_membase;
82 unsigned int irqbase; 82 int irqbase;
83 struct irq_domain *domain; 83 struct irq_domain *domain;
84 int soc_variant; 84 int soc_variant;
85}; 85};
diff --git a/drivers/gpio/gpio-pl061.c b/drivers/gpio/gpio-pl061.c
index f22f7f3e2e53..b4d42112d02d 100644
--- a/drivers/gpio/gpio-pl061.c
+++ b/drivers/gpio/gpio-pl061.c
@@ -286,11 +286,6 @@ static int pl061_probe(struct amba_device *adev, const struct amba_id *id)
286 if (!chip->base) 286 if (!chip->base)
287 return -ENOMEM; 287 return -ENOMEM;
288 288
289 chip->domain = irq_domain_add_simple(adev->dev.of_node, PL061_GPIO_NR,
290 irq_base, &pl061_domain_ops, chip);
291 if (!chip->domain)
292 return -ENODEV;
293
294 spin_lock_init(&chip->lock); 289 spin_lock_init(&chip->lock);
295 290
296 chip->gc.request = pl061_gpio_request; 291 chip->gc.request = pl061_gpio_request;
@@ -320,6 +315,11 @@ static int pl061_probe(struct amba_device *adev, const struct amba_id *id)
320 irq_set_chained_handler(irq, pl061_irq_handler); 315 irq_set_chained_handler(irq, pl061_irq_handler);
321 irq_set_handler_data(irq, chip); 316 irq_set_handler_data(irq, chip);
322 317
318 chip->domain = irq_domain_add_simple(adev->dev.of_node, PL061_GPIO_NR,
319 irq_base, &pl061_domain_ops, chip);
320 if (!chip->domain)
321 return -ENODEV;
322
323 for (i = 0; i < PL061_GPIO_NR; i++) { 323 for (i = 0; i < PL061_GPIO_NR; i++) {
324 if (pdata) { 324 if (pdata) {
325 if (pdata->directions & (1 << i)) 325 if (pdata->directions & (1 << i))
diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
index d3f15ae93bd3..8b7e719a68c3 100644
--- a/drivers/gpio/gpio-rcar.c
+++ b/drivers/gpio/gpio-rcar.c
@@ -169,7 +169,8 @@ static irqreturn_t gpio_rcar_irq_handler(int irq, void *dev_id)
169 u32 pending; 169 u32 pending;
170 unsigned int offset, irqs_handled = 0; 170 unsigned int offset, irqs_handled = 0;
171 171
172 while ((pending = gpio_rcar_read(p, INTDT))) { 172 while ((pending = gpio_rcar_read(p, INTDT) &
173 gpio_rcar_read(p, INTMSK))) {
173 offset = __ffs(pending); 174 offset = __ffs(pending);
174 gpio_rcar_write(p, INTCLR, BIT(offset)); 175 gpio_rcar_write(p, INTCLR, BIT(offset));
175 generic_handle_irq(irq_find_mapping(p->irq_domain, offset)); 176 generic_handle_irq(irq_find_mapping(p->irq_domain, offset));
@@ -381,7 +382,7 @@ static int gpio_rcar_probe(struct platform_device *pdev)
381 if (!p->irq_domain) { 382 if (!p->irq_domain) {
382 ret = -ENXIO; 383 ret = -ENXIO;
383 dev_err(&pdev->dev, "cannot initialize irq domain\n"); 384 dev_err(&pdev->dev, "cannot initialize irq domain\n");
384 goto err1; 385 goto err0;
385 } 386 }
386 387
387 if (devm_request_irq(&pdev->dev, irq->start, 388 if (devm_request_irq(&pdev->dev, irq->start,
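The handler change makes gpio_rcar_irq_handler() honour the mask register: a latched but masked line is no longer dispatched and can no longer keep the while loop spinning. A short sketch of the fixed loop, with __builtin_ctz() standing in for the kernel's __ffs() and invented register values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t intdt  = 0x0000000a;	/* lines 1 and 3 latched */
	uint32_t intmsk = 0x00000008;	/* only line 3 enabled */
	uint32_t pending;
	unsigned int handled = 0;

	while ((pending = intdt & intmsk)) {
		unsigned int offset = __builtin_ctz(pending); /* lowest pending */

		intdt &= ~(1u << offset);	/* simulate the INTCLR write */
		printf("dispatch irq for gpio %u\n", offset);
		handled++;
	}
	printf("%u irq(s) handled; masked line 1 left latched\n", handled);
	return 0;
}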
diff --git a/drivers/gpio/gpio-tb10x.c b/drivers/gpio/gpio-tb10x.c
index 0502b9a041a5..da071ddbad99 100644
--- a/drivers/gpio/gpio-tb10x.c
+++ b/drivers/gpio/gpio-tb10x.c
@@ -132,6 +132,7 @@ static int tb10x_gpio_direction_out(struct gpio_chip *chip,
132 int mask = BIT(offset); 132 int mask = BIT(offset);
133 int val = TB10X_GPIO_DIR_OUT << offset; 133 int val = TB10X_GPIO_DIR_OUT << offset;
134 134
135 tb10x_gpio_set(chip, offset, value);
135 tb10x_set_bits(tb10x_gpio, OFFSET_TO_REG_DDR, mask, val); 136 tb10x_set_bits(tb10x_gpio, OFFSET_TO_REG_DDR, mask, val);
136 137
137 return 0; 138 return 0;
diff --git a/drivers/gpio/gpio-twl4030.c b/drivers/gpio/gpio-twl4030.c
index 0c7e891c8651..f9996899c1f2 100644
--- a/drivers/gpio/gpio-twl4030.c
+++ b/drivers/gpio/gpio-twl4030.c
@@ -300,7 +300,7 @@ static int twl_direction_in(struct gpio_chip *chip, unsigned offset)
300 if (offset < TWL4030_GPIO_MAX) 300 if (offset < TWL4030_GPIO_MAX)
301 ret = twl4030_set_gpio_direction(offset, 1); 301 ret = twl4030_set_gpio_direction(offset, 1);
302 else 302 else
303 ret = -EINVAL; 303 ret = -EINVAL; /* LED outputs can't be set as input */
304 304
305 if (!ret) 305 if (!ret)
306 priv->direction &= ~BIT(offset); 306 priv->direction &= ~BIT(offset);
@@ -354,17 +354,27 @@ static void twl_set(struct gpio_chip *chip, unsigned offset, int value)
354static int twl_direction_out(struct gpio_chip *chip, unsigned offset, int value) 354static int twl_direction_out(struct gpio_chip *chip, unsigned offset, int value)
355{ 355{
356 struct gpio_twl4030_priv *priv = to_gpio_twl4030(chip); 356 struct gpio_twl4030_priv *priv = to_gpio_twl4030(chip);
357 int ret = 0;
357 358
358 mutex_lock(&priv->mutex); 359 mutex_lock(&priv->mutex);
359 if (offset < TWL4030_GPIO_MAX) 360 if (offset < TWL4030_GPIO_MAX) {
360 twl4030_set_gpio_dataout(offset, value); 361 ret = twl4030_set_gpio_direction(offset, 0);
362 if (ret) {
363 mutex_unlock(&priv->mutex);
364 return ret;
365 }
366 }
367
368 /*
369 * LED gpios i.e. offset >= TWL4030_GPIO_MAX are always output
370 */
361 371
362 priv->direction |= BIT(offset); 372 priv->direction |= BIT(offset);
363 mutex_unlock(&priv->mutex); 373 mutex_unlock(&priv->mutex);
364 374
365 twl_set(chip, offset, value); 375 twl_set(chip, offset, value);
366 376
367 return 0; 377 return ret;
368} 378}
369 379
370static int twl_to_irq(struct gpio_chip *chip, unsigned offset) 380static int twl_to_irq(struct gpio_chip *chip, unsigned offset)
@@ -435,7 +445,8 @@ static int gpio_twl4030_debounce(u32 debounce, u8 mmc_cd)
435 445
436static int gpio_twl4030_remove(struct platform_device *pdev); 446static int gpio_twl4030_remove(struct platform_device *pdev);
437 447
438static struct twl4030_gpio_platform_data *of_gpio_twl4030(struct device *dev) 448static struct twl4030_gpio_platform_data *of_gpio_twl4030(struct device *dev,
449 struct twl4030_gpio_platform_data *pdata)
439{ 450{
440 struct twl4030_gpio_platform_data *omap_twl_info; 451 struct twl4030_gpio_platform_data *omap_twl_info;
441 452
@@ -443,6 +454,9 @@ static struct twl4030_gpio_platform_data *of_gpio_twl4030(struct device *dev)
443 if (!omap_twl_info) 454 if (!omap_twl_info)
444 return NULL; 455 return NULL;
445 456
457 if (pdata)
458 *omap_twl_info = *pdata;
459
446 omap_twl_info->use_leds = of_property_read_bool(dev->of_node, 460 omap_twl_info->use_leds = of_property_read_bool(dev->of_node,
447 "ti,use-leds"); 461 "ti,use-leds");
448 462
@@ -500,7 +514,7 @@ no_irqs:
500 mutex_init(&priv->mutex); 514 mutex_init(&priv->mutex);
501 515
502 if (node) 516 if (node)
503 pdata = of_gpio_twl4030(&pdev->dev); 517 pdata = of_gpio_twl4030(&pdev->dev, pdata);
504 518
505 if (pdata == NULL) { 519 if (pdata == NULL) {
506 dev_err(&pdev->dev, "Platform data is missing\n"); 520 dev_err(&pdev->dev, "Platform data is missing\n");
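Passing the existing platform data into of_gpio_twl4030() matters on boards that still supply legacy pdata: the DT-built structure now starts life as a copy of it, so fields the device tree does not describe are preserved instead of being silently zeroed. A simplified sketch of that merge, with the structure cut down to two hypothetical fields:

#include <stdio.h>
#include <stdbool.h>

struct twl_gpio_pdata {
	unsigned int pullups;	/* only known to the legacy board file */
	bool use_leds;		/* described by the "ti,use-leds" DT property */
};

static struct twl_gpio_pdata of_parse(const struct twl_gpio_pdata *pdata,
				      bool dt_use_leds)
{
	struct twl_gpio_pdata info = { 0 };

	if (pdata)
		info = *pdata;		/* keep the board-file defaults */
	info.use_leds = dt_use_leds;	/* override only what DT provides */
	return info;
}

int main(void)
{
	struct twl_gpio_pdata legacy = { .pullups = 0x0f, .use_leds = false };
	struct twl_gpio_pdata merged = of_parse(&legacy, true);

	printf("pullups=0x%x use_leds=%d\n", merged.pullups, merged.use_leds);
	return 0;
}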
diff --git a/drivers/gpio/gpio-ucb1400.c b/drivers/gpio/gpio-ucb1400.c
index 1a605f2a0f55..06fb5cf99ded 100644
--- a/drivers/gpio/gpio-ucb1400.c
+++ b/drivers/gpio/gpio-ucb1400.c
@@ -105,3 +105,4 @@ module_platform_driver(ucb1400_gpio_driver);
105 105
106MODULE_DESCRIPTION("Philips UCB1400 GPIO driver"); 106MODULE_DESCRIPTION("Philips UCB1400 GPIO driver");
107MODULE_LICENSE("GPL"); 107MODULE_LICENSE("GPL");
108MODULE_ALIAS("platform:ucb1400_gpio");
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 4e10b10d3ddd..85f772c0b26a 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -14,6 +14,7 @@
14#include <linux/idr.h> 14#include <linux/idr.h>
15#include <linux/slab.h> 15#include <linux/slab.h>
16#include <linux/acpi.h> 16#include <linux/acpi.h>
17#include <linux/gpio/driver.h>
17 18
18#define CREATE_TRACE_POINTS 19#define CREATE_TRACE_POINTS
19#include <trace/events/gpio.h> 20#include <trace/events/gpio.h>
@@ -1308,6 +1309,18 @@ struct gpio_chip *gpiochip_find(void *data,
1308} 1309}
1309EXPORT_SYMBOL_GPL(gpiochip_find); 1310EXPORT_SYMBOL_GPL(gpiochip_find);
1310 1311
1312static int gpiochip_match_name(struct gpio_chip *chip, void *data)
1313{
1314 const char *name = data;
1315
1316 return !strcmp(chip->label, name);
1317}
1318
1319static struct gpio_chip *find_chip_by_name(const char *name)
1320{
1321 return gpiochip_find((void *)name, gpiochip_match_name);
1322}
1323
1311#ifdef CONFIG_PINCTRL 1324#ifdef CONFIG_PINCTRL
1312 1325
1313/** 1326/**
@@ -1341,8 +1354,10 @@ int gpiochip_add_pingroup_range(struct gpio_chip *chip,
1341 ret = pinctrl_get_group_pins(pctldev, pin_group, 1354 ret = pinctrl_get_group_pins(pctldev, pin_group,
1342 &pin_range->range.pins, 1355 &pin_range->range.pins,
1343 &pin_range->range.npins); 1356 &pin_range->range.npins);
1344 if (ret < 0) 1357 if (ret < 0) {
1358 kfree(pin_range);
1345 return ret; 1359 return ret;
1360 }
1346 1361
1347 pinctrl_add_gpio_range(pctldev, &pin_range->range); 1362 pinctrl_add_gpio_range(pctldev, &pin_range->range);
1348 1363
@@ -2260,26 +2275,10 @@ void gpiod_add_table(struct gpiod_lookup *table, size_t size)
2260 mutex_unlock(&gpio_lookup_lock); 2275 mutex_unlock(&gpio_lookup_lock);
2261} 2276}
2262 2277
2263/*
2264 * Caller must have a acquired gpio_lookup_lock
2265 */
2266static struct gpio_chip *find_chip_by_name(const char *name)
2267{
2268 struct gpio_chip *chip = NULL;
2269
2270 list_for_each_entry(chip, &gpio_lookup_list, list) {
2271 if (chip->label == NULL)
2272 continue;
2273 if (!strcmp(chip->label, name))
2274 break;
2275 }
2276
2277 return chip;
2278}
2279
2280#ifdef CONFIG_OF 2278#ifdef CONFIG_OF
2281static struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id, 2279static struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id,
2282 unsigned int idx, unsigned long *flags) 2280 unsigned int idx,
2281 enum gpio_lookup_flags *flags)
2283{ 2282{
2284 char prop_name[32]; /* 32 is max size of property name */ 2283 char prop_name[32]; /* 32 is max size of property name */
2285 enum of_gpio_flags of_flags; 2284 enum of_gpio_flags of_flags;
@@ -2297,20 +2296,22 @@ static struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id,
2297 return desc; 2296 return desc;
2298 2297
2299 if (of_flags & OF_GPIO_ACTIVE_LOW) 2298 if (of_flags & OF_GPIO_ACTIVE_LOW)
2300 *flags |= GPIOF_ACTIVE_LOW; 2299 *flags |= GPIO_ACTIVE_LOW;
2301 2300
2302 return desc; 2301 return desc;
2303} 2302}
2304#else 2303#else
2305static struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id, 2304static struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id,
2306 unsigned int idx, unsigned long *flags) 2305 unsigned int idx,
2306 enum gpio_lookup_flags *flags)
2307{ 2307{
2308 return ERR_PTR(-ENODEV); 2308 return ERR_PTR(-ENODEV);
2309} 2309}
2310#endif 2310#endif
2311 2311
2312static struct gpio_desc *acpi_find_gpio(struct device *dev, const char *con_id, 2312static struct gpio_desc *acpi_find_gpio(struct device *dev, const char *con_id,
2313 unsigned int idx, unsigned long *flags) 2313 unsigned int idx,
2314 enum gpio_lookup_flags *flags)
2314{ 2315{
2315 struct acpi_gpio_info info; 2316 struct acpi_gpio_info info;
2316 struct gpio_desc *desc; 2317 struct gpio_desc *desc;
@@ -2320,13 +2321,14 @@ static struct gpio_desc *acpi_find_gpio(struct device *dev, const char *con_id,
2320 return desc; 2321 return desc;
2321 2322
2322 if (info.gpioint && info.active_low) 2323 if (info.gpioint && info.active_low)
2323 *flags |= GPIOF_ACTIVE_LOW; 2324 *flags |= GPIO_ACTIVE_LOW;
2324 2325
2325 return desc; 2326 return desc;
2326} 2327}
2327 2328
2328static struct gpio_desc *gpiod_find(struct device *dev, const char *con_id, 2329static struct gpio_desc *gpiod_find(struct device *dev, const char *con_id,
2329 unsigned int idx, unsigned long *flags) 2330 unsigned int idx,
2331 enum gpio_lookup_flags *flags)
2330{ 2332{
2331 const char *dev_id = dev ? dev_name(dev) : NULL; 2333 const char *dev_id = dev ? dev_name(dev) : NULL;
2332 struct gpio_desc *desc = ERR_PTR(-ENODEV); 2334 struct gpio_desc *desc = ERR_PTR(-ENODEV);
@@ -2366,7 +2368,7 @@ static struct gpio_desc *gpiod_find(struct device *dev, const char *con_id,
2366 continue; 2368 continue;
2367 } 2369 }
2368 2370
2369 if (chip->ngpio >= p->chip_hwnum) { 2371 if (chip->ngpio <= p->chip_hwnum) {
2370 dev_warn(dev, "GPIO chip %s has %d GPIOs\n", 2372 dev_warn(dev, "GPIO chip %s has %d GPIOs\n",
2371 chip->label, chip->ngpio); 2373 chip->label, chip->ngpio);
2372 continue; 2374 continue;
@@ -2416,9 +2418,9 @@ struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
2416 const char *con_id, 2418 const char *con_id,
2417 unsigned int idx) 2419 unsigned int idx)
2418{ 2420{
2419 struct gpio_desc *desc; 2421 struct gpio_desc *desc = NULL;
2420 int status; 2422 int status;
2421 unsigned long flags = 0; 2423 enum gpio_lookup_flags flags = 0;
2422 2424
2423 dev_dbg(dev, "GPIO lookup for consumer %s\n", con_id); 2425 dev_dbg(dev, "GPIO lookup for consumer %s\n", con_id);
2424 2426
@@ -2429,13 +2431,23 @@ struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
2429 } else if (IS_ENABLED(CONFIG_ACPI) && dev && ACPI_HANDLE(dev)) { 2431 } else if (IS_ENABLED(CONFIG_ACPI) && dev && ACPI_HANDLE(dev)) {
2430 dev_dbg(dev, "using ACPI for GPIO lookup\n"); 2432 dev_dbg(dev, "using ACPI for GPIO lookup\n");
2431 desc = acpi_find_gpio(dev, con_id, idx, &flags); 2433 desc = acpi_find_gpio(dev, con_id, idx, &flags);
2432 } else { 2434 }
2435
2436 /*
2437 * Either we are not using DT or ACPI, or their lookup did not return
2438 * a result. In that case, use platform lookup as a fallback.
2439 */
2440 if (!desc || IS_ERR(desc)) {
2441 struct gpio_desc *pdesc;
2433 dev_dbg(dev, "using lookup tables for GPIO lookup"); 2442 dev_dbg(dev, "using lookup tables for GPIO lookup");
2434 desc = gpiod_find(dev, con_id, idx, &flags); 2443 pdesc = gpiod_find(dev, con_id, idx, &flags);
2444 /* If used as fallback, do not replace the previous error */
2445 if (!IS_ERR(pdesc) || !desc)
2446 desc = pdesc;
2435 } 2447 }
2436 2448
2437 if (IS_ERR(desc)) { 2449 if (IS_ERR(desc)) {
2438 dev_warn(dev, "lookup for GPIO %s failed\n", con_id); 2450 dev_dbg(dev, "lookup for GPIO %s failed\n", con_id);
2439 return desc; 2451 return desc;
2440 } 2452 }
2441 2453
@@ -2444,8 +2456,12 @@ struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
2444 if (status < 0) 2456 if (status < 0)
2445 return ERR_PTR(status); 2457 return ERR_PTR(status);
2446 2458
2447 if (flags & GPIOF_ACTIVE_LOW) 2459 if (flags & GPIO_ACTIVE_LOW)
2448 set_bit(FLAG_ACTIVE_LOW, &desc->flags); 2460 set_bit(FLAG_ACTIVE_LOW, &desc->flags);
2461 if (flags & GPIO_OPEN_DRAIN)
2462 set_bit(FLAG_OPEN_DRAIN, &desc->flags);
2463 if (flags & GPIO_OPEN_SOURCE)
2464 set_bit(FLAG_OPEN_SOURCE, &desc->flags);
2449 2465
2450 return desc; 2466 return desc;
2451} 2467}
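find_chip_by_name() used to walk gpio_lookup_list by hand; the patch reimplements it on top of gpiochip_find() with a label-matching callback, so the same chip lookup is reused for the platform GPIO tables. The callback pattern is easy to show in isolation; this sketch replaces the kernel's chip registry with a plain array and simplified types:

#include <stdio.h>
#include <string.h>

struct gpio_chip {
	const char *label;
};

static struct gpio_chip chips[] = {
	{ "tps65910-gpio" }, { "omap-gpio" }, { "twl4030-gpio" },
};

/* Walk the "registered" chips and return the first one the callback accepts,
 * mimicking gpiochip_find()'s contract. */
static struct gpio_chip *gpiochip_find(void *data,
		int (*match)(struct gpio_chip *chip, void *data))
{
	for (size_t i = 0; i < sizeof(chips) / sizeof(chips[0]); i++)
		if (match(&chips[i], data))
			return &chips[i];
	return NULL;
}

static int gpiochip_match_name(struct gpio_chip *chip, void *data)
{
	return !strcmp(chip->label, (const char *)data);
}

int main(void)
{
	struct gpio_chip *chip =
		gpiochip_find((void *)"omap-gpio", gpiochip_match_name);

	printf("%s\n", chip ? chip->label : "not found");
	return 0;
}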
diff --git a/drivers/gpu/drm/armada/armada_drm.h b/drivers/gpu/drm/armada/armada_drm.h
index eef09ec9a5ff..a72cae03b99b 100644
--- a/drivers/gpu/drm/armada/armada_drm.h
+++ b/drivers/gpu/drm/armada/armada_drm.h
@@ -103,6 +103,7 @@ void armada_drm_queue_unref_work(struct drm_device *,
103extern const struct drm_mode_config_funcs armada_drm_mode_config_funcs; 103extern const struct drm_mode_config_funcs armada_drm_mode_config_funcs;
104 104
105int armada_fbdev_init(struct drm_device *); 105int armada_fbdev_init(struct drm_device *);
106void armada_fbdev_lastclose(struct drm_device *);
106void armada_fbdev_fini(struct drm_device *); 107void armada_fbdev_fini(struct drm_device *);
107 108
108int armada_overlay_plane_create(struct drm_device *, unsigned long); 109int armada_overlay_plane_create(struct drm_device *, unsigned long);
diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c
index 4f2b28354915..62d0ff3efddf 100644
--- a/drivers/gpu/drm/armada/armada_drv.c
+++ b/drivers/gpu/drm/armada/armada_drv.c
@@ -321,6 +321,11 @@ static struct drm_ioctl_desc armada_ioctls[] = {
321 DRM_UNLOCKED), 321 DRM_UNLOCKED),
322}; 322};
323 323
324static void armada_drm_lastclose(struct drm_device *dev)
325{
326 armada_fbdev_lastclose(dev);
327}
328
324static const struct file_operations armada_drm_fops = { 329static const struct file_operations armada_drm_fops = {
325 .owner = THIS_MODULE, 330 .owner = THIS_MODULE,
326 .llseek = no_llseek, 331 .llseek = no_llseek,
@@ -337,7 +342,7 @@ static struct drm_driver armada_drm_driver = {
337 .open = NULL, 342 .open = NULL,
338 .preclose = NULL, 343 .preclose = NULL,
339 .postclose = NULL, 344 .postclose = NULL,
340 .lastclose = NULL, 345 .lastclose = armada_drm_lastclose,
341 .unload = armada_drm_unload, 346 .unload = armada_drm_unload,
342 .get_vblank_counter = drm_vblank_count, 347 .get_vblank_counter = drm_vblank_count,
343 .enable_vblank = armada_drm_enable_vblank, 348 .enable_vblank = armada_drm_enable_vblank,
diff --git a/drivers/gpu/drm/armada/armada_fbdev.c b/drivers/gpu/drm/armada/armada_fbdev.c
index dd5ea77dac96..948cb14c561e 100644
--- a/drivers/gpu/drm/armada/armada_fbdev.c
+++ b/drivers/gpu/drm/armada/armada_fbdev.c
@@ -105,9 +105,9 @@ static int armada_fb_create(struct drm_fb_helper *fbh,
105 drm_fb_helper_fill_fix(info, dfb->fb.pitches[0], dfb->fb.depth); 105 drm_fb_helper_fill_fix(info, dfb->fb.pitches[0], dfb->fb.depth);
106 drm_fb_helper_fill_var(info, fbh, sizes->fb_width, sizes->fb_height); 106 drm_fb_helper_fill_var(info, fbh, sizes->fb_width, sizes->fb_height);
107 107
108 DRM_DEBUG_KMS("allocated %dx%d %dbpp fb: 0x%08x\n", 108 DRM_DEBUG_KMS("allocated %dx%d %dbpp fb: 0x%08llx\n",
109 dfb->fb.width, dfb->fb.height, 109 dfb->fb.width, dfb->fb.height, dfb->fb.bits_per_pixel,
110 dfb->fb.bits_per_pixel, obj->phys_addr); 110 (unsigned long long)obj->phys_addr);
111 111
112 return 0; 112 return 0;
113 113
@@ -177,6 +177,16 @@ int armada_fbdev_init(struct drm_device *dev)
177 return ret; 177 return ret;
178} 178}
179 179
180void armada_fbdev_lastclose(struct drm_device *dev)
181{
182 struct armada_private *priv = dev->dev_private;
183
184 drm_modeset_lock_all(dev);
185 if (priv->fbdev)
186 drm_fb_helper_restore_fbdev_mode(priv->fbdev);
187 drm_modeset_unlock_all(dev);
188}
189
180void armada_fbdev_fini(struct drm_device *dev) 190void armada_fbdev_fini(struct drm_device *dev)
181{ 191{
182 struct armada_private *priv = dev->dev_private; 192 struct armada_private *priv = dev->dev_private;
@@ -192,11 +202,11 @@ void armada_fbdev_fini(struct drm_device *dev)
192 framebuffer_release(info); 202 framebuffer_release(info);
193 } 203 }
194 204
205 drm_fb_helper_fini(fbh);
206
195 if (fbh->fb) 207 if (fbh->fb)
196 fbh->fb->funcs->destroy(fbh->fb); 208 fbh->fb->funcs->destroy(fbh->fb);
197 209
198 drm_fb_helper_fini(fbh);
199
200 priv->fbdev = NULL; 210 priv->fbdev = NULL;
201 } 211 }
202} 212}
diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c
index 9f2356bae7fd..887816f43476 100644
--- a/drivers/gpu/drm/armada/armada_gem.c
+++ b/drivers/gpu/drm/armada/armada_gem.c
@@ -172,8 +172,9 @@ armada_gem_linear_back(struct drm_device *dev, struct armada_gem_object *obj)
172 obj->dev_addr = obj->linear->start; 172 obj->dev_addr = obj->linear->start;
173 } 173 }
174 174
175 DRM_DEBUG_DRIVER("obj %p phys %#x dev %#x\n", 175 DRM_DEBUG_DRIVER("obj %p phys %#llx dev %#llx\n", obj,
176 obj, obj->phys_addr, obj->dev_addr); 176 (unsigned long long)obj->phys_addr,
177 (unsigned long long)obj->dev_addr);
177 178
178 return 0; 179 return 0;
179} 180}
@@ -557,7 +558,6 @@ armada_gem_prime_import(struct drm_device *dev, struct dma_buf *buf)
557 * refcount on the gem object itself. 558 * refcount on the gem object itself.
558 */ 559 */
559 drm_gem_object_reference(obj); 560 drm_gem_object_reference(obj);
560 dma_buf_put(buf);
561 return obj; 561 return obj;
562 } 562 }
563 } 563 }
@@ -573,6 +573,7 @@ armada_gem_prime_import(struct drm_device *dev, struct dma_buf *buf)
573 } 573 }
574 574
575 dobj->obj.import_attach = attach; 575 dobj->obj.import_attach = attach;
576 get_dma_buf(buf);
576 577
577 /* 578 /*
578 * Don't call dma_buf_map_attachment() here - it maps the 579 * Don't call dma_buf_map_attachment() here - it maps the
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index fb7cf0e796f6..8835dcddfac3 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -68,6 +68,8 @@
68#define EDID_QUIRK_DETAILED_SYNC_PP (1 << 6) 68#define EDID_QUIRK_DETAILED_SYNC_PP (1 << 6)
69/* Force reduced-blanking timings for detailed modes */ 69/* Force reduced-blanking timings for detailed modes */
70#define EDID_QUIRK_FORCE_REDUCED_BLANKING (1 << 7) 70#define EDID_QUIRK_FORCE_REDUCED_BLANKING (1 << 7)
71/* Force 8bpc */
72#define EDID_QUIRK_FORCE_8BPC (1 << 8)
71 73
72struct detailed_mode_closure { 74struct detailed_mode_closure {
73 struct drm_connector *connector; 75 struct drm_connector *connector;
@@ -128,6 +130,9 @@ static struct edid_quirk {
128 130
129 /* Medion MD 30217 PG */ 131 /* Medion MD 30217 PG */
130 { "MED", 0x7b8, EDID_QUIRK_PREFER_LARGE_75 }, 132 { "MED", 0x7b8, EDID_QUIRK_PREFER_LARGE_75 },
133
134 /* Panel in Samsung NP700G7A-S01PL notebook reports 6bpc */
135 { "SEC", 0xd033, EDID_QUIRK_FORCE_8BPC },
131}; 136};
132 137
133/* 138/*
@@ -2674,7 +2679,7 @@ static int add_3d_struct_modes(struct drm_connector *connector, u16 structure,
2674 int modes = 0; 2679 int modes = 0;
2675 u8 cea_mode; 2680 u8 cea_mode;
2676 2681
2677 if (video_db == NULL || video_index > video_len) 2682 if (video_db == NULL || video_index >= video_len)
2678 return 0; 2683 return 0;
2679 2684
2680 /* CEA modes are numbered 1..127 */ 2685 /* CEA modes are numbered 1..127 */
@@ -2701,7 +2706,7 @@ static int add_3d_struct_modes(struct drm_connector *connector, u16 structure,
2701 if (structure & (1 << 8)) { 2706 if (structure & (1 << 8)) {
2702 newmode = drm_mode_duplicate(dev, &edid_cea_modes[cea_mode]); 2707 newmode = drm_mode_duplicate(dev, &edid_cea_modes[cea_mode]);
2703 if (newmode) { 2708 if (newmode) {
2704 newmode->flags = DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF; 2709 newmode->flags |= DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF;
2705 drm_mode_probed_add(connector, newmode); 2710 drm_mode_probed_add(connector, newmode);
2706 modes++; 2711 modes++;
2707 } 2712 }
@@ -3435,6 +3440,9 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
3435 3440
3436 drm_add_display_info(edid, &connector->display_info); 3441 drm_add_display_info(edid, &connector->display_info);
3437 3442
3443 if (quirks & EDID_QUIRK_FORCE_8BPC)
3444 connector->display_info.bpc = 8;
3445
3438 return num_modes; 3446 return num_modes;
3439} 3447}
3440EXPORT_SYMBOL(drm_add_edid_modes); 3448EXPORT_SYMBOL(drm_add_edid_modes);
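Two of the drm_edid.c hunks are subtle: add_3d_struct_modes() treated an index equal to the VIC list length as valid, reading one byte past the end, and it overwrote rather than OR-ed the new mode's flags. The bounds part is easy to demonstrate on its own; the table below is hypothetical:

#include <stdio.h>

/* Guard as in the fixed code: an index equal to the length is out of range. */
static int lookup(const unsigned char *db, int len, int idx)
{
	if (db == NULL || idx >= len)	/* was "idx > len" before the fix */
		return -1;
	return db[idx];
}

int main(void)
{
	unsigned char video_db[4] = { 16, 4, 31, 19 };

	printf("index 3 -> %d\n", lookup(video_db, 4, 3));	/* last entry */
	printf("index 4 -> %d\n", lookup(video_db, 4, 4));	/* past the end */
	return 0;
}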
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index f53d5246979c..66dd3a001cf1 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -566,11 +566,11 @@ err_unload:
566 if (dev->driver->unload) 566 if (dev->driver->unload)
567 dev->driver->unload(dev); 567 dev->driver->unload(dev);
568err_primary_node: 568err_primary_node:
569 drm_put_minor(dev->primary); 569 drm_unplug_minor(dev->primary);
570err_render_node: 570err_render_node:
571 drm_put_minor(dev->render); 571 drm_unplug_minor(dev->render);
572err_control_node: 572err_control_node:
573 drm_put_minor(dev->control); 573 drm_unplug_minor(dev->control);
574err_agp: 574err_agp:
575 if (dev->driver->bus->agp_destroy) 575 if (dev->driver->bus->agp_destroy)
576 dev->driver->bus->agp_destroy(dev); 576 dev->driver->bus->agp_destroy(dev);
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index bd2bca395792..c22c3097c3e8 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -516,7 +516,7 @@ int drm_sysfs_device_add(struct drm_minor *minor)
516 minor_str = "card%d"; 516 minor_str = "card%d";
517 517
518 minor->kdev = kzalloc(sizeof(*minor->kdev), GFP_KERNEL); 518 minor->kdev = kzalloc(sizeof(*minor->kdev), GFP_KERNEL);
519 if (!minor->dev) { 519 if (!minor->kdev) {
520 r = -ENOMEM; 520 r = -ENOMEM;
521 goto error; 521 goto error;
522 } 522 }
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index b676006a95a0..22b8f5eced80 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -173,28 +173,37 @@ static int exynos_drm_open(struct drm_device *dev, struct drm_file *file)
173static void exynos_drm_preclose(struct drm_device *dev, 173static void exynos_drm_preclose(struct drm_device *dev,
174 struct drm_file *file) 174 struct drm_file *file)
175{ 175{
176 exynos_drm_subdrv_close(dev, file);
177}
178
179static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file)
180{
176 struct exynos_drm_private *private = dev->dev_private; 181 struct exynos_drm_private *private = dev->dev_private;
177 struct drm_pending_vblank_event *e, *t; 182 struct drm_pending_vblank_event *v, *vt;
183 struct drm_pending_event *e, *et;
178 unsigned long flags; 184 unsigned long flags;
179 185
180 /* release events of current file */ 186 if (!file->driver_priv)
187 return;
188
189 /* Release all events not unhandled by page flip handler. */
181 spin_lock_irqsave(&dev->event_lock, flags); 190 spin_lock_irqsave(&dev->event_lock, flags);
182 list_for_each_entry_safe(e, t, &private->pageflip_event_list, 191 list_for_each_entry_safe(v, vt, &private->pageflip_event_list,
183 base.link) { 192 base.link) {
184 if (e->base.file_priv == file) { 193 if (v->base.file_priv == file) {
185 list_del(&e->base.link); 194 list_del(&v->base.link);
186 e->base.destroy(&e->base); 195 drm_vblank_put(dev, v->pipe);
196 v->base.destroy(&v->base);
187 } 197 }
188 } 198 }
189 spin_unlock_irqrestore(&dev->event_lock, flags);
190 199
191 exynos_drm_subdrv_close(dev, file); 200 /* Release all events handled by page flip handler but not freed. */
192} 201 list_for_each_entry_safe(e, et, &file->event_list, link) {
202 list_del(&e->link);
203 e->destroy(e);
204 }
205 spin_unlock_irqrestore(&dev->event_lock, flags);
193 206
194static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file)
195{
196 if (!file->driver_priv)
197 return;
198 207
199 kfree(file->driver_priv); 208 kfree(file->driver_priv);
200 file->driver_priv = NULL; 209 file->driver_priv = NULL;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 23da72b5eae9..a61878bf5dcd 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -31,7 +31,7 @@
31#include "exynos_drm_iommu.h" 31#include "exynos_drm_iommu.h"
32 32
33/* 33/*
34 * FIMD is stand for Fully Interactive Mobile Display and 34 * FIMD stands for Fully Interactive Mobile Display and
35 * as a display controller, it transfers contents drawn on memory 35 * as a display controller, it transfers contents drawn on memory
36 * to a LCD Panel through Display Interfaces such as RGB or 36 * to a LCD Panel through Display Interfaces such as RGB or
37 * CPU Interface. 37 * CPU Interface.
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 0cab2d045135..5c648425c1e0 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -83,6 +83,14 @@ void i915_update_dri1_breadcrumb(struct drm_device *dev)
83 drm_i915_private_t *dev_priv = dev->dev_private; 83 drm_i915_private_t *dev_priv = dev->dev_private;
84 struct drm_i915_master_private *master_priv; 84 struct drm_i915_master_private *master_priv;
85 85
86 /*
87 * The dri breadcrumb update races against the drm master disappearing.
88 * Instead of trying to fix this (this is by far not the only ums issue)
89 * just don't do the update in kms mode.
90 */
91 if (drm_core_check_feature(dev, DRIVER_MODESET))
92 return;
93
86 if (dev->primary->master) { 94 if (dev->primary->master) {
87 master_priv = dev->primary->master->driver_priv; 95 master_priv = dev->primary->master->driver_priv;
88 if (master_priv->sarea_priv) 96 if (master_priv->sarea_priv)
@@ -1490,16 +1498,9 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1490 spin_lock_init(&dev_priv->uncore.lock); 1498 spin_lock_init(&dev_priv->uncore.lock);
1491 spin_lock_init(&dev_priv->mm.object_stat_lock); 1499 spin_lock_init(&dev_priv->mm.object_stat_lock);
1492 mutex_init(&dev_priv->dpio_lock); 1500 mutex_init(&dev_priv->dpio_lock);
1493 mutex_init(&dev_priv->rps.hw_lock);
1494 mutex_init(&dev_priv->modeset_restore_lock); 1501 mutex_init(&dev_priv->modeset_restore_lock);
1495 1502
1496 mutex_init(&dev_priv->pc8.lock); 1503 intel_pm_setup(dev);
1497 dev_priv->pc8.requirements_met = false;
1498 dev_priv->pc8.gpu_idle = false;
1499 dev_priv->pc8.irqs_disabled = false;
1500 dev_priv->pc8.enabled = false;
1501 dev_priv->pc8.disable_count = 2; /* requirements_met + gpu_idle */
1502 INIT_DELAYED_WORK(&dev_priv->pc8.enable_work, hsw_enable_pc8_work);
1503 1504
1504 intel_display_crc_init(dev); 1505 intel_display_crc_init(dev);
1505 1506
@@ -1603,7 +1604,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1603 } 1604 }
1604 1605
1605 intel_irq_init(dev); 1606 intel_irq_init(dev);
1606 intel_pm_init(dev);
1607 intel_uncore_sanitize(dev); 1607 intel_uncore_sanitize(dev);
1608 1608
1609 /* Try to make sure MCHBAR is enabled before poking at it */ 1609 /* Try to make sure MCHBAR is enabled before poking at it */
@@ -1848,8 +1848,10 @@ void i915_driver_lastclose(struct drm_device * dev)
1848 1848
1849void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv) 1849void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
1850{ 1850{
1851 mutex_lock(&dev->struct_mutex);
1851 i915_gem_context_close(dev, file_priv); 1852 i915_gem_context_close(dev, file_priv);
1852 i915_gem_release(dev, file_priv); 1853 i915_gem_release(dev, file_priv);
1854 mutex_unlock(&dev->struct_mutex);
1853} 1855}
1854 1856
1855void i915_driver_postclose(struct drm_device *dev, struct drm_file *file) 1857void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 989be12cdd6e..5b7b7e06cb3a 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -534,8 +534,10 @@ static int i915_drm_freeze(struct drm_device *dev)
534 * Disable CRTCs directly since we want to preserve sw state 534 * Disable CRTCs directly since we want to preserve sw state
535 * for _thaw. 535 * for _thaw.
536 */ 536 */
537 mutex_lock(&dev->mode_config.mutex);
537 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) 538 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
538 dev_priv->display.crtc_disable(crtc); 539 dev_priv->display.crtc_disable(crtc);
540 mutex_unlock(&dev->mode_config.mutex);
539 541
540 intel_modeset_suspend_hw(dev); 542 intel_modeset_suspend_hw(dev);
541 } 543 }
@@ -649,6 +651,7 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
649 intel_modeset_init_hw(dev); 651 intel_modeset_init_hw(dev);
650 652
651 drm_modeset_lock_all(dev); 653 drm_modeset_lock_all(dev);
654 drm_mode_config_reset(dev);
652 intel_modeset_setup_hw_state(dev, true); 655 intel_modeset_setup_hw_state(dev, true);
653 drm_modeset_unlock_all(dev); 656 drm_modeset_unlock_all(dev);
654 657
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index ccdbecca070d..90fcccba17b0 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1755,8 +1755,13 @@ struct drm_i915_file_private {
1755#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile) 1755#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
1756#define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \ 1756#define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \
1757 ((dev)->pdev->device & 0xFF00) == 0x0C00) 1757 ((dev)->pdev->device & 0xFF00) == 0x0C00)
1758#define IS_ULT(dev) (IS_HASWELL(dev) && \ 1758#define IS_BDW_ULT(dev) (IS_BROADWELL(dev) && \
1759 (((dev)->pdev->device & 0xf) == 0x2 || \
1760 ((dev)->pdev->device & 0xf) == 0x6 || \
1761 ((dev)->pdev->device & 0xf) == 0xe))
1762#define IS_HSW_ULT(dev) (IS_HASWELL(dev) && \
1759 ((dev)->pdev->device & 0xFF00) == 0x0A00) 1763 ((dev)->pdev->device & 0xFF00) == 0x0A00)
1764#define IS_ULT(dev) (IS_HSW_ULT(dev) || IS_BDW_ULT(dev))
1760#define IS_HSW_GT3(dev) (IS_HASWELL(dev) && \ 1765#define IS_HSW_GT3(dev) (IS_HASWELL(dev) && \
1761 ((dev)->pdev->device & 0x00F0) == 0x0020) 1766 ((dev)->pdev->device & 0x00F0) == 0x0020)
1762#define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary) 1767#define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary)
@@ -1901,9 +1906,7 @@ void i915_queue_hangcheck(struct drm_device *dev);
1901void i915_handle_error(struct drm_device *dev, bool wedged); 1906void i915_handle_error(struct drm_device *dev, bool wedged);
1902 1907
1903extern void intel_irq_init(struct drm_device *dev); 1908extern void intel_irq_init(struct drm_device *dev);
1904extern void intel_pm_init(struct drm_device *dev);
1905extern void intel_hpd_init(struct drm_device *dev); 1909extern void intel_hpd_init(struct drm_device *dev);
1906extern void intel_pm_init(struct drm_device *dev);
1907 1910
1908extern void intel_uncore_sanitize(struct drm_device *dev); 1911extern void intel_uncore_sanitize(struct drm_device *dev);
1909extern void intel_uncore_early_sanitize(struct drm_device *dev); 1912extern void intel_uncore_early_sanitize(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 12bbd5eac70d..76d3d1ab73c6 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2343,15 +2343,24 @@ static void i915_gem_free_request(struct drm_i915_gem_request *request)
2343 kfree(request); 2343 kfree(request);
2344} 2344}
2345 2345
2346static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv, 2346static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
2347 struct intel_ring_buffer *ring) 2347 struct intel_ring_buffer *ring)
2348{ 2348{
2349 u32 completed_seqno; 2349 u32 completed_seqno = ring->get_seqno(ring, false);
2350 u32 acthd; 2350 u32 acthd = intel_ring_get_active_head(ring);
2351 struct drm_i915_gem_request *request;
2352
2353 list_for_each_entry(request, &ring->request_list, list) {
2354 if (i915_seqno_passed(completed_seqno, request->seqno))
2355 continue;
2351 2356
2352 acthd = intel_ring_get_active_head(ring); 2357 i915_set_reset_status(ring, request, acthd);
2353 completed_seqno = ring->get_seqno(ring, false); 2358 }
2359}
2354 2360
2361static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
2362 struct intel_ring_buffer *ring)
2363{
2355 while (!list_empty(&ring->request_list)) { 2364 while (!list_empty(&ring->request_list)) {
2356 struct drm_i915_gem_request *request; 2365 struct drm_i915_gem_request *request;
2357 2366
@@ -2359,9 +2368,6 @@ static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
2359 struct drm_i915_gem_request, 2368 struct drm_i915_gem_request,
2360 list); 2369 list);
2361 2370
2362 if (request->seqno > completed_seqno)
2363 i915_set_reset_status(ring, request, acthd);
2364
2365 i915_gem_free_request(request); 2371 i915_gem_free_request(request);
2366 } 2372 }
2367 2373
@@ -2403,8 +2409,16 @@ void i915_gem_reset(struct drm_device *dev)
2403 struct intel_ring_buffer *ring; 2409 struct intel_ring_buffer *ring;
2404 int i; 2410 int i;
2405 2411
2412 /*
2413 * Before we free the objects from the requests, we need to inspect
2414 * them for finding the guilty party. As the requests only borrow
2415 * their reference to the objects, the inspection must be done first.
2416 */
2417 for_each_ring(ring, dev_priv, i)
2418 i915_gem_reset_ring_status(dev_priv, ring);
2419
2406 for_each_ring(ring, dev_priv, i) 2420 for_each_ring(ring, dev_priv, i)
2407 i915_gem_reset_ring_lists(dev_priv, ring); 2421 i915_gem_reset_ring_cleanup(dev_priv, ring);
2408 2422
2409 i915_gem_cleanup_ringbuffer(dev); 2423 i915_gem_cleanup_ringbuffer(dev);
2410 2424
@@ -4442,10 +4456,9 @@ i915_gem_init_hw(struct drm_device *dev)
4442 if (dev_priv->ellc_size) 4456 if (dev_priv->ellc_size)
4443 I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf)); 4457 I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
4444 4458
4445 if (IS_HSW_GT3(dev)) 4459 if (IS_HASWELL(dev))
4446 I915_WRITE(MI_PREDICATE_RESULT_2, LOWER_SLICE_ENABLED); 4460 I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev) ?
4447 else 4461 LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
4448 I915_WRITE(MI_PREDICATE_RESULT_2, LOWER_SLICE_DISABLED);
4449 4462
4450 if (HAS_PCH_NOP(dev)) { 4463 if (HAS_PCH_NOP(dev)) {
4451 u32 temp = I915_READ(GEN7_MSG_CTL); 4464 u32 temp = I915_READ(GEN7_MSG_CTL);
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 72a3df32292f..b0f42b9ca037 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -347,10 +347,8 @@ void i915_gem_context_close(struct drm_device *dev, struct drm_file *file)
347{ 347{
348 struct drm_i915_file_private *file_priv = file->driver_priv; 348 struct drm_i915_file_private *file_priv = file->driver_priv;
349 349
350 mutex_lock(&dev->struct_mutex);
351 idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL); 350 idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
352 idr_destroy(&file_priv->context_idr); 351 idr_destroy(&file_priv->context_idr);
353 mutex_unlock(&dev->struct_mutex);
354} 352}
355 353
356static struct i915_hw_context * 354static struct i915_hw_context *
@@ -423,11 +421,21 @@ static int do_switch(struct i915_hw_context *to)
423 if (ret) 421 if (ret)
424 return ret; 422 return ret;
425 423
426 /* Clear this page out of any CPU caches for coherent swap-in/out. Note 424 /*
425 * Pin can switch back to the default context if we end up calling into
426 * evict_everything - as a last ditch gtt defrag effort that also
427 * switches to the default context. Hence we need to reload from here.
428 */
429 from = ring->last_context;
430
431 /*
432 * Clear this page out of any CPU caches for coherent swap-in/out. Note
427 * that thanks to write = false in this call and us not setting any gpu 433 * that thanks to write = false in this call and us not setting any gpu
428 * write domains when putting a context object onto the active list 434 * write domains when putting a context object onto the active list
429 * (when switching away from it), this won't block. 435 * (when switching away from it), this won't block.
430 * XXX: We need a real interface to do this instead of trickery. */ 436 *
437 * XXX: We need a real interface to do this instead of trickery.
438 */
431 ret = i915_gem_object_set_to_gtt_domain(to->obj, false); 439 ret = i915_gem_object_set_to_gtt_domain(to->obj, false);
432 if (ret) { 440 if (ret) {
433 i915_gem_object_unpin(to->obj); 441 i915_gem_object_unpin(to->obj);
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 7d5752fda5f1..9bb533e0d762 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -125,13 +125,15 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
125 125
126 ret = i915_gem_object_get_pages(obj); 126 ret = i915_gem_object_get_pages(obj);
127 if (ret) 127 if (ret)
128 goto error; 128 goto err;
129
130 i915_gem_object_pin_pages(obj);
129 131
130 ret = -ENOMEM; 132 ret = -ENOMEM;
131 133
132 pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages)); 134 pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages));
133 if (pages == NULL) 135 if (pages == NULL)
134 goto error; 136 goto err_unpin;
135 137
136 i = 0; 138 i = 0;
137 for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) 139 for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0)
@@ -141,15 +143,16 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
141 drm_free_large(pages); 143 drm_free_large(pages);
142 144
143 if (!obj->dma_buf_vmapping) 145 if (!obj->dma_buf_vmapping)
144 goto error; 146 goto err_unpin;
145 147
146 obj->vmapping_count = 1; 148 obj->vmapping_count = 1;
147 i915_gem_object_pin_pages(obj);
148out_unlock: 149out_unlock:
149 mutex_unlock(&dev->struct_mutex); 150 mutex_unlock(&dev->struct_mutex);
150 return obj->dma_buf_vmapping; 151 return obj->dma_buf_vmapping;
151 152
152error: 153err_unpin:
154 i915_gem_object_unpin_pages(obj);
155err:
153 mutex_unlock(&dev->struct_mutex); 156 mutex_unlock(&dev->struct_mutex);
154 return ERR_PTR(ret); 157 return ERR_PTR(ret);
155} 158}
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index b7376533633d..8f3adc7d0dc8 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -88,6 +88,7 @@ i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
88 } else 88 } else
89 drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level); 89 drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level);
90 90
91search_again:
91 /* First see if there is a large enough contiguous idle region... */ 92 /* First see if there is a large enough contiguous idle region... */
92 list_for_each_entry(vma, &vm->inactive_list, mm_list) { 93 list_for_each_entry(vma, &vm->inactive_list, mm_list) {
93 if (mark_free(vma, &unwind_list)) 94 if (mark_free(vma, &unwind_list))
@@ -115,10 +116,17 @@ none:
115 list_del_init(&vma->exec_list); 116 list_del_init(&vma->exec_list);
116 } 117 }
117 118
118 /* We expect the caller to unpin, evict all and try again, or give up. 119 /* Can we unpin some objects such as idle hw contents,
119 * So calling i915_gem_evict_vm() is unnecessary. 120 * or pending flips?
120 */ 121 */
121 return -ENOSPC; 122 ret = nonblocking ? -ENOSPC : i915_gpu_idle(dev);
123 if (ret)
124 return ret;
125
126 /* Only idle the GPU and repeat the search once */
127 i915_gem_retire_requests(dev);
128 nonblocking = true;
129 goto search_again;
122 130
123found: 131found:
124 /* drm_mm doesn't allow any other other operations while 132 /* drm_mm doesn't allow any other other operations while
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 885d595e0e02..a3ba9a8cd687 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -33,6 +33,9 @@
33#include "intel_drv.h" 33#include "intel_drv.h"
34#include <linux/dma_remapping.h> 34#include <linux/dma_remapping.h>
35 35
36#define __EXEC_OBJECT_HAS_PIN (1<<31)
37#define __EXEC_OBJECT_HAS_FENCE (1<<30)
38
36struct eb_vmas { 39struct eb_vmas {
37 struct list_head vmas; 40 struct list_head vmas;
38 int and; 41 int and;
@@ -90,7 +93,7 @@ eb_lookup_vmas(struct eb_vmas *eb,
90{ 93{
91 struct drm_i915_gem_object *obj; 94 struct drm_i915_gem_object *obj;
92 struct list_head objects; 95 struct list_head objects;
93 int i, ret = 0; 96 int i, ret;
94 97
95 INIT_LIST_HEAD(&objects); 98 INIT_LIST_HEAD(&objects);
96 spin_lock(&file->table_lock); 99 spin_lock(&file->table_lock);
@@ -103,7 +106,7 @@ eb_lookup_vmas(struct eb_vmas *eb,
103 DRM_DEBUG("Invalid object handle %d at index %d\n", 106 DRM_DEBUG("Invalid object handle %d at index %d\n",
104 exec[i].handle, i); 107 exec[i].handle, i);
105 ret = -ENOENT; 108 ret = -ENOENT;
106 goto out; 109 goto err;
107 } 110 }
108 111
109 if (!list_empty(&obj->obj_exec_link)) { 112 if (!list_empty(&obj->obj_exec_link)) {
@@ -111,7 +114,7 @@ eb_lookup_vmas(struct eb_vmas *eb,
111 DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n", 114 DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
112 obj, exec[i].handle, i); 115 obj, exec[i].handle, i);
113 ret = -EINVAL; 116 ret = -EINVAL;
114 goto out; 117 goto err;
115 } 118 }
116 119
117 drm_gem_object_reference(&obj->base); 120 drm_gem_object_reference(&obj->base);
@@ -120,9 +123,13 @@ eb_lookup_vmas(struct eb_vmas *eb,
120 spin_unlock(&file->table_lock); 123 spin_unlock(&file->table_lock);
121 124
122 i = 0; 125 i = 0;
123 list_for_each_entry(obj, &objects, obj_exec_link) { 126 while (!list_empty(&objects)) {
124 struct i915_vma *vma; 127 struct i915_vma *vma;
125 128
129 obj = list_first_entry(&objects,
130 struct drm_i915_gem_object,
131 obj_exec_link);
132
126 /* 133 /*
127 * NOTE: We can leak any vmas created here when something fails 134 * NOTE: We can leak any vmas created here when something fails
128 * later on. But that's no issue since vma_unbind can deal with 135 * later on. But that's no issue since vma_unbind can deal with
@@ -135,10 +142,12 @@ eb_lookup_vmas(struct eb_vmas *eb,
135 if (IS_ERR(vma)) { 142 if (IS_ERR(vma)) {
136 DRM_DEBUG("Failed to lookup VMA\n"); 143 DRM_DEBUG("Failed to lookup VMA\n");
137 ret = PTR_ERR(vma); 144 ret = PTR_ERR(vma);
138 goto out; 145 goto err;
139 } 146 }
140 147
148 /* Transfer ownership from the objects list to the vmas list. */
141 list_add_tail(&vma->exec_list, &eb->vmas); 149 list_add_tail(&vma->exec_list, &eb->vmas);
150 list_del_init(&obj->obj_exec_link);
142 151
143 vma->exec_entry = &exec[i]; 152 vma->exec_entry = &exec[i];
144 if (eb->and < 0) { 153 if (eb->and < 0) {
@@ -152,16 +161,22 @@ eb_lookup_vmas(struct eb_vmas *eb,
152 ++i; 161 ++i;
153 } 162 }
154 163
164 return 0;
155 165
156out: 166
167err:
157 while (!list_empty(&objects)) { 168 while (!list_empty(&objects)) {
158 obj = list_first_entry(&objects, 169 obj = list_first_entry(&objects,
159 struct drm_i915_gem_object, 170 struct drm_i915_gem_object,
160 obj_exec_link); 171 obj_exec_link);
161 list_del_init(&obj->obj_exec_link); 172 list_del_init(&obj->obj_exec_link);
162 if (ret) 173 drm_gem_object_unreference(&obj->base);
163 drm_gem_object_unreference(&obj->base);
164 } 174 }
175 /*
176 * Objects already transfered to the vmas list will be unreferenced by
177 * eb_destroy.
178 */
179
165 return ret; 180 return ret;
166} 181}
167 182
@@ -187,7 +202,28 @@ static struct i915_vma *eb_get_vma(struct eb_vmas *eb, unsigned long handle)
187 } 202 }
188} 203}
189 204
190static void eb_destroy(struct eb_vmas *eb) { 205static void
206i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
207{
208 struct drm_i915_gem_exec_object2 *entry;
209 struct drm_i915_gem_object *obj = vma->obj;
210
211 if (!drm_mm_node_allocated(&vma->node))
212 return;
213
214 entry = vma->exec_entry;
215
216 if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
217 i915_gem_object_unpin_fence(obj);
218
219 if (entry->flags & __EXEC_OBJECT_HAS_PIN)
220 i915_gem_object_unpin(obj);
221
222 entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
223}
224
225static void eb_destroy(struct eb_vmas *eb)
226{
191 while (!list_empty(&eb->vmas)) { 227 while (!list_empty(&eb->vmas)) {
192 struct i915_vma *vma; 228 struct i915_vma *vma;
193 229
@@ -195,6 +231,7 @@ static void eb_destroy(struct eb_vmas *eb) {
195 struct i915_vma, 231 struct i915_vma,
196 exec_list); 232 exec_list);
197 list_del_init(&vma->exec_list); 233 list_del_init(&vma->exec_list);
234 i915_gem_execbuffer_unreserve_vma(vma);
198 drm_gem_object_unreference(&vma->obj->base); 235 drm_gem_object_unreference(&vma->obj->base);
199 } 236 }
200 kfree(eb); 237 kfree(eb);
@@ -478,9 +515,6 @@ i915_gem_execbuffer_relocate(struct eb_vmas *eb,
478 return ret; 515 return ret;
479} 516}
480 517
481#define __EXEC_OBJECT_HAS_PIN (1<<31)
482#define __EXEC_OBJECT_HAS_FENCE (1<<30)
483
484static int 518static int
485need_reloc_mappable(struct i915_vma *vma) 519need_reloc_mappable(struct i915_vma *vma)
486{ 520{
@@ -552,26 +586,6 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
552 return 0; 586 return 0;
553} 587}
554 588
555static void
556i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
557{
558 struct drm_i915_gem_exec_object2 *entry;
559 struct drm_i915_gem_object *obj = vma->obj;
560
561 if (!drm_mm_node_allocated(&vma->node))
562 return;
563
564 entry = vma->exec_entry;
565
566 if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
567 i915_gem_object_unpin_fence(obj);
568
569 if (entry->flags & __EXEC_OBJECT_HAS_PIN)
570 i915_gem_object_unpin(obj);
571
572 entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
573}
574
575static int 589static int
576i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring, 590i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
577 struct list_head *vmas, 591 struct list_head *vmas,
@@ -670,13 +684,14 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
670 goto err; 684 goto err;
671 } 685 }
672 686
673err: /* Decrement pin count for bound objects */ 687err:
674 list_for_each_entry(vma, vmas, exec_list)
675 i915_gem_execbuffer_unreserve_vma(vma);
676
677 if (ret != -ENOSPC || retry++) 688 if (ret != -ENOSPC || retry++)
678 return ret; 689 return ret;
679 690
691 /* Decrement pin count for bound objects */
692 list_for_each_entry(vma, vmas, exec_list)
693 i915_gem_execbuffer_unreserve_vma(vma);
694
680 ret = i915_gem_evict_vm(vm, true); 695 ret = i915_gem_evict_vm(vm, true);
681 if (ret) 696 if (ret)
682 return ret; 697 return ret;
@@ -708,6 +723,7 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
708 while (!list_empty(&eb->vmas)) { 723 while (!list_empty(&eb->vmas)) {
709 vma = list_first_entry(&eb->vmas, struct i915_vma, exec_list); 724 vma = list_first_entry(&eb->vmas, struct i915_vma, exec_list);
710 list_del_init(&vma->exec_list); 725 list_del_init(&vma->exec_list);
726 i915_gem_execbuffer_unreserve_vma(vma);
711 drm_gem_object_unreference(&vma->obj->base); 727 drm_gem_object_unreference(&vma->obj->base);
712 } 728 }
713 729
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 3620a1b0a73c..d3c3b5b15824 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -57,7 +57,9 @@ typedef gen8_gtt_pte_t gen8_ppgtt_pde_t;
57#define HSW_WB_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x2) 57#define HSW_WB_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x2)
58#define HSW_WB_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x3) 58#define HSW_WB_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x3)
59#define HSW_WB_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0xb) 59#define HSW_WB_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0xb)
60#define HSW_WB_ELLC_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x8)
60#define HSW_WT_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x6) 61#define HSW_WT_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x6)
62#define HSW_WT_ELLC_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x7)
61 63
62#define GEN8_PTES_PER_PAGE (PAGE_SIZE / sizeof(gen8_gtt_pte_t)) 64#define GEN8_PTES_PER_PAGE (PAGE_SIZE / sizeof(gen8_gtt_pte_t))
63#define GEN8_PDES_PER_PAGE (PAGE_SIZE / sizeof(gen8_ppgtt_pde_t)) 65#define GEN8_PDES_PER_PAGE (PAGE_SIZE / sizeof(gen8_ppgtt_pde_t))
@@ -185,10 +187,10 @@ static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr,
185 case I915_CACHE_NONE: 187 case I915_CACHE_NONE:
186 break; 188 break;
187 case I915_CACHE_WT: 189 case I915_CACHE_WT:
188 pte |= HSW_WT_ELLC_LLC_AGE0; 190 pte |= HSW_WT_ELLC_LLC_AGE3;
189 break; 191 break;
190 default: 192 default:
191 pte |= HSW_WB_ELLC_LLC_AGE0; 193 pte |= HSW_WB_ELLC_LLC_AGE3;
192 break; 194 break;
193 } 195 }
194 196
@@ -335,8 +337,8 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
335 kfree(ppgtt->gen8_pt_dma_addr[i]); 337 kfree(ppgtt->gen8_pt_dma_addr[i]);
336 } 338 }
337 339
338 __free_pages(ppgtt->gen8_pt_pages, ppgtt->num_pt_pages << PAGE_SHIFT); 340 __free_pages(ppgtt->gen8_pt_pages, get_order(ppgtt->num_pt_pages << PAGE_SHIFT));
339 __free_pages(ppgtt->pd_pages, ppgtt->num_pd_pages << PAGE_SHIFT); 341 __free_pages(ppgtt->pd_pages, get_order(ppgtt->num_pd_pages << PAGE_SHIFT));
340} 342}
341 343
342/** 344/**
@@ -904,14 +906,12 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
904 WARN_ON(readq(&gtt_entries[i-1]) 906 WARN_ON(readq(&gtt_entries[i-1])
905 != gen8_pte_encode(addr, level, true)); 907 != gen8_pte_encode(addr, level, true));
906 908
907#if 0 /* TODO: Still needed on GEN8? */
908 /* This next bit makes the above posting read even more important. We 909 /* This next bit makes the above posting read even more important. We
909 * want to flush the TLBs only after we're certain all the PTE updates 910 * want to flush the TLBs only after we're certain all the PTE updates
910 * have finished. 911 * have finished.
911 */ 912 */
912 I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN); 913 I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
913 POSTING_READ(GFX_FLSH_CNTL_GEN6); 914 POSTING_READ(GFX_FLSH_CNTL_GEN6);
914#endif
915} 915}
916 916
917/* 917/*
@@ -1239,6 +1239,11 @@ static inline unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
1239 bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK; 1239 bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
1240 if (bdw_gmch_ctl) 1240 if (bdw_gmch_ctl)
1241 bdw_gmch_ctl = 1 << bdw_gmch_ctl; 1241 bdw_gmch_ctl = 1 << bdw_gmch_ctl;
1242 if (bdw_gmch_ctl > 4) {
1243 WARN_ON(!i915_preliminary_hw_support);
1244 return 4<<20;
1245 }
1246
1242 return bdw_gmch_ctl << 20; 1247 return bdw_gmch_ctl << 20;
1243} 1248}
1244 1249
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index f9eafb6ed523..ee2742122a02 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -235,6 +235,7 @@
235 */ 235 */
236#define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*x-1) 236#define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*x-1)
237#define MI_STORE_REGISTER_MEM(x) MI_INSTR(0x24, 2*x-1) 237#define MI_STORE_REGISTER_MEM(x) MI_INSTR(0x24, 2*x-1)
238#define MI_SRM_LRM_GLOBAL_GTT (1<<22)
238#define MI_FLUSH_DW MI_INSTR(0x26, 1) /* for GEN6 */ 239#define MI_FLUSH_DW MI_INSTR(0x26, 1) /* for GEN6 */
239#define MI_FLUSH_DW_STORE_INDEX (1<<21) 240#define MI_FLUSH_DW_STORE_INDEX (1<<21)
240#define MI_INVALIDATE_TLB (1<<18) 241#define MI_INVALIDATE_TLB (1<<18)
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 330077bcd0bd..526c8ded16b0 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -173,7 +173,7 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port)
173 ddi_translations = ddi_translations_dp; 173 ddi_translations = ddi_translations_dp;
174 break; 174 break;
175 case PORT_D: 175 case PORT_D:
176 if (intel_dpd_is_edp(dev)) 176 if (intel_dp_is_edp(dev, PORT_D))
177 ddi_translations = ddi_translations_edp; 177 ddi_translations = ddi_translations_edp;
178 else 178 else
179 ddi_translations = ddi_translations_dp; 179 ddi_translations = ddi_translations_dp;
@@ -1158,9 +1158,10 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder)
1158 if (wait) 1158 if (wait)
1159 intel_wait_ddi_buf_idle(dev_priv, port); 1159 intel_wait_ddi_buf_idle(dev_priv, port);
1160 1160
1161 if (type == INTEL_OUTPUT_EDP) { 1161 if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
1162 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 1162 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1163 ironlake_edp_panel_vdd_on(intel_dp); 1163 ironlake_edp_panel_vdd_on(intel_dp);
1164 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
1164 ironlake_edp_panel_off(intel_dp); 1165 ironlake_edp_panel_off(intel_dp);
1165 } 1166 }
1166 1167
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 7ec8b488bb1d..769b864465a9 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -5815,7 +5815,7 @@ static void intel_set_pipe_csc(struct drm_crtc *crtc)
5815 uint16_t postoff = 0; 5815 uint16_t postoff = 0;
5816 5816
5817 if (intel_crtc->config.limited_color_range) 5817 if (intel_crtc->config.limited_color_range)
5818 postoff = (16 * (1 << 13) / 255) & 0x1fff; 5818 postoff = (16 * (1 << 12) / 255) & 0x1fff;
5819 5819
5820 I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff); 5820 I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff);
5821 I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), postoff); 5821 I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), postoff);
@@ -6303,7 +6303,7 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
6303 uint32_t val; 6303 uint32_t val;
6304 6304
6305 list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) 6305 list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head)
6306 WARN(crtc->base.enabled, "CRTC for pipe %c enabled\n", 6306 WARN(crtc->active, "CRTC for pipe %c enabled\n",
6307 pipe_name(crtc->pipe)); 6307 pipe_name(crtc->pipe));
6308 6308
6309 WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n"); 6309 WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n");
@@ -6402,7 +6402,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
6402 6402
6403 /* Make sure we're not on PC8 state before disabling PC8, otherwise 6403 /* Make sure we're not on PC8 state before disabling PC8, otherwise
6404 * we'll hang the machine! */ 6404 * we'll hang the machine! */
6405 dev_priv->uncore.funcs.force_wake_get(dev_priv); 6405 gen6_gt_force_wake_get(dev_priv);
6406 6406
6407 if (val & LCPLL_POWER_DOWN_ALLOW) { 6407 if (val & LCPLL_POWER_DOWN_ALLOW) {
6408 val &= ~LCPLL_POWER_DOWN_ALLOW; 6408 val &= ~LCPLL_POWER_DOWN_ALLOW;
@@ -6436,7 +6436,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
6436 DRM_ERROR("Switching back to LCPLL failed\n"); 6436 DRM_ERROR("Switching back to LCPLL failed\n");
6437 } 6437 }
6438 6438
6439 dev_priv->uncore.funcs.force_wake_put(dev_priv); 6439 gen6_gt_force_wake_put(dev_priv);
6440} 6440}
6441 6441
6442void hsw_enable_pc8_work(struct work_struct *__work) 6442void hsw_enable_pc8_work(struct work_struct *__work)
@@ -8354,7 +8354,8 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
8354 intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE | 8354 intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
8355 DERRMR_PIPEB_PRI_FLIP_DONE | 8355 DERRMR_PIPEB_PRI_FLIP_DONE |
8356 DERRMR_PIPEC_PRI_FLIP_DONE)); 8356 DERRMR_PIPEC_PRI_FLIP_DONE));
8357 intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1)); 8357 intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1) |
8358 MI_SRM_LRM_GLOBAL_GTT);
8358 intel_ring_emit(ring, DERRMR); 8359 intel_ring_emit(ring, DERRMR);
8359 intel_ring_emit(ring, ring->scratch.gtt_offset + 256); 8360 intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
8360 } 8361 }
@@ -9134,7 +9135,7 @@ intel_pipe_config_compare(struct drm_device *dev,
9134 if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) 9135 if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
9135 PIPE_CONF_CHECK_I(pipe_bpp); 9136 PIPE_CONF_CHECK_I(pipe_bpp);
9136 9137
9137 if (!IS_HASWELL(dev)) { 9138 if (!HAS_DDI(dev)) {
9138 PIPE_CONF_CHECK_CLOCK_FUZZY(adjusted_mode.crtc_clock); 9139 PIPE_CONF_CHECK_CLOCK_FUZZY(adjusted_mode.crtc_clock);
9139 PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock); 9140 PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
9140 } 9141 }
@@ -10049,7 +10050,7 @@ static void intel_setup_outputs(struct drm_device *dev)
10049 intel_ddi_init(dev, PORT_D); 10050 intel_ddi_init(dev, PORT_D);
10050 } else if (HAS_PCH_SPLIT(dev)) { 10051 } else if (HAS_PCH_SPLIT(dev)) {
10051 int found; 10052 int found;
10052 dpd_is_edp = intel_dpd_is_edp(dev); 10053 dpd_is_edp = intel_dp_is_edp(dev, PORT_D);
10053 10054
10054 if (has_edp_a(dev)) 10055 if (has_edp_a(dev))
10055 intel_dp_init(dev, DP_A, PORT_A); 10056 intel_dp_init(dev, DP_A, PORT_A);
@@ -10086,8 +10087,7 @@ static void intel_setup_outputs(struct drm_device *dev)
10086 intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIC, 10087 intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIC,
10087 PORT_C); 10088 PORT_C);
10088 if (I915_READ(VLV_DISPLAY_BASE + DP_C) & DP_DETECTED) 10089 if (I915_READ(VLV_DISPLAY_BASE + DP_C) & DP_DETECTED)
10089 intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C, 10090 intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C, PORT_C);
10090 PORT_C);
10091 } 10091 }
10092 10092
10093 intel_dsi_init(dev); 10093 intel_dsi_init(dev);
@@ -10541,11 +10541,20 @@ static struct intel_quirk intel_quirks[] = {
10541 /* Sony Vaio Y cannot use SSC on LVDS */ 10541 /* Sony Vaio Y cannot use SSC on LVDS */
10542 { 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable }, 10542 { 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
10543 10543
10544 /* 10544 /* Acer Aspire 5734Z must invert backlight brightness */
10545 * All GM45 Acer (and its brands eMachines and Packard Bell) laptops 10545 { 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },
10546 * seem to use inverted backlight PWM. 10546
10547 */ 10547 /* Acer/eMachines G725 */
10548 { 0x2a42, 0x1025, PCI_ANY_ID, quirk_invert_brightness }, 10548 { 0x2a42, 0x1025, 0x0210, quirk_invert_brightness },
10549
10550 /* Acer/eMachines e725 */
10551 { 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },
10552
10553 /* Acer/Packard Bell NCL20 */
10554 { 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },
10555
10556 /* Acer Aspire 4736Z */
10557 { 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },
10549 10558
10550 /* Dell XPS13 HD Sandy Bridge */ 10559 /* Dell XPS13 HD Sandy Bridge */
10551 { 0x0116, 0x1028, 0x052e, quirk_no_pcm_pwm_enable }, 10560 { 0x0116, 0x1028, 0x052e, quirk_no_pcm_pwm_enable },
@@ -11036,8 +11045,6 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
11036 } 11045 }
11037 11046
11038 intel_modeset_check_state(dev); 11047 intel_modeset_check_state(dev);
11039
11040 drm_mode_config_reset(dev);
11041} 11048}
11042 11049
11043void intel_modeset_gem_init(struct drm_device *dev) 11050void intel_modeset_gem_init(struct drm_device *dev)
@@ -11046,7 +11053,10 @@ void intel_modeset_gem_init(struct drm_device *dev)
11046 11053
11047 intel_setup_overlay(dev); 11054 intel_setup_overlay(dev);
11048 11055
11056 drm_modeset_lock_all(dev);
11057 drm_mode_config_reset(dev);
11049 intel_modeset_setup_hw_state(dev, false); 11058 intel_modeset_setup_hw_state(dev, false);
11059 drm_modeset_unlock_all(dev);
11050} 11060}
11051 11061
11052void intel_modeset_cleanup(struct drm_device *dev) 11062void intel_modeset_cleanup(struct drm_device *dev)
@@ -11125,14 +11135,15 @@ void intel_connector_attach_encoder(struct intel_connector *connector,
11125int intel_modeset_vga_set_state(struct drm_device *dev, bool state) 11135int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
11126{ 11136{
11127 struct drm_i915_private *dev_priv = dev->dev_private; 11137 struct drm_i915_private *dev_priv = dev->dev_private;
11138 unsigned reg = INTEL_INFO(dev)->gen >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
11128 u16 gmch_ctrl; 11139 u16 gmch_ctrl;
11129 11140
11130 pci_read_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, &gmch_ctrl); 11141 pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl);
11131 if (state) 11142 if (state)
11132 gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE; 11143 gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
11133 else 11144 else
11134 gmch_ctrl |= INTEL_GMCH_VGA_DISABLE; 11145 gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
11135 pci_write_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, gmch_ctrl); 11146 pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl);
11136 return 0; 11147 return 0;
11137} 11148}
11138 11149
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 0b2e842fef01..30c627c7b7ba 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -3326,11 +3326,19 @@ intel_trans_dp_port_sel(struct drm_crtc *crtc)
3326} 3326}
3327 3327
3328/* check the VBT to see whether the eDP is on DP-D port */ 3328/* check the VBT to see whether the eDP is on DP-D port */
3329bool intel_dpd_is_edp(struct drm_device *dev) 3329bool intel_dp_is_edp(struct drm_device *dev, enum port port)
3330{ 3330{
3331 struct drm_i915_private *dev_priv = dev->dev_private; 3331 struct drm_i915_private *dev_priv = dev->dev_private;
3332 union child_device_config *p_child; 3332 union child_device_config *p_child;
3333 int i; 3333 int i;
3334 static const short port_mapping[] = {
3335 [PORT_B] = PORT_IDPB,
3336 [PORT_C] = PORT_IDPC,
3337 [PORT_D] = PORT_IDPD,
3338 };
3339
3340 if (port == PORT_A)
3341 return true;
3334 3342
3335 if (!dev_priv->vbt.child_dev_num) 3343 if (!dev_priv->vbt.child_dev_num)
3336 return false; 3344 return false;
@@ -3338,7 +3346,7 @@ bool intel_dpd_is_edp(struct drm_device *dev)
3338 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) { 3346 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
3339 p_child = dev_priv->vbt.child_dev + i; 3347 p_child = dev_priv->vbt.child_dev + i;
3340 3348
3341 if (p_child->common.dvo_port == PORT_IDPD && 3349 if (p_child->common.dvo_port == port_mapping[port] &&
3342 (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) == 3350 (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
3343 (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS)) 3351 (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
3344 return true; 3352 return true;
@@ -3616,26 +3624,10 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
3616 intel_dp->DP = I915_READ(intel_dp->output_reg); 3624 intel_dp->DP = I915_READ(intel_dp->output_reg);
3617 intel_dp->attached_connector = intel_connector; 3625 intel_dp->attached_connector = intel_connector;
3618 3626
3619 type = DRM_MODE_CONNECTOR_DisplayPort; 3627 if (intel_dp_is_edp(dev, port))
3620 /*
3621 * FIXME : We need to initialize built-in panels before external panels.
3622 * For X0, DP_C is fixed as eDP. Revisit this as part of VLV eDP cleanup
3623 */
3624 switch (port) {
3625 case PORT_A:
3626 type = DRM_MODE_CONNECTOR_eDP; 3628 type = DRM_MODE_CONNECTOR_eDP;
3627 break; 3629 else
3628 case PORT_C: 3630 type = DRM_MODE_CONNECTOR_DisplayPort;
3629 if (IS_VALLEYVIEW(dev))
3630 type = DRM_MODE_CONNECTOR_eDP;
3631 break;
3632 case PORT_D:
3633 if (HAS_PCH_SPLIT(dev) && intel_dpd_is_edp(dev))
3634 type = DRM_MODE_CONNECTOR_eDP;
3635 break;
3636 default: /* silence GCC warning */
3637 break;
3638 }
3639 3631
3640 /* 3632 /*
3641 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but 3633 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 1e49aa8f5377..79f91f26e288 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -708,7 +708,7 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder);
708void intel_dp_check_link_status(struct intel_dp *intel_dp); 708void intel_dp_check_link_status(struct intel_dp *intel_dp);
709bool intel_dp_compute_config(struct intel_encoder *encoder, 709bool intel_dp_compute_config(struct intel_encoder *encoder,
710 struct intel_crtc_config *pipe_config); 710 struct intel_crtc_config *pipe_config);
711bool intel_dpd_is_edp(struct drm_device *dev); 711bool intel_dp_is_edp(struct drm_device *dev, enum port port);
712void ironlake_edp_backlight_on(struct intel_dp *intel_dp); 712void ironlake_edp_backlight_on(struct intel_dp *intel_dp);
713void ironlake_edp_backlight_off(struct intel_dp *intel_dp); 713void ironlake_edp_backlight_off(struct intel_dp *intel_dp);
714void ironlake_edp_panel_on(struct intel_dp *intel_dp); 714void ironlake_edp_panel_on(struct intel_dp *intel_dp);
@@ -821,6 +821,7 @@ void intel_update_sprite_watermarks(struct drm_plane *plane,
821 uint32_t sprite_width, int pixel_size, 821 uint32_t sprite_width, int pixel_size,
822 bool enabled, bool scaled); 822 bool enabled, bool scaled);
823void intel_init_pm(struct drm_device *dev); 823void intel_init_pm(struct drm_device *dev);
824void intel_pm_setup(struct drm_device *dev);
824bool intel_fbc_enabled(struct drm_device *dev); 825bool intel_fbc_enabled(struct drm_device *dev);
825void intel_update_fbc(struct drm_device *dev); 826void intel_update_fbc(struct drm_device *dev);
826void intel_gpu_ips_init(struct drm_i915_private *dev_priv); 827void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index f161ac02c4f6..e6f782d1c669 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -451,7 +451,9 @@ static u32 intel_panel_get_backlight(struct drm_device *dev,
451 451
452 spin_lock_irqsave(&dev_priv->backlight.lock, flags); 452 spin_lock_irqsave(&dev_priv->backlight.lock, flags);
453 453
454 if (HAS_PCH_SPLIT(dev)) { 454 if (IS_BROADWELL(dev)) {
455 val = I915_READ(BLC_PWM_PCH_CTL2) & BACKLIGHT_DUTY_CYCLE_MASK;
456 } else if (HAS_PCH_SPLIT(dev)) {
455 val = I915_READ(BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK; 457 val = I915_READ(BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
456 } else { 458 } else {
457 if (IS_VALLEYVIEW(dev)) 459 if (IS_VALLEYVIEW(dev))
@@ -479,6 +481,13 @@ static u32 intel_panel_get_backlight(struct drm_device *dev,
479 return val; 481 return val;
480} 482}
481 483
484static void intel_bdw_panel_set_backlight(struct drm_device *dev, u32 level)
485{
486 struct drm_i915_private *dev_priv = dev->dev_private;
487 u32 val = I915_READ(BLC_PWM_PCH_CTL2) & ~BACKLIGHT_DUTY_CYCLE_MASK;
488 I915_WRITE(BLC_PWM_PCH_CTL2, val | level);
489}
490
482static void intel_pch_panel_set_backlight(struct drm_device *dev, u32 level) 491static void intel_pch_panel_set_backlight(struct drm_device *dev, u32 level)
483{ 492{
484 struct drm_i915_private *dev_priv = dev->dev_private; 493 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -496,7 +505,9 @@ static void intel_panel_actually_set_backlight(struct drm_device *dev,
496 DRM_DEBUG_DRIVER("set backlight PWM = %d\n", level); 505 DRM_DEBUG_DRIVER("set backlight PWM = %d\n", level);
497 level = intel_panel_compute_brightness(dev, pipe, level); 506 level = intel_panel_compute_brightness(dev, pipe, level);
498 507
499 if (HAS_PCH_SPLIT(dev)) 508 if (IS_BROADWELL(dev))
509 return intel_bdw_panel_set_backlight(dev, level);
510 else if (HAS_PCH_SPLIT(dev))
500 return intel_pch_panel_set_backlight(dev, level); 511 return intel_pch_panel_set_backlight(dev, level);
501 512
502 if (is_backlight_combination_mode(dev)) { 513 if (is_backlight_combination_mode(dev)) {
@@ -666,7 +677,16 @@ void intel_panel_enable_backlight(struct intel_connector *connector)
666 POSTING_READ(reg); 677 POSTING_READ(reg);
667 I915_WRITE(reg, tmp | BLM_PWM_ENABLE); 678 I915_WRITE(reg, tmp | BLM_PWM_ENABLE);
668 679
669 if (HAS_PCH_SPLIT(dev) && 680 if (IS_BROADWELL(dev)) {
681 /*
682 * Broadwell requires PCH override to drive the PCH
683 * backlight pin. The above will configure the CPU
684 * backlight pin, which we don't plan to use.
685 */
686 tmp = I915_READ(BLC_PWM_PCH_CTL1);
687 tmp |= BLM_PCH_OVERRIDE_ENABLE | BLM_PCH_PWM_ENABLE;
688 I915_WRITE(BLC_PWM_PCH_CTL1, tmp);
689 } else if (HAS_PCH_SPLIT(dev) &&
670 !(dev_priv->quirks & QUIRK_NO_PCH_PWM_ENABLE)) { 690 !(dev_priv->quirks & QUIRK_NO_PCH_PWM_ENABLE)) {
671 tmp = I915_READ(BLC_PWM_PCH_CTL1); 691 tmp = I915_READ(BLC_PWM_PCH_CTL1);
672 tmp |= BLM_PCH_PWM_ENABLE; 692 tmp |= BLM_PCH_PWM_ENABLE;
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index caf2ee4e5441..26c29c173221 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -1180,7 +1180,7 @@ static bool g4x_compute_wm0(struct drm_device *dev,
1180 1180
1181 adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode; 1181 adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
1182 clock = adjusted_mode->crtc_clock; 1182 clock = adjusted_mode->crtc_clock;
1183 htotal = adjusted_mode->htotal; 1183 htotal = adjusted_mode->crtc_htotal;
1184 hdisplay = to_intel_crtc(crtc)->config.pipe_src_w; 1184 hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
1185 pixel_size = crtc->fb->bits_per_pixel / 8; 1185 pixel_size = crtc->fb->bits_per_pixel / 8;
1186 1186
@@ -1267,7 +1267,7 @@ static bool g4x_compute_srwm(struct drm_device *dev,
1267 crtc = intel_get_crtc_for_plane(dev, plane); 1267 crtc = intel_get_crtc_for_plane(dev, plane);
1268 adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode; 1268 adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
1269 clock = adjusted_mode->crtc_clock; 1269 clock = adjusted_mode->crtc_clock;
1270 htotal = adjusted_mode->htotal; 1270 htotal = adjusted_mode->crtc_htotal;
1271 hdisplay = to_intel_crtc(crtc)->config.pipe_src_w; 1271 hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
1272 pixel_size = crtc->fb->bits_per_pixel / 8; 1272 pixel_size = crtc->fb->bits_per_pixel / 8;
1273 1273
@@ -1498,7 +1498,7 @@ static void i965_update_wm(struct drm_crtc *unused_crtc)
1498 const struct drm_display_mode *adjusted_mode = 1498 const struct drm_display_mode *adjusted_mode =
1499 &to_intel_crtc(crtc)->config.adjusted_mode; 1499 &to_intel_crtc(crtc)->config.adjusted_mode;
1500 int clock = adjusted_mode->crtc_clock; 1500 int clock = adjusted_mode->crtc_clock;
1501 int htotal = adjusted_mode->htotal; 1501 int htotal = adjusted_mode->crtc_htotal;
1502 int hdisplay = to_intel_crtc(crtc)->config.pipe_src_w; 1502 int hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
1503 int pixel_size = crtc->fb->bits_per_pixel / 8; 1503 int pixel_size = crtc->fb->bits_per_pixel / 8;
1504 unsigned long line_time_us; 1504 unsigned long line_time_us;
@@ -1624,7 +1624,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
1624 const struct drm_display_mode *adjusted_mode = 1624 const struct drm_display_mode *adjusted_mode =
1625 &to_intel_crtc(enabled)->config.adjusted_mode; 1625 &to_intel_crtc(enabled)->config.adjusted_mode;
1626 int clock = adjusted_mode->crtc_clock; 1626 int clock = adjusted_mode->crtc_clock;
1627 int htotal = adjusted_mode->htotal; 1627 int htotal = adjusted_mode->crtc_htotal;
1628 int hdisplay = to_intel_crtc(enabled)->config.pipe_src_w; 1628 int hdisplay = to_intel_crtc(enabled)->config.pipe_src_w;
1629 int pixel_size = enabled->fb->bits_per_pixel / 8; 1629 int pixel_size = enabled->fb->bits_per_pixel / 8;
1630 unsigned long line_time_us; 1630 unsigned long line_time_us;
@@ -1776,7 +1776,7 @@ static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane,
1776 crtc = intel_get_crtc_for_plane(dev, plane); 1776 crtc = intel_get_crtc_for_plane(dev, plane);
1777 adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode; 1777 adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
1778 clock = adjusted_mode->crtc_clock; 1778 clock = adjusted_mode->crtc_clock;
1779 htotal = adjusted_mode->htotal; 1779 htotal = adjusted_mode->crtc_htotal;
1780 hdisplay = to_intel_crtc(crtc)->config.pipe_src_w; 1780 hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
1781 pixel_size = crtc->fb->bits_per_pixel / 8; 1781 pixel_size = crtc->fb->bits_per_pixel / 8;
1782 1782
@@ -2469,8 +2469,9 @@ hsw_compute_linetime_wm(struct drm_device *dev, struct drm_crtc *crtc)
2469 /* The WM are computed with base on how long it takes to fill a single 2469 /* The WM are computed with base on how long it takes to fill a single
2470 * row at the given clock rate, multiplied by 8. 2470 * row at the given clock rate, multiplied by 8.
2471 * */ 2471 * */
2472 linetime = DIV_ROUND_CLOSEST(mode->htotal * 1000 * 8, mode->clock); 2472 linetime = DIV_ROUND_CLOSEST(mode->crtc_htotal * 1000 * 8,
2473 ips_linetime = DIV_ROUND_CLOSEST(mode->htotal * 1000 * 8, 2473 mode->crtc_clock);
2474 ips_linetime = DIV_ROUND_CLOSEST(mode->crtc_htotal * 1000 * 8,
2474 intel_ddi_get_cdclk_freq(dev_priv)); 2475 intel_ddi_get_cdclk_freq(dev_priv));
2475 2476
2476 return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) | 2477 return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) |
@@ -5684,8 +5685,11 @@ static void __intel_set_power_well(struct drm_device *dev, bool enable)
5684{ 5685{
5685 struct drm_i915_private *dev_priv = dev->dev_private; 5686 struct drm_i915_private *dev_priv = dev->dev_private;
5686 bool is_enabled, enable_requested; 5687 bool is_enabled, enable_requested;
5688 unsigned long irqflags;
5687 uint32_t tmp; 5689 uint32_t tmp;
5688 5690
5691 WARN_ON(dev_priv->pc8.enabled);
5692
5689 tmp = I915_READ(HSW_PWR_WELL_DRIVER); 5693 tmp = I915_READ(HSW_PWR_WELL_DRIVER);
5690 is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED; 5694 is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED;
5691 enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST; 5695 enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST;
@@ -5701,9 +5705,24 @@ static void __intel_set_power_well(struct drm_device *dev, bool enable)
5701 HSW_PWR_WELL_STATE_ENABLED), 20)) 5705 HSW_PWR_WELL_STATE_ENABLED), 20))
5702 DRM_ERROR("Timeout enabling power well\n"); 5706 DRM_ERROR("Timeout enabling power well\n");
5703 } 5707 }
5708
5709 if (IS_BROADWELL(dev)) {
5710 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
5711 I915_WRITE(GEN8_DE_PIPE_IMR(PIPE_B),
5712 dev_priv->de_irq_mask[PIPE_B]);
5713 I915_WRITE(GEN8_DE_PIPE_IER(PIPE_B),
5714 ~dev_priv->de_irq_mask[PIPE_B] |
5715 GEN8_PIPE_VBLANK);
5716 I915_WRITE(GEN8_DE_PIPE_IMR(PIPE_C),
5717 dev_priv->de_irq_mask[PIPE_C]);
5718 I915_WRITE(GEN8_DE_PIPE_IER(PIPE_C),
5719 ~dev_priv->de_irq_mask[PIPE_C] |
5720 GEN8_PIPE_VBLANK);
5721 POSTING_READ(GEN8_DE_PIPE_IER(PIPE_C));
5722 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
5723 }
5704 } else { 5724 } else {
5705 if (enable_requested) { 5725 if (enable_requested) {
5706 unsigned long irqflags;
5707 enum pipe p; 5726 enum pipe p;
5708 5727
5709 I915_WRITE(HSW_PWR_WELL_DRIVER, 0); 5728 I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
@@ -5730,16 +5749,24 @@ static void __intel_set_power_well(struct drm_device *dev, bool enable)
5730static void __intel_power_well_get(struct drm_device *dev, 5749static void __intel_power_well_get(struct drm_device *dev,
5731 struct i915_power_well *power_well) 5750 struct i915_power_well *power_well)
5732{ 5751{
5733 if (!power_well->count++) 5752 struct drm_i915_private *dev_priv = dev->dev_private;
5753
5754 if (!power_well->count++) {
5755 hsw_disable_package_c8(dev_priv);
5734 __intel_set_power_well(dev, true); 5756 __intel_set_power_well(dev, true);
5757 }
5735} 5758}
5736 5759
5737static void __intel_power_well_put(struct drm_device *dev, 5760static void __intel_power_well_put(struct drm_device *dev,
5738 struct i915_power_well *power_well) 5761 struct i915_power_well *power_well)
5739{ 5762{
5763 struct drm_i915_private *dev_priv = dev->dev_private;
5764
5740 WARN_ON(!power_well->count); 5765 WARN_ON(!power_well->count);
5741 if (!--power_well->count && i915_disable_power_well) 5766 if (!--power_well->count && i915_disable_power_well) {
5742 __intel_set_power_well(dev, false); 5767 __intel_set_power_well(dev, false);
5768 hsw_enable_package_c8(dev_priv);
5769 }
5743} 5770}
5744 5771
5745void intel_display_power_get(struct drm_device *dev, 5772void intel_display_power_get(struct drm_device *dev,
@@ -6129,10 +6156,19 @@ int vlv_freq_opcode(int ddr_freq, int val)
6129 return val; 6156 return val;
6130} 6157}
6131 6158
6132void intel_pm_init(struct drm_device *dev) 6159void intel_pm_setup(struct drm_device *dev)
6133{ 6160{
6134 struct drm_i915_private *dev_priv = dev->dev_private; 6161 struct drm_i915_private *dev_priv = dev->dev_private;
6135 6162
6163 mutex_init(&dev_priv->rps.hw_lock);
6164
6165 mutex_init(&dev_priv->pc8.lock);
6166 dev_priv->pc8.requirements_met = false;
6167 dev_priv->pc8.gpu_idle = false;
6168 dev_priv->pc8.irqs_disabled = false;
6169 dev_priv->pc8.enabled = false;
6170 dev_priv->pc8.disable_count = 2; /* requirements_met + gpu_idle */
6171 INIT_DELAYED_WORK(&dev_priv->pc8.enable_work, hsw_enable_pc8_work);
6136 INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work, 6172 INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
6137 intel_gen6_powersave_work); 6173 intel_gen6_powersave_work);
6138} 6174}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index b620337e6d67..c2f09d456300 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -965,6 +965,7 @@ void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
965 } else if (IS_GEN6(ring->dev)) { 965 } else if (IS_GEN6(ring->dev)) {
966 mmio = RING_HWS_PGA_GEN6(ring->mmio_base); 966 mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
967 } else { 967 } else {
968 /* XXX: gen8 returns to sanity */
968 mmio = RING_HWS_PGA(ring->mmio_base); 969 mmio = RING_HWS_PGA(ring->mmio_base);
969 } 970 }
970 971
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 0b02078a0b84..25cbe073c388 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -784,6 +784,7 @@ static int gen6_do_reset(struct drm_device *dev)
784int intel_gpu_reset(struct drm_device *dev) 784int intel_gpu_reset(struct drm_device *dev)
785{ 785{
786 switch (INTEL_INFO(dev)->gen) { 786 switch (INTEL_INFO(dev)->gen) {
787 case 8:
787 case 7: 788 case 7:
788 case 6: return gen6_do_reset(dev); 789 case 6: return gen6_do_reset(dev);
789 case 5: return ironlake_do_reset(dev); 790 case 5: return ironlake_do_reset(dev);
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
index edcf801613e6..b3fa1ba191b7 100644
--- a/drivers/gpu/drm/nouveau/Makefile
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -59,6 +59,7 @@ nouveau-y += core/subdev/clock/nv40.o
59nouveau-y += core/subdev/clock/nv50.o 59nouveau-y += core/subdev/clock/nv50.o
60nouveau-y += core/subdev/clock/nv84.o 60nouveau-y += core/subdev/clock/nv84.o
61nouveau-y += core/subdev/clock/nva3.o 61nouveau-y += core/subdev/clock/nva3.o
62nouveau-y += core/subdev/clock/nvaa.o
62nouveau-y += core/subdev/clock/nvc0.o 63nouveau-y += core/subdev/clock/nvc0.o
63nouveau-y += core/subdev/clock/nve0.o 64nouveau-y += core/subdev/clock/nve0.o
64nouveau-y += core/subdev/clock/pllnv04.o 65nouveau-y += core/subdev/clock/pllnv04.o
diff --git a/drivers/gpu/drm/nouveau/core/core/subdev.c b/drivers/gpu/drm/nouveau/core/core/subdev.c
index 48f06378d3f9..2ea5568b6cf5 100644
--- a/drivers/gpu/drm/nouveau/core/core/subdev.c
+++ b/drivers/gpu/drm/nouveau/core/core/subdev.c
@@ -104,11 +104,8 @@ nouveau_subdev_create_(struct nouveau_object *parent,
104 104
105 if (parent) { 105 if (parent) {
106 struct nouveau_device *device = nv_device(parent); 106 struct nouveau_device *device = nv_device(parent);
107 int subidx = nv_hclass(subdev) & 0xff;
108
109 subdev->debug = nouveau_dbgopt(device->dbgopt, subname); 107 subdev->debug = nouveau_dbgopt(device->dbgopt, subname);
110 subdev->mmio = nv_subdev(device)->mmio; 108 subdev->mmio = nv_subdev(device)->mmio;
111 device->subdev[subidx] = *pobject;
112 } 109 }
113 110
114 return 0; 111 return 0;
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/base.c b/drivers/gpu/drm/nouveau/core/engine/device/base.c
index 9135b25a29d0..dd01c6c435d6 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/base.c
@@ -268,6 +268,8 @@ nouveau_devobj_ctor(struct nouveau_object *parent,
268 if (ret) 268 if (ret)
269 return ret; 269 return ret;
270 270
271 device->subdev[i] = devobj->subdev[i];
272
271 /* note: can't init *any* subdevs until devinit has been run 273 /* note: can't init *any* subdevs until devinit has been run
272 * due to not knowing exactly what the vbios init tables will 274 * due to not knowing exactly what the vbios init tables will
273 * mess with. devinit also can't be run until all of its 275 * mess with. devinit also can't be run until all of its
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv50.c b/drivers/gpu/drm/nouveau/core/engine/device/nv50.c
index db139827047c..db3fc7be856a 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv50.c
@@ -283,7 +283,7 @@ nv50_identify(struct nouveau_device *device)
283 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; 283 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
284 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass; 284 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
285 device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass; 285 device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass;
286 device->oclass[NVDEV_SUBDEV_CLOCK ] = nv84_clock_oclass; 286 device->oclass[NVDEV_SUBDEV_CLOCK ] = nvaa_clock_oclass;
287 device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass; 287 device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
288 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; 288 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
289 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass; 289 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
@@ -311,7 +311,7 @@ nv50_identify(struct nouveau_device *device)
311 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; 311 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
312 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass; 312 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
313 device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass; 313 device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass;
314 device->oclass[NVDEV_SUBDEV_CLOCK ] = nv84_clock_oclass; 314 device->oclass[NVDEV_SUBDEV_CLOCK ] = nvaa_clock_oclass;
315 device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass; 315 device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
316 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; 316 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
317 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass; 317 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
index 8d06eef2b9ee..dbc5e33de94f 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
@@ -161,7 +161,7 @@ nvc0_identify(struct nouveau_device *device)
161 device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass; 161 device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
162 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; 162 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
163 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass; 163 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
164 device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass; 164 device->oclass[NVDEV_SUBDEV_MC ] = nvc0_mc_oclass;
165 device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; 165 device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
166 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 166 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
167 device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass; 167 device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
index 5f555788121c..e6352bd5b4ff 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
@@ -33,6 +33,7 @@
33#include <engine/dmaobj.h> 33#include <engine/dmaobj.h>
34#include <engine/fifo.h> 34#include <engine/fifo.h>
35 35
36#include "nv04.h"
36#include "nv50.h" 37#include "nv50.h"
37 38
38/******************************************************************************* 39/*******************************************************************************
@@ -460,6 +461,8 @@ nv50_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
460 nv_subdev(priv)->intr = nv04_fifo_intr; 461 nv_subdev(priv)->intr = nv04_fifo_intr;
461 nv_engine(priv)->cclass = &nv50_fifo_cclass; 462 nv_engine(priv)->cclass = &nv50_fifo_cclass;
462 nv_engine(priv)->sclass = nv50_fifo_sclass; 463 nv_engine(priv)->sclass = nv50_fifo_sclass;
464 priv->base.pause = nv04_fifo_pause;
465 priv->base.start = nv04_fifo_start;
463 return 0; 466 return 0;
464} 467}
465 468
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
index 0908dc834c84..fe0f41e65d9b 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
@@ -35,6 +35,7 @@
35#include <engine/dmaobj.h> 35#include <engine/dmaobj.h>
36#include <engine/fifo.h> 36#include <engine/fifo.h>
37 37
38#include "nv04.h"
38#include "nv50.h" 39#include "nv50.h"
39 40
40/******************************************************************************* 41/*******************************************************************************
@@ -432,6 +433,8 @@ nv84_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
432 nv_subdev(priv)->intr = nv04_fifo_intr; 433 nv_subdev(priv)->intr = nv04_fifo_intr;
433 nv_engine(priv)->cclass = &nv84_fifo_cclass; 434 nv_engine(priv)->cclass = &nv84_fifo_cclass;
434 nv_engine(priv)->sclass = nv84_fifo_sclass; 435 nv_engine(priv)->sclass = nv84_fifo_sclass;
436 priv->base.pause = nv04_fifo_pause;
437 priv->base.start = nv04_fifo_start;
435 return 0; 438 return 0;
436} 439}
437 440
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
index 434bb4b0fa2e..5c8a63dc506a 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
@@ -334,7 +334,7 @@ nvc0_graph_mthd(struct nvc0_graph_priv *priv, struct nvc0_graph_mthd *mthds)
334 while ((mthd = &mthds[i++]) && (init = mthd->init)) { 334 while ((mthd = &mthds[i++]) && (init = mthd->init)) {
335 u32 addr = 0x80000000 | mthd->oclass; 335 u32 addr = 0x80000000 | mthd->oclass;
336 for (data = 0; init->count; init++) { 336 for (data = 0; init->count; init++) {
337 if (data != init->data) { 337 if (init == mthd->init || data != init->data) {
338 nv_wr32(priv, 0x40448c, init->data); 338 nv_wr32(priv, 0x40448c, init->data);
339 data = init->data; 339 data = init->data;
340 } 340 }
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nv50.c b/drivers/gpu/drm/nouveau/core/engine/software/nv50.c
index b574dd4bb828..5ce686ee729e 100644
--- a/drivers/gpu/drm/nouveau/core/engine/software/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/software/nv50.c
@@ -176,7 +176,7 @@ nv50_software_context_ctor(struct nouveau_object *parent,
176 if (ret) 176 if (ret)
177 return ret; 177 return ret;
178 178
179 chan->vblank.nr_event = pdisp->vblank->index_nr; 179 chan->vblank.nr_event = pdisp ? pdisp->vblank->index_nr : 0;
180 chan->vblank.event = kzalloc(chan->vblank.nr_event * 180 chan->vblank.event = kzalloc(chan->vblank.nr_event *
181 sizeof(*chan->vblank.event), GFP_KERNEL); 181 sizeof(*chan->vblank.event), GFP_KERNEL);
182 if (!chan->vblank.event) 182 if (!chan->vblank.event)
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/clock.h b/drivers/gpu/drm/nouveau/core/include/subdev/clock.h
index e2675bc0edba..8f4ced75444a 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/clock.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/clock.h
@@ -14,6 +14,9 @@ enum nv_clk_src {
14 nv_clk_src_hclk, 14 nv_clk_src_hclk,
15 nv_clk_src_hclkm3, 15 nv_clk_src_hclkm3,
16 nv_clk_src_hclkm3d2, 16 nv_clk_src_hclkm3d2,
17 nv_clk_src_hclkm2d3, /* NVAA */
18 nv_clk_src_hclkm4, /* NVAA */
19 nv_clk_src_cclk, /* NVAA */
17 20
18 nv_clk_src_host, 21 nv_clk_src_host,
19 22
@@ -127,6 +130,7 @@ extern struct nouveau_oclass nv04_clock_oclass;
127extern struct nouveau_oclass nv40_clock_oclass; 130extern struct nouveau_oclass nv40_clock_oclass;
128extern struct nouveau_oclass *nv50_clock_oclass; 131extern struct nouveau_oclass *nv50_clock_oclass;
129extern struct nouveau_oclass *nv84_clock_oclass; 132extern struct nouveau_oclass *nv84_clock_oclass;
133extern struct nouveau_oclass *nvaa_clock_oclass;
130extern struct nouveau_oclass nva3_clock_oclass; 134extern struct nouveau_oclass nva3_clock_oclass;
131extern struct nouveau_oclass nvc0_clock_oclass; 135extern struct nouveau_oclass nvc0_clock_oclass;
132extern struct nouveau_oclass nve0_clock_oclass; 136extern struct nouveau_oclass nve0_clock_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/fb.h b/drivers/gpu/drm/nouveau/core/include/subdev/fb.h
index 8541aa382ff2..d89dbdf39b0d 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/fb.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/fb.h
@@ -75,6 +75,11 @@ struct nouveau_fb {
75static inline struct nouveau_fb * 75static inline struct nouveau_fb *
76nouveau_fb(void *obj) 76nouveau_fb(void *obj)
77{ 77{
78 /* fbram uses this before device subdev pointer is valid */
79 if (nv_iclass(obj, NV_SUBDEV_CLASS) &&
80 nv_subidx(obj) == NVDEV_SUBDEV_FB)
81 return obj;
82
78 return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_FB]; 83 return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_FB];
79} 84}
80 85
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
index 420908cb82b6..df1b1b423093 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
@@ -365,13 +365,13 @@ static u16
365init_script(struct nouveau_bios *bios, int index) 365init_script(struct nouveau_bios *bios, int index)
366{ 366{
367 struct nvbios_init init = { .bios = bios }; 367 struct nvbios_init init = { .bios = bios };
368 u16 data; 368 u16 bmp_ver = bmp_version(bios), data;
369 369
370 if (bmp_version(bios) && bmp_version(bios) < 0x0510) { 370 if (bmp_ver && bmp_ver < 0x0510) {
371 if (index > 1) 371 if (index > 1 || bmp_ver < 0x0100)
372 return 0x0000; 372 return 0x0000;
373 373
374 data = bios->bmp_offset + (bios->version.major < 2 ? 14 : 18); 374 data = bios->bmp_offset + (bmp_ver < 0x0200 ? 14 : 18);
375 return nv_ro16(bios, data + (index * 2)); 375 return nv_ro16(bios, data + (index * 2));
376 } 376 }
377 377
@@ -1294,7 +1294,11 @@ init_jump(struct nvbios_init *init)
1294 u16 offset = nv_ro16(bios, init->offset + 1); 1294 u16 offset = nv_ro16(bios, init->offset + 1);
1295 1295
1296 trace("JUMP\t0x%04x\n", offset); 1296 trace("JUMP\t0x%04x\n", offset);
1297 init->offset = offset; 1297
1298 if (init_exec(init))
1299 init->offset = offset;
1300 else
1301 init->offset += 3;
1298} 1302}
1299 1303
1300/** 1304/**
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c
index da50c1b12928..30c1f3a4158e 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c
@@ -69,6 +69,11 @@ nv04_clock_pll_prog(struct nouveau_clock *clk, u32 reg1,
69 return 0; 69 return 0;
70} 70}
71 71
72static struct nouveau_clocks
73nv04_domain[] = {
74 { nv_clk_src_max }
75};
76
72static int 77static int
73nv04_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine, 78nv04_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
74 struct nouveau_oclass *oclass, void *data, u32 size, 79 struct nouveau_oclass *oclass, void *data, u32 size,
@@ -77,7 +82,7 @@ nv04_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
77 struct nv04_clock_priv *priv; 82 struct nv04_clock_priv *priv;
78 int ret; 83 int ret;
79 84
80 ret = nouveau_clock_create(parent, engine, oclass, NULL, &priv); 85 ret = nouveau_clock_create(parent, engine, oclass, nv04_domain, &priv);
81 *pobject = nv_object(priv); 86 *pobject = nv_object(priv);
82 if (ret) 87 if (ret)
83 return ret; 88 return ret;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nvaa.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nvaa.c
new file mode 100644
index 000000000000..7a723b4f564d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nvaa.c
@@ -0,0 +1,445 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <engine/fifo.h>
26#include <subdev/bios.h>
27#include <subdev/bios/pll.h>
28#include <subdev/timer.h>
29#include <subdev/clock.h>
30
31#include "pll.h"
32
33struct nvaa_clock_priv {
34 struct nouveau_clock base;
35 enum nv_clk_src csrc, ssrc, vsrc;
36 u32 cctrl, sctrl;
37 u32 ccoef, scoef;
38 u32 cpost, spost;
39 u32 vdiv;
40};
41
42static u32
43read_div(struct nouveau_clock *clk)
44{
45 return nv_rd32(clk, 0x004600);
46}
47
48static u32
49read_pll(struct nouveau_clock *clk, u32 base)
50{
51 u32 ctrl = nv_rd32(clk, base + 0);
52 u32 coef = nv_rd32(clk, base + 4);
53 u32 ref = clk->read(clk, nv_clk_src_href);
54 u32 post_div = 0;
55 u32 clock = 0;
56 int N1, M1;
57
58 switch (base){
59 case 0x4020:
60 post_div = 1 << ((nv_rd32(clk, 0x4070) & 0x000f0000) >> 16);
61 break;
62 case 0x4028:
63 post_div = (nv_rd32(clk, 0x4040) & 0x000f0000) >> 16;
64 break;
65 default:
66 break;
67 }
68
69 N1 = (coef & 0x0000ff00) >> 8;
70 M1 = (coef & 0x000000ff);
71 if ((ctrl & 0x80000000) && M1) {
72 clock = ref * N1 / M1;
73 clock = clock / post_div;
74 }
75
76 return clock;
77}
78
79static int
80nvaa_clock_read(struct nouveau_clock *clk, enum nv_clk_src src)
81{
82 struct nvaa_clock_priv *priv = (void *)clk;
83 u32 mast = nv_rd32(clk, 0x00c054);
84 u32 P = 0;
85
86 switch (src) {
87 case nv_clk_src_crystal:
88 return nv_device(priv)->crystal;
89 case nv_clk_src_href:
90 return 100000; /* PCIE reference clock */
91 case nv_clk_src_hclkm4:
92 return clk->read(clk, nv_clk_src_href) * 4;
93 case nv_clk_src_hclkm2d3:
94 return clk->read(clk, nv_clk_src_href) * 2 / 3;
95 case nv_clk_src_host:
96 switch (mast & 0x000c0000) {
97 case 0x00000000: return clk->read(clk, nv_clk_src_hclkm2d3);
98 case 0x00040000: break;
99 case 0x00080000: return clk->read(clk, nv_clk_src_hclkm4);
100 case 0x000c0000: return clk->read(clk, nv_clk_src_cclk);
101 }
102 break;
103 case nv_clk_src_core:
104 P = (nv_rd32(clk, 0x004028) & 0x00070000) >> 16;
105
106 switch (mast & 0x00000003) {
107 case 0x00000000: return clk->read(clk, nv_clk_src_crystal) >> P;
108 case 0x00000001: return 0;
109 case 0x00000002: return clk->read(clk, nv_clk_src_hclkm4) >> P;
110 case 0x00000003: return read_pll(clk, 0x004028) >> P;
111 }
112 break;
113 case nv_clk_src_cclk:
114 if ((mast & 0x03000000) != 0x03000000)
115 return clk->read(clk, nv_clk_src_core);
116
117 if ((mast & 0x00000200) == 0x00000000)
118 return clk->read(clk, nv_clk_src_core);
119
120 switch (mast & 0x00000c00) {
121 case 0x00000000: return clk->read(clk, nv_clk_src_href);
122 case 0x00000400: return clk->read(clk, nv_clk_src_hclkm4);
123 case 0x00000800: return clk->read(clk, nv_clk_src_hclkm2d3);
124 default: return 0;
125 }
126 case nv_clk_src_shader:
127 P = (nv_rd32(clk, 0x004020) & 0x00070000) >> 16;
128 switch (mast & 0x00000030) {
129 case 0x00000000:
130 if (mast & 0x00000040)
131 return clk->read(clk, nv_clk_src_href) >> P;
132 return clk->read(clk, nv_clk_src_crystal) >> P;
133 case 0x00000010: break;
134 case 0x00000020: return read_pll(clk, 0x004028) >> P;
135 case 0x00000030: return read_pll(clk, 0x004020) >> P;
136 }
137 break;
138 case nv_clk_src_mem:
139 return 0;
140 break;
141 case nv_clk_src_vdec:
142 P = (read_div(clk) & 0x00000700) >> 8;
143
144 switch (mast & 0x00400000) {
145 case 0x00400000:
146 return clk->read(clk, nv_clk_src_core) >> P;
147 break;
148 default:
149 return 500000 >> P;
150 break;
151 }
152 break;
153 default:
154 break;
155 }
156
157 nv_debug(priv, "unknown clock source %d 0x%08x\n", src, mast);
158 return 0;
159}
160
161static u32
162calc_pll(struct nvaa_clock_priv *priv, u32 reg,
163 u32 clock, int *N, int *M, int *P)
164{
165 struct nouveau_bios *bios = nouveau_bios(priv);
166 struct nvbios_pll pll;
167 struct nouveau_clock *clk = &priv->base;
168 int ret;
169
170 ret = nvbios_pll_parse(bios, reg, &pll);
171 if (ret)
172 return 0;
173
174 pll.vco2.max_freq = 0;
175 pll.refclk = clk->read(clk, nv_clk_src_href);
176 if (!pll.refclk)
177 return 0;
178
179 return nv04_pll_calc(nv_subdev(priv), &pll, clock, N, M, NULL, NULL, P);
180}
181
182static inline u32
183calc_P(u32 src, u32 target, int *div)
184{
185 u32 clk0 = src, clk1 = src;
186 for (*div = 0; *div <= 7; (*div)++) {
187 if (clk0 <= target) {
188 clk1 = clk0 << (*div ? 1 : 0);
189 break;
190 }
191 clk0 >>= 1;
192 }
193
194 if (target - clk0 <= clk1 - target)
195 return clk0;
196 (*div)--;
197 return clk1;
198}
199
200static int
201nvaa_clock_calc(struct nouveau_clock *clk, struct nouveau_cstate *cstate)
202{
203 struct nvaa_clock_priv *priv = (void *)clk;
204 const int shader = cstate->domain[nv_clk_src_shader];
205 const int core = cstate->domain[nv_clk_src_core];
206 const int vdec = cstate->domain[nv_clk_src_vdec];
207 u32 out = 0, clock = 0;
208 int N, M, P1, P2 = 0;
209 int divs = 0;
210
211 /* cclk: find suitable source, disable PLL if we can */
212 if (core < clk->read(clk, nv_clk_src_hclkm4))
213 out = calc_P(clk->read(clk, nv_clk_src_hclkm4), core, &divs);
214
215 /* Calculate clock * 2, so shader clock can use it too */
216 clock = calc_pll(priv, 0x4028, (core << 1), &N, &M, &P1);
217
218 if (abs(core - out) <=
219 abs(core - (clock >> 1))) {
220 priv->csrc = nv_clk_src_hclkm4;
221 priv->cctrl = divs << 16;
222 } else {
223 /* NVCTRL is actually used _after_ NVPOST, and after what we
224 * call NVPLL. To make matters worse, NVPOST is an integer
225 * divider instead of a right-shift number. */
226 if(P1 > 2) {
227 P2 = P1 - 2;
228 P1 = 2;
229 }
230
231 priv->csrc = nv_clk_src_core;
232 priv->ccoef = (N << 8) | M;
233
234 priv->cctrl = (P2 + 1) << 16;
235 priv->cpost = (1 << P1) << 16;
236 }
237
238 /* sclk: nvpll + divisor, href or spll */
239 out = 0;
240 if (shader == clk->read(clk, nv_clk_src_href)) {
241 priv->ssrc = nv_clk_src_href;
242 } else {
243 clock = calc_pll(priv, 0x4020, shader, &N, &M, &P1);
244 if (priv->csrc == nv_clk_src_core) {
245 out = calc_P((core << 1), shader, &divs);
246 }
247
248 if (abs(shader - out) <=
249 abs(shader - clock) &&
250 (divs + P2) <= 7) {
251 priv->ssrc = nv_clk_src_core;
252 priv->sctrl = (divs + P2) << 16;
253 } else {
254 priv->ssrc = nv_clk_src_shader;
255 priv->scoef = (N << 8) | M;
256 priv->sctrl = P1 << 16;
257 }
258 }
259
260 /* vclk */
261 out = calc_P(core, vdec, &divs);
262 clock = calc_P(500000, vdec, &P1);
263 if(abs(vdec - out) <=
264 abs(vdec - clock)) {
265 priv->vsrc = nv_clk_src_cclk;
266 priv->vdiv = divs << 16;
267 } else {
268 priv->vsrc = nv_clk_src_vdec;
269 priv->vdiv = P1 << 16;
270 }
271
272 /* Print strategy! */
273 nv_debug(priv, "nvpll: %08x %08x %08x\n",
274 priv->ccoef, priv->cpost, priv->cctrl);
275 nv_debug(priv, " spll: %08x %08x %08x\n",
276 priv->scoef, priv->spost, priv->sctrl);
277 nv_debug(priv, " vdiv: %08x\n", priv->vdiv);
278 if (priv->csrc == nv_clk_src_hclkm4)
279 nv_debug(priv, "core: hrefm4\n");
280 else
281 nv_debug(priv, "core: nvpll\n");
282
283 if (priv->ssrc == nv_clk_src_hclkm4)
284 nv_debug(priv, "shader: hrefm4\n");
285 else if (priv->ssrc == nv_clk_src_core)
286 nv_debug(priv, "shader: nvpll\n");
287 else
288 nv_debug(priv, "shader: spll\n");
289
290 if (priv->vsrc == nv_clk_src_hclkm4)
291 nv_debug(priv, "vdec: 500MHz\n");
292 else
293 nv_debug(priv, "vdec: core\n");
294
295 return 0;
296}
297
298static int
299nvaa_clock_prog(struct nouveau_clock *clk)
300{
301 struct nvaa_clock_priv *priv = (void *)clk;
302 struct nouveau_fifo *pfifo = nouveau_fifo(clk);
303 unsigned long flags;
304 u32 pllmask = 0, mast, ptherm_gate;
305 int ret = -EBUSY;
306
307 /* halt and idle execution engines */
308 ptherm_gate = nv_mask(clk, 0x020060, 0x00070000, 0x00000000);
309 nv_mask(clk, 0x002504, 0x00000001, 0x00000001);
310 /* Wait until the interrupt handler is finished */
311 if (!nv_wait(clk, 0x000100, 0xffffffff, 0x00000000))
312 goto resume;
313
314 if (pfifo)
315 pfifo->pause(pfifo, &flags);
316
317 if (!nv_wait(clk, 0x002504, 0x00000010, 0x00000010))
318 goto resume;
319 if (!nv_wait(clk, 0x00251c, 0x0000003f, 0x0000003f))
320 goto resume;
321
322 /* First switch to safe clocks: href */
323 mast = nv_mask(clk, 0xc054, 0x03400e70, 0x03400640);
324 mast &= ~0x00400e73;
325 mast |= 0x03000000;
326
327 switch (priv->csrc) {
328 case nv_clk_src_hclkm4:
329 nv_mask(clk, 0x4028, 0x00070000, priv->cctrl);
330 mast |= 0x00000002;
331 break;
332 case nv_clk_src_core:
333 nv_wr32(clk, 0x402c, priv->ccoef);
334 nv_wr32(clk, 0x4028, 0x80000000 | priv->cctrl);
335 nv_wr32(clk, 0x4040, priv->cpost);
336 pllmask |= (0x3 << 8);
337 mast |= 0x00000003;
338 break;
339 default:
340 nv_warn(priv,"Reclocking failed: unknown core clock\n");
341 goto resume;
342 }
343
344 switch (priv->ssrc) {
345 case nv_clk_src_href:
346 nv_mask(clk, 0x4020, 0x00070000, 0x00000000);
347 /* mast |= 0x00000000; */
348 break;
349 case nv_clk_src_core:
350 nv_mask(clk, 0x4020, 0x00070000, priv->sctrl);
351 mast |= 0x00000020;
352 break;
353 case nv_clk_src_shader:
354 nv_wr32(clk, 0x4024, priv->scoef);
355 nv_wr32(clk, 0x4020, 0x80000000 | priv->sctrl);
356 nv_wr32(clk, 0x4070, priv->spost);
357 pllmask |= (0x3 << 12);
358 mast |= 0x00000030;
359 break;
360 default:
361 nv_warn(priv,"Reclocking failed: unknown sclk clock\n");
362 goto resume;
363 }
364
365 if (!nv_wait(clk, 0x004080, pllmask, pllmask)) {
366 nv_warn(priv,"Reclocking failed: unstable PLLs\n");
367 goto resume;
368 }
369
370 switch (priv->vsrc) {
371 case nv_clk_src_cclk:
372 mast |= 0x00400000;
373 default:
374 nv_wr32(clk, 0x4600, priv->vdiv);
375 }
376
377 nv_wr32(clk, 0xc054, mast);
378 ret = 0;
379
380resume:
381 if (pfifo)
382 pfifo->start(pfifo, &flags);
383
384 nv_mask(clk, 0x002504, 0x00000001, 0x00000000);
385 nv_wr32(clk, 0x020060, ptherm_gate);
386
387 /* Disable some PLLs and dividers when unused */
388 if (priv->csrc != nv_clk_src_core) {
389 nv_wr32(clk, 0x4040, 0x00000000);
390 nv_mask(clk, 0x4028, 0x80000000, 0x00000000);
391 }
392
393 if (priv->ssrc != nv_clk_src_shader) {
394 nv_wr32(clk, 0x4070, 0x00000000);
395 nv_mask(clk, 0x4020, 0x80000000, 0x00000000);
396 }
397
398 return ret;
399}
400
401static void
402nvaa_clock_tidy(struct nouveau_clock *clk)
403{
404}
405
406static struct nouveau_clocks
407nvaa_domains[] = {
408 { nv_clk_src_crystal, 0xff },
409 { nv_clk_src_href , 0xff },
410 { nv_clk_src_core , 0xff, 0, "core", 1000 },
411 { nv_clk_src_shader , 0xff, 0, "shader", 1000 },
412 { nv_clk_src_vdec , 0xff, 0, "vdec", 1000 },
413 { nv_clk_src_max }
414};
415
416static int
417nvaa_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
418 struct nouveau_oclass *oclass, void *data, u32 size,
419 struct nouveau_object **pobject)
420{
421 struct nvaa_clock_priv *priv;
422 int ret;
423
424 ret = nouveau_clock_create(parent, engine, oclass, nvaa_domains, &priv);
425 *pobject = nv_object(priv);
426 if (ret)
427 return ret;
428
429 priv->base.read = nvaa_clock_read;
430 priv->base.calc = nvaa_clock_calc;
431 priv->base.prog = nvaa_clock_prog;
432 priv->base.tidy = nvaa_clock_tidy;
433 return 0;
434}
435
436struct nouveau_oclass *
437nvaa_clock_oclass = &(struct nouveau_oclass) {
438 .handle = NV_SUBDEV(CLOCK, 0xaa),
439 .ofuncs = &(struct nouveau_ofuncs) {
440 .ctor = nvaa_clock_ctor,
441 .dtor = _nouveau_clock_dtor,
442 .init = _nouveau_clock_init,
443 .fini = _nouveau_clock_fini,
444 },
445};
diff --git a/drivers/gpu/drm/nouveau/dispnv04/overlay.c b/drivers/gpu/drm/nouveau/dispnv04/overlay.c
index 3618ac6b6316..32e7064b819b 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/overlay.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/overlay.c
@@ -58,8 +58,8 @@ struct nouveau_plane {
58}; 58};
59 59
60static uint32_t formats[] = { 60static uint32_t formats[] = {
61 DRM_FORMAT_NV12,
62 DRM_FORMAT_UYVY, 61 DRM_FORMAT_UYVY,
62 DRM_FORMAT_NV12,
63}; 63};
64 64
65/* Sine can be approximated with 65/* Sine can be approximated with
@@ -99,13 +99,28 @@ nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
99 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 99 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
100 struct nouveau_bo *cur = nv_plane->cur; 100 struct nouveau_bo *cur = nv_plane->cur;
101 bool flip = nv_plane->flip; 101 bool flip = nv_plane->flip;
102 int format = ALIGN(src_w * 4, 0x100);
103 int soff = NV_PCRTC0_SIZE * nv_crtc->index; 102 int soff = NV_PCRTC0_SIZE * nv_crtc->index;
104 int soff2 = NV_PCRTC0_SIZE * !nv_crtc->index; 103 int soff2 = NV_PCRTC0_SIZE * !nv_crtc->index;
105 int ret; 104 int format, ret;
105
106 /* Source parameters given in 16.16 fixed point, ignore fractional. */
107 src_x >>= 16;
108 src_y >>= 16;
109 src_w >>= 16;
110 src_h >>= 16;
111
112 format = ALIGN(src_w * 4, 0x100);
106 113
107 if (format > 0xffff) 114 if (format > 0xffff)
108 return -EINVAL; 115 return -ERANGE;
116
117 if (dev->chipset >= 0x30) {
118 if (crtc_w < (src_w >> 1) || crtc_h < (src_h >> 1))
119 return -ERANGE;
120 } else {
121 if (crtc_w < (src_w >> 3) || crtc_h < (src_h >> 3))
122 return -ERANGE;
123 }
109 124
110 ret = nouveau_bo_pin(nv_fb->nvbo, TTM_PL_FLAG_VRAM); 125 ret = nouveau_bo_pin(nv_fb->nvbo, TTM_PL_FLAG_VRAM);
111 if (ret) 126 if (ret)
@@ -113,12 +128,6 @@ nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
113 128
114 nv_plane->cur = nv_fb->nvbo; 129 nv_plane->cur = nv_fb->nvbo;
115 130
116 /* Source parameters given in 16.16 fixed point, ignore fractional. */
117 src_x = src_x >> 16;
118 src_y = src_y >> 16;
119 src_w = src_w >> 16;
120 src_h = src_h >> 16;
121
122 nv_mask(dev, NV_PCRTC_ENGINE_CTRL + soff, NV_CRTC_FSEL_OVERLAY, NV_CRTC_FSEL_OVERLAY); 131 nv_mask(dev, NV_PCRTC_ENGINE_CTRL + soff, NV_CRTC_FSEL_OVERLAY, NV_CRTC_FSEL_OVERLAY);
123 nv_mask(dev, NV_PCRTC_ENGINE_CTRL + soff2, NV_CRTC_FSEL_OVERLAY, 0); 132 nv_mask(dev, NV_PCRTC_ENGINE_CTRL + soff2, NV_CRTC_FSEL_OVERLAY, 0);
124 133
@@ -245,14 +254,25 @@ nv10_overlay_init(struct drm_device *device)
245{ 254{
246 struct nouveau_device *dev = nouveau_dev(device); 255 struct nouveau_device *dev = nouveau_dev(device);
247 struct nouveau_plane *plane = kzalloc(sizeof(struct nouveau_plane), GFP_KERNEL); 256 struct nouveau_plane *plane = kzalloc(sizeof(struct nouveau_plane), GFP_KERNEL);
257 int num_formats = ARRAY_SIZE(formats);
248 int ret; 258 int ret;
249 259
250 if (!plane) 260 if (!plane)
251 return; 261 return;
252 262
263 switch (dev->chipset) {
264 case 0x10:
265 case 0x11:
266 case 0x15:
267 case 0x1a:
268 case 0x20:
269 num_formats = 1;
270 break;
271 }
272
253 ret = drm_plane_init(device, &plane->base, 3 /* both crtc's */, 273 ret = drm_plane_init(device, &plane->base, 3 /* both crtc's */,
254 &nv10_plane_funcs, 274 &nv10_plane_funcs,
255 formats, ARRAY_SIZE(formats), false); 275 formats, num_formats, false);
256 if (ret) 276 if (ret)
257 goto err; 277 goto err;
258 278
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index 6828d81ed7b9..900fae01793e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -447,6 +447,8 @@ nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS)
447 if (ret) 447 if (ret)
448 goto done; 448 goto done;
449 449
450 info->offset = ntfy->node->offset;
451
450done: 452done:
451 if (ret) 453 if (ret)
452 nouveau_abi16_ntfy_fini(chan, ntfy); 454 nouveau_abi16_ntfy_fini(chan, ntfy);
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index 95c740454049..ba0183fb84f3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -51,6 +51,7 @@ static struct nouveau_dsm_priv {
51 bool dsm_detected; 51 bool dsm_detected;
52 bool optimus_detected; 52 bool optimus_detected;
53 acpi_handle dhandle; 53 acpi_handle dhandle;
54 acpi_handle other_handle;
54 acpi_handle rom_handle; 55 acpi_handle rom_handle;
55} nouveau_dsm_priv; 56} nouveau_dsm_priv;
56 57
@@ -260,9 +261,10 @@ static int nouveau_dsm_pci_probe(struct pci_dev *pdev)
260 if (!dhandle) 261 if (!dhandle)
261 return false; 262 return false;
262 263
263 if (!acpi_has_method(dhandle, "_DSM")) 264 if (!acpi_has_method(dhandle, "_DSM")) {
265 nouveau_dsm_priv.other_handle = dhandle;
264 return false; 266 return false;
265 267 }
266 if (nouveau_test_dsm(dhandle, nouveau_dsm, NOUVEAU_DSM_POWER)) 268 if (nouveau_test_dsm(dhandle, nouveau_dsm, NOUVEAU_DSM_POWER))
267 retval |= NOUVEAU_DSM_HAS_MUX; 269 retval |= NOUVEAU_DSM_HAS_MUX;
268 270
@@ -338,6 +340,16 @@ static bool nouveau_dsm_detect(void)
338 printk(KERN_INFO "VGA switcheroo: detected DSM switching method %s handle\n", 340 printk(KERN_INFO "VGA switcheroo: detected DSM switching method %s handle\n",
339 acpi_method_name); 341 acpi_method_name);
340 nouveau_dsm_priv.dsm_detected = true; 342 nouveau_dsm_priv.dsm_detected = true;
343 /*
344 * On some systems hotplug events are generated for the device
345 * being switched off when _DSM is executed. They cause ACPI
346 * hotplug to trigger and attempt to remove the device from
347 * the system, which causes it to break down. Prevent that from
348 * happening by setting the no_hotplug flag for the involved
349 * ACPI device objects.
350 */
351 acpi_bus_no_hotplug(nouveau_dsm_priv.dhandle);
352 acpi_bus_no_hotplug(nouveau_dsm_priv.other_handle);
341 ret = true; 353 ret = true;
342 } 354 }
343 355
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 7809d92183c4..25ea82f8def3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -608,8 +608,9 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
608 fence = nouveau_fence_ref(new_bo->bo.sync_obj); 608 fence = nouveau_fence_ref(new_bo->bo.sync_obj);
609 spin_unlock(&new_bo->bo.bdev->fence_lock); 609 spin_unlock(&new_bo->bo.bdev->fence_lock);
610 ret = nouveau_fence_sync(fence, chan); 610 ret = nouveau_fence_sync(fence, chan);
611 nouveau_fence_unref(&fence);
611 if (ret) 612 if (ret)
612 return ret; 613 goto fail_free;
613 614
614 if (new_bo != old_bo) { 615 if (new_bo != old_bo) {
615 ret = nouveau_bo_pin(new_bo, TTM_PL_FLAG_VRAM); 616 ret = nouveau_bo_pin(new_bo, TTM_PL_FLAG_VRAM);
@@ -701,7 +702,7 @@ nouveau_finish_page_flip(struct nouveau_channel *chan,
701 702
702 s = list_first_entry(&fctx->flip, struct nouveau_page_flip_state, head); 703 s = list_first_entry(&fctx->flip, struct nouveau_page_flip_state, head);
703 if (s->event) 704 if (s->event)
704 drm_send_vblank_event(dev, -1, s->event); 705 drm_send_vblank_event(dev, s->crtc, s->event);
705 706
706 list_del(&s->head); 707 list_del(&s->head);
707 if (ps) 708 if (ps)
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 7a3759f1c41a..98a22e6e27a1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -858,6 +858,12 @@ static int nouveau_pmops_runtime_suspend(struct device *dev)
858 if (nouveau_runtime_pm == 0) 858 if (nouveau_runtime_pm == 0)
859 return -EINVAL; 859 return -EINVAL;
860 860
861 /* are we optimus enabled? */
862 if (nouveau_runtime_pm == -1 && !nouveau_is_optimus() && !nouveau_is_v1_dsm()) {
863 DRM_DEBUG_DRIVER("failing to power off - not optimus\n");
864 return -EINVAL;
865 }
866
861 nv_debug_level(SILENT); 867 nv_debug_level(SILENT);
862 drm_kms_helper_poll_disable(drm_dev); 868 drm_kms_helper_poll_disable(drm_dev);
863 vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_OFF); 869 vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_OFF);
diff --git a/drivers/gpu/drm/nouveau/nouveau_hwmon.c b/drivers/gpu/drm/nouveau/nouveau_hwmon.c
index 38a4db5bfe21..4aff04fa483c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_hwmon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_hwmon.c
@@ -630,7 +630,6 @@ error:
630 hwmon->hwmon = NULL; 630 hwmon->hwmon = NULL;
631 return ret; 631 return ret;
632#else 632#else
633 hwmon->hwmon = NULL;
634 return 0; 633 return 0;
635#endif 634#endif
636} 635}
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index f8e66c08b11a..4e384a2f99c3 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -1265,7 +1265,7 @@ nv50_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
1265 uint32_t start, uint32_t size) 1265 uint32_t start, uint32_t size)
1266{ 1266{
1267 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 1267 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
1268 u32 end = max(start + size, (u32)256); 1268 u32 end = min_t(u32, start + size, 256);
1269 u32 i; 1269 u32 i;
1270 1270
1271 for (i = start; i < end; i++) { 1271 for (i = start; i < end; i++) {
diff --git a/drivers/gpu/drm/qxl/Kconfig b/drivers/gpu/drm/qxl/Kconfig
index 037d324bf58f..66ac0ff95f5a 100644
--- a/drivers/gpu/drm/qxl/Kconfig
+++ b/drivers/gpu/drm/qxl/Kconfig
@@ -8,5 +8,6 @@ config DRM_QXL
8 select DRM_KMS_HELPER 8 select DRM_KMS_HELPER
9 select DRM_KMS_FB_HELPER 9 select DRM_KMS_FB_HELPER
10 select DRM_TTM 10 select DRM_TTM
11 select CRC32
11 help 12 help
12 QXL virtual GPU for Spice virtualization desktop integration. Do not enable this driver unless your distro ships a corresponding X.org QXL driver that can handle kernel modesetting. 13 QXL virtual GPU for Spice virtualization desktop integration. Do not enable this driver unless your distro ships a corresponding X.org QXL driver that can handle kernel modesetting.
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 5e827c29d194..d70aafb83307 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -24,7 +24,7 @@
24 */ 24 */
25 25
26 26
27#include "linux/crc32.h" 27#include <linux/crc32.h>
28 28
29#include "qxl_drv.h" 29#include "qxl_drv.h"
30#include "qxl_object.h" 30#include "qxl_object.h"
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index 0109a9644cb2..821ab7b9409b 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -92,6 +92,7 @@ qxl_release_free(struct qxl_device *qdev,
92 - DRM_FILE_OFFSET); 92 - DRM_FILE_OFFSET);
93 qxl_fence_remove_release(&bo->fence, release->id); 93 qxl_fence_remove_release(&bo->fence, release->id);
94 qxl_bo_unref(&bo); 94 qxl_bo_unref(&bo);
95 kfree(entry);
95 } 96 }
96 spin_lock(&qdev->release_idr_lock); 97 spin_lock(&qdev->release_idr_lock);
97 idr_remove(&qdev->release_idr, release->id); 98 idr_remove(&qdev->release_idr, release->id);
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 80a20120e625..0b9621c9aeea 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -1143,31 +1143,53 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
1143 } 1143 }
1144 1144
1145 if (tiling_flags & RADEON_TILING_MACRO) { 1145 if (tiling_flags & RADEON_TILING_MACRO) {
1146 if (rdev->family >= CHIP_BONAIRE) 1146 evergreen_tiling_fields(tiling_flags, &bankw, &bankh, &mtaspect, &tile_split);
1147 tmp = rdev->config.cik.tile_config;
1148 else if (rdev->family >= CHIP_TAHITI)
1149 tmp = rdev->config.si.tile_config;
1150 else if (rdev->family >= CHIP_CAYMAN)
1151 tmp = rdev->config.cayman.tile_config;
1152 else
1153 tmp = rdev->config.evergreen.tile_config;
1154 1147
1155 switch ((tmp & 0xf0) >> 4) { 1148 /* Set NUM_BANKS. */
1156 case 0: /* 4 banks */ 1149 if (rdev->family >= CHIP_BONAIRE) {
1157 fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_4_BANK); 1150 unsigned tileb, index, num_banks, tile_split_bytes;
1158 break; 1151
1159 case 1: /* 8 banks */ 1152 /* Calculate the macrotile mode index. */
1160 default: 1153 tile_split_bytes = 64 << tile_split;
1161 fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_8_BANK); 1154 tileb = 8 * 8 * target_fb->bits_per_pixel / 8;
1162 break; 1155 tileb = min(tile_split_bytes, tileb);
1163 case 2: /* 16 banks */ 1156
1164 fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_16_BANK); 1157 for (index = 0; tileb > 64; index++) {
1165 break; 1158 tileb >>= 1;
1159 }
1160
1161 if (index >= 16) {
1162 DRM_ERROR("Wrong screen bpp (%u) or tile split (%u)\n",
1163 target_fb->bits_per_pixel, tile_split);
1164 return -EINVAL;
1165 }
1166
1167 num_banks = (rdev->config.cik.macrotile_mode_array[index] >> 6) & 0x3;
1168 fb_format |= EVERGREEN_GRPH_NUM_BANKS(num_banks);
1169 } else {
1170 /* SI and older. */
1171 if (rdev->family >= CHIP_TAHITI)
1172 tmp = rdev->config.si.tile_config;
1173 else if (rdev->family >= CHIP_CAYMAN)
1174 tmp = rdev->config.cayman.tile_config;
1175 else
1176 tmp = rdev->config.evergreen.tile_config;
1177
1178 switch ((tmp & 0xf0) >> 4) {
1179 case 0: /* 4 banks */
1180 fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_4_BANK);
1181 break;
1182 case 1: /* 8 banks */
1183 default:
1184 fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_8_BANK);
1185 break;
1186 case 2: /* 16 banks */
1187 fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_16_BANK);
1188 break;
1189 }
1166 } 1190 }
1167 1191
1168 fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_2D_TILED_THIN1); 1192 fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_2D_TILED_THIN1);
1169
1170 evergreen_tiling_fields(tiling_flags, &bankw, &bankh, &mtaspect, &tile_split);
1171 fb_format |= EVERGREEN_GRPH_TILE_SPLIT(tile_split); 1193 fb_format |= EVERGREEN_GRPH_TILE_SPLIT(tile_split);
1172 fb_format |= EVERGREEN_GRPH_BANK_WIDTH(bankw); 1194 fb_format |= EVERGREEN_GRPH_BANK_WIDTH(bankw);
1173 fb_format |= EVERGREEN_GRPH_BANK_HEIGHT(bankh); 1195 fb_format |= EVERGREEN_GRPH_BANK_HEIGHT(bankh);
@@ -1180,23 +1202,18 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
1180 fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1); 1202 fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1);
1181 1203
1182 if (rdev->family >= CHIP_BONAIRE) { 1204 if (rdev->family >= CHIP_BONAIRE) {
1183 u32 num_pipe_configs = rdev->config.cik.max_tile_pipes; 1205 /* Read the pipe config from the 2D TILED SCANOUT mode.
1184 u32 num_rb = rdev->config.cik.max_backends_per_se; 1206 * It should be the same for the other modes too, but not all
1185 if (num_pipe_configs > 8) 1207 * modes set the pipe config field. */
1186 num_pipe_configs = 8; 1208 u32 pipe_config = (rdev->config.cik.tile_mode_array[10] >> 6) & 0x1f;
1187 if (num_pipe_configs == 8) 1209
1188 fb_format |= CIK_GRPH_PIPE_CONFIG(CIK_ADDR_SURF_P8_32x32_16x16); 1210 fb_format |= CIK_GRPH_PIPE_CONFIG(pipe_config);
1189 else if (num_pipe_configs == 4) {
1190 if (num_rb == 4)
1191 fb_format |= CIK_GRPH_PIPE_CONFIG(CIK_ADDR_SURF_P4_16x16);
1192 else if (num_rb < 4)
1193 fb_format |= CIK_GRPH_PIPE_CONFIG(CIK_ADDR_SURF_P4_8x16);
1194 } else if (num_pipe_configs == 2)
1195 fb_format |= CIK_GRPH_PIPE_CONFIG(CIK_ADDR_SURF_P2);
1196 } else if ((rdev->family == CHIP_TAHITI) || 1211 } else if ((rdev->family == CHIP_TAHITI) ||
1197 (rdev->family == CHIP_PITCAIRN)) 1212 (rdev->family == CHIP_PITCAIRN))
1198 fb_format |= SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P8_32x32_8x16); 1213 fb_format |= SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P8_32x32_8x16);
1199 else if (rdev->family == CHIP_VERDE) 1214 else if ((rdev->family == CHIP_VERDE) ||
1215 (rdev->family == CHIP_OLAND) ||
1216 (rdev->family == CHIP_HAINAN)) /* for completeness. HAINAN has no display hw */
1200 fb_format |= SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P4_8x16); 1217 fb_format |= SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P4_8x16);
1201 1218
1202 switch (radeon_crtc->crtc_id) { 1219 switch (radeon_crtc->crtc_id) {
diff --git a/drivers/gpu/drm/radeon/atombios_i2c.c b/drivers/gpu/drm/radeon/atombios_i2c.c
index 0652ee0a2098..f685035dbe39 100644
--- a/drivers/gpu/drm/radeon/atombios_i2c.c
+++ b/drivers/gpu/drm/radeon/atombios_i2c.c
@@ -44,7 +44,7 @@ static int radeon_process_i2c_ch(struct radeon_i2c_chan *chan,
44 PROCESS_I2C_CHANNEL_TRANSACTION_PS_ALLOCATION args; 44 PROCESS_I2C_CHANNEL_TRANSACTION_PS_ALLOCATION args;
45 int index = GetIndexIntoMasterTable(COMMAND, ProcessI2cChannelTransaction); 45 int index = GetIndexIntoMasterTable(COMMAND, ProcessI2cChannelTransaction);
46 unsigned char *base; 46 unsigned char *base;
47 u16 out; 47 u16 out = cpu_to_le16(0);
48 48
49 memset(&args, 0, sizeof(args)); 49 memset(&args, 0, sizeof(args));
50 50
@@ -55,11 +55,14 @@ static int radeon_process_i2c_ch(struct radeon_i2c_chan *chan,
55 DRM_ERROR("hw i2c: tried to write too many bytes (%d vs 3)\n", num); 55 DRM_ERROR("hw i2c: tried to write too many bytes (%d vs 3)\n", num);
56 return -EINVAL; 56 return -EINVAL;
57 } 57 }
58 args.ucRegIndex = buf[0]; 58 if (buf == NULL)
59 if (num > 1) { 59 args.ucRegIndex = 0;
60 else
61 args.ucRegIndex = buf[0];
62 if (num)
60 num--; 63 num--;
64 if (num)
61 memcpy(&out, &buf[1], num); 65 memcpy(&out, &buf[1], num);
62 }
63 args.lpI2CDataOut = cpu_to_le16(out); 66 args.lpI2CDataOut = cpu_to_le16(out);
64 } else { 67 } else {
65 if (num > ATOM_MAX_HW_I2C_READ) { 68 if (num > ATOM_MAX_HW_I2C_READ) {
@@ -96,14 +99,14 @@ int radeon_atom_hw_i2c_xfer(struct i2c_adapter *i2c_adap,
96 struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap); 99 struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
97 struct i2c_msg *p; 100 struct i2c_msg *p;
98 int i, remaining, current_count, buffer_offset, max_bytes, ret; 101 int i, remaining, current_count, buffer_offset, max_bytes, ret;
99 u8 buf = 0, flags; 102 u8 flags;
100 103
101 /* check for bus probe */ 104 /* check for bus probe */
102 p = &msgs[0]; 105 p = &msgs[0];
103 if ((num == 1) && (p->len == 0)) { 106 if ((num == 1) && (p->len == 0)) {
104 ret = radeon_process_i2c_ch(i2c, 107 ret = radeon_process_i2c_ch(i2c,
105 p->addr, HW_I2C_WRITE, 108 p->addr, HW_I2C_WRITE,
106 &buf, 1); 109 NULL, 0);
107 if (ret) 110 if (ret)
108 return ret; 111 return ret;
109 else 112 else
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index b43a3a3c9067..e950fabd7f5e 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -3057,7 +3057,7 @@ static u32 cik_create_bitmask(u32 bit_width)
3057 * Returns the disabled RB bitmask. 3057 * Returns the disabled RB bitmask.
3058 */ 3058 */
3059static u32 cik_get_rb_disabled(struct radeon_device *rdev, 3059static u32 cik_get_rb_disabled(struct radeon_device *rdev,
3060 u32 max_rb_num, u32 se_num, 3060 u32 max_rb_num_per_se,
3061 u32 sh_per_se) 3061 u32 sh_per_se)
3062{ 3062{
3063 u32 data, mask; 3063 u32 data, mask;
@@ -3071,7 +3071,7 @@ static u32 cik_get_rb_disabled(struct radeon_device *rdev,
3071 3071
3072 data >>= BACKEND_DISABLE_SHIFT; 3072 data >>= BACKEND_DISABLE_SHIFT;
3073 3073
3074 mask = cik_create_bitmask(max_rb_num / se_num / sh_per_se); 3074 mask = cik_create_bitmask(max_rb_num_per_se / sh_per_se);
3075 3075
3076 return data & mask; 3076 return data & mask;
3077} 3077}
@@ -3088,7 +3088,7 @@ static u32 cik_get_rb_disabled(struct radeon_device *rdev,
3088 */ 3088 */
3089static void cik_setup_rb(struct radeon_device *rdev, 3089static void cik_setup_rb(struct radeon_device *rdev,
3090 u32 se_num, u32 sh_per_se, 3090 u32 se_num, u32 sh_per_se,
3091 u32 max_rb_num) 3091 u32 max_rb_num_per_se)
3092{ 3092{
3093 int i, j; 3093 int i, j;
3094 u32 data, mask; 3094 u32 data, mask;
@@ -3098,7 +3098,7 @@ static void cik_setup_rb(struct radeon_device *rdev,
3098 for (i = 0; i < se_num; i++) { 3098 for (i = 0; i < se_num; i++) {
3099 for (j = 0; j < sh_per_se; j++) { 3099 for (j = 0; j < sh_per_se; j++) {
3100 cik_select_se_sh(rdev, i, j); 3100 cik_select_se_sh(rdev, i, j);
3101 data = cik_get_rb_disabled(rdev, max_rb_num, se_num, sh_per_se); 3101 data = cik_get_rb_disabled(rdev, max_rb_num_per_se, sh_per_se);
3102 if (rdev->family == CHIP_HAWAII) 3102 if (rdev->family == CHIP_HAWAII)
3103 disabled_rbs |= data << ((i * sh_per_se + j) * HAWAII_RB_BITMAP_WIDTH_PER_SH); 3103 disabled_rbs |= data << ((i * sh_per_se + j) * HAWAII_RB_BITMAP_WIDTH_PER_SH);
3104 else 3104 else
@@ -3108,12 +3108,14 @@ static void cik_setup_rb(struct radeon_device *rdev,
3108 cik_select_se_sh(rdev, 0xffffffff, 0xffffffff); 3108 cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
3109 3109
3110 mask = 1; 3110 mask = 1;
3111 for (i = 0; i < max_rb_num; i++) { 3111 for (i = 0; i < max_rb_num_per_se * se_num; i++) {
3112 if (!(disabled_rbs & mask)) 3112 if (!(disabled_rbs & mask))
3113 enabled_rbs |= mask; 3113 enabled_rbs |= mask;
3114 mask <<= 1; 3114 mask <<= 1;
3115 } 3115 }
3116 3116
3117 rdev->config.cik.backend_enable_mask = enabled_rbs;
3118
3117 for (i = 0; i < se_num; i++) { 3119 for (i = 0; i < se_num; i++) {
3118 cik_select_se_sh(rdev, i, 0xffffffff); 3120 cik_select_se_sh(rdev, i, 0xffffffff);
3119 data = 0; 3121 data = 0;
diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c
index 0300727a4f70..d08b83c6267b 100644
--- a/drivers/gpu/drm/radeon/cik_sdma.c
+++ b/drivers/gpu/drm/radeon/cik_sdma.c
@@ -458,7 +458,7 @@ int cik_copy_dma(struct radeon_device *rdev,
458 radeon_ring_write(ring, 0); /* src/dst endian swap */ 458 radeon_ring_write(ring, 0); /* src/dst endian swap */
459 radeon_ring_write(ring, src_offset & 0xffffffff); 459 radeon_ring_write(ring, src_offset & 0xffffffff);
460 radeon_ring_write(ring, upper_32_bits(src_offset) & 0xffffffff); 460 radeon_ring_write(ring, upper_32_bits(src_offset) & 0xffffffff);
461 radeon_ring_write(ring, dst_offset & 0xfffffffc); 461 radeon_ring_write(ring, dst_offset & 0xffffffff);
462 radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xffffffff); 462 radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xffffffff);
463 src_offset += cur_size_in_bytes; 463 src_offset += cur_size_in_bytes;
464 dst_offset += cur_size_in_bytes; 464 dst_offset += cur_size_in_bytes;
diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c
index 009f46e0ce72..713a5d359901 100644
--- a/drivers/gpu/drm/radeon/dce6_afmt.c
+++ b/drivers/gpu/drm/radeon/dce6_afmt.c
@@ -93,11 +93,13 @@ void dce6_afmt_select_pin(struct drm_encoder *encoder)
93 struct radeon_device *rdev = encoder->dev->dev_private; 93 struct radeon_device *rdev = encoder->dev->dev_private;
94 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 94 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
95 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; 95 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
96 u32 offset = dig->afmt->offset; 96 u32 offset;
97 97
98 if (!dig->afmt->pin) 98 if (!dig || !dig->afmt || !dig->afmt->pin)
99 return; 99 return;
100 100
101 offset = dig->afmt->offset;
102
101 WREG32(AFMT_AUDIO_SRC_CONTROL + offset, 103 WREG32(AFMT_AUDIO_SRC_CONTROL + offset,
102 AFMT_AUDIO_SRC_SELECT(dig->afmt->pin->id)); 104 AFMT_AUDIO_SRC_SELECT(dig->afmt->pin->id));
103} 105}
@@ -112,7 +114,7 @@ void dce6_afmt_write_latency_fields(struct drm_encoder *encoder,
112 struct radeon_connector *radeon_connector = NULL; 114 struct radeon_connector *radeon_connector = NULL;
113 u32 tmp = 0, offset; 115 u32 tmp = 0, offset;
114 116
115 if (!dig->afmt->pin) 117 if (!dig || !dig->afmt || !dig->afmt->pin)
116 return; 118 return;
117 119
118 offset = dig->afmt->pin->offset; 120 offset = dig->afmt->pin->offset;
@@ -156,7 +158,7 @@ void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder)
156 u8 *sadb; 158 u8 *sadb;
157 int sad_count; 159 int sad_count;
158 160
159 if (!dig->afmt->pin) 161 if (!dig || !dig->afmt || !dig->afmt->pin)
160 return; 162 return;
161 163
162 offset = dig->afmt->pin->offset; 164 offset = dig->afmt->pin->offset;
@@ -172,7 +174,7 @@ void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder)
172 } 174 }
173 175
174 sad_count = drm_edid_to_speaker_allocation(radeon_connector->edid, &sadb); 176 sad_count = drm_edid_to_speaker_allocation(radeon_connector->edid, &sadb);
175 if (sad_count < 0) { 177 if (sad_count <= 0) {
176 DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count); 178 DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
177 return; 179 return;
178 } 180 }
@@ -217,7 +219,7 @@ void dce6_afmt_write_sad_regs(struct drm_encoder *encoder)
217 { AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO }, 219 { AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
218 }; 220 };
219 221
220 if (!dig->afmt->pin) 222 if (!dig || !dig->afmt || !dig->afmt->pin)
221 return; 223 return;
222 224
223 offset = dig->afmt->pin->offset; 225 offset = dig->afmt->pin->offset;
@@ -233,7 +235,7 @@ void dce6_afmt_write_sad_regs(struct drm_encoder *encoder)
233 } 235 }
234 236
235 sad_count = drm_edid_to_sad(radeon_connector->edid, &sads); 237 sad_count = drm_edid_to_sad(radeon_connector->edid, &sads);
236 if (sad_count < 0) { 238 if (sad_count <= 0) {
237 DRM_ERROR("Couldn't read SADs: %d\n", sad_count); 239 DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
238 return; 240 return;
239 } 241 }
@@ -306,7 +308,9 @@ int dce6_audio_init(struct radeon_device *rdev)
306 rdev->audio.enabled = true; 308 rdev->audio.enabled = true;
307 309
308 if (ASIC_IS_DCE8(rdev)) 310 if (ASIC_IS_DCE8(rdev))
309 rdev->audio.num_pins = 7; 311 rdev->audio.num_pins = 6;
312 else if (ASIC_IS_DCE61(rdev))
313 rdev->audio.num_pins = 4;
310 else 314 else
311 rdev->audio.num_pins = 6; 315 rdev->audio.num_pins = 6;
312 316
diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
index aa695c4feb3d..0c6d5cef4cf1 100644
--- a/drivers/gpu/drm/radeon/evergreen_hdmi.c
+++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
@@ -118,7 +118,7 @@ static void dce4_afmt_write_speaker_allocation(struct drm_encoder *encoder)
118 } 118 }
119 119
120 sad_count = drm_edid_to_speaker_allocation(radeon_connector->edid, &sadb); 120 sad_count = drm_edid_to_speaker_allocation(radeon_connector->edid, &sadb);
121 if (sad_count < 0) { 121 if (sad_count <= 0) {
122 DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count); 122 DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
123 return; 123 return;
124 } 124 }
@@ -173,7 +173,7 @@ static void evergreen_hdmi_write_sad_regs(struct drm_encoder *encoder)
173 } 173 }
174 174
175 sad_count = drm_edid_to_sad(radeon_connector->edid, &sads); 175 sad_count = drm_edid_to_sad(radeon_connector->edid, &sads);
176 if (sad_count < 0) { 176 if (sad_count <= 0) {
177 DRM_ERROR("Couldn't read SADs: %d\n", sad_count); 177 DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
178 return; 178 return;
179 } 179 }
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 11aab2ab54ce..f59a9e9fccf8 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -895,6 +895,10 @@ static void cayman_gpu_init(struct radeon_device *rdev)
895 (rdev->pdev->device == 0x999C)) { 895 (rdev->pdev->device == 0x999C)) {
896 rdev->config.cayman.max_simds_per_se = 6; 896 rdev->config.cayman.max_simds_per_se = 6;
897 rdev->config.cayman.max_backends_per_se = 2; 897 rdev->config.cayman.max_backends_per_se = 2;
898 rdev->config.cayman.max_hw_contexts = 8;
899 rdev->config.cayman.sx_max_export_size = 256;
900 rdev->config.cayman.sx_max_export_pos_size = 64;
901 rdev->config.cayman.sx_max_export_smx_size = 192;
898 } else if ((rdev->pdev->device == 0x9903) || 902 } else if ((rdev->pdev->device == 0x9903) ||
899 (rdev->pdev->device == 0x9904) || 903 (rdev->pdev->device == 0x9904) ||
900 (rdev->pdev->device == 0x990A) || 904 (rdev->pdev->device == 0x990A) ||
@@ -905,6 +909,10 @@ static void cayman_gpu_init(struct radeon_device *rdev)
905 (rdev->pdev->device == 0x999D)) { 909 (rdev->pdev->device == 0x999D)) {
906 rdev->config.cayman.max_simds_per_se = 4; 910 rdev->config.cayman.max_simds_per_se = 4;
907 rdev->config.cayman.max_backends_per_se = 2; 911 rdev->config.cayman.max_backends_per_se = 2;
912 rdev->config.cayman.max_hw_contexts = 8;
913 rdev->config.cayman.sx_max_export_size = 256;
914 rdev->config.cayman.sx_max_export_pos_size = 64;
915 rdev->config.cayman.sx_max_export_smx_size = 192;
908 } else if ((rdev->pdev->device == 0x9919) || 916 } else if ((rdev->pdev->device == 0x9919) ||
909 (rdev->pdev->device == 0x9990) || 917 (rdev->pdev->device == 0x9990) ||
910 (rdev->pdev->device == 0x9991) || 918 (rdev->pdev->device == 0x9991) ||
@@ -915,9 +923,17 @@ static void cayman_gpu_init(struct radeon_device *rdev)
915 (rdev->pdev->device == 0x99A0)) { 923 (rdev->pdev->device == 0x99A0)) {
916 rdev->config.cayman.max_simds_per_se = 3; 924 rdev->config.cayman.max_simds_per_se = 3;
917 rdev->config.cayman.max_backends_per_se = 1; 925 rdev->config.cayman.max_backends_per_se = 1;
926 rdev->config.cayman.max_hw_contexts = 4;
927 rdev->config.cayman.sx_max_export_size = 128;
928 rdev->config.cayman.sx_max_export_pos_size = 32;
929 rdev->config.cayman.sx_max_export_smx_size = 96;
918 } else { 930 } else {
919 rdev->config.cayman.max_simds_per_se = 2; 931 rdev->config.cayman.max_simds_per_se = 2;
920 rdev->config.cayman.max_backends_per_se = 1; 932 rdev->config.cayman.max_backends_per_se = 1;
933 rdev->config.cayman.max_hw_contexts = 4;
934 rdev->config.cayman.sx_max_export_size = 128;
935 rdev->config.cayman.sx_max_export_pos_size = 32;
936 rdev->config.cayman.sx_max_export_smx_size = 96;
921 } 937 }
922 rdev->config.cayman.max_texture_channel_caches = 2; 938 rdev->config.cayman.max_texture_channel_caches = 2;
923 rdev->config.cayman.max_gprs = 256; 939 rdev->config.cayman.max_gprs = 256;
@@ -925,10 +941,6 @@ static void cayman_gpu_init(struct radeon_device *rdev)
925 rdev->config.cayman.max_gs_threads = 32; 941 rdev->config.cayman.max_gs_threads = 32;
926 rdev->config.cayman.max_stack_entries = 512; 942 rdev->config.cayman.max_stack_entries = 512;
927 rdev->config.cayman.sx_num_of_sets = 8; 943 rdev->config.cayman.sx_num_of_sets = 8;
928 rdev->config.cayman.sx_max_export_size = 256;
929 rdev->config.cayman.sx_max_export_pos_size = 64;
930 rdev->config.cayman.sx_max_export_smx_size = 192;
931 rdev->config.cayman.max_hw_contexts = 8;
932 rdev->config.cayman.sq_num_cf_insts = 2; 944 rdev->config.cayman.sq_num_cf_insts = 2;
933 945
934 rdev->config.cayman.sc_prim_fifo_size = 0x40; 946 rdev->config.cayman.sc_prim_fifo_size = 0x40;
diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c
index cdc003085a76..49c4d48f54d6 100644
--- a/drivers/gpu/drm/radeon/ni_dpm.c
+++ b/drivers/gpu/drm/radeon/ni_dpm.c
@@ -785,8 +785,8 @@ static void ni_apply_state_adjust_rules(struct radeon_device *rdev,
785 struct ni_ps *ps = ni_get_ps(rps); 785 struct ni_ps *ps = ni_get_ps(rps);
786 struct radeon_clock_and_voltage_limits *max_limits; 786 struct radeon_clock_and_voltage_limits *max_limits;
787 bool disable_mclk_switching; 787 bool disable_mclk_switching;
788 u32 mclk, sclk; 788 u32 mclk;
789 u16 vddc, vddci; 789 u16 vddci;
790 u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc; 790 u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc;
791 int i; 791 int i;
792 792
@@ -839,24 +839,14 @@ static void ni_apply_state_adjust_rules(struct radeon_device *rdev,
839 839
840 /* XXX validate the min clocks required for display */ 840 /* XXX validate the min clocks required for display */
841 841
842 /* adjust low state */
842 if (disable_mclk_switching) { 843 if (disable_mclk_switching) {
843 mclk = ps->performance_levels[ps->performance_level_count - 1].mclk; 844 ps->performance_levels[0].mclk =
844 sclk = ps->performance_levels[0].sclk; 845 ps->performance_levels[ps->performance_level_count - 1].mclk;
845 vddc = ps->performance_levels[0].vddc; 846 ps->performance_levels[0].vddci =
846 vddci = ps->performance_levels[ps->performance_level_count - 1].vddci; 847 ps->performance_levels[ps->performance_level_count - 1].vddci;
847 } else {
848 sclk = ps->performance_levels[0].sclk;
849 mclk = ps->performance_levels[0].mclk;
850 vddc = ps->performance_levels[0].vddc;
851 vddci = ps->performance_levels[0].vddci;
852 } 848 }
853 849
854 /* adjusted low state */
855 ps->performance_levels[0].sclk = sclk;
856 ps->performance_levels[0].mclk = mclk;
857 ps->performance_levels[0].vddc = vddc;
858 ps->performance_levels[0].vddci = vddci;
859
860 btc_skip_blacklist_clocks(rdev, max_limits->sclk, max_limits->mclk, 850 btc_skip_blacklist_clocks(rdev, max_limits->sclk, max_limits->mclk,
861 &ps->performance_levels[0].sclk, 851 &ps->performance_levels[0].sclk,
862 &ps->performance_levels[0].mclk); 852 &ps->performance_levels[0].mclk);
@@ -868,11 +858,15 @@ static void ni_apply_state_adjust_rules(struct radeon_device *rdev,
868 ps->performance_levels[i].vddc = ps->performance_levels[i - 1].vddc; 858 ps->performance_levels[i].vddc = ps->performance_levels[i - 1].vddc;
869 } 859 }
870 860
861 /* adjust remaining states */
871 if (disable_mclk_switching) { 862 if (disable_mclk_switching) {
872 mclk = ps->performance_levels[0].mclk; 863 mclk = ps->performance_levels[0].mclk;
864 vddci = ps->performance_levels[0].vddci;
873 for (i = 1; i < ps->performance_level_count; i++) { 865 for (i = 1; i < ps->performance_level_count; i++) {
874 if (mclk < ps->performance_levels[i].mclk) 866 if (mclk < ps->performance_levels[i].mclk)
875 mclk = ps->performance_levels[i].mclk; 867 mclk = ps->performance_levels[i].mclk;
868 if (vddci < ps->performance_levels[i].vddci)
869 vddci = ps->performance_levels[i].vddci;
876 } 870 }
877 for (i = 0; i < ps->performance_level_count; i++) { 871 for (i = 0; i < ps->performance_level_count; i++) {
878 ps->performance_levels[i].mclk = mclk; 872 ps->performance_levels[i].mclk = mclk;
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index 4b89262f3f0e..b7d3ecba43e3 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -304,9 +304,9 @@ void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock)
304 WREG32(DCCG_AUDIO_DTO1_MODULE, dto_modulo); 304 WREG32(DCCG_AUDIO_DTO1_MODULE, dto_modulo);
305 WREG32(DCCG_AUDIO_DTO_SELECT, 1); /* select DTO1 */ 305 WREG32(DCCG_AUDIO_DTO_SELECT, 1); /* select DTO1 */
306 } 306 }
307 } else if (ASIC_IS_DCE3(rdev)) { 307 } else {
308 /* according to the reg specs, this should DCE3.2 only, but in 308 /* according to the reg specs, this should DCE3.2 only, but in
309 * practice it seems to cover DCE3.0/3.1 as well. 309 * practice it seems to cover DCE2.0/3.0/3.1 as well.
310 */ 310 */
311 if (dig->dig_encoder == 0) { 311 if (dig->dig_encoder == 0) {
312 WREG32(DCCG_AUDIO_DTO0_PHASE, base_rate * 100); 312 WREG32(DCCG_AUDIO_DTO0_PHASE, base_rate * 100);
@@ -317,10 +317,6 @@ void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock)
317 WREG32(DCCG_AUDIO_DTO1_MODULE, clock * 100); 317 WREG32(DCCG_AUDIO_DTO1_MODULE, clock * 100);
318 WREG32(DCCG_AUDIO_DTO_SELECT, 1); /* select DTO1 */ 318 WREG32(DCCG_AUDIO_DTO_SELECT, 1); /* select DTO1 */
319 } 319 }
320 } else {
321 /* according to the reg specs, this should be DCE2.0 and DCE3.0/3.1 */
322 WREG32(AUDIO_DTO, AUDIO_DTO_PHASE(base_rate / 10) |
323 AUDIO_DTO_MODULE(clock / 10));
324 } 320 }
325} 321}
326 322
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index ecf2a3960c07..45e1f447bc79 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -1940,7 +1940,7 @@ struct si_asic {
1940 unsigned sc_earlyz_tile_fifo_size; 1940 unsigned sc_earlyz_tile_fifo_size;
1941 1941
1942 unsigned num_tile_pipes; 1942 unsigned num_tile_pipes;
1943 unsigned num_backends_per_se; 1943 unsigned backend_enable_mask;
1944 unsigned backend_disable_mask_per_asic; 1944 unsigned backend_disable_mask_per_asic;
1945 unsigned backend_map; 1945 unsigned backend_map;
1946 unsigned num_texture_channel_caches; 1946 unsigned num_texture_channel_caches;
@@ -1970,7 +1970,7 @@ struct cik_asic {
1970 unsigned sc_earlyz_tile_fifo_size; 1970 unsigned sc_earlyz_tile_fifo_size;
1971 1971
1972 unsigned num_tile_pipes; 1972 unsigned num_tile_pipes;
1973 unsigned num_backends_per_se; 1973 unsigned backend_enable_mask;
1974 unsigned backend_disable_mask_per_asic; 1974 unsigned backend_disable_mask_per_asic;
1975 unsigned backend_map; 1975 unsigned backend_map;
1976 unsigned num_texture_channel_caches; 1976 unsigned num_texture_channel_caches;
@@ -2710,10 +2710,10 @@ void radeon_vm_fence(struct radeon_device *rdev,
2710 struct radeon_vm *vm, 2710 struct radeon_vm *vm,
2711 struct radeon_fence *fence); 2711 struct radeon_fence *fence);
2712uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr); 2712uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr);
2713int radeon_vm_bo_update_pte(struct radeon_device *rdev, 2713int radeon_vm_bo_update(struct radeon_device *rdev,
2714 struct radeon_vm *vm, 2714 struct radeon_vm *vm,
2715 struct radeon_bo *bo, 2715 struct radeon_bo *bo,
2716 struct ttm_mem_reg *mem); 2716 struct ttm_mem_reg *mem);
2717void radeon_vm_bo_invalidate(struct radeon_device *rdev, 2717void radeon_vm_bo_invalidate(struct radeon_device *rdev,
2718 struct radeon_bo *bo); 2718 struct radeon_bo *bo);
2719struct radeon_bo_va *radeon_vm_bo_find(struct radeon_vm *vm, 2719struct radeon_bo_va *radeon_vm_bo_find(struct radeon_vm *vm,
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index e354ce94cdd1..c0425bb6223a 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -2021,7 +2021,7 @@ static struct radeon_asic ci_asic = {
2021 .hdmi_setmode = &evergreen_hdmi_setmode, 2021 .hdmi_setmode = &evergreen_hdmi_setmode,
2022 }, 2022 },
2023 .copy = { 2023 .copy = {
2024 .blit = NULL, 2024 .blit = &cik_copy_cpdma,
2025 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, 2025 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
2026 .dma = &cik_copy_dma, 2026 .dma = &cik_copy_dma,
2027 .dma_ring_index = R600_RING_TYPE_DMA_INDEX, 2027 .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
@@ -2122,7 +2122,7 @@ static struct radeon_asic kv_asic = {
2122 .hdmi_setmode = &evergreen_hdmi_setmode, 2122 .hdmi_setmode = &evergreen_hdmi_setmode,
2123 }, 2123 },
2124 .copy = { 2124 .copy = {
2125 .blit = NULL, 2125 .blit = &cik_copy_cpdma,
2126 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, 2126 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
2127 .dma = &cik_copy_dma, 2127 .dma = &cik_copy_dma,
2128 .dma_ring_index = R600_RING_TYPE_DMA_INDEX, 2128 .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index f79ee184ffd5..5c39bf7c3d88 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -2918,7 +2918,7 @@ int radeon_atom_get_memory_pll_dividers(struct radeon_device *rdev,
2918 mpll_param->dll_speed = args.ucDllSpeed; 2918 mpll_param->dll_speed = args.ucDllSpeed;
2919 mpll_param->bwcntl = args.ucBWCntl; 2919 mpll_param->bwcntl = args.ucBWCntl;
2920 mpll_param->vco_mode = 2920 mpll_param->vco_mode =
2921 (args.ucPllCntlFlag & MPLL_CNTL_FLAG_VCO_MODE_MASK) ? 1 : 0; 2921 (args.ucPllCntlFlag & MPLL_CNTL_FLAG_VCO_MODE_MASK);
2922 mpll_param->yclk_sel = 2922 mpll_param->yclk_sel =
2923 (args.ucPllCntlFlag & MPLL_CNTL_FLAG_BYPASS_DQ_PLL) ? 1 : 0; 2923 (args.ucPllCntlFlag & MPLL_CNTL_FLAG_BYPASS_DQ_PLL) ? 1 : 0;
2924 mpll_param->qdr = 2924 mpll_param->qdr =
diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
index 9d302eaeea15..485848f889f5 100644
--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
@@ -33,6 +33,7 @@ static struct radeon_atpx_priv {
33 bool atpx_detected; 33 bool atpx_detected;
34 /* handle for device - and atpx */ 34 /* handle for device - and atpx */
35 acpi_handle dhandle; 35 acpi_handle dhandle;
36 acpi_handle other_handle;
36 struct radeon_atpx atpx; 37 struct radeon_atpx atpx;
37} radeon_atpx_priv; 38} radeon_atpx_priv;
38 39
@@ -451,9 +452,10 @@ static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev)
451 return false; 452 return false;
452 453
453 status = acpi_get_handle(dhandle, "ATPX", &atpx_handle); 454 status = acpi_get_handle(dhandle, "ATPX", &atpx_handle);
454 if (ACPI_FAILURE(status)) 455 if (ACPI_FAILURE(status)) {
456 radeon_atpx_priv.other_handle = dhandle;
455 return false; 457 return false;
456 458 }
457 radeon_atpx_priv.dhandle = dhandle; 459 radeon_atpx_priv.dhandle = dhandle;
458 radeon_atpx_priv.atpx.handle = atpx_handle; 460 radeon_atpx_priv.atpx.handle = atpx_handle;
459 return true; 461 return true;
@@ -530,6 +532,16 @@ static bool radeon_atpx_detect(void)
530 printk(KERN_INFO "VGA switcheroo: detected switching method %s handle\n", 532 printk(KERN_INFO "VGA switcheroo: detected switching method %s handle\n",
531 acpi_method_name); 533 acpi_method_name);
532 radeon_atpx_priv.atpx_detected = true; 534 radeon_atpx_priv.atpx_detected = true;
535 /*
536 * On some systems hotplug events are generated for the device
537 * being switched off when ATPX is executed. They cause ACPI
538 * hotplug to trigger and attempt to remove the device from
539 * the system, which causes it to break down. Prevent that from
540 * happening by setting the no_hotplug flag for the involved
541 * ACPI device objects.
542 */
543 acpi_bus_no_hotplug(radeon_atpx_priv.dhandle);
544 acpi_bus_no_hotplug(radeon_atpx_priv.other_handle);
533 return true; 545 return true;
534 } 546 }
535 return false; 547 return false;
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index f41594b2eeac..0b366169d64d 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -360,13 +360,13 @@ static int radeon_bo_vm_update_pte(struct radeon_cs_parser *parser,
360 struct radeon_bo *bo; 360 struct radeon_bo *bo;
361 int r; 361 int r;
362 362
363 r = radeon_vm_bo_update_pte(rdev, vm, rdev->ring_tmp_bo.bo, &rdev->ring_tmp_bo.bo->tbo.mem); 363 r = radeon_vm_bo_update(rdev, vm, rdev->ring_tmp_bo.bo, &rdev->ring_tmp_bo.bo->tbo.mem);
364 if (r) { 364 if (r) {
365 return r; 365 return r;
366 } 366 }
367 list_for_each_entry(lobj, &parser->validated, tv.head) { 367 list_for_each_entry(lobj, &parser->validated, tv.head) {
368 bo = lobj->bo; 368 bo = lobj->bo;
369 r = radeon_vm_bo_update_pte(parser->rdev, vm, bo, &bo->tbo.mem); 369 r = radeon_vm_bo_update(parser->rdev, vm, bo, &bo->tbo.mem);
370 if (r) { 370 if (r) {
371 return r; 371 return r;
372 } 372 }
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 9f5ff28864f6..db39ea36bf22 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -77,9 +77,10 @@
77 * 2.33.0 - Add SI tiling mode array query 77 * 2.33.0 - Add SI tiling mode array query
78 * 2.34.0 - Add CIK tiling mode array query 78 * 2.34.0 - Add CIK tiling mode array query
79 * 2.35.0 - Add CIK macrotile mode array query 79 * 2.35.0 - Add CIK macrotile mode array query
80 * 2.36.0 - Fix CIK DCE tiling setup
80 */ 81 */
81 #define KMS_DRIVER_MAJOR 2 82 #define KMS_DRIVER_MAJOR 2
82 #define KMS_DRIVER_MINOR 35 83 #define KMS_DRIVER_MINOR 36
83 #define KMS_DRIVER_PATCHLEVEL 0 84 #define KMS_DRIVER_PATCHLEVEL 0
84int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); 85int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
85int radeon_driver_unload_kms(struct drm_device *dev); 86int radeon_driver_unload_kms(struct drm_device *dev);
@@ -508,15 +509,6 @@ static const struct file_operations radeon_driver_kms_fops = {
508#endif 509#endif
509}; 510};
510 511
511
512static void
513radeon_pci_shutdown(struct pci_dev *pdev)
514{
515 struct drm_device *dev = pci_get_drvdata(pdev);
516
517 radeon_driver_unload_kms(dev);
518}
519
520static struct drm_driver kms_driver = { 512static struct drm_driver kms_driver = {
521 .driver_features = 513 .driver_features =
522 DRIVER_USE_AGP | 514 DRIVER_USE_AGP |
@@ -586,7 +578,6 @@ static struct pci_driver radeon_kms_pci_driver = {
586 .probe = radeon_pci_probe, 578 .probe = radeon_pci_probe,
587 .remove = radeon_pci_remove, 579 .remove = radeon_pci_remove,
588 .driver.pm = &radeon_pm_ops, 580 .driver.pm = &radeon_pm_ops,
589 .shutdown = radeon_pci_shutdown,
590}; 581};
591 582
592static int __init radeon_init(void) 583static int __init radeon_init(void)
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
index 543dcfae7e6f..00e0d449021c 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.h
+++ b/drivers/gpu/drm/radeon/radeon_drv.h
@@ -108,9 +108,10 @@
108 * 1.31- Add support for num Z pipes from GET_PARAM 108 * 1.31- Add support for num Z pipes from GET_PARAM
109 * 1.32- fixes for rv740 setup 109 * 1.32- fixes for rv740 setup
110 * 1.33- Add r6xx/r7xx const buffer support 110 * 1.33- Add r6xx/r7xx const buffer support
111 * 1.34- fix evergreen/cayman GS register
111 */ 112 */
112 #define DRIVER_MAJOR 1 113 #define DRIVER_MAJOR 1
113 #define DRIVER_MINOR 33 114 #define DRIVER_MINOR 34
114 #define DRIVER_PATCHLEVEL 0 115 #define DRIVER_PATCHLEVEL 0
115 116
116long radeon_drm_ioctl(struct file *filp, 117long radeon_drm_ioctl(struct file *filp,
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index 3044e504f4ec..96e440061bdb 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -29,6 +29,7 @@
29#include <drm/radeon_drm.h> 29#include <drm/radeon_drm.h>
30#include "radeon.h" 30#include "radeon.h"
31#include "radeon_reg.h" 31#include "radeon_reg.h"
32#include "radeon_trace.h"
32 33
33/* 34/*
34 * GART 35 * GART
@@ -737,6 +738,7 @@ struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
737 for (i = 0; i < 2; ++i) { 738 for (i = 0; i < 2; ++i) {
738 if (choices[i]) { 739 if (choices[i]) {
739 vm->id = choices[i]; 740 vm->id = choices[i];
741 trace_radeon_vm_grab_id(vm->id, ring);
740 return rdev->vm_manager.active[choices[i]]; 742 return rdev->vm_manager.active[choices[i]];
741 } 743 }
742 } 744 }
@@ -1116,7 +1118,7 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev,
1116} 1118}
1117 1119
1118/** 1120/**
1119 * radeon_vm_bo_update_pte - map a bo into the vm page table 1121 * radeon_vm_bo_update - map a bo into the vm page table
1120 * 1122 *
1121 * @rdev: radeon_device pointer 1123 * @rdev: radeon_device pointer
1122 * @vm: requested vm 1124 * @vm: requested vm
@@ -1128,10 +1130,10 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev,
1128 * 1130 *
1129 * Object have to be reserved & global and local mutex must be locked! 1131 * Object have to be reserved & global and local mutex must be locked!
1130 */ 1132 */
1131int radeon_vm_bo_update_pte(struct radeon_device *rdev, 1133int radeon_vm_bo_update(struct radeon_device *rdev,
1132 struct radeon_vm *vm, 1134 struct radeon_vm *vm,
1133 struct radeon_bo *bo, 1135 struct radeon_bo *bo,
1134 struct ttm_mem_reg *mem) 1136 struct ttm_mem_reg *mem)
1135{ 1137{
1136 struct radeon_ib ib; 1138 struct radeon_ib ib;
1137 struct radeon_bo_va *bo_va; 1139 struct radeon_bo_va *bo_va;
@@ -1176,6 +1178,8 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
1176 bo_va->valid = false; 1178 bo_va->valid = false;
1177 } 1179 }
1178 1180
1181 trace_radeon_vm_bo_update(bo_va);
1182
1179 nptes = radeon_bo_ngpu_pages(bo); 1183 nptes = radeon_bo_ngpu_pages(bo);
1180 1184
1181 /* assume two extra pdes in case the mapping overlaps the borders */ 1185 /* assume two extra pdes in case the mapping overlaps the borders */
@@ -1257,7 +1261,7 @@ int radeon_vm_bo_rmv(struct radeon_device *rdev,
1257 mutex_lock(&rdev->vm_manager.lock); 1261 mutex_lock(&rdev->vm_manager.lock);
1258 mutex_lock(&bo_va->vm->mutex); 1262 mutex_lock(&bo_va->vm->mutex);
1259 if (bo_va->soffset) { 1263 if (bo_va->soffset) {
1260 r = radeon_vm_bo_update_pte(rdev, bo_va->vm, bo_va->bo, NULL); 1264 r = radeon_vm_bo_update(rdev, bo_va->vm, bo_va->bo, NULL);
1261 } 1265 }
1262 mutex_unlock(&rdev->vm_manager.lock); 1266 mutex_unlock(&rdev->vm_manager.lock);
1263 list_del(&bo_va->vm_list); 1267 list_del(&bo_va->vm_list);
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 55d0b474bd37..21d593c0ecaf 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -461,6 +461,15 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
461 case RADEON_INFO_SI_CP_DMA_COMPUTE: 461 case RADEON_INFO_SI_CP_DMA_COMPUTE:
462 *value = 1; 462 *value = 1;
463 break; 463 break;
464 case RADEON_INFO_SI_BACKEND_ENABLED_MASK:
465 if (rdev->family >= CHIP_BONAIRE) {
466 *value = rdev->config.cik.backend_enable_mask;
467 } else if (rdev->family >= CHIP_TAHITI) {
468 *value = rdev->config.si.backend_enable_mask;
469 } else {
470 DRM_DEBUG_KMS("BACKEND_ENABLED_MASK is si+ only!\n");
471 }
472 break;
464 default: 473 default:
465 DRM_DEBUG_KMS("Invalid request %d\n", info->request); 474 DRM_DEBUG_KMS("Invalid request %d\n", info->request);
466 return -EINVAL; 475 return -EINVAL;
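The new RADEON_INFO_SI_BACKEND_ENABLED_MASK request lets userspace (the radeonsi Mesa driver, for instance) read back which render backends survived harvesting. A minimal sketch of how such a query could look from userspace, assuming libdrm headers that already carry the matching uapi define from the same series; the helper name and the open render-node fd are illustrative only:

#include <string.h>
#include <stdint.h>
#include <xf86drm.h>
#include <radeon_drm.h>

/* Hypothetical helper: ask the kernel for the RB enable mask exposed above. */
static int query_backend_mask(int fd, uint32_t *mask)
{
	struct drm_radeon_info info;

	memset(&info, 0, sizeof(info));
	info.request = RADEON_INFO_SI_BACKEND_ENABLED_MASK;
	info.value = (uintptr_t)mask;	/* kernel copies the mask to this address */

	return drmCommandWriteRead(fd, DRM_RADEON_INFO, &info, sizeof(info));
}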
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index d1385ccc672c..984097b907ef 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -537,8 +537,7 @@ static ssize_t radeon_hwmon_show_temp(struct device *dev,
537 struct device_attribute *attr, 537 struct device_attribute *attr,
538 char *buf) 538 char *buf)
539{ 539{
540 struct drm_device *ddev = dev_get_drvdata(dev); 540 struct radeon_device *rdev = dev_get_drvdata(dev);
541 struct radeon_device *rdev = ddev->dev_private;
542 int temp; 541 int temp;
543 542
544 if (rdev->asic->pm.get_temperature) 543 if (rdev->asic->pm.get_temperature)
@@ -553,8 +552,7 @@ static ssize_t radeon_hwmon_show_temp_thresh(struct device *dev,
553 struct device_attribute *attr, 552 struct device_attribute *attr,
554 char *buf) 553 char *buf)
555{ 554{
556 struct drm_device *ddev = dev_get_drvdata(dev); 555 struct radeon_device *rdev = dev_get_drvdata(dev);
557 struct radeon_device *rdev = ddev->dev_private;
558 int hyst = to_sensor_dev_attr(attr)->index; 556 int hyst = to_sensor_dev_attr(attr)->index;
559 int temp; 557 int temp;
560 558
@@ -566,23 +564,14 @@ static ssize_t radeon_hwmon_show_temp_thresh(struct device *dev,
566 return snprintf(buf, PAGE_SIZE, "%d\n", temp); 564 return snprintf(buf, PAGE_SIZE, "%d\n", temp);
567} 565}
568 566
569static ssize_t radeon_hwmon_show_name(struct device *dev,
570 struct device_attribute *attr,
571 char *buf)
572{
573 return sprintf(buf, "radeon\n");
574}
575
576static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, radeon_hwmon_show_temp, NULL, 0); 567static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, radeon_hwmon_show_temp, NULL, 0);
577static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, radeon_hwmon_show_temp_thresh, NULL, 0); 568static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, radeon_hwmon_show_temp_thresh, NULL, 0);
578static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, radeon_hwmon_show_temp_thresh, NULL, 1); 569static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, radeon_hwmon_show_temp_thresh, NULL, 1);
579static SENSOR_DEVICE_ATTR(name, S_IRUGO, radeon_hwmon_show_name, NULL, 0);
580 570
581static struct attribute *hwmon_attributes[] = { 571static struct attribute *hwmon_attributes[] = {
582 &sensor_dev_attr_temp1_input.dev_attr.attr, 572 &sensor_dev_attr_temp1_input.dev_attr.attr,
583 &sensor_dev_attr_temp1_crit.dev_attr.attr, 573 &sensor_dev_attr_temp1_crit.dev_attr.attr,
584 &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr, 574 &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
585 &sensor_dev_attr_name.dev_attr.attr,
586 NULL 575 NULL
587}; 576};
588 577
@@ -590,8 +579,7 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
590 struct attribute *attr, int index) 579 struct attribute *attr, int index)
591{ 580{
592 struct device *dev = container_of(kobj, struct device, kobj); 581 struct device *dev = container_of(kobj, struct device, kobj);
593 struct drm_device *ddev = dev_get_drvdata(dev); 582 struct radeon_device *rdev = dev_get_drvdata(dev);
594 struct radeon_device *rdev = ddev->dev_private;
595 583
596 /* Skip limit attributes if DPM is not enabled */ 584 /* Skip limit attributes if DPM is not enabled */
597 if (rdev->pm.pm_method != PM_METHOD_DPM && 585 if (rdev->pm.pm_method != PM_METHOD_DPM &&
@@ -607,11 +595,15 @@ static const struct attribute_group hwmon_attrgroup = {
607 .is_visible = hwmon_attributes_visible, 595 .is_visible = hwmon_attributes_visible,
608}; 596};
609 597
598static const struct attribute_group *hwmon_groups[] = {
599 &hwmon_attrgroup,
600 NULL
601};
602
610static int radeon_hwmon_init(struct radeon_device *rdev) 603static int radeon_hwmon_init(struct radeon_device *rdev)
611{ 604{
612 int err = 0; 605 int err = 0;
613 606 struct device *hwmon_dev;
614 rdev->pm.int_hwmon_dev = NULL;
615 607
616 switch (rdev->pm.int_thermal_type) { 608 switch (rdev->pm.int_thermal_type) {
617 case THERMAL_TYPE_RV6XX: 609 case THERMAL_TYPE_RV6XX:
@@ -624,20 +616,13 @@ static int radeon_hwmon_init(struct radeon_device *rdev)
624 case THERMAL_TYPE_KV: 616 case THERMAL_TYPE_KV:
625 if (rdev->asic->pm.get_temperature == NULL) 617 if (rdev->asic->pm.get_temperature == NULL)
626 return err; 618 return err;
627 rdev->pm.int_hwmon_dev = hwmon_device_register(rdev->dev); 619 hwmon_dev = hwmon_device_register_with_groups(rdev->dev,
628 if (IS_ERR(rdev->pm.int_hwmon_dev)) { 620 "radeon", rdev,
629 err = PTR_ERR(rdev->pm.int_hwmon_dev); 621 hwmon_groups);
622 if (IS_ERR(hwmon_dev)) {
623 err = PTR_ERR(hwmon_dev);
630 dev_err(rdev->dev, 624 dev_err(rdev->dev,
631 "Unable to register hwmon device: %d\n", err); 625 "Unable to register hwmon device: %d\n", err);
632 break;
633 }
634 dev_set_drvdata(rdev->pm.int_hwmon_dev, rdev->ddev);
635 err = sysfs_create_group(&rdev->pm.int_hwmon_dev->kobj,
636 &hwmon_attrgroup);
637 if (err) {
638 dev_err(rdev->dev,
639 "Unable to create hwmon sysfs file: %d\n", err);
640 hwmon_device_unregister(rdev->dev);
641 } 626 }
642 break; 627 break;
643 default: 628 default:
@@ -647,14 +632,6 @@ static int radeon_hwmon_init(struct radeon_device *rdev)
647 return err; 632 return err;
648} 633}
649 634
650static void radeon_hwmon_fini(struct radeon_device *rdev)
651{
652 if (rdev->pm.int_hwmon_dev) {
653 sysfs_remove_group(&rdev->pm.int_hwmon_dev->kobj, &hwmon_attrgroup);
654 hwmon_device_unregister(rdev->pm.int_hwmon_dev);
655 }
656}
657
658static void radeon_dpm_thermal_work_handler(struct work_struct *work) 635static void radeon_dpm_thermal_work_handler(struct work_struct *work)
659{ 636{
660 struct radeon_device *rdev = 637 struct radeon_device *rdev =
@@ -1337,8 +1314,6 @@ static void radeon_pm_fini_old(struct radeon_device *rdev)
1337 1314
1338 if (rdev->pm.power_state) 1315 if (rdev->pm.power_state)
1339 kfree(rdev->pm.power_state); 1316 kfree(rdev->pm.power_state);
1340
1341 radeon_hwmon_fini(rdev);
1342} 1317}
1343 1318
1344static void radeon_pm_fini_dpm(struct radeon_device *rdev) 1319static void radeon_pm_fini_dpm(struct radeon_device *rdev)
@@ -1358,8 +1333,6 @@ static void radeon_pm_fini_dpm(struct radeon_device *rdev)
1358 1333
1359 if (rdev->pm.power_state) 1334 if (rdev->pm.power_state)
1360 kfree(rdev->pm.power_state); 1335 kfree(rdev->pm.power_state);
1361
1362 radeon_hwmon_fini(rdev);
1363} 1336}
1364 1337
1365void radeon_pm_fini(struct radeon_device *rdev) 1338void radeon_pm_fini(struct radeon_device *rdev)
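The hwmon conversion above replaces the manual register / sysfs_create_group / unregister sequence with hwmon_device_register_with_groups(), which creates the "name" attribute itself and ties the attribute groups and driver data to the new device, so the explicit name attribute and radeon_hwmon_fini() are no longer needed. A minimal driver-side sketch of the same pattern; struct foo_device and foo_read_temp_mC() are placeholders, not radeon code:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

/* One read-only temperature attribute; drvdata comes back via dev_get_drvdata(). */
static ssize_t foo_show_temp(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct foo_device *foo = dev_get_drvdata(dev);	/* drvdata passed below */

	return snprintf(buf, PAGE_SIZE, "%d\n", foo_read_temp_mC(foo));
}
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, foo_show_temp, NULL, 0);

static struct attribute *foo_hwmon_attrs[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	NULL
};
ATTRIBUTE_GROUPS(foo_hwmon);

static int foo_hwmon_init(struct device *parent, struct foo_device *foo)
{
	/* Creates the hwmon device, its "name" attribute and all groups at once. */
	struct device *hwmon_dev =
		hwmon_device_register_with_groups(parent, "foo", foo,
						  foo_hwmon_groups);

	return IS_ERR(hwmon_dev) ? PTR_ERR(hwmon_dev) : 0;
}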
diff --git a/drivers/gpu/drm/radeon/radeon_trace.h b/drivers/gpu/drm/radeon/radeon_trace.h
index 9f0e18172b6e..0473257d4078 100644
--- a/drivers/gpu/drm/radeon/radeon_trace.h
+++ b/drivers/gpu/drm/radeon/radeon_trace.h
@@ -47,6 +47,39 @@ TRACE_EVENT(radeon_cs,
47 __entry->fences) 47 __entry->fences)
48); 48);
49 49
50TRACE_EVENT(radeon_vm_grab_id,
51 TP_PROTO(unsigned vmid, int ring),
52 TP_ARGS(vmid, ring),
53 TP_STRUCT__entry(
54 __field(u32, vmid)
55 __field(u32, ring)
56 ),
57
58 TP_fast_assign(
59 __entry->vmid = vmid;
60 __entry->ring = ring;
61 ),
62 TP_printk("vmid=%u, ring=%u", __entry->vmid, __entry->ring)
63);
64
65TRACE_EVENT(radeon_vm_bo_update,
66 TP_PROTO(struct radeon_bo_va *bo_va),
67 TP_ARGS(bo_va),
68 TP_STRUCT__entry(
69 __field(u64, soffset)
70 __field(u64, eoffset)
71 __field(u32, flags)
72 ),
73
74 TP_fast_assign(
75 __entry->soffset = bo_va->soffset;
76 __entry->eoffset = bo_va->eoffset;
77 __entry->flags = bo_va->flags;
78 ),
79 TP_printk("soffs=%010llx, eoffs=%010llx, flags=%08x",
80 __entry->soffset, __entry->eoffset, __entry->flags)
81);
82
50TRACE_EVENT(radeon_vm_set_page, 83TRACE_EVENT(radeon_vm_set_page,
51 TP_PROTO(uint64_t pe, uint64_t addr, unsigned count, 84 TP_PROTO(uint64_t pe, uint64_t addr, unsigned count,
52 uint32_t incr, uint32_t flags), 85 uint32_t incr, uint32_t flags),
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index 373d088bac66..b9c0529b4a2e 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -473,7 +473,7 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
473 return -EINVAL; 473 return -EINVAL;
474 } 474 }
475 475
476 if ((start >> 28) != (end >> 28)) { 476 if ((start >> 28) != ((end - 1) >> 28)) {
477 DRM_ERROR("reloc %LX-%LX crossing 256MB boundary!\n", 477 DRM_ERROR("reloc %LX-%LX crossing 256MB boundary!\n",
478 start, end); 478 start, end);
479 return -EINVAL; 479 return -EINVAL;
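The UVD relocation check previously compared the one-past-the-end address, so a message buffer that ended exactly on a 256MB boundary was rejected even though it never crosses one; comparing the last byte (end - 1) fixes that. A stand-alone illustration of the corrected test, with made-up addresses:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

/* end is exclusive, exactly as in radeon_uvd_cs_reloc() above. */
static bool crosses_256mb(uint64_t start, uint64_t end)
{
	return (start >> 28) != ((end - 1) >> 28);
}

int main(void)
{
	/* Buffer ending exactly on the 256MB boundary: legal, now accepted. */
	printf("%d\n", crosses_256mb(0x0fff0000ULL, 0x10000000ULL));	/* 0 */
	/* Buffer genuinely straddling the boundary: still rejected. */
	printf("%d\n", crosses_256mb(0x0fff0000ULL, 0x10000001ULL));	/* 1 */
	return 0;
}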
diff --git a/drivers/gpu/drm/radeon/reg_srcs/cayman b/drivers/gpu/drm/radeon/reg_srcs/cayman
index a072fa8c46b0..d46b58d078aa 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/cayman
+++ b/drivers/gpu/drm/radeon/reg_srcs/cayman
@@ -21,7 +21,7 @@ cayman 0x9400
21 0x000089AC VGT_COMPUTE_THREAD_GOURP_SIZE 21 0x000089AC VGT_COMPUTE_THREAD_GOURP_SIZE
22 0x000089B0 VGT_HS_OFFCHIP_PARAM 22 0x000089B0 VGT_HS_OFFCHIP_PARAM
23 0x00008A14 PA_CL_ENHANCE 23 0x00008A14 PA_CL_ENHANCE
24 0x00008A60 PA_SC_LINE_STIPPLE_VALUE 24 0x00008A60 PA_SU_LINE_STIPPLE_VALUE
25 0x00008B10 PA_SC_LINE_STIPPLE_STATE 25 0x00008B10 PA_SC_LINE_STIPPLE_STATE
26 0x00008BF0 PA_SC_ENHANCE 26 0x00008BF0 PA_SC_ENHANCE
27 0x00008D8C SQ_DYN_GPR_CNTL_PS_FLUSH_REQ 27 0x00008D8C SQ_DYN_GPR_CNTL_PS_FLUSH_REQ
@@ -532,7 +532,7 @@ cayman 0x9400
532 0x00028B84 PA_SU_POLY_OFFSET_FRONT_OFFSET 532 0x00028B84 PA_SU_POLY_OFFSET_FRONT_OFFSET
533 0x00028B88 PA_SU_POLY_OFFSET_BACK_SCALE 533 0x00028B88 PA_SU_POLY_OFFSET_BACK_SCALE
534 0x00028B8C PA_SU_POLY_OFFSET_BACK_OFFSET 534 0x00028B8C PA_SU_POLY_OFFSET_BACK_OFFSET
535 0x00028B74 VGT_GS_INSTANCE_CNT 535 0x00028B90 VGT_GS_INSTANCE_CNT
536 0x00028BD4 PA_SC_CENTROID_PRIORITY_0 536 0x00028BD4 PA_SC_CENTROID_PRIORITY_0
537 0x00028BD8 PA_SC_CENTROID_PRIORITY_1 537 0x00028BD8 PA_SC_CENTROID_PRIORITY_1
538 0x00028BDC PA_SC_LINE_CNTL 538 0x00028BDC PA_SC_LINE_CNTL
diff --git a/drivers/gpu/drm/radeon/reg_srcs/evergreen b/drivers/gpu/drm/radeon/reg_srcs/evergreen
index b912a37689bf..57745c8761c8 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/evergreen
+++ b/drivers/gpu/drm/radeon/reg_srcs/evergreen
@@ -22,7 +22,7 @@ evergreen 0x9400
22 0x000089A4 VGT_COMPUTE_START_Z 22 0x000089A4 VGT_COMPUTE_START_Z
23 0x000089AC VGT_COMPUTE_THREAD_GOURP_SIZE 23 0x000089AC VGT_COMPUTE_THREAD_GOURP_SIZE
24 0x00008A14 PA_CL_ENHANCE 24 0x00008A14 PA_CL_ENHANCE
25 0x00008A60 PA_SC_LINE_STIPPLE_VALUE 25 0x00008A60 PA_SU_LINE_STIPPLE_VALUE
26 0x00008B10 PA_SC_LINE_STIPPLE_STATE 26 0x00008B10 PA_SC_LINE_STIPPLE_STATE
27 0x00008BF0 PA_SC_ENHANCE 27 0x00008BF0 PA_SC_ENHANCE
28 0x00008D8C SQ_DYN_GPR_CNTL_PS_FLUSH_REQ 28 0x00008D8C SQ_DYN_GPR_CNTL_PS_FLUSH_REQ
@@ -545,7 +545,7 @@ evergreen 0x9400
545 0x00028B84 PA_SU_POLY_OFFSET_FRONT_OFFSET 545 0x00028B84 PA_SU_POLY_OFFSET_FRONT_OFFSET
546 0x00028B88 PA_SU_POLY_OFFSET_BACK_SCALE 546 0x00028B88 PA_SU_POLY_OFFSET_BACK_SCALE
547 0x00028B8C PA_SU_POLY_OFFSET_BACK_OFFSET 547 0x00028B8C PA_SU_POLY_OFFSET_BACK_OFFSET
548 0x00028B74 VGT_GS_INSTANCE_CNT 548 0x00028B90 VGT_GS_INSTANCE_CNT
549 0x00028C00 PA_SC_LINE_CNTL 549 0x00028C00 PA_SC_LINE_CNTL
550 0x00028C08 PA_SU_VTX_CNTL 550 0x00028C08 PA_SU_VTX_CNTL
551 0x00028C0C PA_CL_GB_VERT_CLIP_ADJ 551 0x00028C0C PA_CL_GB_VERT_CLIP_ADJ
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index 1c560629575a..e7dab069cccf 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -162,6 +162,16 @@ static void rs690_mc_init(struct radeon_device *rdev)
162 base = RREG32_MC(R_000100_MCCFG_FB_LOCATION); 162 base = RREG32_MC(R_000100_MCCFG_FB_LOCATION);
163 base = G_000100_MC_FB_START(base) << 16; 163 base = G_000100_MC_FB_START(base) << 16;
164 rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); 164 rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
165 /* Some boards seem to be configured for 128MB of sideport memory,
166 * but really only have 64MB. Just skip the sideport and use
167 * UMA memory.
168 */
169 if (rdev->mc.igp_sideport_enabled &&
170 (rdev->mc.real_vram_size == (384 * 1024 * 1024))) {
171 base += 128 * 1024 * 1024;
172 rdev->mc.real_vram_size -= 128 * 1024 * 1024;
173 rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
174 }
165 175
166 /* Use K8 direct mapping for fast fb access. */ 176 /* Use K8 direct mapping for fast fb access. */
167 rdev->fastfb_working = false; 177 rdev->fastfb_working = false;
diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c
index 913b025ae9b3..374499db20c7 100644
--- a/drivers/gpu/drm/radeon/rv770_dpm.c
+++ b/drivers/gpu/drm/radeon/rv770_dpm.c
@@ -2328,6 +2328,12 @@ void rv770_get_engine_memory_ss(struct radeon_device *rdev)
2328 pi->mclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss, 2328 pi->mclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss,
2329 ASIC_INTERNAL_MEMORY_SS, 0); 2329 ASIC_INTERNAL_MEMORY_SS, 0);
2330 2330
2331 /* disable ss, causes hangs on some cayman boards */
2332 if (rdev->family == CHIP_CAYMAN) {
2333 pi->sclk_ss = false;
2334 pi->mclk_ss = false;
2335 }
2336
2331 if (pi->sclk_ss || pi->mclk_ss) 2337 if (pi->sclk_ss || pi->mclk_ss)
2332 pi->dynamic_ss = true; 2338 pi->dynamic_ss = true;
2333 else 2339 else
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 6a64ccaa0695..85e1edfaa3be 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -2811,7 +2811,7 @@ static void si_setup_spi(struct radeon_device *rdev,
2811} 2811}
2812 2812
2813static u32 si_get_rb_disabled(struct radeon_device *rdev, 2813static u32 si_get_rb_disabled(struct radeon_device *rdev,
2814 u32 max_rb_num, u32 se_num, 2814 u32 max_rb_num_per_se,
2815 u32 sh_per_se) 2815 u32 sh_per_se)
2816{ 2816{
2817 u32 data, mask; 2817 u32 data, mask;
@@ -2825,14 +2825,14 @@ static u32 si_get_rb_disabled(struct radeon_device *rdev,
2825 2825
2826 data >>= BACKEND_DISABLE_SHIFT; 2826 data >>= BACKEND_DISABLE_SHIFT;
2827 2827
2828 mask = si_create_bitmask(max_rb_num / se_num / sh_per_se); 2828 mask = si_create_bitmask(max_rb_num_per_se / sh_per_se);
2829 2829
2830 return data & mask; 2830 return data & mask;
2831} 2831}
2832 2832
2833static void si_setup_rb(struct radeon_device *rdev, 2833static void si_setup_rb(struct radeon_device *rdev,
2834 u32 se_num, u32 sh_per_se, 2834 u32 se_num, u32 sh_per_se,
2835 u32 max_rb_num) 2835 u32 max_rb_num_per_se)
2836{ 2836{
2837 int i, j; 2837 int i, j;
2838 u32 data, mask; 2838 u32 data, mask;
@@ -2842,19 +2842,21 @@ static void si_setup_rb(struct radeon_device *rdev,
2842 for (i = 0; i < se_num; i++) { 2842 for (i = 0; i < se_num; i++) {
2843 for (j = 0; j < sh_per_se; j++) { 2843 for (j = 0; j < sh_per_se; j++) {
2844 si_select_se_sh(rdev, i, j); 2844 si_select_se_sh(rdev, i, j);
2845 data = si_get_rb_disabled(rdev, max_rb_num, se_num, sh_per_se); 2845 data = si_get_rb_disabled(rdev, max_rb_num_per_se, sh_per_se);
2846 disabled_rbs |= data << ((i * sh_per_se + j) * TAHITI_RB_BITMAP_WIDTH_PER_SH); 2846 disabled_rbs |= data << ((i * sh_per_se + j) * TAHITI_RB_BITMAP_WIDTH_PER_SH);
2847 } 2847 }
2848 } 2848 }
2849 si_select_se_sh(rdev, 0xffffffff, 0xffffffff); 2849 si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
2850 2850
2851 mask = 1; 2851 mask = 1;
2852 for (i = 0; i < max_rb_num; i++) { 2852 for (i = 0; i < max_rb_num_per_se * se_num; i++) {
2853 if (!(disabled_rbs & mask)) 2853 if (!(disabled_rbs & mask))
2854 enabled_rbs |= mask; 2854 enabled_rbs |= mask;
2855 mask <<= 1; 2855 mask <<= 1;
2856 } 2856 }
2857 2857
2858 rdev->config.si.backend_enable_mask = enabled_rbs;
2859
2858 for (i = 0; i < se_num; i++) { 2860 for (i = 0; i < se_num; i++) {
2859 si_select_se_sh(rdev, i, 0xffffffff); 2861 si_select_se_sh(rdev, i, 0xffffffff);
2860 data = 0; 2862 data = 0;
@@ -3882,8 +3884,15 @@ static int si_mc_init(struct radeon_device *rdev)
3882 rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0); 3884 rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
3883 rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0); 3885 rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
3884 /* size in MB on si */ 3886 /* size in MB on si */
3885 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL; 3887 tmp = RREG32(CONFIG_MEMSIZE);
3886 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL; 3888 /* some boards may have garbage in the upper 16 bits */
3889 if (tmp & 0xffff0000) {
3890 DRM_INFO("Probable bad vram size: 0x%08x\n", tmp);
3891 if (tmp & 0xffff)
3892 tmp &= 0xffff;
3893 }
3894 rdev->mc.mc_vram_size = tmp * 1024ULL * 1024ULL;
3895 rdev->mc.real_vram_size = rdev->mc.mc_vram_size;
3887 rdev->mc.visible_vram_size = rdev->mc.aper_size; 3896 rdev->mc.visible_vram_size = rdev->mc.aper_size;
3888 si_vram_gtt_location(rdev, &rdev->mc); 3897 si_vram_gtt_location(rdev, &rdev->mc);
3889 radeon_update_bandwidth_info(rdev); 3898 radeon_update_bandwidth_info(rdev);
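The CONFIG_MEMSIZE register reports the VRAM size in MiB, and on some boards the upper 16 bits contain garbage; the hunk above keeps only the low half unless doing so would leave nothing usable. A stand-alone sketch of the same sanitizing logic, with made-up register values:

#include <stdio.h>
#include <stdint.h>

/* Mirrors the CONFIG_MEMSIZE sanitizing in si_mc_init() above (size in MiB). */
static uint32_t sanitize_vram_mb(uint32_t tmp)
{
	if (tmp & 0xffff0000) {		/* upper half looks like garbage */
		if (tmp & 0xffff)	/* only drop it if the low half is usable */
			tmp &= 0xffff;
	}
	return tmp;
}

int main(void)
{
	printf("%u MiB\n", sanitize_vram_mb(0xdead0800));	/* -> 2048 MiB */
	printf("%u MiB\n", sanitize_vram_mb(0x00000400));	/* -> 1024 MiB */
	return 0;
}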
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index 28e178137718..07eba596d458 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -135,11 +135,11 @@ int tegra_drm_submit(struct tegra_drm_context *context,
135 unsigned int num_relocs = args->num_relocs; 135 unsigned int num_relocs = args->num_relocs;
136 unsigned int num_waitchks = args->num_waitchks; 136 unsigned int num_waitchks = args->num_waitchks;
137 struct drm_tegra_cmdbuf __user *cmdbufs = 137 struct drm_tegra_cmdbuf __user *cmdbufs =
138 (void * __user)(uintptr_t)args->cmdbufs; 138 (void __user *)(uintptr_t)args->cmdbufs;
139 struct drm_tegra_reloc __user *relocs = 139 struct drm_tegra_reloc __user *relocs =
140 (void * __user)(uintptr_t)args->relocs; 140 (void __user *)(uintptr_t)args->relocs;
141 struct drm_tegra_waitchk __user *waitchks = 141 struct drm_tegra_waitchk __user *waitchks =
142 (void * __user)(uintptr_t)args->waitchks; 142 (void __user *)(uintptr_t)args->waitchks;
143 struct drm_tegra_syncpt syncpt; 143 struct drm_tegra_syncpt syncpt;
144 struct host1x_job *job; 144 struct host1x_job *job;
145 int err; 145 int err;
@@ -163,9 +163,10 @@ int tegra_drm_submit(struct tegra_drm_context *context,
163 struct drm_tegra_cmdbuf cmdbuf; 163 struct drm_tegra_cmdbuf cmdbuf;
164 struct host1x_bo *bo; 164 struct host1x_bo *bo;
165 165
166 err = copy_from_user(&cmdbuf, cmdbufs, sizeof(cmdbuf)); 166 if (copy_from_user(&cmdbuf, cmdbufs, sizeof(cmdbuf))) {
167 if (err) 167 err = -EFAULT;
168 goto fail; 168 goto fail;
169 }
169 170
170 bo = host1x_bo_lookup(drm, file, cmdbuf.handle); 171 bo = host1x_bo_lookup(drm, file, cmdbuf.handle);
171 if (!bo) { 172 if (!bo) {
@@ -178,10 +179,11 @@ int tegra_drm_submit(struct tegra_drm_context *context,
178 cmdbufs++; 179 cmdbufs++;
179 } 180 }
180 181
181 err = copy_from_user(job->relocarray, relocs, 182 if (copy_from_user(job->relocarray, relocs,
182 sizeof(*relocs) * num_relocs); 183 sizeof(*relocs) * num_relocs)) {
183 if (err) 184 err = -EFAULT;
184 goto fail; 185 goto fail;
186 }
185 187
186 while (num_relocs--) { 188 while (num_relocs--) {
187 struct host1x_reloc *reloc = &job->relocarray[num_relocs]; 189 struct host1x_reloc *reloc = &job->relocarray[num_relocs];
@@ -199,15 +201,17 @@ int tegra_drm_submit(struct tegra_drm_context *context,
199 } 201 }
200 } 202 }
201 203
202 err = copy_from_user(job->waitchk, waitchks, 204 if (copy_from_user(job->waitchk, waitchks,
203 sizeof(*waitchks) * num_waitchks); 205 sizeof(*waitchks) * num_waitchks)) {
204 if (err) 206 err = -EFAULT;
205 goto fail; 207 goto fail;
208 }
206 209
207 err = copy_from_user(&syncpt, (void * __user)(uintptr_t)args->syncpts, 210 if (copy_from_user(&syncpt, (void __user *)(uintptr_t)args->syncpts,
208 sizeof(syncpt)); 211 sizeof(syncpt))) {
209 if (err) 212 err = -EFAULT;
210 goto fail; 213 goto fail;
214 }
211 215
212 job->is_addr_reg = context->client->ops->is_addr_reg; 216 job->is_addr_reg = context->client->ops->is_addr_reg;
213 job->syncpt_incrs = syncpt.incrs; 217 job->syncpt_incrs = syncpt.incrs;
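copy_from_user() returns the number of bytes it failed to copy, not a negative errno, so the old code could propagate a positive byte count as an error value; the hunks above translate any non-zero remainder into -EFAULT instead. A minimal sketch of the idiom; struct foo_args is a placeholder, not a Tegra structure:

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/uaccess.h>

struct foo_args {
	u32 handle;
	u32 flags;
};

static int foo_fetch_args(struct foo_args *dst, const void __user *src)
{
	/* Non-zero return means some bytes were left uncopied: report -EFAULT. */
	if (copy_from_user(dst, src, sizeof(*dst)))
		return -EFAULT;

	return 0;
}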
@@ -573,7 +577,7 @@ static void tegra_debugfs_cleanup(struct drm_minor *minor)
573} 577}
574#endif 578#endif
575 579
576struct drm_driver tegra_drm_driver = { 580static struct drm_driver tegra_drm_driver = {
577 .driver_features = DRIVER_MODESET | DRIVER_GEM, 581 .driver_features = DRIVER_MODESET | DRIVER_GEM,
578 .load = tegra_drm_load, 582 .load = tegra_drm_load,
579 .unload = tegra_drm_unload, 583 .unload = tegra_drm_unload,
diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h
index fdfe259ed7f8..7da0b923131f 100644
--- a/drivers/gpu/drm/tegra/drm.h
+++ b/drivers/gpu/drm/tegra/drm.h
@@ -116,7 +116,7 @@ host1x_client_to_dc(struct host1x_client *client)
116 116
117static inline struct tegra_dc *to_tegra_dc(struct drm_crtc *crtc) 117static inline struct tegra_dc *to_tegra_dc(struct drm_crtc *crtc)
118{ 118{
119 return container_of(crtc, struct tegra_dc, base); 119 return crtc ? container_of(crtc, struct tegra_dc, base) : NULL;
120} 120}
121 121
122static inline void tegra_dc_writel(struct tegra_dc *dc, unsigned long value, 122static inline void tegra_dc_writel(struct tegra_dc *dc, unsigned long value,
diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
index 490f7719e317..a3835e7de184 100644
--- a/drivers/gpu/drm/tegra/fb.c
+++ b/drivers/gpu/drm/tegra/fb.c
@@ -247,7 +247,7 @@ static int tegra_fbdev_probe(struct drm_fb_helper *helper,
247 info->var.yoffset * fb->pitches[0]; 247 info->var.yoffset * fb->pitches[0];
248 248
249 drm->mode_config.fb_base = (resource_size_t)bo->paddr; 249 drm->mode_config.fb_base = (resource_size_t)bo->paddr;
250 info->screen_base = bo->vaddr + offset; 250 info->screen_base = (void __iomem *)bo->vaddr + offset;
251 info->screen_size = size; 251 info->screen_size = size;
252 info->fix.smem_start = (unsigned long)(bo->paddr + offset); 252 info->fix.smem_start = (unsigned long)(bo->paddr + offset);
253 info->fix.smem_len = size; 253 info->fix.smem_len = size;
diff --git a/drivers/gpu/drm/tegra/rgb.c b/drivers/gpu/drm/tegra/rgb.c
index ba47ca4fb880..3b29018913a5 100644
--- a/drivers/gpu/drm/tegra/rgb.c
+++ b/drivers/gpu/drm/tegra/rgb.c
@@ -14,6 +14,8 @@
14 14
15struct tegra_rgb { 15struct tegra_rgb {
16 struct tegra_output output; 16 struct tegra_output output;
17 struct tegra_dc *dc;
18
17 struct clk *clk_parent; 19 struct clk *clk_parent;
18 struct clk *clk; 20 struct clk *clk;
19}; 21};
@@ -84,18 +86,18 @@ static void tegra_dc_write_regs(struct tegra_dc *dc,
84 86
85static int tegra_output_rgb_enable(struct tegra_output *output) 87static int tegra_output_rgb_enable(struct tegra_output *output)
86{ 88{
87 struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc); 89 struct tegra_rgb *rgb = to_rgb(output);
88 90
89 tegra_dc_write_regs(dc, rgb_enable, ARRAY_SIZE(rgb_enable)); 91 tegra_dc_write_regs(rgb->dc, rgb_enable, ARRAY_SIZE(rgb_enable));
90 92
91 return 0; 93 return 0;
92} 94}
93 95
94static int tegra_output_rgb_disable(struct tegra_output *output) 96static int tegra_output_rgb_disable(struct tegra_output *output)
95{ 97{
96 struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc); 98 struct tegra_rgb *rgb = to_rgb(output);
97 99
98 tegra_dc_write_regs(dc, rgb_disable, ARRAY_SIZE(rgb_disable)); 100 tegra_dc_write_regs(rgb->dc, rgb_disable, ARRAY_SIZE(rgb_disable));
99 101
100 return 0; 102 return 0;
101} 103}
@@ -146,6 +148,7 @@ int tegra_dc_rgb_probe(struct tegra_dc *dc)
146 148
147 rgb->output.dev = dc->dev; 149 rgb->output.dev = dc->dev;
148 rgb->output.of_node = np; 150 rgb->output.of_node = np;
151 rgb->dc = dc;
149 152
150 err = tegra_output_probe(&rgb->output); 153 err = tegra_output_probe(&rgb->output);
151 if (err < 0) 154 if (err < 0)
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 15b86a94949d..406152152315 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -353,7 +353,8 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
353 * Don't move nonexistent data. Clear destination instead. 353 * Don't move nonexistent data. Clear destination instead.
354 */ 354 */
355 if (old_iomap == NULL && 355 if (old_iomap == NULL &&
356 (ttm == NULL || ttm->state == tt_unpopulated)) { 356 (ttm == NULL || (ttm->state == tt_unpopulated &&
357 !(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) {
357 memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE); 358 memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE);
358 goto out2; 359 goto out2;
359 } 360 }
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index b249ab9b1eb2..6440eeac22d2 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -169,9 +169,9 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
169 } 169 }
170 170
171 page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) + 171 page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
172 drm_vma_node_start(&bo->vma_node) - vma->vm_pgoff; 172 vma->vm_pgoff - drm_vma_node_start(&bo->vma_node);
173 page_last = vma_pages(vma) + 173 page_last = vma_pages(vma) + vma->vm_pgoff -
174 drm_vma_node_start(&bo->vma_node) - vma->vm_pgoff; 174 drm_vma_node_start(&bo->vma_node);
175 175
176 if (unlikely(page_offset >= bo->num_pages)) { 176 if (unlikely(page_offset >= bo->num_pages)) {
177 retval = VM_FAULT_SIGBUS; 177 retval = VM_FAULT_SIGBUS;
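The fault handler derives which BO page was touched from the faulting address, the mapping's vm_pgoff and the BO's start in the DRM address space; with the operands in the old order the result underflowed whenever vm_pgoff exceeded the node start, e.g. for a mapping that begins partway into the BO. A stand-alone illustration with made-up page numbers:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* All values are page counts; names mirror the TTM/VMA fields above. */
	uint64_t node_start = 0x10000;	/* drm_vma_node_start(&bo->vma_node) */
	uint64_t vm_pgoff   = 0x10004;	/* mmap began 4 pages into the BO    */
	uint64_t fault_page = 2;	/* (address - vm_start) >> PAGE_SHIFT */

	uint64_t old_off = fault_page + node_start - vm_pgoff;	/* wraps around */
	uint64_t new_off = fault_page + vm_pgoff - node_start;	/* 6, correct   */

	printf("old=%llu new=%llu\n",
	       (unsigned long long)old_off, (unsigned long long)new_off);
	return 0;
}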
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index 24ffbe990736..8d67b943ac05 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -125,6 +125,12 @@ static int udl_gem_get_pages(struct udl_gem_object *obj, gfp_t gfpmask)
125 125
126static void udl_gem_put_pages(struct udl_gem_object *obj) 126static void udl_gem_put_pages(struct udl_gem_object *obj)
127{ 127{
128 if (obj->base.import_attach) {
129 drm_free_large(obj->pages);
130 obj->pages = NULL;
131 return;
132 }
133
128 drm_gem_put_pages(&obj->base, obj->pages, false, false); 134 drm_gem_put_pages(&obj->base, obj->pages, false, false);
129 obj->pages = NULL; 135 obj->pages = NULL;
130} 136}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index 7776e6f0aef6..0489c6152482 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -150,6 +150,8 @@ struct vmw_ttm_tt {
150 bool mapped; 150 bool mapped;
151}; 151};
152 152
153const size_t vmw_tt_size = sizeof(struct vmw_ttm_tt);
154
153/** 155/**
154 * Helper functions to advance a struct vmw_piter iterator. 156 * Helper functions to advance a struct vmw_piter iterator.
155 * 157 *
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index db85985c7086..20890ad8408b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -615,6 +615,7 @@ extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);
615 * TTM buffer object driver - vmwgfx_buffer.c 615 * TTM buffer object driver - vmwgfx_buffer.c
616 */ 616 */
617 617
618extern const size_t vmw_tt_size;
618extern struct ttm_placement vmw_vram_placement; 619extern struct ttm_placement vmw_vram_placement;
619extern struct ttm_placement vmw_vram_ne_placement; 620extern struct ttm_placement vmw_vram_ne_placement;
620extern struct ttm_placement vmw_vram_sys_placement; 621extern struct ttm_placement vmw_vram_sys_placement;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index a51f48e3e917..45d5b5ab6ca9 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -68,6 +68,9 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
68 SVGA_FIFO_3D_HWVERSION)); 68 SVGA_FIFO_3D_HWVERSION));
69 break; 69 break;
70 } 70 }
71 case DRM_VMW_PARAM_MAX_SURF_MEMORY:
72 param->value = dev_priv->memory_size;
73 break;
71 default: 74 default:
72 DRM_ERROR("Illegal vmwgfx get param request: %d\n", 75 DRM_ERROR("Illegal vmwgfx get param request: %d\n",
73 param->param); 76 param->param);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index ecb3d867b426..03f1c2038631 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -75,6 +75,7 @@ void vmw_display_unit_cleanup(struct vmw_display_unit *du)
75 vmw_surface_unreference(&du->cursor_surface); 75 vmw_surface_unreference(&du->cursor_surface);
76 if (du->cursor_dmabuf) 76 if (du->cursor_dmabuf)
77 vmw_dmabuf_unreference(&du->cursor_dmabuf); 77 vmw_dmabuf_unreference(&du->cursor_dmabuf);
78 drm_sysfs_connector_remove(&du->connector);
78 drm_crtc_cleanup(&du->crtc); 79 drm_crtc_cleanup(&du->crtc);
79 drm_encoder_cleanup(&du->encoder); 80 drm_encoder_cleanup(&du->encoder);
80 drm_connector_cleanup(&du->connector); 81 drm_connector_cleanup(&du->connector);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index 79f7e8e60529..a055a26819c2 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -260,6 +260,7 @@ static int vmw_ldu_crtc_set_config(struct drm_mode_set *set)
260 connector->encoder = NULL; 260 connector->encoder = NULL;
261 encoder->crtc = NULL; 261 encoder->crtc = NULL;
262 crtc->fb = NULL; 262 crtc->fb = NULL;
263 crtc->enabled = false;
263 264
264 vmw_ldu_del_active(dev_priv, ldu); 265 vmw_ldu_del_active(dev_priv, ldu);
265 266
@@ -285,6 +286,7 @@ static int vmw_ldu_crtc_set_config(struct drm_mode_set *set)
285 crtc->x = set->x; 286 crtc->x = set->x;
286 crtc->y = set->y; 287 crtc->y = set->y;
287 crtc->mode = *mode; 288 crtc->mode = *mode;
289 crtc->enabled = true;
288 290
289 vmw_ldu_add_active(dev_priv, ldu, vfb); 291 vmw_ldu_add_active(dev_priv, ldu, vfb);
290 292
@@ -369,6 +371,8 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
369 encoder->possible_crtcs = (1 << unit); 371 encoder->possible_crtcs = (1 << unit);
370 encoder->possible_clones = 0; 372 encoder->possible_clones = 0;
371 373
374 (void) drm_sysfs_connector_add(connector);
375
372 drm_crtc_init(dev, crtc, &vmw_legacy_crtc_funcs); 376 drm_crtc_init(dev, crtc, &vmw_legacy_crtc_funcs);
373 377
374 drm_mode_crtc_set_gamma_size(crtc, 256); 378 drm_mode_crtc_set_gamma_size(crtc, 256);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index efe2b74c5eb1..9b5ea2ac7ddf 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -352,6 +352,38 @@ int vmw_user_lookup_handle(struct vmw_private *dev_priv,
352/** 352/**
353 * Buffer management. 353 * Buffer management.
354 */ 354 */
355
356/**
357 * vmw_dmabuf_acc_size - Calculate the pinned memory usage of buffers
358 *
359 * @dev_priv: Pointer to a struct vmw_private identifying the device.
360 * @size: The requested buffer size.
361 * @user: Whether this is an ordinary dma buffer or a user dma buffer.
362 */
363static size_t vmw_dmabuf_acc_size(struct vmw_private *dev_priv, size_t size,
364 bool user)
365{
366 static size_t struct_size, user_struct_size;
367 size_t num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
368 size_t page_array_size = ttm_round_pot(num_pages * sizeof(void *));
369
370 if (unlikely(struct_size == 0)) {
371 size_t backend_size = ttm_round_pot(vmw_tt_size);
372
373 struct_size = backend_size +
374 ttm_round_pot(sizeof(struct vmw_dma_buffer));
375 user_struct_size = backend_size +
376 ttm_round_pot(sizeof(struct vmw_user_dma_buffer));
377 }
378
379 if (dev_priv->map_mode == vmw_dma_alloc_coherent)
380 page_array_size +=
381 ttm_round_pot(num_pages * sizeof(dma_addr_t));
382
383 return ((user) ? user_struct_size : struct_size) +
384 page_array_size;
385}
386
355void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo) 387void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
356{ 388{
357 struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo); 389 struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
@@ -359,6 +391,13 @@ void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
359 kfree(vmw_bo); 391 kfree(vmw_bo);
360} 392}
361 393
394static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
395{
396 struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
397
398 ttm_prime_object_kfree(vmw_user_bo, prime);
399}
400
362int vmw_dmabuf_init(struct vmw_private *dev_priv, 401int vmw_dmabuf_init(struct vmw_private *dev_priv,
363 struct vmw_dma_buffer *vmw_bo, 402 struct vmw_dma_buffer *vmw_bo,
364 size_t size, struct ttm_placement *placement, 403 size_t size, struct ttm_placement *placement,
@@ -368,28 +407,23 @@ int vmw_dmabuf_init(struct vmw_private *dev_priv,
368 struct ttm_bo_device *bdev = &dev_priv->bdev; 407 struct ttm_bo_device *bdev = &dev_priv->bdev;
369 size_t acc_size; 408 size_t acc_size;
370 int ret; 409 int ret;
410 bool user = (bo_free == &vmw_user_dmabuf_destroy);
371 411
372 BUG_ON(!bo_free); 412 BUG_ON(!bo_free && (!user && (bo_free != vmw_dmabuf_bo_free)));
373 413
374 acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct vmw_dma_buffer)); 414 acc_size = vmw_dmabuf_acc_size(dev_priv, size, user);
375 memset(vmw_bo, 0, sizeof(*vmw_bo)); 415 memset(vmw_bo, 0, sizeof(*vmw_bo));
376 416
377 INIT_LIST_HEAD(&vmw_bo->res_list); 417 INIT_LIST_HEAD(&vmw_bo->res_list);
378 418
379 ret = ttm_bo_init(bdev, &vmw_bo->base, size, 419 ret = ttm_bo_init(bdev, &vmw_bo->base, size,
380 ttm_bo_type_device, placement, 420 (user) ? ttm_bo_type_device :
421 ttm_bo_type_kernel, placement,
381 0, interruptible, 422 0, interruptible,
382 NULL, acc_size, NULL, bo_free); 423 NULL, acc_size, NULL, bo_free);
383 return ret; 424 return ret;
384} 425}
385 426
386static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
387{
388 struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
389
390 ttm_prime_object_kfree(vmw_user_bo, prime);
391}
392
393static void vmw_user_dmabuf_release(struct ttm_base_object **p_base) 427static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
394{ 428{
395 struct vmw_user_dma_buffer *vmw_user_bo; 429 struct vmw_user_dma_buffer *vmw_user_bo;
@@ -781,54 +815,55 @@ err_ref:
781} 815}
782 816
783 817
818/**
819 * vmw_dumb_create - Create a dumb kms buffer
820 *
821 * @file_priv: Pointer to a struct drm_file identifying the caller.
822 * @dev: Pointer to the drm device.
823 * @args: Pointer to a struct drm_mode_create_dumb structure
824 *
825 * This is a driver callback for the core drm create_dumb functionality.
826 * Note that this is very similar to the vmw_dmabuf_alloc ioctl, except
827 * that the arguments have a different format.
828 */
784int vmw_dumb_create(struct drm_file *file_priv, 829int vmw_dumb_create(struct drm_file *file_priv,
785 struct drm_device *dev, 830 struct drm_device *dev,
786 struct drm_mode_create_dumb *args) 831 struct drm_mode_create_dumb *args)
787{ 832{
788 struct vmw_private *dev_priv = vmw_priv(dev); 833 struct vmw_private *dev_priv = vmw_priv(dev);
789 struct vmw_master *vmaster = vmw_master(file_priv->master); 834 struct vmw_master *vmaster = vmw_master(file_priv->master);
790 struct vmw_user_dma_buffer *vmw_user_bo; 835 struct vmw_dma_buffer *dma_buf;
791 struct ttm_buffer_object *tmp;
792 int ret; 836 int ret;
793 837
794 args->pitch = args->width * ((args->bpp + 7) / 8); 838 args->pitch = args->width * ((args->bpp + 7) / 8);
795 args->size = args->pitch * args->height; 839 args->size = args->pitch * args->height;
796 840
797 vmw_user_bo = kzalloc(sizeof(*vmw_user_bo), GFP_KERNEL);
798 if (vmw_user_bo == NULL)
799 return -ENOMEM;
800
801 ret = ttm_read_lock(&vmaster->lock, true); 841 ret = ttm_read_lock(&vmaster->lock, true);
802 if (ret != 0) { 842 if (unlikely(ret != 0))
803 kfree(vmw_user_bo);
804 return ret; 843 return ret;
805 }
806 844
807 ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, args->size, 845 ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
808 &vmw_vram_sys_placement, true, 846 args->size, false, &args->handle,
809 &vmw_user_dmabuf_destroy); 847 &dma_buf);
810 if (ret != 0)
811 goto out_no_dmabuf;
812
813 tmp = ttm_bo_reference(&vmw_user_bo->dma.base);
814 ret = ttm_prime_object_init(vmw_fpriv(file_priv)->tfile,
815 args->size,
816 &vmw_user_bo->prime,
817 false,
818 ttm_buffer_type,
819 &vmw_user_dmabuf_release, NULL);
820 if (unlikely(ret != 0)) 848 if (unlikely(ret != 0))
821 goto out_no_base_object; 849 goto out_no_dmabuf;
822
823 args->handle = vmw_user_bo->prime.base.hash.key;
824 850
825out_no_base_object: 851 vmw_dmabuf_unreference(&dma_buf);
826 ttm_bo_unref(&tmp);
827out_no_dmabuf: 852out_no_dmabuf:
828 ttm_read_unlock(&vmaster->lock); 853 ttm_read_unlock(&vmaster->lock);
829 return ret; 854 return ret;
830} 855}
831 856
857/**
858 * vmw_dumb_map_offset - Return the address space offset of a dumb buffer
859 *
860 * @file_priv: Pointer to a struct drm_file identifying the caller.
861 * @dev: Pointer to the drm device.
862 * @handle: Handle identifying the dumb buffer.
863 * @offset: The address space offset returned.
864 *
865 * This is a driver callback for the core drm dumb_map_offset functionality.
866 */
832int vmw_dumb_map_offset(struct drm_file *file_priv, 867int vmw_dumb_map_offset(struct drm_file *file_priv,
833 struct drm_device *dev, uint32_t handle, 868 struct drm_device *dev, uint32_t handle,
834 uint64_t *offset) 869 uint64_t *offset)
@@ -846,6 +881,15 @@ int vmw_dumb_map_offset(struct drm_file *file_priv,
846 return 0; 881 return 0;
847} 882}
848 883
884/**
885 * vmw_dumb_destroy - Destroy a dumb boffer
886 *
887 * @file_priv: Pointer to a struct drm_file identifying the caller.
888 * @dev: Pointer to the drm device.
889 * @handle: Handle identifying the dumb buffer.
890 *
891 * This is a driver callback for the core drm dumb_destroy functionality.
892 */
849int vmw_dumb_destroy(struct drm_file *file_priv, 893int vmw_dumb_destroy(struct drm_file *file_priv,
850 struct drm_device *dev, 894 struct drm_device *dev,
851 uint32_t handle) 895 uint32_t handle)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index 26387c3d5a21..22406c8651ea 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -310,6 +310,7 @@ static int vmw_sou_crtc_set_config(struct drm_mode_set *set)
310 crtc->fb = NULL; 310 crtc->fb = NULL;
311 crtc->x = 0; 311 crtc->x = 0;
312 crtc->y = 0; 312 crtc->y = 0;
313 crtc->enabled = false;
313 314
314 vmw_sou_del_active(dev_priv, sou); 315 vmw_sou_del_active(dev_priv, sou);
315 316
@@ -370,6 +371,7 @@ static int vmw_sou_crtc_set_config(struct drm_mode_set *set)
370 crtc->fb = NULL; 371 crtc->fb = NULL;
371 crtc->x = 0; 372 crtc->x = 0;
372 crtc->y = 0; 373 crtc->y = 0;
374 crtc->enabled = false;
373 375
374 return ret; 376 return ret;
375 } 377 }
@@ -382,6 +384,7 @@ static int vmw_sou_crtc_set_config(struct drm_mode_set *set)
382 crtc->fb = fb; 384 crtc->fb = fb;
383 crtc->x = set->x; 385 crtc->x = set->x;
384 crtc->y = set->y; 386 crtc->y = set->y;
387 crtc->enabled = true;
385 388
386 return 0; 389 return 0;
387} 390}
@@ -464,6 +467,8 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
464 encoder->possible_crtcs = (1 << unit); 467 encoder->possible_crtcs = (1 << unit);
465 encoder->possible_clones = 0; 468 encoder->possible_clones = 0;
466 469
470 (void) drm_sysfs_connector_add(connector);
471
467 drm_crtc_init(dev, crtc, &vmw_screen_object_crtc_funcs); 472 drm_crtc_init(dev, crtc, &vmw_screen_object_crtc_funcs);
468 473
469 drm_mode_crtc_set_gamma_size(crtc, 256); 474 drm_mode_crtc_set_gamma_size(crtc, 256);
diff --git a/drivers/gpu/host1x/bus.c b/drivers/gpu/host1x/bus.c
index 509383f8be03..6a929591aa73 100644
--- a/drivers/gpu/host1x/bus.c
+++ b/drivers/gpu/host1x/bus.c
@@ -19,6 +19,7 @@
19#include <linux/of.h> 19#include <linux/of.h>
20#include <linux/slab.h> 20#include <linux/slab.h>
21 21
22#include "bus.h"
22#include "dev.h" 23#include "dev.h"
23 24
24static DEFINE_MUTEX(clients_lock); 25static DEFINE_MUTEX(clients_lock);
@@ -257,7 +258,7 @@ static int host1x_unregister_client(struct host1x *host1x,
257 return -ENODEV; 258 return -ENODEV;
258} 259}
259 260
260struct bus_type host1x_bus_type = { 261static struct bus_type host1x_bus_type = {
261 .name = "host1x", 262 .name = "host1x",
262}; 263};
263 264
@@ -301,7 +302,7 @@ static int host1x_device_add(struct host1x *host1x,
301 device->dev.coherent_dma_mask = host1x->dev->coherent_dma_mask; 302 device->dev.coherent_dma_mask = host1x->dev->coherent_dma_mask;
302 device->dev.dma_mask = &device->dev.coherent_dma_mask; 303 device->dev.dma_mask = &device->dev.coherent_dma_mask;
303 device->dev.release = host1x_device_release; 304 device->dev.release = host1x_device_release;
304 dev_set_name(&device->dev, driver->name); 305 dev_set_name(&device->dev, "%s", driver->name);
305 device->dev.bus = &host1x_bus_type; 306 device->dev.bus = &host1x_bus_type;
306 device->dev.parent = host1x->dev; 307 device->dev.parent = host1x->dev;
307 308
diff --git a/drivers/gpu/host1x/hw/cdma_hw.c b/drivers/gpu/host1x/hw/cdma_hw.c
index 37e2a63241a9..6b09b71940c2 100644
--- a/drivers/gpu/host1x/hw/cdma_hw.c
+++ b/drivers/gpu/host1x/hw/cdma_hw.c
@@ -54,8 +54,8 @@ static void cdma_timeout_cpu_incr(struct host1x_cdma *cdma, u32 getptr,
54 u32 *p = (u32 *)((u32)pb->mapped + getptr); 54 u32 *p = (u32 *)((u32)pb->mapped + getptr);
55 *(p++) = HOST1X_OPCODE_NOP; 55 *(p++) = HOST1X_OPCODE_NOP;
56 *(p++) = HOST1X_OPCODE_NOP; 56 *(p++) = HOST1X_OPCODE_NOP;
57 dev_dbg(host1x->dev, "%s: NOP at 0x%x\n", __func__, 57 dev_dbg(host1x->dev, "%s: NOP at %#llx\n", __func__,
58 pb->phys + getptr); 58 (u64)pb->phys + getptr);
59 getptr = (getptr + 8) & (pb->size_bytes - 1); 59 getptr = (getptr + 8) & (pb->size_bytes - 1);
60 } 60 }
61 wmb(); 61 wmb();
diff --git a/drivers/gpu/host1x/hw/debug_hw.c b/drivers/gpu/host1x/hw/debug_hw.c
index 640c75ca5a8b..f72c873eff81 100644
--- a/drivers/gpu/host1x/hw/debug_hw.c
+++ b/drivers/gpu/host1x/hw/debug_hw.c
@@ -163,8 +163,8 @@ static void show_channel_gathers(struct output *o, struct host1x_cdma *cdma)
163 continue; 163 continue;
164 } 164 }
165 165
166 host1x_debug_output(o, " GATHER at %08x+%04x, %d words\n", 166 host1x_debug_output(o, " GATHER at %#llx+%04x, %d words\n",
167 g->base, g->offset, g->words); 167 (u64)g->base, g->offset, g->words);
168 168
169 show_gather(o, g->base + g->offset, g->words, cdma, 169 show_gather(o, g->base + g->offset, g->words, cdma,
170 g->base, mapped); 170 g->base, mapped);
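pb->phys and g->base are dma_addr_t, which is only 32 bits wide on some Tegra configurations; passing such a value to a %llx conversion without widening is undefined, hence the explicit (u64) casts in the two host1x hunks above. (The kernel also offers the %pad specifier, which prints a dma_addr_t through a pointer and avoids the cast.) A small stand-alone reminder of the pattern in plain C:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t base = 0x80001000u;	/* stand-in for a 32-bit dma_addr_t */

	/* Widen explicitly before handing the value to a 64-bit conversion,
	 * just as the host1x hunks above do with the (u64) cast. */
	printf("GATHER at %#llx\n", (unsigned long long)base);
	return 0;
}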
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 329fbb9b5976..34e2d39d4ce8 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -460,6 +460,7 @@ config HID_MULTITOUCH
460 - Stantum multitouch panels 460 - Stantum multitouch panels
461 - Touch International Panels 461 - Touch International Panels
462 - Unitec Panels 462 - Unitec Panels
463 - Wistron optical touch panels
463 - XAT optical touch panels 464 - XAT optical touch panels
464 - Xiroku optical touch panels 465 - Xiroku optical touch panels
465 - Zytronic touch panels 466 - Zytronic touch panels
diff --git a/drivers/hid/hid-appleir.c b/drivers/hid/hid-appleir.c
index a42e6a394c5e..0e6a42d37eb6 100644
--- a/drivers/hid/hid-appleir.c
+++ b/drivers/hid/hid-appleir.c
@@ -297,6 +297,9 @@ static int appleir_probe(struct hid_device *hid, const struct hid_device_id *id)
297 297
298 appleir->hid = hid; 298 appleir->hid = hid;
299 299
300 /* force input as some remotes bypass the input registration */
301 hid->quirks |= HID_QUIRK_HIDINPUT_FORCE;
302
300 spin_lock_init(&appleir->lock); 303 spin_lock_init(&appleir->lock);
301 setup_timer(&appleir->key_up_timer, 304 setup_timer(&appleir->key_up_timer,
302 key_up_tick, (unsigned long) appleir); 305 key_up_tick, (unsigned long) appleir);
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 8c10f2742233..253fe23ef7fe 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1723,6 +1723,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
1723 { HID_USB_DEVICE(USB_VENDOR_ID_KENSINGTON, USB_DEVICE_ID_KS_SLIMBLADE) }, 1723 { HID_USB_DEVICE(USB_VENDOR_ID_KENSINGTON, USB_DEVICE_ID_KS_SLIMBLADE) },
1724 { HID_USB_DEVICE(USB_VENDOR_ID_KEYTOUCH, USB_DEVICE_ID_KEYTOUCH_IEC) }, 1724 { HID_USB_DEVICE(USB_VENDOR_ID_KEYTOUCH, USB_DEVICE_ID_KEYTOUCH_IEC) },
1725 { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_GENIUS_GILA_GAMING_MOUSE) }, 1725 { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_GENIUS_GILA_GAMING_MOUSE) },
1726 { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_GENIUS_MANTICORE) },
1726 { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_GENIUS_GX_IMPERATOR) }, 1727 { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_GENIUS_GX_IMPERATOR) },
1727 { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_ERGO_525V) }, 1728 { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_ERGO_525V) },
1728 { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_I405X) }, 1729 { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_I405X) },
@@ -1879,7 +1880,6 @@ static const struct hid_device_id hid_have_special_driver[] = {
1879 1880
1880 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_BT) }, 1881 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_BT) },
1881 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE) }, 1882 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE) },
1882 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO2, USB_DEVICE_ID_NINTENDO_WIIMOTE) },
1883 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE2) }, 1883 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE2) },
1884 { } 1884 { }
1885}; 1885};
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 76559629568c..f9304cb37154 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -489,6 +489,7 @@
489 #define USB_VENDOR_ID_KYE 0x0458 489 #define USB_VENDOR_ID_KYE 0x0458
490 #define USB_DEVICE_ID_KYE_ERGO_525V 0x0087 490 #define USB_DEVICE_ID_KYE_ERGO_525V 0x0087
491 #define USB_DEVICE_ID_GENIUS_GILA_GAMING_MOUSE 0x0138 491 #define USB_DEVICE_ID_GENIUS_GILA_GAMING_MOUSE 0x0138
492 #define USB_DEVICE_ID_GENIUS_MANTICORE 0x0153
492 #define USB_DEVICE_ID_GENIUS_GX_IMPERATOR 0x4018 493 #define USB_DEVICE_ID_GENIUS_GX_IMPERATOR 0x4018
493 #define USB_DEVICE_ID_KYE_GPEN_560 0x5003 494 #define USB_DEVICE_ID_KYE_GPEN_560 0x5003
494 #define USB_DEVICE_ID_KYE_EASYPEN_I405X 0x5010 495 #define USB_DEVICE_ID_KYE_EASYPEN_I405X 0x5010
@@ -640,7 +641,6 @@
640 #define USB_DEVICE_ID_NEXTWINDOW_TOUCHSCREEN 0x0003 641 #define USB_DEVICE_ID_NEXTWINDOW_TOUCHSCREEN 0x0003
641 642
642 #define USB_VENDOR_ID_NINTENDO 0x057e 643 #define USB_VENDOR_ID_NINTENDO 0x057e
643 #define USB_VENDOR_ID_NINTENDO2 0x054c
644 #define USB_DEVICE_ID_NINTENDO_WIIMOTE 0x0306 644 #define USB_DEVICE_ID_NINTENDO_WIIMOTE 0x0306
645 #define USB_DEVICE_ID_NINTENDO_WIIMOTE2 0x0330 645 #define USB_DEVICE_ID_NINTENDO_WIIMOTE2 0x0330
646 646
@@ -902,6 +902,9 @@
902#define USB_DEVICE_ID_SUPER_DUAL_BOX_PRO 0x8802 902#define USB_DEVICE_ID_SUPER_DUAL_BOX_PRO 0x8802
903#define USB_DEVICE_ID_SUPER_JOY_BOX_5_PRO 0x8804 903#define USB_DEVICE_ID_SUPER_JOY_BOX_5_PRO 0x8804
904 904
905#define USB_VENDOR_ID_WISTRON 0x0fb8
906#define USB_DEVICE_ID_WISTRON_OPTICAL_TOUCH 0x1109
907
905#define USB_VENDOR_ID_X_TENSIONS 0x1ae7 908#define USB_VENDOR_ID_X_TENSIONS 0x1ae7
906#define USB_DEVICE_ID_SPEEDLINK_VAD_CEZANNE 0x9001 909#define USB_DEVICE_ID_SPEEDLINK_VAD_CEZANNE 0x9001
907 910
diff --git a/drivers/hid/hid-kye.c b/drivers/hid/hid-kye.c
index 73845120295e..e77696367591 100644
--- a/drivers/hid/hid-kye.c
+++ b/drivers/hid/hid-kye.c
@@ -342,6 +342,10 @@ static __u8 *kye_report_fixup(struct hid_device *hdev, __u8 *rdesc,
342 rdesc = kye_consumer_control_fixup(hdev, rdesc, rsize, 83, 342 rdesc = kye_consumer_control_fixup(hdev, rdesc, rsize, 83,
343 "Genius Gx Imperator Keyboard"); 343 "Genius Gx Imperator Keyboard");
344 break; 344 break;
345 case USB_DEVICE_ID_GENIUS_MANTICORE:
346 rdesc = kye_consumer_control_fixup(hdev, rdesc, rsize, 104,
347 "Genius Manticore Keyboard");
348 break;
345 } 349 }
346 return rdesc; 350 return rdesc;
347} 351}
@@ -418,6 +422,14 @@ static int kye_probe(struct hid_device *hdev, const struct hid_device_id *id)
418 goto enabling_err; 422 goto enabling_err;
419 } 423 }
420 break; 424 break;
425 case USB_DEVICE_ID_GENIUS_MANTICORE:
426 /*
427 * The manticore keyboard needs to have all the interfaces
428 * opened at least once to be fully functional.
429 */
430 if (hid_hw_open(hdev))
431 hid_hw_close(hdev);
432 break;
421 } 433 }
422 434
423 return 0; 435 return 0;
@@ -439,6 +451,8 @@ static const struct hid_device_id kye_devices[] = {
439 USB_DEVICE_ID_GENIUS_GILA_GAMING_MOUSE) }, 451 USB_DEVICE_ID_GENIUS_GILA_GAMING_MOUSE) },
440 { HID_USB_DEVICE(USB_VENDOR_ID_KYE, 452 { HID_USB_DEVICE(USB_VENDOR_ID_KYE,
441 USB_DEVICE_ID_GENIUS_GX_IMPERATOR) }, 453 USB_DEVICE_ID_GENIUS_GX_IMPERATOR) },
454 { HID_USB_DEVICE(USB_VENDOR_ID_KYE,
455 USB_DEVICE_ID_GENIUS_MANTICORE) },
442 { } 456 { }
443}; 457};
444MODULE_DEVICE_TABLE(hid, kye_devices); 458MODULE_DEVICE_TABLE(hid, kye_devices);
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index a2cedb8ae1c0..d83b1e8b505b 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -1335,6 +1335,12 @@ static const struct hid_device_id mt_devices[] = {
1335 { .driver_data = MT_CLS_NSMU, 1335 { .driver_data = MT_CLS_NSMU,
1336 MT_USB_DEVICE(USB_VENDOR_ID_UNITEC, 1336 MT_USB_DEVICE(USB_VENDOR_ID_UNITEC,
1337 USB_DEVICE_ID_UNITEC_USB_TOUCH_0A19) }, 1337 USB_DEVICE_ID_UNITEC_USB_TOUCH_0A19) },
1338
1339 /* Wistron panels */
1340 { .driver_data = MT_CLS_NSMU,
1341 MT_USB_DEVICE(USB_VENDOR_ID_WISTRON,
1342 USB_DEVICE_ID_WISTRON_OPTICAL_TOUCH) },
1343
1338 /* XAT */ 1344 /* XAT */
1339 { .driver_data = MT_CLS_NSMU, 1345 { .driver_data = MT_CLS_NSMU,
1340 MT_USB_DEVICE(USB_VENDOR_ID_XAT, 1346 MT_USB_DEVICE(USB_VENDOR_ID_XAT,
diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c
index a184e1921c11..8fab82829f8b 100644
--- a/drivers/hid/hid-sensor-hub.c
+++ b/drivers/hid/hid-sensor-hub.c
@@ -112,13 +112,15 @@ static int sensor_hub_get_physical_device_count(
112 112
113static void sensor_hub_fill_attr_info( 113static void sensor_hub_fill_attr_info(
114 struct hid_sensor_hub_attribute_info *info, 114 struct hid_sensor_hub_attribute_info *info,
115 s32 index, s32 report_id, s32 units, s32 unit_expo, s32 size) 115 s32 index, s32 report_id, struct hid_field *field)
116{ 116{
117 info->index = index; 117 info->index = index;
118 info->report_id = report_id; 118 info->report_id = report_id;
119 info->units = units; 119 info->units = field->unit;
120 info->unit_expo = unit_expo; 120 info->unit_expo = field->unit_exponent;
121 info->size = size/8; 121 info->size = (field->report_size * field->report_count)/8;
122 info->logical_minimum = field->logical_minimum;
123 info->logical_maximum = field->logical_maximum;
122} 124}
123 125
124static struct hid_sensor_hub_callbacks *sensor_hub_get_callback( 126static struct hid_sensor_hub_callbacks *sensor_hub_get_callback(
@@ -325,9 +327,7 @@ int sensor_hub_input_get_attribute_info(struct hid_sensor_hub_device *hsdev,
325 if (field->physical == usage_id && 327 if (field->physical == usage_id &&
326 field->logical == attr_usage_id) { 328 field->logical == attr_usage_id) {
327 sensor_hub_fill_attr_info(info, i, report->id, 329 sensor_hub_fill_attr_info(info, i, report->id,
328 field->unit, field->unit_exponent, 330 field);
329 field->report_size *
330 field->report_count);
331 ret = 0; 331 ret = 0;
332 } else { 332 } else {
333 for (j = 0; j < field->maxusage; ++j) { 333 for (j = 0; j < field->maxusage; ++j) {
@@ -336,11 +336,7 @@ int sensor_hub_input_get_attribute_info(struct hid_sensor_hub_device *hsdev,
336 field->usage[j].collection_index == 336 field->usage[j].collection_index ==
337 collection_index) { 337 collection_index) {
338 sensor_hub_fill_attr_info(info, 338 sensor_hub_fill_attr_info(info,
339 i, report->id, 339 i, report->id, field);
340 field->unit,
341 field->unit_exponent,
342 field->report_size *
343 field->report_count);
344 ret = 0; 340 ret = 0;
345 break; 341 break;
346 } 342 }
@@ -573,6 +569,8 @@ static int sensor_hub_probe(struct hid_device *hdev,
573 goto err_free_names; 569 goto err_free_names;
574 } 570 }
575 sd->hid_sensor_hub_client_devs[ 571 sd->hid_sensor_hub_client_devs[
572 sd->hid_sensor_client_cnt].id = PLATFORM_DEVID_AUTO;
573 sd->hid_sensor_hub_client_devs[
576 sd->hid_sensor_client_cnt].name = name; 574 sd->hid_sensor_client_cnt].name = name;
577 sd->hid_sensor_hub_client_devs[ 575 sd->hid_sensor_hub_client_devs[
578 sd->hid_sensor_client_cnt].platform_data = 576 sd->hid_sensor_client_cnt].platform_data =
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
index da551d113762..098af2f84b8c 100644
--- a/drivers/hid/hid-sony.c
+++ b/drivers/hid/hid-sony.c
@@ -225,6 +225,13 @@ static const unsigned int buzz_keymap[] = {
225struct sony_sc { 225struct sony_sc {
226 unsigned long quirks; 226 unsigned long quirks;
227 227
228#ifdef CONFIG_SONY_FF
229 struct work_struct rumble_worker;
230 struct hid_device *hdev;
231 __u8 left;
232 __u8 right;
233#endif
234
228 void *extra; 235 void *extra;
229}; 236};
230 237
@@ -615,9 +622,9 @@ static void buzz_remove(struct hid_device *hdev)
615} 622}
616 623
617#ifdef CONFIG_SONY_FF 624#ifdef CONFIG_SONY_FF
618static int sony_play_effect(struct input_dev *dev, void *data, 625static void sony_rumble_worker(struct work_struct *work)
619 struct ff_effect *effect)
620{ 626{
627 struct sony_sc *sc = container_of(work, struct sony_sc, rumble_worker);
621 unsigned char buf[] = { 628 unsigned char buf[] = {
622 0x01, 629 0x01,
623 0x00, 0xff, 0x00, 0xff, 0x00, 630 0x00, 0xff, 0x00, 0xff, 0x00,
@@ -628,21 +635,28 @@ static int sony_play_effect(struct input_dev *dev, void *data,
628 0xff, 0x27, 0x10, 0x00, 0x32, 635 0xff, 0x27, 0x10, 0x00, 0x32,
629 0x00, 0x00, 0x00, 0x00, 0x00 636 0x00, 0x00, 0x00, 0x00, 0x00
630 }; 637 };
631 __u8 left; 638
632 __u8 right; 639 buf[3] = sc->right;
640 buf[5] = sc->left;
641
642 sc->hdev->hid_output_raw_report(sc->hdev, buf, sizeof(buf),
643 HID_OUTPUT_REPORT);
644}
645
646static int sony_play_effect(struct input_dev *dev, void *data,
647 struct ff_effect *effect)
648{
633 struct hid_device *hid = input_get_drvdata(dev); 649 struct hid_device *hid = input_get_drvdata(dev);
650 struct sony_sc *sc = hid_get_drvdata(hid);
634 651
635 if (effect->type != FF_RUMBLE) 652 if (effect->type != FF_RUMBLE)
636 return 0; 653 return 0;
637 654
638 left = effect->u.rumble.strong_magnitude / 256; 655 sc->left = effect->u.rumble.strong_magnitude / 256;
639 right = effect->u.rumble.weak_magnitude ? 1 : 0; 656 sc->right = effect->u.rumble.weak_magnitude ? 1 : 0;
640
641 buf[3] = right;
642 buf[5] = left;
643 657
644 return hid->hid_output_raw_report(hid, buf, sizeof(buf), 658 schedule_work(&sc->rumble_worker);
645 HID_OUTPUT_REPORT); 659 return 0;
646} 660}
647 661
648static int sony_init_ff(struct hid_device *hdev) 662static int sony_init_ff(struct hid_device *hdev)
@@ -650,16 +664,31 @@ static int sony_init_ff(struct hid_device *hdev)
650 struct hid_input *hidinput = list_entry(hdev->inputs.next, 664 struct hid_input *hidinput = list_entry(hdev->inputs.next,
651 struct hid_input, list); 665 struct hid_input, list);
652 struct input_dev *input_dev = hidinput->input; 666 struct input_dev *input_dev = hidinput->input;
667 struct sony_sc *sc = hid_get_drvdata(hdev);
668
669 sc->hdev = hdev;
670 INIT_WORK(&sc->rumble_worker, sony_rumble_worker);
653 671
654 input_set_capability(input_dev, EV_FF, FF_RUMBLE); 672 input_set_capability(input_dev, EV_FF, FF_RUMBLE);
655 return input_ff_create_memless(input_dev, NULL, sony_play_effect); 673 return input_ff_create_memless(input_dev, NULL, sony_play_effect);
656} 674}
657 675
676static void sony_destroy_ff(struct hid_device *hdev)
677{
678 struct sony_sc *sc = hid_get_drvdata(hdev);
679
680 cancel_work_sync(&sc->rumble_worker);
681}
682
658#else 683#else
659static int sony_init_ff(struct hid_device *hdev) 684static int sony_init_ff(struct hid_device *hdev)
660{ 685{
661 return 0; 686 return 0;
662} 687}
688
689static void sony_destroy_ff(struct hid_device *hdev)
690{
691}
663#endif 692#endif
664 693
665static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id) 694static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
@@ -728,6 +757,8 @@ static void sony_remove(struct hid_device *hdev)
728 if (sc->quirks & BUZZ_CONTROLLER) 757 if (sc->quirks & BUZZ_CONTROLLER)
729 buzz_remove(hdev); 758 buzz_remove(hdev);
730 759
760 sony_destroy_ff(hdev);
761
731 hid_hw_stop(hdev); 762 hid_hw_stop(hdev);
732} 763}
733 764
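The hid-sony hunks above move the blocking hid_output_raw_report() call out of the force-feedback play callback, which can run in atomic context, into a work item that is cancelled on remove. A minimal sketch of that deferral pattern, assuming a driver-private state struct and a hypothetical blocking send helper:

/* Sketch: cache the magnitudes in atomic context, send them from process context. */
struct rumble_state {
	struct work_struct worker;
	struct hid_device *hdev;
	u8 left, right;				/* written by the FF callback */
};

static void rumble_work(struct work_struct *work)
{
	struct rumble_state *st = container_of(work, struct rumble_state, worker);

	send_rumble_report(st->hdev, st->left, st->right);	/* hypothetical helper, may sleep */
}

static int play_effect(struct input_dev *dev, void *data, struct ff_effect *effect)
{
	struct rumble_state *st = get_rumble_state(dev);	/* hypothetical lookup */

	if (effect->type != FF_RUMBLE)
		return 0;

	st->left = effect->u.rumble.strong_magnitude / 256;
	st->right = effect->u.rumble.weak_magnitude ? 1 : 0;
	schedule_work(&st->worker);		/* never blocks in the callback */
	return 0;
}

INIT_WORK(&st->worker, rumble_work) would be done once at setup, mirroring sony_init_ff(), and cancel_work_sync() on the teardown path mirrors sony_destroy_ff() above.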
diff --git a/drivers/hid/hid-wiimote-core.c b/drivers/hid/hid-wiimote-core.c
index 1446f526ee8b..abb20db2b443 100644
--- a/drivers/hid/hid-wiimote-core.c
+++ b/drivers/hid/hid-wiimote-core.c
@@ -834,8 +834,7 @@ static void wiimote_init_set_type(struct wiimote_data *wdata,
834 goto done; 834 goto done;
835 } 835 }
836 836
837 if (vendor == USB_VENDOR_ID_NINTENDO || 837 if (vendor == USB_VENDOR_ID_NINTENDO) {
838 vendor == USB_VENDOR_ID_NINTENDO2) {
839 if (product == USB_DEVICE_ID_NINTENDO_WIIMOTE) { 838 if (product == USB_DEVICE_ID_NINTENDO_WIIMOTE) {
840 devtype = WIIMOTE_DEV_GEN10; 839 devtype = WIIMOTE_DEV_GEN10;
841 goto done; 840 goto done;
@@ -1856,8 +1855,6 @@ static void wiimote_hid_remove(struct hid_device *hdev)
1856static const struct hid_device_id wiimote_hid_devices[] = { 1855static const struct hid_device_id wiimote_hid_devices[] = {
1857 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, 1856 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO,
1858 USB_DEVICE_ID_NINTENDO_WIIMOTE) }, 1857 USB_DEVICE_ID_NINTENDO_WIIMOTE) },
1859 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO2,
1860 USB_DEVICE_ID_NINTENDO_WIIMOTE) },
1861 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, 1858 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO,
1862 USB_DEVICE_ID_NINTENDO_WIIMOTE2) }, 1859 USB_DEVICE_ID_NINTENDO_WIIMOTE2) },
1863 { } 1860 { }
diff --git a/drivers/hid/uhid.c b/drivers/hid/uhid.c
index 93b00d76374c..cedc6da93c19 100644
--- a/drivers/hid/uhid.c
+++ b/drivers/hid/uhid.c
@@ -287,7 +287,7 @@ static int uhid_event_from_user(const char __user *buffer, size_t len,
287 */ 287 */
288 struct uhid_create_req_compat *compat; 288 struct uhid_create_req_compat *compat;
289 289
290 compat = kmalloc(sizeof(*compat), GFP_KERNEL); 290 compat = kzalloc(sizeof(*compat), GFP_KERNEL);
291 if (!compat) 291 if (!compat)
292 return -ENOMEM; 292 return -ENOMEM;
293 293
diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
index 1d7ff46812c3..dafc63c6932d 100644
--- a/drivers/hwmon/asus_atk0110.c
+++ b/drivers/hwmon/asus_atk0110.c
@@ -18,7 +18,6 @@
18#include <linux/err.h> 18#include <linux/err.h>
19 19
20#include <acpi/acpi.h> 20#include <acpi/acpi.h>
21#include <acpi/acpixf.h>
22#include <acpi/acpi_drivers.h> 21#include <acpi/acpi_drivers.h>
23#include <acpi/acpi_bus.h> 22#include <acpi/acpi_bus.h>
24 23
diff --git a/drivers/hwmon/hih6130.c b/drivers/hwmon/hih6130.c
index 2dc37c7c6947..7d68a08baaa8 100644
--- a/drivers/hwmon/hih6130.c
+++ b/drivers/hwmon/hih6130.c
@@ -43,6 +43,7 @@
43 * @last_update: time of last update (jiffies) 43 * @last_update: time of last update (jiffies)
44 * @temperature: cached temperature measurement value 44 * @temperature: cached temperature measurement value
45 * @humidity: cached humidity measurement value 45 * @humidity: cached humidity measurement value
46 * @write_length: length for I2C measurement request
46 */ 47 */
47struct hih6130 { 48struct hih6130 {
48 struct device *hwmon_dev; 49 struct device *hwmon_dev;
@@ -51,6 +52,7 @@ struct hih6130 {
51 unsigned long last_update; 52 unsigned long last_update;
52 int temperature; 53 int temperature;
53 int humidity; 54 int humidity;
55 size_t write_length;
54}; 56};
55 57
56/** 58/**
@@ -121,8 +123,15 @@ static int hih6130_update_measurements(struct i2c_client *client)
121 */ 123 */
122 if (time_after(jiffies, hih6130->last_update + HZ) || !hih6130->valid) { 124 if (time_after(jiffies, hih6130->last_update + HZ) || !hih6130->valid) {
123 125
124 /* write to slave address, no data, to request a measurement */ 126 /*
125 ret = i2c_master_send(client, tmp, 0); 127 * Write to slave address to request a measurement.
 128 * According to the datasheet it should be with no data, but
129 * for systems with I2C bus drivers that do not allow zero
130 * length packets we write one dummy byte to allow sensor
131 * measurements on them.
132 */
133 tmp[0] = 0;
134 ret = i2c_master_send(client, tmp, hih6130->write_length);
126 if (ret < 0) 135 if (ret < 0)
127 goto out; 136 goto out;
128 137
@@ -252,6 +261,9 @@ static int hih6130_probe(struct i2c_client *client,
252 goto fail_remove_sysfs; 261 goto fail_remove_sysfs;
253 } 262 }
254 263
264 if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_QUICK))
265 hih6130->write_length = 1;
266
255 return 0; 267 return 0;
256 268
257fail_remove_sysfs: 269fail_remove_sysfs:
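The hih6130 hunks work around I2C adapters that cannot issue a zero-length write: probe records a one-byte request length when the adapter lacks SMBus Quick support, and the measurement trigger then sends a single dummy byte instead of none. Condensed, and assuming the same client/tmp names as the driver:

/* Sketch: fall back to a 1-byte measurement request on adapters without zero-length writes. */
size_t write_length = 0;
u8 tmp[4] = { 0 };
int ret;

if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_QUICK))
	write_length = 1;		/* adapter cannot send zero-length messages */

ret = i2c_master_send(client, tmp, write_length);	/* kicks off a measurement */
if (ret < 0)
	return ret;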
diff --git a/drivers/hwmon/lm78.c b/drivers/hwmon/lm78.c
index 6cf6bff79003..a2f3b4a365e4 100644
--- a/drivers/hwmon/lm78.c
+++ b/drivers/hwmon/lm78.c
@@ -94,6 +94,8 @@ static inline u8 FAN_TO_REG(long rpm, int div)
94{ 94{
95 if (rpm <= 0) 95 if (rpm <= 0)
96 return 255; 96 return 255;
97 if (rpm > 1350000)
98 return 1;
97 return clamp_val((1350000 + rpm * div / 2) / (rpm * div), 1, 254); 99 return clamp_val((1350000 + rpm * div / 2) / (rpm * div), 1, 254);
98} 100}
99 101
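The lm78 guard (repeated below for sis5595) protects FAN_TO_REG() against very large RPM limits written via sysfs, where the intermediate rpm * div product can overflow before the clamp; anything above 1350000 now maps straight to register value 1, the fastest encodable speed. A standalone illustration of the conversion with the kernel's clamp_val() written out:

#include <stdio.h>

/* Mirrors the fixed conversion: reg = round(1350000 / (rpm * div)), clamped to 1..254. */
static unsigned char fan_to_reg(long rpm, int div)
{
	long val;

	if (rpm <= 0)
		return 255;		/* fan stopped or invalid input */
	if (rpm > 1350000)
		return 1;		/* guard: avoid overflow in rpm * div below */
	val = (1350000 + rpm * div / 2) / (rpm * div);
	if (val < 1)
		val = 1;
	if (val > 254)
		val = 254;
	return (unsigned char)val;
}

int main(void)
{
	printf("%d\n", fan_to_reg(4500, 2));		/* typical fan -> 150 */
	printf("%d\n", fan_to_reg(100000000, 8));	/* huge sysfs write -> pinned to 1 */
	return 0;
}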
diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c
index 4c4c1421bf28..8b8f3aa49726 100644
--- a/drivers/hwmon/lm90.c
+++ b/drivers/hwmon/lm90.c
@@ -1610,12 +1610,14 @@ static int lm90_probe(struct i2c_client *client,
1610 "lm90", client); 1610 "lm90", client);
1611 if (err < 0) { 1611 if (err < 0) {
1612 dev_err(dev, "cannot request IRQ %d\n", client->irq); 1612 dev_err(dev, "cannot request IRQ %d\n", client->irq);
1613 goto exit_remove_files; 1613 goto exit_unregister;
1614 } 1614 }
1615 } 1615 }
1616 1616
1617 return 0; 1617 return 0;
1618 1618
1619exit_unregister:
1620 hwmon_device_unregister(data->hwmon_dev);
1619exit_remove_files: 1621exit_remove_files:
1620 lm90_remove_files(client, data); 1622 lm90_remove_files(client, data);
1621exit_restore: 1623exit_restore:
diff --git a/drivers/hwmon/sis5595.c b/drivers/hwmon/sis5595.c
index 1404e6319deb..72a889702f0d 100644
--- a/drivers/hwmon/sis5595.c
+++ b/drivers/hwmon/sis5595.c
@@ -141,6 +141,8 @@ static inline u8 FAN_TO_REG(long rpm, int div)
141{ 141{
142 if (rpm <= 0) 142 if (rpm <= 0)
143 return 255; 143 return 255;
144 if (rpm > 1350000)
145 return 1;
144 return clamp_val((1350000 + rpm * div / 2) / (rpm * div), 1, 254); 146 return clamp_val((1350000 + rpm * div / 2) / (rpm * div), 1, 254);
145} 147}
146 148
diff --git a/drivers/hwmon/vt8231.c b/drivers/hwmon/vt8231.c
index 0e7017841f7d..aee14e2192f8 100644
--- a/drivers/hwmon/vt8231.c
+++ b/drivers/hwmon/vt8231.c
@@ -145,7 +145,7 @@ static const u8 regtempmin[] = { 0x3a, 0x3e, 0x2c, 0x2e, 0x30, 0x32 };
145 */ 145 */
146static inline u8 FAN_TO_REG(long rpm, int div) 146static inline u8 FAN_TO_REG(long rpm, int div)
147{ 147{
148 if (rpm == 0) 148 if (rpm <= 0 || rpm > 1310720)
149 return 0; 149 return 0;
150 return clamp_val(1310720 / (rpm * div), 1, 255); 150 return clamp_val(1310720 / (rpm * div), 1, 255);
151} 151}
diff --git a/drivers/hwmon/w83l786ng.c b/drivers/hwmon/w83l786ng.c
index edb06cda5a68..6ed76ceb9270 100644
--- a/drivers/hwmon/w83l786ng.c
+++ b/drivers/hwmon/w83l786ng.c
@@ -481,9 +481,11 @@ store_pwm(struct device *dev, struct device_attribute *attr,
481 if (err) 481 if (err)
482 return err; 482 return err;
483 val = clamp_val(val, 0, 255); 483 val = clamp_val(val, 0, 255);
484 val = DIV_ROUND_CLOSEST(val, 0x11);
484 485
485 mutex_lock(&data->update_lock); 486 mutex_lock(&data->update_lock);
486 data->pwm[nr] = val; 487 data->pwm[nr] = val * 0x11;
488 val |= w83l786ng_read_value(client, W83L786NG_REG_PWM[nr]) & 0xf0;
487 w83l786ng_write_value(client, W83L786NG_REG_PWM[nr], val); 489 w83l786ng_write_value(client, W83L786NG_REG_PWM[nr], val);
488 mutex_unlock(&data->update_lock); 490 mutex_unlock(&data->update_lock);
489 return count; 491 return count;
@@ -510,7 +512,7 @@ store_pwm_enable(struct device *dev, struct device_attribute *attr,
510 mutex_lock(&data->update_lock); 512 mutex_lock(&data->update_lock);
511 reg = w83l786ng_read_value(client, W83L786NG_REG_FAN_CFG); 513 reg = w83l786ng_read_value(client, W83L786NG_REG_FAN_CFG);
512 data->pwm_enable[nr] = val; 514 data->pwm_enable[nr] = val;
513 reg &= ~(0x02 << W83L786NG_PWM_ENABLE_SHIFT[nr]); 515 reg &= ~(0x03 << W83L786NG_PWM_ENABLE_SHIFT[nr]);
514 reg |= (val - 1) << W83L786NG_PWM_ENABLE_SHIFT[nr]; 516 reg |= (val - 1) << W83L786NG_PWM_ENABLE_SHIFT[nr];
515 w83l786ng_write_value(client, W83L786NG_REG_FAN_CFG, reg); 517 w83l786ng_write_value(client, W83L786NG_REG_FAN_CFG, reg);
516 mutex_unlock(&data->update_lock); 518 mutex_unlock(&data->update_lock);
@@ -776,9 +778,10 @@ static struct w83l786ng_data *w83l786ng_update_device(struct device *dev)
776 ((pwmcfg >> W83L786NG_PWM_MODE_SHIFT[i]) & 1) 778 ((pwmcfg >> W83L786NG_PWM_MODE_SHIFT[i]) & 1)
777 ? 0 : 1; 779 ? 0 : 1;
778 data->pwm_enable[i] = 780 data->pwm_enable[i] =
779 ((pwmcfg >> W83L786NG_PWM_ENABLE_SHIFT[i]) & 2) + 1; 781 ((pwmcfg >> W83L786NG_PWM_ENABLE_SHIFT[i]) & 3) + 1;
780 data->pwm[i] = w83l786ng_read_value(client, 782 data->pwm[i] =
781 W83L786NG_REG_PWM[i]); 783 (w83l786ng_read_value(client, W83L786NG_REG_PWM[i])
784 & 0x0f) * 0x11;
782 } 785 }
783 786
784 787
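The w83l786ng changes account for the duty register only holding a 4-bit value in its low nibble: a write now rounds the 0-255 sysfs value to 0-15 and preserves the high nibble, and a read scales the nibble back by 0x11 so 0xf reports as 255 again. A self-contained round trip of that arithmetic:

#include <stdio.h>

/* DIV_ROUND_CLOSEST(val, 0x11): map 0..255 onto the 4-bit duty field. */
static unsigned int pwm_to_nibble(unsigned int val)
{
	return (val + 0x11 / 2) / 0x11;
}

/* Reverse mapping used on the read path: 0x0 -> 0, 0xf -> 255. */
static unsigned int nibble_to_pwm(unsigned int reg)
{
	return (reg & 0x0f) * 0x11;
}

int main(void)
{
	unsigned int high_bits = 0x40;			/* example: other config bits in the top nibble */
	unsigned int nib = pwm_to_nibble(200);		/* 200 / 17 rounds to 12 */
	unsigned int reg = high_bits | nib;		/* keep the top nibble, as the driver now does */

	printf("nibble %u -> reported pwm %u\n", nib, nibble_to_pwm(reg));
	return 0;
}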
diff --git a/drivers/i2c/busses/i2c-bcm-kona.c b/drivers/i2c/busses/i2c-bcm-kona.c
index 036cf03aeb61..18a74a6751a9 100644
--- a/drivers/i2c/busses/i2c-bcm-kona.c
+++ b/drivers/i2c/busses/i2c-bcm-kona.c
@@ -20,7 +20,6 @@
20#include <linux/platform_device.h> 20#include <linux/platform_device.h>
21#include <linux/clk.h> 21#include <linux/clk.h>
22#include <linux/io.h> 22#include <linux/io.h>
23#include <linux/clk.h>
24#include <linux/slab.h> 23#include <linux/slab.h>
25 24
 26/* Hardware register offsets and field definitions */ 25
@@ -891,7 +890,7 @@ static const struct of_device_id bcm_kona_i2c_of_match[] = {
891 {.compatible = "brcm,kona-i2c",}, 890 {.compatible = "brcm,kona-i2c",},
892 {}, 891 {},
893}; 892};
894MODULE_DEVICE_TABLE(of, kona_i2c_of_match); 893MODULE_DEVICE_TABLE(of, bcm_kona_i2c_of_match);
895 894
896static struct platform_driver bcm_kona_i2c_driver = { 895static struct platform_driver bcm_kona_i2c_driver = {
897 .driver = { 896 .driver = {
diff --git a/drivers/i2c/busses/i2c-bcm2835.c b/drivers/i2c/busses/i2c-bcm2835.c
index d7e8600f31fb..77df97b932af 100644
--- a/drivers/i2c/busses/i2c-bcm2835.c
+++ b/drivers/i2c/busses/i2c-bcm2835.c
@@ -299,6 +299,7 @@ static int bcm2835_i2c_probe(struct platform_device *pdev)
299 strlcpy(adap->name, "bcm2835 I2C adapter", sizeof(adap->name)); 299 strlcpy(adap->name, "bcm2835 I2C adapter", sizeof(adap->name));
300 adap->algo = &bcm2835_i2c_algo; 300 adap->algo = &bcm2835_i2c_algo;
301 adap->dev.parent = &pdev->dev; 301 adap->dev.parent = &pdev->dev;
302 adap->dev.of_node = pdev->dev.of_node;
302 303
303 bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_C, 0); 304 bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_C, 0);
304 305
diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c
index ff05d9fef4a8..af0b5830303d 100644
--- a/drivers/i2c/busses/i2c-davinci.c
+++ b/drivers/i2c/busses/i2c-davinci.c
@@ -125,12 +125,12 @@ static struct davinci_i2c_platform_data davinci_i2c_platform_data_default = {
125static inline void davinci_i2c_write_reg(struct davinci_i2c_dev *i2c_dev, 125static inline void davinci_i2c_write_reg(struct davinci_i2c_dev *i2c_dev,
126 int reg, u16 val) 126 int reg, u16 val)
127{ 127{
128 __raw_writew(val, i2c_dev->base + reg); 128 writew_relaxed(val, i2c_dev->base + reg);
129} 129}
130 130
131static inline u16 davinci_i2c_read_reg(struct davinci_i2c_dev *i2c_dev, int reg) 131static inline u16 davinci_i2c_read_reg(struct davinci_i2c_dev *i2c_dev, int reg)
132{ 132{
133 return __raw_readw(i2c_dev->base + reg); 133 return readw_relaxed(i2c_dev->base + reg);
134} 134}
135 135
136/* Generate a pulse on the i2c clock pin. */ 136/* Generate a pulse on the i2c clock pin. */
diff --git a/drivers/i2c/busses/i2c-diolan-u2c.c b/drivers/i2c/busses/i2c-diolan-u2c.c
index dae3ddfe7619..721f7ebf9a3b 100644
--- a/drivers/i2c/busses/i2c-diolan-u2c.c
+++ b/drivers/i2c/busses/i2c-diolan-u2c.c
@@ -25,8 +25,6 @@
25#define USB_VENDOR_ID_DIOLAN 0x0abf 25#define USB_VENDOR_ID_DIOLAN 0x0abf
26#define USB_DEVICE_ID_DIOLAN_U2C 0x3370 26#define USB_DEVICE_ID_DIOLAN_U2C 0x3370
27 27
28#define DIOLAN_OUT_EP 0x02
29#define DIOLAN_IN_EP 0x84
30 28
31/* commands via USB, must match command ids in the firmware */ 29/* commands via USB, must match command ids in the firmware */
32#define CMD_I2C_READ 0x01 30#define CMD_I2C_READ 0x01
@@ -84,6 +82,7 @@
84struct i2c_diolan_u2c { 82struct i2c_diolan_u2c {
85 u8 obuffer[DIOLAN_OUTBUF_LEN]; /* output buffer */ 83 u8 obuffer[DIOLAN_OUTBUF_LEN]; /* output buffer */
86 u8 ibuffer[DIOLAN_INBUF_LEN]; /* input buffer */ 84 u8 ibuffer[DIOLAN_INBUF_LEN]; /* input buffer */
85 int ep_in, ep_out; /* Endpoints */
87 struct usb_device *usb_dev; /* the usb device for this device */ 86 struct usb_device *usb_dev; /* the usb device for this device */
88 struct usb_interface *interface;/* the interface for this device */ 87 struct usb_interface *interface;/* the interface for this device */
89 struct i2c_adapter adapter; /* i2c related things */ 88 struct i2c_adapter adapter; /* i2c related things */
@@ -109,7 +108,7 @@ static int diolan_usb_transfer(struct i2c_diolan_u2c *dev)
109 return -EINVAL; 108 return -EINVAL;
110 109
111 ret = usb_bulk_msg(dev->usb_dev, 110 ret = usb_bulk_msg(dev->usb_dev,
112 usb_sndbulkpipe(dev->usb_dev, DIOLAN_OUT_EP), 111 usb_sndbulkpipe(dev->usb_dev, dev->ep_out),
113 dev->obuffer, dev->olen, &actual, 112 dev->obuffer, dev->olen, &actual,
114 DIOLAN_USB_TIMEOUT); 113 DIOLAN_USB_TIMEOUT);
115 if (!ret) { 114 if (!ret) {
@@ -118,7 +117,7 @@ static int diolan_usb_transfer(struct i2c_diolan_u2c *dev)
118 117
119 tmpret = usb_bulk_msg(dev->usb_dev, 118 tmpret = usb_bulk_msg(dev->usb_dev,
120 usb_rcvbulkpipe(dev->usb_dev, 119 usb_rcvbulkpipe(dev->usb_dev,
121 DIOLAN_IN_EP), 120 dev->ep_in),
122 dev->ibuffer, 121 dev->ibuffer,
123 sizeof(dev->ibuffer), &actual, 122 sizeof(dev->ibuffer), &actual,
124 DIOLAN_USB_TIMEOUT); 123 DIOLAN_USB_TIMEOUT);
@@ -210,7 +209,7 @@ static void diolan_flush_input(struct i2c_diolan_u2c *dev)
210 int ret; 209 int ret;
211 210
212 ret = usb_bulk_msg(dev->usb_dev, 211 ret = usb_bulk_msg(dev->usb_dev,
213 usb_rcvbulkpipe(dev->usb_dev, DIOLAN_IN_EP), 212 usb_rcvbulkpipe(dev->usb_dev, dev->ep_in),
214 dev->ibuffer, sizeof(dev->ibuffer), &actual, 213 dev->ibuffer, sizeof(dev->ibuffer), &actual,
215 DIOLAN_USB_TIMEOUT); 214 DIOLAN_USB_TIMEOUT);
216 if (ret < 0 || actual == 0) 215 if (ret < 0 || actual == 0)
@@ -445,9 +444,14 @@ static void diolan_u2c_free(struct i2c_diolan_u2c *dev)
445static int diolan_u2c_probe(struct usb_interface *interface, 444static int diolan_u2c_probe(struct usb_interface *interface,
446 const struct usb_device_id *id) 445 const struct usb_device_id *id)
447{ 446{
447 struct usb_host_interface *hostif = interface->cur_altsetting;
448 struct i2c_diolan_u2c *dev; 448 struct i2c_diolan_u2c *dev;
449 int ret; 449 int ret;
450 450
451 if (hostif->desc.bInterfaceNumber != 0
452 || hostif->desc.bNumEndpoints < 2)
453 return -ENODEV;
454
451 /* allocate memory for our device state and initialize it */ 455 /* allocate memory for our device state and initialize it */
452 dev = kzalloc(sizeof(*dev), GFP_KERNEL); 456 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
453 if (dev == NULL) { 457 if (dev == NULL) {
@@ -455,6 +459,8 @@ static int diolan_u2c_probe(struct usb_interface *interface,
455 ret = -ENOMEM; 459 ret = -ENOMEM;
456 goto error; 460 goto error;
457 } 461 }
462 dev->ep_out = hostif->endpoint[0].desc.bEndpointAddress;
463 dev->ep_in = hostif->endpoint[1].desc.bEndpointAddress;
458 464
459 dev->usb_dev = usb_get_dev(interface_to_usbdev(interface)); 465 dev->usb_dev = usb_get_dev(interface_to_usbdev(interface));
460 dev->interface = interface; 466 dev->interface = interface;
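Rather than hard-coding endpoints 0x02/0x84, the diolan probe now refuses interfaces with fewer than two endpoints and takes the addresses from the active altsetting. A slightly more defensive variant of the same idea is sketched below, using the generic descriptor helpers rather than fixed endpoint positions; it is an illustration, not the driver's code:

/* Sketch: locate the bulk IN/OUT endpoints from the interface descriptor. */
struct usb_host_interface *hostif = interface->cur_altsetting;
int i, ep_in = -1, ep_out = -1;

if (hostif->desc.bNumEndpoints < 2)
	return -ENODEV;

for (i = 0; i < hostif->desc.bNumEndpoints; i++) {
	struct usb_endpoint_descriptor *epd = &hostif->endpoint[i].desc;

	if (usb_endpoint_is_bulk_out(epd) && ep_out < 0)
		ep_out = epd->bEndpointAddress;
	else if (usb_endpoint_is_bulk_in(epd) && ep_in < 0)
		ep_in = epd->bEndpointAddress;
}

if (ep_in < 0 || ep_out < 0)
	return -ENODEV;		/* not the interface layout we expect */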
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index 1d7efa3169cd..d0cfbb4cb964 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -312,7 +312,9 @@ static int i2c_imx_start(struct imx_i2c_struct *i2c_imx)
312 312
313 dev_dbg(&i2c_imx->adapter.dev, "<%s>\n", __func__); 313 dev_dbg(&i2c_imx->adapter.dev, "<%s>\n", __func__);
314 314
315 clk_prepare_enable(i2c_imx->clk); 315 result = clk_prepare_enable(i2c_imx->clk);
316 if (result)
317 return result;
316 imx_i2c_write_reg(i2c_imx->ifdr, i2c_imx, IMX_I2C_IFDR); 318 imx_i2c_write_reg(i2c_imx->ifdr, i2c_imx, IMX_I2C_IFDR);
317 /* Enable I2C controller */ 319 /* Enable I2C controller */
318 imx_i2c_write_reg(i2c_imx->hwdata->i2sr_clr_opcode, i2c_imx, IMX_I2C_I2SR); 320 imx_i2c_write_reg(i2c_imx->hwdata->i2sr_clr_opcode, i2c_imx, IMX_I2C_I2SR);
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index a6a891d7970d..90dcc2eaac5f 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -266,13 +266,13 @@ static const u8 reg_map_ip_v2[] = {
266static inline void omap_i2c_write_reg(struct omap_i2c_dev *i2c_dev, 266static inline void omap_i2c_write_reg(struct omap_i2c_dev *i2c_dev,
267 int reg, u16 val) 267 int reg, u16 val)
268{ 268{
269 __raw_writew(val, i2c_dev->base + 269 writew_relaxed(val, i2c_dev->base +
270 (i2c_dev->regs[reg] << i2c_dev->reg_shift)); 270 (i2c_dev->regs[reg] << i2c_dev->reg_shift));
271} 271}
272 272
273static inline u16 omap_i2c_read_reg(struct omap_i2c_dev *i2c_dev, int reg) 273static inline u16 omap_i2c_read_reg(struct omap_i2c_dev *i2c_dev, int reg)
274{ 274{
275 return __raw_readw(i2c_dev->base + 275 return readw_relaxed(i2c_dev->base +
276 (i2c_dev->regs[reg] << i2c_dev->reg_shift)); 276 (i2c_dev->regs[reg] << i2c_dev->reg_shift));
277} 277}
278 278
@@ -1037,6 +1037,20 @@ static const struct i2c_algorithm omap_i2c_algo = {
1037}; 1037};
1038 1038
1039#ifdef CONFIG_OF 1039#ifdef CONFIG_OF
1040static struct omap_i2c_bus_platform_data omap2420_pdata = {
1041 .rev = OMAP_I2C_IP_VERSION_1,
1042 .flags = OMAP_I2C_FLAG_NO_FIFO |
1043 OMAP_I2C_FLAG_SIMPLE_CLOCK |
1044 OMAP_I2C_FLAG_16BIT_DATA_REG |
1045 OMAP_I2C_FLAG_BUS_SHIFT_2,
1046};
1047
1048static struct omap_i2c_bus_platform_data omap2430_pdata = {
1049 .rev = OMAP_I2C_IP_VERSION_1,
1050 .flags = OMAP_I2C_FLAG_BUS_SHIFT_2 |
1051 OMAP_I2C_FLAG_FORCE_19200_INT_CLK,
1052};
1053
1040static struct omap_i2c_bus_platform_data omap3_pdata = { 1054static struct omap_i2c_bus_platform_data omap3_pdata = {
1041 .rev = OMAP_I2C_IP_VERSION_1, 1055 .rev = OMAP_I2C_IP_VERSION_1,
1042 .flags = OMAP_I2C_FLAG_BUS_SHIFT_2, 1056 .flags = OMAP_I2C_FLAG_BUS_SHIFT_2,
@@ -1055,6 +1069,14 @@ static const struct of_device_id omap_i2c_of_match[] = {
1055 .compatible = "ti,omap3-i2c", 1069 .compatible = "ti,omap3-i2c",
1056 .data = &omap3_pdata, 1070 .data = &omap3_pdata,
1057 }, 1071 },
1072 {
1073 .compatible = "ti,omap2430-i2c",
1074 .data = &omap2430_pdata,
1075 },
1076 {
1077 .compatible = "ti,omap2420-i2c",
1078 .data = &omap2420_pdata,
1079 },
1058 { }, 1080 { },
1059}; 1081};
1060MODULE_DEVICE_TABLE(of, omap_i2c_of_match); 1082MODULE_DEVICE_TABLE(of, omap_i2c_of_match);
@@ -1140,9 +1162,9 @@ omap_i2c_probe(struct platform_device *pdev)
1140 * Read the Rev hi bit-[15:14] ie scheme this is 1 indicates ver2. 1162 * Read the Rev hi bit-[15:14] ie scheme this is 1 indicates ver2.
1141 * On omap1/3/2 Offset 4 is IE Reg the bit [15:14] is 0 at reset. 1163 * On omap1/3/2 Offset 4 is IE Reg the bit [15:14] is 0 at reset.
1142 * Also since the omap_i2c_read_reg uses reg_map_ip_* a 1164 * Also since the omap_i2c_read_reg uses reg_map_ip_* a
1143 * raw_readw is done. 1165 * readw_relaxed is done.
1144 */ 1166 */
1145 rev = __raw_readw(dev->base + 0x04); 1167 rev = readw_relaxed(dev->base + 0x04);
1146 1168
1147 dev->scheme = OMAP_I2C_SCHEME(rev); 1169 dev->scheme = OMAP_I2C_SCHEME(rev);
1148 switch (dev->scheme) { 1170 switch (dev->scheme) {
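The two new compatible strings let each OMAP generation carry its own quirks purely through the device-tree match table. On the probe side such per-compatible data is usually consumed as in the sketch below; the table and pdata type names come from the hunks, while the assignments are illustrative and dev stands for the driver's private structure:

/* Sketch: select platform data from the matched of_device_id entry. */
const struct of_device_id *match;
const struct omap_i2c_bus_platform_data *pdata = dev_get_platdata(&pdev->dev);

match = of_match_device(omap_i2c_of_match, &pdev->dev);
if (match)
	pdata = match->data;		/* e.g. &omap2420_pdata from the table above */

if (pdata) {
	dev->flags = pdata->flags;	/* apply the per-SoC quirk flags */
	/* ...and honour pdata->rev when choosing the register layout */
}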
diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c
index 797e3117bef7..2d0847b6be62 100644
--- a/drivers/i2c/i2c-mux.c
+++ b/drivers/i2c/i2c-mux.c
@@ -139,6 +139,8 @@ struct i2c_adapter *i2c_add_mux_adapter(struct i2c_adapter *parent,
139 priv->adap.algo = &priv->algo; 139 priv->adap.algo = &priv->algo;
140 priv->adap.algo_data = priv; 140 priv->adap.algo_data = priv;
141 priv->adap.dev.parent = &parent->dev; 141 priv->adap.dev.parent = &parent->dev;
142 priv->adap.retries = parent->retries;
143 priv->adap.timeout = parent->timeout;
142 144
143 /* Sanity check on class */ 145 /* Sanity check on class */
144 if (i2c_mux_parent_classes(parent) & class) 146 if (i2c_mux_parent_classes(parent) & class)
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index cbd4e9abc47e..797ed29a36ea 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -123,7 +123,7 @@ static struct cpuidle_state *cpuidle_state_table;
123 * which is also the index into the MWAIT hint array. 123 * which is also the index into the MWAIT hint array.
124 * Thus C0 is a dummy. 124 * Thus C0 is a dummy.
125 */ 125 */
126static struct cpuidle_state nehalem_cstates[] __initdata = { 126static struct cpuidle_state nehalem_cstates[] = {
127 { 127 {
128 .name = "C1-NHM", 128 .name = "C1-NHM",
129 .desc = "MWAIT 0x00", 129 .desc = "MWAIT 0x00",
@@ -156,7 +156,7 @@ static struct cpuidle_state nehalem_cstates[] __initdata = {
156 .enter = NULL } 156 .enter = NULL }
157}; 157};
158 158
159static struct cpuidle_state snb_cstates[] __initdata = { 159static struct cpuidle_state snb_cstates[] = {
160 { 160 {
161 .name = "C1-SNB", 161 .name = "C1-SNB",
162 .desc = "MWAIT 0x00", 162 .desc = "MWAIT 0x00",
@@ -196,7 +196,7 @@ static struct cpuidle_state snb_cstates[] __initdata = {
196 .enter = NULL } 196 .enter = NULL }
197}; 197};
198 198
199static struct cpuidle_state ivb_cstates[] __initdata = { 199static struct cpuidle_state ivb_cstates[] = {
200 { 200 {
201 .name = "C1-IVB", 201 .name = "C1-IVB",
202 .desc = "MWAIT 0x00", 202 .desc = "MWAIT 0x00",
@@ -236,7 +236,7 @@ static struct cpuidle_state ivb_cstates[] __initdata = {
236 .enter = NULL } 236 .enter = NULL }
237}; 237};
238 238
239static struct cpuidle_state hsw_cstates[] __initdata = { 239static struct cpuidle_state hsw_cstates[] = {
240 { 240 {
241 .name = "C1-HSW", 241 .name = "C1-HSW",
242 .desc = "MWAIT 0x00", 242 .desc = "MWAIT 0x00",
@@ -297,7 +297,7 @@ static struct cpuidle_state hsw_cstates[] __initdata = {
297 .enter = NULL } 297 .enter = NULL }
298}; 298};
299 299
300static struct cpuidle_state atom_cstates[] __initdata = { 300static struct cpuidle_state atom_cstates[] = {
301 { 301 {
302 .name = "C1E-ATM", 302 .name = "C1E-ATM",
303 .desc = "MWAIT 0x00", 303 .desc = "MWAIT 0x00",
@@ -329,7 +329,7 @@ static struct cpuidle_state atom_cstates[] __initdata = {
329 { 329 {
330 .enter = NULL } 330 .enter = NULL }
331}; 331};
332static struct cpuidle_state avn_cstates[CPUIDLE_STATE_MAX] = { 332static struct cpuidle_state avn_cstates[] = {
333 { 333 {
334 .name = "C1-AVN", 334 .name = "C1-AVN",
335 .desc = "MWAIT 0x00", 335 .desc = "MWAIT 0x00",
@@ -340,10 +340,12 @@ static struct cpuidle_state avn_cstates[CPUIDLE_STATE_MAX] = {
340 { 340 {
341 .name = "C6-AVN", 341 .name = "C6-AVN",
342 .desc = "MWAIT 0x51", 342 .desc = "MWAIT 0x51",
343 .flags = MWAIT2flg(0x58) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, 343 .flags = MWAIT2flg(0x51) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
344 .exit_latency = 15, 344 .exit_latency = 15,
345 .target_residency = 45, 345 .target_residency = 45,
346 .enter = &intel_idle }, 346 .enter = &intel_idle },
347 {
348 .enter = NULL }
347}; 349};
348 350
349/** 351/**
@@ -377,6 +379,9 @@ static int intel_idle(struct cpuidle_device *dev,
377 379
378 if (!current_set_polling_and_test()) { 380 if (!current_set_polling_and_test()) {
379 381
382 if (this_cpu_has(X86_FEATURE_CLFLUSH_MONITOR))
383 clflush((void *)&current_thread_info()->flags);
384
380 __monitor((void *)&current_thread_info()->flags, 0, 0); 385 __monitor((void *)&current_thread_info()->flags, 0, 0);
381 smp_mb(); 386 smp_mb();
382 if (!need_resched()) 387 if (!need_resched())
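The last intel_idle hunk flushes the monitored cache line before MONITOR on CPUs that advertise X86_FEATURE_CLFLUSH_MONITOR, guarding against parts where MONITOR could otherwise miss the wakeup write to the flags word. The surrounding idle-entry sequence, condensed from the driver (the MWAIT hint value here is illustrative):

/* Sketch: MONITOR/MWAIT entry with the conditional cache-line flush. */
unsigned long ecx = 1;			/* break out of MWAIT on interrupt */
unsigned long eax = 0x10;		/* illustrative MWAIT hint for the target C-state */

if (!current_set_polling_and_test()) {
	if (this_cpu_has(X86_FEATURE_CLFLUSH_MONITOR))
		clflush((void *)&current_thread_info()->flags);

	__monitor((void *)&current_thread_info()->flags, 0, 0);
	smp_mb();
	if (!need_resched())
		__mwait(eax, ecx);
}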
diff --git a/drivers/iio/accel/hid-sensor-accel-3d.c b/drivers/iio/accel/hid-sensor-accel-3d.c
index dcda17395c4e..1cae4e920c9b 100644
--- a/drivers/iio/accel/hid-sensor-accel-3d.c
+++ b/drivers/iio/accel/hid-sensor-accel-3d.c
@@ -350,7 +350,7 @@ static int hid_accel_3d_probe(struct platform_device *pdev)
350error_iio_unreg: 350error_iio_unreg:
351 iio_device_unregister(indio_dev); 351 iio_device_unregister(indio_dev);
352error_remove_trigger: 352error_remove_trigger:
353 hid_sensor_remove_trigger(indio_dev); 353 hid_sensor_remove_trigger(&accel_state->common_attributes);
354error_unreg_buffer_funcs: 354error_unreg_buffer_funcs:
355 iio_triggered_buffer_cleanup(indio_dev); 355 iio_triggered_buffer_cleanup(indio_dev);
356error_free_dev_mem: 356error_free_dev_mem:
@@ -363,10 +363,11 @@ static int hid_accel_3d_remove(struct platform_device *pdev)
363{ 363{
364 struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data; 364 struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data;
365 struct iio_dev *indio_dev = platform_get_drvdata(pdev); 365 struct iio_dev *indio_dev = platform_get_drvdata(pdev);
366 struct accel_3d_state *accel_state = iio_priv(indio_dev);
366 367
367 sensor_hub_remove_callback(hsdev, HID_USAGE_SENSOR_ACCEL_3D); 368 sensor_hub_remove_callback(hsdev, HID_USAGE_SENSOR_ACCEL_3D);
368 iio_device_unregister(indio_dev); 369 iio_device_unregister(indio_dev);
369 hid_sensor_remove_trigger(indio_dev); 370 hid_sensor_remove_trigger(&accel_state->common_attributes);
370 iio_triggered_buffer_cleanup(indio_dev); 371 iio_triggered_buffer_cleanup(indio_dev);
371 kfree(indio_dev->channels); 372 kfree(indio_dev->channels);
372 373
diff --git a/drivers/iio/accel/kxsd9.c b/drivers/iio/accel/kxsd9.c
index d72118d1189c..98ba761cbb9c 100644
--- a/drivers/iio/accel/kxsd9.c
+++ b/drivers/iio/accel/kxsd9.c
@@ -112,9 +112,10 @@ static int kxsd9_read(struct iio_dev *indio_dev, u8 address)
112 mutex_lock(&st->buf_lock); 112 mutex_lock(&st->buf_lock);
113 st->tx[0] = KXSD9_READ(address); 113 st->tx[0] = KXSD9_READ(address);
114 ret = spi_sync_transfer(st->us, xfers, ARRAY_SIZE(xfers)); 114 ret = spi_sync_transfer(st->us, xfers, ARRAY_SIZE(xfers));
115 if (ret) 115 if (!ret)
116 return ret; 116 ret = (((u16)(st->rx[0])) << 8) | (st->rx[1] & 0xF0);
117 return (((u16)(st->rx[0])) << 8) | (st->rx[1] & 0xF0); 117 mutex_unlock(&st->buf_lock);
118 return ret;
118} 119}
119 120
120static IIO_CONST_ATTR(accel_scale_available, 121static IIO_CONST_ATTR(accel_scale_available,
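The kxsd9 rewrite matters because the old error branch returned with buf_lock still held; both the failure and the assembled sample now leave through a single unlock. The general shape, with hypothetical do_transfer()/assemble_sample() helpers standing in for the SPI details:

/* Sketch: one exit path so the mutex is always released. */
int ret;

mutex_lock(&st->buf_lock);
ret = do_transfer(st);			/* hypothetical: returns 0 or a negative errno */
if (!ret)
	ret = assemble_sample(st);	/* hypothetical: only on success */
mutex_unlock(&st->buf_lock);		/* no early return can skip this */
return ret;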
diff --git a/drivers/iio/adc/ad7887.c b/drivers/iio/adc/ad7887.c
index acb7f90359a3..749a6cadab8b 100644
--- a/drivers/iio/adc/ad7887.c
+++ b/drivers/iio/adc/ad7887.c
@@ -200,7 +200,13 @@ static const struct ad7887_chip_info ad7887_chip_info_tbl[] = {
200 .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), 200 .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
201 .address = 1, 201 .address = 1,
202 .scan_index = 1, 202 .scan_index = 1,
203 .scan_type = IIO_ST('u', 12, 16, 0), 203 .scan_type = {
204 .sign = 'u',
205 .realbits = 12,
206 .storagebits = 16,
207 .shift = 0,
208 .endianness = IIO_BE,
209 },
204 }, 210 },
205 .channel[1] = { 211 .channel[1] = {
206 .type = IIO_VOLTAGE, 212 .type = IIO_VOLTAGE,
@@ -210,7 +216,13 @@ static const struct ad7887_chip_info ad7887_chip_info_tbl[] = {
210 .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), 216 .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
211 .address = 0, 217 .address = 0,
212 .scan_index = 0, 218 .scan_index = 0,
213 .scan_type = IIO_ST('u', 12, 16, 0), 219 .scan_type = {
220 .sign = 'u',
221 .realbits = 12,
222 .storagebits = 16,
223 .shift = 0,
224 .endianness = IIO_BE,
225 },
214 }, 226 },
215 .channel[2] = IIO_CHAN_SOFT_TIMESTAMP(2), 227 .channel[2] = IIO_CHAN_SOFT_TIMESTAMP(2),
216 .int_vref_mv = 2500, 228 .int_vref_mv = 2500,
diff --git a/drivers/iio/adc/at91_adc.c b/drivers/iio/adc/at91_adc.c
index 17df74908db1..5b1aa027c034 100644
--- a/drivers/iio/adc/at91_adc.c
+++ b/drivers/iio/adc/at91_adc.c
@@ -1047,6 +1047,7 @@ static int at91_adc_probe(struct platform_device *pdev)
1047 } else { 1047 } else {
1048 if (!st->caps->has_tsmr) { 1048 if (!st->caps->has_tsmr) {
1049 dev_err(&pdev->dev, "We don't support non-TSMR adc\n"); 1049 dev_err(&pdev->dev, "We don't support non-TSMR adc\n");
1050 ret = -ENODEV;
1050 goto error_disable_adc_clk; 1051 goto error_disable_adc_clk;
1051 } 1052 }
1052 1053
diff --git a/drivers/iio/adc/mcp3422.c b/drivers/iio/adc/mcp3422.c
index 12948325431c..c8c1baaec6c1 100644
--- a/drivers/iio/adc/mcp3422.c
+++ b/drivers/iio/adc/mcp3422.c
@@ -88,10 +88,10 @@ static const int mcp3422_sample_rates[4] = {
88 88
89/* sample rates to sign extension table */ 89/* sample rates to sign extension table */
90static const int mcp3422_sign_extend[4] = { 90static const int mcp3422_sign_extend[4] = {
91 [MCP3422_SRATE_240] = 12, 91 [MCP3422_SRATE_240] = 11,
92 [MCP3422_SRATE_60] = 14, 92 [MCP3422_SRATE_60] = 13,
93 [MCP3422_SRATE_15] = 16, 93 [MCP3422_SRATE_15] = 15,
94 [MCP3422_SRATE_3] = 18 }; 94 [MCP3422_SRATE_3] = 17 };
95 95
96/* Client data (each client gets its own) */ 96/* Client data (each client gets its own) */
97struct mcp3422 { 97struct mcp3422 {
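The mcp3422 table fix reflects that sign_extend32() takes the index of the sign bit, not the sample width: a 12-bit conversion at 240 SPS sign-extends from bit 11, the 18-bit conversion at the slowest rate from bit 17, and so on. A standalone check of that arithmetic; the helper mirrors the kernel's sign_extend32() semantics:

#include <stdio.h>
#include <stdint.h>

/* Same behaviour as the kernel helper: 'index' names the sign bit. */
static int32_t sign_extend32(uint32_t value, int index)
{
	uint8_t shift = 31 - index;

	return (int32_t)(value << shift) >> shift;
}

int main(void)
{
	/* A 12-bit sample of 0x800 is -2048, so the sign bit is bit 11. */
	printf("%d\n", sign_extend32(0x800, 11));	/* -2048 */
	/* Passing the width (12) instead leaves the value positive. */
	printf("%d\n", sign_extend32(0x800, 12));	/* 2048 */
	return 0;
}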
diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c
index 728411ec7642..d4d748214e4b 100644
--- a/drivers/iio/adc/ti_am335x_adc.c
+++ b/drivers/iio/adc/ti_am335x_adc.c
@@ -229,12 +229,15 @@ static int tiadc_iio_buffered_hardware_setup(struct iio_dev *indio_dev,
229 unsigned long flags, 229 unsigned long flags,
230 const struct iio_buffer_setup_ops *setup_ops) 230 const struct iio_buffer_setup_ops *setup_ops)
231{ 231{
232 struct iio_buffer *buffer;
232 int ret; 233 int ret;
233 234
234 indio_dev->buffer = iio_kfifo_allocate(indio_dev); 235 buffer = iio_kfifo_allocate(indio_dev);
235 if (!indio_dev->buffer) 236 if (!buffer)
236 return -ENOMEM; 237 return -ENOMEM;
237 238
239 iio_device_attach_buffer(indio_dev, buffer);
240
238 ret = request_threaded_irq(irq, pollfunc_th, pollfunc_bh, 241 ret = request_threaded_irq(irq, pollfunc_th, pollfunc_bh,
239 flags, indio_dev->name, indio_dev); 242 flags, indio_dev->name, indio_dev);
240 if (ret) 243 if (ret)
diff --git a/drivers/iio/common/hid-sensors/Kconfig b/drivers/iio/common/hid-sensors/Kconfig
index 1178121b55b0..39188b72cd3b 100644
--- a/drivers/iio/common/hid-sensors/Kconfig
+++ b/drivers/iio/common/hid-sensors/Kconfig
@@ -25,13 +25,4 @@ config HID_SENSOR_IIO_TRIGGER
25 If this driver is compiled as a module, it will be named 25 If this driver is compiled as a module, it will be named
26 hid-sensor-trigger. 26 hid-sensor-trigger.
27 27
28config HID_SENSOR_ENUM_BASE_QUIRKS
29 bool "ENUM base quirks for HID Sensor IIO drivers"
30 depends on HID_SENSOR_IIO_COMMON
31 help
32 Say yes here to build support for sensor hub FW using
33 enumeration, which is using 1 as base instead of 0.
34 Since logical minimum is still set 0 instead of 1,
35 there is no easy way to differentiate.
36
37endmenu 28endmenu
diff --git a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
index b6e77e0fc420..7dcf83998e6f 100644
--- a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
+++ b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
@@ -33,33 +33,42 @@ static int hid_sensor_data_rdy_trigger_set_state(struct iio_trigger *trig,
33{ 33{
34 struct hid_sensor_common *st = iio_trigger_get_drvdata(trig); 34 struct hid_sensor_common *st = iio_trigger_get_drvdata(trig);
35 int state_val; 35 int state_val;
36 int report_val;
36 37
37 if (state) { 38 if (state) {
38 if (sensor_hub_device_open(st->hsdev)) 39 if (sensor_hub_device_open(st->hsdev))
39 return -EIO; 40 return -EIO;
40 } else 41 state_val =
42 HID_USAGE_SENSOR_PROP_POWER_STATE_D0_FULL_POWER_ENUM;
43 report_val =
44 HID_USAGE_SENSOR_PROP_REPORTING_STATE_ALL_EVENTS_ENUM;
45
46 } else {
41 sensor_hub_device_close(st->hsdev); 47 sensor_hub_device_close(st->hsdev);
48 state_val =
49 HID_USAGE_SENSOR_PROP_POWER_STATE_D4_POWER_OFF_ENUM;
50 report_val =
51 HID_USAGE_SENSOR_PROP_REPORTING_STATE_NO_EVENTS_ENUM;
52 }
42 53
43 state_val = state ? 1 : 0;
44 if (IS_ENABLED(CONFIG_HID_SENSOR_ENUM_BASE_QUIRKS))
45 ++state_val;
46 st->data_ready = state; 54 st->data_ready = state;
55 state_val += st->power_state.logical_minimum;
56 report_val += st->report_state.logical_minimum;
47 sensor_hub_set_feature(st->hsdev, st->power_state.report_id, 57 sensor_hub_set_feature(st->hsdev, st->power_state.report_id,
48 st->power_state.index, 58 st->power_state.index,
49 (s32)state_val); 59 (s32)state_val);
50 60
51 sensor_hub_set_feature(st->hsdev, st->report_state.report_id, 61 sensor_hub_set_feature(st->hsdev, st->report_state.report_id,
52 st->report_state.index, 62 st->report_state.index,
53 (s32)state_val); 63 (s32)report_val);
54 64
55 return 0; 65 return 0;
56} 66}
57 67
58void hid_sensor_remove_trigger(struct iio_dev *indio_dev) 68void hid_sensor_remove_trigger(struct hid_sensor_common *attrb)
59{ 69{
60 iio_trigger_unregister(indio_dev->trig); 70 iio_trigger_unregister(attrb->trigger);
61 iio_trigger_free(indio_dev->trig); 71 iio_trigger_free(attrb->trigger);
62 indio_dev->trig = NULL;
63} 72}
64EXPORT_SYMBOL(hid_sensor_remove_trigger); 73EXPORT_SYMBOL(hid_sensor_remove_trigger);
65 74
@@ -90,7 +99,7 @@ int hid_sensor_setup_trigger(struct iio_dev *indio_dev, const char *name,
90 dev_err(&indio_dev->dev, "Trigger Register Failed\n"); 99 dev_err(&indio_dev->dev, "Trigger Register Failed\n");
91 goto error_free_trig; 100 goto error_free_trig;
92 } 101 }
93 indio_dev->trig = trig; 102 indio_dev->trig = attrb->trigger = trig;
94 103
95 return ret; 104 return ret;
96 105
diff --git a/drivers/iio/common/hid-sensors/hid-sensor-trigger.h b/drivers/iio/common/hid-sensors/hid-sensor-trigger.h
index 9a8731478eda..ca02f7811aa8 100644
--- a/drivers/iio/common/hid-sensors/hid-sensor-trigger.h
+++ b/drivers/iio/common/hid-sensors/hid-sensor-trigger.h
@@ -21,6 +21,6 @@
21 21
22int hid_sensor_setup_trigger(struct iio_dev *indio_dev, const char *name, 22int hid_sensor_setup_trigger(struct iio_dev *indio_dev, const char *name,
23 struct hid_sensor_common *attrb); 23 struct hid_sensor_common *attrb);
24void hid_sensor_remove_trigger(struct iio_dev *indio_dev); 24void hid_sensor_remove_trigger(struct hid_sensor_common *attrb);
25 25
26#endif 26#endif
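The trigger rework replaces the compile-time ENUM-base Kconfig quirk with a runtime offset: the requested power-state and reporting-state values are biased by each feature field's logical_minimum, so the same code serves firmware that numbers its enums from 0 and firmware that numbers them from 1. Reduced to its essence (field names as in the hunks):

/* Sketch: bias the usage enum by the report field's logical minimum. */
s32 state_val = HID_USAGE_SENSOR_PROP_POWER_STATE_D0_FULL_POWER_ENUM;

state_val += st->power_state.logical_minimum;	/* +0 or +1 depending on the firmware */
sensor_hub_set_feature(st->hsdev, st->power_state.report_id,
		       st->power_state.index, state_val);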
diff --git a/drivers/iio/gyro/hid-sensor-gyro-3d.c b/drivers/iio/gyro/hid-sensor-gyro-3d.c
index ea01c6bcfb56..e54f0f4959d3 100644
--- a/drivers/iio/gyro/hid-sensor-gyro-3d.c
+++ b/drivers/iio/gyro/hid-sensor-gyro-3d.c
@@ -348,7 +348,7 @@ static int hid_gyro_3d_probe(struct platform_device *pdev)
348error_iio_unreg: 348error_iio_unreg:
349 iio_device_unregister(indio_dev); 349 iio_device_unregister(indio_dev);
350error_remove_trigger: 350error_remove_trigger:
351 hid_sensor_remove_trigger(indio_dev); 351 hid_sensor_remove_trigger(&gyro_state->common_attributes);
352error_unreg_buffer_funcs: 352error_unreg_buffer_funcs:
353 iio_triggered_buffer_cleanup(indio_dev); 353 iio_triggered_buffer_cleanup(indio_dev);
354error_free_dev_mem: 354error_free_dev_mem:
@@ -361,10 +361,11 @@ static int hid_gyro_3d_remove(struct platform_device *pdev)
361{ 361{
362 struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data; 362 struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data;
363 struct iio_dev *indio_dev = platform_get_drvdata(pdev); 363 struct iio_dev *indio_dev = platform_get_drvdata(pdev);
364 struct gyro_3d_state *gyro_state = iio_priv(indio_dev);
364 365
365 sensor_hub_remove_callback(hsdev, HID_USAGE_SENSOR_GYRO_3D); 366 sensor_hub_remove_callback(hsdev, HID_USAGE_SENSOR_GYRO_3D);
366 iio_device_unregister(indio_dev); 367 iio_device_unregister(indio_dev);
367 hid_sensor_remove_trigger(indio_dev); 368 hid_sensor_remove_trigger(&gyro_state->common_attributes);
368 iio_triggered_buffer_cleanup(indio_dev); 369 iio_triggered_buffer_cleanup(indio_dev);
369 kfree(indio_dev->channels); 370 kfree(indio_dev->channels);
370 371
diff --git a/drivers/iio/imu/adis16400_core.c b/drivers/iio/imu/adis16400_core.c
index 3fb7757a1028..368660dfe135 100644
--- a/drivers/iio/imu/adis16400_core.c
+++ b/drivers/iio/imu/adis16400_core.c
@@ -651,7 +651,12 @@ static const struct iio_chan_spec adis16448_channels[] = {
651 .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), 651 .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
652 .address = ADIS16448_BARO_OUT, 652 .address = ADIS16448_BARO_OUT,
653 .scan_index = ADIS16400_SCAN_BARO, 653 .scan_index = ADIS16400_SCAN_BARO,
654 .scan_type = IIO_ST('s', 16, 16, 0), 654 .scan_type = {
655 .sign = 's',
656 .realbits = 16,
657 .storagebits = 16,
658 .endianness = IIO_BE,
659 },
655 }, 660 },
656 ADIS16400_TEMP_CHAN(ADIS16448_TEMP_OUT, 12), 661 ADIS16400_TEMP_CHAN(ADIS16448_TEMP_OUT, 12),
657 IIO_CHAN_SOFT_TIMESTAMP(11) 662 IIO_CHAN_SOFT_TIMESTAMP(11)
diff --git a/drivers/iio/light/Kconfig b/drivers/iio/light/Kconfig
index f98c2b509254..a022f27c6690 100644
--- a/drivers/iio/light/Kconfig
+++ b/drivers/iio/light/Kconfig
@@ -43,6 +43,7 @@ config GP2AP020A00F
43 depends on I2C 43 depends on I2C
44 select IIO_BUFFER 44 select IIO_BUFFER
45 select IIO_TRIGGERED_BUFFER 45 select IIO_TRIGGERED_BUFFER
46 select IRQ_WORK
46 help 47 help
47 Say Y here if you have a Sharp GP2AP020A00F proximity/ALS combo-chip 48 Say Y here if you have a Sharp GP2AP020A00F proximity/ALS combo-chip
48 hooked to an I2C bus. 49 hooked to an I2C bus.
@@ -81,6 +82,8 @@ config SENSORS_LM3533
81config TCS3472 82config TCS3472
82 tristate "TAOS TCS3472 color light-to-digital converter" 83 tristate "TAOS TCS3472 color light-to-digital converter"
83 depends on I2C 84 depends on I2C
85 select IIO_BUFFER
86 select IIO_TRIGGERED_BUFFER
84 help 87 help
85 If you say yes here you get support for the TAOS TCS3472 88 If you say yes here you get support for the TAOS TCS3472
86 family of color light-to-digital converters with IR filter. 89 family of color light-to-digital converters with IR filter.
diff --git a/drivers/iio/light/cm36651.c b/drivers/iio/light/cm36651.c
index 21df57130018..0922e39b0ea9 100644
--- a/drivers/iio/light/cm36651.c
+++ b/drivers/iio/light/cm36651.c
@@ -387,7 +387,7 @@ static int cm36651_read_int_time(struct cm36651_data *cm36651,
387 return -EINVAL; 387 return -EINVAL;
388 } 388 }
389 389
390 return IIO_VAL_INT_PLUS_MICRO; 390 return IIO_VAL_INT;
391} 391}
392 392
393static int cm36651_write_int_time(struct cm36651_data *cm36651, 393static int cm36651_write_int_time(struct cm36651_data *cm36651,
diff --git a/drivers/iio/light/hid-sensor-als.c b/drivers/iio/light/hid-sensor-als.c
index fa6ae8cf89ea..8e8b9d722853 100644
--- a/drivers/iio/light/hid-sensor-als.c
+++ b/drivers/iio/light/hid-sensor-als.c
@@ -314,7 +314,7 @@ static int hid_als_probe(struct platform_device *pdev)
314error_iio_unreg: 314error_iio_unreg:
315 iio_device_unregister(indio_dev); 315 iio_device_unregister(indio_dev);
316error_remove_trigger: 316error_remove_trigger:
317 hid_sensor_remove_trigger(indio_dev); 317 hid_sensor_remove_trigger(&als_state->common_attributes);
318error_unreg_buffer_funcs: 318error_unreg_buffer_funcs:
319 iio_triggered_buffer_cleanup(indio_dev); 319 iio_triggered_buffer_cleanup(indio_dev);
320error_free_dev_mem: 320error_free_dev_mem:
@@ -327,10 +327,11 @@ static int hid_als_remove(struct platform_device *pdev)
327{ 327{
328 struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data; 328 struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data;
329 struct iio_dev *indio_dev = platform_get_drvdata(pdev); 329 struct iio_dev *indio_dev = platform_get_drvdata(pdev);
330 struct als_state *als_state = iio_priv(indio_dev);
330 331
331 sensor_hub_remove_callback(hsdev, HID_USAGE_SENSOR_ALS); 332 sensor_hub_remove_callback(hsdev, HID_USAGE_SENSOR_ALS);
332 iio_device_unregister(indio_dev); 333 iio_device_unregister(indio_dev);
333 hid_sensor_remove_trigger(indio_dev); 334 hid_sensor_remove_trigger(&als_state->common_attributes);
334 iio_triggered_buffer_cleanup(indio_dev); 335 iio_triggered_buffer_cleanup(indio_dev);
335 kfree(indio_dev->channels); 336 kfree(indio_dev->channels);
336 337
diff --git a/drivers/iio/magnetometer/Kconfig b/drivers/iio/magnetometer/Kconfig
index 0cf09637b35b..d86d226dcd67 100644
--- a/drivers/iio/magnetometer/Kconfig
+++ b/drivers/iio/magnetometer/Kconfig
@@ -19,6 +19,8 @@ config AK8975
19config MAG3110 19config MAG3110
20 tristate "Freescale MAG3110 3-Axis Magnetometer" 20 tristate "Freescale MAG3110 3-Axis Magnetometer"
21 depends on I2C 21 depends on I2C
22 select IIO_BUFFER
23 select IIO_TRIGGERED_BUFFER
22 help 24 help
23 Say yes here to build support for the Freescale MAG3110 3-Axis 25 Say yes here to build support for the Freescale MAG3110 3-Axis
24 magnetometer. 26 magnetometer.
diff --git a/drivers/iio/magnetometer/hid-sensor-magn-3d.c b/drivers/iio/magnetometer/hid-sensor-magn-3d.c
index 2634920562fb..b26e1028a0a0 100644
--- a/drivers/iio/magnetometer/hid-sensor-magn-3d.c
+++ b/drivers/iio/magnetometer/hid-sensor-magn-3d.c
@@ -351,7 +351,7 @@ static int hid_magn_3d_probe(struct platform_device *pdev)
351error_iio_unreg: 351error_iio_unreg:
352 iio_device_unregister(indio_dev); 352 iio_device_unregister(indio_dev);
353error_remove_trigger: 353error_remove_trigger:
354 hid_sensor_remove_trigger(indio_dev); 354 hid_sensor_remove_trigger(&magn_state->common_attributes);
355error_unreg_buffer_funcs: 355error_unreg_buffer_funcs:
356 iio_triggered_buffer_cleanup(indio_dev); 356 iio_triggered_buffer_cleanup(indio_dev);
357error_free_dev_mem: 357error_free_dev_mem:
@@ -364,10 +364,11 @@ static int hid_magn_3d_remove(struct platform_device *pdev)
364{ 364{
365 struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data; 365 struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data;
366 struct iio_dev *indio_dev = platform_get_drvdata(pdev); 366 struct iio_dev *indio_dev = platform_get_drvdata(pdev);
367 struct magn_3d_state *magn_state = iio_priv(indio_dev);
367 368
368 sensor_hub_remove_callback(hsdev, HID_USAGE_SENSOR_COMPASS_3D); 369 sensor_hub_remove_callback(hsdev, HID_USAGE_SENSOR_COMPASS_3D);
369 iio_device_unregister(indio_dev); 370 iio_device_unregister(indio_dev);
370 hid_sensor_remove_trigger(indio_dev); 371 hid_sensor_remove_trigger(&magn_state->common_attributes);
371 iio_triggered_buffer_cleanup(indio_dev); 372 iio_triggered_buffer_cleanup(indio_dev);
372 kfree(indio_dev->channels); 373 kfree(indio_dev->channels);
373 374
diff --git a/drivers/iio/magnetometer/mag3110.c b/drivers/iio/magnetometer/mag3110.c
index 783c5b417356..becf54496967 100644
--- a/drivers/iio/magnetometer/mag3110.c
+++ b/drivers/iio/magnetometer/mag3110.c
@@ -250,7 +250,12 @@ done:
250 .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SAMP_FREQ) | \ 250 .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SAMP_FREQ) | \
251 BIT(IIO_CHAN_INFO_SCALE), \ 251 BIT(IIO_CHAN_INFO_SCALE), \
252 .scan_index = idx, \ 252 .scan_index = idx, \
253 .scan_type = IIO_ST('s', 16, 16, IIO_BE), \ 253 .scan_type = { \
254 .sign = 's', \
255 .realbits = 16, \
256 .storagebits = 16, \
257 .endianness = IIO_BE, \
258 }, \
254} 259}
255 260
256static const struct iio_chan_spec mag3110_channels[] = { 261static const struct iio_chan_spec mag3110_channels[] = {
diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
index c47c2034ca71..0717940ec3b5 100644
--- a/drivers/infiniband/core/iwcm.c
+++ b/drivers/infiniband/core/iwcm.c
@@ -181,9 +181,16 @@ static void add_ref(struct iw_cm_id *cm_id)
181static void rem_ref(struct iw_cm_id *cm_id) 181static void rem_ref(struct iw_cm_id *cm_id)
182{ 182{
183 struct iwcm_id_private *cm_id_priv; 183 struct iwcm_id_private *cm_id_priv;
184 int cb_destroy;
185
184 cm_id_priv = container_of(cm_id, struct iwcm_id_private, id); 186 cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
185 if (iwcm_deref_id(cm_id_priv) && 187
186 test_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags)) { 188 /*
189 * Test bit before deref in case the cm_id gets freed on another
190 * thread.
191 */
192 cb_destroy = test_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags);
193 if (iwcm_deref_id(cm_id_priv) && cb_destroy) {
187 BUG_ON(!list_empty(&cm_id_priv->work_list)); 194 BUG_ON(!list_empty(&cm_id_priv->work_list));
188 free_cm_id(cm_id_priv); 195 free_cm_id(cm_id_priv);
189 } 196 }
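The iwcm change is an ordering fix: once iwcm_deref_id() drops the last reference, a waiter on another thread may free the cm_id immediately, so the CALLBACK_DESTROY flag has to be sampled before the deref rather than inside the short-circuited condition after it. The same shape as a generic sketch, with my_id/deref_id/free_id as hypothetical stand-ins:

/* Sketch: read object state before the deref that may let it be freed. */
static void rem_ref_pattern(struct my_id *id)
{
	int cb_destroy;

	/* Sample the flag while our reference still pins the memory... */
	cb_destroy = test_bit(MY_F_CALLBACK_DESTROY, &id->flags);

	/*
	 * ...because once deref_id() observes the count hitting zero, a
	 * waiter elsewhere is free to kfree() the object at any moment.
	 */
	if (deref_id(id) && cb_destroy)
		free_id(id);
}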
diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h
index bdc842e9faef..a283274a5a09 100644
--- a/drivers/infiniband/core/uverbs.h
+++ b/drivers/infiniband/core/uverbs.h
@@ -49,12 +49,20 @@
49 49
50#define INIT_UDATA(udata, ibuf, obuf, ilen, olen) \ 50#define INIT_UDATA(udata, ibuf, obuf, ilen, olen) \
51 do { \ 51 do { \
52 (udata)->inbuf = (void __user *) (ibuf); \ 52 (udata)->inbuf = (const void __user *) (ibuf); \
53 (udata)->outbuf = (void __user *) (obuf); \ 53 (udata)->outbuf = (void __user *) (obuf); \
54 (udata)->inlen = (ilen); \ 54 (udata)->inlen = (ilen); \
55 (udata)->outlen = (olen); \ 55 (udata)->outlen = (olen); \
56 } while (0) 56 } while (0)
57 57
58#define INIT_UDATA_BUF_OR_NULL(udata, ibuf, obuf, ilen, olen) \
59 do { \
60 (udata)->inbuf = (ilen) ? (const void __user *) (ibuf) : NULL; \
61 (udata)->outbuf = (olen) ? (void __user *) (obuf) : NULL; \
62 (udata)->inlen = (ilen); \
63 (udata)->outlen = (olen); \
64 } while (0)
65
58/* 66/*
59 * Our lifetime rules for these structs are the following: 67 * Our lifetime rules for these structs are the following:
60 * 68 *
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 65f6e7dc380c..f1cc83855af6 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -2593,6 +2593,9 @@ out_put:
2593static int kern_spec_to_ib_spec(struct ib_uverbs_flow_spec *kern_spec, 2593static int kern_spec_to_ib_spec(struct ib_uverbs_flow_spec *kern_spec,
2594 union ib_flow_spec *ib_spec) 2594 union ib_flow_spec *ib_spec)
2595{ 2595{
2596 if (kern_spec->reserved)
2597 return -EINVAL;
2598
2596 ib_spec->type = kern_spec->type; 2599 ib_spec->type = kern_spec->type;
2597 2600
2598 switch (ib_spec->type) { 2601 switch (ib_spec->type) {
@@ -2646,6 +2649,9 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
2646 void *ib_spec; 2649 void *ib_spec;
2647 int i; 2650 int i;
2648 2651
2652 if (ucore->inlen < sizeof(cmd))
2653 return -EINVAL;
2654
2649 if (ucore->outlen < sizeof(resp)) 2655 if (ucore->outlen < sizeof(resp))
2650 return -ENOSPC; 2656 return -ENOSPC;
2651 2657
@@ -2671,6 +2677,10 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
2671 (cmd.flow_attr.num_of_specs * sizeof(struct ib_uverbs_flow_spec))) 2677 (cmd.flow_attr.num_of_specs * sizeof(struct ib_uverbs_flow_spec)))
2672 return -EINVAL; 2678 return -EINVAL;
2673 2679
2680 if (cmd.flow_attr.reserved[0] ||
2681 cmd.flow_attr.reserved[1])
2682 return -EINVAL;
2683
2674 if (cmd.flow_attr.num_of_specs) { 2684 if (cmd.flow_attr.num_of_specs) {
2675 kern_flow_attr = kmalloc(sizeof(*kern_flow_attr) + cmd.flow_attr.size, 2685 kern_flow_attr = kmalloc(sizeof(*kern_flow_attr) + cmd.flow_attr.size,
2676 GFP_KERNEL); 2686 GFP_KERNEL);
@@ -2731,6 +2741,7 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
2731 if (cmd.flow_attr.size || (i != flow_attr->num_of_specs)) { 2741 if (cmd.flow_attr.size || (i != flow_attr->num_of_specs)) {
2732 pr_warn("create flow failed, flow %d: %d bytes left from uverb cmd\n", 2742 pr_warn("create flow failed, flow %d: %d bytes left from uverb cmd\n",
2733 i, cmd.flow_attr.size); 2743 i, cmd.flow_attr.size);
2744 err = -EINVAL;
2734 goto err_free; 2745 goto err_free;
2735 } 2746 }
2736 flow_id = ib_create_flow(qp, flow_attr, IB_FLOW_DOMAIN_USER); 2747 flow_id = ib_create_flow(qp, flow_attr, IB_FLOW_DOMAIN_USER);
@@ -2791,10 +2802,16 @@ int ib_uverbs_ex_destroy_flow(struct ib_uverbs_file *file,
2791 struct ib_uobject *uobj; 2802 struct ib_uobject *uobj;
2792 int ret; 2803 int ret;
2793 2804
2805 if (ucore->inlen < sizeof(cmd))
2806 return -EINVAL;
2807
2794 ret = ib_copy_from_udata(&cmd, ucore, sizeof(cmd)); 2808 ret = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
2795 if (ret) 2809 if (ret)
2796 return ret; 2810 return ret;
2797 2811
2812 if (cmd.comp_mask)
2813 return -EINVAL;
2814
2798 uobj = idr_write_uobj(&ib_uverbs_rule_idr, cmd.flow_handle, 2815 uobj = idr_write_uobj(&ib_uverbs_rule_idr, cmd.flow_handle,
2799 file->ucontext); 2816 file->ucontext);
2800 if (!uobj) 2817 if (!uobj)
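
The uverbs_cmd.c hunks above reject requests whose core payload is shorter than the command struct and whose reserved or comp_mask fields are non-zero, so malformed userspace cannot smuggle garbage through fields the kernel does not yet interpret. A hedged sketch of that validation order for a hypothetical command header:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical user command layout, not the real uverbs ABI. */
struct cmd_hdr {
	uint32_t handle;
	uint32_t comp_mask;	/* must be zero until extensions are defined */
	uint32_t reserved[2];	/* must be zero */
};

static int parse_cmd(const void *ubuf, size_t inlen, struct cmd_hdr *out)
{
	if (inlen < sizeof(*out))	/* too short: reject before copying */
		return -EINVAL;

	memcpy(out, ubuf, sizeof(*out));

	if (out->comp_mask || out->reserved[0] || out->reserved[1])
		return -EINVAL;		/* unknown extension bits: reject */

	return 0;
}

int main(void)
{
	struct cmd_hdr in = { .handle = 7 }, parsed;

	printf("valid: %d\n", parse_cmd(&in, sizeof(in), &parsed));
	in.reserved[1] = 1;
	printf("reserved set: %d\n", parse_cmd(&in, sizeof(in), &parsed));
	return 0;
}
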
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 34386943ebcf..08219fb3338b 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -668,25 +668,30 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
668 if ((hdr.in_words + ex_hdr.provider_in_words) * 8 != count) 668 if ((hdr.in_words + ex_hdr.provider_in_words) * 8 != count)
669 return -EINVAL; 669 return -EINVAL;
670 670
671 if (ex_hdr.cmd_hdr_reserved)
672 return -EINVAL;
673
671 if (ex_hdr.response) { 674 if (ex_hdr.response) {
672 if (!hdr.out_words && !ex_hdr.provider_out_words) 675 if (!hdr.out_words && !ex_hdr.provider_out_words)
673 return -EINVAL; 676 return -EINVAL;
677
678 if (!access_ok(VERIFY_WRITE,
679 (void __user *) (unsigned long) ex_hdr.response,
680 (hdr.out_words + ex_hdr.provider_out_words) * 8))
681 return -EFAULT;
674 } else { 682 } else {
675 if (hdr.out_words || ex_hdr.provider_out_words) 683 if (hdr.out_words || ex_hdr.provider_out_words)
676 return -EINVAL; 684 return -EINVAL;
677 } 685 }
678 686
679 INIT_UDATA(&ucore, 687 INIT_UDATA_BUF_OR_NULL(&ucore, buf, (unsigned long) ex_hdr.response,
680 (hdr.in_words) ? buf : 0, 688 hdr.in_words * 8, hdr.out_words * 8);
681 (unsigned long)ex_hdr.response, 689
682 hdr.in_words * 8, 690 INIT_UDATA_BUF_OR_NULL(&uhw,
683 hdr.out_words * 8); 691 buf + ucore.inlen,
684 692 (unsigned long) ex_hdr.response + ucore.outlen,
685 INIT_UDATA(&uhw, 693 ex_hdr.provider_in_words * 8,
686 (ex_hdr.provider_in_words) ? buf + ucore.inlen : 0, 694 ex_hdr.provider_out_words * 8);
687 (ex_hdr.provider_out_words) ? (unsigned long)ex_hdr.response + ucore.outlen : 0,
688 ex_hdr.provider_in_words * 8,
689 ex_hdr.provider_out_words * 8);
690 695
691 err = uverbs_ex_cmd_table[command](file, 696 err = uverbs_ex_cmd_table[command](file,
692 &ucore, 697 &ucore,
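
With INIT_UDATA_BUF_OR_NULL, the extended write() path above carves the user buffer into a core region (hdr.in_words/out_words) followed immediately by a provider region (provider_in_words/out_words), and also verifies the response buffer is writable up front. A small sketch of the offset arithmetic, with made-up names:

#include <stdio.h>
#include <stddef.h>

struct region {
	const char *buf;
	size_t len;
};

/*
 * Split one contiguous input buffer into a core part and a provider
 * (driver-specific) part that starts right after the core words.
 * Word counts are in 8-byte units, as in the uverbs header.
 */
static void split_payload(const char *buf, size_t core_words, size_t prov_words,
			  struct region *core, struct region *prov)
{
	core->buf = core_words ? buf : NULL;
	core->len = core_words * 8;

	prov->buf = prov_words ? buf + core->len : NULL;
	prov->len = prov_words * 8;
}

int main(void)
{
	char payload[64] = { 0 };
	struct region core, prov;

	split_payload(payload, 4, 2, &core, &prov);
	printf("core %zu bytes, provider %zu bytes at offset %td\n",
	       core.len, prov.len, prov.buf - payload);
	return 0;
}
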
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 12fef76c791c..45126879ad28 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -524,50 +524,6 @@ static int send_abort(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
524 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); 524 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
525} 525}
526 526
527#define VLAN_NONE 0xfff
528#define FILTER_SEL_VLAN_NONE 0xffff
529#define FILTER_SEL_WIDTH_P_FC (3+1) /* port uses 3 bits, FCoE one bit */
530#define FILTER_SEL_WIDTH_VIN_P_FC \
531 (6 + 7 + FILTER_SEL_WIDTH_P_FC) /* 6 bits are unused, VF uses 7 bits*/
532#define FILTER_SEL_WIDTH_TAG_P_FC \
533 (3 + FILTER_SEL_WIDTH_VIN_P_FC) /* PF uses 3 bits */
534#define FILTER_SEL_WIDTH_VLD_TAG_P_FC (1 + FILTER_SEL_WIDTH_TAG_P_FC)
535
536static unsigned int select_ntuple(struct c4iw_dev *dev, struct dst_entry *dst,
537 struct l2t_entry *l2t)
538{
539 unsigned int ntuple = 0;
540 u32 viid;
541
542 switch (dev->rdev.lldi.filt_mode) {
543
544 /* default filter mode */
545 case HW_TPL_FR_MT_PR_IV_P_FC:
546 if (l2t->vlan == VLAN_NONE)
547 ntuple |= FILTER_SEL_VLAN_NONE << FILTER_SEL_WIDTH_P_FC;
548 else {
549 ntuple |= l2t->vlan << FILTER_SEL_WIDTH_P_FC;
550 ntuple |= 1 << FILTER_SEL_WIDTH_TAG_P_FC;
551 }
552 ntuple |= l2t->lport << S_PORT | IPPROTO_TCP <<
553 FILTER_SEL_WIDTH_VLD_TAG_P_FC;
554 break;
555 case HW_TPL_FR_MT_PR_OV_P_FC: {
556 viid = cxgb4_port_viid(l2t->neigh->dev);
557
558 ntuple |= FW_VIID_VIN_GET(viid) << FILTER_SEL_WIDTH_P_FC;
559 ntuple |= FW_VIID_PFN_GET(viid) << FILTER_SEL_WIDTH_VIN_P_FC;
560 ntuple |= FW_VIID_VIVLD_GET(viid) << FILTER_SEL_WIDTH_TAG_P_FC;
561 ntuple |= l2t->lport << S_PORT | IPPROTO_TCP <<
562 FILTER_SEL_WIDTH_VLD_TAG_P_FC;
563 break;
564 }
565 default:
566 break;
567 }
568 return ntuple;
569}
570
571static int send_connect(struct c4iw_ep *ep) 527static int send_connect(struct c4iw_ep *ep)
572{ 528{
573 struct cpl_act_open_req *req; 529 struct cpl_act_open_req *req;
@@ -641,8 +597,9 @@ static int send_connect(struct c4iw_ep *ep)
641 req->local_ip = la->sin_addr.s_addr; 597 req->local_ip = la->sin_addr.s_addr;
642 req->peer_ip = ra->sin_addr.s_addr; 598 req->peer_ip = ra->sin_addr.s_addr;
643 req->opt0 = cpu_to_be64(opt0); 599 req->opt0 = cpu_to_be64(opt0);
644 req->params = cpu_to_be32(select_ntuple(ep->com.dev, 600 req->params = cpu_to_be32(cxgb4_select_ntuple(
645 ep->dst, ep->l2t)); 601 ep->com.dev->rdev.lldi.ports[0],
602 ep->l2t));
646 req->opt2 = cpu_to_be32(opt2); 603 req->opt2 = cpu_to_be32(opt2);
647 } else { 604 } else {
648 req6 = (struct cpl_act_open_req6 *)skb_put(skb, wrlen); 605 req6 = (struct cpl_act_open_req6 *)skb_put(skb, wrlen);
@@ -662,9 +619,9 @@ static int send_connect(struct c4iw_ep *ep)
662 req6->peer_ip_lo = *((__be64 *) 619 req6->peer_ip_lo = *((__be64 *)
663 (ra6->sin6_addr.s6_addr + 8)); 620 (ra6->sin6_addr.s6_addr + 8));
664 req6->opt0 = cpu_to_be64(opt0); 621 req6->opt0 = cpu_to_be64(opt0);
665 req6->params = cpu_to_be32( 622 req6->params = cpu_to_be32(cxgb4_select_ntuple(
666 select_ntuple(ep->com.dev, ep->dst, 623 ep->com.dev->rdev.lldi.ports[0],
667 ep->l2t)); 624 ep->l2t));
668 req6->opt2 = cpu_to_be32(opt2); 625 req6->opt2 = cpu_to_be32(opt2);
669 } 626 }
670 } else { 627 } else {
@@ -681,8 +638,9 @@ static int send_connect(struct c4iw_ep *ep)
681 t5_req->peer_ip = ra->sin_addr.s_addr; 638 t5_req->peer_ip = ra->sin_addr.s_addr;
682 t5_req->opt0 = cpu_to_be64(opt0); 639 t5_req->opt0 = cpu_to_be64(opt0);
683 t5_req->params = cpu_to_be64(V_FILTER_TUPLE( 640 t5_req->params = cpu_to_be64(V_FILTER_TUPLE(
684 select_ntuple(ep->com.dev, 641 cxgb4_select_ntuple(
685 ep->dst, ep->l2t))); 642 ep->com.dev->rdev.lldi.ports[0],
643 ep->l2t)));
686 t5_req->opt2 = cpu_to_be32(opt2); 644 t5_req->opt2 = cpu_to_be32(opt2);
687 } else { 645 } else {
688 t5_req6 = (struct cpl_t5_act_open_req6 *) 646 t5_req6 = (struct cpl_t5_act_open_req6 *)
@@ -703,7 +661,9 @@ static int send_connect(struct c4iw_ep *ep)
703 (ra6->sin6_addr.s6_addr + 8)); 661 (ra6->sin6_addr.s6_addr + 8));
704 t5_req6->opt0 = cpu_to_be64(opt0); 662 t5_req6->opt0 = cpu_to_be64(opt0);
705 t5_req6->params = (__force __be64)cpu_to_be32( 663 t5_req6->params = (__force __be64)cpu_to_be32(
706 select_ntuple(ep->com.dev, ep->dst, ep->l2t)); 664 cxgb4_select_ntuple(
665 ep->com.dev->rdev.lldi.ports[0],
666 ep->l2t));
707 t5_req6->opt2 = cpu_to_be32(opt2); 667 t5_req6->opt2 = cpu_to_be32(opt2);
708 } 668 }
709 } 669 }
@@ -1630,7 +1590,8 @@ static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
1630 memset(req, 0, sizeof(*req)); 1590 memset(req, 0, sizeof(*req));
1631 req->op_compl = htonl(V_WR_OP(FW_OFLD_CONNECTION_WR)); 1591 req->op_compl = htonl(V_WR_OP(FW_OFLD_CONNECTION_WR));
1632 req->len16_pkd = htonl(FW_WR_LEN16(DIV_ROUND_UP(sizeof(*req), 16))); 1592 req->len16_pkd = htonl(FW_WR_LEN16(DIV_ROUND_UP(sizeof(*req), 16)));
1633 req->le.filter = cpu_to_be32(select_ntuple(ep->com.dev, ep->dst, 1593 req->le.filter = cpu_to_be32(cxgb4_select_ntuple(
1594 ep->com.dev->rdev.lldi.ports[0],
1634 ep->l2t)); 1595 ep->l2t));
1635 sin = (struct sockaddr_in *)&ep->com.local_addr; 1596 sin = (struct sockaddr_in *)&ep->com.local_addr;
1636 req->le.lport = sin->sin_port; 1597 req->le.lport = sin->sin_port;
@@ -2938,7 +2899,8 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
2938 /* 2899 /*
2939 * Allocate a server TID. 2900 * Allocate a server TID.
2940 */ 2901 */
2941 if (dev->rdev.lldi.enable_fw_ofld_conn) 2902 if (dev->rdev.lldi.enable_fw_ofld_conn &&
2903 ep->com.local_addr.ss_family == AF_INET)
2942 ep->stid = cxgb4_alloc_sftid(dev->rdev.lldi.tids, 2904 ep->stid = cxgb4_alloc_sftid(dev->rdev.lldi.tids,
2943 cm_id->local_addr.ss_family, ep); 2905 cm_id->local_addr.ss_family, ep);
2944 else 2906 else
@@ -3323,9 +3285,7 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
3323 /* 3285 /*
3324 * Calculate the server tid from filter hit index from cpl_rx_pkt. 3286 * Calculate the server tid from filter hit index from cpl_rx_pkt.
3325 */ 3287 */
3326 stid = (__force int) cpu_to_be32((__force u32) rss->hash_val) 3288 stid = (__force int) cpu_to_be32((__force u32) rss->hash_val);
3327 - dev->rdev.lldi.tids->sftid_base
3328 + dev->rdev.lldi.tids->nstids;
3329 3289
3330 lep = (struct c4iw_ep *)lookup_stid(dev->rdev.lldi.tids, stid); 3290 lep = (struct c4iw_ep *)lookup_stid(dev->rdev.lldi.tids, stid);
3331 if (!lep) { 3291 if (!lep) {
@@ -3397,7 +3357,9 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
3397 window = (__force u16) htons((__force u16)tcph->window); 3357 window = (__force u16) htons((__force u16)tcph->window);
3398 3358
3399 /* Calculate filter portion for LE region. */ 3359 /* Calculate filter portion for LE region. */
3399 /* Calculate filter portion for LE region. */ 3359 /* Calculate filter portion for LE region. */
3400 filter = (__force unsigned int) cpu_to_be32(select_ntuple(dev, dst, e)); 3360 filter = (__force unsigned int) cpu_to_be32(cxgb4_select_ntuple(
3361 dev->rdev.lldi.ports[0],
3362 e));
3401 3363
3402 /* 3364 /*
3403 * Synthesize the cpl_pass_accept_req. We have everything except the 3365 * Synthesize the cpl_pass_accept_req. We have everything except the
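
The removed select_ntuple() packed VLAN, VF/PF, port and protocol into a filter tuple using cumulative per-field bit widths; the patch drops the private copy in favour of the shared cxgb4_select_ntuple() helper so the layout lives in one place. A generic, hedged illustration of packing fields at cumulative widths (the field names and widths below are made up for the example, not the real T4 layout):

#include <stdint.h>
#include <stdio.h>

/* Illustrative field widths only. */
#define PORT_WIDTH	4			/* bits 0..3  */
#define VLAN_SHIFT	PORT_WIDTH
#define VLAN_WIDTH	12			/* bits 4..15 */
#define PROTO_SHIFT	(VLAN_SHIFT + VLAN_WIDTH)

static uint32_t pack_tuple(uint32_t port, uint32_t vlan, uint32_t proto)
{
	uint32_t ntuple = 0;

	ntuple |= port;				/* lowest field                  */
	ntuple |= vlan << VLAN_SHIFT;		/* next field, shifted past port */
	ntuple |= proto << PROTO_SHIFT;		/* and so on                     */
	return ntuple;
}

int main(void)
{
	printf("tuple = 0x%08x\n", pack_tuple(1, 0xfff, 6 /* TCP */));
	return 0;
}
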
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index 4cb8eb24497c..84e45006451c 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -173,7 +173,7 @@ static int _c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len,
173 return ret; 173 return ret;
174} 174}
175 175
176int _c4iw_write_mem_dma(struct c4iw_rdev *rdev, u32 addr, u32 len, void *data) 176static int _c4iw_write_mem_dma(struct c4iw_rdev *rdev, u32 addr, u32 len, void *data)
177{ 177{
178 u32 remain = len; 178 u32 remain = len;
179 u32 dmalen; 179 u32 dmalen;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
index c29b5c838833..cdc7df4fdb8a 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
@@ -31,6 +31,7 @@
31 */ 31 */
32 32
33#include <linux/netdevice.h> 33#include <linux/netdevice.h>
34#include <linux/if_arp.h> /* For ARPHRD_xxx */
34#include <linux/module.h> 35#include <linux/module.h>
35#include <net/rtnetlink.h> 36#include <net/rtnetlink.h>
36#include "ipoib.h" 37#include "ipoib.h"
@@ -103,7 +104,7 @@ static int ipoib_new_child_link(struct net *src_net, struct net_device *dev,
103 return -EINVAL; 104 return -EINVAL;
104 105
105 pdev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK])); 106 pdev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
106 if (!pdev) 107 if (!pdev || pdev->type != ARPHRD_INFINIBAND)
107 return -ENODEV; 108 return -ENODEV;
108 109
109 ppriv = netdev_priv(pdev); 110 ppriv = netdev_priv(pdev);
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 6be57c38638d..9804fca6bf06 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -207,7 +207,9 @@ isert_free_rx_descriptors(struct isert_conn *isert_conn)
207 isert_conn->conn_rx_descs = NULL; 207 isert_conn->conn_rx_descs = NULL;
208} 208}
209 209
210static void isert_cq_tx_work(struct work_struct *);
210static void isert_cq_tx_callback(struct ib_cq *, void *); 211static void isert_cq_tx_callback(struct ib_cq *, void *);
212static void isert_cq_rx_work(struct work_struct *);
211static void isert_cq_rx_callback(struct ib_cq *, void *); 213static void isert_cq_rx_callback(struct ib_cq *, void *);
212 214
213static int 215static int
@@ -259,26 +261,36 @@ isert_create_device_ib_res(struct isert_device *device)
259 cq_desc[i].device = device; 261 cq_desc[i].device = device;
260 cq_desc[i].cq_index = i; 262 cq_desc[i].cq_index = i;
261 263
264 INIT_WORK(&cq_desc[i].cq_rx_work, isert_cq_rx_work);
262 device->dev_rx_cq[i] = ib_create_cq(device->ib_device, 265 device->dev_rx_cq[i] = ib_create_cq(device->ib_device,
263 isert_cq_rx_callback, 266 isert_cq_rx_callback,
264 isert_cq_event_callback, 267 isert_cq_event_callback,
265 (void *)&cq_desc[i], 268 (void *)&cq_desc[i],
266 ISER_MAX_RX_CQ_LEN, i); 269 ISER_MAX_RX_CQ_LEN, i);
267 if (IS_ERR(device->dev_rx_cq[i])) 270 if (IS_ERR(device->dev_rx_cq[i])) {
271 ret = PTR_ERR(device->dev_rx_cq[i]);
272 device->dev_rx_cq[i] = NULL;
268 goto out_cq; 273 goto out_cq;
274 }
269 275
276 INIT_WORK(&cq_desc[i].cq_tx_work, isert_cq_tx_work);
270 device->dev_tx_cq[i] = ib_create_cq(device->ib_device, 277 device->dev_tx_cq[i] = ib_create_cq(device->ib_device,
271 isert_cq_tx_callback, 278 isert_cq_tx_callback,
272 isert_cq_event_callback, 279 isert_cq_event_callback,
273 (void *)&cq_desc[i], 280 (void *)&cq_desc[i],
274 ISER_MAX_TX_CQ_LEN, i); 281 ISER_MAX_TX_CQ_LEN, i);
275 if (IS_ERR(device->dev_tx_cq[i])) 282 if (IS_ERR(device->dev_tx_cq[i])) {
283 ret = PTR_ERR(device->dev_tx_cq[i]);
284 device->dev_tx_cq[i] = NULL;
276 goto out_cq; 285 goto out_cq;
286 }
277 287
278 if (ib_req_notify_cq(device->dev_rx_cq[i], IB_CQ_NEXT_COMP)) 288 ret = ib_req_notify_cq(device->dev_rx_cq[i], IB_CQ_NEXT_COMP);
289 if (ret)
279 goto out_cq; 290 goto out_cq;
280 291
281 if (ib_req_notify_cq(device->dev_tx_cq[i], IB_CQ_NEXT_COMP)) 292 ret = ib_req_notify_cq(device->dev_tx_cq[i], IB_CQ_NEXT_COMP);
293 if (ret)
282 goto out_cq; 294 goto out_cq;
283 } 295 }
284 296
@@ -1724,7 +1736,6 @@ isert_cq_tx_callback(struct ib_cq *cq, void *context)
1724{ 1736{
1725 struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context; 1737 struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context;
1726 1738
1727 INIT_WORK(&cq_desc->cq_tx_work, isert_cq_tx_work);
1728 queue_work(isert_comp_wq, &cq_desc->cq_tx_work); 1739 queue_work(isert_comp_wq, &cq_desc->cq_tx_work);
1729} 1740}
1730 1741
@@ -1768,7 +1779,6 @@ isert_cq_rx_callback(struct ib_cq *cq, void *context)
1768{ 1779{
1769 struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context; 1780 struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context;
1770 1781
1771 INIT_WORK(&cq_desc->cq_rx_work, isert_cq_rx_work);
1772 queue_work(isert_rx_wq, &cq_desc->cq_rx_work); 1782 queue_work(isert_rx_wq, &cq_desc->cq_rx_work);
1773} 1783}
1774 1784
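
Two things change in the isert CQ setup above: the work structs are initialized before ib_create_cq() can deliver a completion that queues them, and a failed ib_create_cq() is converted with PTR_ERR() and the slot cleared so the unwind path never treats an error pointer as a valid CQ. A userspace model of the error-pointer half, using simplified stand-ins for the kernel's ERR_PTR helpers:

#include <errno.h>
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

/* Simplified userspace stand-ins, not the kernel's err.h. */
static inline void *err_ptr(long err)      { return (void *)err; }
static inline long  ptr_err(const void *p) { return (long)p; }
static inline int   is_err(const void *p)
{
	return (uintptr_t)p >= (uintptr_t)-4095;
}

static void *create_cq(int fail)
{
	return fail ? err_ptr(-ENOMEM) : malloc(64);
}

int main(void)
{
	void *cq = create_cq(1);
	int ret = 0;

	if (is_err(cq)) {
		ret = (int)ptr_err(cq);
		cq = NULL;	/* never hand an error pointer to the cleanup path */
	}

	printf("ret=%d cq=%p\n", ret, cq);
	free(cq);		/* free(NULL) is a no-op, so unwinding stays simple */
	return 0;
}
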
diff --git a/drivers/input/input.c b/drivers/input/input.c
index 846ccdd905b1..d2965e4b3224 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -1871,6 +1871,10 @@ void input_set_capability(struct input_dev *dev, unsigned int type, unsigned int
1871 break; 1871 break;
1872 1872
1873 case EV_ABS: 1873 case EV_ABS:
1874 input_alloc_absinfo(dev);
1875 if (!dev->absinfo)
1876 return;
1877
1874 __set_bit(code, dev->absbit); 1878 __set_bit(code, dev->absbit);
1875 break; 1879 break;
1876 1880
diff --git a/drivers/input/keyboard/adp5588-keys.c b/drivers/input/keyboard/adp5588-keys.c
index dbd2047f1641..3ed23513d881 100644
--- a/drivers/input/keyboard/adp5588-keys.c
+++ b/drivers/input/keyboard/adp5588-keys.c
@@ -536,7 +536,8 @@ static int adp5588_probe(struct i2c_client *client,
536 __set_bit(EV_REP, input->evbit); 536 __set_bit(EV_REP, input->evbit);
537 537
538 for (i = 0; i < input->keycodemax; i++) 538 for (i = 0; i < input->keycodemax; i++)
539 __set_bit(kpad->keycode[i] & KEY_MAX, input->keybit); 539 if (kpad->keycode[i] <= KEY_MAX)
540 __set_bit(kpad->keycode[i], input->keybit);
540 __clear_bit(KEY_RESERVED, input->keybit); 541 __clear_bit(KEY_RESERVED, input->keybit);
541 542
542 if (kpad->gpimapsize) 543 if (kpad->gpimapsize)
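
The keypad drivers in this series used to mask the keycode with KEY_MAX before setting the capability bit, which silently aliases an out-of-range code onto an unrelated valid one; the fix is to range-check and skip codes above KEY_MAX. A small demonstration of the difference (KEY_MAX value as defined in linux/input.h; the bogus code is invented for the example):

#include <stdio.h>

#define KEY_MAX 0x2ff	/* as in linux/input.h */

int main(void)
{
	unsigned int bogus = 0x351;	/* out-of-range keycode from a bad keymap */

	/* Old behaviour: masking maps the bogus code onto a real one. */
	printf("masked:  would set bit %#x\n", bogus & KEY_MAX);

	/* New behaviour: out-of-range codes are simply skipped. */
	if (bogus <= KEY_MAX)
		printf("checked: set bit %#x\n", bogus);
	else
		printf("checked: %#x skipped\n", bogus);

	return 0;
}
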
diff --git a/drivers/input/keyboard/adp5589-keys.c b/drivers/input/keyboard/adp5589-keys.c
index 67d12b3427c9..60dafd4fa692 100644
--- a/drivers/input/keyboard/adp5589-keys.c
+++ b/drivers/input/keyboard/adp5589-keys.c
@@ -992,7 +992,8 @@ static int adp5589_probe(struct i2c_client *client,
992 __set_bit(EV_REP, input->evbit); 992 __set_bit(EV_REP, input->evbit);
993 993
994 for (i = 0; i < input->keycodemax; i++) 994 for (i = 0; i < input->keycodemax; i++)
995 __set_bit(kpad->keycode[i] & KEY_MAX, input->keybit); 995 if (kpad->keycode[i] <= KEY_MAX)
996 __set_bit(kpad->keycode[i], input->keybit);
996 __clear_bit(KEY_RESERVED, input->keybit); 997 __clear_bit(KEY_RESERVED, input->keybit);
997 998
998 if (kpad->gpimapsize) 999 if (kpad->gpimapsize)
diff --git a/drivers/input/keyboard/bf54x-keys.c b/drivers/input/keyboard/bf54x-keys.c
index fc88fb48d70d..09b91d093087 100644
--- a/drivers/input/keyboard/bf54x-keys.c
+++ b/drivers/input/keyboard/bf54x-keys.c
@@ -289,7 +289,8 @@ static int bfin_kpad_probe(struct platform_device *pdev)
289 __set_bit(EV_REP, input->evbit); 289 __set_bit(EV_REP, input->evbit);
290 290
291 for (i = 0; i < input->keycodemax; i++) 291 for (i = 0; i < input->keycodemax; i++)
292 __set_bit(bf54x_kpad->keycode[i] & KEY_MAX, input->keybit); 292 if (bf54x_kpad->keycode[i] <= KEY_MAX)
293 __set_bit(bf54x_kpad->keycode[i], input->keybit);
293 __clear_bit(KEY_RESERVED, input->keybit); 294 __clear_bit(KEY_RESERVED, input->keybit);
294 295
295 error = input_register_device(input); 296 error = input_register_device(input);
diff --git a/drivers/input/misc/adxl34x.c b/drivers/input/misc/adxl34x.c
index 0735de3a6468..1cb1da294419 100644
--- a/drivers/input/misc/adxl34x.c
+++ b/drivers/input/misc/adxl34x.c
@@ -158,7 +158,7 @@
158 158
159/* ORIENT ADXL346 only */ 159/* ORIENT ADXL346 only */
160#define ADXL346_2D_VALID (1 << 6) 160#define ADXL346_2D_VALID (1 << 6)
161#define ADXL346_2D_ORIENT(x) (((x) & 0x3) >> 4) 161#define ADXL346_2D_ORIENT(x) (((x) & 0x30) >> 4)
162#define ADXL346_3D_VALID (1 << 3) 162#define ADXL346_3D_VALID (1 << 3)
163#define ADXL346_3D_ORIENT(x) ((x) & 0x7) 163#define ADXL346_3D_ORIENT(x) ((x) & 0x7)
164#define ADXL346_2D_PORTRAIT_POS 0 /* +X */ 164#define ADXL346_2D_PORTRAIT_POS 0 /* +X */
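
The one-line adxl34x fix above is a classic mask/shift mismatch: the 2-D orientation lives in bits 5:4, so the register value must be masked with 0x30 before shifting right by four; masking with 0x3 first always yields zero. A quick check of both variants:

#include <stdio.h>

#define ORIENT_WRONG(x)	(((x) & 0x3) >> 4)	/* old macro: always 0 */
#define ORIENT_FIXED(x)	(((x) & 0x30) >> 4)	/* bits 5:4, then shift */

int main(void)
{
	unsigned int reg = 0x20;	/* device reports orientation code 2 */

	printf("wrong: %u, fixed: %u\n", ORIENT_WRONG(reg), ORIENT_FIXED(reg));
	return 0;
}
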
diff --git a/drivers/input/misc/hp_sdc_rtc.c b/drivers/input/misc/hp_sdc_rtc.c
index 86b822806e95..45e0e3e55de2 100644
--- a/drivers/input/misc/hp_sdc_rtc.c
+++ b/drivers/input/misc/hp_sdc_rtc.c
@@ -180,7 +180,10 @@ static int64_t hp_sdc_rtc_read_i8042timer (uint8_t loadcmd, int numreg)
180 if (WARN_ON(down_interruptible(&i8042tregs))) 180 if (WARN_ON(down_interruptible(&i8042tregs)))
181 return -1; 181 return -1;
182 182
183 if (hp_sdc_enqueue_transaction(&t)) return -1; 183 if (hp_sdc_enqueue_transaction(&t)) {
184 up(&i8042tregs);
185 return -1;
186 }
184 187
185 /* Sleep until results come back. */ 188 /* Sleep until results come back. */
186 if (WARN_ON(down_interruptible(&i8042tregs))) 189 if (WARN_ON(down_interruptible(&i8042tregs)))
diff --git a/drivers/input/misc/pcf8574_keypad.c b/drivers/input/misc/pcf8574_keypad.c
index e37392976fdd..0deca5a3c87f 100644
--- a/drivers/input/misc/pcf8574_keypad.c
+++ b/drivers/input/misc/pcf8574_keypad.c
@@ -113,9 +113,12 @@ static int pcf8574_kp_probe(struct i2c_client *client, const struct i2c_device_i
113 idev->keycodemax = ARRAY_SIZE(lp->btncode); 113 idev->keycodemax = ARRAY_SIZE(lp->btncode);
114 114
115 for (i = 0; i < ARRAY_SIZE(pcf8574_kp_btncode); i++) { 115 for (i = 0; i < ARRAY_SIZE(pcf8574_kp_btncode); i++) {
116 lp->btncode[i] = pcf8574_kp_btncode[i]; 116 if (lp->btncode[i] <= KEY_MAX) {
117 __set_bit(lp->btncode[i] & KEY_MAX, idev->keybit); 117 lp->btncode[i] = pcf8574_kp_btncode[i];
118 __set_bit(lp->btncode[i], idev->keybit);
119 }
118 } 120 }
121 __clear_bit(KEY_RESERVED, idev->keybit);
119 122
120 sprintf(lp->name, DRV_NAME); 123 sprintf(lp->name, DRV_NAME);
121 sprintf(lp->phys, "kp_data/input0"); 124 sprintf(lp->phys, "kp_data/input0");
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index ca7a26f1dce8..5cf62e315218 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -70,6 +70,25 @@ static const struct alps_nibble_commands alps_v4_nibble_commands[] = {
70 { PSMOUSE_CMD_SETSCALE11, 0x00 }, /* f */ 70 { PSMOUSE_CMD_SETSCALE11, 0x00 }, /* f */
71}; 71};
72 72
73static const struct alps_nibble_commands alps_v6_nibble_commands[] = {
74 { PSMOUSE_CMD_ENABLE, 0x00 }, /* 0 */
75 { PSMOUSE_CMD_SETRATE, 0x0a }, /* 1 */
76 { PSMOUSE_CMD_SETRATE, 0x14 }, /* 2 */
77 { PSMOUSE_CMD_SETRATE, 0x28 }, /* 3 */
78 { PSMOUSE_CMD_SETRATE, 0x3c }, /* 4 */
79 { PSMOUSE_CMD_SETRATE, 0x50 }, /* 5 */
80 { PSMOUSE_CMD_SETRATE, 0x64 }, /* 6 */
81 { PSMOUSE_CMD_SETRATE, 0xc8 }, /* 7 */
82 { PSMOUSE_CMD_GETID, 0x00 }, /* 8 */
83 { PSMOUSE_CMD_GETINFO, 0x00 }, /* 9 */
84 { PSMOUSE_CMD_SETRES, 0x00 }, /* a */
85 { PSMOUSE_CMD_SETRES, 0x01 }, /* b */
86 { PSMOUSE_CMD_SETRES, 0x02 }, /* c */
87 { PSMOUSE_CMD_SETRES, 0x03 }, /* d */
88 { PSMOUSE_CMD_SETSCALE21, 0x00 }, /* e */
89 { PSMOUSE_CMD_SETSCALE11, 0x00 }, /* f */
90};
91
73 92
74#define ALPS_DUALPOINT 0x02 /* touchpad has trackstick */ 93#define ALPS_DUALPOINT 0x02 /* touchpad has trackstick */
75#define ALPS_PASS 0x04 /* device has a pass-through port */ 94#define ALPS_PASS 0x04 /* device has a pass-through port */
@@ -103,6 +122,7 @@ static const struct alps_model_info alps_model_data[] = {
103 /* Dell Latitude E5500, E6400, E6500, Precision M4400 */ 122 /* Dell Latitude E5500, E6400, E6500, Precision M4400 */
104 { { 0x62, 0x02, 0x14 }, 0x00, ALPS_PROTO_V2, 0xcf, 0xcf, 123 { { 0x62, 0x02, 0x14 }, 0x00, ALPS_PROTO_V2, 0xcf, 0xcf,
105 ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED }, 124 ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED },
125 { { 0x73, 0x00, 0x14 }, 0x00, ALPS_PROTO_V6, 0xff, 0xff, ALPS_DUALPOINT }, /* Dell XT2 */
106 { { 0x73, 0x02, 0x50 }, 0x00, ALPS_PROTO_V2, 0xcf, 0xcf, ALPS_FOUR_BUTTONS }, /* Dell Vostro 1400 */ 126 { { 0x73, 0x02, 0x50 }, 0x00, ALPS_PROTO_V2, 0xcf, 0xcf, ALPS_FOUR_BUTTONS }, /* Dell Vostro 1400 */
107 { { 0x52, 0x01, 0x14 }, 0x00, ALPS_PROTO_V2, 0xff, 0xff, 127 { { 0x52, 0x01, 0x14 }, 0x00, ALPS_PROTO_V2, 0xff, 0xff,
108 ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED }, /* Toshiba Tecra A11-11L */ 128 ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED }, /* Toshiba Tecra A11-11L */
@@ -645,6 +665,76 @@ static void alps_process_packet_v3(struct psmouse *psmouse)
645 alps_process_touchpad_packet_v3(psmouse); 665 alps_process_touchpad_packet_v3(psmouse);
646} 666}
647 667
668static void alps_process_packet_v6(struct psmouse *psmouse)
669{
670 struct alps_data *priv = psmouse->private;
671 unsigned char *packet = psmouse->packet;
672 struct input_dev *dev = psmouse->dev;
673 struct input_dev *dev2 = priv->dev2;
674 int x, y, z, left, right, middle;
675
676 /*
677 * We can use Byte5 to distinguish if the packet is from Touchpad
678 * or Trackpoint.
679 * Touchpad: 0 - 0x7E
680 * Trackpoint: 0x7F
681 */
682 if (packet[5] == 0x7F) {
683 /* It should be a DualPoint if we received a trackpoint packet */
684 if (!(priv->flags & ALPS_DUALPOINT))
685 return;
686
687 /* Trackpoint packet */
688 x = packet[1] | ((packet[3] & 0x20) << 2);
689 y = packet[2] | ((packet[3] & 0x40) << 1);
690 z = packet[4];
691 left = packet[3] & 0x01;
692 right = packet[3] & 0x02;
693 middle = packet[3] & 0x04;
694
696 /* Prevent the cursor from jumping when the finger is lifted */
696 if (x == 0x7F && y == 0x7F && z == 0x7F)
697 x = y = z = 0;
698
699 /* Divide by 4 since the trackpoint's speed is too fast */
700 input_report_rel(dev2, REL_X, (char)x / 4);
701 input_report_rel(dev2, REL_Y, -((char)y / 4));
702
703 input_report_key(dev2, BTN_LEFT, left);
704 input_report_key(dev2, BTN_RIGHT, right);
705 input_report_key(dev2, BTN_MIDDLE, middle);
706
707 input_sync(dev2);
708 return;
709 }
710
711 /* Touchpad packet */
712 x = packet[1] | ((packet[3] & 0x78) << 4);
713 y = packet[2] | ((packet[4] & 0x78) << 4);
714 z = packet[5];
715 left = packet[3] & 0x01;
716 right = packet[3] & 0x02;
717
718 if (z > 30)
719 input_report_key(dev, BTN_TOUCH, 1);
720 if (z < 25)
721 input_report_key(dev, BTN_TOUCH, 0);
722
723 if (z > 0) {
724 input_report_abs(dev, ABS_X, x);
725 input_report_abs(dev, ABS_Y, y);
726 }
727
728 input_report_abs(dev, ABS_PRESSURE, z);
729 input_report_key(dev, BTN_TOOL_FINGER, z > 0);
730
731 /* v6 touchpad does not have middle button */
732 input_report_key(dev, BTN_LEFT, left);
733 input_report_key(dev, BTN_RIGHT, right);
734
735 input_sync(dev);
736}
737
648static void alps_process_packet_v4(struct psmouse *psmouse) 738static void alps_process_packet_v4(struct psmouse *psmouse)
649{ 739{
650 struct alps_data *priv = psmouse->private; 740 struct alps_data *priv = psmouse->private;
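
In the v6 handler added above, byte 5 selects the packet type (0x7F means trackpoint, anything lower is touchpad), and the touchpad coordinates are rebuilt from a low byte plus high bits folded into bytes 3 and 4. A hedged userspace decoder for the touchpad case, following the shifts shown in the hunk (the sample packet is invented):

#include <stdio.h>

/* Decode one 6-byte ALPS v6 touchpad packet (trackpoint packets use
 * byte 5 == 0x7f and a different layout). Purely illustrative. */
static void decode_v6_touchpad(const unsigned char p[6])
{
	int x = p[1] | ((p[3] & 0x78) << 4);
	int y = p[2] | ((p[4] & 0x78) << 4);
	int z = p[5];
	int left  = p[3] & 0x01;
	int right = p[3] & 0x02;

	printf("x=%d y=%d z=%d left=%d right=%d\n", x, y, z, !!left, !!right);
}

int main(void)
{
	const unsigned char pkt[6] = { 0x00, 0x34, 0x56, 0x09, 0x18, 0x2a };

	if (pkt[5] != 0x7f)	/* byte 5 below 0x7f: touchpad data */
		decode_v6_touchpad(pkt);
	return 0;
}
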
@@ -897,7 +987,7 @@ static psmouse_ret_t alps_process_byte(struct psmouse *psmouse)
897 } 987 }
898 988
899 /* Bytes 2 - pktsize should have 0 in the highest bit */ 989 /* Bytes 2 - pktsize should have 0 in the highest bit */
900 if (priv->proto_version != ALPS_PROTO_V5 && 990 if ((priv->proto_version < ALPS_PROTO_V5) &&
901 psmouse->pktcnt >= 2 && psmouse->pktcnt <= psmouse->pktsize && 991 psmouse->pktcnt >= 2 && psmouse->pktcnt <= psmouse->pktsize &&
902 (psmouse->packet[psmouse->pktcnt - 1] & 0x80)) { 992 (psmouse->packet[psmouse->pktcnt - 1] & 0x80)) {
903 psmouse_dbg(psmouse, "refusing packet[%i] = %x\n", 993 psmouse_dbg(psmouse, "refusing packet[%i] = %x\n",
@@ -1085,6 +1175,80 @@ static int alps_absolute_mode_v1_v2(struct psmouse *psmouse)
1085 return ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_SETPOLL); 1175 return ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_SETPOLL);
1086} 1176}
1087 1177
1178static int alps_monitor_mode_send_word(struct psmouse *psmouse, u16 word)
1179{
1180 int i, nibble;
1181
1182 /*
1183 * b0-b11 are valid bits, send sequence is inverse.
1184 * e.g. when word = 0x0123, nibble send sequence is 3, 2, 1
1185 */
1186 for (i = 0; i <= 8; i += 4) {
1187 nibble = (word >> i) & 0xf;
1188 if (alps_command_mode_send_nibble(psmouse, nibble))
1189 return -1;
1190 }
1191
1192 return 0;
1193}
1194
1195static int alps_monitor_mode_write_reg(struct psmouse *psmouse,
1196 u16 addr, u16 value)
1197{
1198 struct ps2dev *ps2dev = &psmouse->ps2dev;
1199
1200 /* 0x0A0 is the command to write the word */
1201 if (ps2_command(ps2dev, NULL, PSMOUSE_CMD_ENABLE) ||
1202 alps_monitor_mode_send_word(psmouse, 0x0A0) ||
1203 alps_monitor_mode_send_word(psmouse, addr) ||
1204 alps_monitor_mode_send_word(psmouse, value) ||
1205 ps2_command(ps2dev, NULL, PSMOUSE_CMD_DISABLE))
1206 return -1;
1207
1208 return 0;
1209}
1210
1211static int alps_monitor_mode(struct psmouse *psmouse, bool enable)
1212{
1213 struct ps2dev *ps2dev = &psmouse->ps2dev;
1214
1215 if (enable) {
1216 /* EC E9 F5 F5 E7 E6 E7 E9 to enter monitor mode */
1217 if (ps2_command(ps2dev, NULL, PSMOUSE_CMD_RESET_WRAP) ||
1218 ps2_command(ps2dev, NULL, PSMOUSE_CMD_GETINFO) ||
1219 ps2_command(ps2dev, NULL, PSMOUSE_CMD_DISABLE) ||
1220 ps2_command(ps2dev, NULL, PSMOUSE_CMD_DISABLE) ||
1221 ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE21) ||
1222 ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE11) ||
1223 ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE21) ||
1224 ps2_command(ps2dev, NULL, PSMOUSE_CMD_GETINFO))
1225 return -1;
1226 } else {
1227 /* EC to exit monitor mode */
1228 if (ps2_command(ps2dev, NULL, PSMOUSE_CMD_RESET_WRAP))
1229 return -1;
1230 }
1231
1232 return 0;
1233}
1234
1235static int alps_absolute_mode_v6(struct psmouse *psmouse)
1236{
1237 u16 reg_val = 0x181;
1238 int ret = -1;
1239
1240 /* enter monitor mode, to write the register */
1241 if (alps_monitor_mode(psmouse, true))
1242 return -1;
1243
1244 ret = alps_monitor_mode_write_reg(psmouse, 0x000, reg_val);
1245
1246 if (alps_monitor_mode(psmouse, false))
1247 ret = -1;
1248
1249 return ret;
1250}
1251
1088static int alps_get_status(struct psmouse *psmouse, char *param) 1252static int alps_get_status(struct psmouse *psmouse, char *param)
1089{ 1253{
1090 /* Get status: 0xF5 0xF5 0xF5 0xE9 */ 1254 /* Get status: 0xF5 0xF5 0xF5 0xE9 */
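
alps_monitor_mode_send_word() above transmits a 12-bit register word as three nibbles, least significant nibble first (the comment's 0x0123 example goes out as 3, 2, 1). The loop below reproduces just that decomposition so the ordering is easy to verify:

#include <stdio.h>
#include <stdint.h>

/* Emit the three low nibbles of a 12-bit word, lowest nibble first. */
static void send_word_nibbles(uint16_t word)
{
	for (int i = 0; i <= 8; i += 4)
		printf("nibble: %x\n", (word >> i) & 0xf);
}

int main(void)
{
	send_word_nibbles(0x0123);	/* prints 3, 2, 1 */
	return 0;
}
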
@@ -1189,6 +1353,32 @@ static int alps_hw_init_v1_v2(struct psmouse *psmouse)
1189 return 0; 1353 return 0;
1190} 1354}
1191 1355
1356static int alps_hw_init_v6(struct psmouse *psmouse)
1357{
1358 unsigned char param[2] = {0xC8, 0x14};
1359
1360 /* Enter passthrough mode to let trackpoint enter 6byte raw mode */
1361 if (alps_passthrough_mode_v2(psmouse, true))
1362 return -1;
1363
1364 if (ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_SETSCALE11) ||
1365 ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_SETSCALE11) ||
1366 ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_SETSCALE11) ||
1367 ps2_command(&psmouse->ps2dev, &param[0], PSMOUSE_CMD_SETRATE) ||
1368 ps2_command(&psmouse->ps2dev, &param[1], PSMOUSE_CMD_SETRATE))
1369 return -1;
1370
1371 if (alps_passthrough_mode_v2(psmouse, false))
1372 return -1;
1373
1374 if (alps_absolute_mode_v6(psmouse)) {
1375 psmouse_err(psmouse, "Failed to enable absolute mode\n");
1376 return -1;
1377 }
1378
1379 return 0;
1380}
1381
1192/* 1382/*
1193 * Enable or disable passthrough mode to the trackstick. 1383 * Enable or disable passthrough mode to the trackstick.
1194 */ 1384 */
@@ -1553,6 +1743,8 @@ static void alps_set_defaults(struct alps_data *priv)
1553 priv->hw_init = alps_hw_init_v1_v2; 1743 priv->hw_init = alps_hw_init_v1_v2;
1554 priv->process_packet = alps_process_packet_v1_v2; 1744 priv->process_packet = alps_process_packet_v1_v2;
1555 priv->set_abs_params = alps_set_abs_params_st; 1745 priv->set_abs_params = alps_set_abs_params_st;
1746 priv->x_max = 1023;
1747 priv->y_max = 767;
1556 break; 1748 break;
1557 case ALPS_PROTO_V3: 1749 case ALPS_PROTO_V3:
1558 priv->hw_init = alps_hw_init_v3; 1750 priv->hw_init = alps_hw_init_v3;
@@ -1584,6 +1776,14 @@ static void alps_set_defaults(struct alps_data *priv)
1584 priv->x_bits = 23; 1776 priv->x_bits = 23;
1585 priv->y_bits = 12; 1777 priv->y_bits = 12;
1586 break; 1778 break;
1779 case ALPS_PROTO_V6:
1780 priv->hw_init = alps_hw_init_v6;
1781 priv->process_packet = alps_process_packet_v6;
1782 priv->set_abs_params = alps_set_abs_params_st;
1783 priv->nibble_commands = alps_v6_nibble_commands;
1784 priv->x_max = 2047;
1785 priv->y_max = 1535;
1786 break;
1587 } 1787 }
1588} 1788}
1589 1789
@@ -1705,8 +1905,8 @@ static void alps_disconnect(struct psmouse *psmouse)
1705static void alps_set_abs_params_st(struct alps_data *priv, 1905static void alps_set_abs_params_st(struct alps_data *priv,
1706 struct input_dev *dev1) 1906 struct input_dev *dev1)
1707{ 1907{
1708 input_set_abs_params(dev1, ABS_X, 0, 1023, 0, 0); 1908 input_set_abs_params(dev1, ABS_X, 0, priv->x_max, 0, 0);
1709 input_set_abs_params(dev1, ABS_Y, 0, 767, 0, 0); 1909 input_set_abs_params(dev1, ABS_Y, 0, priv->y_max, 0, 0);
1710} 1910}
1711 1911
1712static void alps_set_abs_params_mt(struct alps_data *priv, 1912static void alps_set_abs_params_mt(struct alps_data *priv,
diff --git a/drivers/input/mouse/alps.h b/drivers/input/mouse/alps.h
index eee59853b9ce..704f0f924307 100644
--- a/drivers/input/mouse/alps.h
+++ b/drivers/input/mouse/alps.h
@@ -17,6 +17,7 @@
17#define ALPS_PROTO_V3 3 17#define ALPS_PROTO_V3 3
18#define ALPS_PROTO_V4 4 18#define ALPS_PROTO_V4 4
19#define ALPS_PROTO_V5 5 19#define ALPS_PROTO_V5 5
20#define ALPS_PROTO_V6 6
20 21
21/** 22/**
22 * struct alps_model_info - touchpad ID table 23 * struct alps_model_info - touchpad ID table
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index 8551dcaf24db..597e9b8fc18d 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -1313,6 +1313,7 @@ static int elantech_set_properties(struct elantech_data *etd)
1313 break; 1313 break;
1314 case 6: 1314 case 6:
1315 case 7: 1315 case 7:
1316 case 8:
1316 etd->hw_version = 4; 1317 etd->hw_version = 4;
1317 break; 1318 break;
1318 default: 1319 default:
diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
index 98707fb2cb5d..8f4c4ab04bc2 100644
--- a/drivers/input/serio/serio.c
+++ b/drivers/input/serio/serio.c
@@ -455,16 +455,26 @@ static DEVICE_ATTR_RO(type);
455static DEVICE_ATTR_RO(proto); 455static DEVICE_ATTR_RO(proto);
456static DEVICE_ATTR_RO(id); 456static DEVICE_ATTR_RO(id);
457static DEVICE_ATTR_RO(extra); 457static DEVICE_ATTR_RO(extra);
458static DEVICE_ATTR_RO(modalias);
459static DEVICE_ATTR_WO(drvctl);
460static DEVICE_ATTR(description, S_IRUGO, serio_show_description, NULL);
461static DEVICE_ATTR(bind_mode, S_IWUSR | S_IRUGO, serio_show_bind_mode, serio_set_bind_mode);
462 458
463static struct attribute *serio_device_id_attrs[] = { 459static struct attribute *serio_device_id_attrs[] = {
464 &dev_attr_type.attr, 460 &dev_attr_type.attr,
465 &dev_attr_proto.attr, 461 &dev_attr_proto.attr,
466 &dev_attr_id.attr, 462 &dev_attr_id.attr,
467 &dev_attr_extra.attr, 463 &dev_attr_extra.attr,
464 NULL
465};
466
467static struct attribute_group serio_id_attr_group = {
468 .name = "id",
469 .attrs = serio_device_id_attrs,
470};
471
472static DEVICE_ATTR_RO(modalias);
473static DEVICE_ATTR_WO(drvctl);
474static DEVICE_ATTR(description, S_IRUGO, serio_show_description, NULL);
475static DEVICE_ATTR(bind_mode, S_IWUSR | S_IRUGO, serio_show_bind_mode, serio_set_bind_mode);
476
477static struct attribute *serio_device_attrs[] = {
468 &dev_attr_modalias.attr, 478 &dev_attr_modalias.attr,
469 &dev_attr_description.attr, 479 &dev_attr_description.attr,
470 &dev_attr_drvctl.attr, 480 &dev_attr_drvctl.attr,
@@ -472,13 +482,13 @@ static struct attribute *serio_device_id_attrs[] = {
472 NULL 482 NULL
473}; 483};
474 484
475static struct attribute_group serio_id_attr_group = { 485static struct attribute_group serio_device_attr_group = {
476 .name = "id", 486 .attrs = serio_device_attrs,
477 .attrs = serio_device_id_attrs,
478}; 487};
479 488
480static const struct attribute_group *serio_device_attr_groups[] = { 489static const struct attribute_group *serio_device_attr_groups[] = {
481 &serio_id_attr_group, 490 &serio_id_attr_group,
491 &serio_device_attr_group,
482 NULL 492 NULL
483}; 493};
484 494
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index 00d1e547b211..961d58d32647 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -906,6 +906,17 @@ config TOUCHSCREEN_STMPE
906 To compile this driver as a module, choose M here: the 906 To compile this driver as a module, choose M here: the
907 module will be called stmpe-ts. 907 module will be called stmpe-ts.
908 908
909config TOUCHSCREEN_SUR40
910 tristate "Samsung SUR40 (Surface 2.0/PixelSense) touchscreen"
911 depends on USB
912 select INPUT_POLLDEV
913 help
914 Say Y here if you want support for the Samsung SUR40 touchscreen
915 (also known as Microsoft Surface 2.0 or Microsoft PixelSense).
916
917 To compile this driver as a module, choose M here: the
918 module will be called sur40.
919
909config TOUCHSCREEN_TPS6507X 920config TOUCHSCREEN_TPS6507X
910 tristate "TPS6507x based touchscreens" 921 tristate "TPS6507x based touchscreens"
911 depends on I2C 922 depends on I2C
diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile
index 7587883b8d38..62801f213346 100644
--- a/drivers/input/touchscreen/Makefile
+++ b/drivers/input/touchscreen/Makefile
@@ -54,6 +54,7 @@ obj-$(CONFIG_TOUCHSCREEN_PIXCIR) += pixcir_i2c_ts.o
54obj-$(CONFIG_TOUCHSCREEN_S3C2410) += s3c2410_ts.o 54obj-$(CONFIG_TOUCHSCREEN_S3C2410) += s3c2410_ts.o
55obj-$(CONFIG_TOUCHSCREEN_ST1232) += st1232.o 55obj-$(CONFIG_TOUCHSCREEN_ST1232) += st1232.o
56obj-$(CONFIG_TOUCHSCREEN_STMPE) += stmpe-ts.o 56obj-$(CONFIG_TOUCHSCREEN_STMPE) += stmpe-ts.o
57obj-$(CONFIG_TOUCHSCREEN_SUR40) += sur40.o
57obj-$(CONFIG_TOUCHSCREEN_TI_AM335X_TSC) += ti_am335x_tsc.o 58obj-$(CONFIG_TOUCHSCREEN_TI_AM335X_TSC) += ti_am335x_tsc.o
58obj-$(CONFIG_TOUCHSCREEN_TNETV107X) += tnetv107x-ts.o 59obj-$(CONFIG_TOUCHSCREEN_TNETV107X) += tnetv107x-ts.o
59obj-$(CONFIG_TOUCHSCREEN_TOUCHIT213) += touchit213.o 60obj-$(CONFIG_TOUCHSCREEN_TOUCHIT213) += touchit213.o
diff --git a/drivers/input/touchscreen/atmel-wm97xx.c b/drivers/input/touchscreen/atmel-wm97xx.c
index 268a35e55d7f..279c0e42b8a7 100644
--- a/drivers/input/touchscreen/atmel-wm97xx.c
+++ b/drivers/input/touchscreen/atmel-wm97xx.c
@@ -391,7 +391,7 @@ static int __exit atmel_wm97xx_remove(struct platform_device *pdev)
391} 391}
392 392
393#ifdef CONFIG_PM_SLEEP 393#ifdef CONFIG_PM_SLEEP
394static int atmel_wm97xx_suspend(struct *dev) 394static int atmel_wm97xx_suspend(struct device *dev)
395{ 395{
396 struct platform_device *pdev = to_platform_device(dev); 396 struct platform_device *pdev = to_platform_device(dev);
397 struct atmel_wm97xx *atmel_wm97xx = platform_get_drvdata(pdev); 397 struct atmel_wm97xx *atmel_wm97xx = platform_get_drvdata(pdev);
diff --git a/drivers/input/touchscreen/cyttsp4_core.c b/drivers/input/touchscreen/cyttsp4_core.c
index 42d830efa316..a035a390f8e2 100644
--- a/drivers/input/touchscreen/cyttsp4_core.c
+++ b/drivers/input/touchscreen/cyttsp4_core.c
@@ -1246,8 +1246,7 @@ static void cyttsp4_watchdog_timer(unsigned long handle)
1246 1246
1247 dev_vdbg(cd->dev, "%s: Watchdog timer triggered\n", __func__); 1247 dev_vdbg(cd->dev, "%s: Watchdog timer triggered\n", __func__);
1248 1248
1249 if (!work_pending(&cd->watchdog_work)) 1249 schedule_work(&cd->watchdog_work);
1250 schedule_work(&cd->watchdog_work);
1251 1250
1252 return; 1251 return;
1253} 1252}
diff --git a/drivers/input/touchscreen/sur40.c b/drivers/input/touchscreen/sur40.c
new file mode 100644
index 000000000000..f1cb05148b46
--- /dev/null
+++ b/drivers/input/touchscreen/sur40.c
@@ -0,0 +1,466 @@
1/*
2 * Surface2.0/SUR40/PixelSense input driver
3 *
4 * Copyright (c) 2013 by Florian 'floe' Echtler <floe@butterbrot.org>
5 *
6 * Derived from the USB Skeleton driver 1.1,
7 * Copyright (c) 2003 Greg Kroah-Hartman (greg@kroah.com)
8 *
9 * and from the Apple USB BCM5974 multitouch driver,
10 * Copyright (c) 2008 Henrik Rydberg (rydberg@euromail.se)
11 *
12 * and from the generic hid-multitouch driver,
13 * Copyright (c) 2010-2012 Stephane Chatty <chatty@enac.fr>
14 *
15 * This program is free software; you can redistribute it and/or
16 * modify it under the terms of the GNU General Public License as
17 * published by the Free Software Foundation; either version 2 of
18 * the License, or (at your option) any later version.
19 */
20
21#include <linux/kernel.h>
22#include <linux/errno.h>
23#include <linux/delay.h>
24#include <linux/init.h>
25#include <linux/slab.h>
26#include <linux/module.h>
27#include <linux/completion.h>
28#include <linux/uaccess.h>
29#include <linux/usb.h>
30#include <linux/printk.h>
31#include <linux/input-polldev.h>
32#include <linux/input/mt.h>
33#include <linux/usb/input.h>
34
35/* read 512 bytes from endpoint 0x86 -> get header + blobs */
36struct sur40_header {
37
38 __le16 type; /* always 0x0001 */
39 __le16 count; /* count of blobs (if 0: continue prev. packet) */
40
41 __le32 packet_id; /* unique ID for all packets in one frame */
42
43 __le32 timestamp; /* milliseconds (inc. by 16 or 17 each frame) */
44 __le32 unknown; /* "epoch?" always 02/03 00 00 00 */
45
46} __packed;
47
48struct sur40_blob {
49
50 __le16 blob_id;
51
52 u8 action; /* 0x02 = enter/exit, 0x03 = update (?) */
53 u8 unknown; /* always 0x01 or 0x02 (no idea what this is?) */
54
55 __le16 bb_pos_x; /* upper left corner of bounding box */
56 __le16 bb_pos_y;
57
58 __le16 bb_size_x; /* size of bounding box */
59 __le16 bb_size_y;
60
61 __le16 pos_x; /* finger tip position */
62 __le16 pos_y;
63
64 __le16 ctr_x; /* centroid position */
65 __le16 ctr_y;
66
67 __le16 axis_x; /* somehow related to major/minor axis, mostly: */
68 __le16 axis_y; /* axis_x == bb_size_y && axis_y == bb_size_x */
69
70 __le32 angle; /* orientation in radians relative to x axis -
71 actually an IEEE754 float, don't use in kernel */
72
73 __le32 area; /* size in pixels/pressure (?) */
74
75 u8 padding[32];
76
77} __packed;
78
79/* combined header/blob data */
80struct sur40_data {
81 struct sur40_header header;
82 struct sur40_blob blobs[];
83} __packed;
84
85
86/* version information */
87#define DRIVER_SHORT "sur40"
88#define DRIVER_AUTHOR "Florian 'floe' Echtler <floe@butterbrot.org>"
89#define DRIVER_DESC "Surface2.0/SUR40/PixelSense input driver"
90
91/* vendor and device IDs */
92#define ID_MICROSOFT 0x045e
93#define ID_SUR40 0x0775
94
95/* sensor resolution */
96#define SENSOR_RES_X 1920
97#define SENSOR_RES_Y 1080
98
99/* touch data endpoint */
100#define TOUCH_ENDPOINT 0x86
101
102/* polling interval (ms) */
103#define POLL_INTERVAL 10
104
105/* maximum number of contacts FIXME: this is a guess? */
106#define MAX_CONTACTS 64
107
108/* control commands */
109#define SUR40_GET_VERSION 0xb0 /* 12 bytes string */
110#define SUR40_UNKNOWN1 0xb3 /* 5 bytes */
111#define SUR40_UNKNOWN2 0xc1 /* 24 bytes */
112
113#define SUR40_GET_STATE 0xc5 /* 4 bytes state (?) */
114#define SUR40_GET_SENSORS 0xb1 /* 8 bytes sensors */
115
116/*
117 * Note: an earlier, non-public version of this driver used USB_RECIP_ENDPOINT
118 * here by mistake which is very likely to have corrupted the firmware EEPROM
119 * on two separate SUR40 devices. Thanks to Alan Stern who spotted this bug.
120 * Should you ever run into a similar problem, the background story to this
121 * incident and instructions on how to fix the corrupted EEPROM are available
122 * at https://floe.butterbrot.org/matrix/hacking/surface/brick.html
123*/
124
125struct sur40_state {
126
127 struct usb_device *usbdev;
128 struct device *dev;
129 struct input_polled_dev *input;
130
131 struct sur40_data *bulk_in_buffer;
132 size_t bulk_in_size;
133 u8 bulk_in_epaddr;
134
135 char phys[64];
136};
137
138static int sur40_command(struct sur40_state *dev,
139 u8 command, u16 index, void *buffer, u16 size)
140{
141 return usb_control_msg(dev->usbdev, usb_rcvctrlpipe(dev->usbdev, 0),
142 command,
143 USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
144 0x00, index, buffer, size, 1000);
145}
146
147/* Initialization routine, called from sur40_open */
148static int sur40_init(struct sur40_state *dev)
149{
150 int result;
151 u8 buffer[24];
152
153 /* stupidly replay the original MS driver init sequence */
154 result = sur40_command(dev, SUR40_GET_VERSION, 0x00, buffer, 12);
155 if (result < 0)
156 return result;
157
158 result = sur40_command(dev, SUR40_GET_VERSION, 0x01, buffer, 12);
159 if (result < 0)
160 return result;
161
162 result = sur40_command(dev, SUR40_GET_VERSION, 0x02, buffer, 12);
163 if (result < 0)
164 return result;
165
166 result = sur40_command(dev, SUR40_UNKNOWN2, 0x00, buffer, 24);
167 if (result < 0)
168 return result;
169
170 result = sur40_command(dev, SUR40_UNKNOWN1, 0x00, buffer, 5);
171 if (result < 0)
172 return result;
173
174 result = sur40_command(dev, SUR40_GET_VERSION, 0x03, buffer, 12);
175
176 /*
177 * Discard the result buffer - no known data inside except
178 * some version strings, maybe extract these sometime...
179 */
180
181 return result;
182}
183
184/*
185 * Callback routines from input_polled_dev
186 */
187
188/* Enable the device, polling will now start. */
189static void sur40_open(struct input_polled_dev *polldev)
190{
191 struct sur40_state *sur40 = polldev->private;
192
193 dev_dbg(sur40->dev, "open\n");
194 sur40_init(sur40);
195}
196
197/* Disable device, polling has stopped. */
198static void sur40_close(struct input_polled_dev *polldev)
199{
200 struct sur40_state *sur40 = polldev->private;
201
202 dev_dbg(sur40->dev, "close\n");
203 /*
204 * There is no known way to stop the device, so we simply
205 * stop polling.
206 */
207}
208
209/*
210 * This function is called when a whole contact has been processed,
211 * so that it can assign it to a slot and store the data there.
212 */
213static void sur40_report_blob(struct sur40_blob *blob, struct input_dev *input)
214{
215 int wide, major, minor;
216
217 int bb_size_x = le16_to_cpu(blob->bb_size_x);
218 int bb_size_y = le16_to_cpu(blob->bb_size_y);
219
220 int pos_x = le16_to_cpu(blob->pos_x);
221 int pos_y = le16_to_cpu(blob->pos_y);
222
223 int ctr_x = le16_to_cpu(blob->ctr_x);
224 int ctr_y = le16_to_cpu(blob->ctr_y);
225
226 int slotnum = input_mt_get_slot_by_key(input, blob->blob_id);
227 if (slotnum < 0 || slotnum >= MAX_CONTACTS)
228 return;
229
230 input_mt_slot(input, slotnum);
231 input_mt_report_slot_state(input, MT_TOOL_FINGER, 1);
232 wide = (bb_size_x > bb_size_y);
233 major = max(bb_size_x, bb_size_y);
234 minor = min(bb_size_x, bb_size_y);
235
236 input_report_abs(input, ABS_MT_POSITION_X, pos_x);
237 input_report_abs(input, ABS_MT_POSITION_Y, pos_y);
238 input_report_abs(input, ABS_MT_TOOL_X, ctr_x);
239 input_report_abs(input, ABS_MT_TOOL_Y, ctr_y);
240
241 /* TODO: use a better orientation measure */
242 input_report_abs(input, ABS_MT_ORIENTATION, wide);
243 input_report_abs(input, ABS_MT_TOUCH_MAJOR, major);
244 input_report_abs(input, ABS_MT_TOUCH_MINOR, minor);
245}
246
247/* core function: poll for new input data */
248static void sur40_poll(struct input_polled_dev *polldev)
249{
250
251 struct sur40_state *sur40 = polldev->private;
252 struct input_dev *input = polldev->input;
253 int result, bulk_read, need_blobs, packet_blobs, i;
254 u32 uninitialized_var(packet_id);
255
256 struct sur40_header *header = &sur40->bulk_in_buffer->header;
257 struct sur40_blob *inblob = &sur40->bulk_in_buffer->blobs[0];
258
259 dev_dbg(sur40->dev, "poll\n");
260
261 need_blobs = -1;
262
263 do {
264
265 /* perform a blocking bulk read to get data from the device */
266 result = usb_bulk_msg(sur40->usbdev,
267 usb_rcvbulkpipe(sur40->usbdev, sur40->bulk_in_epaddr),
268 sur40->bulk_in_buffer, sur40->bulk_in_size,
269 &bulk_read, 1000);
270
271 dev_dbg(sur40->dev, "received %d bytes\n", bulk_read);
272
273 if (result < 0) {
274 dev_err(sur40->dev, "error in usb_bulk_read\n");
275 return;
276 }
277
278 result = bulk_read - sizeof(struct sur40_header);
279
280 if (result % sizeof(struct sur40_blob) != 0) {
281 dev_err(sur40->dev, "transfer size mismatch\n");
282 return;
283 }
284
285 /* first packet? */
286 if (need_blobs == -1) {
287 need_blobs = le16_to_cpu(header->count);
288 dev_dbg(sur40->dev, "need %d blobs\n", need_blobs);
289 packet_id = le32_to_cpu(header->packet_id);
290 }
291
292 /*
293 * Sanity check. when video data is also being retrieved, the
294 * packet ID will usually increase in the middle of a series
295 * instead of at the end.
296 */
297 if (packet_id != header->packet_id)
298 dev_warn(sur40->dev, "packet ID mismatch\n");
299
300 packet_blobs = result / sizeof(struct sur40_blob);
301 dev_dbg(sur40->dev, "received %d blobs\n", packet_blobs);
302
303 /* packets always contain at least 4 blobs, even if empty */
304 if (packet_blobs > need_blobs)
305 packet_blobs = need_blobs;
306
307 for (i = 0; i < packet_blobs; i++) {
308 need_blobs--;
309 dev_dbg(sur40->dev, "processing blob\n");
310 sur40_report_blob(&(inblob[i]), input);
311 }
312
313 } while (need_blobs > 0);
314
315 input_mt_sync_frame(input);
316 input_sync(input);
317}
318
319/* Initialize input device parameters. */
320static void sur40_input_setup(struct input_dev *input_dev)
321{
322 __set_bit(EV_KEY, input_dev->evbit);
323 __set_bit(EV_ABS, input_dev->evbit);
324
325 input_set_abs_params(input_dev, ABS_MT_POSITION_X,
326 0, SENSOR_RES_X, 0, 0);
327 input_set_abs_params(input_dev, ABS_MT_POSITION_Y,
328 0, SENSOR_RES_Y, 0, 0);
329
330 input_set_abs_params(input_dev, ABS_MT_TOOL_X,
331 0, SENSOR_RES_X, 0, 0);
332 input_set_abs_params(input_dev, ABS_MT_TOOL_Y,
333 0, SENSOR_RES_Y, 0, 0);
334
335 /* max value unknown, but major/minor axis
336 * can never be larger than screen */
337 input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR,
338 0, SENSOR_RES_X, 0, 0);
339 input_set_abs_params(input_dev, ABS_MT_TOUCH_MINOR,
340 0, SENSOR_RES_Y, 0, 0);
341
342 input_set_abs_params(input_dev, ABS_MT_ORIENTATION, 0, 1, 0, 0);
343
344 input_mt_init_slots(input_dev, MAX_CONTACTS,
345 INPUT_MT_DIRECT | INPUT_MT_DROP_UNUSED);
346}
347
348/* Check candidate USB interface. */
349static int sur40_probe(struct usb_interface *interface,
350 const struct usb_device_id *id)
351{
352 struct usb_device *usbdev = interface_to_usbdev(interface);
353 struct sur40_state *sur40;
354 struct usb_host_interface *iface_desc;
355 struct usb_endpoint_descriptor *endpoint;
356 struct input_polled_dev *poll_dev;
357 int error;
358
359 /* Check if we really have the right interface. */
360 iface_desc = &interface->altsetting[0];
361 if (iface_desc->desc.bInterfaceClass != 0xFF)
362 return -ENODEV;
363
364 /* Use endpoint #4 (0x86). */
365 endpoint = &iface_desc->endpoint[4].desc;
366 if (endpoint->bEndpointAddress != TOUCH_ENDPOINT)
367 return -ENODEV;
368
369 /* Allocate memory for our device state and initialize it. */
370 sur40 = kzalloc(sizeof(struct sur40_state), GFP_KERNEL);
371 if (!sur40)
372 return -ENOMEM;
373
374 poll_dev = input_allocate_polled_device();
375 if (!poll_dev) {
376 error = -ENOMEM;
377 goto err_free_dev;
378 }
379
380 /* Set up polled input device control structure */
381 poll_dev->private = sur40;
382 poll_dev->poll_interval = POLL_INTERVAL;
383 poll_dev->open = sur40_open;
384 poll_dev->poll = sur40_poll;
385 poll_dev->close = sur40_close;
386
387 /* Set up regular input device structure */
388 sur40_input_setup(poll_dev->input);
389
390 poll_dev->input->name = "Samsung SUR40";
391 usb_to_input_id(usbdev, &poll_dev->input->id);
392 usb_make_path(usbdev, sur40->phys, sizeof(sur40->phys));
393 strlcat(sur40->phys, "/input0", sizeof(sur40->phys));
394 poll_dev->input->phys = sur40->phys;
395 poll_dev->input->dev.parent = &interface->dev;
396
397 sur40->usbdev = usbdev;
398 sur40->dev = &interface->dev;
399 sur40->input = poll_dev;
400
401 /* use the bulk-in endpoint tested above */
402 sur40->bulk_in_size = usb_endpoint_maxp(endpoint);
403 sur40->bulk_in_epaddr = endpoint->bEndpointAddress;
404 sur40->bulk_in_buffer = kmalloc(sur40->bulk_in_size, GFP_KERNEL);
405 if (!sur40->bulk_in_buffer) {
406 dev_err(&interface->dev, "Unable to allocate input buffer.");
407 error = -ENOMEM;
408 goto err_free_polldev;
409 }
410
411 error = input_register_polled_device(poll_dev);
412 if (error) {
413 dev_err(&interface->dev,
414 "Unable to register polled input device.");
415 goto err_free_buffer;
416 }
417
418 /* we can register the device now, as it is ready */
419 usb_set_intfdata(interface, sur40);
420 dev_dbg(&interface->dev, "%s is now attached\n", DRIVER_DESC);
421
422 return 0;
423
424err_free_buffer:
425 kfree(sur40->bulk_in_buffer);
426err_free_polldev:
427 input_free_polled_device(sur40->input);
428err_free_dev:
429 kfree(sur40);
430
431 return error;
432}
433
434/* Unregister device & clean up. */
435static void sur40_disconnect(struct usb_interface *interface)
436{
437 struct sur40_state *sur40 = usb_get_intfdata(interface);
438
439 input_unregister_polled_device(sur40->input);
440 input_free_polled_device(sur40->input);
441 kfree(sur40->bulk_in_buffer);
442 kfree(sur40);
443
444 usb_set_intfdata(interface, NULL);
445 dev_dbg(&interface->dev, "%s is now disconnected\n", DRIVER_DESC);
446}
447
448static const struct usb_device_id sur40_table[] = {
449 { USB_DEVICE(ID_MICROSOFT, ID_SUR40) }, /* Samsung SUR40 */
450 { } /* terminating null entry */
451};
452MODULE_DEVICE_TABLE(usb, sur40_table);
453
454/* USB-specific object needed to register this driver with the USB subsystem. */
455static struct usb_driver sur40_driver = {
456 .name = DRIVER_SHORT,
457 .probe = sur40_probe,
458 .disconnect = sur40_disconnect,
459 .id_table = sur40_table,
460};
461
462module_usb_driver(sur40_driver);
463
464MODULE_AUTHOR(DRIVER_AUTHOR);
465MODULE_DESCRIPTION(DRIVER_DESC);
466MODULE_LICENSE("GPL");
diff --git a/drivers/input/touchscreen/usbtouchscreen.c b/drivers/input/touchscreen/usbtouchscreen.c
index ae4b6b903629..5f87bed05467 100644
--- a/drivers/input/touchscreen/usbtouchscreen.c
+++ b/drivers/input/touchscreen/usbtouchscreen.c
@@ -106,6 +106,7 @@ struct usbtouch_device_info {
106struct usbtouch_usb { 106struct usbtouch_usb {
107 unsigned char *data; 107 unsigned char *data;
108 dma_addr_t data_dma; 108 dma_addr_t data_dma;
109 int data_size;
109 unsigned char *buffer; 110 unsigned char *buffer;
110 int buf_len; 111 int buf_len;
111 struct urb *irq; 112 struct urb *irq;
@@ -1521,7 +1522,7 @@ static int usbtouch_reset_resume(struct usb_interface *intf)
1521static void usbtouch_free_buffers(struct usb_device *udev, 1522static void usbtouch_free_buffers(struct usb_device *udev,
1522 struct usbtouch_usb *usbtouch) 1523 struct usbtouch_usb *usbtouch)
1523{ 1524{
1524 usb_free_coherent(udev, usbtouch->type->rept_size, 1525 usb_free_coherent(udev, usbtouch->data_size,
1525 usbtouch->data, usbtouch->data_dma); 1526 usbtouch->data, usbtouch->data_dma);
1526 kfree(usbtouch->buffer); 1527 kfree(usbtouch->buffer);
1527} 1528}
@@ -1566,7 +1567,20 @@ static int usbtouch_probe(struct usb_interface *intf,
1566 if (!type->process_pkt) 1567 if (!type->process_pkt)
1567 type->process_pkt = usbtouch_process_pkt; 1568 type->process_pkt = usbtouch_process_pkt;
1568 1569
1569 usbtouch->data = usb_alloc_coherent(udev, type->rept_size, 1570 usbtouch->data_size = type->rept_size;
1571 if (type->get_pkt_len) {
1572 /*
1573 * When dealing with variable-length packets we should
1574 * not request more than wMaxPacketSize bytes at once
1575 * as we do not know if there is more data coming or
1576 * we filled exactly wMaxPacketSize bytes and there is
1577 * nothing else.
1578 */
1579 usbtouch->data_size = min(usbtouch->data_size,
1580 usb_endpoint_maxp(endpoint));
1581 }
1582
1583 usbtouch->data = usb_alloc_coherent(udev, usbtouch->data_size,
1570 GFP_KERNEL, &usbtouch->data_dma); 1584 GFP_KERNEL, &usbtouch->data_dma);
1571 if (!usbtouch->data) 1585 if (!usbtouch->data)
1572 goto out_free; 1586 goto out_free;
@@ -1626,12 +1640,12 @@ static int usbtouch_probe(struct usb_interface *intf,
1626 if (usb_endpoint_type(endpoint) == USB_ENDPOINT_XFER_INT) 1640 if (usb_endpoint_type(endpoint) == USB_ENDPOINT_XFER_INT)
1627 usb_fill_int_urb(usbtouch->irq, udev, 1641 usb_fill_int_urb(usbtouch->irq, udev,
1628 usb_rcvintpipe(udev, endpoint->bEndpointAddress), 1642 usb_rcvintpipe(udev, endpoint->bEndpointAddress),
1629 usbtouch->data, type->rept_size, 1643 usbtouch->data, usbtouch->data_size,
1630 usbtouch_irq, usbtouch, endpoint->bInterval); 1644 usbtouch_irq, usbtouch, endpoint->bInterval);
1631 else 1645 else
1632 usb_fill_bulk_urb(usbtouch->irq, udev, 1646 usb_fill_bulk_urb(usbtouch->irq, udev,
1633 usb_rcvbulkpipe(udev, endpoint->bEndpointAddress), 1647 usb_rcvbulkpipe(udev, endpoint->bEndpointAddress),
1634 usbtouch->data, type->rept_size, 1648 usbtouch->data, usbtouch->data_size,
1635 usbtouch_irq, usbtouch); 1649 usbtouch_irq, usbtouch);
1636 1650
1637 usbtouch->irq->dev = udev; 1651 usbtouch->irq->dev = udev;
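
The usbtouchscreen change caps the per-URB buffer at the endpoint's wMaxPacketSize whenever the device uses variable-length packets (a get_pkt_len parser is set), because the driver cannot tell whether a read that exactly fills wMaxPacketSize will be followed by more data or by nothing at all. A hedged sketch of the sizing decision with made-up numbers:

#include <stdio.h>

static int min_int(int a, int b) { return a < b ? a : b; }

int main(void)
{
	int rept_size = 256;	/* largest report the protocol defines   */
	int wmax_packet = 64;	/* endpoint's wMaxPacketSize              */
	int variable_len = 1;	/* device has a get_pkt_len-style parser  */

	int data_size = rept_size;

	/* For variable-length protocols, never request more than one
	 * max-sized packet per transfer. */
	if (variable_len)
		data_size = min_int(data_size, wmax_packet);

	printf("URB buffer size: %d bytes\n", data_size);
	return 0;
}
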
diff --git a/drivers/input/touchscreen/zforce_ts.c b/drivers/input/touchscreen/zforce_ts.c
index 75762d6ff3ba..aa127ba392a4 100644
--- a/drivers/input/touchscreen/zforce_ts.c
+++ b/drivers/input/touchscreen/zforce_ts.c
@@ -455,7 +455,18 @@ static void zforce_complete(struct zforce_ts *ts, int cmd, int result)
455 } 455 }
456} 456}
457 457
458static irqreturn_t zforce_interrupt(int irq, void *dev_id) 458static irqreturn_t zforce_irq(int irq, void *dev_id)
459{
460 struct zforce_ts *ts = dev_id;
461 struct i2c_client *client = ts->client;
462
463 if (ts->suspended && device_may_wakeup(&client->dev))
464 pm_wakeup_event(&client->dev, 500);
465
466 return IRQ_WAKE_THREAD;
467}
468
469static irqreturn_t zforce_irq_thread(int irq, void *dev_id)
459{ 470{
460 struct zforce_ts *ts = dev_id; 471 struct zforce_ts *ts = dev_id;
461 struct i2c_client *client = ts->client; 472 struct i2c_client *client = ts->client;
@@ -465,12 +476,10 @@ static irqreturn_t zforce_interrupt(int irq, void *dev_id)
465 u8 *payload; 476 u8 *payload;
466 477
467 /* 478 /*
468 * When suspended, emit a wakeup signal if necessary and return. 479 * When still suspended, return.
469 * Due to the level-interrupt we will get re-triggered later. 480 * Due to the level-interrupt we will get re-triggered later.
470 */ 481 */
471 if (ts->suspended) { 482 if (ts->suspended) {
472 if (device_may_wakeup(&client->dev))
473 pm_wakeup_event(&client->dev, 500);
474 msleep(20); 483 msleep(20);
475 return IRQ_HANDLED; 484 return IRQ_HANDLED;
476 } 485 }
@@ -763,8 +772,8 @@ static int zforce_probe(struct i2c_client *client,
763 * Therefore we can trigger the interrupt anytime it is low and do 772 * Therefore we can trigger the interrupt anytime it is low and do
764 * not need to limit it to the interrupt edge. 773 * not need to limit it to the interrupt edge.
765 */ 774 */
766 ret = devm_request_threaded_irq(&client->dev, client->irq, NULL, 775 ret = devm_request_threaded_irq(&client->dev, client->irq,
767 zforce_interrupt, 776 zforce_irq, zforce_irq_thread,
768 IRQF_TRIGGER_LOW | IRQF_ONESHOT, 777 IRQF_TRIGGER_LOW | IRQF_ONESHOT,
769 input_dev->name, ts); 778 input_dev->name, ts);
770 if (ret) { 779 if (ret) {
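The zforce change splits one threaded handler into a tiny hard handler (which only emits the wakeup event and returns IRQ_WAKE_THREAD) and a sleeping thread handler. A userspace model of that split, assuming a simplified ts structure; only the handler names mirror the driver:

#include <stdbool.h>
#include <stdio.h>

enum irqreturn { IRQ_NONE, IRQ_HANDLED, IRQ_WAKE_THREAD };

struct ts_model {
        bool suspended;
        bool may_wakeup;
};

/* Hard handler: runs in interrupt context, so it stays tiny.  Its only
 * job is to signal the wakeup event while suspended, then defer. */
static enum irqreturn hard_handler(struct ts_model *ts)
{
        if (ts->suspended && ts->may_wakeup)
                printf("pm_wakeup_event(dev, 500)\n");
        return IRQ_WAKE_THREAD;
}

/* Threaded handler: may sleep (I2C transfers).  While still suspended it
 * just backs off; the level-triggered line will re-fire later. */
static enum irqreturn thread_handler(struct ts_model *ts)
{
        if (ts->suspended) {
                printf("msleep(20)\n");
                return IRQ_HANDLED;
        }
        printf("read and process touch packet\n");
        return IRQ_HANDLED;
}

int main(void)
{
        struct ts_model ts = { .suspended = true, .may_wakeup = true };

        if (hard_handler(&ts) == IRQ_WAKE_THREAD)
                thread_handler(&ts);
        return 0;
}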
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 1abfb5684ab7..e46a88700b68 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -392,7 +392,7 @@ struct arm_smmu_domain {
392 struct arm_smmu_cfg root_cfg; 392 struct arm_smmu_cfg root_cfg;
393 phys_addr_t output_mask; 393 phys_addr_t output_mask;
394 394
395 spinlock_t lock; 395 struct mutex lock;
396}; 396};
397 397
398static DEFINE_SPINLOCK(arm_smmu_devices_lock); 398static DEFINE_SPINLOCK(arm_smmu_devices_lock);
@@ -900,7 +900,7 @@ static int arm_smmu_domain_init(struct iommu_domain *domain)
900 goto out_free_domain; 900 goto out_free_domain;
901 smmu_domain->root_cfg.pgd = pgd; 901 smmu_domain->root_cfg.pgd = pgd;
902 902
903 spin_lock_init(&smmu_domain->lock); 903 mutex_init(&smmu_domain->lock);
904 domain->priv = smmu_domain; 904 domain->priv = smmu_domain;
905 return 0; 905 return 0;
906 906
@@ -1137,7 +1137,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
1137 * Sanity check the domain. We don't currently support domains 1137 * Sanity check the domain. We don't currently support domains
1138 * that cross between different SMMU chains. 1138 * that cross between different SMMU chains.
1139 */ 1139 */
1140 spin_lock(&smmu_domain->lock); 1140 mutex_lock(&smmu_domain->lock);
1141 if (!smmu_domain->leaf_smmu) { 1141 if (!smmu_domain->leaf_smmu) {
1142 /* Now that we have a master, we can finalise the domain */ 1142 /* Now that we have a master, we can finalise the domain */
1143 ret = arm_smmu_init_domain_context(domain, dev); 1143 ret = arm_smmu_init_domain_context(domain, dev);
@@ -1152,7 +1152,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
1152 dev_name(device_smmu->dev)); 1152 dev_name(device_smmu->dev));
1153 goto err_unlock; 1153 goto err_unlock;
1154 } 1154 }
1155 spin_unlock(&smmu_domain->lock); 1155 mutex_unlock(&smmu_domain->lock);
1156 1156
1157 /* Looks ok, so add the device to the domain */ 1157 /* Looks ok, so add the device to the domain */
1158 master = find_smmu_master(smmu_domain->leaf_smmu, dev->of_node); 1158 master = find_smmu_master(smmu_domain->leaf_smmu, dev->of_node);
@@ -1162,7 +1162,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
1162 return arm_smmu_domain_add_master(smmu_domain, master); 1162 return arm_smmu_domain_add_master(smmu_domain, master);
1163 1163
1164err_unlock: 1164err_unlock:
1165 spin_unlock(&smmu_domain->lock); 1165 mutex_unlock(&smmu_domain->lock);
1166 return ret; 1166 return ret;
1167} 1167}
1168 1168
@@ -1394,7 +1394,7 @@ static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain,
1394 if (paddr & ~output_mask) 1394 if (paddr & ~output_mask)
1395 return -ERANGE; 1395 return -ERANGE;
1396 1396
1397 spin_lock(&smmu_domain->lock); 1397 mutex_lock(&smmu_domain->lock);
1398 pgd += pgd_index(iova); 1398 pgd += pgd_index(iova);
1399 end = iova + size; 1399 end = iova + size;
1400 do { 1400 do {
@@ -1410,7 +1410,7 @@ static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain,
1410 } while (pgd++, iova != end); 1410 } while (pgd++, iova != end);
1411 1411
1412out_unlock: 1412out_unlock:
1413 spin_unlock(&smmu_domain->lock); 1413 mutex_unlock(&smmu_domain->lock);
1414 1414
1415 /* Ensure new page tables are visible to the hardware walker */ 1415 /* Ensure new page tables are visible to the hardware walker */
1416 if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) 1416 if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
@@ -1423,9 +1423,8 @@ static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
1423 phys_addr_t paddr, size_t size, int flags) 1423 phys_addr_t paddr, size_t size, int flags)
1424{ 1424{
1425 struct arm_smmu_domain *smmu_domain = domain->priv; 1425 struct arm_smmu_domain *smmu_domain = domain->priv;
1426 struct arm_smmu_device *smmu = smmu_domain->leaf_smmu;
1427 1426
1428 if (!smmu_domain || !smmu) 1427 if (!smmu_domain)
1429 return -ENODEV; 1428 return -ENODEV;
1430 1429
1431 /* Check for silent address truncation up the SMMU chain. */ 1430 /* Check for silent address truncation up the SMMU chain. */
@@ -1449,44 +1448,34 @@ static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
1449static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain, 1448static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
1450 dma_addr_t iova) 1449 dma_addr_t iova)
1451{ 1450{
1452 pgd_t *pgd; 1451 pgd_t *pgdp, pgd;
1453 pud_t *pud; 1452 pud_t pud;
1454 pmd_t *pmd; 1453 pmd_t pmd;
1455 pte_t *pte; 1454 pte_t pte;
1456 struct arm_smmu_domain *smmu_domain = domain->priv; 1455 struct arm_smmu_domain *smmu_domain = domain->priv;
1457 struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg; 1456 struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg;
1458 struct arm_smmu_device *smmu = root_cfg->smmu;
1459 1457
1460 spin_lock(&smmu_domain->lock); 1458 pgdp = root_cfg->pgd;
1461 pgd = root_cfg->pgd; 1459 if (!pgdp)
1462 if (!pgd) 1460 return 0;
1463 goto err_unlock;
1464 1461
1465 pgd += pgd_index(iova); 1462 pgd = *(pgdp + pgd_index(iova));
1466 if (pgd_none_or_clear_bad(pgd)) 1463 if (pgd_none(pgd))
1467 goto err_unlock; 1464 return 0;
1468 1465
1469 pud = pud_offset(pgd, iova); 1466 pud = *pud_offset(&pgd, iova);
1470 if (pud_none_or_clear_bad(pud)) 1467 if (pud_none(pud))
1471 goto err_unlock; 1468 return 0;
1472 1469
1473 pmd = pmd_offset(pud, iova); 1470 pmd = *pmd_offset(&pud, iova);
1474 if (pmd_none_or_clear_bad(pmd)) 1471 if (pmd_none(pmd))
1475 goto err_unlock; 1472 return 0;
1476 1473
1477 pte = pmd_page_vaddr(*pmd) + pte_index(iova); 1474 pte = *(pmd_page_vaddr(pmd) + pte_index(iova));
1478 if (pte_none(pte)) 1475 if (pte_none(pte))
1479 goto err_unlock; 1476 return 0;
1480
1481 spin_unlock(&smmu_domain->lock);
1482 return __pfn_to_phys(pte_pfn(*pte)) | (iova & ~PAGE_MASK);
1483 1477
1484err_unlock: 1478 return __pfn_to_phys(pte_pfn(pte)) | (iova & ~PAGE_MASK);
1485 spin_unlock(&smmu_domain->lock);
1486 dev_warn(smmu->dev,
1487 "invalid (corrupt?) page tables detected for iova 0x%llx\n",
1488 (unsigned long long)iova);
1489 return -EINVAL;
1490} 1479}
1491 1480
1492static int arm_smmu_domain_has_cap(struct iommu_domain *domain, 1481static int arm_smmu_domain_has_cap(struct iommu_domain *domain,
@@ -1863,6 +1852,7 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
1863 dev_err(dev, 1852 dev_err(dev,
1864 "found only %d context interrupt(s) but %d required\n", 1853 "found only %d context interrupt(s) but %d required\n",
1865 smmu->num_context_irqs, smmu->num_context_banks); 1854 smmu->num_context_irqs, smmu->num_context_banks);
1855 err = -ENODEV;
1866 goto out_put_parent; 1856 goto out_put_parent;
1867 } 1857 }
1868 1858
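One way to read the reworked arm_smmu_iova_to_phys above: each descriptor is copied into a local and the copy is tested, so the walk never chases a table entry through a pointer while holding a lock. A much-simplified two-level walk "by value" under assumed table geometry (nothing here is the SMMU's real descriptor format):

#include <stdint.h>
#include <stdio.h>

#define ENTRIES      4
#define PAGE_SHIFT   12
#define PAGE_MASK    (~((uint64_t)(1 << PAGE_SHIFT) - 1))

struct table { uint64_t e[ENTRIES]; };

static uint64_t walk(struct table *top, struct table *leaves[ENTRIES],
                     uint64_t iova)
{
        unsigned int i0 = (iova >> (PAGE_SHIFT + 2)) & (ENTRIES - 1);
        unsigned int i1 = (iova >> PAGE_SHIFT) & (ENTRIES - 1);
        uint64_t d, p;

        d = top->e[i0];               /* copy the entry, test the copy */
        if (!d)
                return 0;

        p = leaves[i0]->e[i1];        /* same again at the next level  */
        if (!p)
                return 0;

        return (p & PAGE_MASK) | (iova & ~PAGE_MASK);
}

int main(void)
{
        struct table leaf = { .e = { [2] = 0xabc000 } };
        struct table *leaves[ENTRIES] = { [1] = &leaf };
        struct table top = { .e = { [1] = 1 } };

        printf("0x%llx\n",
               (unsigned long long)walk(&top, leaves, 0x6123)); /* 0xabc123 */
        return 0;
}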
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index 9031171c141b..341c6016812d 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -957,12 +957,13 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start,
957 if (WARN_ON(!gic->domain)) 957 if (WARN_ON(!gic->domain))
958 return; 958 return;
959 959
960 if (gic_nr == 0) {
960#ifdef CONFIG_SMP 961#ifdef CONFIG_SMP
961 set_smp_cross_call(gic_raise_softirq); 962 set_smp_cross_call(gic_raise_softirq);
962 register_cpu_notifier(&gic_cpu_notifier); 963 register_cpu_notifier(&gic_cpu_notifier);
963#endif 964#endif
964 965 set_handle_irq(gic_handle_irq);
965 set_handle_irq(gic_handle_irq); 966 }
966 967
967 gic_chip.flags |= gic_arch_extn.flags; 968 gic_chip.flags |= gic_arch_extn.flags;
968 gic_dist_init(gic); 969 gic_dist_init(gic);
diff --git a/drivers/irqchip/irq-renesas-intc-irqpin.c b/drivers/irqchip/irq-renesas-intc-irqpin.c
index 82cec63a9011..3ee78f02e5d7 100644
--- a/drivers/irqchip/irq-renesas-intc-irqpin.c
+++ b/drivers/irqchip/irq-renesas-intc-irqpin.c
@@ -149,8 +149,9 @@ static void intc_irqpin_read_modify_write(struct intc_irqpin_priv *p,
149static void intc_irqpin_mask_unmask_prio(struct intc_irqpin_priv *p, 149static void intc_irqpin_mask_unmask_prio(struct intc_irqpin_priv *p,
150 int irq, int do_mask) 150 int irq, int do_mask)
151{ 151{
152 int bitfield_width = 4; /* PRIO assumed to have fixed bitfield width */ 152 /* The PRIO register is assumed to be 32-bit with fixed 4-bit fields. */
153 int shift = (7 - irq) * bitfield_width; /* PRIO assumed to be 32-bit */ 153 int bitfield_width = 4;
154 int shift = 32 - (irq + 1) * bitfield_width;
154 155
155 intc_irqpin_read_modify_write(p, INTC_IRQPIN_REG_PRIO, 156 intc_irqpin_read_modify_write(p, INTC_IRQPIN_REG_PRIO,
156 shift, bitfield_width, 157 shift, bitfield_width,
@@ -159,8 +160,9 @@ static void intc_irqpin_mask_unmask_prio(struct intc_irqpin_priv *p,
159 160
160static int intc_irqpin_set_sense(struct intc_irqpin_priv *p, int irq, int value) 161static int intc_irqpin_set_sense(struct intc_irqpin_priv *p, int irq, int value)
161{ 162{
163 /* The SENSE register is assumed to be 32-bit. */
162 int bitfield_width = p->config.sense_bitfield_width; 164 int bitfield_width = p->config.sense_bitfield_width;
163 int shift = (7 - irq) * bitfield_width; /* SENSE assumed to be 32-bit */ 165 int shift = 32 - (irq + 1) * bitfield_width;
164 166
165 dev_dbg(&p->pdev->dev, "sense irq = %d, mode = %d\n", irq, value); 167 dev_dbg(&p->pdev->dev, "sense irq = %d, mode = %d\n", irq, value);
166 168
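The intc-irqpin fix replaces (7 - irq) * width with 32 - (irq + 1) * width. The two are identical for the fixed 4-bit PRIO fields, but only the new form puts field 0 in the top bits of the 32-bit register when the SENSE bitfield width differs from 4. A quick arithmetic check:

#include <stdio.h>

static int old_shift(int irq, int width) { return (7 - irq) * width; }
static int new_shift(int irq, int width) { return 32 - (irq + 1) * width; }

int main(void)
{
        printf("width 4, irq 0: old=%d new=%d\n", old_shift(0, 4), new_shift(0, 4));
        printf("width 2, irq 0: old=%d new=%d\n", old_shift(0, 2), new_shift(0, 2));
        printf("width 2, irq 1: old=%d new=%d\n", old_shift(1, 2), new_shift(1, 2));
        return 0;
}

With width 4 both formulas give 28 for irq 0; with width 2 the old formula lands on bit 14 instead of bit 30, which is the bug being fixed.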
diff --git a/drivers/isdn/hisax/hfc_pci.c b/drivers/isdn/hisax/hfc_pci.c
index 497bd026c237..4a4825528188 100644
--- a/drivers/isdn/hisax/hfc_pci.c
+++ b/drivers/isdn/hisax/hfc_pci.c
@@ -1643,10 +1643,6 @@ setup_hfcpci(struct IsdnCard *card)
1643 int i; 1643 int i;
1644 struct pci_dev *tmp_hfcpci = NULL; 1644 struct pci_dev *tmp_hfcpci = NULL;
1645 1645
1646#ifdef __BIG_ENDIAN
1647#error "not running on big endian machines now"
1648#endif
1649
1650 strcpy(tmp, hfcpci_revision); 1646 strcpy(tmp, hfcpci_revision);
1651 printk(KERN_INFO "HiSax: HFC-PCI driver Rev. %s\n", HiSax_getrev(tmp)); 1647 printk(KERN_INFO "HiSax: HFC-PCI driver Rev. %s\n", HiSax_getrev(tmp));
1652 1648
diff --git a/drivers/isdn/hisax/telespci.c b/drivers/isdn/hisax/telespci.c
index f6ab63aa6995..33eeb4602c7e 100644
--- a/drivers/isdn/hisax/telespci.c
+++ b/drivers/isdn/hisax/telespci.c
@@ -290,10 +290,6 @@ int setup_telespci(struct IsdnCard *card)
290 struct IsdnCardState *cs = card->cs; 290 struct IsdnCardState *cs = card->cs;
291 char tmp[64]; 291 char tmp[64];
292 292
293#ifdef __BIG_ENDIAN
294#error "not running on big endian machines now"
295#endif
296
297 strcpy(tmp, telespci_revision); 293 strcpy(tmp, telespci_revision);
298 printk(KERN_INFO "HiSax: Teles/PCI driver Rev. %s\n", HiSax_getrev(tmp)); 294 printk(KERN_INFO "HiSax: Teles/PCI driver Rev. %s\n", HiSax_getrev(tmp));
299 if (cs->typ != ISDN_CTYPE_TELESPCI) 295 if (cs->typ != ISDN_CTYPE_TELESPCI)
diff --git a/drivers/leds/leds-lp5521.c b/drivers/leds/leds-lp5521.c
index 05188351711d..a97263e902ff 100644
--- a/drivers/leds/leds-lp5521.c
+++ b/drivers/leds/leds-lp5521.c
@@ -244,18 +244,12 @@ static int lp5521_update_program_memory(struct lp55xx_chip *chip,
244 if (i % 2) 244 if (i % 2)
245 goto err; 245 goto err;
246 246
247 mutex_lock(&chip->lock);
248
249 for (i = 0; i < LP5521_PROGRAM_LENGTH; i++) { 247 for (i = 0; i < LP5521_PROGRAM_LENGTH; i++) {
250 ret = lp55xx_write(chip, addr[idx] + i, pattern[i]); 248 ret = lp55xx_write(chip, addr[idx] + i, pattern[i]);
251 if (ret) { 249 if (ret)
252 mutex_unlock(&chip->lock);
253 return -EINVAL; 250 return -EINVAL;
254 }
255 } 251 }
256 252
257 mutex_unlock(&chip->lock);
258
259 return size; 253 return size;
260 254
261err: 255err:
@@ -427,15 +421,17 @@ static ssize_t store_engine_load(struct device *dev,
427{ 421{
428 struct lp55xx_led *led = i2c_get_clientdata(to_i2c_client(dev)); 422 struct lp55xx_led *led = i2c_get_clientdata(to_i2c_client(dev));
429 struct lp55xx_chip *chip = led->chip; 423 struct lp55xx_chip *chip = led->chip;
424 int ret;
430 425
431 mutex_lock(&chip->lock); 426 mutex_lock(&chip->lock);
432 427
433 chip->engine_idx = nr; 428 chip->engine_idx = nr;
434 lp5521_load_engine(chip); 429 lp5521_load_engine(chip);
430 ret = lp5521_update_program_memory(chip, buf, len);
435 431
436 mutex_unlock(&chip->lock); 432 mutex_unlock(&chip->lock);
437 433
438 return lp5521_update_program_memory(chip, buf, len); 434 return ret;
439} 435}
440store_load(1) 436store_load(1)
441store_load(2) 437store_load(2)
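The lp5521 change moves the locking out of the helper: the sysfs store now holds the chip mutex across both selecting the engine and writing the program, and captures the return value before unlocking. A userspace sketch of that pattern using pthreads (names mirror the driver, the bodies are placeholders):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t chip_lock = PTHREAD_MUTEX_INITIALIZER;
static int engine_idx;

static int update_program_memory(const char *buf, size_t len)
{
        /* caller holds chip_lock */
        printf("engine %d: writing %zu bytes\n", engine_idx, len);
        return (int)len;
}

static int store_engine_load(int nr, const char *buf, size_t len)
{
        int ret;

        pthread_mutex_lock(&chip_lock);
        engine_idx = nr;                       /* select the engine ...   */
        ret = update_program_memory(buf, len); /* ... and program it      */
        pthread_mutex_unlock(&chip_lock);      /* as one atomic unit      */

        return ret;
}

int main(void)
{
        return store_engine_load(1, "9d804000", 8) < 0;
}

Holding the lock over the whole sequence keeps another writer from switching engine_idx between the select and the program step; the same rework is applied to lp5523 below.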
diff --git a/drivers/leds/leds-lp5523.c b/drivers/leds/leds-lp5523.c
index 6b553d9f4266..fd9ab5f61441 100644
--- a/drivers/leds/leds-lp5523.c
+++ b/drivers/leds/leds-lp5523.c
@@ -337,18 +337,12 @@ static int lp5523_update_program_memory(struct lp55xx_chip *chip,
337 if (i % 2) 337 if (i % 2)
338 goto err; 338 goto err;
339 339
340 mutex_lock(&chip->lock);
341
342 for (i = 0; i < LP5523_PROGRAM_LENGTH; i++) { 340 for (i = 0; i < LP5523_PROGRAM_LENGTH; i++) {
343 ret = lp55xx_write(chip, LP5523_REG_PROG_MEM + i, pattern[i]); 341 ret = lp55xx_write(chip, LP5523_REG_PROG_MEM + i, pattern[i]);
344 if (ret) { 342 if (ret)
345 mutex_unlock(&chip->lock);
346 return -EINVAL; 343 return -EINVAL;
347 }
348 } 344 }
349 345
350 mutex_unlock(&chip->lock);
351
352 return size; 346 return size;
353 347
354err: 348err:
@@ -548,15 +542,17 @@ static ssize_t store_engine_load(struct device *dev,
548{ 542{
549 struct lp55xx_led *led = i2c_get_clientdata(to_i2c_client(dev)); 543 struct lp55xx_led *led = i2c_get_clientdata(to_i2c_client(dev));
550 struct lp55xx_chip *chip = led->chip; 544 struct lp55xx_chip *chip = led->chip;
545 int ret;
551 546
552 mutex_lock(&chip->lock); 547 mutex_lock(&chip->lock);
553 548
554 chip->engine_idx = nr; 549 chip->engine_idx = nr;
555 lp5523_load_engine_and_select_page(chip); 550 lp5523_load_engine_and_select_page(chip);
551 ret = lp5523_update_program_memory(chip, buf, len);
556 552
557 mutex_unlock(&chip->lock); 553 mutex_unlock(&chip->lock);
558 554
559 return lp5523_update_program_memory(chip, buf, len); 555 return ret;
560} 556}
561store_load(1) 557store_load(1)
562store_load(2) 558store_load(2)
diff --git a/drivers/leds/leds-pwm.c b/drivers/leds/leds-pwm.c
index 2848171b8576..b31d8e99c419 100644
--- a/drivers/leds/leds-pwm.c
+++ b/drivers/leds/leds-pwm.c
@@ -82,22 +82,12 @@ static inline size_t sizeof_pwm_leds_priv(int num_leds)
82 (sizeof(struct led_pwm_data) * num_leds); 82 (sizeof(struct led_pwm_data) * num_leds);
83} 83}
84 84
85static struct led_pwm_priv *led_pwm_create_of(struct platform_device *pdev) 85static int led_pwm_create_of(struct platform_device *pdev,
86 struct led_pwm_priv *priv)
86{ 87{
87 struct device_node *node = pdev->dev.of_node; 88 struct device_node *node = pdev->dev.of_node;
88 struct device_node *child; 89 struct device_node *child;
89 struct led_pwm_priv *priv; 90 int ret;
90 int count, ret;
91
92 /* count LEDs in this device, so we know how much to allocate */
93 count = of_get_child_count(node);
94 if (!count)
95 return NULL;
96
97 priv = devm_kzalloc(&pdev->dev, sizeof_pwm_leds_priv(count),
98 GFP_KERNEL);
99 if (!priv)
100 return NULL;
101 91
102 for_each_child_of_node(node, child) { 92 for_each_child_of_node(node, child) {
103 struct led_pwm_data *led_dat = &priv->leds[priv->num_leds]; 93 struct led_pwm_data *led_dat = &priv->leds[priv->num_leds];
@@ -109,6 +99,7 @@ static struct led_pwm_priv *led_pwm_create_of(struct platform_device *pdev)
109 if (IS_ERR(led_dat->pwm)) { 99 if (IS_ERR(led_dat->pwm)) {
110 dev_err(&pdev->dev, "unable to request PWM for %s\n", 100 dev_err(&pdev->dev, "unable to request PWM for %s\n",
111 led_dat->cdev.name); 101 led_dat->cdev.name);
102 ret = PTR_ERR(led_dat->pwm);
112 goto err; 103 goto err;
113 } 104 }
114 /* Get the period from PWM core when n*/ 105 /* Get the period from PWM core when n*/
@@ -137,28 +128,36 @@ static struct led_pwm_priv *led_pwm_create_of(struct platform_device *pdev)
137 priv->num_leds++; 128 priv->num_leds++;
138 } 129 }
139 130
140 return priv; 131 return 0;
141err: 132err:
142 while (priv->num_leds--) 133 while (priv->num_leds--)
143 led_classdev_unregister(&priv->leds[priv->num_leds].cdev); 134 led_classdev_unregister(&priv->leds[priv->num_leds].cdev);
144 135
145 return NULL; 136 return ret;
146} 137}
147 138
148static int led_pwm_probe(struct platform_device *pdev) 139static int led_pwm_probe(struct platform_device *pdev)
149{ 140{
150 struct led_pwm_platform_data *pdata = dev_get_platdata(&pdev->dev); 141 struct led_pwm_platform_data *pdata = dev_get_platdata(&pdev->dev);
151 struct led_pwm_priv *priv; 142 struct led_pwm_priv *priv;
152 int i, ret = 0; 143 int count, i;
144 int ret = 0;
145
146 if (pdata)
147 count = pdata->num_leds;
148 else
149 count = of_get_child_count(pdev->dev.of_node);
150
151 if (!count)
152 return -EINVAL;
153 153
154 if (pdata && pdata->num_leds) { 154 priv = devm_kzalloc(&pdev->dev, sizeof_pwm_leds_priv(count),
155 priv = devm_kzalloc(&pdev->dev, 155 GFP_KERNEL);
156 sizeof_pwm_leds_priv(pdata->num_leds), 156 if (!priv)
157 GFP_KERNEL); 157 return -ENOMEM;
158 if (!priv)
159 return -ENOMEM;
160 158
161 for (i = 0; i < pdata->num_leds; i++) { 159 if (pdata) {
160 for (i = 0; i < count; i++) {
162 struct led_pwm *cur_led = &pdata->leds[i]; 161 struct led_pwm *cur_led = &pdata->leds[i];
163 struct led_pwm_data *led_dat = &priv->leds[i]; 162 struct led_pwm_data *led_dat = &priv->leds[i];
164 163
@@ -188,11 +187,11 @@ static int led_pwm_probe(struct platform_device *pdev)
188 if (ret < 0) 187 if (ret < 0)
189 goto err; 188 goto err;
190 } 189 }
191 priv->num_leds = pdata->num_leds; 190 priv->num_leds = count;
192 } else { 191 } else {
193 priv = led_pwm_create_of(pdev); 192 ret = led_pwm_create_of(pdev, priv);
194 if (!priv) 193 if (ret)
195 return -ENODEV; 194 return ret;
196 } 195 }
197 196
198 platform_set_drvdata(pdev, priv); 197 platform_set_drvdata(pdev, priv);
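After the leds-pwm rework, probe determines the LED count once (from platform data or the DT child count), allocates the private structure with its trailing array in one place, and lets both code paths merely fill it in. A sketch of that allocation pattern with plain calloc standing in for devm_kzalloc:

#include <stdio.h>
#include <stdlib.h>

struct led_data { int duty; };

struct leds_priv {
        int num_leds;
        struct led_data leds[];   /* flexible array, sized at alloc time */
};

static struct leds_priv *alloc_priv(int count)
{
        if (!count)
                return NULL;      /* probe returns -EINVAL in this case */

        return calloc(1, sizeof(struct leds_priv) +
                         count * sizeof(struct led_data));
}

int main(void)
{
        struct leds_priv *priv = alloc_priv(3);

        if (!priv)
                return 1;
        priv->num_leds = 3;
        printf("allocated room for %d leds\n", priv->num_leds);
        free(priv);
        return 0;
}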
diff --git a/drivers/macintosh/Makefile b/drivers/macintosh/Makefile
index 6753b65f8ede..d2f0120bc878 100644
--- a/drivers/macintosh/Makefile
+++ b/drivers/macintosh/Makefile
@@ -40,6 +40,7 @@ obj-$(CONFIG_WINDFARM_RM31) += windfarm_fcu_controls.o \
40 windfarm_ad7417_sensor.o \ 40 windfarm_ad7417_sensor.o \
41 windfarm_lm75_sensor.o \ 41 windfarm_lm75_sensor.o \
42 windfarm_lm87_sensor.o \ 42 windfarm_lm87_sensor.o \
43 windfarm_max6690_sensor.o \
43 windfarm_pid.o \ 44 windfarm_pid.o \
44 windfarm_cpufreq_clamp.o \ 45 windfarm_cpufreq_clamp.o \
45 windfarm_rm31.o 46 windfarm_rm31.o
diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
index 2b46bf1d7e40..4c9852d92b0a 100644
--- a/drivers/md/bcache/alloc.c
+++ b/drivers/md/bcache/alloc.c
@@ -421,9 +421,11 @@ out:
421 421
422 if (watermark <= WATERMARK_METADATA) { 422 if (watermark <= WATERMARK_METADATA) {
423 SET_GC_MARK(b, GC_MARK_METADATA); 423 SET_GC_MARK(b, GC_MARK_METADATA);
424 SET_GC_MOVE(b, 0);
424 b->prio = BTREE_PRIO; 425 b->prio = BTREE_PRIO;
425 } else { 426 } else {
426 SET_GC_MARK(b, GC_MARK_RECLAIMABLE); 427 SET_GC_MARK(b, GC_MARK_RECLAIMABLE);
428 SET_GC_MOVE(b, 0);
427 b->prio = INITIAL_PRIO; 429 b->prio = INITIAL_PRIO;
428 } 430 }
429 431
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 4beb55a0ff30..754f43177483 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -197,7 +197,7 @@ struct bucket {
197 uint8_t disk_gen; 197 uint8_t disk_gen;
198 uint8_t last_gc; /* Most out of date gen in the btree */ 198 uint8_t last_gc; /* Most out of date gen in the btree */
199 uint8_t gc_gen; 199 uint8_t gc_gen;
200 uint16_t gc_mark; 200 uint16_t gc_mark; /* Bitfield used by GC. See below for field */
201}; 201};
202 202
203/* 203/*
@@ -209,7 +209,8 @@ BITMASK(GC_MARK, struct bucket, gc_mark, 0, 2);
209#define GC_MARK_RECLAIMABLE 0 209#define GC_MARK_RECLAIMABLE 0
210#define GC_MARK_DIRTY 1 210#define GC_MARK_DIRTY 1
211#define GC_MARK_METADATA 2 211#define GC_MARK_METADATA 2
212BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, 14); 212BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, 13);
213BITMASK(GC_MOVE, struct bucket, gc_mark, 15, 1);
213 214
214#include "journal.h" 215#include "journal.h"
215#include "stats.h" 216#include "stats.h"
@@ -372,14 +373,14 @@ struct cached_dev {
372 unsigned char writeback_percent; 373 unsigned char writeback_percent;
373 unsigned writeback_delay; 374 unsigned writeback_delay;
374 375
375 int writeback_rate_change;
376 int64_t writeback_rate_derivative;
377 uint64_t writeback_rate_target; 376 uint64_t writeback_rate_target;
377 int64_t writeback_rate_proportional;
378 int64_t writeback_rate_derivative;
379 int64_t writeback_rate_change;
378 380
379 unsigned writeback_rate_update_seconds; 381 unsigned writeback_rate_update_seconds;
380 unsigned writeback_rate_d_term; 382 unsigned writeback_rate_d_term;
381 unsigned writeback_rate_p_term_inverse; 383 unsigned writeback_rate_p_term_inverse;
382 unsigned writeback_rate_d_smooth;
383}; 384};
384 385
385enum alloc_watermarks { 386enum alloc_watermarks {
@@ -445,7 +446,6 @@ struct cache {
445 * call prio_write() to keep gens from wrapping. 446 * call prio_write() to keep gens from wrapping.
446 */ 447 */
447 uint8_t need_save_prio; 448 uint8_t need_save_prio;
448 unsigned gc_move_threshold;
449 449
450 /* 450 /*
451 * If nonzero, we know we aren't going to find any buckets to invalidate 451 * If nonzero, we know we aren't going to find any buckets to invalidate
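The bcache.h hunk shrinks GC_SECTORS_USED to 13 bits and uses the freed top bit of the 16-bit gc_mark for a new GC_MOVE flag. An illustrative model of that packing, with simple get/set helpers standing in for what the BITMASK() macros generate:

#include <stdint.h>
#include <stdio.h>

#define FIELD_GET(v, off, bits)    (((v) >> (off)) & ((1u << (bits)) - 1))
#define FIELD_SET(v, off, bits, x) \
        (((v) & ~(((1u << (bits)) - 1) << (off))) | ((unsigned)(x) << (off)))

int main(void)
{
        uint16_t gc_mark = 0;

        gc_mark = FIELD_SET(gc_mark, 0, 2, 2);      /* GC_MARK_METADATA */
        gc_mark = FIELD_SET(gc_mark, 2, 13, 4096);  /* GC_SECTORS_USED  */
        gc_mark = FIELD_SET(gc_mark, 15, 1, 1);     /* GC_MOVE          */

        printf("mark=%u sectors=%u move=%u\n",
               FIELD_GET(gc_mark, 0, 2),
               FIELD_GET(gc_mark, 2, 13),
               FIELD_GET(gc_mark, 15, 1));
        return 0;
}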
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 5e2765aadce1..31bb53fcc67a 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -1561,6 +1561,28 @@ size_t bch_btree_gc_finish(struct cache_set *c)
1561 SET_GC_MARK(PTR_BUCKET(c, &c->uuid_bucket, i), 1561 SET_GC_MARK(PTR_BUCKET(c, &c->uuid_bucket, i),
1562 GC_MARK_METADATA); 1562 GC_MARK_METADATA);
1563 1563
1564 /* don't reclaim buckets to which writeback keys point */
1565 rcu_read_lock();
1566 for (i = 0; i < c->nr_uuids; i++) {
1567 struct bcache_device *d = c->devices[i];
1568 struct cached_dev *dc;
1569 struct keybuf_key *w, *n;
1570 unsigned j;
1571
1572 if (!d || UUID_FLASH_ONLY(&c->uuids[i]))
1573 continue;
1574 dc = container_of(d, struct cached_dev, disk);
1575
1576 spin_lock(&dc->writeback_keys.lock);
1577 rbtree_postorder_for_each_entry_safe(w, n,
1578 &dc->writeback_keys.keys, node)
1579 for (j = 0; j < KEY_PTRS(&w->key); j++)
1580 SET_GC_MARK(PTR_BUCKET(c, &w->key, j),
1581 GC_MARK_DIRTY);
1582 spin_unlock(&dc->writeback_keys.lock);
1583 }
1584 rcu_read_unlock();
1585
1564 for_each_cache(ca, c, i) { 1586 for_each_cache(ca, c, i) {
1565 uint64_t *i; 1587 uint64_t *i;
1566 1588
@@ -1817,7 +1839,8 @@ static bool fix_overlapping_extents(struct btree *b, struct bkey *insert,
1817 if (KEY_START(k) > KEY_START(insert) + sectors_found) 1839 if (KEY_START(k) > KEY_START(insert) + sectors_found)
1818 goto check_failed; 1840 goto check_failed;
1819 1841
1820 if (KEY_PTRS(replace_key) != KEY_PTRS(k)) 1842 if (KEY_PTRS(k) != KEY_PTRS(replace_key) ||
1843 KEY_DIRTY(k) != KEY_DIRTY(replace_key))
1821 goto check_failed; 1844 goto check_failed;
1822 1845
1823 /* skip past gen */ 1846 /* skip past gen */
@@ -2217,7 +2240,7 @@ struct btree_insert_op {
2217 struct bkey *replace_key; 2240 struct bkey *replace_key;
2218}; 2241};
2219 2242
2220int btree_insert_fn(struct btree_op *b_op, struct btree *b) 2243static int btree_insert_fn(struct btree_op *b_op, struct btree *b)
2221{ 2244{
2222 struct btree_insert_op *op = container_of(b_op, 2245 struct btree_insert_op *op = container_of(b_op,
2223 struct btree_insert_op, op); 2246 struct btree_insert_op, op);
diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c
index 7c1275e66025..f2f0998c4a91 100644
--- a/drivers/md/bcache/movinggc.c
+++ b/drivers/md/bcache/movinggc.c
@@ -25,10 +25,9 @@ static bool moving_pred(struct keybuf *buf, struct bkey *k)
25 unsigned i; 25 unsigned i;
26 26
27 for (i = 0; i < KEY_PTRS(k); i++) { 27 for (i = 0; i < KEY_PTRS(k); i++) {
28 struct cache *ca = PTR_CACHE(c, k, i);
29 struct bucket *g = PTR_BUCKET(c, k, i); 28 struct bucket *g = PTR_BUCKET(c, k, i);
30 29
31 if (GC_SECTORS_USED(g) < ca->gc_move_threshold) 30 if (GC_MOVE(g))
32 return true; 31 return true;
33 } 32 }
34 33
@@ -65,11 +64,16 @@ static void write_moving_finish(struct closure *cl)
65 64
66static void read_moving_endio(struct bio *bio, int error) 65static void read_moving_endio(struct bio *bio, int error)
67{ 66{
67 struct bbio *b = container_of(bio, struct bbio, bio);
68 struct moving_io *io = container_of(bio->bi_private, 68 struct moving_io *io = container_of(bio->bi_private,
69 struct moving_io, cl); 69 struct moving_io, cl);
70 70
71 if (error) 71 if (error)
72 io->op.error = error; 72 io->op.error = error;
73 else if (!KEY_DIRTY(&b->key) &&
74 ptr_stale(io->op.c, &b->key, 0)) {
75 io->op.error = -EINTR;
76 }
73 77
74 bch_bbio_endio(io->op.c, bio, error, "reading data to move"); 78 bch_bbio_endio(io->op.c, bio, error, "reading data to move");
75} 79}
@@ -141,6 +145,11 @@ static void read_moving(struct cache_set *c)
141 if (!w) 145 if (!w)
142 break; 146 break;
143 147
148 if (ptr_stale(c, &w->key, 0)) {
149 bch_keybuf_del(&c->moving_gc_keys, w);
150 continue;
151 }
152
144 io = kzalloc(sizeof(struct moving_io) + sizeof(struct bio_vec) 153 io = kzalloc(sizeof(struct moving_io) + sizeof(struct bio_vec)
145 * DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS), 154 * DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS),
146 GFP_KERNEL); 155 GFP_KERNEL);
@@ -184,7 +193,8 @@ static bool bucket_cmp(struct bucket *l, struct bucket *r)
184 193
185static unsigned bucket_heap_top(struct cache *ca) 194static unsigned bucket_heap_top(struct cache *ca)
186{ 195{
187 return GC_SECTORS_USED(heap_peek(&ca->heap)); 196 struct bucket *b;
197 return (b = heap_peek(&ca->heap)) ? GC_SECTORS_USED(b) : 0;
188} 198}
189 199
190void bch_moving_gc(struct cache_set *c) 200void bch_moving_gc(struct cache_set *c)
@@ -226,9 +236,8 @@ void bch_moving_gc(struct cache_set *c)
226 sectors_to_move -= GC_SECTORS_USED(b); 236 sectors_to_move -= GC_SECTORS_USED(b);
227 } 237 }
228 238
229 ca->gc_move_threshold = bucket_heap_top(ca); 239 while (heap_pop(&ca->heap, b, bucket_cmp))
230 240 SET_GC_MOVE(b, 1);
231 pr_debug("threshold %u", ca->gc_move_threshold);
232 } 241 }
233 242
234 mutex_unlock(&c->bucket_lock); 243 mutex_unlock(&c->bucket_lock);
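With the movinggc change, bucket_heap_top() must cope with an empty heap: heap_peek() yields NULL when nothing is queued, so the caller tests the pointer and falls back to 0 instead of dereferencing it. A small sketch under a simplified heap structure:

#include <stdio.h>

struct bucket { unsigned sectors_used; };

struct heap { unsigned used; struct bucket *data[8]; };

static struct bucket *heap_peek(struct heap *h)
{
        return h->used ? h->data[0] : NULL;
}

static unsigned bucket_heap_top(struct heap *h)
{
        struct bucket *b = heap_peek(h);

        return b ? b->sectors_used : 0;   /* empty heap -> 0, not a crash */
}

int main(void)
{
        struct bucket b = { .sectors_used = 512 };
        struct heap h = { .used = 0 };

        printf("%u\n", bucket_heap_top(&h));   /* 0   */
        h.data[0] = &b;
        h.used = 1;
        printf("%u\n", bucket_heap_top(&h));   /* 512 */
        return 0;
}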
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index dec15cd2d797..c57bfa071a57 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -1676,7 +1676,7 @@ err:
1676static bool can_attach_cache(struct cache *ca, struct cache_set *c) 1676static bool can_attach_cache(struct cache *ca, struct cache_set *c)
1677{ 1677{
1678 return ca->sb.block_size == c->sb.block_size && 1678 return ca->sb.block_size == c->sb.block_size &&
1679 ca->sb.bucket_size == c->sb.block_size && 1679 ca->sb.bucket_size == c->sb.bucket_size &&
1680 ca->sb.nr_in_set == c->sb.nr_in_set; 1680 ca->sb.nr_in_set == c->sb.nr_in_set;
1681} 1681}
1682 1682
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index 80d4c2bee18a..a1f85612f0b3 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -83,7 +83,6 @@ rw_attribute(writeback_rate);
83rw_attribute(writeback_rate_update_seconds); 83rw_attribute(writeback_rate_update_seconds);
84rw_attribute(writeback_rate_d_term); 84rw_attribute(writeback_rate_d_term);
85rw_attribute(writeback_rate_p_term_inverse); 85rw_attribute(writeback_rate_p_term_inverse);
86rw_attribute(writeback_rate_d_smooth);
87read_attribute(writeback_rate_debug); 86read_attribute(writeback_rate_debug);
88 87
89read_attribute(stripe_size); 88read_attribute(stripe_size);
@@ -129,31 +128,41 @@ SHOW(__bch_cached_dev)
129 var_printf(writeback_running, "%i"); 128 var_printf(writeback_running, "%i");
130 var_print(writeback_delay); 129 var_print(writeback_delay);
131 var_print(writeback_percent); 130 var_print(writeback_percent);
132 sysfs_print(writeback_rate, dc->writeback_rate.rate); 131 sysfs_hprint(writeback_rate, dc->writeback_rate.rate << 9);
133 132
134 var_print(writeback_rate_update_seconds); 133 var_print(writeback_rate_update_seconds);
135 var_print(writeback_rate_d_term); 134 var_print(writeback_rate_d_term);
136 var_print(writeback_rate_p_term_inverse); 135 var_print(writeback_rate_p_term_inverse);
137 var_print(writeback_rate_d_smooth);
138 136
139 if (attr == &sysfs_writeback_rate_debug) { 137 if (attr == &sysfs_writeback_rate_debug) {
138 char rate[20];
140 char dirty[20]; 139 char dirty[20];
141 char derivative[20];
142 char target[20]; 140 char target[20];
143 bch_hprint(dirty, 141 char proportional[20];
144 bcache_dev_sectors_dirty(&dc->disk) << 9); 142 char derivative[20];
145 bch_hprint(derivative, dc->writeback_rate_derivative << 9); 143 char change[20];
144 s64 next_io;
145
146 bch_hprint(rate, dc->writeback_rate.rate << 9);
147 bch_hprint(dirty, bcache_dev_sectors_dirty(&dc->disk) << 9);
146 bch_hprint(target, dc->writeback_rate_target << 9); 148 bch_hprint(target, dc->writeback_rate_target << 9);
149 bch_hprint(proportional,dc->writeback_rate_proportional << 9);
150 bch_hprint(derivative, dc->writeback_rate_derivative << 9);
151 bch_hprint(change, dc->writeback_rate_change << 9);
152
153 next_io = div64_s64(dc->writeback_rate.next - local_clock(),
154 NSEC_PER_MSEC);
147 155
148 return sprintf(buf, 156 return sprintf(buf,
149 "rate:\t\t%u\n" 157 "rate:\t\t%s/sec\n"
150 "change:\t\t%i\n"
151 "dirty:\t\t%s\n" 158 "dirty:\t\t%s\n"
159 "target:\t\t%s\n"
160 "proportional:\t%s\n"
152 "derivative:\t%s\n" 161 "derivative:\t%s\n"
153 "target:\t\t%s\n", 162 "change:\t\t%s/sec\n"
154 dc->writeback_rate.rate, 163 "next io:\t%llims\n",
155 dc->writeback_rate_change, 164 rate, dirty, target, proportional,
156 dirty, derivative, target); 165 derivative, change, next_io);
157 } 166 }
158 167
159 sysfs_hprint(dirty_data, 168 sysfs_hprint(dirty_data,
@@ -189,6 +198,7 @@ STORE(__cached_dev)
189 struct kobj_uevent_env *env; 198 struct kobj_uevent_env *env;
190 199
191#define d_strtoul(var) sysfs_strtoul(var, dc->var) 200#define d_strtoul(var) sysfs_strtoul(var, dc->var)
201#define d_strtoul_nonzero(var) sysfs_strtoul_clamp(var, dc->var, 1, INT_MAX)
192#define d_strtoi_h(var) sysfs_hatoi(var, dc->var) 202#define d_strtoi_h(var) sysfs_hatoi(var, dc->var)
193 203
194 sysfs_strtoul(data_csum, dc->disk.data_csum); 204 sysfs_strtoul(data_csum, dc->disk.data_csum);
@@ -197,16 +207,15 @@ STORE(__cached_dev)
197 d_strtoul(writeback_metadata); 207 d_strtoul(writeback_metadata);
198 d_strtoul(writeback_running); 208 d_strtoul(writeback_running);
199 d_strtoul(writeback_delay); 209 d_strtoul(writeback_delay);
200 sysfs_strtoul_clamp(writeback_rate, 210
201 dc->writeback_rate.rate, 1, 1000000);
202 sysfs_strtoul_clamp(writeback_percent, dc->writeback_percent, 0, 40); 211 sysfs_strtoul_clamp(writeback_percent, dc->writeback_percent, 0, 40);
203 212
204 d_strtoul(writeback_rate_update_seconds); 213 sysfs_strtoul_clamp(writeback_rate,
214 dc->writeback_rate.rate, 1, INT_MAX);
215
216 d_strtoul_nonzero(writeback_rate_update_seconds);
205 d_strtoul(writeback_rate_d_term); 217 d_strtoul(writeback_rate_d_term);
206 d_strtoul(writeback_rate_p_term_inverse); 218 d_strtoul_nonzero(writeback_rate_p_term_inverse);
207 sysfs_strtoul_clamp(writeback_rate_p_term_inverse,
208 dc->writeback_rate_p_term_inverse, 1, INT_MAX);
209 d_strtoul(writeback_rate_d_smooth);
210 219
211 d_strtoi_h(sequential_cutoff); 220 d_strtoi_h(sequential_cutoff);
212 d_strtoi_h(readahead); 221 d_strtoi_h(readahead);
@@ -313,7 +322,6 @@ static struct attribute *bch_cached_dev_files[] = {
313 &sysfs_writeback_rate_update_seconds, 322 &sysfs_writeback_rate_update_seconds,
314 &sysfs_writeback_rate_d_term, 323 &sysfs_writeback_rate_d_term,
315 &sysfs_writeback_rate_p_term_inverse, 324 &sysfs_writeback_rate_p_term_inverse,
316 &sysfs_writeback_rate_d_smooth,
317 &sysfs_writeback_rate_debug, 325 &sysfs_writeback_rate_debug,
318 &sysfs_dirty_data, 326 &sysfs_dirty_data,
319 &sysfs_stripe_size, 327 &sysfs_stripe_size,
diff --git a/drivers/md/bcache/util.c b/drivers/md/bcache/util.c
index 462214eeacbe..bb37618e7664 100644
--- a/drivers/md/bcache/util.c
+++ b/drivers/md/bcache/util.c
@@ -209,7 +209,13 @@ uint64_t bch_next_delay(struct bch_ratelimit *d, uint64_t done)
209{ 209{
210 uint64_t now = local_clock(); 210 uint64_t now = local_clock();
211 211
212 d->next += div_u64(done, d->rate); 212 d->next += div_u64(done * NSEC_PER_SEC, d->rate);
213
214 if (time_before64(now + NSEC_PER_SEC, d->next))
215 d->next = now + NSEC_PER_SEC;
216
217 if (time_after64(now - NSEC_PER_SEC * 2, d->next))
218 d->next = now - NSEC_PER_SEC * 2;
213 219
214 return time_after64(d->next, now) 220 return time_after64(d->next, now)
215 ? div_u64(d->next - now, NSEC_PER_SEC / HZ) 221 ? div_u64(d->next - now, NSEC_PER_SEC / HZ)
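The bch_next_delay() change charges done/rate seconds to d->next and then clamps d->next to at most one second ahead of (and two seconds behind) the current clock, so an idle stretch cannot bank unlimited credit. A userspace model of the adjusted limiter; HZ and the clock values are stand-ins:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL
#define HZ 100

struct ratelimit { uint64_t next; uint64_t rate; /* units per second */ };

static uint64_t next_delay(struct ratelimit *d, uint64_t done, uint64_t now)
{
        d->next += done * NSEC_PER_SEC / d->rate;

        if (d->next > now + NSEC_PER_SEC)          /* too far ahead  */
                d->next = now + NSEC_PER_SEC;
        if (now > 2 * NSEC_PER_SEC && d->next < now - 2 * NSEC_PER_SEC)
                d->next = now - 2 * NSEC_PER_SEC;  /* too far behind */

        return d->next > now
                ? (d->next - now) / (NSEC_PER_SEC / HZ)  /* jiffies to wait */
                : 0;
}

int main(void)
{
        struct ratelimit d = { .next = 0, .rate = 1024 };

        /* 2048 units at t=0 against a 1024/s budget is ~2s of debt, but the
         * clamp means we only ever sleep about one second. */
        printf("delay = %llu jiffies\n",
               (unsigned long long)next_delay(&d, 2048, 0));
        return 0;
}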
diff --git a/drivers/md/bcache/util.h b/drivers/md/bcache/util.h
index 362c4b3f8b4a..1030c6020e98 100644
--- a/drivers/md/bcache/util.h
+++ b/drivers/md/bcache/util.h
@@ -110,7 +110,7 @@ do { \
110 _r; \ 110 _r; \
111}) 111})
112 112
113#define heap_peek(h) ((h)->size ? (h)->data[0] : NULL) 113#define heap_peek(h) ((h)->used ? (h)->data[0] : NULL)
114 114
115#define heap_full(h) ((h)->used == (h)->size) 115#define heap_full(h) ((h)->used == (h)->size)
116 116
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index 99053b1251be..6c44fe059c27 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -30,38 +30,40 @@ static void __update_writeback_rate(struct cached_dev *dc)
30 30
31 /* PD controller */ 31 /* PD controller */
32 32
33 int change = 0;
34 int64_t error;
35 int64_t dirty = bcache_dev_sectors_dirty(&dc->disk); 33 int64_t dirty = bcache_dev_sectors_dirty(&dc->disk);
36 int64_t derivative = dirty - dc->disk.sectors_dirty_last; 34 int64_t derivative = dirty - dc->disk.sectors_dirty_last;
35 int64_t proportional = dirty - target;
36 int64_t change;
37 37
38 dc->disk.sectors_dirty_last = dirty; 38 dc->disk.sectors_dirty_last = dirty;
39 39
40 derivative *= dc->writeback_rate_d_term; 40 /* Scale to sectors per second */
41 derivative = clamp(derivative, -dirty, dirty);
42 41
43 derivative = ewma_add(dc->disk.sectors_dirty_derivative, derivative, 42 proportional *= dc->writeback_rate_update_seconds;
44 dc->writeback_rate_d_smooth, 0); 43 proportional = div_s64(proportional, dc->writeback_rate_p_term_inverse);
45 44
46 /* Avoid divide by zero */ 45 derivative = div_s64(derivative, dc->writeback_rate_update_seconds);
47 if (!target)
48 goto out;
49 46
50 error = div64_s64((dirty + derivative - target) << 8, target); 47 derivative = ewma_add(dc->disk.sectors_dirty_derivative, derivative,
48 (dc->writeback_rate_d_term /
49 dc->writeback_rate_update_seconds) ?: 1, 0);
50
51 derivative *= dc->writeback_rate_d_term;
52 derivative = div_s64(derivative, dc->writeback_rate_p_term_inverse);
51 53
52 change = div_s64((dc->writeback_rate.rate * error) >> 8, 54 change = proportional + derivative;
53 dc->writeback_rate_p_term_inverse);
54 55
55 /* Don't increase writeback rate if the device isn't keeping up */ 56 /* Don't increase writeback rate if the device isn't keeping up */
56 if (change > 0 && 57 if (change > 0 &&
57 time_after64(local_clock(), 58 time_after64(local_clock(),
58 dc->writeback_rate.next + 10 * NSEC_PER_MSEC)) 59 dc->writeback_rate.next + NSEC_PER_MSEC))
59 change = 0; 60 change = 0;
60 61
61 dc->writeback_rate.rate = 62 dc->writeback_rate.rate =
62 clamp_t(int64_t, dc->writeback_rate.rate + change, 63 clamp_t(int64_t, (int64_t) dc->writeback_rate.rate + change,
63 1, NSEC_PER_MSEC); 64 1, NSEC_PER_MSEC);
64out: 65
66 dc->writeback_rate_proportional = proportional;
65 dc->writeback_rate_derivative = derivative; 67 dc->writeback_rate_derivative = derivative;
66 dc->writeback_rate_change = change; 68 dc->writeback_rate_change = change;
67 dc->writeback_rate_target = target; 69 dc->writeback_rate_target = target;
@@ -87,15 +89,11 @@ static void update_writeback_rate(struct work_struct *work)
87 89
88static unsigned writeback_delay(struct cached_dev *dc, unsigned sectors) 90static unsigned writeback_delay(struct cached_dev *dc, unsigned sectors)
89{ 91{
90 uint64_t ret;
91
92 if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) || 92 if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
93 !dc->writeback_percent) 93 !dc->writeback_percent)
94 return 0; 94 return 0;
95 95
96 ret = bch_next_delay(&dc->writeback_rate, sectors * 10000000ULL); 96 return bch_next_delay(&dc->writeback_rate, sectors);
97
98 return min_t(uint64_t, ret, HZ);
99} 97}
100 98
101struct dirty_io { 99struct dirty_io {
@@ -241,7 +239,7 @@ static void read_dirty(struct cached_dev *dc)
241 if (KEY_START(&w->key) != dc->last_read || 239 if (KEY_START(&w->key) != dc->last_read ||
242 jiffies_to_msecs(delay) > 50) 240 jiffies_to_msecs(delay) > 50)
243 while (!kthread_should_stop() && delay) 241 while (!kthread_should_stop() && delay)
244 delay = schedule_timeout_interruptible(delay); 242 delay = schedule_timeout_uninterruptible(delay);
245 243
246 dc->last_read = KEY_OFFSET(&w->key); 244 dc->last_read = KEY_OFFSET(&w->key);
247 245
@@ -438,7 +436,7 @@ static int bch_writeback_thread(void *arg)
438 while (delay && 436 while (delay &&
439 !kthread_should_stop() && 437 !kthread_should_stop() &&
440 !test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags)) 438 !test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
441 delay = schedule_timeout_interruptible(delay); 439 delay = schedule_timeout_uninterruptible(delay);
442 } 440 }
443 } 441 }
444 442
@@ -476,6 +474,8 @@ void bch_sectors_dirty_init(struct cached_dev *dc)
476 474
477 bch_btree_map_keys(&op.op, dc->disk.c, &KEY(op.inode, 0, 0), 475 bch_btree_map_keys(&op.op, dc->disk.c, &KEY(op.inode, 0, 0),
478 sectors_dirty_init_fn, 0); 476 sectors_dirty_init_fn, 0);
477
478 dc->disk.sectors_dirty_last = bcache_dev_sectors_dirty(&dc->disk);
479} 479}
480 480
481int bch_cached_dev_writeback_init(struct cached_dev *dc) 481int bch_cached_dev_writeback_init(struct cached_dev *dc)
@@ -490,18 +490,15 @@ int bch_cached_dev_writeback_init(struct cached_dev *dc)
490 dc->writeback_delay = 30; 490 dc->writeback_delay = 30;
491 dc->writeback_rate.rate = 1024; 491 dc->writeback_rate.rate = 1024;
492 492
493 dc->writeback_rate_update_seconds = 30; 493 dc->writeback_rate_update_seconds = 5;
494 dc->writeback_rate_d_term = 16; 494 dc->writeback_rate_d_term = 30;
495 dc->writeback_rate_p_term_inverse = 64; 495 dc->writeback_rate_p_term_inverse = 6000;
496 dc->writeback_rate_d_smooth = 8;
497 496
498 dc->writeback_thread = kthread_create(bch_writeback_thread, dc, 497 dc->writeback_thread = kthread_create(bch_writeback_thread, dc,
499 "bcache_writeback"); 498 "bcache_writeback");
500 if (IS_ERR(dc->writeback_thread)) 499 if (IS_ERR(dc->writeback_thread))
501 return PTR_ERR(dc->writeback_thread); 500 return PTR_ERR(dc->writeback_thread);
502 501
503 set_task_state(dc->writeback_thread, TASK_INTERRUPTIBLE);
504
505 INIT_DELAYED_WORK(&dc->writeback_rate_update, update_writeback_rate); 502 INIT_DELAYED_WORK(&dc->writeback_rate_update, update_writeback_rate);
506 schedule_delayed_work(&dc->writeback_rate_update, 503 schedule_delayed_work(&dc->writeback_rate_update,
507 dc->writeback_rate_update_seconds * HZ); 504 dc->writeback_rate_update_seconds * HZ);
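The rewritten __update_writeback_rate() is a plain PD controller: the proportional term tracks how far the dirty count sits from the target, the derivative term (smoothed with an EWMA) tracks how fast it is moving, and their sum adjusts the rate. A userspace sketch of that loop using the new defaults (5s update period, d_term 30, p_term_inverse 6000); the EWMA and clamping here are simplified approximations of the kernel helpers:

#include <stdint.h>
#include <stdio.h>

#define UPDATE_SECONDS    5
#define D_TERM            30
#define P_TERM_INVERSE    6000

static int64_t dirty_last, derivative_ewma, rate = 1024;

static void update_rate(int64_t dirty, int64_t target)
{
        int64_t proportional = dirty - target;
        int64_t derivative = dirty - dirty_last;
        int64_t change;

        dirty_last = dirty;

        /* scale both terms to sectors per second */
        proportional = proportional * UPDATE_SECONDS / P_TERM_INVERSE;
        derivative /= UPDATE_SECONDS;

        /* smooth the derivative so one spike does not whipsaw the rate */
        derivative_ewma += (derivative - derivative_ewma) /
                           (D_TERM / UPDATE_SECONDS ? D_TERM / UPDATE_SECONDS : 1);
        derivative = derivative_ewma * D_TERM / P_TERM_INVERSE;

        change = proportional + derivative;
        rate += change;
        if (rate < 1)
                rate = 1;

        printf("dirty=%lld target=%lld p=%lld d=%lld rate=%lld\n",
               (long long)dirty, (long long)target, (long long)proportional,
               (long long)derivative, (long long)rate);
}

int main(void)
{
        dirty_last = 1000000;
        update_rate(1200000, 1000000);  /* 200k sectors over target */
        update_rate(1400000, 1000000);  /* still climbing           */
        return 0;
}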
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 173cbb20d104..54bdd923316f 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -1717,6 +1717,11 @@ static int __init dm_bufio_init(void)
1717{ 1717{
1718 __u64 mem; 1718 __u64 mem;
1719 1719
1720 dm_bufio_allocated_kmem_cache = 0;
1721 dm_bufio_allocated_get_free_pages = 0;
1722 dm_bufio_allocated_vmalloc = 0;
1723 dm_bufio_current_allocated = 0;
1724
1720 memset(&dm_bufio_caches, 0, sizeof dm_bufio_caches); 1725 memset(&dm_bufio_caches, 0, sizeof dm_bufio_caches);
1721 memset(&dm_bufio_cache_names, 0, sizeof dm_bufio_cache_names); 1726 memset(&dm_bufio_cache_names, 0, sizeof dm_bufio_cache_names);
1722 1727
diff --git a/drivers/md/dm-cache-policy-mq.c b/drivers/md/dm-cache-policy-mq.c
index 416b7b752a6e..64780ad73bb0 100644
--- a/drivers/md/dm-cache-policy-mq.c
+++ b/drivers/md/dm-cache-policy-mq.c
@@ -730,15 +730,18 @@ static int pre_cache_entry_found(struct mq_policy *mq, struct entry *e,
730 int r = 0; 730 int r = 0;
731 bool updated = updated_this_tick(mq, e); 731 bool updated = updated_this_tick(mq, e);
732 732
733 requeue_and_update_tick(mq, e);
734
735 if ((!discarded_oblock && updated) || 733 if ((!discarded_oblock && updated) ||
736 !should_promote(mq, e, discarded_oblock, data_dir)) 734 !should_promote(mq, e, discarded_oblock, data_dir)) {
735 requeue_and_update_tick(mq, e);
737 result->op = POLICY_MISS; 736 result->op = POLICY_MISS;
738 else if (!can_migrate) 737
738 } else if (!can_migrate)
739 r = -EWOULDBLOCK; 739 r = -EWOULDBLOCK;
740 else 740
741 else {
742 requeue_and_update_tick(mq, e);
741 r = pre_cache_to_cache(mq, e, result); 743 r = pre_cache_to_cache(mq, e, result);
744 }
742 745
743 return r; 746 return r;
744} 747}
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 9efcf1059b99..1b1469ebe5cb 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -2755,7 +2755,7 @@ static int resize_cache_dev(struct cache *cache, dm_cblock_t new_size)
2755{ 2755{
2756 int r; 2756 int r;
2757 2757
2758 r = dm_cache_resize(cache->cmd, cache->cache_size); 2758 r = dm_cache_resize(cache->cmd, new_size);
2759 if (r) { 2759 if (r) {
2760 DMERR("could not resize cache metadata"); 2760 DMERR("could not resize cache metadata");
2761 return r; 2761 return r;
diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
index 496d5f3646a5..2f91d6d4a2cc 100644
--- a/drivers/md/dm-delay.c
+++ b/drivers/md/dm-delay.c
@@ -20,6 +20,7 @@
20struct delay_c { 20struct delay_c {
21 struct timer_list delay_timer; 21 struct timer_list delay_timer;
22 struct mutex timer_lock; 22 struct mutex timer_lock;
23 struct workqueue_struct *kdelayd_wq;
23 struct work_struct flush_expired_bios; 24 struct work_struct flush_expired_bios;
24 struct list_head delayed_bios; 25 struct list_head delayed_bios;
25 atomic_t may_delay; 26 atomic_t may_delay;
@@ -45,14 +46,13 @@ struct dm_delay_info {
45 46
46static DEFINE_MUTEX(delayed_bios_lock); 47static DEFINE_MUTEX(delayed_bios_lock);
47 48
48static struct workqueue_struct *kdelayd_wq;
49static struct kmem_cache *delayed_cache; 49static struct kmem_cache *delayed_cache;
50 50
51static void handle_delayed_timer(unsigned long data) 51static void handle_delayed_timer(unsigned long data)
52{ 52{
53 struct delay_c *dc = (struct delay_c *)data; 53 struct delay_c *dc = (struct delay_c *)data;
54 54
55 queue_work(kdelayd_wq, &dc->flush_expired_bios); 55 queue_work(dc->kdelayd_wq, &dc->flush_expired_bios);
56} 56}
57 57
58static void queue_timeout(struct delay_c *dc, unsigned long expires) 58static void queue_timeout(struct delay_c *dc, unsigned long expires)
@@ -191,6 +191,12 @@ out:
191 goto bad_dev_write; 191 goto bad_dev_write;
192 } 192 }
193 193
194 dc->kdelayd_wq = alloc_workqueue("kdelayd", WQ_MEM_RECLAIM, 0);
195 if (!dc->kdelayd_wq) {
196 DMERR("Couldn't start kdelayd");
197 goto bad_queue;
198 }
199
194 setup_timer(&dc->delay_timer, handle_delayed_timer, (unsigned long)dc); 200 setup_timer(&dc->delay_timer, handle_delayed_timer, (unsigned long)dc);
195 201
196 INIT_WORK(&dc->flush_expired_bios, flush_expired_bios); 202 INIT_WORK(&dc->flush_expired_bios, flush_expired_bios);
@@ -203,6 +209,8 @@ out:
203 ti->private = dc; 209 ti->private = dc;
204 return 0; 210 return 0;
205 211
212bad_queue:
213 mempool_destroy(dc->delayed_pool);
206bad_dev_write: 214bad_dev_write:
207 if (dc->dev_write) 215 if (dc->dev_write)
208 dm_put_device(ti, dc->dev_write); 216 dm_put_device(ti, dc->dev_write);
@@ -217,7 +225,7 @@ static void delay_dtr(struct dm_target *ti)
217{ 225{
218 struct delay_c *dc = ti->private; 226 struct delay_c *dc = ti->private;
219 227
220 flush_workqueue(kdelayd_wq); 228 destroy_workqueue(dc->kdelayd_wq);
221 229
222 dm_put_device(ti, dc->dev_read); 230 dm_put_device(ti, dc->dev_read);
223 231
@@ -350,12 +358,6 @@ static int __init dm_delay_init(void)
350{ 358{
351 int r = -ENOMEM; 359 int r = -ENOMEM;
352 360
353 kdelayd_wq = alloc_workqueue("kdelayd", WQ_MEM_RECLAIM, 0);
354 if (!kdelayd_wq) {
355 DMERR("Couldn't start kdelayd");
356 goto bad_queue;
357 }
358
359 delayed_cache = KMEM_CACHE(dm_delay_info, 0); 361 delayed_cache = KMEM_CACHE(dm_delay_info, 0);
360 if (!delayed_cache) { 362 if (!delayed_cache) {
361 DMERR("Couldn't create delayed bio cache."); 363 DMERR("Couldn't create delayed bio cache.");
@@ -373,8 +375,6 @@ static int __init dm_delay_init(void)
373bad_register: 375bad_register:
374 kmem_cache_destroy(delayed_cache); 376 kmem_cache_destroy(delayed_cache);
375bad_memcache: 377bad_memcache:
376 destroy_workqueue(kdelayd_wq);
377bad_queue:
378 return r; 378 return r;
379} 379}
380 380
@@ -382,7 +382,6 @@ static void __exit dm_delay_exit(void)
382{ 382{
383 dm_unregister_target(&delay_target); 383 dm_unregister_target(&delay_target);
384 kmem_cache_destroy(delayed_cache); 384 kmem_cache_destroy(delayed_cache);
385 destroy_workqueue(kdelayd_wq);
386} 385}
387 386
388/* Module hooks */ 387/* Module hooks */
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index aec57d76db5d..944690bafd93 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -66,6 +66,18 @@ struct dm_snapshot {
66 66
67 atomic_t pending_exceptions_count; 67 atomic_t pending_exceptions_count;
68 68
69 /* Protected by "lock" */
70 sector_t exception_start_sequence;
71
72 /* Protected by kcopyd single-threaded callback */
73 sector_t exception_complete_sequence;
74
75 /*
76 * A list of pending exceptions that completed out of order.
77 * Protected by kcopyd single-threaded callback.
78 */
79 struct list_head out_of_order_list;
80
69 mempool_t *pending_pool; 81 mempool_t *pending_pool;
70 82
71 struct dm_exception_table pending; 83 struct dm_exception_table pending;
@@ -173,6 +185,14 @@ struct dm_snap_pending_exception {
173 */ 185 */
174 int started; 186 int started;
175 187
188 /* There was copying error. */
189 int copy_error;
190
191 /* A sequence number, it is used for in-order completion. */
192 sector_t exception_sequence;
193
194 struct list_head out_of_order_entry;
195
176 /* 196 /*
177 * For writing a complete chunk, bypassing the copy. 197 * For writing a complete chunk, bypassing the copy.
178 */ 198 */
@@ -1094,6 +1114,9 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1094 s->valid = 1; 1114 s->valid = 1;
1095 s->active = 0; 1115 s->active = 0;
1096 atomic_set(&s->pending_exceptions_count, 0); 1116 atomic_set(&s->pending_exceptions_count, 0);
1117 s->exception_start_sequence = 0;
1118 s->exception_complete_sequence = 0;
1119 INIT_LIST_HEAD(&s->out_of_order_list);
1097 init_rwsem(&s->lock); 1120 init_rwsem(&s->lock);
1098 INIT_LIST_HEAD(&s->list); 1121 INIT_LIST_HEAD(&s->list);
1099 spin_lock_init(&s->pe_lock); 1122 spin_lock_init(&s->pe_lock);
@@ -1443,6 +1466,19 @@ static void commit_callback(void *context, int success)
1443 pending_complete(pe, success); 1466 pending_complete(pe, success);
1444} 1467}
1445 1468
1469static void complete_exception(struct dm_snap_pending_exception *pe)
1470{
1471 struct dm_snapshot *s = pe->snap;
1472
1473 if (unlikely(pe->copy_error))
1474 pending_complete(pe, 0);
1475
1476 else
1477 /* Update the metadata if we are persistent */
1478 s->store->type->commit_exception(s->store, &pe->e,
1479 commit_callback, pe);
1480}
1481
1446/* 1482/*
1447 * Called when the copy I/O has finished. kcopyd actually runs 1483 * Called when the copy I/O has finished. kcopyd actually runs
1448 * this code so don't block. 1484 * this code so don't block.
@@ -1452,13 +1488,32 @@ static void copy_callback(int read_err, unsigned long write_err, void *context)
1452 struct dm_snap_pending_exception *pe = context; 1488 struct dm_snap_pending_exception *pe = context;
1453 struct dm_snapshot *s = pe->snap; 1489 struct dm_snapshot *s = pe->snap;
1454 1490
1455 if (read_err || write_err) 1491 pe->copy_error = read_err || write_err;
1456 pending_complete(pe, 0);
1457 1492
1458 else 1493 if (pe->exception_sequence == s->exception_complete_sequence) {
1459 /* Update the metadata if we are persistent */ 1494 s->exception_complete_sequence++;
1460 s->store->type->commit_exception(s->store, &pe->e, 1495 complete_exception(pe);
1461 commit_callback, pe); 1496
1497 while (!list_empty(&s->out_of_order_list)) {
1498 pe = list_entry(s->out_of_order_list.next,
1499 struct dm_snap_pending_exception, out_of_order_entry);
1500 if (pe->exception_sequence != s->exception_complete_sequence)
1501 break;
1502 s->exception_complete_sequence++;
1503 list_del(&pe->out_of_order_entry);
1504 complete_exception(pe);
1505 }
1506 } else {
1507 struct list_head *lh;
1508 struct dm_snap_pending_exception *pe2;
1509
1510 list_for_each_prev(lh, &s->out_of_order_list) {
1511 pe2 = list_entry(lh, struct dm_snap_pending_exception, out_of_order_entry);
1512 if (pe2->exception_sequence < pe->exception_sequence)
1513 break;
1514 }
1515 list_add(&pe->out_of_order_entry, lh);
1516 }
1462} 1517}
1463 1518
1464/* 1519/*
@@ -1553,6 +1608,8 @@ __find_pending_exception(struct dm_snapshot *s,
1553 return NULL; 1608 return NULL;
1554 } 1609 }
1555 1610
1611 pe->exception_sequence = s->exception_start_sequence++;
1612
1556 dm_insert_exception(&s->pending, &pe->e); 1613 dm_insert_exception(&s->pending, &pe->e);
1557 1614
1558 return pe; 1615 return pe;
@@ -2192,7 +2249,7 @@ static struct target_type origin_target = {
2192 2249
2193static struct target_type snapshot_target = { 2250static struct target_type snapshot_target = {
2194 .name = "snapshot", 2251 .name = "snapshot",
2195 .version = {1, 11, 1}, 2252 .version = {1, 12, 0},
2196 .module = THIS_MODULE, 2253 .module = THIS_MODULE,
2197 .ctr = snapshot_ctr, 2254 .ctr = snapshot_ctr,
2198 .dtr = snapshot_dtr, 2255 .dtr = snapshot_dtr,
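The dm-snap change gives every pending exception a sequence number when it starts; a kcopyd completion matching the next expected sequence is finished immediately (along with anything queued behind it), while early completions are parked on a list kept sorted by sequence. A userspace model of that in-order completion scheme, with an array standing in for the out_of_order list:

#include <stdio.h>

#define MAX_PENDING 8

static unsigned long complete_seq;          /* next sequence to finish  */
static unsigned long parked[MAX_PENDING];   /* out-of-order completions */
static int nparked;

static void finish(unsigned long seq)
{
        printf("commit exception %lu\n", seq);
}

static void copy_done(unsigned long seq)
{
        if (seq != complete_seq) {
                /* park it, keeping the array sorted by sequence */
                int i = nparked++;
                while (i > 0 && parked[i - 1] > seq) {
                        parked[i] = parked[i - 1];
                        i--;
                }
                parked[i] = seq;
                return;
        }

        finish(seq);
        complete_seq++;

        /* drain any parked completions that are now in order */
        while (nparked && parked[0] == complete_seq) {
                finish(parked[0]);
                complete_seq++;
                nparked--;
                for (int i = 0; i < nparked; i++)
                        parked[i] = parked[i + 1];
        }
}

int main(void)
{
        /* copies finish as 2, 0, 3, 1 but commit in order 0, 1, 2, 3 */
        copy_done(2);
        copy_done(0);
        copy_done(3);
        copy_done(1);
        return 0;
}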
diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
index 3d404c1371ed..28a90122a5a8 100644
--- a/drivers/md/dm-stats.c
+++ b/drivers/md/dm-stats.c
@@ -964,6 +964,7 @@ int dm_stats_message(struct mapped_device *md, unsigned argc, char **argv,
964 964
965int __init dm_statistics_init(void) 965int __init dm_statistics_init(void)
966{ 966{
967 shared_memory_amount = 0;
967 dm_stat_need_rcu_barrier = 0; 968 dm_stat_need_rcu_barrier = 0;
968 return 0; 969 return 0;
969} 970}
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 465f08ca62b1..3ba6a3859ce3 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -200,6 +200,11 @@ int dm_table_create(struct dm_table **result, fmode_t mode,
200 200
201 num_targets = dm_round_up(num_targets, KEYS_PER_NODE); 201 num_targets = dm_round_up(num_targets, KEYS_PER_NODE);
202 202
203 if (!num_targets) {
204 kfree(t);
205 return -ENOMEM;
206 }
207
203 if (alloc_targets(t, num_targets)) { 208 if (alloc_targets(t, num_targets)) {
204 kfree(t); 209 kfree(t);
205 return -ENOMEM; 210 return -ENOMEM;
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index 60bce435f4fa..8a30ad54bd46 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -1697,6 +1697,14 @@ void dm_pool_metadata_read_only(struct dm_pool_metadata *pmd)
1697 up_write(&pmd->root_lock); 1697 up_write(&pmd->root_lock);
1698} 1698}
1699 1699
1700void dm_pool_metadata_read_write(struct dm_pool_metadata *pmd)
1701{
1702 down_write(&pmd->root_lock);
1703 pmd->read_only = false;
1704 dm_bm_set_read_write(pmd->bm);
1705 up_write(&pmd->root_lock);
1706}
1707
1700int dm_pool_register_metadata_threshold(struct dm_pool_metadata *pmd, 1708int dm_pool_register_metadata_threshold(struct dm_pool_metadata *pmd,
1701 dm_block_t threshold, 1709 dm_block_t threshold,
1702 dm_sm_threshold_fn fn, 1710 dm_sm_threshold_fn fn,
diff --git a/drivers/md/dm-thin-metadata.h b/drivers/md/dm-thin-metadata.h
index 845ebbe589a9..7bcc0e1d6238 100644
--- a/drivers/md/dm-thin-metadata.h
+++ b/drivers/md/dm-thin-metadata.h
@@ -193,6 +193,7 @@ int dm_pool_resize_metadata_dev(struct dm_pool_metadata *pmd, dm_block_t new_siz
193 * that nothing is changing. 193 * that nothing is changing.
194 */ 194 */
195void dm_pool_metadata_read_only(struct dm_pool_metadata *pmd); 195void dm_pool_metadata_read_only(struct dm_pool_metadata *pmd);
196void dm_pool_metadata_read_write(struct dm_pool_metadata *pmd);
196 197
197int dm_pool_register_metadata_threshold(struct dm_pool_metadata *pmd, 198int dm_pool_register_metadata_threshold(struct dm_pool_metadata *pmd,
198 dm_block_t threshold, 199 dm_block_t threshold,
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 2c0cf511ec23..ee29037ffc2e 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -640,7 +640,9 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m)
640 */ 640 */
641 r = dm_thin_insert_block(tc->td, m->virt_block, m->data_block); 641 r = dm_thin_insert_block(tc->td, m->virt_block, m->data_block);
642 if (r) { 642 if (r) {
643 DMERR_LIMIT("dm_thin_insert_block() failed"); 643 DMERR_LIMIT("%s: dm_thin_insert_block() failed: error = %d",
644 dm_device_name(pool->pool_md), r);
645 set_pool_mode(pool, PM_READ_ONLY);
644 cell_error(pool, m->cell); 646 cell_error(pool, m->cell);
645 goto out; 647 goto out;
646 } 648 }
@@ -881,32 +883,23 @@ static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
881 } 883 }
882} 884}
883 885
884static int commit(struct pool *pool)
885{
886 int r;
887
888 r = dm_pool_commit_metadata(pool->pmd);
889 if (r)
890 DMERR_LIMIT("%s: commit failed: error = %d",
891 dm_device_name(pool->pool_md), r);
892
893 return r;
894}
895
896/* 886/*
897 * A non-zero return indicates read_only or fail_io mode. 887 * A non-zero return indicates read_only or fail_io mode.
898 * Many callers don't care about the return value. 888 * Many callers don't care about the return value.
899 */ 889 */
900static int commit_or_fallback(struct pool *pool) 890static int commit(struct pool *pool)
901{ 891{
902 int r; 892 int r;
903 893
904 if (get_pool_mode(pool) != PM_WRITE) 894 if (get_pool_mode(pool) != PM_WRITE)
905 return -EINVAL; 895 return -EINVAL;
906 896
907 r = commit(pool); 897 r = dm_pool_commit_metadata(pool->pmd);
908 if (r) 898 if (r) {
899 DMERR_LIMIT("%s: dm_pool_commit_metadata failed: error = %d",
900 dm_device_name(pool->pool_md), r);
909 set_pool_mode(pool, PM_READ_ONLY); 901 set_pool_mode(pool, PM_READ_ONLY);
902 }
910 903
911 return r; 904 return r;
912} 905}
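The dm-thin hunk above folds commit_or_fallback() into commit() itself: refuse to commit unless the pool is writable, and demote it to read-only when the metadata commit fails, so every caller gets the fallback behaviour. A minimal sketch of that shape; the enum and helpers are illustrative, not the target's real API:

#include <errno.h>
#include <stdio.h>

enum pool_mode { PM_WRITE, PM_READ_ONLY, PM_FAIL };

static enum pool_mode mode = PM_WRITE;
static int metadata_commit_ok = 0;           /* force a failure below */

static int commit_metadata(void) { return metadata_commit_ok ? 0 : -EIO; }

static int commit(void)
{
        int r;

        if (mode != PM_WRITE)
                return -EINVAL;              /* read_only or fail_io   */

        r = commit_metadata();
        if (r) {
                fprintf(stderr, "commit failed: error = %d\n", r);
                mode = PM_READ_ONLY;         /* fall back, keep running */
        }
        return r;
}

int main(void)
{
        commit();                            /* fails, pool goes read-only */
        printf("mode = %d\n", mode);         /* 1 == PM_READ_ONLY          */
        return 0;
}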
@@ -943,7 +936,9 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
943 * Try to commit to see if that will free up some 936 * Try to commit to see if that will free up some
944 * more space. 937 * more space.
945 */ 938 */
946 (void) commit_or_fallback(pool); 939 r = commit(pool);
940 if (r)
941 return r;
947 942
948 r = dm_pool_get_free_block_count(pool->pmd, &free_blocks); 943 r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
949 if (r) 944 if (r)
@@ -957,7 +952,7 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
957 * table reload). 952 * table reload).
958 */ 953 */
959 if (!free_blocks) { 954 if (!free_blocks) {
960 DMWARN("%s: no free space available.", 955 DMWARN("%s: no free data space available.",
961 dm_device_name(pool->pool_md)); 956 dm_device_name(pool->pool_md));
962 spin_lock_irqsave(&pool->lock, flags); 957 spin_lock_irqsave(&pool->lock, flags);
963 pool->no_free_space = 1; 958 pool->no_free_space = 1;
@@ -967,8 +962,16 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
967 } 962 }
968 963
969 r = dm_pool_alloc_data_block(pool->pmd, result); 964 r = dm_pool_alloc_data_block(pool->pmd, result);
970 if (r) 965 if (r) {
966 if (r == -ENOSPC &&
967 !dm_pool_get_free_metadata_block_count(pool->pmd, &free_blocks) &&
968 !free_blocks) {
969 DMWARN("%s: no free metadata space available.",
970 dm_device_name(pool->pool_md));
971 set_pool_mode(pool, PM_READ_ONLY);
972 }
971 return r; 973 return r;
974 }
972 975
973 return 0; 976 return 0;
974} 977}
@@ -1349,7 +1352,7 @@ static void process_deferred_bios(struct pool *pool)
1349 if (bio_list_empty(&bios) && !need_commit_due_to_time(pool)) 1352 if (bio_list_empty(&bios) && !need_commit_due_to_time(pool))
1350 return; 1353 return;
1351 1354
1352 if (commit_or_fallback(pool)) { 1355 if (commit(pool)) {
1353 while ((bio = bio_list_pop(&bios))) 1356 while ((bio = bio_list_pop(&bios)))
1354 bio_io_error(bio); 1357 bio_io_error(bio);
1355 return; 1358 return;
@@ -1397,6 +1400,7 @@ static void set_pool_mode(struct pool *pool, enum pool_mode mode)
1397 case PM_FAIL: 1400 case PM_FAIL:
1398 DMERR("%s: switching pool to failure mode", 1401 DMERR("%s: switching pool to failure mode",
1399 dm_device_name(pool->pool_md)); 1402 dm_device_name(pool->pool_md));
1403 dm_pool_metadata_read_only(pool->pmd);
1400 pool->process_bio = process_bio_fail; 1404 pool->process_bio = process_bio_fail;
1401 pool->process_discard = process_bio_fail; 1405 pool->process_discard = process_bio_fail;
1402 pool->process_prepared_mapping = process_prepared_mapping_fail; 1406 pool->process_prepared_mapping = process_prepared_mapping_fail;
@@ -1421,6 +1425,7 @@ static void set_pool_mode(struct pool *pool, enum pool_mode mode)
1421 break; 1425 break;
1422 1426
1423 case PM_WRITE: 1427 case PM_WRITE:
1428 dm_pool_metadata_read_write(pool->pmd);
1424 pool->process_bio = process_bio; 1429 pool->process_bio = process_bio;
1425 pool->process_discard = process_discard; 1430 pool->process_discard = process_discard;
1426 pool->process_prepared_mapping = process_prepared_mapping; 1431 pool->process_prepared_mapping = process_prepared_mapping;
@@ -1637,12 +1642,19 @@ static int bind_control_target(struct pool *pool, struct dm_target *ti)
1637 struct pool_c *pt = ti->private; 1642 struct pool_c *pt = ti->private;
1638 1643
1639 /* 1644 /*
1640 * We want to make sure that degraded pools are never upgraded. 1645 * We want to make sure that a pool in PM_FAIL mode is never upgraded.
1641 */ 1646 */
1642 enum pool_mode old_mode = pool->pf.mode; 1647 enum pool_mode old_mode = pool->pf.mode;
1643 enum pool_mode new_mode = pt->adjusted_pf.mode; 1648 enum pool_mode new_mode = pt->adjusted_pf.mode;
1644 1649
1645 if (old_mode > new_mode) 1650 /*
1651 * If we were in PM_FAIL mode, rollback of metadata failed. We're
1652 * not going to recover without a thin_repair. So we never let the
1653 * pool move out of the old mode. On the other hand a PM_READ_ONLY
1654 * may have been due to a lack of metadata or data space, and may
1655 * now work (ie. if the underlying devices have been resized).
1656 */
1657 if (old_mode == PM_FAIL)
1646 new_mode = old_mode; 1658 new_mode = old_mode;
1647 1659
1648 pool->ti = ti; 1660 pool->ti = ti;
@@ -2266,7 +2278,7 @@ static int pool_preresume(struct dm_target *ti)
2266 return r; 2278 return r;
2267 2279
2268 if (need_commit1 || need_commit2) 2280 if (need_commit1 || need_commit2)
2269 (void) commit_or_fallback(pool); 2281 (void) commit(pool);
2270 2282
2271 return 0; 2283 return 0;
2272} 2284}
@@ -2293,7 +2305,7 @@ static void pool_postsuspend(struct dm_target *ti)
2293 2305
2294 cancel_delayed_work(&pool->waker); 2306 cancel_delayed_work(&pool->waker);
2295 flush_workqueue(pool->wq); 2307 flush_workqueue(pool->wq);
2296 (void) commit_or_fallback(pool); 2308 (void) commit(pool);
2297} 2309}
2298 2310
2299static int check_arg_count(unsigned argc, unsigned args_required) 2311static int check_arg_count(unsigned argc, unsigned args_required)
@@ -2427,7 +2439,7 @@ static int process_reserve_metadata_snap_mesg(unsigned argc, char **argv, struct
2427 if (r) 2439 if (r)
2428 return r; 2440 return r;
2429 2441
2430 (void) commit_or_fallback(pool); 2442 (void) commit(pool);
2431 2443
2432 r = dm_pool_reserve_metadata_snap(pool->pmd); 2444 r = dm_pool_reserve_metadata_snap(pool->pmd);
2433 if (r) 2445 if (r)
@@ -2489,7 +2501,7 @@ static int pool_message(struct dm_target *ti, unsigned argc, char **argv)
2489 DMWARN("Unrecognised thin pool target message received: %s", argv[0]); 2501 DMWARN("Unrecognised thin pool target message received: %s", argv[0]);
2490 2502
2491 if (!r) 2503 if (!r)
2492 (void) commit_or_fallback(pool); 2504 (void) commit(pool);
2493 2505
2494 return r; 2506 return r;
2495} 2507}
@@ -2544,7 +2556,7 @@ static void pool_status(struct dm_target *ti, status_type_t type,
2544 2556
2545 /* Commit to ensure statistics aren't out-of-date */ 2557 /* Commit to ensure statistics aren't out-of-date */
2546 if (!(status_flags & DM_STATUS_NOFLUSH_FLAG) && !dm_suspended(ti)) 2558 if (!(status_flags & DM_STATUS_NOFLUSH_FLAG) && !dm_suspended(ti))
2547 (void) commit_or_fallback(pool); 2559 (void) commit(pool);
2548 2560
2549 r = dm_pool_get_metadata_transaction_id(pool->pmd, &transaction_id); 2561 r = dm_pool_get_metadata_transaction_id(pool->pmd, &transaction_id);
2550 if (r) { 2562 if (r) {
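
The dm-thin.c hunks above fold the old commit_or_fallback() wrapper into commit() itself: the pool only commits while in write mode, and a failed metadata commit now logs the error and drops the pool to read-only, so every caller that used to cast the wrapper's result to void now calls commit() directly. A minimal stand-alone sketch of that commit-or-degrade pattern, using mock types rather than the real pool and metadata structures:

#include <stdio.h>
#include <errno.h>

enum pool_mode { PM_WRITE, PM_READ_ONLY, PM_FAIL };

struct pool {
	enum pool_mode mode;
	const char *name;
};

/* stand-in for dm_pool_commit_metadata(); returns 0 or a negative errno */
static int commit_metadata(struct pool *pool)
{
	return 0;	/* pretend the commit succeeded */
}

/*
 * Mirrors the reworked commit(): refuse to commit outside write mode,
 * and degrade to read-only if the metadata commit fails.
 */
static int commit(struct pool *pool)
{
	int r;

	if (pool->mode != PM_WRITE)
		return -EINVAL;

	r = commit_metadata(pool);
	if (r) {
		fprintf(stderr, "%s: metadata commit failed: error = %d\n",
			pool->name, r);
		pool->mode = PM_READ_ONLY;
	}
	return r;
}

int main(void)
{
	struct pool pool = { .mode = PM_WRITE, .name = "pool0" };

	printf("commit: %d, mode now %d\n", commit(&pool), pool.mode);
	return 0;
}
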
diff --git a/drivers/md/md.c b/drivers/md/md.c
index b6b7a2866c9e..21f4d7ff0da2 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -776,16 +776,10 @@ void md_super_wait(struct mddev *mddev)
776 finish_wait(&mddev->sb_wait, &wq); 776 finish_wait(&mddev->sb_wait, &wq);
777} 777}
778 778
779static void bi_complete(struct bio *bio, int error)
780{
781 complete((struct completion*)bio->bi_private);
782}
783
784int sync_page_io(struct md_rdev *rdev, sector_t sector, int size, 779int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
785 struct page *page, int rw, bool metadata_op) 780 struct page *page, int rw, bool metadata_op)
786{ 781{
787 struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, rdev->mddev); 782 struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, rdev->mddev);
788 struct completion event;
789 int ret; 783 int ret;
790 784
791 rw |= REQ_SYNC; 785 rw |= REQ_SYNC;
@@ -801,11 +795,7 @@ int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
801 else 795 else
802 bio->bi_sector = sector + rdev->data_offset; 796 bio->bi_sector = sector + rdev->data_offset;
803 bio_add_page(bio, page, size, 0); 797 bio_add_page(bio, page, size, 0);
804 init_completion(&event); 798 submit_bio_wait(rw, bio);
805 bio->bi_private = &event;
806 bio->bi_end_io = bi_complete;
807 submit_bio(rw, bio);
808 wait_for_completion(&event);
809 799
810 ret = test_bit(BIO_UPTODATE, &bio->bi_flags); 800 ret = test_bit(BIO_UPTODATE, &bio->bi_flags);
811 bio_put(bio); 801 bio_put(bio);
@@ -7777,7 +7767,7 @@ void md_check_recovery(struct mddev *mddev)
7777 if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery)) 7767 if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
7778 return; 7768 return;
7779 if ( ! ( 7769 if ( ! (
7780 (mddev->flags & ~ (1<<MD_CHANGE_PENDING)) || 7770 (mddev->flags & MD_UPDATE_SB_FLAGS & ~ (1<<MD_CHANGE_PENDING)) ||
7781 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) || 7771 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
7782 test_bit(MD_RECOVERY_DONE, &mddev->recovery) || 7772 test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
7783 (mddev->external == 0 && mddev->safemode == 1) || 7773 (mddev->external == 0 && mddev->safemode == 1) ||
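
The sync_page_io() hunk swaps an open-coded completion (a private bi_complete() callback plus wait_for_completion()) for the block layer's synchronous submit_bio_wait() helper, so the function no longer carries its own completion plumbing. A condensed, kernel-style paraphrase of the post-patch flow; illustrative only, it does not build outside the kernel tree:

/* sketch: synchronous transfer of one page at a known sector */
bio = bio_alloc_mddev(GFP_NOIO, 1, rdev->mddev);
bio->bi_bdev = metadata_op ? rdev->meta_bdev : rdev->bdev;
bio->bi_sector = sector + rdev->data_offset;
bio_add_page(bio, page, size, 0);
submit_bio_wait(rw | REQ_SYNC, bio);	/* submits and sleeps until completion */
ret = test_bit(BIO_UPTODATE, &bio->bi_flags);
bio_put(bio);
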
diff --git a/drivers/md/persistent-data/dm-array.c b/drivers/md/persistent-data/dm-array.c
index af96e24ec328..1d75b1dc1e2e 100644
--- a/drivers/md/persistent-data/dm-array.c
+++ b/drivers/md/persistent-data/dm-array.c
@@ -317,8 +317,16 @@ static int shadow_ablock(struct dm_array_info *info, dm_block_t *root,
317 * The shadow op will often be a noop. Only insert if it really 317 * The shadow op will often be a noop. Only insert if it really
318 * copied data. 318 * copied data.
319 */ 319 */
320 if (dm_block_location(*block) != b) 320 if (dm_block_location(*block) != b) {
321 /*
322 * dm_tm_shadow_block will have already decremented the old
323 * block, but it is still referenced by the btree. We
324 * increment to stop the insert decrementing it below zero
325 * when overwriting the old value.
326 */
327 dm_tm_inc(info->btree_info.tm, b);
321 r = insert_ablock(info, index, *block, root); 328 r = insert_ablock(info, index, *block, root);
329 }
322 330
323 return r; 331 return r;
324} 332}
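
The dm-array.c change guards against a reference-count underflow: dm_tm_shadow_block() has already dropped the old block once, but the btree still references it until insert_ablock() overwrites the entry, so the count is bumped back up first. A small stand-alone sketch of that increment-before-the-overwrite-decrements idea, using a plain array of counts in place of the transaction manager:

#include <stdio.h>
#include <stdint.h>

static uint32_t refcount[16];

/* overwriting a slot drops the reference held on the old value */
static void overwrite_slot(uint64_t *slot, uint64_t new_block)
{
	refcount[*slot]--;
	*slot = new_block;
	refcount[new_block]++;
}

int main(void)
{
	uint64_t slot = 3;
	uint64_t shadow = 7;

	refcount[3] = 1;
	/* shadowing already dropped the old block once... */
	refcount[3]--;
	/* ...so take an extra reference before the overwrite drops it again */
	refcount[3]++;
	overwrite_slot(&slot, shadow);

	printf("old block refcount: %u (no underflow)\n", refcount[3]);
	return 0;
}
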
diff --git a/drivers/md/persistent-data/dm-block-manager.c b/drivers/md/persistent-data/dm-block-manager.c
index a7e8bf296388..064a3c271baa 100644
--- a/drivers/md/persistent-data/dm-block-manager.c
+++ b/drivers/md/persistent-data/dm-block-manager.c
@@ -626,6 +626,12 @@ void dm_bm_set_read_only(struct dm_block_manager *bm)
626} 626}
627EXPORT_SYMBOL_GPL(dm_bm_set_read_only); 627EXPORT_SYMBOL_GPL(dm_bm_set_read_only);
628 628
629void dm_bm_set_read_write(struct dm_block_manager *bm)
630{
631 bm->read_only = false;
632}
633EXPORT_SYMBOL_GPL(dm_bm_set_read_write);
634
629u32 dm_bm_checksum(const void *data, size_t len, u32 init_xor) 635u32 dm_bm_checksum(const void *data, size_t len, u32 init_xor)
630{ 636{
631 return crc32c(~(u32) 0, data, len) ^ init_xor; 637 return crc32c(~(u32) 0, data, len) ^ init_xor;
diff --git a/drivers/md/persistent-data/dm-block-manager.h b/drivers/md/persistent-data/dm-block-manager.h
index 9a82083a66b6..13cd58e1fe69 100644
--- a/drivers/md/persistent-data/dm-block-manager.h
+++ b/drivers/md/persistent-data/dm-block-manager.h
@@ -108,9 +108,9 @@ int dm_bm_unlock(struct dm_block *b);
108int dm_bm_flush_and_unlock(struct dm_block_manager *bm, 108int dm_bm_flush_and_unlock(struct dm_block_manager *bm,
109 struct dm_block *superblock); 109 struct dm_block *superblock);
110 110
111 /* 111/*
112 * Request data be prefetched into the cache. 112 * Request data is prefetched into the cache.
113 */ 113 */
114void dm_bm_prefetch(struct dm_block_manager *bm, dm_block_t b); 114void dm_bm_prefetch(struct dm_block_manager *bm, dm_block_t b);
115 115
116/* 116/*
@@ -125,6 +125,7 @@ void dm_bm_prefetch(struct dm_block_manager *bm, dm_block_t b);
125 * be returned if you do. 125 * be returned if you do.
126 */ 126 */
127void dm_bm_set_read_only(struct dm_block_manager *bm); 127void dm_bm_set_read_only(struct dm_block_manager *bm);
128void dm_bm_set_read_write(struct dm_block_manager *bm);
128 129
129u32 dm_bm_checksum(const void *data, size_t len, u32 init_xor); 130u32 dm_bm_checksum(const void *data, size_t len, u32 init_xor);
130 131
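
dm_bm_set_read_write() is the counterpart to dm_bm_set_read_only(); together with dm_pool_metadata_read_only()/dm_pool_metadata_read_write() used by set_pool_mode() above, it lets the pool flip the underlying block manager between read-only and writable as it moves between PM_FAIL, PM_READ_ONLY and PM_WRITE. A tiny stand-alone mock of the flag pair with a write path that honours it:

#include <stdio.h>
#include <stdbool.h>
#include <errno.h>

struct block_manager {
	bool read_only;
};

static void bm_set_read_only(struct block_manager *bm)  { bm->read_only = true; }
static void bm_set_read_write(struct block_manager *bm) { bm->read_only = false; }

/* write-lock style entry point: refuse to hand out writable blocks when degraded */
static int bm_write_lock(struct block_manager *bm)
{
	if (bm->read_only)
		return -EPERM;
	return 0;
}

int main(void)
{
	struct block_manager bm = { .read_only = false };

	bm_set_read_only(&bm);
	printf("write while read-only: %d\n", bm_write_lock(&bm));
	bm_set_read_write(&bm);
	printf("write after re-enable: %d\n", bm_write_lock(&bm));
	return 0;
}
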
diff --git a/drivers/md/persistent-data/dm-space-map-common.c b/drivers/md/persistent-data/dm-space-map-common.c
index 6058569fe86c..466a60bbd716 100644
--- a/drivers/md/persistent-data/dm-space-map-common.c
+++ b/drivers/md/persistent-data/dm-space-map-common.c
@@ -381,7 +381,7 @@ int sm_ll_find_free_block(struct ll_disk *ll, dm_block_t begin,
381} 381}
382 382
383static int sm_ll_mutate(struct ll_disk *ll, dm_block_t b, 383static int sm_ll_mutate(struct ll_disk *ll, dm_block_t b,
384 uint32_t (*mutator)(void *context, uint32_t old), 384 int (*mutator)(void *context, uint32_t old, uint32_t *new),
385 void *context, enum allocation_event *ev) 385 void *context, enum allocation_event *ev)
386{ 386{
387 int r; 387 int r;
@@ -410,11 +410,17 @@ static int sm_ll_mutate(struct ll_disk *ll, dm_block_t b,
410 410
411 if (old > 2) { 411 if (old > 2) {
412 r = sm_ll_lookup_big_ref_count(ll, b, &old); 412 r = sm_ll_lookup_big_ref_count(ll, b, &old);
413 if (r < 0) 413 if (r < 0) {
414 dm_tm_unlock(ll->tm, nb);
414 return r; 415 return r;
416 }
415 } 417 }
416 418
417 ref_count = mutator(context, old); 419 r = mutator(context, old, &ref_count);
420 if (r) {
421 dm_tm_unlock(ll->tm, nb);
422 return r;
423 }
418 424
419 if (ref_count <= 2) { 425 if (ref_count <= 2) {
420 sm_set_bitmap(bm_le, bit, ref_count); 426 sm_set_bitmap(bm_le, bit, ref_count);
@@ -465,9 +471,10 @@ static int sm_ll_mutate(struct ll_disk *ll, dm_block_t b,
465 return ll->save_ie(ll, index, &ie_disk); 471 return ll->save_ie(ll, index, &ie_disk);
466} 472}
467 473
468static uint32_t set_ref_count(void *context, uint32_t old) 474static int set_ref_count(void *context, uint32_t old, uint32_t *new)
469{ 475{
470 return *((uint32_t *) context); 476 *new = *((uint32_t *) context);
477 return 0;
471} 478}
472 479
473int sm_ll_insert(struct ll_disk *ll, dm_block_t b, 480int sm_ll_insert(struct ll_disk *ll, dm_block_t b,
@@ -476,9 +483,10 @@ int sm_ll_insert(struct ll_disk *ll, dm_block_t b,
476 return sm_ll_mutate(ll, b, set_ref_count, &ref_count, ev); 483 return sm_ll_mutate(ll, b, set_ref_count, &ref_count, ev);
477} 484}
478 485
479static uint32_t inc_ref_count(void *context, uint32_t old) 486static int inc_ref_count(void *context, uint32_t old, uint32_t *new)
480{ 487{
481 return old + 1; 488 *new = old + 1;
489 return 0;
482} 490}
483 491
484int sm_ll_inc(struct ll_disk *ll, dm_block_t b, enum allocation_event *ev) 492int sm_ll_inc(struct ll_disk *ll, dm_block_t b, enum allocation_event *ev)
@@ -486,9 +494,15 @@ int sm_ll_inc(struct ll_disk *ll, dm_block_t b, enum allocation_event *ev)
486 return sm_ll_mutate(ll, b, inc_ref_count, NULL, ev); 494 return sm_ll_mutate(ll, b, inc_ref_count, NULL, ev);
487} 495}
488 496
489static uint32_t dec_ref_count(void *context, uint32_t old) 497static int dec_ref_count(void *context, uint32_t old, uint32_t *new)
490{ 498{
491 return old - 1; 499 if (!old) {
500 DMERR_LIMIT("unable to decrement a reference count below 0");
501 return -EINVAL;
502 }
503
504 *new = old - 1;
505 return 0;
492} 506}
493 507
494int sm_ll_dec(struct ll_disk *ll, dm_block_t b, enum allocation_event *ev) 508int sm_ll_dec(struct ll_disk *ll, dm_block_t b, enum allocation_event *ev)
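
The sm_ll_mutate() hunks change the mutator callback from returning the new count directly to returning an error code with the count passed back through an out-parameter, so dec_ref_count() can refuse to go below zero and the caller can unlock and propagate the failure. A stand-alone sketch of the same callback shape:

#include <stdio.h>
#include <stdint.h>
#include <errno.h>

/* mutator: may fail, returns the new count via *new_count */
typedef int (*mutator_fn)(void *context, uint32_t old, uint32_t *new_count);

static int inc_ref_count(void *context, uint32_t old, uint32_t *new_count)
{
	*new_count = old + 1;
	return 0;
}

static int dec_ref_count(void *context, uint32_t old, uint32_t *new_count)
{
	if (!old) {
		fprintf(stderr, "unable to decrement a reference count below 0\n");
		return -EINVAL;
	}
	*new_count = old - 1;
	return 0;
}

/* driver loop: propagate mutator failures instead of storing a bogus count */
static int mutate(uint32_t *count, mutator_fn fn, void *context)
{
	uint32_t new_count;
	int r = fn(context, *count, &new_count);

	if (r)
		return r;
	*count = new_count;
	return 0;
}

int main(void)
{
	uint32_t count = 0;

	printf("inc: %d, count=%u\n", mutate(&count, inc_ref_count, NULL), count);
	printf("dec: %d, count=%u\n", mutate(&count, dec_ref_count, NULL), count);
	printf("dec: %d, count=%u\n", mutate(&count, dec_ref_count, NULL), count);
	return 0;
}
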
diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
index 1c959684caef..58fc1eef7499 100644
--- a/drivers/md/persistent-data/dm-space-map-metadata.c
+++ b/drivers/md/persistent-data/dm-space-map-metadata.c
@@ -384,12 +384,16 @@ static int sm_metadata_new_block(struct dm_space_map *sm, dm_block_t *b)
384 struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm); 384 struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
385 385
386 int r = sm_metadata_new_block_(sm, b); 386 int r = sm_metadata_new_block_(sm, b);
387 if (r) 387 if (r) {
388 DMERR("unable to allocate new metadata block"); 388 DMERR("unable to allocate new metadata block");
389 return r;
390 }
389 391
390 r = sm_metadata_get_nr_free(sm, &count); 392 r = sm_metadata_get_nr_free(sm, &count);
391 if (r) 393 if (r) {
392 DMERR("couldn't get free block count"); 394 DMERR("couldn't get free block count");
395 return r;
396 }
393 397
394 check_threshold(&smm->threshold, count); 398 check_threshold(&smm->threshold, count);
395 399
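
The sm_metadata_new_block() hunk fixes a log-and-fall-through bug: both error paths now return immediately instead of printing a message and carrying on with an unallocated block. A trivial stand-alone illustration of the corrected shape:

#include <stdio.h>
#include <errno.h>

static int new_block(int *b)
{
	(void)b;
	return -ENOSPC;		/* pretend allocation failed */
}

static int alloc_and_count(int *b)
{
	int r = new_block(b);

	if (r) {
		fprintf(stderr, "unable to allocate new metadata block\n");
		return r;	/* propagate; do not fall through and use *b */
	}
	printf("allocated block %d\n", *b);
	return 0;
}

int main(void)
{
	int b;

	return alloc_and_count(&b) ? 1 : 0;
}
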
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 47da0af6322b..cc055da02e2a 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -678,26 +678,23 @@ get_active_stripe(struct r5conf *conf, sector_t sector,
678 } else 678 } else
679 init_stripe(sh, sector, previous); 679 init_stripe(sh, sector, previous);
680 } else { 680 } else {
681 spin_lock(&conf->device_lock);
681 if (atomic_read(&sh->count)) { 682 if (atomic_read(&sh->count)) {
682 BUG_ON(!list_empty(&sh->lru) 683 BUG_ON(!list_empty(&sh->lru)
683 && !test_bit(STRIPE_EXPANDING, &sh->state) 684 && !test_bit(STRIPE_EXPANDING, &sh->state)
684 && !test_bit(STRIPE_ON_UNPLUG_LIST, &sh->state) 685 && !test_bit(STRIPE_ON_UNPLUG_LIST, &sh->state)
685 && !test_bit(STRIPE_ON_RELEASE_LIST, &sh->state)); 686 );
686 } else { 687 } else {
687 spin_lock(&conf->device_lock);
688 if (!test_bit(STRIPE_HANDLE, &sh->state)) 688 if (!test_bit(STRIPE_HANDLE, &sh->state))
689 atomic_inc(&conf->active_stripes); 689 atomic_inc(&conf->active_stripes);
690 if (list_empty(&sh->lru) && 690 BUG_ON(list_empty(&sh->lru));
691 !test_bit(STRIPE_ON_RELEASE_LIST, &sh->state) &&
692 !test_bit(STRIPE_EXPANDING, &sh->state))
693 BUG();
694 list_del_init(&sh->lru); 691 list_del_init(&sh->lru);
695 if (sh->group) { 692 if (sh->group) {
696 sh->group->stripes_cnt--; 693 sh->group->stripes_cnt--;
697 sh->group = NULL; 694 sh->group = NULL;
698 } 695 }
699 spin_unlock(&conf->device_lock);
700 } 696 }
697 spin_unlock(&conf->device_lock);
701 } 698 }
702 } while (sh == NULL); 699 } while (sh == NULL);
703 700
@@ -5471,7 +5468,7 @@ static int alloc_thread_groups(struct r5conf *conf, int cnt,
5471 for (i = 0; i < *group_cnt; i++) { 5468 for (i = 0; i < *group_cnt; i++) {
5472 struct r5worker_group *group; 5469 struct r5worker_group *group;
5473 5470
5474 group = worker_groups[i]; 5471 group = &(*worker_groups)[i];
5475 INIT_LIST_HEAD(&group->handle_list); 5472 INIT_LIST_HEAD(&group->handle_list);
5476 group->conf = conf; 5473 group->conf = conf;
5477 group->workers = workers + i * cnt; 5474 group->workers = workers + i * cnt;
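
Two raid5.c fixes here: get_active_stripe() now takes device_lock before inspecting sh->count so the whole re-attach path is serialised, and alloc_thread_groups() indexes its freshly allocated array correctly. With a parameter of type struct r5worker_group **, worker_groups[i] steps through non-existent pointers, whereas &(*worker_groups)[i] addresses element i of the array that *worker_groups points to. A stand-alone illustration of that distinction, with mock types rather than the raid5 structures:

#include <stdio.h>
#include <stdlib.h>

struct group {
	int id;
};

/* allocates the array and hands it back through an output pointer,
 * mirroring a function that takes struct r5worker_group **worker_groups */
static int alloc_groups(int cnt, struct group **out)
{
	*out = calloc(cnt, sizeof(**out));
	if (!*out)
		return -1;

	for (int i = 0; i < cnt; i++) {
		/* correct: element i of the array *out points to */
		struct group *g = &(*out)[i];
		/* wrong (the old bug): out[i] would treat 'out' itself as an
		 * array of pointers and read past the single pointer passed in */
		g->id = i;
	}
	return 0;
}

int main(void)
{
	struct group *groups;

	if (alloc_groups(4, &groups))
		return 1;
	printf("group 3 id = %d\n", groups[3].id);
	free(groups);
	return 0;
}
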
diff --git a/drivers/media/common/siano/smscoreapi.h b/drivers/media/common/siano/smscoreapi.h
index d0799e323364..9c9063cd3208 100644
--- a/drivers/media/common/siano/smscoreapi.h
+++ b/drivers/media/common/siano/smscoreapi.h
@@ -955,7 +955,7 @@ struct sms_rx_stats {
955 u32 modem_state; /* from SMSHOSTLIB_DVB_MODEM_STATE_ET */ 955 u32 modem_state; /* from SMSHOSTLIB_DVB_MODEM_STATE_ET */
956 s32 SNR; /* dB */ 956 s32 SNR; /* dB */
957 u32 ber; /* Post Viterbi ber [1E-5] */ 957 u32 ber; /* Post Viterbi ber [1E-5] */
958 u32 ber_error_count; /* Number of erronous SYNC bits. */ 958 u32 ber_error_count; /* Number of erroneous SYNC bits. */
959 u32 ber_bit_count; /* Total number of SYNC bits. */ 959 u32 ber_bit_count; /* Total number of SYNC bits. */
960 u32 ts_per; /* Transport stream PER, 960 u32 ts_per; /* Transport stream PER,
961 0xFFFFFFFF indicate N/A */ 961 0xFFFFFFFF indicate N/A */
@@ -981,7 +981,7 @@ struct sms_rx_stats_ex {
981 u32 modem_state; /* from SMSHOSTLIB_DVB_MODEM_STATE_ET */ 981 u32 modem_state; /* from SMSHOSTLIB_DVB_MODEM_STATE_ET */
982 s32 SNR; /* dB */ 982 s32 SNR; /* dB */
983 u32 ber; /* Post Viterbi ber [1E-5] */ 983 u32 ber; /* Post Viterbi ber [1E-5] */
984 u32 ber_error_count; /* Number of erronous SYNC bits. */ 984 u32 ber_error_count; /* Number of erroneous SYNC bits. */
985 u32 ber_bit_count; /* Total number of SYNC bits. */ 985 u32 ber_bit_count; /* Total number of SYNC bits. */
986 u32 ts_per; /* Transport stream PER, 986 u32 ts_per; /* Transport stream PER,
987 0xFFFFFFFF indicate N/A */ 987 0xFFFFFFFF indicate N/A */
diff --git a/drivers/media/common/siano/smsdvb.h b/drivers/media/common/siano/smsdvb.h
index 92c413ba0c79..ae36d0ae0fb1 100644
--- a/drivers/media/common/siano/smsdvb.h
+++ b/drivers/media/common/siano/smsdvb.h
@@ -95,7 +95,7 @@ struct RECEPTION_STATISTICS_PER_SLICES_S {
95 u32 is_demod_locked; /* 0 - not locked, 1 - locked */ 95 u32 is_demod_locked; /* 0 - not locked, 1 - locked */
96 96
97 u32 ber_bit_count; /* Total number of SYNC bits. */ 97 u32 ber_bit_count; /* Total number of SYNC bits. */
98 u32 ber_error_count; /* Number of erronous SYNC bits. */ 98 u32 ber_error_count; /* Number of erroneous SYNC bits. */
99 99
100 s32 MRC_SNR; /* dB */ 100 s32 MRC_SNR; /* dB */
101 s32 mrc_in_band_pwr; /* In band power in dBM */ 101 s32 mrc_in_band_pwr; /* In band power in dBM */
diff --git a/drivers/media/dvb-core/dvb_demux.c b/drivers/media/dvb-core/dvb_demux.c
index 58de4410c525..6c7ff0cdcd32 100644
--- a/drivers/media/dvb-core/dvb_demux.c
+++ b/drivers/media/dvb-core/dvb_demux.c
@@ -435,7 +435,7 @@ static void dvb_dmx_swfilter_packet(struct dvb_demux *demux, const u8 *buf)
435 dprintk_tscheck("TEI detected. " 435 dprintk_tscheck("TEI detected. "
436 "PID=0x%x data1=0x%x\n", 436 "PID=0x%x data1=0x%x\n",
437 pid, buf[1]); 437 pid, buf[1]);
438 /* data in this packet cant be trusted - drop it unless 438 /* data in this packet can't be trusted - drop it unless
439 * module option dvb_demux_feed_err_pkts is set */ 439 * module option dvb_demux_feed_err_pkts is set */
440 if (!dvb_demux_feed_err_pkts) 440 if (!dvb_demux_feed_err_pkts)
441 return; 441 return;
@@ -1032,8 +1032,13 @@ static int dmx_section_feed_release_filter(struct dmx_section_feed *feed,
1032 return -EINVAL; 1032 return -EINVAL;
1033 } 1033 }
1034 1034
1035 if (feed->is_filtering) 1035 if (feed->is_filtering) {
1036 /* release dvbdmx->mutex as far as it is
1037 acquired by stop_filtering() itself */
1038 mutex_unlock(&dvbdmx->mutex);
1036 feed->stop_filtering(feed); 1039 feed->stop_filtering(feed);
1040 mutex_lock(&dvbdmx->mutex);
1041 }
1037 1042
1038 spin_lock_irq(&dvbdmx->lock); 1043 spin_lock_irq(&dvbdmx->lock);
1039 f = dvbdmxfeed->filter; 1044 f = dvbdmxfeed->filter;
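
dmx_section_feed_release_filter() is entered with dvbdmx->mutex held, but stop_filtering() acquires the same mutex internally, so the hunk releases the lock around the call and re-takes it afterwards to avoid self-deadlock on a non-recursive mutex. A stand-alone pthread sketch of the same unlock-around-the-callee pattern:

#include <stdio.h>
#include <pthread.h>

static pthread_mutex_t demux_lock = PTHREAD_MUTEX_INITIALIZER;

/* callee acquires the lock itself, like stop_filtering() */
static void stop_filtering(void)
{
	pthread_mutex_lock(&demux_lock);
	puts("filtering stopped");
	pthread_mutex_unlock(&demux_lock);
}

static void release_filter(void)
{
	pthread_mutex_lock(&demux_lock);

	/* drop the lock around the call; taking it twice would deadlock
	 * (or be undefined) on a default, non-recursive mutex */
	pthread_mutex_unlock(&demux_lock);
	stop_filtering();
	pthread_mutex_lock(&demux_lock);

	pthread_mutex_unlock(&demux_lock);
}

int main(void)
{
	release_filter();
	return 0;
}
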
diff --git a/drivers/media/dvb-frontends/af9033.c b/drivers/media/dvb-frontends/af9033.c
index 30ee59052157..65728c25ea05 100644
--- a/drivers/media/dvb-frontends/af9033.c
+++ b/drivers/media/dvb-frontends/af9033.c
@@ -170,18 +170,18 @@ static int af9033_rd_reg_mask(struct af9033_state *state, u32 reg, u8 *val,
170static int af9033_wr_reg_val_tab(struct af9033_state *state, 170static int af9033_wr_reg_val_tab(struct af9033_state *state,
171 const struct reg_val *tab, int tab_len) 171 const struct reg_val *tab, int tab_len)
172{ 172{
173#define MAX_TAB_LEN 212
173 int ret, i, j; 174 int ret, i, j;
174 u8 buf[MAX_XFER_SIZE]; 175 u8 buf[1 + MAX_TAB_LEN];
176
177 dev_dbg(&state->i2c->dev, "%s: tab_len=%d\n", __func__, tab_len);
175 178
176 if (tab_len > sizeof(buf)) { 179 if (tab_len > sizeof(buf)) {
177 dev_warn(&state->i2c->dev, 180 dev_warn(&state->i2c->dev, "%s: tab len %d is too big\n",
178 "%s: i2c wr len=%d is too big!\n", 181 KBUILD_MODNAME, tab_len);
179 KBUILD_MODNAME, tab_len);
180 return -EINVAL; 182 return -EINVAL;
181 } 183 }
182 184
183 dev_dbg(&state->i2c->dev, "%s: tab_len=%d\n", __func__, tab_len);
184
185 for (i = 0, j = 0; i < tab_len; i++) { 185 for (i = 0, j = 0; i < tab_len; i++) {
186 buf[j] = tab[i].val; 186 buf[j] = tab[i].val;
187 187
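
The af9033 hunk sizes the I2C scratch buffer from the largest register/value table the driver writes (one address byte plus up to MAX_TAB_LEN values) rather than the generic MAX_XFER_SIZE, keeping the explicit length check so an oversized table is rejected before it is packed. A simplified stand-alone sketch of packing a table behind such a bounds check; it does not reproduce the driver's grouping of consecutive registers:

#include <stdio.h>
#include <stdint.h>
#include <errno.h>

#define MAX_TAB_LEN 212

struct reg_val {
	uint32_t reg;
	uint8_t val;
};

/* pack values after a leading register byte, refusing oversized tables */
static int pack_tab(const struct reg_val *tab, int tab_len,
		    uint8_t *out, size_t out_len)
{
	if (tab_len < 1 || (size_t)tab_len + 1 > out_len) {
		fprintf(stderr, "tab len %d is too big\n", tab_len);
		return -EINVAL;
	}

	out[0] = tab[0].reg & 0xff;	/* start register */
	for (int i = 0; i < tab_len; i++)
		out[1 + i] = tab[i].val;
	return 0;
}

int main(void)
{
	struct reg_val tab[] = { { 0x80, 0x01 }, { 0x81, 0x02 } };
	uint8_t buf[1 + MAX_TAB_LEN];

	printf("pack: %d\n", pack_tab(tab, 2, buf, sizeof(buf)));
	return 0;
}
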
diff --git a/drivers/media/dvb-frontends/cxd2820r_c.c b/drivers/media/dvb-frontends/cxd2820r_c.c
index 125a44041011..5c6ab4921bf1 100644
--- a/drivers/media/dvb-frontends/cxd2820r_c.c
+++ b/drivers/media/dvb-frontends/cxd2820r_c.c
@@ -78,7 +78,7 @@ int cxd2820r_set_frontend_c(struct dvb_frontend *fe)
78 78
79 num = if_freq / 1000; /* Hz => kHz */ 79 num = if_freq / 1000; /* Hz => kHz */
80 num *= 0x4000; 80 num *= 0x4000;
81 if_ctl = cxd2820r_div_u64_round_closest(num, 41000); 81 if_ctl = 0x4000 - cxd2820r_div_u64_round_closest(num, 41000);
82 buf[0] = (if_ctl >> 8) & 0x3f; 82 buf[0] = (if_ctl >> 8) & 0x3f;
83 buf[1] = (if_ctl >> 0) & 0xff; 83 buf[1] = (if_ctl >> 0) & 0xff;
84 84
diff --git a/drivers/media/dvb-frontends/dib8000.c b/drivers/media/dvb-frontends/dib8000.c
index 90536147bf04..6dbbee453ee1 100644
--- a/drivers/media/dvb-frontends/dib8000.c
+++ b/drivers/media/dvb-frontends/dib8000.c
@@ -3048,7 +3048,7 @@ static int dib8000_tune(struct dvb_frontend *fe)
3048 dib8000_set_diversity_in(state->fe[0], state->diversity_onoff); 3048 dib8000_set_diversity_in(state->fe[0], state->diversity_onoff);
3049 3049
3050 locks = (dib8000_read_word(state, 180) >> 6) & 0x3f; /* P_coff_winlen ? */ 3050 locks = (dib8000_read_word(state, 180) >> 6) & 0x3f; /* P_coff_winlen ? */
3051 /* coff should lock over P_coff_winlen ofdm symbols : give 3 times this lenght to lock */ 3051 /* coff should lock over P_coff_winlen ofdm symbols : give 3 times this length to lock */
3052 *timeout = dib8000_get_timeout(state, 2 * locks, SYMBOL_DEPENDENT_ON); 3052 *timeout = dib8000_get_timeout(state, 2 * locks, SYMBOL_DEPENDENT_ON);
3053 *tune_state = CT_DEMOD_STEP_5; 3053 *tune_state = CT_DEMOD_STEP_5;
3054 break; 3054 break;
@@ -3115,7 +3115,7 @@ static int dib8000_tune(struct dvb_frontend *fe)
3115 3115
3116 case CT_DEMOD_STEP_9: /* 39 */ 3116 case CT_DEMOD_STEP_9: /* 39 */
3117 if ((state->revision == 0x8090) || ((dib8000_read_word(state, 1291) >> 9) & 0x1)) { /* fe capable of deinterleaving : esram */ 3117 if ((state->revision == 0x8090) || ((dib8000_read_word(state, 1291) >> 9) & 0x1)) { /* fe capable of deinterleaving : esram */
3118 /* defines timeout for mpeg lock depending on interleaver lenght of longest layer */ 3118 /* defines timeout for mpeg lock depending on interleaver length of longest layer */
3119 for (i = 0; i < 3; i++) { 3119 for (i = 0; i < 3; i++) {
3120 if (c->layer[i].interleaving >= deeper_interleaver) { 3120 if (c->layer[i].interleaving >= deeper_interleaver) {
3121 dprintk("layer%i: time interleaver = %d ", i, c->layer[i].interleaving); 3121 dprintk("layer%i: time interleaver = %d ", i, c->layer[i].interleaving);
diff --git a/drivers/media/dvb-frontends/drxk_hard.c b/drivers/media/dvb-frontends/drxk_hard.c
index d416c15691da..bf29a3f0e6f0 100644
--- a/drivers/media/dvb-frontends/drxk_hard.c
+++ b/drivers/media/dvb-frontends/drxk_hard.c
@@ -1191,7 +1191,7 @@ static int mpegts_configure_pins(struct drxk_state *state, bool mpeg_enable)
1191 goto error; 1191 goto error;
1192 1192
1193 if (state->m_enable_parallel == true) { 1193 if (state->m_enable_parallel == true) {
1194 /* paralel -> enable MD1 to MD7 */ 1194 /* parallel -> enable MD1 to MD7 */
1195 status = write16(state, SIO_PDR_MD1_CFG__A, 1195 status = write16(state, SIO_PDR_MD1_CFG__A,
1196 sio_pdr_mdx_cfg); 1196 sio_pdr_mdx_cfg);
1197 if (status < 0) 1197 if (status < 0)
@@ -1428,7 +1428,7 @@ static int mpegts_stop(struct drxk_state *state)
1428 1428
1429 dprintk(1, "\n"); 1429 dprintk(1, "\n");
1430 1430
1431 /* Gracefull shutdown (byte boundaries) */ 1431 /* Graceful shutdown (byte boundaries) */
1432 status = read16(state, FEC_OC_SNC_MODE__A, &fec_oc_snc_mode); 1432 status = read16(state, FEC_OC_SNC_MODE__A, &fec_oc_snc_mode);
1433 if (status < 0) 1433 if (status < 0)
1434 goto error; 1434 goto error;
@@ -2021,7 +2021,7 @@ static int mpegts_dto_setup(struct drxk_state *state,
2021 fec_oc_dto_burst_len = 204; 2021 fec_oc_dto_burst_len = 204;
2022 } 2022 }
2023 2023
2024 /* Check serial or parrallel output */ 2024 /* Check serial or parallel output */
2025 fec_oc_reg_ipr_mode &= (~(FEC_OC_IPR_MODE_SERIAL__M)); 2025 fec_oc_reg_ipr_mode &= (~(FEC_OC_IPR_MODE_SERIAL__M));
2026 if (state->m_enable_parallel == false) { 2026 if (state->m_enable_parallel == false) {
2027 /* MPEG data output is serial -> set ipr_mode[0] */ 2027 /* MPEG data output is serial -> set ipr_mode[0] */
@@ -2908,7 +2908,7 @@ static int adc_synchronization(struct drxk_state *state)
2908 goto error; 2908 goto error;
2909 2909
2910 if (count == 1) { 2910 if (count == 1) {
2911 /* Try sampling on a diffrent edge */ 2911 /* Try sampling on a different edge */
2912 u16 clk_neg = 0; 2912 u16 clk_neg = 0;
2913 2913
2914 status = read16(state, IQM_AF_CLKNEG__A, &clk_neg); 2914 status = read16(state, IQM_AF_CLKNEG__A, &clk_neg);
@@ -3306,7 +3306,7 @@ static int dvbt_sc_command(struct drxk_state *state,
3306 if (status < 0) 3306 if (status < 0)
3307 goto error; 3307 goto error;
3308 3308
3309 /* Retreive results parameters from SC */ 3309 /* Retrieve results parameters from SC */
3310 switch (cmd) { 3310 switch (cmd) {
3311 /* All commands yielding 5 results */ 3311 /* All commands yielding 5 results */
3312 /* All commands yielding 4 results */ 3312 /* All commands yielding 4 results */
@@ -3849,7 +3849,7 @@ static int set_dvbt(struct drxk_state *state, u16 intermediate_freqk_hz,
3849 break; 3849 break;
3850 } 3850 }
3851#if 0 3851#if 0
3852 /* No hierachical channels support in BDA */ 3852 /* No hierarchical channels support in BDA */
3853 /* Priority (only for hierarchical channels) */ 3853 /* Priority (only for hierarchical channels) */
3854 switch (channel->priority) { 3854 switch (channel->priority) {
3855 case DRX_PRIORITY_LOW: 3855 case DRX_PRIORITY_LOW:
@@ -4081,7 +4081,7 @@ error:
4081/*============================================================================*/ 4081/*============================================================================*/
4082 4082
4083/** 4083/**
4084* \brief Retreive lock status . 4084* \brief Retrieve lock status .
4085* \param demod Pointer to demodulator instance. 4085* \param demod Pointer to demodulator instance.
4086* \param lockStat Pointer to lock status structure. 4086* \param lockStat Pointer to lock status structure.
4087* \return DRXStatus_t. 4087* \return DRXStatus_t.
@@ -6174,7 +6174,7 @@ static int init_drxk(struct drxk_state *state)
6174 goto error; 6174 goto error;
6175 6175
6176 /* Stamp driver version number in SCU data RAM in BCD code 6176 /* Stamp driver version number in SCU data RAM in BCD code
6177 Done to enable field application engineers to retreive drxdriver version 6177 Done to enable field application engineers to retrieve drxdriver version
6178 via I2C from SCU RAM. 6178 via I2C from SCU RAM.
6179 Not using SCU command interface for SCU register access since no 6179 Not using SCU command interface for SCU register access since no
6180 microcode may be present. 6180 microcode may be present.
@@ -6399,7 +6399,7 @@ static int drxk_set_parameters(struct dvb_frontend *fe)
6399 fe->ops.tuner_ops.get_if_frequency(fe, &IF); 6399 fe->ops.tuner_ops.get_if_frequency(fe, &IF);
6400 start(state, 0, IF); 6400 start(state, 0, IF);
6401 6401
6402 /* After set_frontend, stats aren't avaliable */ 6402 /* After set_frontend, stats aren't available */
6403 p->strength.stat[0].scale = FE_SCALE_RELATIVE; 6403 p->strength.stat[0].scale = FE_SCALE_RELATIVE;
6404 p->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE; 6404 p->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
6405 p->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE; 6405 p->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
diff --git a/drivers/media/dvb-frontends/rtl2830.c b/drivers/media/dvb-frontends/rtl2830.c
index 7efb796c472c..50e8b63e5169 100644
--- a/drivers/media/dvb-frontends/rtl2830.c
+++ b/drivers/media/dvb-frontends/rtl2830.c
@@ -710,6 +710,7 @@ struct dvb_frontend *rtl2830_attach(const struct rtl2830_config *cfg,
710 sizeof(priv->tuner_i2c_adapter.name)); 710 sizeof(priv->tuner_i2c_adapter.name));
711 priv->tuner_i2c_adapter.algo = &rtl2830_tuner_i2c_algo; 711 priv->tuner_i2c_adapter.algo = &rtl2830_tuner_i2c_algo;
712 priv->tuner_i2c_adapter.algo_data = NULL; 712 priv->tuner_i2c_adapter.algo_data = NULL;
713 priv->tuner_i2c_adapter.dev.parent = &i2c->dev;
713 i2c_set_adapdata(&priv->tuner_i2c_adapter, priv); 714 i2c_set_adapdata(&priv->tuner_i2c_adapter, priv);
714 if (i2c_add_adapter(&priv->tuner_i2c_adapter) < 0) { 715 if (i2c_add_adapter(&priv->tuner_i2c_adapter) < 0) {
715 dev_err(&i2c->dev, 716 dev_err(&i2c->dev,
diff --git a/drivers/media/i2c/adv7183_regs.h b/drivers/media/i2c/adv7183_regs.h
index 4a5b7d211d2f..b253d400e817 100644
--- a/drivers/media/i2c/adv7183_regs.h
+++ b/drivers/media/i2c/adv7183_regs.h
@@ -52,9 +52,9 @@
52#define ADV7183_VS_FIELD_CTRL_1 0x31 /* Vsync field control 1 */ 52#define ADV7183_VS_FIELD_CTRL_1 0x31 /* Vsync field control 1 */
53#define ADV7183_VS_FIELD_CTRL_2 0x32 /* Vsync field control 2 */ 53#define ADV7183_VS_FIELD_CTRL_2 0x32 /* Vsync field control 2 */
54#define ADV7183_VS_FIELD_CTRL_3 0x33 /* Vsync field control 3 */ 54#define ADV7183_VS_FIELD_CTRL_3 0x33 /* Vsync field control 3 */
55#define ADV7183_HS_POS_CTRL_1 0x34 /* Hsync positon control 1 */ 55#define ADV7183_HS_POS_CTRL_1 0x34 /* Hsync position control 1 */
56#define ADV7183_HS_POS_CTRL_2 0x35 /* Hsync positon control 2 */ 56#define ADV7183_HS_POS_CTRL_2 0x35 /* Hsync position control 2 */
57#define ADV7183_HS_POS_CTRL_3 0x36 /* Hsync positon control 3 */ 57#define ADV7183_HS_POS_CTRL_3 0x36 /* Hsync position control 3 */
58#define ADV7183_POLARITY 0x37 /* Polarity */ 58#define ADV7183_POLARITY 0x37 /* Polarity */
59#define ADV7183_NTSC_COMB_CTRL 0x38 /* NTSC comb control */ 59#define ADV7183_NTSC_COMB_CTRL 0x38 /* NTSC comb control */
60#define ADV7183_PAL_COMB_CTRL 0x39 /* PAL comb control */ 60#define ADV7183_PAL_COMB_CTRL 0x39 /* PAL comb control */
diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c
index fbfdd2fc2a36..a324106b9f11 100644
--- a/drivers/media/i2c/adv7604.c
+++ b/drivers/media/i2c/adv7604.c
@@ -877,7 +877,7 @@ static void configure_custom_video_timings(struct v4l2_subdev *sd,
877 break; 877 break;
878 case ADV7604_MODE_HDMI: 878 case ADV7604_MODE_HDMI:
879 /* set default prim_mode/vid_std for HDMI 879 /* set default prim_mode/vid_std for HDMI
880 accoring to [REF_03, c. 4.2] */ 880 according to [REF_03, c. 4.2] */
881 io_write(sd, 0x00, 0x02); /* video std */ 881 io_write(sd, 0x00, 0x02); /* video std */
882 io_write(sd, 0x01, 0x06); /* prim mode */ 882 io_write(sd, 0x01, 0x06); /* prim mode */
883 break; 883 break;
diff --git a/drivers/media/i2c/adv7842.c b/drivers/media/i2c/adv7842.c
index 22f729d66a96..b154f36740b4 100644
--- a/drivers/media/i2c/adv7842.c
+++ b/drivers/media/i2c/adv7842.c
@@ -1013,7 +1013,7 @@ static void configure_custom_video_timings(struct v4l2_subdev *sd,
1013 break; 1013 break;
1014 case ADV7842_MODE_HDMI: 1014 case ADV7842_MODE_HDMI:
1015 /* set default prim_mode/vid_std for HDMI 1015 /* set default prim_mode/vid_std for HDMI
1016 accoring to [REF_03, c. 4.2] */ 1016 according to [REF_03, c. 4.2] */
1017 io_write(sd, 0x00, 0x02); /* video std */ 1017 io_write(sd, 0x00, 0x02); /* video std */
1018 io_write(sd, 0x01, 0x06); /* prim mode */ 1018 io_write(sd, 0x01, 0x06); /* prim mode */
1019 break; 1019 break;
diff --git a/drivers/media/i2c/ir-kbd-i2c.c b/drivers/media/i2c/ir-kbd-i2c.c
index 82bf5679da30..99ee456700f4 100644
--- a/drivers/media/i2c/ir-kbd-i2c.c
+++ b/drivers/media/i2c/ir-kbd-i2c.c
@@ -394,7 +394,7 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
394 394
395 if (!rc) { 395 if (!rc) {
396 /* 396 /*
397 * If platform_data doesn't specify rc_dev, initilize it 397 * If platform_data doesn't specify rc_dev, initialize it
398 * internally 398 * internally
399 */ 399 */
400 rc = rc_allocate_device(); 400 rc = rc_allocate_device();
diff --git a/drivers/media/i2c/m5mols/m5mols_controls.c b/drivers/media/i2c/m5mols/m5mols_controls.c
index f34429e452ab..a60931e66312 100644
--- a/drivers/media/i2c/m5mols/m5mols_controls.c
+++ b/drivers/media/i2c/m5mols/m5mols_controls.c
@@ -544,7 +544,7 @@ int m5mols_init_controls(struct v4l2_subdev *sd)
544 u16 zoom_step; 544 u16 zoom_step;
545 int ret; 545 int ret;
546 546
547 /* Determine the firmware dependant control range and step values */ 547 /* Determine the firmware dependent control range and step values */
548 ret = m5mols_read_u16(sd, AE_MAX_GAIN_MON, &exposure_max); 548 ret = m5mols_read_u16(sd, AE_MAX_GAIN_MON, &exposure_max);
549 if (ret < 0) 549 if (ret < 0)
550 return ret; 550 return ret;
diff --git a/drivers/media/i2c/mt9p031.c b/drivers/media/i2c/mt9p031.c
index 4734836fe5a4..1c2303d18bf4 100644
--- a/drivers/media/i2c/mt9p031.c
+++ b/drivers/media/i2c/mt9p031.c
@@ -19,6 +19,7 @@
19#include <linux/i2c.h> 19#include <linux/i2c.h>
20#include <linux/log2.h> 20#include <linux/log2.h>
21#include <linux/module.h> 21#include <linux/module.h>
22#include <linux/of.h>
22#include <linux/of_gpio.h> 23#include <linux/of_gpio.h>
23#include <linux/pm.h> 24#include <linux/pm.h>
24#include <linux/regulator/consumer.h> 25#include <linux/regulator/consumer.h>
diff --git a/drivers/media/i2c/s5c73m3/s5c73m3-core.c b/drivers/media/i2c/s5c73m3/s5c73m3-core.c
index 6fec9384d86e..e7f555cc827a 100644
--- a/drivers/media/i2c/s5c73m3/s5c73m3-core.c
+++ b/drivers/media/i2c/s5c73m3/s5c73m3-core.c
@@ -1460,7 +1460,7 @@ static int s5c73m3_oif_registered(struct v4l2_subdev *sd)
1460 mutex_unlock(&state->lock); 1460 mutex_unlock(&state->lock);
1461 1461
1462 v4l2_dbg(1, s5c73m3_dbg, sd, "%s: Booting %s (%d)\n", 1462 v4l2_dbg(1, s5c73m3_dbg, sd, "%s: Booting %s (%d)\n",
1463 __func__, ret ? "failed" : "succeded", ret); 1463 __func__, ret ? "failed" : "succeeded", ret);
1464 1464
1465 return ret; 1465 return ret;
1466} 1466}
diff --git a/drivers/media/i2c/s5c73m3/s5c73m3.h b/drivers/media/i2c/s5c73m3/s5c73m3.h
index 9d2c08652246..9dfa516f6944 100644
--- a/drivers/media/i2c/s5c73m3/s5c73m3.h
+++ b/drivers/media/i2c/s5c73m3/s5c73m3.h
@@ -393,7 +393,7 @@ struct s5c73m3 {
393 393
394 /* External master clock frequency */ 394 /* External master clock frequency */
395 u32 mclk_frequency; 395 u32 mclk_frequency;
396 /* Video bus type - MIPI-CSI2/paralell */ 396 /* Video bus type - MIPI-CSI2/parallel */
397 enum v4l2_mbus_type bus_type; 397 enum v4l2_mbus_type bus_type;
398 398
399 const struct s5c73m3_frame_size *sensor_pix_size[2]; 399 const struct s5c73m3_frame_size *sensor_pix_size[2];
diff --git a/drivers/media/i2c/saa7115.c b/drivers/media/i2c/saa7115.c
index 637d02634527..afdbcb045cee 100644
--- a/drivers/media/i2c/saa7115.c
+++ b/drivers/media/i2c/saa7115.c
@@ -1699,7 +1699,7 @@ static void saa711x_write_platform_data(struct saa711x_state *state,
1699 * the analog demod. 1699 * the analog demod.
1700 * If the tuner is not found, it returns -ENODEV. 1700 * If the tuner is not found, it returns -ENODEV.
1701 * If auto-detection is disabled and the tuner doesn't match what it was 1701 * If auto-detection is disabled and the tuner doesn't match what it was
1702 * requred, it returns -EINVAL and fills 'name'. 1702 * required, it returns -EINVAL and fills 'name'.
1703 * If the chip is found, it returns the chip ID and fills 'name'. 1703 * If the chip is found, it returns the chip ID and fills 'name'.
1704 */ 1704 */
1705static int saa711x_detect_chip(struct i2c_client *client, 1705static int saa711x_detect_chip(struct i2c_client *client,
diff --git a/drivers/media/i2c/soc_camera/ov5642.c b/drivers/media/i2c/soc_camera/ov5642.c
index 0a5c5d4fedd6..d2daa6a8f272 100644
--- a/drivers/media/i2c/soc_camera/ov5642.c
+++ b/drivers/media/i2c/soc_camera/ov5642.c
@@ -642,7 +642,7 @@ static const struct ov5642_datafmt
642static int reg_read(struct i2c_client *client, u16 reg, u8 *val) 642static int reg_read(struct i2c_client *client, u16 reg, u8 *val)
643{ 643{
644 int ret; 644 int ret;
645 /* We have 16-bit i2c addresses - care for endianess */ 645 /* We have 16-bit i2c addresses - care for endianness */
646 unsigned char data[2] = { reg >> 8, reg & 0xff }; 646 unsigned char data[2] = { reg >> 8, reg & 0xff };
647 647
648 ret = i2c_master_send(client, data, 2); 648 ret = i2c_master_send(client, data, 2);
diff --git a/drivers/media/i2c/ths7303.c b/drivers/media/i2c/ths7303.c
index 42276d93624c..ed9ae8875348 100644
--- a/drivers/media/i2c/ths7303.c
+++ b/drivers/media/i2c/ths7303.c
@@ -83,7 +83,8 @@ static int ths7303_write(struct v4l2_subdev *sd, u8 reg, u8 val)
83} 83}
84 84
85/* following function is used to set ths7303 */ 85/* following function is used to set ths7303 */
86int ths7303_setval(struct v4l2_subdev *sd, enum ths7303_filter_mode mode) 86static int ths7303_setval(struct v4l2_subdev *sd,
87 enum ths7303_filter_mode mode)
87{ 88{
88 struct i2c_client *client = v4l2_get_subdevdata(sd); 89 struct i2c_client *client = v4l2_get_subdevdata(sd);
89 struct ths7303_state *state = to_state(sd); 90 struct ths7303_state *state = to_state(sd);
diff --git a/drivers/media/i2c/wm8775.c b/drivers/media/i2c/wm8775.c
index 3f584a7d0781..bee7946faa7c 100644
--- a/drivers/media/i2c/wm8775.c
+++ b/drivers/media/i2c/wm8775.c
@@ -130,12 +130,10 @@ static int wm8775_s_routing(struct v4l2_subdev *sd,
130 return -EINVAL; 130 return -EINVAL;
131 } 131 }
132 state->input = input; 132 state->input = input;
133 if (!v4l2_ctrl_g_ctrl(state->mute)) 133 if (v4l2_ctrl_g_ctrl(state->mute))
134 return 0; 134 return 0;
135 if (!v4l2_ctrl_g_ctrl(state->vol)) 135 if (!v4l2_ctrl_g_ctrl(state->vol))
136 return 0; 136 return 0;
137 if (!v4l2_ctrl_g_ctrl(state->bal))
138 return 0;
139 wm8775_set_audio(sd, 1); 137 wm8775_set_audio(sd, 1);
140 return 0; 138 return 0;
141} 139}
diff --git a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c
index a3b1ee9c00d7..92a06fd85865 100644
--- a/drivers/media/pci/bt8xx/bttv-driver.c
+++ b/drivers/media/pci/bt8xx/bttv-driver.c
@@ -4182,7 +4182,8 @@ static int bttv_probe(struct pci_dev *dev, const struct pci_device_id *pci_id)
4182 } 4182 }
4183 btv->std = V4L2_STD_PAL; 4183 btv->std = V4L2_STD_PAL;
4184 init_irqreg(btv); 4184 init_irqreg(btv);
4185 v4l2_ctrl_handler_setup(hdl); 4185 if (!bttv_tvcards[btv->c.type].no_video)
4186 v4l2_ctrl_handler_setup(hdl);
4186 if (hdl->error) { 4187 if (hdl->error) {
4187 result = hdl->error; 4188 result = hdl->error;
4188 goto fail2; 4189 goto fail2;
diff --git a/drivers/media/pci/cx18/cx18-driver.h b/drivers/media/pci/cx18/cx18-driver.h
index 2767c64df0c8..57f4688ea55b 100644
--- a/drivers/media/pci/cx18/cx18-driver.h
+++ b/drivers/media/pci/cx18/cx18-driver.h
@@ -262,7 +262,7 @@ struct cx18_options {
262}; 262};
263 263
264/* per-mdl bit flags */ 264/* per-mdl bit flags */
265#define CX18_F_M_NEED_SWAP 0 /* mdl buffer data must be endianess swapped */ 265#define CX18_F_M_NEED_SWAP 0 /* mdl buffer data must be endianness swapped */
266 266
267/* per-stream, s_flags */ 267/* per-stream, s_flags */
268#define CX18_F_S_CLAIMED 3 /* this stream is claimed */ 268#define CX18_F_S_CLAIMED 3 /* this stream is claimed */
diff --git a/drivers/media/pci/cx23885/cx23885-417.c b/drivers/media/pci/cx23885/cx23885-417.c
index e3fc2c71808a..95666eee7b27 100644
--- a/drivers/media/pci/cx23885/cx23885-417.c
+++ b/drivers/media/pci/cx23885/cx23885-417.c
@@ -427,7 +427,7 @@ int mc417_register_read(struct cx23885_dev *dev, u16 address, u32 *value)
427 cx_write(MC417_RWD, regval); 427 cx_write(MC417_RWD, regval);
428 428
429 /* Transition RD to effect read transaction across bus. 429 /* Transition RD to effect read transaction across bus.
430 * Transtion 0x5000 -> 0x9000 correct (RD/RDY -> WR/RDY)? 430 * Transition 0x5000 -> 0x9000 correct (RD/RDY -> WR/RDY)?
431 * Should it be 0x9000 -> 0xF000 (also why is RDY being set, its 431 * Should it be 0x9000 -> 0xF000 (also why is RDY being set, its
432 * input only...) 432 * input only...)
433 */ 433 */
diff --git a/drivers/media/pci/pluto2/pluto2.c b/drivers/media/pci/pluto2/pluto2.c
index 8164d74b46a4..655d6854a8d7 100644
--- a/drivers/media/pci/pluto2/pluto2.c
+++ b/drivers/media/pci/pluto2/pluto2.c
@@ -401,7 +401,7 @@ static int pluto_hw_init(struct pluto *pluto)
401 /* set automatic LED control by FPGA */ 401 /* set automatic LED control by FPGA */
402 pluto_rw(pluto, REG_MISC, MISC_ALED, MISC_ALED); 402 pluto_rw(pluto, REG_MISC, MISC_ALED, MISC_ALED);
403 403
404 /* set data endianess */ 404 /* set data endianness */
405#ifdef __LITTLE_ENDIAN 405#ifdef __LITTLE_ENDIAN
406 pluto_rw(pluto, REG_PIDn(0), PID0_END, PID0_END); 406 pluto_rw(pluto, REG_PIDn(0), PID0_END, PID0_END);
407#else 407#else
diff --git a/drivers/media/pci/saa7164/saa7164-core.c b/drivers/media/pci/saa7164/saa7164-core.c
index 57ef5456f1e8..1bf06970ca3e 100644
--- a/drivers/media/pci/saa7164/saa7164-core.c
+++ b/drivers/media/pci/saa7164/saa7164-core.c
@@ -1354,9 +1354,11 @@ static int saa7164_initdev(struct pci_dev *pci_dev,
1354 if (fw_debug) { 1354 if (fw_debug) {
1355 dev->kthread = kthread_run(saa7164_thread_function, dev, 1355 dev->kthread = kthread_run(saa7164_thread_function, dev,
1356 "saa7164 debug"); 1356 "saa7164 debug");
1357 if (!dev->kthread) 1357 if (IS_ERR(dev->kthread)) {
1358 dev->kthread = NULL;
1358 printk(KERN_ERR "%s() Failed to create " 1359 printk(KERN_ERR "%s() Failed to create "
1359 "debug kernel thread\n", __func__); 1360 "debug kernel thread\n", __func__);
1361 }
1360 } 1362 }
1361 1363
1362 } /* != BOARD_UNKNOWN */ 1364 } /* != BOARD_UNKNOWN */
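
kthread_run() reports failure with an ERR_PTR-encoded errno, never NULL, so the old !dev->kthread test could not trigger and a failed thread would be dereferenced later; the hunk switches to IS_ERR() and clears the pointer. A stand-alone sketch of the ERR_PTR convention the new check relies on, using simplified re-statements of the kernel helpers for illustration only:

#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO 4095

/* simplified versions of the kernel's ERR_PTR()/PTR_ERR()/IS_ERR() */
static inline void *ERR_PTR(long error)     { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* stand-in for kthread_run(): fails by returning an encoded errno, not NULL */
static void *fake_kthread_run(int should_fail)
{
	static int thread_object;

	return should_fail ? ERR_PTR(-ENOMEM) : &thread_object;
}

int main(void)
{
	void *kthread = fake_kthread_run(1);

	if (IS_ERR(kthread)) {		/* a NULL check would miss this */
		printf("thread creation failed: %ld\n", PTR_ERR(kthread));
		kthread = NULL;
	}
	return 0;
}
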
diff --git a/drivers/media/platform/coda.c b/drivers/media/platform/coda.c
index bd72fb97fea5..61f3dbcc259f 100644
--- a/drivers/media/platform/coda.c
+++ b/drivers/media/platform/coda.c
@@ -1434,7 +1434,7 @@ static void coda_buf_queue(struct vb2_buffer *vb)
1434 if (q_data->fourcc == V4L2_PIX_FMT_H264 && 1434 if (q_data->fourcc == V4L2_PIX_FMT_H264 &&
1435 vb->vb2_queue->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) { 1435 vb->vb2_queue->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
1436 /* 1436 /*
1437 * For backwards compatiblity, queuing an empty buffer marks 1437 * For backwards compatibility, queuing an empty buffer marks
1438 * the stream end 1438 * the stream end
1439 */ 1439 */
1440 if (vb2_get_plane_payload(vb, 0) == 0) 1440 if (vb2_get_plane_payload(vb, 0) == 0)
diff --git a/drivers/media/platform/exynos4-is/fimc-core.c b/drivers/media/platform/exynos4-is/fimc-core.c
index 3d66d88ea3a1..f7915695c907 100644
--- a/drivers/media/platform/exynos4-is/fimc-core.c
+++ b/drivers/media/platform/exynos4-is/fimc-core.c
@@ -1039,7 +1039,7 @@ static int fimc_runtime_resume(struct device *dev)
1039 1039
1040 dbg("fimc%d: state: 0x%lx", fimc->id, fimc->state); 1040 dbg("fimc%d: state: 0x%lx", fimc->id, fimc->state);
1041 1041
1042 /* Enable clocks and perform basic initalization */ 1042 /* Enable clocks and perform basic initialization */
1043 clk_enable(fimc->clock[CLK_GATE]); 1043 clk_enable(fimc->clock[CLK_GATE]);
1044 fimc_hw_reset(fimc); 1044 fimc_hw_reset(fimc);
1045 1045
diff --git a/drivers/media/platform/exynos4-is/media-dev.c b/drivers/media/platform/exynos4-is/media-dev.c
index 7a4ee4c0449d..c1bce170df6f 100644
--- a/drivers/media/platform/exynos4-is/media-dev.c
+++ b/drivers/media/platform/exynos4-is/media-dev.c
@@ -759,7 +759,7 @@ static int fimc_md_register_platform_entity(struct fimc_md *fmd,
759 goto dev_unlock; 759 goto dev_unlock;
760 760
761 drvdata = dev_get_drvdata(dev); 761 drvdata = dev_get_drvdata(dev);
762 /* Some subdev didn't probe succesfully id drvdata is NULL */ 762 /* Some subdev didn't probe successfully id drvdata is NULL */
763 if (drvdata) { 763 if (drvdata) {
764 switch (plat_entity) { 764 switch (plat_entity) {
765 case IDX_FIMC: 765 case IDX_FIMC:
diff --git a/drivers/media/platform/marvell-ccic/mmp-driver.c b/drivers/media/platform/marvell-ccic/mmp-driver.c
index 3458fa0e2fd5..054507f16734 100644
--- a/drivers/media/platform/marvell-ccic/mmp-driver.c
+++ b/drivers/media/platform/marvell-ccic/mmp-driver.c
@@ -142,12 +142,6 @@ static int mmpcam_power_up(struct mcam_camera *mcam)
142 struct mmp_camera *cam = mcam_to_cam(mcam); 142 struct mmp_camera *cam = mcam_to_cam(mcam);
143 struct mmp_camera_platform_data *pdata; 143 struct mmp_camera_platform_data *pdata;
144 144
145 if (mcam->bus_type == V4L2_MBUS_CSI2) {
146 cam->mipi_clk = devm_clk_get(mcam->dev, "mipi");
147 if ((IS_ERR(cam->mipi_clk) && mcam->dphy[2] == 0))
148 return PTR_ERR(cam->mipi_clk);
149 }
150
151/* 145/*
152 * Turn on power and clocks to the controller. 146 * Turn on power and clocks to the controller.
153 */ 147 */
@@ -186,12 +180,6 @@ static void mmpcam_power_down(struct mcam_camera *mcam)
186 gpio_set_value(pdata->sensor_power_gpio, 0); 180 gpio_set_value(pdata->sensor_power_gpio, 0);
187 gpio_set_value(pdata->sensor_reset_gpio, 0); 181 gpio_set_value(pdata->sensor_reset_gpio, 0);
188 182
189 if (mcam->bus_type == V4L2_MBUS_CSI2 && !IS_ERR(cam->mipi_clk)) {
190 if (cam->mipi_clk)
191 devm_clk_put(mcam->dev, cam->mipi_clk);
192 cam->mipi_clk = NULL;
193 }
194
195 mcam_clk_disable(mcam); 183 mcam_clk_disable(mcam);
196} 184}
197 185
@@ -292,8 +280,9 @@ void mmpcam_calc_dphy(struct mcam_camera *mcam)
292 return; 280 return;
293 281
294 /* get the escape clk, this is hard coded */ 282 /* get the escape clk, this is hard coded */
283 clk_prepare_enable(cam->mipi_clk);
295 tx_clk_esc = (clk_get_rate(cam->mipi_clk) / 1000000) / 12; 284 tx_clk_esc = (clk_get_rate(cam->mipi_clk) / 1000000) / 12;
296 285 clk_disable_unprepare(cam->mipi_clk);
297 /* 286 /*
298 * dphy[2] - CSI2_DPHY6: 287 * dphy[2] - CSI2_DPHY6:
299 * bit 0 ~ bit 7: CK Term Enable 288 * bit 0 ~ bit 7: CK Term Enable
@@ -325,19 +314,6 @@ static irqreturn_t mmpcam_irq(int irq, void *data)
325 return IRQ_RETVAL(handled); 314 return IRQ_RETVAL(handled);
326} 315}
327 316
328static void mcam_deinit_clk(struct mcam_camera *mcam)
329{
330 unsigned int i;
331
332 for (i = 0; i < NR_MCAM_CLK; i++) {
333 if (!IS_ERR(mcam->clk[i])) {
334 if (mcam->clk[i])
335 devm_clk_put(mcam->dev, mcam->clk[i]);
336 }
337 mcam->clk[i] = NULL;
338 }
339}
340
341static void mcam_init_clk(struct mcam_camera *mcam) 317static void mcam_init_clk(struct mcam_camera *mcam)
342{ 318{
343 unsigned int i; 319 unsigned int i;
@@ -371,7 +347,6 @@ static int mmpcam_probe(struct platform_device *pdev)
371 if (cam == NULL) 347 if (cam == NULL)
372 return -ENOMEM; 348 return -ENOMEM;
373 cam->pdev = pdev; 349 cam->pdev = pdev;
374 cam->mipi_clk = NULL;
375 INIT_LIST_HEAD(&cam->devlist); 350 INIT_LIST_HEAD(&cam->devlist);
376 351
377 mcam = &cam->mcam; 352 mcam = &cam->mcam;
@@ -387,6 +362,11 @@ static int mmpcam_probe(struct platform_device *pdev)
387 mcam->mclk_div = pdata->mclk_div; 362 mcam->mclk_div = pdata->mclk_div;
388 mcam->bus_type = pdata->bus_type; 363 mcam->bus_type = pdata->bus_type;
389 mcam->dphy = pdata->dphy; 364 mcam->dphy = pdata->dphy;
365 if (mcam->bus_type == V4L2_MBUS_CSI2) {
366 cam->mipi_clk = devm_clk_get(mcam->dev, "mipi");
367 if ((IS_ERR(cam->mipi_clk) && mcam->dphy[2] == 0))
368 return PTR_ERR(cam->mipi_clk);
369 }
390 mcam->mipi_enabled = false; 370 mcam->mipi_enabled = false;
391 mcam->lane = pdata->lane; 371 mcam->lane = pdata->lane;
392 mcam->chip_id = MCAM_ARMADA610; 372 mcam->chip_id = MCAM_ARMADA610;
@@ -444,7 +424,7 @@ static int mmpcam_probe(struct platform_device *pdev)
444 */ 424 */
445 ret = mmpcam_power_up(mcam); 425 ret = mmpcam_power_up(mcam);
446 if (ret) 426 if (ret)
447 goto out_deinit_clk; 427 return ret;
448 ret = mccic_register(mcam); 428 ret = mccic_register(mcam);
449 if (ret) 429 if (ret)
450 goto out_power_down; 430 goto out_power_down;
@@ -469,8 +449,6 @@ out_unregister:
469 mccic_shutdown(mcam); 449 mccic_shutdown(mcam);
470out_power_down: 450out_power_down:
471 mmpcam_power_down(mcam); 451 mmpcam_power_down(mcam);
472out_deinit_clk:
473 mcam_deinit_clk(mcam);
474 return ret; 452 return ret;
475} 453}
476 454
@@ -478,18 +456,10 @@ out_deinit_clk:
478static int mmpcam_remove(struct mmp_camera *cam) 456static int mmpcam_remove(struct mmp_camera *cam)
479{ 457{
480 struct mcam_camera *mcam = &cam->mcam; 458 struct mcam_camera *mcam = &cam->mcam;
481 struct mmp_camera_platform_data *pdata;
482 459
483 mmpcam_remove_device(cam); 460 mmpcam_remove_device(cam);
484 mccic_shutdown(mcam); 461 mccic_shutdown(mcam);
485 mmpcam_power_down(mcam); 462 mmpcam_power_down(mcam);
486 pdata = cam->pdev->dev.platform_data;
487 gpio_free(pdata->sensor_reset_gpio);
488 gpio_free(pdata->sensor_power_gpio);
489 mcam_deinit_clk(mcam);
490 iounmap(cam->power_regs);
491 iounmap(mcam->regs);
492 kfree(cam);
493 return 0; 463 return 0;
494} 464}
495 465
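
The marvell-ccic hunks lean on the device-managed resource model: a clock obtained with devm_clk_get() is released automatically when the device is unbound or probe fails, so the hand-rolled mcam_deinit_clk()/devm_clk_put() teardown and the extra error label can go, and the MIPI clock lookup moves into probe where a failure simply returns. A kernel-style sketch of the simplified probe-time lookup; illustrative only, not buildable outside the tree:

/* sketch: acquire the clock once in probe; no explicit put is needed,
 * the devm core drops it when the device is unbound or probe fails */
if (mcam->bus_type == V4L2_MBUS_CSI2) {
	cam->mipi_clk = devm_clk_get(mcam->dev, "mipi");
	if (IS_ERR(cam->mipi_clk) && mcam->dphy[2] == 0)
		return PTR_ERR(cam->mipi_clk);
}
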
diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c
index 1c3608039663..561bce8ffb1b 100644
--- a/drivers/media/platform/omap3isp/isp.c
+++ b/drivers/media/platform/omap3isp/isp.c
@@ -1673,7 +1673,7 @@ void omap3isp_print_status(struct isp_device *isp)
1673 * ISP clocks get disabled in suspend(). Similarly, the clocks are reenabled in 1673 * ISP clocks get disabled in suspend(). Similarly, the clocks are reenabled in
1674 * resume(), and the the pipelines are restarted in complete(). 1674 * resume(), and the the pipelines are restarted in complete().
1675 * 1675 *
1676 * TODO: PM dependencies between the ISP and sensors are not modeled explicitly 1676 * TODO: PM dependencies between the ISP and sensors are not modelled explicitly
1677 * yet. 1677 * yet.
1678 */ 1678 */
1679static int isp_pm_prepare(struct device *dev) 1679static int isp_pm_prepare(struct device *dev)
diff --git a/drivers/media/platform/omap3isp/ispvideo.c b/drivers/media/platform/omap3isp/ispvideo.c
index a908d006f527..f6304bb074f5 100644
--- a/drivers/media/platform/omap3isp/ispvideo.c
+++ b/drivers/media/platform/omap3isp/ispvideo.c
@@ -339,14 +339,11 @@ __isp_video_get_format(struct isp_video *video, struct v4l2_format *format)
339 if (subdev == NULL) 339 if (subdev == NULL)
340 return -EINVAL; 340 return -EINVAL;
341 341
342 mutex_lock(&video->mutex);
343
344 fmt.pad = pad; 342 fmt.pad = pad;
345 fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE; 343 fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
346 ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
347 if (ret == -ENOIOCTLCMD)
348 ret = -EINVAL;
349 344
345 mutex_lock(&video->mutex);
346 ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
350 mutex_unlock(&video->mutex); 347 mutex_unlock(&video->mutex);
351 348
352 if (ret) 349 if (ret)
diff --git a/drivers/media/platform/s5p-mfc/regs-mfc.h b/drivers/media/platform/s5p-mfc/regs-mfc.h
index 9319e93599ae..6ccc3f8c122a 100644
--- a/drivers/media/platform/s5p-mfc/regs-mfc.h
+++ b/drivers/media/platform/s5p-mfc/regs-mfc.h
@@ -382,7 +382,7 @@
382#define S5P_FIMV_R2H_CMD_EDFU_INIT_RET 16 382#define S5P_FIMV_R2H_CMD_EDFU_INIT_RET 16
383#define S5P_FIMV_R2H_CMD_ERR_RET 32 383#define S5P_FIMV_R2H_CMD_ERR_RET 32
384 384
385/* Dummy definition for MFCv6 compatibilty */ 385/* Dummy definition for MFCv6 compatibility */
386#define S5P_FIMV_CODEC_H264_MVC_DEC -1 386#define S5P_FIMV_CODEC_H264_MVC_DEC -1
387#define S5P_FIMV_R2H_CMD_FIELD_DONE_RET -1 387#define S5P_FIMV_R2H_CMD_FIELD_DONE_RET -1
388#define S5P_FIMV_MFC_RESET -1 388#define S5P_FIMV_MFC_RESET -1
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c
index 5f2c4ad6c2cb..e46067a57853 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c
@@ -239,7 +239,7 @@ static void s5p_mfc_handle_frame_copy_time(struct s5p_mfc_ctx *ctx)
239 frame_type = s5p_mfc_hw_call(dev->mfc_ops, get_dec_frame_type, dev); 239 frame_type = s5p_mfc_hw_call(dev->mfc_ops, get_dec_frame_type, dev);
240 240
241 /* Copy timestamp / timecode from decoded src to dst and set 241 /* Copy timestamp / timecode from decoded src to dst and set
242 appropraite flags */ 242 appropriate flags */
243 src_buf = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list); 243 src_buf = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
244 list_for_each_entry(dst_buf, &ctx->dst_queue, list) { 244 list_for_each_entry(dst_buf, &ctx->dst_queue, list) {
245 if (vb2_dma_contig_plane_dma_addr(dst_buf->b, 0) == dec_y_addr) { 245 if (vb2_dma_contig_plane_dma_addr(dst_buf->b, 0) == dec_y_addr) {
@@ -428,7 +428,7 @@ static void s5p_mfc_handle_error(struct s5p_mfc_dev *dev,
428 case MFCINST_FINISHING: 428 case MFCINST_FINISHING:
429 case MFCINST_FINISHED: 429 case MFCINST_FINISHED:
430 case MFCINST_RUNNING: 430 case MFCINST_RUNNING:
431 /* It is higly probable that an error occured 431 /* It is highly probable that an error occurred
432 * while decoding a frame */ 432 * while decoding a frame */
433 clear_work_bit(ctx); 433 clear_work_bit(ctx);
434 ctx->state = MFCINST_ERROR; 434 ctx->state = MFCINST_ERROR;
@@ -611,7 +611,7 @@ static irqreturn_t s5p_mfc_irq(int irq, void *priv)
611 mfc_debug(1, "Int reason: %d (err: %08x)\n", reason, err); 611 mfc_debug(1, "Int reason: %d (err: %08x)\n", reason, err);
612 switch (reason) { 612 switch (reason) {
613 case S5P_MFC_R2H_CMD_ERR_RET: 613 case S5P_MFC_R2H_CMD_ERR_RET:
614 /* An error has occured */ 614 /* An error has occurred */
615 if (ctx->state == MFCINST_RUNNING && 615 if (ctx->state == MFCINST_RUNNING &&
616 s5p_mfc_hw_call(dev->mfc_ops, err_dec, err) >= 616 s5p_mfc_hw_call(dev->mfc_ops, err_dec, err) >=
617 dev->warn_start) 617 dev->warn_start)
@@ -840,7 +840,7 @@ static int s5p_mfc_open(struct file *file)
840 mutex_unlock(&dev->mfc_mutex); 840 mutex_unlock(&dev->mfc_mutex);
841 mfc_debug_leave(); 841 mfc_debug_leave();
842 return ret; 842 return ret;
843 /* Deinit when failure occured */ 843 /* Deinit when failure occurred */
844err_queue_init: 844err_queue_init:
845 if (dev->num_inst == 1) 845 if (dev->num_inst == 1)
846 s5p_mfc_deinit_hw(dev); 846 s5p_mfc_deinit_hw(dev);
@@ -881,14 +881,14 @@ static int s5p_mfc_release(struct file *file)
881 /* Mark context as idle */ 881 /* Mark context as idle */
882 clear_work_bit_irqsave(ctx); 882 clear_work_bit_irqsave(ctx);
883 /* If instance was initialised then 883 /* If instance was initialised then
884 * return instance and free reosurces */ 884 * return instance and free resources */
885 if (ctx->inst_no != MFC_NO_INSTANCE_SET) { 885 if (ctx->inst_no != MFC_NO_INSTANCE_SET) {
886 mfc_debug(2, "Has to free instance\n"); 886 mfc_debug(2, "Has to free instance\n");
887 ctx->state = MFCINST_RETURN_INST; 887 ctx->state = MFCINST_RETURN_INST;
888 set_work_bit_irqsave(ctx); 888 set_work_bit_irqsave(ctx);
889 s5p_mfc_clean_ctx_int_flags(ctx); 889 s5p_mfc_clean_ctx_int_flags(ctx);
890 s5p_mfc_hw_call(dev->mfc_ops, try_run, dev); 890 s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
891 /* Wait until instance is returned or timeout occured */ 891 /* Wait until instance is returned or timeout occurred */
892 if (s5p_mfc_wait_for_done_ctx 892 if (s5p_mfc_wait_for_done_ctx
893 (ctx, S5P_MFC_R2H_CMD_CLOSE_INSTANCE_RET, 0)) { 893 (ctx, S5P_MFC_R2H_CMD_CLOSE_INSTANCE_RET, 0)) {
894 s5p_mfc_clock_off(); 894 s5p_mfc_clock_off();
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c b/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c
index 7cab6849fb5b..2475a3c9a0a6 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c
@@ -69,7 +69,7 @@ int s5p_mfc_alloc_firmware(struct s5p_mfc_dev *dev)
69 69
70 } else { 70 } else {
71 /* In this case bank2 can point to the same address as bank1. 71 /* In this case bank2 can point to the same address as bank1.
72 * Firmware will always occupy the beggining of this area so it is 72 * Firmware will always occupy the beginning of this area so it is
73 * impossible having a video frame buffer with zero address. */ 73 * impossible having a video frame buffer with zero address. */
74 dev->bank2 = dev->bank1; 74 dev->bank2 = dev->bank1;
75 } 75 }
diff --git a/drivers/media/platform/s5p-tv/mixer.h b/drivers/media/platform/s5p-tv/mixer.h
index 04e6490a45be..fb2acc53112a 100644
--- a/drivers/media/platform/s5p-tv/mixer.h
+++ b/drivers/media/platform/s5p-tv/mixer.h
@@ -65,7 +65,7 @@ struct mxr_format {
65 int num_subframes; 65 int num_subframes;
66 /** specifies to which subframe belong given plane */ 66 /** specifies to which subframe belong given plane */
67 int plane2subframe[MXR_MAX_PLANES]; 67 int plane2subframe[MXR_MAX_PLANES];
68 /** internal code, driver dependant */ 68 /** internal code, driver dependent */
69 unsigned long cookie; 69 unsigned long cookie;
70}; 70};
71 71
diff --git a/drivers/media/platform/s5p-tv/mixer_video.c b/drivers/media/platform/s5p-tv/mixer_video.c
index 641b1f071e06..81b97db111d8 100644
--- a/drivers/media/platform/s5p-tv/mixer_video.c
+++ b/drivers/media/platform/s5p-tv/mixer_video.c
@@ -528,7 +528,7 @@ static int mxr_s_dv_timings(struct file *file, void *fh,
528 mutex_lock(&mdev->mutex); 528 mutex_lock(&mdev->mutex);
529 529
530 /* timings change cannot be done while there is an entity 530 /* timings change cannot be done while there is an entity
531 * dependant on output configuration 531 * dependent on output configuration
532 */ 532 */
533 if (mdev->n_output > 0) { 533 if (mdev->n_output > 0) {
534 mutex_unlock(&mdev->mutex); 534 mutex_unlock(&mdev->mutex);
@@ -585,7 +585,7 @@ static int mxr_s_std(struct file *file, void *fh, v4l2_std_id norm)
585 mutex_lock(&mdev->mutex); 585 mutex_lock(&mdev->mutex);
586 586
587 /* standard change cannot be done while there is an entity 587 /* standard change cannot be done while there is an entity
588 * dependant on output configuration 588 * dependent on output configuration
589 */ 589 */
590 if (mdev->n_output > 0) { 590 if (mdev->n_output > 0) {
591 mutex_unlock(&mdev->mutex); 591 mutex_unlock(&mdev->mutex);
diff --git a/drivers/media/platform/soc_camera/omap1_camera.c b/drivers/media/platform/soc_camera/omap1_camera.c
index 6769193c7c7b..74ce8b6b79fa 100644
--- a/drivers/media/platform/soc_camera/omap1_camera.c
+++ b/drivers/media/platform/soc_camera/omap1_camera.c
@@ -1495,7 +1495,7 @@ static int omap1_cam_set_bus_param(struct soc_camera_device *icd)
1495 if (ctrlclock & LCLK_EN) 1495 if (ctrlclock & LCLK_EN)
1496 CAM_WRITE(pcdev, CTRLCLOCK, ctrlclock); 1496 CAM_WRITE(pcdev, CTRLCLOCK, ctrlclock);
1497 1497
1498 /* select bus endianess */ 1498 /* select bus endianness */
1499 xlate = soc_camera_xlate_by_fourcc(icd, pixfmt); 1499 xlate = soc_camera_xlate_by_fourcc(icd, pixfmt);
1500 fmt = xlate->host_fmt; 1500 fmt = xlate->host_fmt;
1501 1501
diff --git a/drivers/media/platform/vivi.c b/drivers/media/platform/vivi.c
index 1d3f11965196..2d4e73b45c5e 100644
--- a/drivers/media/platform/vivi.c
+++ b/drivers/media/platform/vivi.c
@@ -1108,7 +1108,7 @@ static int vidioc_s_input(struct file *file, void *priv, unsigned int i)
1108 return 0; 1108 return 0;
1109} 1109}
1110 1110
1111/* timeperframe is arbitrary and continous */ 1111/* timeperframe is arbitrary and continuous */
1112static int vidioc_enum_frameintervals(struct file *file, void *priv, 1112static int vidioc_enum_frameintervals(struct file *file, void *priv,
1113 struct v4l2_frmivalenum *fival) 1113 struct v4l2_frmivalenum *fival)
1114{ 1114{
@@ -1125,7 +1125,7 @@ static int vidioc_enum_frameintervals(struct file *file, void *priv,
1125 1125
1126 fival->type = V4L2_FRMIVAL_TYPE_CONTINUOUS; 1126 fival->type = V4L2_FRMIVAL_TYPE_CONTINUOUS;
1127 1127
1128 /* fill in stepwise (step=1.0 is requred by V4L2 spec) */ 1128 /* fill in stepwise (step=1.0 is required by V4L2 spec) */
1129 fival->stepwise.min = tpf_min; 1129 fival->stepwise.min = tpf_min;
1130 fival->stepwise.max = tpf_max; 1130 fival->stepwise.max = tpf_max;
1131 fival->stepwise.step = (struct v4l2_fract) {1, 1}; 1131 fival->stepwise.step = (struct v4l2_fract) {1, 1};
diff --git a/drivers/media/platform/vsp1/vsp1_drv.c b/drivers/media/platform/vsp1/vsp1_drv.c
index 1c9e771aa15c..d16bf0f41e24 100644
--- a/drivers/media/platform/vsp1/vsp1_drv.c
+++ b/drivers/media/platform/vsp1/vsp1_drv.c
@@ -323,7 +323,7 @@ static void vsp1_clocks_disable(struct vsp1_device *vsp1)
323 * Increment the VSP1 reference count and initialize the device if the first 323 * Increment the VSP1 reference count and initialize the device if the first
324 * reference is taken. 324 * reference is taken.
325 * 325 *
326 * Return a pointer to the VSP1 device or NULL if an error occured. 326 * Return a pointer to the VSP1 device or NULL if an error occurred.
327 */ 327 */
328struct vsp1_device *vsp1_device_get(struct vsp1_device *vsp1) 328struct vsp1_device *vsp1_device_get(struct vsp1_device *vsp1)
329{ 329{
diff --git a/drivers/media/platform/vsp1/vsp1_video.c b/drivers/media/platform/vsp1/vsp1_video.c
index 714c53ef6c11..4b0ac07af662 100644
--- a/drivers/media/platform/vsp1/vsp1_video.c
+++ b/drivers/media/platform/vsp1/vsp1_video.c
@@ -1026,8 +1026,10 @@ int vsp1_video_init(struct vsp1_video *video, struct vsp1_entity *rwpf)
1026 1026
1027 /* ... and the buffers queue... */ 1027 /* ... and the buffers queue... */
1028 video->alloc_ctx = vb2_dma_contig_init_ctx(video->vsp1->dev); 1028 video->alloc_ctx = vb2_dma_contig_init_ctx(video->vsp1->dev);
1029 if (IS_ERR(video->alloc_ctx)) 1029 if (IS_ERR(video->alloc_ctx)) {
1030 ret = PTR_ERR(video->alloc_ctx);
1030 goto error; 1031 goto error;
1032 }
1031 1033
1032 video->queue.type = video->type; 1034 video->queue.type = video->type;
1033 video->queue.io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF; 1035 video->queue.io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
diff --git a/drivers/media/radio/radio-shark.c b/drivers/media/radio/radio-shark.c
index 3db8a8cfe1a8..050b3bb96fec 100644
--- a/drivers/media/radio/radio-shark.c
+++ b/drivers/media/radio/radio-shark.c
@@ -271,8 +271,7 @@ static void shark_unregister_leds(struct shark_device *shark)
271 cancel_work_sync(&shark->led_work); 271 cancel_work_sync(&shark->led_work);
272} 272}
273 273
274#ifdef CONFIG_PM 274static inline void shark_resume_leds(struct shark_device *shark)
275static void shark_resume_leds(struct shark_device *shark)
276{ 275{
277 if (test_bit(BLUE_IS_PULSE, &shark->brightness_new)) 276 if (test_bit(BLUE_IS_PULSE, &shark->brightness_new))
278 set_bit(BLUE_PULSE_LED, &shark->brightness_new); 277 set_bit(BLUE_PULSE_LED, &shark->brightness_new);
@@ -281,7 +280,6 @@ static void shark_resume_leds(struct shark_device *shark)
281 set_bit(RED_LED, &shark->brightness_new); 280 set_bit(RED_LED, &shark->brightness_new);
282 schedule_work(&shark->led_work); 281 schedule_work(&shark->led_work);
283} 282}
284#endif
285#else 283#else
286static int shark_register_leds(struct shark_device *shark, struct device *dev) 284static int shark_register_leds(struct shark_device *shark, struct device *dev)
287{ 285{
diff --git a/drivers/media/radio/radio-shark2.c b/drivers/media/radio/radio-shark2.c
index d86d90dab8bf..8654e0dc5c95 100644
--- a/drivers/media/radio/radio-shark2.c
+++ b/drivers/media/radio/radio-shark2.c
@@ -237,8 +237,7 @@ static void shark_unregister_leds(struct shark_device *shark)
237 cancel_work_sync(&shark->led_work); 237 cancel_work_sync(&shark->led_work);
238} 238}
239 239
240#ifdef CONFIG_PM 240static inline void shark_resume_leds(struct shark_device *shark)
241static void shark_resume_leds(struct shark_device *shark)
242{ 241{
243 int i; 242 int i;
244 243
@@ -247,7 +246,6 @@ static void shark_resume_leds(struct shark_device *shark)
247 246
248 schedule_work(&shark->led_work); 247 schedule_work(&shark->led_work);
249} 248}
250#endif
251#else 249#else
252static int shark_register_leds(struct shark_device *shark, struct device *dev) 250static int shark_register_leds(struct shark_device *shark, struct device *dev)
253{ 251{
diff --git a/drivers/media/radio/radio-si476x.c b/drivers/media/radio/radio-si476x.c
index 9c9084cb99f7..2fd9009f8663 100644
--- a/drivers/media/radio/radio-si476x.c
+++ b/drivers/media/radio/radio-si476x.c
@@ -268,8 +268,8 @@ struct si476x_radio;
268 * 268 *
269 * @tune_freq: Tune chip to a specific frequency 269 * @tune_freq: Tune chip to a specific frequency
270 * @seek_start: Star station seeking 270 * @seek_start: Star station seeking
271 * @rsq_status: Get Recieved Signal Quality(RSQ) status 271 * @rsq_status: Get Received Signal Quality(RSQ) status
272 * @rds_blckcnt: Get recived RDS blocks count 272 * @rds_blckcnt: Get received RDS blocks count
273 * @phase_diversity: Change phase diversity mode of the tuner 273 * @phase_diversity: Change phase diversity mode of the tuner
274 * @phase_div_status: Get phase diversity mode status 274 * @phase_div_status: Get phase diversity mode status
275 * @acf_status: Get the status of Automatically Controlled 275 * @acf_status: Get the status of Automatically Controlled
diff --git a/drivers/media/radio/radio-tea5764.c b/drivers/media/radio/radio-tea5764.c
index 036e2f54f4db..3ed1f5669f79 100644
--- a/drivers/media/radio/radio-tea5764.c
+++ b/drivers/media/radio/radio-tea5764.c
@@ -356,7 +356,7 @@ static int vidioc_s_frequency(struct file *file, void *priv,
356 So we keep it as-is. */ 356 So we keep it as-is. */
357 return -EINVAL; 357 return -EINVAL;
358 } 358 }
359 clamp(freq, FREQ_MIN * FREQ_MUL, FREQ_MAX * FREQ_MUL); 359 freq = clamp(freq, FREQ_MIN * FREQ_MUL, FREQ_MAX * FREQ_MUL);
360 tea5764_power_up(radio); 360 tea5764_power_up(radio);
361 tea5764_tune(radio, (freq * 125) / 2); 361 tea5764_tune(radio, (freq * 125) / 2);
362 return 0; 362 return 0;
diff --git a/drivers/media/radio/tef6862.c b/drivers/media/radio/tef6862.c
index 69e3245a58a0..a9319a24c7ef 100644
--- a/drivers/media/radio/tef6862.c
+++ b/drivers/media/radio/tef6862.c
@@ -112,7 +112,7 @@ static int tef6862_s_frequency(struct v4l2_subdev *sd, const struct v4l2_frequen
112 if (f->tuner != 0) 112 if (f->tuner != 0)
113 return -EINVAL; 113 return -EINVAL;
114 114
115 clamp(freq, TEF6862_LO_FREQ, TEF6862_HI_FREQ); 115 freq = clamp(freq, TEF6862_LO_FREQ, TEF6862_HI_FREQ);
116 pll = 1964 + ((freq - TEF6862_LO_FREQ) * 20) / FREQ_MUL; 116 pll = 1964 + ((freq - TEF6862_LO_FREQ) * 20) / FREQ_MUL;
117 i2cmsg[0] = (MSA_MODE_PRESET << MSA_MODE_SHIFT) | WM_SUB_PLLM; 117 i2cmsg[0] = (MSA_MODE_PRESET << MSA_MODE_SHIFT) | WM_SUB_PLLM;
118 i2cmsg[1] = (pll >> 8) & 0xff; 118 i2cmsg[1] = (pll >> 8) & 0xff;
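
The two radio hunks above (radio-tea5764.c and tef6862.c) fix the same class of bug: clamp() returns the bounded value rather than modifying its argument, so discarding the return value leaves the frequency unclamped. A minimal userspace sketch of the difference; the simplified clamp macro and the frequency limits are illustrative stand-ins, not the kernel definitions:

#include <stdio.h>

/* simplified stand-in for the kernel's clamp(); the real macro also
 * checks that all three arguments have the same type */
#define clamp(val, lo, hi) ((val) < (lo) ? (lo) : ((val) > (hi) ? (hi) : (val)))

int main(void)
{
	unsigned int freq = 999999;                  /* out-of-range request */
	const unsigned int lo = 76000, hi = 108000;  /* illustrative limits */

	clamp(freq, lo, hi);             /* bug: result discarded, freq unchanged */
	printf("discarded: %u\n", freq); /* still 999999 */

	freq = clamp(freq, lo, hi);      /* fix: assign the clamped value */
	printf("assigned:  %u\n", freq); /* now 108000 */
	return 0;
}
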
diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c
index 72e3fa652481..f329485c6629 100644
--- a/drivers/media/rc/imon.c
+++ b/drivers/media/rc/imon.c
@@ -1370,7 +1370,7 @@ static void imon_pad_to_keys(struct imon_context *ictx, unsigned char *buf)
1370 * 0x68nnnnB7 to 0x6AnnnnB7, the left mouse button generates 1370 * 0x68nnnnB7 to 0x6AnnnnB7, the left mouse button generates
1371 * 0x688301b7 and the right one 0x688481b7. All other keys generate 1371 * 0x688301b7 and the right one 0x688481b7. All other keys generate
1372 * 0x2nnnnnnn. Position coordinate is encoded in buf[1] and buf[2] with 1372 * 0x2nnnnnnn. Position coordinate is encoded in buf[1] and buf[2] with
1373 * reversed endianess. Extract direction from buffer, rotate endianess, 1373 * reversed endianness. Extract direction from buffer, rotate endianness,
1374 * adjust sign and feed the values into stabilize(). The resulting codes 1374 * adjust sign and feed the values into stabilize(). The resulting codes
1375 * will be 0x01008000, 0x01007F00, which match the newer devices. 1375 * will be 0x01008000, 0x01007F00, which match the newer devices.
1376 */ 1376 */
diff --git a/drivers/media/rc/redrat3.c b/drivers/media/rc/redrat3.c
index 094484fac94c..a5d4f883d053 100644
--- a/drivers/media/rc/redrat3.c
+++ b/drivers/media/rc/redrat3.c
@@ -118,7 +118,7 @@ static int debug;
118#define RR3_IR_IO_LENGTH_FUZZ 0x04 118#define RR3_IR_IO_LENGTH_FUZZ 0x04
119/* Timeout for end of signal detection */ 119/* Timeout for end of signal detection */
120#define RR3_IR_IO_SIG_TIMEOUT 0x05 120#define RR3_IR_IO_SIG_TIMEOUT 0x05
121/* Minumum value for pause recognition. */ 121/* Minimum value for pause recognition. */
122#define RR3_IR_IO_MIN_PAUSE 0x06 122#define RR3_IR_IO_MIN_PAUSE 0x06
123 123
124/* Clock freq. of EZ-USB chip */ 124/* Clock freq. of EZ-USB chip */
diff --git a/drivers/media/tuners/mt2063.c b/drivers/media/tuners/mt2063.c
index 2e1a02e360ff..20cca405bf45 100644
--- a/drivers/media/tuners/mt2063.c
+++ b/drivers/media/tuners/mt2063.c
@@ -1195,7 +1195,7 @@ static u32 mt2063_set_dnc_output_enable(struct mt2063_state *state,
1195 * DNC Output is selected, the other is always off) 1195 * DNC Output is selected, the other is always off)
1196 * 1196 *
1197 * @state: ptr to mt2063_state structure 1197 * @state: ptr to mt2063_state structure
1198 * @Mode: desired reciever delivery system 1198 * @Mode: desired receiver delivery system
1199 * 1199 *
1200 * Note: Register cache must be valid for it to work 1200 * Note: Register cache must be valid for it to work
1201 */ 1201 */
@@ -2119,7 +2119,7 @@ static int mt2063_set_analog_params(struct dvb_frontend *fe,
2119 2119
2120/* 2120/*
2121 * As defined on EN 300 429, the DVB-C roll-off factor is 0.15. 2121 * As defined on EN 300 429, the DVB-C roll-off factor is 0.15.
2122 * So, the amount of the needed bandwith is given by: 2122 * So, the amount of the needed bandwidth is given by:
2123 * Bw = Symbol_rate * (1 + 0.15) 2123 * Bw = Symbol_rate * (1 + 0.15)
2124 * As such, the maximum symbol rate supported by 6 MHz is given by: 2124 * As such, the maximum symbol rate supported by 6 MHz is given by:
2125 * max_symbol_rate = 6 MHz / 1.15 = 5217391 Bauds 2125 * max_symbol_rate = 6 MHz / 1.15 = 5217391 Bauds
diff --git a/drivers/media/tuners/tuner-xc2028-types.h b/drivers/media/tuners/tuner-xc2028-types.h
index 74dc46a71f64..7e4798783db7 100644
--- a/drivers/media/tuners/tuner-xc2028-types.h
+++ b/drivers/media/tuners/tuner-xc2028-types.h
@@ -119,7 +119,7 @@
119#define V4L2_STD_A2 (V4L2_STD_A2_A | V4L2_STD_A2_B) 119#define V4L2_STD_A2 (V4L2_STD_A2_A | V4L2_STD_A2_B)
120#define V4L2_STD_NICAM (V4L2_STD_NICAM_A | V4L2_STD_NICAM_B) 120#define V4L2_STD_NICAM (V4L2_STD_NICAM_A | V4L2_STD_NICAM_B)
121 121
122/* To preserve backward compatibilty, 122/* To preserve backward compatibility,
123 (std & V4L2_STD_AUDIO) = 0 means that ALL audio stds are supported 123 (std & V4L2_STD_AUDIO) = 0 means that ALL audio stds are supported
124 */ 124 */
125 125
diff --git a/drivers/media/usb/cx231xx/cx231xx-cards.c b/drivers/media/usb/cx231xx/cx231xx-cards.c
index e9d017bea377..528cce958a82 100644
--- a/drivers/media/usb/cx231xx/cx231xx-cards.c
+++ b/drivers/media/usb/cx231xx/cx231xx-cards.c
@@ -1412,8 +1412,8 @@ err_v4l2:
1412 usb_set_intfdata(interface, NULL); 1412 usb_set_intfdata(interface, NULL);
1413err_if: 1413err_if:
1414 usb_put_dev(udev); 1414 usb_put_dev(udev);
1415 kfree(dev);
1416 clear_bit(dev->devno, &cx231xx_devused); 1415 clear_bit(dev->devno, &cx231xx_devused);
1416 kfree(dev);
1417 return retval; 1417 return retval;
1418} 1418}
1419 1419
diff --git a/drivers/media/usb/dvb-usb-v2/af9035.c b/drivers/media/usb/dvb-usb-v2/af9035.c
index c8fcd78425bd..8f9b2cea88f0 100644
--- a/drivers/media/usb/dvb-usb-v2/af9035.c
+++ b/drivers/media/usb/dvb-usb-v2/af9035.c
@@ -131,7 +131,7 @@ static int af9035_wr_regs(struct dvb_usb_device *d, u32 reg, u8 *val, int len)
131{ 131{
132 u8 wbuf[MAX_XFER_SIZE]; 132 u8 wbuf[MAX_XFER_SIZE];
133 u8 mbox = (reg >> 16) & 0xff; 133 u8 mbox = (reg >> 16) & 0xff;
134 struct usb_req req = { CMD_MEM_WR, mbox, sizeof(wbuf), wbuf, 0, NULL }; 134 struct usb_req req = { CMD_MEM_WR, mbox, 6 + len, wbuf, 0, NULL };
135 135
136 if (6 + len > sizeof(wbuf)) { 136 if (6 + len > sizeof(wbuf)) {
137 dev_warn(&d->udev->dev, "%s: i2c wr: len=%d is too big!\n", 137 dev_warn(&d->udev->dev, "%s: i2c wr: len=%d is too big!\n",
@@ -238,14 +238,15 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap,
238 } else { 238 } else {
239 /* I2C */ 239 /* I2C */
240 u8 buf[MAX_XFER_SIZE]; 240 u8 buf[MAX_XFER_SIZE];
241 struct usb_req req = { CMD_I2C_RD, 0, sizeof(buf), 241 struct usb_req req = { CMD_I2C_RD, 0, 5 + msg[0].len,
242 buf, msg[1].len, msg[1].buf }; 242 buf, msg[1].len, msg[1].buf };
243 243
244 if (5 + msg[0].len > sizeof(buf)) { 244 if (5 + msg[0].len > sizeof(buf)) {
245 dev_warn(&d->udev->dev, 245 dev_warn(&d->udev->dev,
246 "%s: i2c xfer: len=%d is too big!\n", 246 "%s: i2c xfer: len=%d is too big!\n",
247 KBUILD_MODNAME, msg[0].len); 247 KBUILD_MODNAME, msg[0].len);
248 return -EOPNOTSUPP; 248 ret = -EOPNOTSUPP;
249 goto unlock;
249 } 250 }
250 req.mbox |= ((msg[0].addr & 0x80) >> 3); 251 req.mbox |= ((msg[0].addr & 0x80) >> 3);
251 buf[0] = msg[1].len; 252 buf[0] = msg[1].len;
@@ -274,14 +275,15 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap,
274 } else { 275 } else {
275 /* I2C */ 276 /* I2C */
276 u8 buf[MAX_XFER_SIZE]; 277 u8 buf[MAX_XFER_SIZE];
277 struct usb_req req = { CMD_I2C_WR, 0, sizeof(buf), buf, 278 struct usb_req req = { CMD_I2C_WR, 0, 5 + msg[0].len,
278 0, NULL }; 279 buf, 0, NULL };
279 280
280 if (5 + msg[0].len > sizeof(buf)) { 281 if (5 + msg[0].len > sizeof(buf)) {
281 dev_warn(&d->udev->dev, 282 dev_warn(&d->udev->dev,
282 "%s: i2c xfer: len=%d is too big!\n", 283 "%s: i2c xfer: len=%d is too big!\n",
283 KBUILD_MODNAME, msg[0].len); 284 KBUILD_MODNAME, msg[0].len);
284 return -EOPNOTSUPP; 285 ret = -EOPNOTSUPP;
286 goto unlock;
285 } 287 }
286 req.mbox |= ((msg[0].addr & 0x80) >> 3); 288 req.mbox |= ((msg[0].addr & 0x80) >> 3);
287 buf[0] = msg[0].len; 289 buf[0] = msg[0].len;
@@ -319,6 +321,7 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap,
319 ret = -EOPNOTSUPP; 321 ret = -EOPNOTSUPP;
320 } 322 }
321 323
324unlock:
322 mutex_unlock(&d->i2c_mutex); 325 mutex_unlock(&d->i2c_mutex);
323 326
324 if (ret < 0) 327 if (ret < 0)
@@ -1534,6 +1537,8 @@ static const struct usb_device_id af9035_id_table[] = {
1534 /* XXX: that same ID [0ccd:0099] is used by af9015 driver too */ 1537 /* XXX: that same ID [0ccd:0099] is used by af9015 driver too */
1535 { DVB_USB_DEVICE(USB_VID_TERRATEC, 0x0099, 1538 { DVB_USB_DEVICE(USB_VID_TERRATEC, 0x0099,
1536 &af9035_props, "TerraTec Cinergy T Stick Dual RC (rev. 2)", NULL) }, 1539 &af9035_props, "TerraTec Cinergy T Stick Dual RC (rev. 2)", NULL) },
1540 { DVB_USB_DEVICE(USB_VID_LEADTEK, 0x6a05,
1541 &af9035_props, "Leadtek WinFast DTV Dongle Dual", NULL) },
1537 { } 1542 { }
1538}; 1543};
1539MODULE_DEVICE_TABLE(usb, af9035_id_table); 1544MODULE_DEVICE_TABLE(usb, af9035_id_table);
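
The af9035 i2c hunks above replace early return -EOPNOTSUPP statements with ret = -EOPNOTSUPP; goto unlock; so the i2c mutex taken at the top of the transfer function is released even on the oversized-message paths. A self-contained pthread sketch of the same single-exit unlock idiom; the function and variable names are illustrative, not taken from the driver (build with -pthread):

#include <errno.h>
#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* every failure path funnels through the unlock label, so the mutex
 * is released no matter how the function exits */
static int xfer(size_t len, size_t max)
{
	int ret = 0;

	pthread_mutex_lock(&lock);

	if (len > max) {
		ret = -EOPNOTSUPP;   /* was: return -EOPNOTSUPP; (mutex left held) */
		goto unlock;
	}

	/* ... perform the transfer ... */

unlock:
	pthread_mutex_unlock(&lock);
	return ret;
}

int main(void)
{
	printf("%d\n", xfer(64, 32));   /* rejected, lock still released */
	printf("%d\n", xfer(16, 32));   /* accepted */
	return 0;
}
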
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf.c b/drivers/media/usb/dvb-usb-v2/mxl111sf.c
index 2627553f7de1..08240e498451 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf.c
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf.c
@@ -266,7 +266,7 @@ static int mxl111sf_adap_fe_init(struct dvb_frontend *fe)
266 struct mxl111sf_adap_state *adap_state = &state->adap_state[fe->id]; 266 struct mxl111sf_adap_state *adap_state = &state->adap_state[fe->id];
267 int err; 267 int err;
268 268
269 /* exit if we didnt initialize the driver yet */ 269 /* exit if we didn't initialize the driver yet */
270 if (!state->chip_id) { 270 if (!state->chip_id) {
271 mxl_debug("driver not yet initialized, exit."); 271 mxl_debug("driver not yet initialized, exit.");
272 goto fail; 272 goto fail;
@@ -322,7 +322,7 @@ static int mxl111sf_adap_fe_sleep(struct dvb_frontend *fe)
322 struct mxl111sf_adap_state *adap_state = &state->adap_state[fe->id]; 322 struct mxl111sf_adap_state *adap_state = &state->adap_state[fe->id];
323 int err; 323 int err;
324 324
325 /* exit if we didnt initialize the driver yet */ 325 /* exit if we didn't initialize the driver yet */
326 if (!state->chip_id) { 326 if (!state->chip_id) {
327 mxl_debug("driver not yet initialized, exit."); 327 mxl_debug("driver not yet initialized, exit.");
328 goto fail; 328 goto fail;
diff --git a/drivers/media/usb/dvb-usb/technisat-usb2.c b/drivers/media/usb/dvb-usb/technisat-usb2.c
index 40832a1aef6c..98d24aefb640 100644
--- a/drivers/media/usb/dvb-usb/technisat-usb2.c
+++ b/drivers/media/usb/dvb-usb/technisat-usb2.c
@@ -102,7 +102,7 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
102 if (rxlen > 62) { 102 if (rxlen > 62) {
103 err("i2c RX buffer can't exceed 62 bytes (dev 0x%02x)", 103 err("i2c RX buffer can't exceed 62 bytes (dev 0x%02x)",
104 device_addr); 104 device_addr);
105 txlen = 62; 105 rxlen = 62;
106 } 106 }
107 107
108 b[0] = I2C_SPEED_100KHZ_BIT; 108 b[0] = I2C_SPEED_100KHZ_BIT;
diff --git a/drivers/media/usb/em28xx/em28xx-video.c b/drivers/media/usb/em28xx/em28xx-video.c
index fc5d60efd4ab..dd19c9ff76e0 100644
--- a/drivers/media/usb/em28xx/em28xx-video.c
+++ b/drivers/media/usb/em28xx/em28xx-video.c
@@ -1664,8 +1664,8 @@ static int em28xx_v4l2_close(struct file *filp)
1664 1664
1665 em28xx_videodbg("users=%d\n", dev->users); 1665 em28xx_videodbg("users=%d\n", dev->users);
1666 1666
1667 mutex_lock(&dev->lock);
1668 vb2_fop_release(filp); 1667 vb2_fop_release(filp);
1668 mutex_lock(&dev->lock);
1669 1669
1670 if (dev->users == 1) { 1670 if (dev->users == 1) {
1671 /* the device is already disconnect, 1671 /* the device is already disconnect,
diff --git a/drivers/media/usb/gspca/gl860/gl860.c b/drivers/media/usb/gspca/gl860/gl860.c
index cb1e64ca59c9..cea8d7f51c3c 100644
--- a/drivers/media/usb/gspca/gl860/gl860.c
+++ b/drivers/media/usb/gspca/gl860/gl860.c
@@ -438,7 +438,7 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
438 s32 nToSkip = 438 s32 nToSkip =
439 sd->swapRB * (gspca_dev->cam.cam_mode[mode].bytesperline + 1); 439 sd->swapRB * (gspca_dev->cam.cam_mode[mode].bytesperline + 1);
440 440
441 /* Test only against 0202h, so endianess does not matter */ 441 /* Test only against 0202h, so endianness does not matter */
442 switch (*(s16 *) data) { 442 switch (*(s16 *) data) {
443 case 0x0202: /* End of frame, start a new one */ 443 case 0x0202: /* End of frame, start a new one */
444 gspca_frame_add(gspca_dev, LAST_PACKET, NULL, 0); 444 gspca_frame_add(gspca_dev, LAST_PACKET, NULL, 0);
diff --git a/drivers/media/usb/gspca/pac207.c b/drivers/media/usb/gspca/pac207.c
index cd79c180f67b..07529e5a0c56 100644
--- a/drivers/media/usb/gspca/pac207.c
+++ b/drivers/media/usb/gspca/pac207.c
@@ -416,7 +416,7 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
416#if IS_ENABLED(CONFIG_INPUT) 416#if IS_ENABLED(CONFIG_INPUT)
417static int sd_int_pkt_scan(struct gspca_dev *gspca_dev, 417static int sd_int_pkt_scan(struct gspca_dev *gspca_dev,
418 u8 *data, /* interrupt packet data */ 418 u8 *data, /* interrupt packet data */
419 int len) /* interrput packet length */ 419 int len) /* interrupt packet length */
420{ 420{
421 int ret = -EINVAL; 421 int ret = -EINVAL;
422 422
diff --git a/drivers/media/usb/gspca/pac7302.c b/drivers/media/usb/gspca/pac7302.c
index a91509643563..2fd1c5e31a0f 100644
--- a/drivers/media/usb/gspca/pac7302.c
+++ b/drivers/media/usb/gspca/pac7302.c
@@ -874,7 +874,7 @@ static int sd_dbg_s_register(struct gspca_dev *gspca_dev,
874#if IS_ENABLED(CONFIG_INPUT) 874#if IS_ENABLED(CONFIG_INPUT)
875static int sd_int_pkt_scan(struct gspca_dev *gspca_dev, 875static int sd_int_pkt_scan(struct gspca_dev *gspca_dev,
876 u8 *data, /* interrupt packet data */ 876 u8 *data, /* interrupt packet data */
877 int len) /* interrput packet length */ 877 int len) /* interrupt packet length */
878{ 878{
879 int ret = -EINVAL; 879 int ret = -EINVAL;
880 u8 data0, data1; 880 u8 data0, data1;
diff --git a/drivers/media/usb/gspca/stk1135.c b/drivers/media/usb/gspca/stk1135.c
index 1fc80af2a189..48234c9a8b6c 100644
--- a/drivers/media/usb/gspca/stk1135.c
+++ b/drivers/media/usb/gspca/stk1135.c
@@ -361,6 +361,9 @@ static void stk1135_configure_clock(struct gspca_dev *gspca_dev)
361 361
362 /* set serial interface clock divider (30MHz/0x1f*16+2) = 60240 kHz) */ 362 /* set serial interface clock divider (30MHz/0x1f*16+2) = 60240 kHz) */
363 reg_w(gspca_dev, STK1135_REG_SICTL + 2, 0x1f); 363 reg_w(gspca_dev, STK1135_REG_SICTL + 2, 0x1f);
364
365 /* wait a while for sensor to catch up */
366 udelay(1000);
364} 367}
365 368
366static void stk1135_camera_disable(struct gspca_dev *gspca_dev) 369static void stk1135_camera_disable(struct gspca_dev *gspca_dev)
diff --git a/drivers/media/usb/gspca/stv0680.c b/drivers/media/usb/gspca/stv0680.c
index 9c0827631b9c..7f94ec74282e 100644
--- a/drivers/media/usb/gspca/stv0680.c
+++ b/drivers/media/usb/gspca/stv0680.c
@@ -139,7 +139,7 @@ static int sd_config(struct gspca_dev *gspca_dev,
139 struct sd *sd = (struct sd *) gspca_dev; 139 struct sd *sd = (struct sd *) gspca_dev;
140 struct cam *cam = &gspca_dev->cam; 140 struct cam *cam = &gspca_dev->cam;
141 141
142 /* Give the camera some time to settle, otherwise initalization will 142 /* Give the camera some time to settle, otherwise initialization will
143 fail on hotplug, and yes it really needs a full second. */ 143 fail on hotplug, and yes it really needs a full second. */
144 msleep(1000); 144 msleep(1000);
145 145
diff --git a/drivers/media/usb/gspca/sunplus.c b/drivers/media/usb/gspca/sunplus.c
index a517d185febe..46c9f2229a18 100644
--- a/drivers/media/usb/gspca/sunplus.c
+++ b/drivers/media/usb/gspca/sunplus.c
@@ -1027,6 +1027,7 @@ static const struct usb_device_id device_table[] = {
1027 {USB_DEVICE(0x055f, 0xc650), BS(SPCA533, 0)}, 1027 {USB_DEVICE(0x055f, 0xc650), BS(SPCA533, 0)},
1028 {USB_DEVICE(0x05da, 0x1018), BS(SPCA504B, 0)}, 1028 {USB_DEVICE(0x05da, 0x1018), BS(SPCA504B, 0)},
1029 {USB_DEVICE(0x06d6, 0x0031), BS(SPCA533, 0)}, 1029 {USB_DEVICE(0x06d6, 0x0031), BS(SPCA533, 0)},
1030 {USB_DEVICE(0x06d6, 0x0041), BS(SPCA504B, 0)},
1030 {USB_DEVICE(0x0733, 0x1311), BS(SPCA533, 0)}, 1031 {USB_DEVICE(0x0733, 0x1311), BS(SPCA533, 0)},
1031 {USB_DEVICE(0x0733, 0x1314), BS(SPCA533, 0)}, 1032 {USB_DEVICE(0x0733, 0x1314), BS(SPCA533, 0)},
1032 {USB_DEVICE(0x0733, 0x2211), BS(SPCA533, 0)}, 1033 {USB_DEVICE(0x0733, 0x2211), BS(SPCA533, 0)},
diff --git a/drivers/media/usb/gspca/zc3xx.c b/drivers/media/usb/gspca/zc3xx.c
index 7b95d8e88a20..d3e1b6d8bf49 100644
--- a/drivers/media/usb/gspca/zc3xx.c
+++ b/drivers/media/usb/gspca/zc3xx.c
@@ -6905,7 +6905,7 @@ static int sd_get_jcomp(struct gspca_dev *gspca_dev,
6905#if IS_ENABLED(CONFIG_INPUT) 6905#if IS_ENABLED(CONFIG_INPUT)
6906static int sd_int_pkt_scan(struct gspca_dev *gspca_dev, 6906static int sd_int_pkt_scan(struct gspca_dev *gspca_dev,
6907 u8 *data, /* interrupt packet data */ 6907 u8 *data, /* interrupt packet data */
6908 int len) /* interrput packet length */ 6908 int len) /* interrupt packet length */
6909{ 6909{
6910 if (len == 8 && data[4] == 1) { 6910 if (len == 8 && data[4] == 1) {
6911 input_report_key(gspca_dev->input_dev, KEY_CAMERA, 1); 6911 input_report_key(gspca_dev->input_dev, KEY_CAMERA, 1);
diff --git a/drivers/media/usb/pwc/pwc-if.c b/drivers/media/usb/pwc/pwc-if.c
index 77bbf7889659..78c9bc8e7f56 100644
--- a/drivers/media/usb/pwc/pwc-if.c
+++ b/drivers/media/usb/pwc/pwc-if.c
@@ -1039,7 +1039,7 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
1039 /* Set the leds off */ 1039 /* Set the leds off */
1040 pwc_set_leds(pdev, 0, 0); 1040 pwc_set_leds(pdev, 0, 0);
1041 1041
1042 /* Setup intial videomode */ 1042 /* Setup initial videomode */
1043 rc = pwc_set_video_mode(pdev, MAX_WIDTH, MAX_HEIGHT, 1043 rc = pwc_set_video_mode(pdev, MAX_WIDTH, MAX_HEIGHT,
1044 V4L2_PIX_FMT_YUV420, 30, &compression, 1); 1044 V4L2_PIX_FMT_YUV420, 30, &compression, 1);
1045 if (rc) 1045 if (rc)
diff --git a/drivers/media/usb/usbtv/usbtv.c b/drivers/media/usb/usbtv/usbtv.c
index 8a505a90d318..6222a4ab1e00 100644
--- a/drivers/media/usb/usbtv/usbtv.c
+++ b/drivers/media/usb/usbtv/usbtv.c
@@ -50,13 +50,8 @@
50#define USBTV_ISOC_TRANSFERS 16 50#define USBTV_ISOC_TRANSFERS 16
51#define USBTV_ISOC_PACKETS 8 51#define USBTV_ISOC_PACKETS 8
52 52
53#define USBTV_WIDTH 720
54#define USBTV_HEIGHT 480
55
56#define USBTV_CHUNK_SIZE 256 53#define USBTV_CHUNK_SIZE 256
57#define USBTV_CHUNK 240 54#define USBTV_CHUNK 240
58#define USBTV_CHUNKS (USBTV_WIDTH * USBTV_HEIGHT \
59 / 4 / USBTV_CHUNK)
60 55
61/* Chunk header. */ 56/* Chunk header. */
62#define USBTV_MAGIC_OK(chunk) ((be32_to_cpu(chunk[0]) & 0xff000000) \ 57#define USBTV_MAGIC_OK(chunk) ((be32_to_cpu(chunk[0]) & 0xff000000) \
@@ -65,6 +60,27 @@
65#define USBTV_ODD(chunk) ((be32_to_cpu(chunk[0]) & 0x0000f000) >> 15) 60#define USBTV_ODD(chunk) ((be32_to_cpu(chunk[0]) & 0x0000f000) >> 15)
66#define USBTV_CHUNK_NO(chunk) (be32_to_cpu(chunk[0]) & 0x00000fff) 61#define USBTV_CHUNK_NO(chunk) (be32_to_cpu(chunk[0]) & 0x00000fff)
67 62
63#define USBTV_TV_STD (V4L2_STD_525_60 | V4L2_STD_PAL)
64
65/* parameters for supported TV norms */
66struct usbtv_norm_params {
67 v4l2_std_id norm;
68 int cap_width, cap_height;
69};
70
71static struct usbtv_norm_params norm_params[] = {
72 {
73 .norm = V4L2_STD_525_60,
74 .cap_width = 720,
75 .cap_height = 480,
76 },
77 {
78 .norm = V4L2_STD_PAL,
79 .cap_width = 720,
80 .cap_height = 576,
81 }
82};
83
68/* A single videobuf2 frame buffer. */ 84/* A single videobuf2 frame buffer. */
69struct usbtv_buf { 85struct usbtv_buf {
70 struct vb2_buffer vb; 86 struct vb2_buffer vb;
@@ -94,11 +110,38 @@ struct usbtv {
94 USBTV_COMPOSITE_INPUT, 110 USBTV_COMPOSITE_INPUT,
95 USBTV_SVIDEO_INPUT, 111 USBTV_SVIDEO_INPUT,
96 } input; 112 } input;
113 v4l2_std_id norm;
114 int width, height;
115 int n_chunks;
97 int iso_size; 116 int iso_size;
98 unsigned int sequence; 117 unsigned int sequence;
99 struct urb *isoc_urbs[USBTV_ISOC_TRANSFERS]; 118 struct urb *isoc_urbs[USBTV_ISOC_TRANSFERS];
100}; 119};
101 120
121static int usbtv_configure_for_norm(struct usbtv *usbtv, v4l2_std_id norm)
122{
123 int i, ret = 0;
124 struct usbtv_norm_params *params = NULL;
125
126 for (i = 0; i < ARRAY_SIZE(norm_params); i++) {
127 if (norm_params[i].norm & norm) {
128 params = &norm_params[i];
129 break;
130 }
131 }
132
133 if (params) {
134 usbtv->width = params->cap_width;
135 usbtv->height = params->cap_height;
136 usbtv->n_chunks = usbtv->width * usbtv->height
137 / 4 / USBTV_CHUNK;
138 usbtv->norm = params->norm;
139 } else
140 ret = -EINVAL;
141
142 return ret;
143}
144
102static int usbtv_set_regs(struct usbtv *usbtv, const u16 regs[][2], int size) 145static int usbtv_set_regs(struct usbtv *usbtv, const u16 regs[][2], int size)
103{ 146{
104 int ret; 147 int ret;
@@ -158,6 +201,57 @@ static int usbtv_select_input(struct usbtv *usbtv, int input)
158 return ret; 201 return ret;
159} 202}
160 203
204static int usbtv_select_norm(struct usbtv *usbtv, v4l2_std_id norm)
205{
206 int ret;
207 static const u16 pal[][2] = {
208 { USBTV_BASE + 0x001a, 0x0068 },
209 { USBTV_BASE + 0x010e, 0x0072 },
210 { USBTV_BASE + 0x010f, 0x00a2 },
211 { USBTV_BASE + 0x0112, 0x00b0 },
212 { USBTV_BASE + 0x0117, 0x0001 },
213 { USBTV_BASE + 0x0118, 0x002c },
214 { USBTV_BASE + 0x012d, 0x0010 },
215 { USBTV_BASE + 0x012f, 0x0020 },
216 { USBTV_BASE + 0x024f, 0x0002 },
217 { USBTV_BASE + 0x0254, 0x0059 },
218 { USBTV_BASE + 0x025a, 0x0016 },
219 { USBTV_BASE + 0x025b, 0x0035 },
220 { USBTV_BASE + 0x0263, 0x0017 },
221 { USBTV_BASE + 0x0266, 0x0016 },
222 { USBTV_BASE + 0x0267, 0x0036 }
223 };
224
225 static const u16 ntsc[][2] = {
226 { USBTV_BASE + 0x001a, 0x0079 },
227 { USBTV_BASE + 0x010e, 0x0068 },
228 { USBTV_BASE + 0x010f, 0x009c },
229 { USBTV_BASE + 0x0112, 0x00f0 },
230 { USBTV_BASE + 0x0117, 0x0000 },
231 { USBTV_BASE + 0x0118, 0x00fc },
232 { USBTV_BASE + 0x012d, 0x0004 },
233 { USBTV_BASE + 0x012f, 0x0008 },
234 { USBTV_BASE + 0x024f, 0x0001 },
235 { USBTV_BASE + 0x0254, 0x005f },
236 { USBTV_BASE + 0x025a, 0x0012 },
237 { USBTV_BASE + 0x025b, 0x0001 },
238 { USBTV_BASE + 0x0263, 0x001c },
239 { USBTV_BASE + 0x0266, 0x0011 },
240 { USBTV_BASE + 0x0267, 0x0005 }
241 };
242
243 ret = usbtv_configure_for_norm(usbtv, norm);
244
245 if (!ret) {
246 if (norm & V4L2_STD_525_60)
247 ret = usbtv_set_regs(usbtv, ntsc, ARRAY_SIZE(ntsc));
248 else if (norm & V4L2_STD_PAL)
249 ret = usbtv_set_regs(usbtv, pal, ARRAY_SIZE(pal));
250 }
251
252 return ret;
253}
254
161static int usbtv_setup_capture(struct usbtv *usbtv) 255static int usbtv_setup_capture(struct usbtv *usbtv)
162{ 256{
163 int ret; 257 int ret;
@@ -225,26 +319,11 @@ static int usbtv_setup_capture(struct usbtv *usbtv)
225 319
226 { USBTV_BASE + 0x0284, 0x0088 }, 320 { USBTV_BASE + 0x0284, 0x0088 },
227 { USBTV_BASE + 0x0003, 0x0004 }, 321 { USBTV_BASE + 0x0003, 0x0004 },
228 { USBTV_BASE + 0x001a, 0x0079 },
229 { USBTV_BASE + 0x0100, 0x00d3 }, 322 { USBTV_BASE + 0x0100, 0x00d3 },
230 { USBTV_BASE + 0x010e, 0x0068 },
231 { USBTV_BASE + 0x010f, 0x009c },
232 { USBTV_BASE + 0x0112, 0x00f0 },
233 { USBTV_BASE + 0x0115, 0x0015 }, 323 { USBTV_BASE + 0x0115, 0x0015 },
234 { USBTV_BASE + 0x0117, 0x0000 },
235 { USBTV_BASE + 0x0118, 0x00fc },
236 { USBTV_BASE + 0x012d, 0x0004 },
237 { USBTV_BASE + 0x012f, 0x0008 },
238 { USBTV_BASE + 0x0220, 0x002e }, 324 { USBTV_BASE + 0x0220, 0x002e },
239 { USBTV_BASE + 0x0225, 0x0008 }, 325 { USBTV_BASE + 0x0225, 0x0008 },
240 { USBTV_BASE + 0x024e, 0x0002 }, 326 { USBTV_BASE + 0x024e, 0x0002 },
241 { USBTV_BASE + 0x024f, 0x0001 },
242 { USBTV_BASE + 0x0254, 0x005f },
243 { USBTV_BASE + 0x025a, 0x0012 },
244 { USBTV_BASE + 0x025b, 0x0001 },
245 { USBTV_BASE + 0x0263, 0x001c },
246 { USBTV_BASE + 0x0266, 0x0011 },
247 { USBTV_BASE + 0x0267, 0x0005 },
248 { USBTV_BASE + 0x024e, 0x0002 }, 327 { USBTV_BASE + 0x024e, 0x0002 },
249 { USBTV_BASE + 0x024f, 0x0002 }, 328 { USBTV_BASE + 0x024f, 0x0002 },
250 }; 329 };
@@ -253,6 +332,10 @@ static int usbtv_setup_capture(struct usbtv *usbtv)
253 if (ret) 332 if (ret)
254 return ret; 333 return ret;
255 334
335 ret = usbtv_select_norm(usbtv, usbtv->norm);
336 if (ret)
337 return ret;
338
256 ret = usbtv_select_input(usbtv, usbtv->input); 339 ret = usbtv_select_input(usbtv, usbtv->input);
257 if (ret) 340 if (ret)
258 return ret; 341 return ret;
@@ -296,7 +379,7 @@ static void usbtv_image_chunk(struct usbtv *usbtv, u32 *chunk)
296 frame_id = USBTV_FRAME_ID(chunk); 379 frame_id = USBTV_FRAME_ID(chunk);
297 odd = USBTV_ODD(chunk); 380 odd = USBTV_ODD(chunk);
298 chunk_no = USBTV_CHUNK_NO(chunk); 381 chunk_no = USBTV_CHUNK_NO(chunk);
299 if (chunk_no >= USBTV_CHUNKS) 382 if (chunk_no >= usbtv->n_chunks)
300 return; 383 return;
301 384
302 /* Beginning of a frame. */ 385 /* Beginning of a frame. */
@@ -324,10 +407,10 @@ static void usbtv_image_chunk(struct usbtv *usbtv, u32 *chunk)
324 usbtv->chunks_done++; 407 usbtv->chunks_done++;
325 408
326 /* Last chunk in a frame, signalling an end */ 409 /* Last chunk in a frame, signalling an end */
327 if (odd && chunk_no == USBTV_CHUNKS-1) { 410 if (odd && chunk_no == usbtv->n_chunks-1) {
328 int size = vb2_plane_size(&buf->vb, 0); 411 int size = vb2_plane_size(&buf->vb, 0);
329 enum vb2_buffer_state state = usbtv->chunks_done == 412 enum vb2_buffer_state state = usbtv->chunks_done ==
330 USBTV_CHUNKS ? 413 usbtv->n_chunks ?
331 VB2_BUF_STATE_DONE : 414 VB2_BUF_STATE_DONE :
332 VB2_BUF_STATE_ERROR; 415 VB2_BUF_STATE_ERROR;
333 416
@@ -500,6 +583,8 @@ static int usbtv_querycap(struct file *file, void *priv,
500static int usbtv_enum_input(struct file *file, void *priv, 583static int usbtv_enum_input(struct file *file, void *priv,
501 struct v4l2_input *i) 584 struct v4l2_input *i)
502{ 585{
586 struct usbtv *dev = video_drvdata(file);
587
503 switch (i->index) { 588 switch (i->index) {
504 case USBTV_COMPOSITE_INPUT: 589 case USBTV_COMPOSITE_INPUT:
505 strlcpy(i->name, "Composite", sizeof(i->name)); 590 strlcpy(i->name, "Composite", sizeof(i->name));
@@ -512,7 +597,7 @@ static int usbtv_enum_input(struct file *file, void *priv,
512 } 597 }
513 598
514 i->type = V4L2_INPUT_TYPE_CAMERA; 599 i->type = V4L2_INPUT_TYPE_CAMERA;
515 i->std = V4L2_STD_525_60; 600 i->std = dev->vdev.tvnorms;
516 return 0; 601 return 0;
517} 602}
518 603
@@ -531,23 +616,37 @@ static int usbtv_enum_fmt_vid_cap(struct file *file, void *priv,
531static int usbtv_fmt_vid_cap(struct file *file, void *priv, 616static int usbtv_fmt_vid_cap(struct file *file, void *priv,
532 struct v4l2_format *f) 617 struct v4l2_format *f)
533{ 618{
534 f->fmt.pix.width = USBTV_WIDTH; 619 struct usbtv *usbtv = video_drvdata(file);
535 f->fmt.pix.height = USBTV_HEIGHT; 620
621 f->fmt.pix.width = usbtv->width;
622 f->fmt.pix.height = usbtv->height;
536 f->fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV; 623 f->fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV;
537 f->fmt.pix.field = V4L2_FIELD_INTERLACED; 624 f->fmt.pix.field = V4L2_FIELD_INTERLACED;
538 f->fmt.pix.bytesperline = USBTV_WIDTH * 2; 625 f->fmt.pix.bytesperline = usbtv->width * 2;
539 f->fmt.pix.sizeimage = (f->fmt.pix.bytesperline * f->fmt.pix.height); 626 f->fmt.pix.sizeimage = (f->fmt.pix.bytesperline * f->fmt.pix.height);
540 f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M; 627 f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
541 f->fmt.pix.priv = 0; 628
542 return 0; 629 return 0;
543} 630}
544 631
545static int usbtv_g_std(struct file *file, void *priv, v4l2_std_id *norm) 632static int usbtv_g_std(struct file *file, void *priv, v4l2_std_id *norm)
546{ 633{
547 *norm = V4L2_STD_525_60; 634 struct usbtv *usbtv = video_drvdata(file);
635 *norm = usbtv->norm;
548 return 0; 636 return 0;
549} 637}
550 638
639static int usbtv_s_std(struct file *file, void *priv, v4l2_std_id norm)
640{
641 int ret = -EINVAL;
642 struct usbtv *usbtv = video_drvdata(file);
643
644 if ((norm & V4L2_STD_525_60) || (norm & V4L2_STD_PAL))
645 ret = usbtv_select_norm(usbtv, norm);
646
647 return ret;
648}
649
551static int usbtv_g_input(struct file *file, void *priv, unsigned int *i) 650static int usbtv_g_input(struct file *file, void *priv, unsigned int *i)
552{ 651{
553 struct usbtv *usbtv = video_drvdata(file); 652 struct usbtv *usbtv = video_drvdata(file);
@@ -561,13 +660,6 @@ static int usbtv_s_input(struct file *file, void *priv, unsigned int i)
561 return usbtv_select_input(usbtv, i); 660 return usbtv_select_input(usbtv, i);
562} 661}
563 662
564static int usbtv_s_std(struct file *file, void *priv, v4l2_std_id norm)
565{
566 if (norm & V4L2_STD_525_60)
567 return 0;
568 return -EINVAL;
569}
570
571struct v4l2_ioctl_ops usbtv_ioctl_ops = { 663struct v4l2_ioctl_ops usbtv_ioctl_ops = {
572 .vidioc_querycap = usbtv_querycap, 664 .vidioc_querycap = usbtv_querycap,
573 .vidioc_enum_input = usbtv_enum_input, 665 .vidioc_enum_input = usbtv_enum_input,
@@ -604,10 +696,12 @@ static int usbtv_queue_setup(struct vb2_queue *vq,
604 const struct v4l2_format *v4l_fmt, unsigned int *nbuffers, 696 const struct v4l2_format *v4l_fmt, unsigned int *nbuffers,
605 unsigned int *nplanes, unsigned int sizes[], void *alloc_ctxs[]) 697 unsigned int *nplanes, unsigned int sizes[], void *alloc_ctxs[])
606{ 698{
699 struct usbtv *usbtv = vb2_get_drv_priv(vq);
700
607 if (*nbuffers < 2) 701 if (*nbuffers < 2)
608 *nbuffers = 2; 702 *nbuffers = 2;
609 *nplanes = 1; 703 *nplanes = 1;
610 sizes[0] = USBTV_WIDTH * USBTV_HEIGHT / 2 * sizeof(u32); 704 sizes[0] = USBTV_CHUNK * usbtv->n_chunks * 2 * sizeof(u32);
611 705
612 return 0; 706 return 0;
613} 707}
@@ -690,7 +784,11 @@ static int usbtv_probe(struct usb_interface *intf,
690 return -ENOMEM; 784 return -ENOMEM;
691 usbtv->dev = dev; 785 usbtv->dev = dev;
692 usbtv->udev = usb_get_dev(interface_to_usbdev(intf)); 786 usbtv->udev = usb_get_dev(interface_to_usbdev(intf));
787
693 usbtv->iso_size = size; 788 usbtv->iso_size = size;
789
790 (void)usbtv_configure_for_norm(usbtv, V4L2_STD_525_60);
791
694 spin_lock_init(&usbtv->buflock); 792 spin_lock_init(&usbtv->buflock);
695 mutex_init(&usbtv->v4l2_lock); 793 mutex_init(&usbtv->v4l2_lock);
696 mutex_init(&usbtv->vb2q_lock); 794 mutex_init(&usbtv->vb2q_lock);
@@ -727,7 +825,7 @@ static int usbtv_probe(struct usb_interface *intf,
727 usbtv->vdev.release = video_device_release_empty; 825 usbtv->vdev.release = video_device_release_empty;
728 usbtv->vdev.fops = &usbtv_fops; 826 usbtv->vdev.fops = &usbtv_fops;
729 usbtv->vdev.ioctl_ops = &usbtv_ioctl_ops; 827 usbtv->vdev.ioctl_ops = &usbtv_ioctl_ops;
730 usbtv->vdev.tvnorms = V4L2_STD_525_60; 828 usbtv->vdev.tvnorms = USBTV_TV_STD;
731 usbtv->vdev.queue = &usbtv->vb2q; 829 usbtv->vdev.queue = &usbtv->vb2q;
732 usbtv->vdev.lock = &usbtv->v4l2_lock; 830 usbtv->vdev.lock = &usbtv->v4l2_lock;
733 set_bit(V4L2_FL_USE_FH_PRIO, &usbtv->vdev.flags); 831 set_bit(V4L2_FL_USE_FH_PRIO, &usbtv->vdev.flags);
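
With the usbtv changes above, the driver advertises both V4L2_STD_525_60 and V4L2_STD_PAL and resizes capture to 720x480 or 720x576 accordingly. A hedged userspace sketch of switching the norm and reading back the resulting frame size through the standard V4L2 ioctls; the device path and the minimal error handling are assumptions for brevity:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
	v4l2_std_id std = V4L2_STD_PAL;
	struct v4l2_format fmt = { .type = V4L2_BUF_TYPE_VIDEO_CAPTURE };
	int fd = open("/dev/video0", O_RDWR);   /* example path */

	if (fd < 0)
		return 1;

	/* ask the driver for PAL; with the patch above usbtv should
	 * reprogram the decoder and report a 720x576 format */
	if (ioctl(fd, VIDIOC_S_STD, &std) < 0)
		perror("VIDIOC_S_STD");

	if (ioctl(fd, VIDIOC_G_FMT, &fmt) == 0)
		printf("%ux%u\n", fmt.fmt.pix.width, fmt.fmt.pix.height);

	close(fd);
	return 0;
}
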
diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c
index 899cb6d1c4a4..898c208889cd 100644
--- a/drivers/media/usb/uvc/uvc_video.c
+++ b/drivers/media/usb/uvc/uvc_video.c
@@ -556,7 +556,7 @@ static u16 uvc_video_clock_host_sof(const struct uvc_clock_sample *sample)
556 * 556 *
557 * SOF = ((SOF2 - SOF1) * PTS + SOF1 * STC2 - SOF2 * STC1) / (STC2 - STC1) (1) 557 * SOF = ((SOF2 - SOF1) * PTS + SOF1 * STC2 - SOF2 * STC1) / (STC2 - STC1) (1)
558 * 558 *
559 * to avoid loosing precision in the division. Similarly, the host timestamp is 559 * to avoid losing precision in the division. Similarly, the host timestamp is
560 * computed with 560 * computed with
561 * 561 *
562 * TS = ((TS2 - TS1) * PTS + TS1 * SOF2 - TS2 * SOF1) / (SOF2 - SOF1) (2) 562 * TS = ((TS2 - TS1) * PTS + TS1 * SOF2 - TS2 * SOF1) / (SOF2 - SOF1) (2)
diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
index 60dcc0f3b32e..fb46790d0eca 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls.c
@@ -420,7 +420,7 @@ const char * const *v4l2_ctrl_get_menu(u32 id)
420 "Advanced Simple", 420 "Advanced Simple",
421 "Core", 421 "Core",
422 "Simple Scalable", 422 "Simple Scalable",
423 "Advanced Coding Efficency", 423 "Advanced Coding Efficiency",
424 NULL, 424 NULL,
425 }; 425 };
426 426
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
index b19b306c8f7f..0edc165f418d 100644
--- a/drivers/media/v4l2-core/videobuf2-core.c
+++ b/drivers/media/v4l2-core/videobuf2-core.c
@@ -145,6 +145,25 @@ static void __vb2_buf_dmabuf_put(struct vb2_buffer *vb)
145} 145}
146 146
147/** 147/**
148 * __setup_lengths() - setup initial lengths for every plane in
149 * every buffer on the queue
150 */
151static void __setup_lengths(struct vb2_queue *q, unsigned int n)
152{
153 unsigned int buffer, plane;
154 struct vb2_buffer *vb;
155
156 for (buffer = q->num_buffers; buffer < q->num_buffers + n; ++buffer) {
157 vb = q->bufs[buffer];
158 if (!vb)
159 continue;
160
161 for (plane = 0; plane < vb->num_planes; ++plane)
162 vb->v4l2_planes[plane].length = q->plane_sizes[plane];
163 }
164}
165
166/**
148 * __setup_offsets() - setup unique offsets ("cookies") for every plane in 167 * __setup_offsets() - setup unique offsets ("cookies") for every plane in
149 * every buffer on the queue 168 * every buffer on the queue
150 */ 169 */
@@ -169,7 +188,6 @@ static void __setup_offsets(struct vb2_queue *q, unsigned int n)
169 continue; 188 continue;
170 189
171 for (plane = 0; plane < vb->num_planes; ++plane) { 190 for (plane = 0; plane < vb->num_planes; ++plane) {
172 vb->v4l2_planes[plane].length = q->plane_sizes[plane];
173 vb->v4l2_planes[plane].m.mem_offset = off; 191 vb->v4l2_planes[plane].m.mem_offset = off;
174 192
175 dprintk(3, "Buffer %d, plane %d offset 0x%08lx\n", 193 dprintk(3, "Buffer %d, plane %d offset 0x%08lx\n",
@@ -241,6 +259,7 @@ static int __vb2_queue_alloc(struct vb2_queue *q, enum v4l2_memory memory,
241 q->bufs[q->num_buffers + buffer] = vb; 259 q->bufs[q->num_buffers + buffer] = vb;
242 } 260 }
243 261
262 __setup_lengths(q, buffer);
244 if (memory == V4L2_MEMORY_MMAP) 263 if (memory == V4L2_MEMORY_MMAP)
245 __setup_offsets(q, buffer); 264 __setup_offsets(q, buffer);
246 265
@@ -1824,8 +1843,8 @@ int vb2_expbuf(struct vb2_queue *q, struct v4l2_exportbuffer *eb)
1824 return -EINVAL; 1843 return -EINVAL;
1825 } 1844 }
1826 1845
1827 if (eb->flags & ~O_CLOEXEC) { 1846 if (eb->flags & ~(O_CLOEXEC | O_ACCMODE)) {
1828 dprintk(1, "Queue does support only O_CLOEXEC flag\n"); 1847 dprintk(1, "Queue does support only O_CLOEXEC and access mode flags\n");
1829 return -EINVAL; 1848 return -EINVAL;
1830 } 1849 }
1831 1850
@@ -1848,14 +1867,14 @@ int vb2_expbuf(struct vb2_queue *q, struct v4l2_exportbuffer *eb)
1848 1867
1849 vb_plane = &vb->planes[eb->plane]; 1868 vb_plane = &vb->planes[eb->plane];
1850 1869
1851 dbuf = call_memop(q, get_dmabuf, vb_plane->mem_priv); 1870 dbuf = call_memop(q, get_dmabuf, vb_plane->mem_priv, eb->flags & O_ACCMODE);
1852 if (IS_ERR_OR_NULL(dbuf)) { 1871 if (IS_ERR_OR_NULL(dbuf)) {
1853 dprintk(1, "Failed to export buffer %d, plane %d\n", 1872 dprintk(1, "Failed to export buffer %d, plane %d\n",
1854 eb->index, eb->plane); 1873 eb->index, eb->plane);
1855 return -EINVAL; 1874 return -EINVAL;
1856 } 1875 }
1857 1876
1858 ret = dma_buf_fd(dbuf, eb->flags); 1877 ret = dma_buf_fd(dbuf, eb->flags & ~O_ACCMODE);
1859 if (ret < 0) { 1878 if (ret < 0) {
1860 dprintk(3, "buffer %d, plane %d failed to export (%d)\n", 1879 dprintk(3, "buffer %d, plane %d failed to export (%d)\n",
1861 eb->index, eb->plane, ret); 1880 eb->index, eb->plane, ret);
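
The vb2_expbuf hunk above relaxes the flag check so userspace may pass an access mode (O_RDONLY or O_RDWR) along with O_CLOEXEC; the mode is forwarded to the allocator's get_dmabuf while being masked out of dma_buf_fd(). A hedged userspace sketch of exporting the first MMAP buffer as a read/write DMABUF fd; it assumes buffers were already set up with VIDIOC_REQBUFS and uses /dev/video0 only as an example path:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_exportbuffer exp = {
		.type  = V4L2_BUF_TYPE_VIDEO_CAPTURE,
		.index = 0,                  /* first buffer of the queue */
		.plane = 0,
		.flags = O_CLOEXEC | O_RDWR, /* access mode now accepted */
	};
	int fd = open("/dev/video0", O_RDWR);   /* example path */

	if (fd < 0)
		return 1;

	if (ioctl(fd, VIDIOC_EXPBUF, &exp) == 0)
		printf("dmabuf fd: %d\n", exp.fd);
	else
		perror("VIDIOC_EXPBUF");

	close(fd);
	return 0;
}
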
diff --git a/drivers/media/v4l2-core/videobuf2-dma-contig.c b/drivers/media/v4l2-core/videobuf2-dma-contig.c
index 646f08f4f504..33d3871d1e13 100644
--- a/drivers/media/v4l2-core/videobuf2-dma-contig.c
+++ b/drivers/media/v4l2-core/videobuf2-dma-contig.c
@@ -393,7 +393,7 @@ static struct sg_table *vb2_dc_get_base_sgt(struct vb2_dc_buf *buf)
393 return sgt; 393 return sgt;
394} 394}
395 395
396static struct dma_buf *vb2_dc_get_dmabuf(void *buf_priv) 396static struct dma_buf *vb2_dc_get_dmabuf(void *buf_priv, unsigned long flags)
397{ 397{
398 struct vb2_dc_buf *buf = buf_priv; 398 struct vb2_dc_buf *buf = buf_priv;
399 struct dma_buf *dbuf; 399 struct dma_buf *dbuf;
@@ -404,7 +404,7 @@ static struct dma_buf *vb2_dc_get_dmabuf(void *buf_priv)
404 if (WARN_ON(!buf->sgt_base)) 404 if (WARN_ON(!buf->sgt_base))
405 return NULL; 405 return NULL;
406 406
407 dbuf = dma_buf_export(buf, &vb2_dc_dmabuf_ops, buf->size, 0); 407 dbuf = dma_buf_export(buf, &vb2_dc_dmabuf_ops, buf->size, flags);
408 if (IS_ERR(dbuf)) 408 if (IS_ERR(dbuf))
409 return NULL; 409 return NULL;
410 410
diff --git a/drivers/media/v4l2-core/videobuf2-dma-sg.c b/drivers/media/v4l2-core/videobuf2-dma-sg.c
index 2f860543912c..0d3a8ffe47a3 100644
--- a/drivers/media/v4l2-core/videobuf2-dma-sg.c
+++ b/drivers/media/v4l2-core/videobuf2-dma-sg.c
@@ -178,7 +178,7 @@ static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
178 buf->pages = kzalloc(buf->num_pages * sizeof(struct page *), 178 buf->pages = kzalloc(buf->num_pages * sizeof(struct page *),
179 GFP_KERNEL); 179 GFP_KERNEL);
180 if (!buf->pages) 180 if (!buf->pages)
181 return NULL; 181 goto userptr_fail_alloc_pages;
182 182
183 num_pages_from_user = get_user_pages(current, current->mm, 183 num_pages_from_user = get_user_pages(current, current->mm,
184 vaddr & PAGE_MASK, 184 vaddr & PAGE_MASK,
@@ -204,6 +204,7 @@ userptr_fail_get_user_pages:
204 while (--num_pages_from_user >= 0) 204 while (--num_pages_from_user >= 0)
205 put_page(buf->pages[num_pages_from_user]); 205 put_page(buf->pages[num_pages_from_user]);
206 kfree(buf->pages); 206 kfree(buf->pages);
207userptr_fail_alloc_pages:
207 kfree(buf); 208 kfree(buf);
208 return NULL; 209 return NULL;
209} 210}
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 62a60caa5d1f..dd671582c9a1 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -32,7 +32,7 @@ config MFD_AS3722
32 select MFD_CORE 32 select MFD_CORE
33 select REGMAP_I2C 33 select REGMAP_I2C
34 select REGMAP_IRQ 34 select REGMAP_IRQ
35 depends on I2C && OF 35 depends on I2C=y && OF
36 help 36 help
37 The ams AS3722 is a compact system PMU suitable for mobile phones, 37 The ams AS3722 is a compact system PMU suitable for mobile phones,
38 tablets etc. It has 4 DC/DC step-down regulators, 3 DC/DC step-down 38 tablets etc. It has 4 DC/DC step-down regulators, 3 DC/DC step-down
diff --git a/drivers/mfd/lpc_ich.c b/drivers/mfd/lpc_ich.c
index da1c6566d93d..37edf9e989b0 100644
--- a/drivers/mfd/lpc_ich.c
+++ b/drivers/mfd/lpc_ich.c
@@ -506,7 +506,7 @@ static struct lpc_ich_info lpc_chipset_info[] = {
506 .iTCO_version = 2, 506 .iTCO_version = 2,
507 }, 507 },
508 [LPC_WPT_LP] = { 508 [LPC_WPT_LP] = {
509 .name = "Lynx Point_LP", 509 .name = "Wildcat Point_LP",
510 .iTCO_version = 2, 510 .iTCO_version = 2,
511 }, 511 },
512}; 512};
diff --git a/drivers/mfd/rtsx_pcr.c b/drivers/mfd/rtsx_pcr.c
index 11e20afbdcac..705698fd2c7e 100644
--- a/drivers/mfd/rtsx_pcr.c
+++ b/drivers/mfd/rtsx_pcr.c
@@ -1228,8 +1228,14 @@ static void rtsx_pci_remove(struct pci_dev *pcidev)
1228 1228
1229 pcr->remove_pci = true; 1229 pcr->remove_pci = true;
1230 1230
1231 cancel_delayed_work(&pcr->carddet_work); 1231 /* Disable interrupts at the pcr level */
1232 cancel_delayed_work(&pcr->idle_work); 1232 spin_lock_irq(&pcr->lock);
1233 rtsx_pci_writel(pcr, RTSX_BIER, 0);
1234 pcr->bier = 0;
1235 spin_unlock_irq(&pcr->lock);
1236
1237 cancel_delayed_work_sync(&pcr->carddet_work);
1238 cancel_delayed_work_sync(&pcr->idle_work);
1233 1239
1234 mfd_remove_devices(&pcidev->dev); 1240 mfd_remove_devices(&pcidev->dev);
1235 1241
diff --git a/drivers/mfd/sec-core.c b/drivers/mfd/sec-core.c
index 34c18fb8c089..54cc25546592 100644
--- a/drivers/mfd/sec-core.c
+++ b/drivers/mfd/sec-core.c
@@ -81,31 +81,31 @@ static struct of_device_id sec_dt_match[] = {
81 81
82int sec_reg_read(struct sec_pmic_dev *sec_pmic, u8 reg, void *dest) 82int sec_reg_read(struct sec_pmic_dev *sec_pmic, u8 reg, void *dest)
83{ 83{
84 return regmap_read(sec_pmic->regmap, reg, dest); 84 return regmap_read(sec_pmic->regmap_pmic, reg, dest);
85} 85}
86EXPORT_SYMBOL_GPL(sec_reg_read); 86EXPORT_SYMBOL_GPL(sec_reg_read);
87 87
88int sec_bulk_read(struct sec_pmic_dev *sec_pmic, u8 reg, int count, u8 *buf) 88int sec_bulk_read(struct sec_pmic_dev *sec_pmic, u8 reg, int count, u8 *buf)
89{ 89{
90 return regmap_bulk_read(sec_pmic->regmap, reg, buf, count); 90 return regmap_bulk_read(sec_pmic->regmap_pmic, reg, buf, count);
91} 91}
92EXPORT_SYMBOL_GPL(sec_bulk_read); 92EXPORT_SYMBOL_GPL(sec_bulk_read);
93 93
94int sec_reg_write(struct sec_pmic_dev *sec_pmic, u8 reg, u8 value) 94int sec_reg_write(struct sec_pmic_dev *sec_pmic, u8 reg, u8 value)
95{ 95{
96 return regmap_write(sec_pmic->regmap, reg, value); 96 return regmap_write(sec_pmic->regmap_pmic, reg, value);
97} 97}
98EXPORT_SYMBOL_GPL(sec_reg_write); 98EXPORT_SYMBOL_GPL(sec_reg_write);
99 99
100int sec_bulk_write(struct sec_pmic_dev *sec_pmic, u8 reg, int count, u8 *buf) 100int sec_bulk_write(struct sec_pmic_dev *sec_pmic, u8 reg, int count, u8 *buf)
101{ 101{
102 return regmap_raw_write(sec_pmic->regmap, reg, buf, count); 102 return regmap_raw_write(sec_pmic->regmap_pmic, reg, buf, count);
103} 103}
104EXPORT_SYMBOL_GPL(sec_bulk_write); 104EXPORT_SYMBOL_GPL(sec_bulk_write);
105 105
106int sec_reg_update(struct sec_pmic_dev *sec_pmic, u8 reg, u8 val, u8 mask) 106int sec_reg_update(struct sec_pmic_dev *sec_pmic, u8 reg, u8 val, u8 mask)
107{ 107{
108 return regmap_update_bits(sec_pmic->regmap, reg, mask, val); 108 return regmap_update_bits(sec_pmic->regmap_pmic, reg, mask, val);
109} 109}
110EXPORT_SYMBOL_GPL(sec_reg_update); 110EXPORT_SYMBOL_GPL(sec_reg_update);
111 111
@@ -166,6 +166,11 @@ static struct regmap_config s5m8767_regmap_config = {
166 .cache_type = REGCACHE_FLAT, 166 .cache_type = REGCACHE_FLAT,
167}; 167};
168 168
169static const struct regmap_config sec_rtc_regmap_config = {
170 .reg_bits = 8,
171 .val_bits = 8,
172};
173
169#ifdef CONFIG_OF 174#ifdef CONFIG_OF
170/* 175/*
171 * Only the common platform data elements for s5m8767 are parsed here from the 176 * Only the common platform data elements for s5m8767 are parsed here from the
@@ -266,9 +271,9 @@ static int sec_pmic_probe(struct i2c_client *i2c,
266 break; 271 break;
267 } 272 }
268 273
269 sec_pmic->regmap = devm_regmap_init_i2c(i2c, regmap); 274 sec_pmic->regmap_pmic = devm_regmap_init_i2c(i2c, regmap);
270 if (IS_ERR(sec_pmic->regmap)) { 275 if (IS_ERR(sec_pmic->regmap_pmic)) {
271 ret = PTR_ERR(sec_pmic->regmap); 276 ret = PTR_ERR(sec_pmic->regmap_pmic);
272 dev_err(&i2c->dev, "Failed to allocate register map: %d\n", 277 dev_err(&i2c->dev, "Failed to allocate register map: %d\n",
273 ret); 278 ret);
274 return ret; 279 return ret;
@@ -277,6 +282,15 @@ static int sec_pmic_probe(struct i2c_client *i2c,
277 sec_pmic->rtc = i2c_new_dummy(i2c->adapter, RTC_I2C_ADDR); 282 sec_pmic->rtc = i2c_new_dummy(i2c->adapter, RTC_I2C_ADDR);
278 i2c_set_clientdata(sec_pmic->rtc, sec_pmic); 283 i2c_set_clientdata(sec_pmic->rtc, sec_pmic);
279 284
285 sec_pmic->regmap_rtc = devm_regmap_init_i2c(sec_pmic->rtc,
286 &sec_rtc_regmap_config);
287 if (IS_ERR(sec_pmic->regmap_rtc)) {
288 ret = PTR_ERR(sec_pmic->regmap_rtc);
289 dev_err(&i2c->dev, "Failed to allocate RTC register map: %d\n",
290 ret);
291 return ret;
292 }
293
280 if (pdata && pdata->cfg_pmic_irq) 294 if (pdata && pdata->cfg_pmic_irq)
281 pdata->cfg_pmic_irq(); 295 pdata->cfg_pmic_irq();
282 296
diff --git a/drivers/mfd/sec-irq.c b/drivers/mfd/sec-irq.c
index 0dd84e99081e..b441b1be27cb 100644
--- a/drivers/mfd/sec-irq.c
+++ b/drivers/mfd/sec-irq.c
@@ -280,19 +280,19 @@ int sec_irq_init(struct sec_pmic_dev *sec_pmic)
280 280
281 switch (type) { 281 switch (type) {
282 case S5M8763X: 282 case S5M8763X:
283 ret = regmap_add_irq_chip(sec_pmic->regmap, sec_pmic->irq, 283 ret = regmap_add_irq_chip(sec_pmic->regmap_pmic, sec_pmic->irq,
284 IRQF_TRIGGER_FALLING | IRQF_ONESHOT, 284 IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
285 sec_pmic->irq_base, &s5m8763_irq_chip, 285 sec_pmic->irq_base, &s5m8763_irq_chip,
286 &sec_pmic->irq_data); 286 &sec_pmic->irq_data);
287 break; 287 break;
288 case S5M8767X: 288 case S5M8767X:
289 ret = regmap_add_irq_chip(sec_pmic->regmap, sec_pmic->irq, 289 ret = regmap_add_irq_chip(sec_pmic->regmap_pmic, sec_pmic->irq,
290 IRQF_TRIGGER_FALLING | IRQF_ONESHOT, 290 IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
291 sec_pmic->irq_base, &s5m8767_irq_chip, 291 sec_pmic->irq_base, &s5m8767_irq_chip,
292 &sec_pmic->irq_data); 292 &sec_pmic->irq_data);
293 break; 293 break;
294 case S2MPS11X: 294 case S2MPS11X:
295 ret = regmap_add_irq_chip(sec_pmic->regmap, sec_pmic->irq, 295 ret = regmap_add_irq_chip(sec_pmic->regmap_pmic, sec_pmic->irq,
296 IRQF_TRIGGER_FALLING | IRQF_ONESHOT, 296 IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
297 sec_pmic->irq_base, &s2mps11_irq_chip, 297 sec_pmic->irq_base, &s2mps11_irq_chip,
298 &sec_pmic->irq_data); 298 &sec_pmic->irq_data);
diff --git a/drivers/mfd/ti-ssp.c b/drivers/mfd/ti-ssp.c
index 71e3e0c5bf73..a5424579679c 100644
--- a/drivers/mfd/ti-ssp.c
+++ b/drivers/mfd/ti-ssp.c
@@ -32,6 +32,7 @@
32#include <linux/platform_device.h> 32#include <linux/platform_device.h>
33#include <linux/delay.h> 33#include <linux/delay.h>
34#include <linux/io.h> 34#include <linux/io.h>
35#include <linux/sched.h>
35#include <linux/mfd/core.h> 36#include <linux/mfd/core.h>
36#include <linux/mfd/ti_ssp.h> 37#include <linux/mfd/ti_ssp.h>
37 38
@@ -409,7 +410,6 @@ static int ti_ssp_probe(struct platform_device *pdev)
409 cells[id].id = id; 410 cells[id].id = id;
410 cells[id].name = data->dev_name; 411 cells[id].name = data->dev_name;
411 cells[id].platform_data = data->pdata; 412 cells[id].platform_data = data->pdata;
412 cells[id].data_size = data->pdata_size;
413 } 413 }
414 414
415 error = mfd_add_devices(dev, 0, cells, 2, NULL, 0, NULL); 415 error = mfd_add_devices(dev, 0, cells, 2, NULL, 0, NULL);
diff --git a/drivers/misc/enclosure.c b/drivers/misc/enclosure.c
index 0e8df41aaf14..2cf2bbc0b927 100644
--- a/drivers/misc/enclosure.c
+++ b/drivers/misc/enclosure.c
@@ -198,6 +198,13 @@ static void enclosure_remove_links(struct enclosure_component *cdev)
198{ 198{
199 char name[ENCLOSURE_NAME_SIZE]; 199 char name[ENCLOSURE_NAME_SIZE];
200 200
201 /*
202 * In odd circumstances, like multipath devices, something else may
203 * already have removed the links, so check for this condition first.
204 */
205 if (!cdev->dev->kobj.sd)
206 return;
207
201 enclosure_link_name(cdev, name); 208 enclosure_link_name(cdev, name);
202 sysfs_remove_link(&cdev->dev->kobj, name); 209 sysfs_remove_link(&cdev->dev->kobj, name);
203 sysfs_remove_link(&cdev->cdev.kobj, "device"); 210 sysfs_remove_link(&cdev->cdev.kobj, "device");
diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
index 6c0fde55270d..66f411a6e8ea 100644
--- a/drivers/misc/mei/hw-me-regs.h
+++ b/drivers/misc/mei/hw-me-regs.h
@@ -109,9 +109,12 @@
109#define MEI_DEV_ID_PPT_2 0x1CBA /* Panther Point */ 109#define MEI_DEV_ID_PPT_2 0x1CBA /* Panther Point */
110#define MEI_DEV_ID_PPT_3 0x1DBA /* Panther Point */ 110#define MEI_DEV_ID_PPT_3 0x1DBA /* Panther Point */
111 111
112#define MEI_DEV_ID_LPT 0x8C3A /* Lynx Point */ 112#define MEI_DEV_ID_LPT_H 0x8C3A /* Lynx Point H */
113#define MEI_DEV_ID_LPT_W 0x8D3A /* Lynx Point - Wellsburg */ 113#define MEI_DEV_ID_LPT_W 0x8D3A /* Lynx Point - Wellsburg */
114#define MEI_DEV_ID_LPT_LP 0x9C3A /* Lynx Point LP */ 114#define MEI_DEV_ID_LPT_LP 0x9C3A /* Lynx Point LP */
115#define MEI_DEV_ID_LPT_HR 0x8CBA /* Lynx Point H Refresh */
116
117#define MEI_DEV_ID_WPT_LP 0x9CBA /* Wildcat Point LP */
115/* 118/*
116 * MEI HW Section 119 * MEI HW Section
117 */ 120 */
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index b96205aece0c..2cab3c0a6805 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -76,9 +76,11 @@ static DEFINE_PCI_DEVICE_TABLE(mei_me_pci_tbl) = {
76 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_1)}, 76 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_1)},
77 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_2)}, 77 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_2)},
78 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_3)}, 78 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_3)},
79 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT)}, 79 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT_H)},
80 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT_W)}, 80 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT_W)},
81 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT_LP)}, 81 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT_LP)},
82 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT_HR)},
83 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_WPT_LP)},
82 84
83 /* required last entry */ 85 /* required last entry */
84 {0, } 86 {0, }
diff --git a/drivers/misc/mic/card/mic_virtio.c b/drivers/misc/mic/card/mic_virtio.c
index 8aa42e738acc..653799b96bfa 100644
--- a/drivers/misc/mic/card/mic_virtio.c
+++ b/drivers/misc/mic/card/mic_virtio.c
@@ -154,14 +154,14 @@ static void mic_reset_inform_host(struct virtio_device *vdev)
154{ 154{
155 struct mic_vdev *mvdev = to_micvdev(vdev); 155 struct mic_vdev *mvdev = to_micvdev(vdev);
156 struct mic_device_ctrl __iomem *dc = mvdev->dc; 156 struct mic_device_ctrl __iomem *dc = mvdev->dc;
157 int retry = 100, i; 157 int retry;
158 158
159 iowrite8(0, &dc->host_ack); 159 iowrite8(0, &dc->host_ack);
160 iowrite8(1, &dc->vdev_reset); 160 iowrite8(1, &dc->vdev_reset);
161 mic_send_intr(mvdev->mdev, mvdev->c2h_vdev_db); 161 mic_send_intr(mvdev->mdev, mvdev->c2h_vdev_db);
162 162
163 /* Wait till host completes all card accesses and acks the reset */ 163 /* Wait till host completes all card accesses and acks the reset */
164 for (i = retry; i--;) { 164 for (retry = 100; retry--;) {
165 if (ioread8(&dc->host_ack)) 165 if (ioread8(&dc->host_ack))
166 break; 166 break;
167 msleep(100); 167 msleep(100);
@@ -187,11 +187,12 @@ static void mic_reset(struct virtio_device *vdev)
187/* 187/*
188 * The virtio_ring code calls this API when it wants to notify the Host. 188 * The virtio_ring code calls this API when it wants to notify the Host.
189 */ 189 */
190static void mic_notify(struct virtqueue *vq) 190static bool mic_notify(struct virtqueue *vq)
191{ 191{
192 struct mic_vdev *mvdev = vq->priv; 192 struct mic_vdev *mvdev = vq->priv;
193 193
194 mic_send_intr(mvdev->mdev, mvdev->c2h_vdev_db); 194 mic_send_intr(mvdev->mdev, mvdev->c2h_vdev_db);
195 return true;
195} 196}
196 197
197static void mic_del_vq(struct virtqueue *vq, int n) 198static void mic_del_vq(struct virtqueue *vq, int n)
@@ -247,17 +248,17 @@ static struct virtqueue *mic_find_vq(struct virtio_device *vdev,
247 /* First assign the vring's allocated in host memory */ 248 /* First assign the vring's allocated in host memory */
248 vqconfig = mic_vq_config(mvdev->desc) + index; 249 vqconfig = mic_vq_config(mvdev->desc) + index;
249 memcpy_fromio(&config, vqconfig, sizeof(config)); 250 memcpy_fromio(&config, vqconfig, sizeof(config));
250 _vr_size = vring_size(config.num, MIC_VIRTIO_RING_ALIGN); 251 _vr_size = vring_size(le16_to_cpu(config.num), MIC_VIRTIO_RING_ALIGN);
251 vr_size = PAGE_ALIGN(_vr_size + sizeof(struct _mic_vring_info)); 252 vr_size = PAGE_ALIGN(_vr_size + sizeof(struct _mic_vring_info));
252 va = mic_card_map(mvdev->mdev, config.address, vr_size); 253 va = mic_card_map(mvdev->mdev, le64_to_cpu(config.address), vr_size);
253 if (!va) 254 if (!va)
254 return ERR_PTR(-ENOMEM); 255 return ERR_PTR(-ENOMEM);
255 mvdev->vr[index] = va; 256 mvdev->vr[index] = va;
256 memset_io(va, 0x0, _vr_size); 257 memset_io(va, 0x0, _vr_size);
257 vq = vring_new_virtqueue(index, 258 vq = vring_new_virtqueue(index, le16_to_cpu(config.num),
258 config.num, MIC_VIRTIO_RING_ALIGN, vdev, 259 MIC_VIRTIO_RING_ALIGN, vdev, false,
259 false, 260 (void __force *)va, mic_notify, callback,
260 va, mic_notify, callback, name); 261 name);
261 if (!vq) { 262 if (!vq) {
262 err = -ENOMEM; 263 err = -ENOMEM;
263 goto unmap; 264 goto unmap;
@@ -272,7 +273,8 @@ static struct virtqueue *mic_find_vq(struct virtio_device *vdev,
272 273
273 /* Allocate and reassign used ring now */ 274 /* Allocate and reassign used ring now */
274 mvdev->used_size[index] = PAGE_ALIGN(sizeof(__u16) * 3 + 275 mvdev->used_size[index] = PAGE_ALIGN(sizeof(__u16) * 3 +
275 sizeof(struct vring_used_elem) * config.num); 276 sizeof(struct vring_used_elem) *
277 le16_to_cpu(config.num));
276 used = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 278 used = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
277 get_order(mvdev->used_size[index])); 279 get_order(mvdev->used_size[index]));
278 if (!used) { 280 if (!used) {
@@ -309,7 +311,7 @@ static int mic_find_vqs(struct virtio_device *vdev, unsigned nvqs,
309{ 311{
310 struct mic_vdev *mvdev = to_micvdev(vdev); 312 struct mic_vdev *mvdev = to_micvdev(vdev);
311 struct mic_device_ctrl __iomem *dc = mvdev->dc; 313 struct mic_device_ctrl __iomem *dc = mvdev->dc;
312 int i, err, retry = 100; 314 int i, err, retry;
313 315
314 /* We must have this many virtqueues. */ 316 /* We must have this many virtqueues. */
315 if (nvqs > ioread8(&mvdev->desc->num_vq)) 317 if (nvqs > ioread8(&mvdev->desc->num_vq))
@@ -331,7 +333,7 @@ static int mic_find_vqs(struct virtio_device *vdev, unsigned nvqs,
331 * rings have been re-assigned. 333 * rings have been re-assigned.
332 */ 334 */
333 mic_send_intr(mvdev->mdev, mvdev->c2h_vdev_db); 335 mic_send_intr(mvdev->mdev, mvdev->c2h_vdev_db);
334 for (i = retry; i--;) { 336 for (retry = 100; retry--;) {
335 if (!ioread8(&dc->used_address_updated)) 337 if (!ioread8(&dc->used_address_updated))
336 break; 338 break;
337 msleep(100); 339 msleep(100);
@@ -519,8 +521,8 @@ static void mic_scan_devices(struct mic_driver *mdrv, bool remove)
519 struct device *dev; 521 struct device *dev;
520 int ret; 522 int ret;
521 523
522 for (i = mic_aligned_size(struct mic_bootparam); 524 for (i = sizeof(struct mic_bootparam); i < MIC_DP_SIZE;
523 i < MIC_DP_SIZE; i += mic_total_desc_size(d)) { 525 i += mic_total_desc_size(d)) {
524 d = mdrv->dp + i; 526 d = mdrv->dp + i;
525 dc = (void __iomem *)d + mic_aligned_desc_size(d); 527 dc = (void __iomem *)d + mic_aligned_desc_size(d);
526 /* 528 /*
@@ -539,7 +541,8 @@ static void mic_scan_devices(struct mic_driver *mdrv, bool remove)
539 continue; 541 continue;
540 542
541 /* device already exists */ 543 /* device already exists */
542 dev = device_find_child(mdrv->dev, d, mic_match_desc); 544 dev = device_find_child(mdrv->dev, (void __force *)d,
545 mic_match_desc);
543 if (dev) { 546 if (dev) {
544 if (remove) 547 if (remove)
545 iowrite8(MIC_VIRTIO_PARAM_DEV_REMOVE, 548 iowrite8(MIC_VIRTIO_PARAM_DEV_REMOVE,
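
Several hunks in mic_virtio.c above replace the "int retry = 100, i; ... for (i = retry; i--;)" idiom with "for (retry = 100; retry--;)", so the counter left over after the loop says how much of the retry budget remained; the host-side hunks further down also print that value. A small standalone model of the difference, where poll_ready() is a made-up stand-in for the ioread8()/wait_event_timeout() checks:

    /* Standalone model of the retry-loop change; poll_ready() is invented. */
    #include <stdio.h>
    #include <stdbool.h>

    static bool poll_ready(int attempt)
    {
            return attempt == 97;   /* pretend the device acks on the 4th try */
    }

    int main(void)
    {
            int retry;

            /* The counter itself carries the budget, so the value left in
             * 'retry' after the loop is meaningful for logging (it is -1
             * only when every attempt was exhausted). */
            for (retry = 100; retry--;) {
                    if (poll_ready(retry))
                            break;
            }
            printf("acked with %d retries left\n", retry);
            return 0;
    }
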
diff --git a/drivers/misc/mic/card/mic_virtio.h b/drivers/misc/mic/card/mic_virtio.h
index 2c5c22c93ba8..d0407ba53bb7 100644
--- a/drivers/misc/mic/card/mic_virtio.h
+++ b/drivers/misc/mic/card/mic_virtio.h
@@ -42,8 +42,8 @@
42 42
43static inline unsigned mic_desc_size(struct mic_device_desc __iomem *desc) 43static inline unsigned mic_desc_size(struct mic_device_desc __iomem *desc)
44{ 44{
45 return mic_aligned_size(*desc) 45 return sizeof(*desc)
46 + ioread8(&desc->num_vq) * mic_aligned_size(struct mic_vqconfig) 46 + ioread8(&desc->num_vq) * sizeof(struct mic_vqconfig)
47 + ioread8(&desc->feature_len) * 2 47 + ioread8(&desc->feature_len) * 2
48 + ioread8(&desc->config_len); 48 + ioread8(&desc->config_len);
49} 49}
@@ -67,8 +67,7 @@ mic_vq_configspace(struct mic_device_desc __iomem *desc)
67} 67}
68static inline unsigned mic_total_desc_size(struct mic_device_desc __iomem *desc) 68static inline unsigned mic_total_desc_size(struct mic_device_desc __iomem *desc)
69{ 69{
70 return mic_aligned_desc_size(desc) + 70 return mic_aligned_desc_size(desc) + sizeof(struct mic_device_ctrl);
71 mic_aligned_size(struct mic_device_ctrl);
72} 71}
73 72
74int mic_devices_init(struct mic_driver *mdrv); 73int mic_devices_init(struct mic_driver *mdrv);
diff --git a/drivers/misc/mic/host/mic_boot.c b/drivers/misc/mic/host/mic_boot.c
index 7558d9186438..b75c6b5cc20f 100644
--- a/drivers/misc/mic/host/mic_boot.c
+++ b/drivers/misc/mic/host/mic_boot.c
@@ -62,7 +62,7 @@ void mic_bootparam_init(struct mic_device *mdev)
62{ 62{
63 struct mic_bootparam *bootparam = mdev->dp; 63 struct mic_bootparam *bootparam = mdev->dp;
64 64
65 bootparam->magic = MIC_MAGIC; 65 bootparam->magic = cpu_to_le32(MIC_MAGIC);
66 bootparam->c2h_shutdown_db = mdev->shutdown_db; 66 bootparam->c2h_shutdown_db = mdev->shutdown_db;
67 bootparam->h2c_shutdown_db = -1; 67 bootparam->h2c_shutdown_db = -1;
68 bootparam->h2c_config_db = -1; 68 bootparam->h2c_config_db = -1;
diff --git a/drivers/misc/mic/host/mic_virtio.c b/drivers/misc/mic/host/mic_virtio.c
index 5b8494bd1e00..e04bb4fe6823 100644
--- a/drivers/misc/mic/host/mic_virtio.c
+++ b/drivers/misc/mic/host/mic_virtio.c
@@ -41,7 +41,7 @@ static int mic_virtio_copy_to_user(struct mic_vdev *mvdev,
41 * We are copying from IO below an should ideally use something 41 * We are copying from IO below an should ideally use something
42 * like copy_to_user_fromio(..) if it existed. 42 * like copy_to_user_fromio(..) if it existed.
43 */ 43 */
44 if (copy_to_user(ubuf, dbuf, len)) { 44 if (copy_to_user(ubuf, (void __force *)dbuf, len)) {
45 err = -EFAULT; 45 err = -EFAULT;
46 dev_err(mic_dev(mvdev), "%s %d err %d\n", 46 dev_err(mic_dev(mvdev), "%s %d err %d\n",
47 __func__, __LINE__, err); 47 __func__, __LINE__, err);
@@ -66,7 +66,7 @@ static int mic_virtio_copy_from_user(struct mic_vdev *mvdev,
66 * We are copying to IO below and should ideally use something 66 * We are copying to IO below and should ideally use something
67 * like copy_from_user_toio(..) if it existed. 67 * like copy_from_user_toio(..) if it existed.
68 */ 68 */
69 if (copy_from_user(dbuf, ubuf, len)) { 69 if (copy_from_user((void __force *)dbuf, ubuf, len)) {
70 err = -EFAULT; 70 err = -EFAULT;
71 dev_err(mic_dev(mvdev), "%s %d err %d\n", 71 dev_err(mic_dev(mvdev), "%s %d err %d\n",
72 __func__, __LINE__, err); 72 __func__, __LINE__, err);
@@ -293,7 +293,7 @@ static void mic_virtio_init_post(struct mic_vdev *mvdev)
293 continue; 293 continue;
294 } 294 }
295 mvdev->mvr[i].vrh.vring.used = 295 mvdev->mvr[i].vrh.vring.used =
296 mvdev->mdev->aper.va + 296 (void __force *)mvdev->mdev->aper.va +
297 le64_to_cpu(vqconfig[i].used_address); 297 le64_to_cpu(vqconfig[i].used_address);
298 } 298 }
299 299
@@ -378,7 +378,7 @@ int mic_virtio_config_change(struct mic_vdev *mvdev,
378 void __user *argp) 378 void __user *argp)
379{ 379{
380 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wake); 380 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wake);
381 int ret = 0, retry = 100, i; 381 int ret = 0, retry, i;
382 struct mic_bootparam *bootparam = mvdev->mdev->dp; 382 struct mic_bootparam *bootparam = mvdev->mdev->dp;
383 s8 db = bootparam->h2c_config_db; 383 s8 db = bootparam->h2c_config_db;
384 384
@@ -401,7 +401,7 @@ int mic_virtio_config_change(struct mic_vdev *mvdev,
401 mvdev->dc->config_change = MIC_VIRTIO_PARAM_CONFIG_CHANGED; 401 mvdev->dc->config_change = MIC_VIRTIO_PARAM_CONFIG_CHANGED;
402 mvdev->mdev->ops->send_intr(mvdev->mdev, db); 402 mvdev->mdev->ops->send_intr(mvdev->mdev, db);
403 403
404 for (i = retry; i--;) { 404 for (retry = 100; retry--;) {
405 ret = wait_event_timeout(wake, 405 ret = wait_event_timeout(wake,
406 mvdev->dc->guest_ack, msecs_to_jiffies(100)); 406 mvdev->dc->guest_ack, msecs_to_jiffies(100));
407 if (ret) 407 if (ret)
@@ -467,7 +467,7 @@ static int mic_copy_dp_entry(struct mic_vdev *mvdev,
467 } 467 }
468 468
469 /* Find the first free device page entry */ 469 /* Find the first free device page entry */
470 for (i = mic_aligned_size(struct mic_bootparam); 470 for (i = sizeof(struct mic_bootparam);
471 i < MIC_DP_SIZE - mic_total_desc_size(dd_config); 471 i < MIC_DP_SIZE - mic_total_desc_size(dd_config);
472 i += mic_total_desc_size(devp)) { 472 i += mic_total_desc_size(devp)) {
473 devp = mdev->dp + i; 473 devp = mdev->dp + i;
@@ -525,6 +525,7 @@ int mic_virtio_add_device(struct mic_vdev *mvdev,
525 char irqname[10]; 525 char irqname[10];
526 struct mic_bootparam *bootparam = mdev->dp; 526 struct mic_bootparam *bootparam = mdev->dp;
527 u16 num; 527 u16 num;
528 dma_addr_t vr_addr;
528 529
529 mutex_lock(&mdev->mic_mutex); 530 mutex_lock(&mdev->mic_mutex);
530 531
@@ -559,17 +560,16 @@ int mic_virtio_add_device(struct mic_vdev *mvdev,
559 } 560 }
560 vr->len = vr_size; 561 vr->len = vr_size;
561 vr->info = vr->va + vring_size(num, MIC_VIRTIO_RING_ALIGN); 562 vr->info = vr->va + vring_size(num, MIC_VIRTIO_RING_ALIGN);
562 vr->info->magic = MIC_MAGIC + mvdev->virtio_id + i; 563 vr->info->magic = cpu_to_le32(MIC_MAGIC + mvdev->virtio_id + i);
563 vqconfig[i].address = mic_map_single(mdev, 564 vr_addr = mic_map_single(mdev, vr->va, vr_size);
564 vr->va, vr_size); 565 if (mic_map_error(vr_addr)) {
565 if (mic_map_error(vqconfig[i].address)) {
566 free_pages((unsigned long)vr->va, get_order(vr_size)); 566 free_pages((unsigned long)vr->va, get_order(vr_size));
567 ret = -ENOMEM; 567 ret = -ENOMEM;
568 dev_err(mic_dev(mvdev), "%s %d err %d\n", 568 dev_err(mic_dev(mvdev), "%s %d err %d\n",
569 __func__, __LINE__, ret); 569 __func__, __LINE__, ret);
570 goto err; 570 goto err;
571 } 571 }
572 vqconfig[i].address = cpu_to_le64(vqconfig[i].address); 572 vqconfig[i].address = cpu_to_le64(vr_addr);
573 573
574 vring_init(&vr->vr, num, vr->va, MIC_VIRTIO_RING_ALIGN); 574 vring_init(&vr->vr, num, vr->va, MIC_VIRTIO_RING_ALIGN);
575 ret = vringh_init_kern(&mvr->vrh, 575 ret = vringh_init_kern(&mvr->vrh,
@@ -639,7 +639,7 @@ void mic_virtio_del_device(struct mic_vdev *mvdev)
639 struct mic_vdev *tmp_mvdev; 639 struct mic_vdev *tmp_mvdev;
640 struct mic_device *mdev = mvdev->mdev; 640 struct mic_device *mdev = mvdev->mdev;
641 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wake); 641 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wake);
642 int i, ret, retry = 100; 642 int i, ret, retry;
643 struct mic_vqconfig *vqconfig; 643 struct mic_vqconfig *vqconfig;
644 struct mic_bootparam *bootparam = mdev->dp; 644 struct mic_bootparam *bootparam = mdev->dp;
645 s8 db; 645 s8 db;
@@ -652,16 +652,16 @@ void mic_virtio_del_device(struct mic_vdev *mvdev)
652 "Requesting hot remove id %d\n", mvdev->virtio_id); 652 "Requesting hot remove id %d\n", mvdev->virtio_id);
653 mvdev->dc->config_change = MIC_VIRTIO_PARAM_DEV_REMOVE; 653 mvdev->dc->config_change = MIC_VIRTIO_PARAM_DEV_REMOVE;
654 mdev->ops->send_intr(mdev, db); 654 mdev->ops->send_intr(mdev, db);
655 for (i = retry; i--;) { 655 for (retry = 100; retry--;) {
656 ret = wait_event_timeout(wake, 656 ret = wait_event_timeout(wake,
657 mvdev->dc->guest_ack, msecs_to_jiffies(100)); 657 mvdev->dc->guest_ack, msecs_to_jiffies(100));
658 if (ret) 658 if (ret)
659 break; 659 break;
660 } 660 }
661 dev_dbg(mdev->sdev->parent, 661 dev_dbg(mdev->sdev->parent,
662 "Device id %d config_change %d guest_ack %d\n", 662 "Device id %d config_change %d guest_ack %d retry %d\n",
663 mvdev->virtio_id, mvdev->dc->config_change, 663 mvdev->virtio_id, mvdev->dc->config_change,
664 mvdev->dc->guest_ack); 664 mvdev->dc->guest_ack, retry);
665 mvdev->dc->config_change = 0; 665 mvdev->dc->config_change = 0;
666 mvdev->dc->guest_ack = 0; 666 mvdev->dc->guest_ack = 0;
667skip_hot_remove: 667skip_hot_remove:
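
The mic_virtio hunks above (card and host side) wrap every field shared through the device page — config.num, config.address, bootparam->magic, vr->info->magic — in le16_to_cpu()/le64_to_cpu()/cpu_to_le32(), giving the page one defined byte order no matter which side reads it. A standalone model of that pattern using the glibc <endian.h> helpers in place of the kernel macros; the vq_config layout below is invented for illustration:

    /* Standalone model of the explicit-endianness pattern. */
    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>
    #include <endian.h>

    struct vq_config {
            uint64_t address;       /* stored little-endian in shared memory */
            uint16_t num;           /* stored little-endian in shared memory */
    } __attribute__((packed));

    int main(void)
    {
            /* Shared page as the writer would fill it: LE on the wire. */
            unsigned char shared[sizeof(struct vq_config)];
            uint64_t addr_le = htole64(0x1000ULL);
            uint16_t num_le  = htole16(256);

            memcpy(shared, &addr_le, sizeof(addr_le));
            memcpy(shared + sizeof(addr_le), &num_le, sizeof(num_le));

            /* Reader side: always convert from LE, never use the raw field. */
            struct vq_config cfg;
            memcpy(&cfg, shared, sizeof(cfg));
            printf("ring at %#llx with %u descriptors\n",
                   (unsigned long long)le64toh(cfg.address),
                   (unsigned)le16toh(cfg.num));
            return 0;
    }
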
diff --git a/drivers/misc/mic/host/mic_x100.c b/drivers/misc/mic/host/mic_x100.c
index 81e9541b784c..0dfa8a81436e 100644
--- a/drivers/misc/mic/host/mic_x100.c
+++ b/drivers/misc/mic/host/mic_x100.c
@@ -397,8 +397,8 @@ mic_x100_load_ramdisk(struct mic_device *mdev)
397 * so copy over the ramdisk @ 128M. 397 * so copy over the ramdisk @ 128M.
398 */ 398 */
399 memcpy_toio(mdev->aper.va + (mdev->bootaddr << 1), fw->data, fw->size); 399 memcpy_toio(mdev->aper.va + (mdev->bootaddr << 1), fw->data, fw->size);
400 iowrite32(cpu_to_le32(mdev->bootaddr << 1), &bp->hdr.ramdisk_image); 400 iowrite32(mdev->bootaddr << 1, &bp->hdr.ramdisk_image);
401 iowrite32(cpu_to_le32(fw->size), &bp->hdr.ramdisk_size); 401 iowrite32(fw->size, &bp->hdr.ramdisk_size);
402 release_firmware(fw); 402 release_firmware(fw);
403error: 403error:
404 return rc; 404 return rc;
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index 0b10a9030f4e..98b6b6ef7e5c 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -22,6 +22,7 @@
22#include <linux/delay.h> 22#include <linux/delay.h>
23#include <linux/spinlock.h> 23#include <linux/spinlock.h>
24#include <linux/timer.h> 24#include <linux/timer.h>
25#include <linux/of.h>
25#include <linux/omap-dma.h> 26#include <linux/omap-dma.h>
26#include <linux/mmc/host.h> 27#include <linux/mmc/host.h>
27#include <linux/mmc/card.h> 28#include <linux/mmc/card.h>
@@ -90,17 +91,6 @@
90#define OMAP_MMC_CMDTYPE_AC 2 91#define OMAP_MMC_CMDTYPE_AC 2
91#define OMAP_MMC_CMDTYPE_ADTC 3 92#define OMAP_MMC_CMDTYPE_ADTC 3
92 93
93#define OMAP_DMA_MMC_TX 21
94#define OMAP_DMA_MMC_RX 22
95#define OMAP_DMA_MMC2_TX 54
96#define OMAP_DMA_MMC2_RX 55
97
98#define OMAP24XX_DMA_MMC2_TX 47
99#define OMAP24XX_DMA_MMC2_RX 48
100#define OMAP24XX_DMA_MMC1_TX 61
101#define OMAP24XX_DMA_MMC1_RX 62
102
103
104#define DRIVER_NAME "mmci-omap" 94#define DRIVER_NAME "mmci-omap"
105 95
106/* Specifies how often in millisecs to poll for card status changes 96/* Specifies how often in millisecs to poll for card status changes
@@ -1330,7 +1320,7 @@ static int mmc_omap_probe(struct platform_device *pdev)
1330 struct mmc_omap_host *host = NULL; 1320 struct mmc_omap_host *host = NULL;
1331 struct resource *res; 1321 struct resource *res;
1332 dma_cap_mask_t mask; 1322 dma_cap_mask_t mask;
1333 unsigned sig; 1323 unsigned sig = 0;
1334 int i, ret = 0; 1324 int i, ret = 0;
1335 int irq; 1325 int irq;
1336 1326
@@ -1340,7 +1330,7 @@ static int mmc_omap_probe(struct platform_device *pdev)
1340 } 1330 }
1341 if (pdata->nr_slots == 0) { 1331 if (pdata->nr_slots == 0) {
1342 dev_err(&pdev->dev, "no slots\n"); 1332 dev_err(&pdev->dev, "no slots\n");
1343 return -ENXIO; 1333 return -EPROBE_DEFER;
1344 } 1334 }
1345 1335
1346 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1336 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1407,19 +1397,20 @@ static int mmc_omap_probe(struct platform_device *pdev)
1407 host->dma_tx_burst = -1; 1397 host->dma_tx_burst = -1;
1408 host->dma_rx_burst = -1; 1398 host->dma_rx_burst = -1;
1409 1399
1410 if (mmc_omap2()) 1400 res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "tx");
1411 sig = host->id == 0 ? OMAP24XX_DMA_MMC1_TX : OMAP24XX_DMA_MMC2_TX; 1401 if (res)
1412 else 1402 sig = res->start;
1413 sig = host->id == 0 ? OMAP_DMA_MMC_TX : OMAP_DMA_MMC2_TX; 1403 host->dma_tx = dma_request_slave_channel_compat(mask,
1414 host->dma_tx = dma_request_channel(mask, omap_dma_filter_fn, &sig); 1404 omap_dma_filter_fn, &sig, &pdev->dev, "tx");
1415 if (!host->dma_tx) 1405 if (!host->dma_tx)
1416 dev_warn(host->dev, "unable to obtain TX DMA engine channel %u\n", 1406 dev_warn(host->dev, "unable to obtain TX DMA engine channel %u\n",
1417 sig); 1407 sig);
1418 if (mmc_omap2()) 1408
1419 sig = host->id == 0 ? OMAP24XX_DMA_MMC1_RX : OMAP24XX_DMA_MMC2_RX; 1409 res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "rx");
1420 else 1410 if (res)
1421 sig = host->id == 0 ? OMAP_DMA_MMC_RX : OMAP_DMA_MMC2_RX; 1411 sig = res->start;
1422 host->dma_rx = dma_request_channel(mask, omap_dma_filter_fn, &sig); 1412 host->dma_rx = dma_request_slave_channel_compat(mask,
1413 omap_dma_filter_fn, &sig, &pdev->dev, "rx");
1423 if (!host->dma_rx) 1414 if (!host->dma_rx)
1424 dev_warn(host->dev, "unable to obtain RX DMA engine channel %u\n", 1415 dev_warn(host->dev, "unable to obtain RX DMA engine channel %u\n",
1425 sig); 1416 sig);
@@ -1512,12 +1503,20 @@ static int mmc_omap_remove(struct platform_device *pdev)
1512 return 0; 1503 return 0;
1513} 1504}
1514 1505
1506#if IS_BUILTIN(CONFIG_OF)
1507static const struct of_device_id mmc_omap_match[] = {
1508 { .compatible = "ti,omap2420-mmc", },
1509 { },
1510};
1511#endif
1512
1515static struct platform_driver mmc_omap_driver = { 1513static struct platform_driver mmc_omap_driver = {
1516 .probe = mmc_omap_probe, 1514 .probe = mmc_omap_probe,
1517 .remove = mmc_omap_remove, 1515 .remove = mmc_omap_remove,
1518 .driver = { 1516 .driver = {
1519 .name = DRIVER_NAME, 1517 .name = DRIVER_NAME,
1520 .owner = THIS_MODULE, 1518 .owner = THIS_MODULE,
1519 .of_match_table = of_match_ptr(mmc_omap_match),
1521 }, 1520 },
1522}; 1521};
1523 1522
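
The omap MMC hunk above drops the hard-coded OMAP_DMA_MMC*_TX/RX request numbers: the request line is now read from a named IORESOURCE_DMA resource and the channel is obtained with dma_request_slave_channel_compat(), so device-tree probing supplies the channel via the dmas binding while legacy board files still go through omap_dma_filter_fn(). A hedged sketch of that lookup pattern — it assumes the usual <linux/platform_device.h> and <linux/dmaengine.h> declarations and trims error handling:

    /* Sketch only: helper name is invented, API calls as in the hunk. */
    static struct dma_chan *example_request_dma(struct platform_device *pdev,
                                                dma_cap_mask_t mask,
                                                dma_filter_fn filter,
                                                const char *name)
    {
            struct resource *res;
            unsigned int sig = 0;

            /* Legacy boards describe the request line as a named DMA
             * resource; DT boards provide "dmas"/"dma-names" instead. */
            res = platform_get_resource_byname(pdev, IORESOURCE_DMA, name);
            if (res)
                    sig = res->start;

            /* Prefers the DT/ACPI slave channel, falls back to filter+sig. */
            return dma_request_slave_channel_compat(mask, filter, &sig,
                                                    &pdev->dev, name);
    }
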
diff --git a/drivers/mtd/maps/pxa2xx-flash.c b/drivers/mtd/maps/pxa2xx-flash.c
index d210d131fef2..0f55589a56b8 100644
--- a/drivers/mtd/maps/pxa2xx-flash.c
+++ b/drivers/mtd/maps/pxa2xx-flash.c
@@ -73,7 +73,7 @@ static int pxa2xx_flash_probe(struct platform_device *pdev)
73 return -ENOMEM; 73 return -ENOMEM;
74 } 74 }
75 info->map.cached = 75 info->map.cached =
76 ioremap_cached(info->map.phys, info->map.size); 76 ioremap_cache(info->map.phys, info->map.size);
77 if (!info->map.cached) 77 if (!info->map.cached)
78 printk(KERN_WARNING "Failed to ioremap cached %s\n", 78 printk(KERN_WARNING "Failed to ioremap cached %s\n",
79 info->map.name); 79 info->map.name);
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index 4cabdc9fda90..4b3aaa898a8b 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -962,7 +962,7 @@ static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
962static void pxa3xx_nand_free_buff(struct pxa3xx_nand_info *info) 962static void pxa3xx_nand_free_buff(struct pxa3xx_nand_info *info)
963{ 963{
964 struct platform_device *pdev = info->pdev; 964 struct platform_device *pdev = info->pdev;
965 if (use_dma) { 965 if (info->use_dma) {
966 pxa_free_dma(info->data_dma_ch); 966 pxa_free_dma(info->data_dma_ch);
967 dma_free_coherent(&pdev->dev, info->buf_size, 967 dma_free_coherent(&pdev->dev, info->buf_size,
968 info->data_buff, info->data_buff_phys); 968 info->data_buff, info->data_buff_phys);
@@ -1259,10 +1259,6 @@ static struct of_device_id pxa3xx_nand_dt_ids[] = {
1259 .compatible = "marvell,pxa3xx-nand", 1259 .compatible = "marvell,pxa3xx-nand",
1260 .data = (void *)PXA3XX_NAND_VARIANT_PXA, 1260 .data = (void *)PXA3XX_NAND_VARIANT_PXA,
1261 }, 1261 },
1262 {
1263 .compatible = "marvell,armada370-nand",
1264 .data = (void *)PXA3XX_NAND_VARIANT_ARMADA370,
1265 },
1266 {} 1262 {}
1267}; 1263};
1268MODULE_DEVICE_TABLE(of, pxa3xx_nand_dt_ids); 1264MODULE_DEVICE_TABLE(of, pxa3xx_nand_dt_ids);
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index 187b1b7772ef..4ced59436558 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -2201,20 +2201,25 @@ void bond_3ad_adapter_speed_changed(struct slave *slave)
2201 2201
2202 port = &(SLAVE_AD_INFO(slave).port); 2202 port = &(SLAVE_AD_INFO(slave).port);
2203 2203
2204 // if slave is null, the whole port is not initialized 2204 /* if slave is null, the whole port is not initialized */
2205 if (!port->slave) { 2205 if (!port->slave) {
2206 pr_warning("Warning: %s: speed changed for uninitialized port on %s\n", 2206 pr_warning("Warning: %s: speed changed for uninitialized port on %s\n",
2207 slave->bond->dev->name, slave->dev->name); 2207 slave->bond->dev->name, slave->dev->name);
2208 return; 2208 return;
2209 } 2209 }
2210 2210
2211 __get_state_machine_lock(port);
2212
2211 port->actor_admin_port_key &= ~AD_SPEED_KEY_BITS; 2213 port->actor_admin_port_key &= ~AD_SPEED_KEY_BITS;
2212 port->actor_oper_port_key = port->actor_admin_port_key |= 2214 port->actor_oper_port_key = port->actor_admin_port_key |=
2213 (__get_link_speed(port) << 1); 2215 (__get_link_speed(port) << 1);
2214 pr_debug("Port %d changed speed\n", port->actor_port_number); 2216 pr_debug("Port %d changed speed\n", port->actor_port_number);
2215 // there is no need to reselect a new aggregator, just signal the 2217 /* there is no need to reselect a new aggregator, just signal the
2216 // state machines to reinitialize 2218 * state machines to reinitialize
2219 */
2217 port->sm_vars |= AD_PORT_BEGIN; 2220 port->sm_vars |= AD_PORT_BEGIN;
2221
2222 __release_state_machine_lock(port);
2218} 2223}
2219 2224
2220/** 2225/**
@@ -2229,20 +2234,25 @@ void bond_3ad_adapter_duplex_changed(struct slave *slave)
2229 2234
2230 port = &(SLAVE_AD_INFO(slave).port); 2235 port = &(SLAVE_AD_INFO(slave).port);
2231 2236
2232 // if slave is null, the whole port is not initialized 2237 /* if slave is null, the whole port is not initialized */
2233 if (!port->slave) { 2238 if (!port->slave) {
2234 pr_warning("%s: Warning: duplex changed for uninitialized port on %s\n", 2239 pr_warning("%s: Warning: duplex changed for uninitialized port on %s\n",
2235 slave->bond->dev->name, slave->dev->name); 2240 slave->bond->dev->name, slave->dev->name);
2236 return; 2241 return;
2237 } 2242 }
2238 2243
2244 __get_state_machine_lock(port);
2245
2239 port->actor_admin_port_key &= ~AD_DUPLEX_KEY_BITS; 2246 port->actor_admin_port_key &= ~AD_DUPLEX_KEY_BITS;
2240 port->actor_oper_port_key = port->actor_admin_port_key |= 2247 port->actor_oper_port_key = port->actor_admin_port_key |=
2241 __get_duplex(port); 2248 __get_duplex(port);
2242 pr_debug("Port %d changed duplex\n", port->actor_port_number); 2249 pr_debug("Port %d changed duplex\n", port->actor_port_number);
2243 // there is no need to reselect a new aggregator, just signal the 2250 /* there is no need to reselect a new aggregator, just signal the
2244 // state machines to reinitialize 2251 * state machines to reinitialize
2252 */
2245 port->sm_vars |= AD_PORT_BEGIN; 2253 port->sm_vars |= AD_PORT_BEGIN;
2254
2255 __release_state_machine_lock(port);
2246} 2256}
2247 2257
2248/** 2258/**
@@ -2258,15 +2268,21 @@ void bond_3ad_handle_link_change(struct slave *slave, char link)
2258 2268
2259 port = &(SLAVE_AD_INFO(slave).port); 2269 port = &(SLAVE_AD_INFO(slave).port);
2260 2270
2261 // if slave is null, the whole port is not initialized 2271 /* if slave is null, the whole port is not initialized */
2262 if (!port->slave) { 2272 if (!port->slave) {
2263 pr_warning("Warning: %s: link status changed for uninitialized port on %s\n", 2273 pr_warning("Warning: %s: link status changed for uninitialized port on %s\n",
2264 slave->bond->dev->name, slave->dev->name); 2274 slave->bond->dev->name, slave->dev->name);
2265 return; 2275 return;
2266 } 2276 }
2267 2277
2268 // on link down we are zeroing duplex and speed since some of the adaptors(ce1000.lan) report full duplex/speed instead of N/A(duplex) / 0(speed) 2278 __get_state_machine_lock(port);
2269 // on link up we are forcing recheck on the duplex and speed since some of he adaptors(ce1000.lan) report 2279 /* on link down we are zeroing duplex and speed since
2280 * some of the adaptors(ce1000.lan) report full duplex/speed
2281 * instead of N/A(duplex) / 0(speed).
2282 *
2283 * on link up we are forcing recheck on the duplex and speed since
2284 * some of he adaptors(ce1000.lan) report.
2285 */
2270 if (link == BOND_LINK_UP) { 2286 if (link == BOND_LINK_UP) {
2271 port->is_enabled = true; 2287 port->is_enabled = true;
2272 port->actor_admin_port_key &= ~AD_DUPLEX_KEY_BITS; 2288 port->actor_admin_port_key &= ~AD_DUPLEX_KEY_BITS;
@@ -2282,10 +2298,15 @@ void bond_3ad_handle_link_change(struct slave *slave, char link)
2282 port->actor_oper_port_key = (port->actor_admin_port_key &= 2298 port->actor_oper_port_key = (port->actor_admin_port_key &=
2283 ~AD_SPEED_KEY_BITS); 2299 ~AD_SPEED_KEY_BITS);
2284 } 2300 }
2285 //BOND_PRINT_DBG(("Port %d changed link status to %s", port->actor_port_number, ((link == BOND_LINK_UP)?"UP":"DOWN"))); 2301 pr_debug("Port %d changed link status to %s",
2286 // there is no need to reselect a new aggregator, just signal the 2302 port->actor_port_number,
2287 // state machines to reinitialize 2303 (link == BOND_LINK_UP) ? "UP" : "DOWN");
2304 /* there is no need to reselect a new aggregator, just signal the
2305 * state machines to reinitialize
2306 */
2288 port->sm_vars |= AD_PORT_BEGIN; 2307 port->sm_vars |= AD_PORT_BEGIN;
2308
2309 __release_state_machine_lock(port);
2289} 2310}
2290 2311
2291/* 2312/*
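
The bond_3ad.c hunks above put the actor_admin_port_key/actor_oper_port_key read-modify-write and the AD_PORT_BEGIN signal under __get_state_machine_lock()/__release_state_machine_lock(), so a speed, duplex or link notification cannot interleave with the 802.3ad state machine running elsewhere. A standalone model of the shape of that fix, using a pthread mutex in place of the bond state machine lock; the structure, key bits and names are invented:

    /* Standalone model of serializing a key update against a state machine.
     * Build with: cc -pthread example.c */
    #include <pthread.h>
    #include <stdio.h>

    #define SPEED_KEY_BITS  0x3e
    #define PORT_BEGIN      0x01

    struct port {
            pthread_mutex_t lock;
            unsigned int admin_key;
            unsigned int sm_vars;
    };

    static void port_speed_changed(struct port *p, unsigned int speed_key)
    {
            /* Same shape as the hunk: take the lock, update the key, then
             * ask the state machine to reinitialize, then release. */
            pthread_mutex_lock(&p->lock);
            p->admin_key &= ~SPEED_KEY_BITS;
            p->admin_key |= speed_key << 1;
            p->sm_vars |= PORT_BEGIN;
            pthread_mutex_unlock(&p->lock);
    }

    int main(void)
    {
            struct port p = { .lock = PTHREAD_MUTEX_INITIALIZER,
                              .admin_key = 0, .sm_vars = 0 };

            port_speed_changed(&p, 0x3);
            printf("admin_key=%#x sm_vars=%#x\n", p.admin_key, p.sm_vars);
            return 0;
    }
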
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 4dd5ee2a34cc..4b8c58b0ec24 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -3732,7 +3732,8 @@ static inline int bond_slave_override(struct bonding *bond,
3732} 3732}
3733 3733
3734 3734
3735static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb) 3735static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb,
3736 void *accel_priv)
3736{ 3737{
3737 /* 3738 /*
3738 * This helper function exists to help dev_pick_tx get the correct 3739 * This helper function exists to help dev_pick_tx get the correct
@@ -4110,7 +4111,7 @@ static int bond_check_params(struct bond_params *params)
4110 if (!miimon) { 4111 if (!miimon) {
4111 pr_warning("Warning: miimon must be specified, otherwise bonding will not detect link failure, speed and duplex which are essential for 802.3ad operation\n"); 4112 pr_warning("Warning: miimon must be specified, otherwise bonding will not detect link failure, speed and duplex which are essential for 802.3ad operation\n");
4112 pr_warning("Forcing miimon to 100msec\n"); 4113 pr_warning("Forcing miimon to 100msec\n");
4113 miimon = 100; 4114 miimon = BOND_DEFAULT_MIIMON;
4114 } 4115 }
4115 } 4116 }
4116 4117
@@ -4147,7 +4148,7 @@ static int bond_check_params(struct bond_params *params)
4147 if (!miimon) { 4148 if (!miimon) {
4148 pr_warning("Warning: miimon must be specified, otherwise bonding will not detect link failure and link speed which are essential for TLB/ALB load balancing\n"); 4149 pr_warning("Warning: miimon must be specified, otherwise bonding will not detect link failure and link speed which are essential for TLB/ALB load balancing\n");
4149 pr_warning("Forcing miimon to 100msec\n"); 4150 pr_warning("Forcing miimon to 100msec\n");
4150 miimon = 100; 4151 miimon = BOND_DEFAULT_MIIMON;
4151 } 4152 }
4152 } 4153 }
4153 4154
@@ -4199,9 +4200,9 @@ static int bond_check_params(struct bond_params *params)
4199 (arp_ip_count < BOND_MAX_ARP_TARGETS) && arp_ip_target[i]; i++) { 4200 (arp_ip_count < BOND_MAX_ARP_TARGETS) && arp_ip_target[i]; i++) {
4200 /* not complete check, but should be good enough to 4201 /* not complete check, but should be good enough to
4201 catch mistakes */ 4202 catch mistakes */
4202 __be32 ip = in_aton(arp_ip_target[i]); 4203 __be32 ip;
4203 if (!isdigit(arp_ip_target[i][0]) || ip == 0 || 4204 if (!in4_pton(arp_ip_target[i], -1, (u8 *)&ip, -1, NULL) ||
4204 ip == htonl(INADDR_BROADCAST)) { 4205 IS_IP_TARGET_UNUSABLE_ADDRESS(ip)) {
4205 pr_warning("Warning: bad arp_ip_target module parameter (%s), ARP monitoring will not be performed\n", 4206 pr_warning("Warning: bad arp_ip_target module parameter (%s), ARP monitoring will not be performed\n",
4206 arp_ip_target[i]); 4207 arp_ip_target[i]);
4207 arp_interval = 0; 4208 arp_interval = 0;
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index 9a5223c7b4d1..ea6f640782b7 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -45,10 +45,15 @@ int bond_option_mode_set(struct bonding *bond, int mode)
45 return -EPERM; 45 return -EPERM;
46 } 46 }
47 47
48 if (BOND_MODE_IS_LB(mode) && bond->params.arp_interval) { 48 if (BOND_NO_USES_ARP(mode) && bond->params.arp_interval) {
49 pr_err("%s: %s mode is incompatible with arp monitoring.\n", 49 pr_info("%s: %s mode is incompatible with arp monitoring, start mii monitoring\n",
50 bond->dev->name, bond_mode_tbl[mode].modename); 50 bond->dev->name, bond_mode_tbl[mode].modename);
51 return -EINVAL; 51 /* disable arp monitoring */
52 bond->params.arp_interval = 0;
53 /* set miimon to default value */
54 bond->params.miimon = BOND_DEFAULT_MIIMON;
55 pr_info("%s: Setting MII monitoring interval to %d.\n",
56 bond->dev->name, bond->params.miimon);
52 } 57 }
53 58
54 /* don't cache arp_validate between modes */ 59 /* don't cache arp_validate between modes */
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 0ec2a7e8c8a9..0ae580bbc5db 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -523,9 +523,7 @@ static ssize_t bonding_store_arp_interval(struct device *d,
523 ret = -EINVAL; 523 ret = -EINVAL;
524 goto out; 524 goto out;
525 } 525 }
526 if (bond->params.mode == BOND_MODE_ALB || 526 if (BOND_NO_USES_ARP(bond->params.mode)) {
527 bond->params.mode == BOND_MODE_TLB ||
528 bond->params.mode == BOND_MODE_8023AD) {
529 pr_info("%s: ARP monitoring cannot be used with ALB/TLB/802.3ad. Only MII monitoring is supported on %s.\n", 527 pr_info("%s: ARP monitoring cannot be used with ALB/TLB/802.3ad. Only MII monitoring is supported on %s.\n",
530 bond->dev->name, bond->dev->name); 528 bond->dev->name, bond->dev->name);
531 ret = -EINVAL; 529 ret = -EINVAL;
@@ -1637,12 +1635,12 @@ static ssize_t bonding_show_packets_per_slave(struct device *d,
1637 char *buf) 1635 char *buf)
1638{ 1636{
1639 struct bonding *bond = to_bond(d); 1637 struct bonding *bond = to_bond(d);
1640 int packets_per_slave = bond->params.packets_per_slave; 1638 unsigned int packets_per_slave = bond->params.packets_per_slave;
1641 1639
1642 if (packets_per_slave > 1) 1640 if (packets_per_slave > 1)
1643 packets_per_slave = reciprocal_value(packets_per_slave); 1641 packets_per_slave = reciprocal_value(packets_per_slave);
1644 1642
1645 return sprintf(buf, "%d\n", packets_per_slave); 1643 return sprintf(buf, "%u\n", packets_per_slave);
1646} 1644}
1647 1645
1648static ssize_t bonding_store_packets_per_slave(struct device *d, 1646static ssize_t bonding_store_packets_per_slave(struct device *d,
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index ca31286aa028..a9f4f9f4d8ce 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -35,6 +35,8 @@
35 35
36#define BOND_MAX_ARP_TARGETS 16 36#define BOND_MAX_ARP_TARGETS 16
37 37
38#define BOND_DEFAULT_MIIMON 100
39
38#define IS_UP(dev) \ 40#define IS_UP(dev) \
39 ((((dev)->flags & IFF_UP) == IFF_UP) && \ 41 ((((dev)->flags & IFF_UP) == IFF_UP) && \
40 netif_running(dev) && \ 42 netif_running(dev) && \
@@ -55,6 +57,11 @@
55 ((mode) == BOND_MODE_TLB) || \ 57 ((mode) == BOND_MODE_TLB) || \
56 ((mode) == BOND_MODE_ALB)) 58 ((mode) == BOND_MODE_ALB))
57 59
60#define BOND_NO_USES_ARP(mode) \
61 (((mode) == BOND_MODE_8023AD) || \
62 ((mode) == BOND_MODE_TLB) || \
63 ((mode) == BOND_MODE_ALB))
64
58#define TX_QUEUE_OVERRIDE(mode) \ 65#define TX_QUEUE_OVERRIDE(mode) \
59 (((mode) == BOND_MODE_ACTIVEBACKUP) || \ 66 (((mode) == BOND_MODE_ACTIVEBACKUP) || \
60 ((mode) == BOND_MODE_ROUNDROBIN)) 67 ((mode) == BOND_MODE_ROUNDROBIN))
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
index e3fc07cf2f62..77061eebb034 100644
--- a/drivers/net/can/c_can/c_can.c
+++ b/drivers/net/can/c_can/c_can.c
@@ -712,22 +712,31 @@ static int c_can_set_mode(struct net_device *dev, enum can_mode mode)
712 return 0; 712 return 0;
713} 713}
714 714
715static int c_can_get_berr_counter(const struct net_device *dev, 715static int __c_can_get_berr_counter(const struct net_device *dev,
716 struct can_berr_counter *bec) 716 struct can_berr_counter *bec)
717{ 717{
718 unsigned int reg_err_counter; 718 unsigned int reg_err_counter;
719 struct c_can_priv *priv = netdev_priv(dev); 719 struct c_can_priv *priv = netdev_priv(dev);
720 720
721 c_can_pm_runtime_get_sync(priv);
722
723 reg_err_counter = priv->read_reg(priv, C_CAN_ERR_CNT_REG); 721 reg_err_counter = priv->read_reg(priv, C_CAN_ERR_CNT_REG);
724 bec->rxerr = (reg_err_counter & ERR_CNT_REC_MASK) >> 722 bec->rxerr = (reg_err_counter & ERR_CNT_REC_MASK) >>
725 ERR_CNT_REC_SHIFT; 723 ERR_CNT_REC_SHIFT;
726 bec->txerr = reg_err_counter & ERR_CNT_TEC_MASK; 724 bec->txerr = reg_err_counter & ERR_CNT_TEC_MASK;
727 725
726 return 0;
727}
728
729static int c_can_get_berr_counter(const struct net_device *dev,
730 struct can_berr_counter *bec)
731{
732 struct c_can_priv *priv = netdev_priv(dev);
733 int err;
734
735 c_can_pm_runtime_get_sync(priv);
736 err = __c_can_get_berr_counter(dev, bec);
728 c_can_pm_runtime_put_sync(priv); 737 c_can_pm_runtime_put_sync(priv);
729 738
730 return 0; 739 return err;
731} 740}
732 741
733/* 742/*
@@ -754,6 +763,7 @@ static void c_can_do_tx(struct net_device *dev)
754 if (!(val & (1 << (msg_obj_no - 1)))) { 763 if (!(val & (1 << (msg_obj_no - 1)))) {
755 can_get_echo_skb(dev, 764 can_get_echo_skb(dev,
756 msg_obj_no - C_CAN_MSG_OBJ_TX_FIRST); 765 msg_obj_no - C_CAN_MSG_OBJ_TX_FIRST);
766 c_can_object_get(dev, 0, msg_obj_no, IF_COMM_ALL);
757 stats->tx_bytes += priv->read_reg(priv, 767 stats->tx_bytes += priv->read_reg(priv,
758 C_CAN_IFACE(MSGCTRL_REG, 0)) 768 C_CAN_IFACE(MSGCTRL_REG, 0))
759 & IF_MCONT_DLC_MASK; 769 & IF_MCONT_DLC_MASK;
@@ -872,7 +882,7 @@ static int c_can_handle_state_change(struct net_device *dev,
872 if (unlikely(!skb)) 882 if (unlikely(!skb))
873 return 0; 883 return 0;
874 884
875 c_can_get_berr_counter(dev, &bec); 885 __c_can_get_berr_counter(dev, &bec);
876 reg_err_counter = priv->read_reg(priv, C_CAN_ERR_CNT_REG); 886 reg_err_counter = priv->read_reg(priv, C_CAN_ERR_CNT_REG);
877 rx_err_passive = (reg_err_counter & ERR_CNT_RP_MASK) >> 887 rx_err_passive = (reg_err_counter & ERR_CNT_RP_MASK) >>
878 ERR_CNT_RP_SHIFT; 888 ERR_CNT_RP_SHIFT;
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index ae08cf129ebb..aaed97bee471 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -1020,13 +1020,13 @@ static int flexcan_probe(struct platform_device *pdev)
1020 dev_err(&pdev->dev, "no ipg clock defined\n"); 1020 dev_err(&pdev->dev, "no ipg clock defined\n");
1021 return PTR_ERR(clk_ipg); 1021 return PTR_ERR(clk_ipg);
1022 } 1022 }
1023 clock_freq = clk_get_rate(clk_ipg);
1024 1023
1025 clk_per = devm_clk_get(&pdev->dev, "per"); 1024 clk_per = devm_clk_get(&pdev->dev, "per");
1026 if (IS_ERR(clk_per)) { 1025 if (IS_ERR(clk_per)) {
1027 dev_err(&pdev->dev, "no per clock defined\n"); 1026 dev_err(&pdev->dev, "no per clock defined\n");
1028 return PTR_ERR(clk_per); 1027 return PTR_ERR(clk_per);
1029 } 1028 }
1029 clock_freq = clk_get_rate(clk_per);
1030 } 1030 }
1031 1031
1032 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1032 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
index 7164a999f50f..f17c3018b7c7 100644
--- a/drivers/net/can/sja1000/sja1000.c
+++ b/drivers/net/can/sja1000/sja1000.c
@@ -494,20 +494,20 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id)
494 uint8_t isrc, status; 494 uint8_t isrc, status;
495 int n = 0; 495 int n = 0;
496 496
497 /* Shared interrupts and IRQ off? */
498 if (priv->read_reg(priv, SJA1000_IER) == IRQ_OFF)
499 return IRQ_NONE;
500
501 if (priv->pre_irq) 497 if (priv->pre_irq)
502 priv->pre_irq(priv); 498 priv->pre_irq(priv);
503 499
500 /* Shared interrupts and IRQ off? */
501 if (priv->read_reg(priv, SJA1000_IER) == IRQ_OFF)
502 goto out;
503
504 while ((isrc = priv->read_reg(priv, SJA1000_IR)) && 504 while ((isrc = priv->read_reg(priv, SJA1000_IR)) &&
505 (n < SJA1000_MAX_IRQ)) { 505 (n < SJA1000_MAX_IRQ)) {
506 n++; 506
507 status = priv->read_reg(priv, SJA1000_SR); 507 status = priv->read_reg(priv, SJA1000_SR);
508 /* check for absent controller due to hw unplug */ 508 /* check for absent controller due to hw unplug */
509 if (status == 0xFF && sja1000_is_absent(priv)) 509 if (status == 0xFF && sja1000_is_absent(priv))
510 return IRQ_NONE; 510 goto out;
511 511
512 if (isrc & IRQ_WUI) 512 if (isrc & IRQ_WUI)
513 netdev_warn(dev, "wakeup interrupt\n"); 513 netdev_warn(dev, "wakeup interrupt\n");
@@ -535,7 +535,7 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id)
535 status = priv->read_reg(priv, SJA1000_SR); 535 status = priv->read_reg(priv, SJA1000_SR);
536 /* check for absent controller */ 536 /* check for absent controller */
537 if (status == 0xFF && sja1000_is_absent(priv)) 537 if (status == 0xFF && sja1000_is_absent(priv))
538 return IRQ_NONE; 538 goto out;
539 } 539 }
540 } 540 }
541 if (isrc & (IRQ_DOI | IRQ_EI | IRQ_BEI | IRQ_EPI | IRQ_ALI)) { 541 if (isrc & (IRQ_DOI | IRQ_EI | IRQ_BEI | IRQ_EPI | IRQ_ALI)) {
@@ -543,8 +543,9 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id)
543 if (sja1000_err(dev, isrc, status)) 543 if (sja1000_err(dev, isrc, status))
544 break; 544 break;
545 } 545 }
546 n++;
546 } 547 }
547 548out:
548 if (priv->post_irq) 549 if (priv->post_irq)
549 priv->post_irq(priv); 550 priv->post_irq(priv);
550 551
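
The sja1000.c hunk above moves the "IRQ off?" check after priv->pre_irq() and turns the early returns into goto out, so priv->post_irq() always balances pre_irq(), and n is only incremented for iterations that actually handled an interrupt source. A standalone model of the single-exit pairing; the device state and hook names below are invented:

    /* Standalone model of pairing enter/exit hooks around early exits. */
    #include <stdbool.h>
    #include <stdio.h>

    struct dev {
            bool irq_enabled;
            bool absent;
            int  pre_calls;
            int  post_calls;
    };

    static void pre_irq(struct dev *d)  { d->pre_calls++; }
    static void post_irq(struct dev *d) { d->post_calls++; }

    static int handle_irq(struct dev *d)
    {
            int handled = 0;

            pre_irq(d);

            /* Early-exit conditions branch to 'out' instead of returning,
             * so post_irq() always runs once pre_irq() has. */
            if (!d->irq_enabled)
                    goto out;
            if (d->absent)
                    goto out;

            handled = 1;
    out:
            post_irq(d);
            return handled;
    }

    int main(void)
    {
            struct dev d = { .irq_enabled = false };

            handle_irq(&d);
            printf("pre=%d post=%d\n", d.pre_calls, d.post_calls);  /* 1 1 */
            return 0;
    }
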
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index 5f9a7ad9b964..8aeec0b4601a 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -625,6 +625,7 @@ static int ems_usb_start(struct ems_usb *dev)
625 usb_unanchor_urb(urb); 625 usb_unanchor_urb(urb);
626 usb_free_coherent(dev->udev, RX_BUFFER_SIZE, buf, 626 usb_free_coherent(dev->udev, RX_BUFFER_SIZE, buf,
627 urb->transfer_dma); 627 urb->transfer_dma);
628 usb_free_urb(urb);
628 break; 629 break;
629 } 630 }
630 631
@@ -798,8 +799,8 @@ static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *ne
798 * allowed (MAX_TX_URBS). 799 * allowed (MAX_TX_URBS).
799 */ 800 */
800 if (!context) { 801 if (!context) {
801 usb_unanchor_urb(urb);
802 usb_free_coherent(dev->udev, size, buf, urb->transfer_dma); 802 usb_free_coherent(dev->udev, size, buf, urb->transfer_dma);
803 usb_free_urb(urb);
803 804
804 netdev_warn(netdev, "couldn't find free context\n"); 805 netdev_warn(netdev, "couldn't find free context\n");
805 806
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
index 8ee9d1556e6e..263dd921edc4 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
@@ -927,6 +927,9 @@ static int pcan_usb_pro_init(struct peak_usb_device *dev)
927 /* set LED in default state (end of init phase) */ 927 /* set LED in default state (end of init phase) */
928 pcan_usb_pro_set_led(dev, 0, 1); 928 pcan_usb_pro_set_led(dev, 0, 1);
929 929
930 kfree(bi);
931 kfree(fi);
932
930 return 0; 933 return 0;
931 934
932 err_out: 935 err_out:
diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c
index 50b853a79d77..46dfb1378c17 100644
--- a/drivers/net/ethernet/allwinner/sun4i-emac.c
+++ b/drivers/net/ethernet/allwinner/sun4i-emac.c
@@ -717,8 +717,7 @@ static int emac_open(struct net_device *dev)
717 if (netif_msg_ifup(db)) 717 if (netif_msg_ifup(db))
718 dev_dbg(db->dev, "enabling %s\n", dev->name); 718 dev_dbg(db->dev, "enabling %s\n", dev->name);
719 719
720 if (devm_request_irq(db->dev, dev->irq, &emac_interrupt, 720 if (request_irq(dev->irq, &emac_interrupt, 0, dev->name, dev))
721 0, dev->name, dev))
722 return -EAGAIN; 721 return -EAGAIN;
723 722
724 /* Initialize EMAC board */ 723 /* Initialize EMAC board */
@@ -774,6 +773,8 @@ static int emac_stop(struct net_device *ndev)
774 773
775 emac_shutdown(ndev); 774 emac_shutdown(ndev);
776 775
776 free_irq(ndev->irq, ndev);
777
777 return 0; 778 return 0;
778} 779}
779 780
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index b2ffad1304d2..248baf6273fb 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -565,6 +565,8 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
565 /* Make sure pointer to data buffer is set */ 565 /* Make sure pointer to data buffer is set */
566 wmb(); 566 wmb();
567 567
568 skb_tx_timestamp(skb);
569
568 *info = cpu_to_le32(FOR_EMAC | FIRST_OR_LAST_MASK | len); 570 *info = cpu_to_le32(FOR_EMAC | FIRST_OR_LAST_MASK | len);
569 571
570 /* Increment index to point to the next BD */ 572 /* Increment index to point to the next BD */
@@ -579,8 +581,6 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
579 581
580 arc_reg_set(priv, R_STATUS, TXPL_MASK); 582 arc_reg_set(priv, R_STATUS, TXPL_MASK);
581 583
582 skb_tx_timestamp(skb);
583
584 return NETDEV_TX_OK; 584 return NETDEV_TX_OK;
585} 585}
586 586
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index a36a760ada28..29801750f239 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -145,9 +145,11 @@ static void atl1c_reset_pcie(struct atl1c_hw *hw, u32 flag)
145 * Mask some pcie error bits 145 * Mask some pcie error bits
146 */ 146 */
147 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR); 147 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
148 pci_read_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, &data); 148 if (pos) {
149 data &= ~(PCI_ERR_UNC_DLP | PCI_ERR_UNC_FCP); 149 pci_read_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, &data);
150 pci_write_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, data); 150 data &= ~(PCI_ERR_UNC_DLP | PCI_ERR_UNC_FCP);
151 pci_write_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, data);
152 }
151 /* clear error status */ 153 /* clear error status */
152 pcie_capability_write_word(pdev, PCI_EXP_DEVSTA, 154 pcie_capability_write_word(pdev, PCI_EXP_DEVSTA,
153 PCI_EXP_DEVSTA_NFED | 155 PCI_EXP_DEVSTA_NFED |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index a1f66e2c9a86..ec6119089b82 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -520,10 +520,12 @@ struct bnx2x_fastpath {
520#define BNX2X_FP_STATE_IDLE 0 520#define BNX2X_FP_STATE_IDLE 0
521#define BNX2X_FP_STATE_NAPI (1 << 0) /* NAPI owns this FP */ 521#define BNX2X_FP_STATE_NAPI (1 << 0) /* NAPI owns this FP */
522#define BNX2X_FP_STATE_POLL (1 << 1) /* poll owns this FP */ 522#define BNX2X_FP_STATE_POLL (1 << 1) /* poll owns this FP */
523#define BNX2X_FP_STATE_NAPI_YIELD (1 << 2) /* NAPI yielded this FP */ 523#define BNX2X_FP_STATE_DISABLED (1 << 2)
524#define BNX2X_FP_STATE_POLL_YIELD (1 << 3) /* poll yielded this FP */ 524#define BNX2X_FP_STATE_NAPI_YIELD (1 << 3) /* NAPI yielded this FP */
525#define BNX2X_FP_STATE_POLL_YIELD (1 << 4) /* poll yielded this FP */
526#define BNX2X_FP_OWNED (BNX2X_FP_STATE_NAPI | BNX2X_FP_STATE_POLL)
525#define BNX2X_FP_YIELD (BNX2X_FP_STATE_NAPI_YIELD | BNX2X_FP_STATE_POLL_YIELD) 527#define BNX2X_FP_YIELD (BNX2X_FP_STATE_NAPI_YIELD | BNX2X_FP_STATE_POLL_YIELD)
526#define BNX2X_FP_LOCKED (BNX2X_FP_STATE_NAPI | BNX2X_FP_STATE_POLL) 528#define BNX2X_FP_LOCKED (BNX2X_FP_OWNED | BNX2X_FP_STATE_DISABLED)
527#define BNX2X_FP_USER_PEND (BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_POLL_YIELD) 529#define BNX2X_FP_USER_PEND (BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_POLL_YIELD)
528 /* protect state */ 530 /* protect state */
529 spinlock_t lock; 531 spinlock_t lock;
@@ -613,7 +615,7 @@ static inline bool bnx2x_fp_lock_napi(struct bnx2x_fastpath *fp)
613{ 615{
614 bool rc = true; 616 bool rc = true;
615 617
616 spin_lock(&fp->lock); 618 spin_lock_bh(&fp->lock);
617 if (fp->state & BNX2X_FP_LOCKED) { 619 if (fp->state & BNX2X_FP_LOCKED) {
618 WARN_ON(fp->state & BNX2X_FP_STATE_NAPI); 620 WARN_ON(fp->state & BNX2X_FP_STATE_NAPI);
619 fp->state |= BNX2X_FP_STATE_NAPI_YIELD; 621 fp->state |= BNX2X_FP_STATE_NAPI_YIELD;
@@ -622,7 +624,7 @@ static inline bool bnx2x_fp_lock_napi(struct bnx2x_fastpath *fp)
622 /* we don't care if someone yielded */ 624 /* we don't care if someone yielded */
623 fp->state = BNX2X_FP_STATE_NAPI; 625 fp->state = BNX2X_FP_STATE_NAPI;
624 } 626 }
625 spin_unlock(&fp->lock); 627 spin_unlock_bh(&fp->lock);
626 return rc; 628 return rc;
627} 629}
628 630
@@ -631,14 +633,16 @@ static inline bool bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp)
631{ 633{
632 bool rc = false; 634 bool rc = false;
633 635
634 spin_lock(&fp->lock); 636 spin_lock_bh(&fp->lock);
635 WARN_ON(fp->state & 637 WARN_ON(fp->state &
636 (BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_NAPI_YIELD)); 638 (BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_NAPI_YIELD));
637 639
638 if (fp->state & BNX2X_FP_STATE_POLL_YIELD) 640 if (fp->state & BNX2X_FP_STATE_POLL_YIELD)
639 rc = true; 641 rc = true;
640 fp->state = BNX2X_FP_STATE_IDLE; 642
641 spin_unlock(&fp->lock); 643 /* state ==> idle, unless currently disabled */
644 fp->state &= BNX2X_FP_STATE_DISABLED;
645 spin_unlock_bh(&fp->lock);
642 return rc; 646 return rc;
643} 647}
644 648
@@ -669,7 +673,9 @@ static inline bool bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp)
669 673
670 if (fp->state & BNX2X_FP_STATE_POLL_YIELD) 674 if (fp->state & BNX2X_FP_STATE_POLL_YIELD)
671 rc = true; 675 rc = true;
672 fp->state = BNX2X_FP_STATE_IDLE; 676
677 /* state ==> idle, unless currently disabled */
678 fp->state &= BNX2X_FP_STATE_DISABLED;
673 spin_unlock_bh(&fp->lock); 679 spin_unlock_bh(&fp->lock);
674 return rc; 680 return rc;
675} 681}
@@ -677,9 +683,23 @@ static inline bool bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp)
677/* true if a socket is polling, even if it did not get the lock */ 683/* true if a socket is polling, even if it did not get the lock */
678static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp) 684static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp)
679{ 685{
680 WARN_ON(!(fp->state & BNX2X_FP_LOCKED)); 686 WARN_ON(!(fp->state & BNX2X_FP_OWNED));
681 return fp->state & BNX2X_FP_USER_PEND; 687 return fp->state & BNX2X_FP_USER_PEND;
682} 688}
689
690/* false if fp is currently owned */
691static inline bool bnx2x_fp_ll_disable(struct bnx2x_fastpath *fp)
692{
693 int rc = true;
694
695 spin_lock_bh(&fp->lock);
696 if (fp->state & BNX2X_FP_OWNED)
697 rc = false;
698 fp->state |= BNX2X_FP_STATE_DISABLED;
699 spin_unlock_bh(&fp->lock);
700
701 return rc;
702}
683#else 703#else
684static inline void bnx2x_fp_init_lock(struct bnx2x_fastpath *fp) 704static inline void bnx2x_fp_init_lock(struct bnx2x_fastpath *fp)
685{ 705{
@@ -709,6 +729,10 @@ static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp)
709{ 729{
710 return false; 730 return false;
711} 731}
732static inline bool bnx2x_fp_ll_disable(struct bnx2x_fastpath *fp)
733{
734 return true;
735}
712#endif /* CONFIG_NET_RX_BUSY_POLL */ 736#endif /* CONFIG_NET_RX_BUSY_POLL */
713 737
714/* Use 2500 as a mini-jumbo MTU for FCoE */ 738/* Use 2500 as a mini-jumbo MTU for FCoE */
@@ -1250,7 +1274,10 @@ struct bnx2x_slowpath {
1250 * Therefore, if they would have been defined in the same union, 1274 * Therefore, if they would have been defined in the same union,
1251 * data can get corrupted. 1275 * data can get corrupted.
1252 */ 1276 */
1253 struct afex_vif_list_ramrod_data func_afex_rdata; 1277 union {
1278 struct afex_vif_list_ramrod_data viflist_data;
1279 struct function_update_data func_update;
1280 } func_afex_rdata;
1254 1281
1255 /* used by dmae command executer */ 1282 /* used by dmae command executer */
1256 struct dmae_command dmae[MAX_DMAE_C]; 1283 struct dmae_command dmae[MAX_DMAE_C];
@@ -2499,4 +2526,6 @@ void bnx2x_set_local_cmng(struct bnx2x *bp);
2499#define MCPR_SCRATCH_BASE(bp) \ 2526#define MCPR_SCRATCH_BASE(bp) \
2500 (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH) 2527 (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
2501 2528
2529#define E1H_MAX_MF_SB_COUNT (HC_SB_MAX_SB_E1X/(E1HVN_MAX * PORT_MAX))
2530
2502#endif /* bnx2x.h */ 2531#endif /* bnx2x.h */
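
The bnx2x.h hunk above adds a DISABLED state bit, switches the fastpath helpers to spin_lock_bh(), and introduces bnx2x_fp_ll_disable(), which the napi_disable paths in bnx2x_cmn.c further down use to quiesce a queue: disable marks the queue, and the unlock paths drop back to idle only if it has not been disabled meanwhile. A standalone model of that ownership/disable protocol with a mutex; the flag values and names are invented, only the shape follows the hunk:

    /* Standalone model of the owned/disabled fastpath state protocol.
     * Build with: cc -pthread example.c */
    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define FP_IDLE      0
    #define FP_NAPI      (1 << 0)
    #define FP_POLL      (1 << 1)
    #define FP_DISABLED  (1 << 2)
    #define FP_OWNED     (FP_NAPI | FP_POLL)

    struct fastpath {
            pthread_mutex_t lock;
            unsigned int state;
    };

    static bool fp_lock_napi(struct fastpath *fp)
    {
            bool got = false;

            pthread_mutex_lock(&fp->lock);
            if (!(fp->state & (FP_OWNED | FP_DISABLED))) {
                    fp->state = FP_NAPI;
                    got = true;
            }
            pthread_mutex_unlock(&fp->lock);
            return got;
    }

    static void fp_unlock_napi(struct fastpath *fp)
    {
            pthread_mutex_lock(&fp->lock);
            /* state ==> idle, unless currently disabled */
            fp->state &= FP_DISABLED;
            pthread_mutex_unlock(&fp->lock);
    }

    static bool fp_disable(struct fastpath *fp)
    {
            bool idle;

            pthread_mutex_lock(&fp->lock);
            idle = !(fp->state & FP_OWNED);
            fp->state |= FP_DISABLED;   /* sticky until the queue restarts */
            pthread_mutex_unlock(&fp->lock);
            return idle;                /* false: caller must wait and retry */
    }

    int main(void)
    {
            struct fastpath fp = { .lock = PTHREAD_MUTEX_INITIALIZER,
                                   .state = FP_IDLE };

            printf("napi lock: %d\n", fp_lock_napi(&fp));   /* 1 */
            printf("disable:   %d\n", fp_disable(&fp));     /* 0, still owned */
            fp_unlock_napi(&fp);
            printf("disable:   %d\n", fp_disable(&fp));     /* 1, now quiesced */
            printf("napi lock: %d\n", fp_lock_napi(&fp));   /* 0, disabled */
            return 0;
    }
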
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index ec96130533cc..bf811565ee24 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -160,6 +160,7 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
160 struct sk_buff *skb = tx_buf->skb; 160 struct sk_buff *skb = tx_buf->skb;
161 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons; 161 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
162 int nbd; 162 int nbd;
163 u16 split_bd_len = 0;
163 164
164 /* prefetch skb end pointer to speedup dev_kfree_skb() */ 165 /* prefetch skb end pointer to speedup dev_kfree_skb() */
165 prefetch(&skb->end); 166 prefetch(&skb->end);
@@ -167,10 +168,7 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
167 DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n", 168 DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
168 txdata->txq_index, idx, tx_buf, skb); 169 txdata->txq_index, idx, tx_buf, skb);
169 170
170 /* unmap first bd */
171 tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd; 171 tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
172 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
173 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
174 172
175 nbd = le16_to_cpu(tx_start_bd->nbd) - 1; 173 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
176#ifdef BNX2X_STOP_ON_ERROR 174#ifdef BNX2X_STOP_ON_ERROR
@@ -188,12 +186,19 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
188 --nbd; 186 --nbd;
189 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx)); 187 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
190 188
191 /* ...and the TSO split header bd since they have no mapping */ 189 /* TSO headers+data bds share a common mapping. See bnx2x_tx_split() */
192 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) { 190 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
191 tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
192 split_bd_len = BD_UNMAP_LEN(tx_data_bd);
193 --nbd; 193 --nbd;
194 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx)); 194 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
195 } 195 }
196 196
197 /* unmap first bd */
198 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
199 BD_UNMAP_LEN(tx_start_bd) + split_bd_len,
200 DMA_TO_DEVICE);
201
197 /* now free frags */ 202 /* now free frags */
198 while (nbd > 0) { 203 while (nbd > 0) {
199 204
@@ -1790,26 +1795,22 @@ static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
1790{ 1795{
1791 int i; 1796 int i;
1792 1797
1793 local_bh_disable();
1794 for_each_rx_queue_cnic(bp, i) { 1798 for_each_rx_queue_cnic(bp, i) {
1795 napi_disable(&bnx2x_fp(bp, i, napi)); 1799 napi_disable(&bnx2x_fp(bp, i, napi));
1796 while (!bnx2x_fp_lock_napi(&bp->fp[i])) 1800 while (!bnx2x_fp_ll_disable(&bp->fp[i]))
1797 mdelay(1); 1801 usleep_range(1000, 2000);
1798 } 1802 }
1799 local_bh_enable();
1800} 1803}
1801 1804
1802static void bnx2x_napi_disable(struct bnx2x *bp) 1805static void bnx2x_napi_disable(struct bnx2x *bp)
1803{ 1806{
1804 int i; 1807 int i;
1805 1808
1806 local_bh_disable();
1807 for_each_eth_queue(bp, i) { 1809 for_each_eth_queue(bp, i) {
1808 napi_disable(&bnx2x_fp(bp, i, napi)); 1810 napi_disable(&bnx2x_fp(bp, i, napi));
1809 while (!bnx2x_fp_lock_napi(&bp->fp[i])) 1811 while (!bnx2x_fp_ll_disable(&bp->fp[i]))
1810 mdelay(1); 1812 usleep_range(1000, 2000);
1811 } 1813 }
1812 local_bh_enable();
1813} 1814}
1814 1815
1815void bnx2x_netif_start(struct bnx2x *bp) 1816void bnx2x_netif_start(struct bnx2x *bp)
@@ -1832,7 +1833,8 @@ void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1832 bnx2x_napi_disable_cnic(bp); 1833 bnx2x_napi_disable_cnic(bp);
1833} 1834}
1834 1835
1835u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb) 1836u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
1837 void *accel_priv)
1836{ 1838{
1837 struct bnx2x *bp = netdev_priv(dev); 1839 struct bnx2x *bp = netdev_priv(dev);
1838 1840
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index da8fcaa74495..41f3ca5ad972 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -524,7 +524,8 @@ int bnx2x_set_vf_mac(struct net_device *dev, int queue, u8 *mac);
524int bnx2x_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos); 524int bnx2x_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos);
525 525
526/* select_queue callback */ 526/* select_queue callback */
527u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb); 527u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
528 void *accel_priv);
528 529
529static inline void bnx2x_update_rx_prod(struct bnx2x *bp, 530static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
530 struct bnx2x_fastpath *fp, 531 struct bnx2x_fastpath *fp,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 20dcc02431ca..11fc79585491 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -3865,6 +3865,19 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
3865 3865
3866 bnx2x_warpcore_enable_AN_KR2(phy, params, vars); 3866 bnx2x_warpcore_enable_AN_KR2(phy, params, vars);
3867 } else { 3867 } else {
3868 /* Enable Auto-Detect to support 1G over CL37 as well */
3869 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3870 MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, 0x10);
3871
3872 /* Force cl48 sync_status LOW to avoid getting stuck in CL73
3873 * parallel-detect loop when CL73 and CL37 are enabled.
3874 */
3875 CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
3876 MDIO_AER_BLOCK_AER_REG, 0);
3877 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3878 MDIO_WC_REG_RXB_ANA_RX_CONTROL_PCI, 0x0800);
3879 bnx2x_set_aer_mmd(params, phy);
3880
3868 bnx2x_disable_kr2(params, vars, phy); 3881 bnx2x_disable_kr2(params, vars, phy);
3869 } 3882 }
3870 3883
@@ -8120,17 +8133,20 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
8120 *edc_mode = EDC_MODE_ACTIVE_DAC; 8133 *edc_mode = EDC_MODE_ACTIVE_DAC;
8121 else 8134 else
8122 check_limiting_mode = 1; 8135 check_limiting_mode = 1;
8123 } else if (copper_module_type & 8136 } else {
8124 SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE) { 8137 *edc_mode = EDC_MODE_PASSIVE_DAC;
8138 /* Even in case PASSIVE_DAC indication is not set,
8139 * treat it as a passive DAC cable, since some cables
8140 * don't have this indication.
8141 */
8142 if (copper_module_type &
8143 SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE) {
8125 DP(NETIF_MSG_LINK, 8144 DP(NETIF_MSG_LINK,
8126 "Passive Copper cable detected\n"); 8145 "Passive Copper cable detected\n");
8127 *edc_mode = 8146 } else {
8128 EDC_MODE_PASSIVE_DAC; 8147 DP(NETIF_MSG_LINK,
8129 } else { 8148 "Unknown copper-cable-type\n");
8130 DP(NETIF_MSG_LINK, 8149 }
8131 "Unknown copper-cable-type 0x%x !!!\n",
8132 copper_module_type);
8133 return -EINVAL;
8134 } 8150 }
8135 break; 8151 break;
8136 } 8152 }
@@ -10825,9 +10841,9 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
10825 (1<<11)); 10841 (1<<11));
10826 10842
10827 if (((phy->req_line_speed == SPEED_AUTO_NEG) && 10843 if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
10828 (phy->speed_cap_mask & 10844 (phy->speed_cap_mask &
10829 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) || 10845 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) ||
10830 (phy->req_line_speed == SPEED_1000)) { 10846 (phy->req_line_speed == SPEED_1000)) {
10831 an_1000_val |= (1<<8); 10847 an_1000_val |= (1<<8);
10832 autoneg_val |= (1<<9 | 1<<12); 10848 autoneg_val |= (1<<9 | 1<<12);
10833 if (phy->req_duplex == DUPLEX_FULL) 10849 if (phy->req_duplex == DUPLEX_FULL)
@@ -10843,30 +10859,32 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
10843 0x09, 10859 0x09,
10844 &an_1000_val); 10860 &an_1000_val);
10845 10861
10846 /* Set 100 speed advertisement */ 10862 /* Advertise 10/100 link speed */
10847 if (((phy->req_line_speed == SPEED_AUTO_NEG) && 10863 if (phy->req_line_speed == SPEED_AUTO_NEG) {
10848 (phy->speed_cap_mask & 10864 if (phy->speed_cap_mask &
10849 (PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL | 10865 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF) {
10850 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)))) { 10866 an_10_100_val |= (1<<5);
10851 an_10_100_val |= (1<<7); 10867 autoneg_val |= (1<<9 | 1<<12);
10852 /* Enable autoneg and restart autoneg for legacy speeds */ 10868 DP(NETIF_MSG_LINK, "Advertising 10M-HD\n");
10853 autoneg_val |= (1<<9 | 1<<12); 10869 }
10854 10870 if (phy->speed_cap_mask &
10855 if (phy->req_duplex == DUPLEX_FULL) 10871 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL) {
10856 an_10_100_val |= (1<<8);
10857 DP(NETIF_MSG_LINK, "Advertising 100M\n");
10858 }
10859
10860 /* Set 10 speed advertisement */
10861 if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
10862 (phy->speed_cap_mask &
10863 (PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL |
10864 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)))) {
10865 an_10_100_val |= (1<<5);
10866 autoneg_val |= (1<<9 | 1<<12);
10867 if (phy->req_duplex == DUPLEX_FULL)
10868 an_10_100_val |= (1<<6); 10872 an_10_100_val |= (1<<6);
10869 DP(NETIF_MSG_LINK, "Advertising 10M\n"); 10873 autoneg_val |= (1<<9 | 1<<12);
10874 DP(NETIF_MSG_LINK, "Advertising 10M-FD\n");
10875 }
10876 if (phy->speed_cap_mask &
10877 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF) {
10878 an_10_100_val |= (1<<7);
10879 autoneg_val |= (1<<9 | 1<<12);
10880 DP(NETIF_MSG_LINK, "Advertising 100M-HD\n");
10881 }
10882 if (phy->speed_cap_mask &
10883 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL) {
10884 an_10_100_val |= (1<<8);
10885 autoneg_val |= (1<<9 | 1<<12);
10886 DP(NETIF_MSG_LINK, "Advertising 100M-FD\n");
10887 }
10870 } 10888 }
10871 10889
10872 /* Only 10/100 are allowed to work in FORCE mode */ 10890 /* Only 10/100 are allowed to work in FORCE mode */
@@ -13342,6 +13360,10 @@ static u8 bnx2x_analyze_link_error(struct link_params *params,
13342 DP(NETIF_MSG_LINK, "Link changed:[%x %x]->%x\n", vars->link_up, 13360 DP(NETIF_MSG_LINK, "Link changed:[%x %x]->%x\n", vars->link_up,
13343 old_status, status); 13361 old_status, status);
13344 13362
13363 /* Do not touch the link in case physical link down */
13364 if ((vars->phy_flags & PHY_PHYSICAL_LINK_FLAG) == 0)
13365 return 1;
13366
13345 /* a. Update shmem->link_status accordingly 13367 /* a. Update shmem->link_status accordingly
13346 * b. Update link_vars->link_up 13368 * b. Update link_vars->link_up
13347 */ 13369 */
@@ -13550,7 +13572,7 @@ static void bnx2x_check_kr2_wa(struct link_params *params,
13550 */ 13572 */
13551 not_kr2_device = (((base_page & 0x8000) == 0) || 13573 not_kr2_device = (((base_page & 0x8000) == 0) ||
13552 (((base_page & 0x8000) && 13574 (((base_page & 0x8000) &&
13553 ((next_page & 0xe0) == 0x2)))); 13575 ((next_page & 0xe0) == 0x20))));
13554 13576
13555 /* In case KR2 is already disabled, check if we need to re-enable it */ 13577 /* In case KR2 is already disabled, check if we need to re-enable it */
13556 if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) { 13578 if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 814d0eca9b33..8b3107b2fcc1 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -11447,9 +11447,9 @@ static int bnx2x_get_hwinfo(struct bnx2x *bp)
11447 } 11447 }
11448 } 11448 }
11449 11449
11450 /* adjust igu_sb_cnt to MF for E1x */ 11450 /* adjust igu_sb_cnt to MF for E1H */
11451 if (CHIP_IS_E1x(bp) && IS_MF(bp)) 11451 if (CHIP_IS_E1H(bp) && IS_MF(bp))
11452 bp->igu_sb_cnt /= E1HVN_MAX; 11452 bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt, E1H_MAX_MF_SB_COUNT);
11453 11453
11454 /* port info */ 11454 /* port info */
11455 bnx2x_get_port_hwinfo(bp); 11455 bnx2x_get_port_hwinfo(bp);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index 3efbb35267c8..14ffb6e56e59 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -7179,6 +7179,7 @@ The other bits are reserved and should be zero*/
7179#define MDIO_WC_REG_RX1_PCI_CTRL 0x80ca 7179#define MDIO_WC_REG_RX1_PCI_CTRL 0x80ca
7180#define MDIO_WC_REG_RX2_PCI_CTRL 0x80da 7180#define MDIO_WC_REG_RX2_PCI_CTRL 0x80da
7181#define MDIO_WC_REG_RX3_PCI_CTRL 0x80ea 7181#define MDIO_WC_REG_RX3_PCI_CTRL 0x80ea
7182#define MDIO_WC_REG_RXB_ANA_RX_CONTROL_PCI 0x80fa
7182#define MDIO_WC_REG_XGXSBLK2_UNICORE_MODE_10G 0x8104 7183#define MDIO_WC_REG_XGXSBLK2_UNICORE_MODE_10G 0x8104
7183#define MDIO_WC_REG_XGXS_STATUS3 0x8129 7184#define MDIO_WC_REG_XGXS_STATUS3 0x8129
7184#define MDIO_WC_REG_PAR_DET_10G_STATUS 0x8130 7185#define MDIO_WC_REG_PAR_DET_10G_STATUS 0x8130
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 32c92abf5094..18438a504d57 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -2038,6 +2038,7 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
2038 struct bnx2x_vlan_mac_ramrod_params p; 2038 struct bnx2x_vlan_mac_ramrod_params p;
2039 struct bnx2x_exe_queue_obj *exeq = &o->exe_queue; 2039 struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
2040 struct bnx2x_exeq_elem *exeq_pos, *exeq_pos_n; 2040 struct bnx2x_exeq_elem *exeq_pos, *exeq_pos_n;
2041 unsigned long flags;
2041 int read_lock; 2042 int read_lock;
2042 int rc = 0; 2043 int rc = 0;
2043 2044
@@ -2046,8 +2047,9 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
2046 spin_lock_bh(&exeq->lock); 2047 spin_lock_bh(&exeq->lock);
2047 2048
2048 list_for_each_entry_safe(exeq_pos, exeq_pos_n, &exeq->exe_queue, link) { 2049 list_for_each_entry_safe(exeq_pos, exeq_pos_n, &exeq->exe_queue, link) {
2049 if (exeq_pos->cmd_data.vlan_mac.vlan_mac_flags == 2050 flags = exeq_pos->cmd_data.vlan_mac.vlan_mac_flags;
2050 *vlan_mac_flags) { 2051 if (BNX2X_VLAN_MAC_CMP_FLAGS(flags) ==
2052 BNX2X_VLAN_MAC_CMP_FLAGS(*vlan_mac_flags)) {
2051 rc = exeq->remove(bp, exeq->owner, exeq_pos); 2053 rc = exeq->remove(bp, exeq->owner, exeq_pos);
2052 if (rc) { 2054 if (rc) {
2053 BNX2X_ERR("Failed to remove command\n"); 2055 BNX2X_ERR("Failed to remove command\n");
@@ -2080,7 +2082,9 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
2080 return read_lock; 2082 return read_lock;
2081 2083
2082 list_for_each_entry(pos, &o->head, link) { 2084 list_for_each_entry(pos, &o->head, link) {
2083 if (pos->vlan_mac_flags == *vlan_mac_flags) { 2085 flags = pos->vlan_mac_flags;
2086 if (BNX2X_VLAN_MAC_CMP_FLAGS(flags) ==
2087 BNX2X_VLAN_MAC_CMP_FLAGS(*vlan_mac_flags)) {
2084 p.user_req.vlan_mac_flags = pos->vlan_mac_flags; 2088 p.user_req.vlan_mac_flags = pos->vlan_mac_flags;
2085 memcpy(&p.user_req.u, &pos->u, sizeof(pos->u)); 2089 memcpy(&p.user_req.u, &pos->u, sizeof(pos->u));
2086 rc = bnx2x_config_vlan_mac(bp, &p); 2090 rc = bnx2x_config_vlan_mac(bp, &p);
@@ -4382,8 +4386,11 @@ int bnx2x_config_rss(struct bnx2x *bp,
4382 struct bnx2x_raw_obj *r = &o->raw; 4386 struct bnx2x_raw_obj *r = &o->raw;
4383 4387
4384 /* Do nothing if only driver cleanup was requested */ 4388 /* Do nothing if only driver cleanup was requested */
4385 if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) 4389 if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
4390 DP(BNX2X_MSG_SP, "Not configuring RSS ramrod_flags=%lx\n",
4391 p->ramrod_flags);
4386 return 0; 4392 return 0;
4393 }
4387 4394
4388 r->set_pending(r); 4395 r->set_pending(r);
4389 4396
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
index 658f4e33abf9..6a53c15c85a3 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
@@ -266,6 +266,13 @@ enum {
266 BNX2X_DONT_CONSUME_CAM_CREDIT, 266 BNX2X_DONT_CONSUME_CAM_CREDIT,
267 BNX2X_DONT_CONSUME_CAM_CREDIT_DEST, 267 BNX2X_DONT_CONSUME_CAM_CREDIT_DEST,
268}; 268};
269/* When looking for matching filters, some flags are not interesting */
270#define BNX2X_VLAN_MAC_CMP_MASK (1 << BNX2X_UC_LIST_MAC | \
271 1 << BNX2X_ETH_MAC | \
272 1 << BNX2X_ISCSI_ETH_MAC | \
273 1 << BNX2X_NETQ_ETH_MAC)
274#define BNX2X_VLAN_MAC_CMP_FLAGS(flags) \
275 ((flags) & BNX2X_VLAN_MAC_CMP_MASK)
269 276
270struct bnx2x_vlan_mac_ramrod_params { 277struct bnx2x_vlan_mac_ramrod_params {
271 /* Object to run the command from */ 278 /* Object to run the command from */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 0216d592d0ce..e7845e5be1c7 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -1209,6 +1209,11 @@ static void bnx2x_vfop_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf)
1209 /* next state */ 1209 /* next state */
1210 vfop->state = BNX2X_VFOP_RXMODE_DONE; 1210 vfop->state = BNX2X_VFOP_RXMODE_DONE;
1211 1211
1212 /* record the accept flags in vfdb so hypervisor can modify them
1213 * if necessary
1214 */
1215 bnx2x_vfq(vf, ramrod->cl_id - vf->igu_base_id, accept_flags) =
1216 ramrod->rx_accept_flags;
1212 vfop->rc = bnx2x_config_rx_mode(bp, ramrod); 1217 vfop->rc = bnx2x_config_rx_mode(bp, ramrod);
1213 bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE); 1218 bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
1214op_err: 1219op_err:
@@ -1224,39 +1229,43 @@ op_pending:
1224 return; 1229 return;
1225} 1230}
1226 1231
1232static void bnx2x_vf_prep_rx_mode(struct bnx2x *bp, u8 qid,
1233 struct bnx2x_rx_mode_ramrod_params *ramrod,
1234 struct bnx2x_virtf *vf,
1235 unsigned long accept_flags)
1236{
1237 struct bnx2x_vf_queue *vfq = vfq_get(vf, qid);
1238
1239 memset(ramrod, 0, sizeof(*ramrod));
1240 ramrod->cid = vfq->cid;
1241 ramrod->cl_id = vfq_cl_id(vf, vfq);
1242 ramrod->rx_mode_obj = &bp->rx_mode_obj;
1243 ramrod->func_id = FW_VF_HANDLE(vf->abs_vfid);
1244 ramrod->rx_accept_flags = accept_flags;
1245 ramrod->tx_accept_flags = accept_flags;
1246 ramrod->pstate = &vf->filter_state;
1247 ramrod->state = BNX2X_FILTER_RX_MODE_PENDING;
1248
1249 set_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
1250 set_bit(RAMROD_RX, &ramrod->ramrod_flags);
1251 set_bit(RAMROD_TX, &ramrod->ramrod_flags);
1252
1253 ramrod->rdata = bnx2x_vf_sp(bp, vf, rx_mode_rdata.e2);
1254 ramrod->rdata_mapping = bnx2x_vf_sp_map(bp, vf, rx_mode_rdata.e2);
1255}
1256
1227int bnx2x_vfop_rxmode_cmd(struct bnx2x *bp, 1257int bnx2x_vfop_rxmode_cmd(struct bnx2x *bp,
1228 struct bnx2x_virtf *vf, 1258 struct bnx2x_virtf *vf,
1229 struct bnx2x_vfop_cmd *cmd, 1259 struct bnx2x_vfop_cmd *cmd,
1230 int qid, unsigned long accept_flags) 1260 int qid, unsigned long accept_flags)
1231{ 1261{
1232 struct bnx2x_vf_queue *vfq = vfq_get(vf, qid);
1233 struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); 1262 struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
1234 1263
1235 if (vfop) { 1264 if (vfop) {
1236 struct bnx2x_rx_mode_ramrod_params *ramrod = 1265 struct bnx2x_rx_mode_ramrod_params *ramrod =
1237 &vf->op_params.rx_mode; 1266 &vf->op_params.rx_mode;
1238 1267
1239 memset(ramrod, 0, sizeof(*ramrod)); 1268 bnx2x_vf_prep_rx_mode(bp, qid, ramrod, vf, accept_flags);
1240
1241 /* Prepare ramrod parameters */
1242 ramrod->cid = vfq->cid;
1243 ramrod->cl_id = vfq_cl_id(vf, vfq);
1244 ramrod->rx_mode_obj = &bp->rx_mode_obj;
1245 ramrod->func_id = FW_VF_HANDLE(vf->abs_vfid);
1246
1247 ramrod->rx_accept_flags = accept_flags;
1248 ramrod->tx_accept_flags = accept_flags;
1249 ramrod->pstate = &vf->filter_state;
1250 ramrod->state = BNX2X_FILTER_RX_MODE_PENDING;
1251
1252 set_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
1253 set_bit(RAMROD_RX, &ramrod->ramrod_flags);
1254 set_bit(RAMROD_TX, &ramrod->ramrod_flags);
1255
1256 ramrod->rdata =
1257 bnx2x_vf_sp(bp, vf, rx_mode_rdata.e2);
1258 ramrod->rdata_mapping =
1259 bnx2x_vf_sp_map(bp, vf, rx_mode_rdata.e2);
1260 1269
1261 bnx2x_vfop_opset(BNX2X_VFOP_RXMODE_CONFIG, 1270 bnx2x_vfop_opset(BNX2X_VFOP_RXMODE_CONFIG,
1262 bnx2x_vfop_rxmode, cmd->done); 1271 bnx2x_vfop_rxmode, cmd->done);
@@ -3114,6 +3123,11 @@ int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param)
3114{ 3123{
3115 struct bnx2x *bp = netdev_priv(pci_get_drvdata(dev)); 3124 struct bnx2x *bp = netdev_priv(pci_get_drvdata(dev));
3116 3125
3126 if (!IS_SRIOV(bp)) {
3127 BNX2X_ERR("failed to configure SR-IOV since vfdb was not allocated. Check dmesg for errors in probe stage\n");
3128 return -EINVAL;
3129 }
3130
3117 DP(BNX2X_MSG_IOV, "bnx2x_sriov_configure called with %d, BNX2X_NR_VIRTFN(bp) was %d\n", 3131 DP(BNX2X_MSG_IOV, "bnx2x_sriov_configure called with %d, BNX2X_NR_VIRTFN(bp) was %d\n",
3118 num_vfs_param, BNX2X_NR_VIRTFN(bp)); 3132 num_vfs_param, BNX2X_NR_VIRTFN(bp));
3119 3133
@@ -3197,13 +3211,16 @@ int bnx2x_enable_sriov(struct bnx2x *bp)
3197 bnx2x_iov_static_resc(bp, vf); 3211 bnx2x_iov_static_resc(bp, vf);
3198 } 3212 }
3199 3213
3200 /* prepare msix vectors in VF configuration space */ 3214 /* prepare msix vectors in VF configuration space - the value in the
3215 * PCI configuration space should be the index of the last entry,
3216 * namely one less than the actual size of the table
3217 */
3201 for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) { 3218 for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) {
3202 bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf_idx)); 3219 bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf_idx));
3203 REG_WR(bp, PCICFG_OFFSET + GRC_CONFIG_REG_VF_MSIX_CONTROL, 3220 REG_WR(bp, PCICFG_OFFSET + GRC_CONFIG_REG_VF_MSIX_CONTROL,
3204 num_vf_queues); 3221 num_vf_queues - 1);
3205 DP(BNX2X_MSG_IOV, "set msix vec num in VF %d cfg space to %d\n", 3222 DP(BNX2X_MSG_IOV, "set msix vec num in VF %d cfg space to %d\n",
3206 vf_idx, num_vf_queues); 3223 vf_idx, num_vf_queues - 1);
3207 } 3224 }
3208 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); 3225 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
3209 3226
@@ -3431,10 +3448,18 @@ out:
3431 3448
3432int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos) 3449int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
3433{ 3450{
3451 struct bnx2x_queue_state_params q_params = {NULL};
3452 struct bnx2x_vlan_mac_ramrod_params ramrod_param;
3453 struct bnx2x_queue_update_params *update_params;
3454 struct pf_vf_bulletin_content *bulletin = NULL;
3455 struct bnx2x_rx_mode_ramrod_params rx_ramrod;
3434 struct bnx2x *bp = netdev_priv(dev); 3456 struct bnx2x *bp = netdev_priv(dev);
3435 int rc, q_logical_state; 3457 struct bnx2x_vlan_mac_obj *vlan_obj;
3458 unsigned long vlan_mac_flags = 0;
3459 unsigned long ramrod_flags = 0;
3436 struct bnx2x_virtf *vf = NULL; 3460 struct bnx2x_virtf *vf = NULL;
3437 struct pf_vf_bulletin_content *bulletin = NULL; 3461 unsigned long accept_flags;
3462 int rc;
3438 3463
3439 /* sanity and init */ 3464 /* sanity and init */
3440 rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin); 3465 rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin);
@@ -3452,104 +3477,118 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
3452 /* update PF's copy of the VF's bulletin. No point in posting the vlan 3477 /* update PF's copy of the VF's bulletin. No point in posting the vlan
3453 * to the VF since it doesn't have anything to do with it. But it is useful 3478 * to the VF since it doesn't have anything to do with it. But it is useful
3454 * to store it here in case the VF is not up yet and we can only 3479 * to store it here in case the VF is not up yet and we can only
3455 * configure the vlan later when it does. 3480 * configure the vlan later when it does. Treat vlan id 0 as remove the
3481 * Host tag.
3456 */ 3482 */
3457 bulletin->valid_bitmap |= 1 << VLAN_VALID; 3483 if (vlan > 0)
3484 bulletin->valid_bitmap |= 1 << VLAN_VALID;
3485 else
3486 bulletin->valid_bitmap &= ~(1 << VLAN_VALID);
3458 bulletin->vlan = vlan; 3487 bulletin->vlan = vlan;
3459 3488
3460 /* is vf initialized and queue set up? */ 3489 /* is vf initialized and queue set up? */
3461 q_logical_state = 3490 if (vf->state != VF_ENABLED ||
3462 bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj)); 3491 bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj)) !=
3463 if (vf->state == VF_ENABLED && 3492 BNX2X_Q_LOGICAL_STATE_ACTIVE)
3464 q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) { 3493 return rc;
3465 /* configure the vlan in device on this vf's queue */
3466 unsigned long ramrod_flags = 0;
3467 unsigned long vlan_mac_flags = 0;
3468 struct bnx2x_vlan_mac_obj *vlan_obj =
3469 &bnx2x_leading_vfq(vf, vlan_obj);
3470 struct bnx2x_vlan_mac_ramrod_params ramrod_param;
3471 struct bnx2x_queue_state_params q_params = {NULL};
3472 struct bnx2x_queue_update_params *update_params;
3473 3494
3474 rc = validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj)); 3495 /* configure the vlan in device on this vf's queue */
3475 if (rc) 3496 vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj);
3476 return rc; 3497 rc = validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj));
3477 memset(&ramrod_param, 0, sizeof(ramrod_param)); 3498 if (rc)
3499 return rc;
3478 3500
3479 /* must lock vfpf channel to protect against vf flows */ 3501 /* must lock vfpf channel to protect against vf flows */
3480 bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN); 3502 bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
3481 3503
3482 /* remove existing vlans */ 3504 /* remove existing vlans */
3483 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); 3505 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
3484 rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_mac_flags, 3506 rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_mac_flags,
3485 &ramrod_flags); 3507 &ramrod_flags);
3486 if (rc) { 3508 if (rc) {
3487 BNX2X_ERR("failed to delete vlans\n"); 3509 BNX2X_ERR("failed to delete vlans\n");
3488 rc = -EINVAL; 3510 rc = -EINVAL;
3489 goto out; 3511 goto out;
3490 } 3512 }
3513
3514 /* need to remove/add the VF's accept_any_vlan bit */
3515 accept_flags = bnx2x_leading_vfq(vf, accept_flags);
3516 if (vlan)
3517 clear_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
3518 else
3519 set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
3520
3521 bnx2x_vf_prep_rx_mode(bp, LEADING_IDX, &rx_ramrod, vf,
3522 accept_flags);
3523 bnx2x_leading_vfq(vf, accept_flags) = accept_flags;
3524 bnx2x_config_rx_mode(bp, &rx_ramrod);
3525
3526 /* configure the new vlan to device */
3527 memset(&ramrod_param, 0, sizeof(ramrod_param));
3528 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
3529 ramrod_param.vlan_mac_obj = vlan_obj;
3530 ramrod_param.ramrod_flags = ramrod_flags;
3531 set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
3532 &ramrod_param.user_req.vlan_mac_flags);
3533 ramrod_param.user_req.u.vlan.vlan = vlan;
3534 ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
3535 rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
3536 if (rc) {
3537 BNX2X_ERR("failed to configure vlan\n");
3538 rc = -EINVAL;
3539 goto out;
3540 }
3491 3541
3492 /* send queue update ramrod to configure default vlan and silent 3542 /* send queue update ramrod to configure default vlan and silent
3493 * vlan removal 3543 * vlan removal
3544 */
3545 __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
3546 q_params.cmd = BNX2X_Q_CMD_UPDATE;
3547 q_params.q_obj = &bnx2x_leading_vfq(vf, sp_obj);
3548 update_params = &q_params.params.update;
3549 __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
3550 &update_params->update_flags);
3551 __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
3552 &update_params->update_flags);
3553 if (vlan == 0) {
3554 /* if vlan is 0 then we want to leave the VF traffic
3555 * untagged, and leave the incoming traffic untouched
3556 * (i.e. do not remove any vlan tags).
3557 */
3558 __clear_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
3559 &update_params->update_flags);
3560 __clear_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
3561 &update_params->update_flags);
3562 } else {
3563 /* configure default vlan to vf queue and set silent
3564 * vlan removal (the vf remains unaware of this vlan).
3494 */ 3565 */
3495 __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); 3566 __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
3496 q_params.cmd = BNX2X_Q_CMD_UPDATE;
3497 q_params.q_obj = &bnx2x_leading_vfq(vf, sp_obj);
3498 update_params = &q_params.params.update;
3499 __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
3500 &update_params->update_flags); 3567 &update_params->update_flags);
3501 __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG, 3568 __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
3502 &update_params->update_flags); 3569 &update_params->update_flags);
3570 update_params->def_vlan = vlan;
3571 update_params->silent_removal_value =
3572 vlan & VLAN_VID_MASK;
3573 update_params->silent_removal_mask = VLAN_VID_MASK;
3574 }
3503 3575
3504 if (vlan == 0) { 3576 /* Update the Queue state */
3505 /* if vlan is 0 then we want to leave the VF traffic 3577 rc = bnx2x_queue_state_change(bp, &q_params);
3506 * untagged, and leave the incoming traffic untouched 3578 if (rc) {
3507 * (i.e. do not remove any vlan tags). 3579 BNX2X_ERR("Failed to configure default VLAN\n");
3508 */ 3580 goto out;
3509 __clear_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, 3581 }
3510 &update_params->update_flags);
3511 __clear_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
3512 &update_params->update_flags);
3513 } else {
3514 /* configure the new vlan to device */
3515 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
3516 ramrod_param.vlan_mac_obj = vlan_obj;
3517 ramrod_param.ramrod_flags = ramrod_flags;
3518 ramrod_param.user_req.u.vlan.vlan = vlan;
3519 ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
3520 rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
3521 if (rc) {
3522 BNX2X_ERR("failed to configure vlan\n");
3523 rc = -EINVAL;
3524 goto out;
3525 }
3526
3527 /* configure default vlan to vf queue and set silent
3528 * vlan removal (the vf remains unaware of this vlan).
3529 */
3530 update_params = &q_params.params.update;
3531 __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
3532 &update_params->update_flags);
3533 __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
3534 &update_params->update_flags);
3535 update_params->def_vlan = vlan;
3536 }
3537 3582
3538 /* Update the Queue state */
3539 rc = bnx2x_queue_state_change(bp, &q_params);
3540 if (rc) {
3541 BNX2X_ERR("Failed to configure default VLAN\n");
3542 goto out;
3543 }
3544 3583
3545 /* clear the flag indicating that this VF needs its vlan 3584 /* clear the flag indicating that this VF needs its vlan
3546 * (will only be set if the HV configured the Vlan before vf was 3585 * (will only be set if the HV configured the Vlan before vf was
3547 * up and we were called because the VF came up later 3586 * up and we were called because the VF came up later
3548 */ 3587 */
3549out: 3588out:
3550 vf->cfg_flags &= ~VF_CFG_VLAN; 3589 vf->cfg_flags &= ~VF_CFG_VLAN;
3551 bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN); 3590 bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
3552 } 3591
3553 return rc; 3592 return rc;
3554} 3593}
3555 3594
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
index 1ff6a9366629..8c213fa52174 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
@@ -74,6 +74,7 @@ struct bnx2x_vf_queue {
74 /* VLANs object */ 74 /* VLANs object */
75 struct bnx2x_vlan_mac_obj vlan_obj; 75 struct bnx2x_vlan_mac_obj vlan_obj;
76 atomic_t vlan_count; /* 0 means vlan-0 is set ~ untagged */ 76 atomic_t vlan_count; /* 0 means vlan-0 is set ~ untagged */
77 unsigned long accept_flags; /* last accept flags configured */
77 78
78 /* Queue Slow-path State object */ 79 /* Queue Slow-path State object */
79 struct bnx2x_queue_sp_obj sp_obj; 80 struct bnx2x_queue_sp_obj sp_obj;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index efa8a151d789..0756d7dabdd5 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -208,7 +208,7 @@ static int bnx2x_get_vf_id(struct bnx2x *bp, u32 *vf_id)
208 return -EINVAL; 208 return -EINVAL;
209 } 209 }
210 210
211 BNX2X_ERR("valid ME register value: 0x%08x\n", me_reg); 211 DP(BNX2X_MSG_IOV, "valid ME register value: 0x%08x\n", me_reg);
212 212
213 *vf_id = (me_reg & ME_REG_VF_NUM_MASK) >> ME_REG_VF_NUM_SHIFT; 213 *vf_id = (me_reg & ME_REG_VF_NUM_MASK) >> ME_REG_VF_NUM_SHIFT;
214 214
@@ -1598,6 +1598,8 @@ static void bnx2x_vfop_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf)
1598 1598
1599 if (msg->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED) { 1599 if (msg->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED) {
1600 unsigned long accept = 0; 1600 unsigned long accept = 0;
1601 struct pf_vf_bulletin_content *bulletin =
1602 BP_VF_BULLETIN(bp, vf->index);
1601 1603
1602 /* covert VF-PF if mask to bnx2x accept flags */ 1604 /* covert VF-PF if mask to bnx2x accept flags */
1603 if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST) 1605 if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST)
@@ -1617,9 +1619,11 @@ static void bnx2x_vfop_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf)
1617 __set_bit(BNX2X_ACCEPT_BROADCAST, &accept); 1619 __set_bit(BNX2X_ACCEPT_BROADCAST, &accept);
1618 1620
1619 /* A packet arriving at the vf's mac should be accepted 1621 /* A packet arriving at the vf's mac should be accepted
1620 * with any vlan 1622 * with any vlan, unless a vlan has already been
1623 * configured.
1621 */ 1624 */
1622 __set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept); 1625 if (!(bulletin->valid_bitmap & (1 << VLAN_VALID)))
1626 __set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept);
1623 1627
1624 /* set rx-mode */ 1628 /* set rx-mode */
1625 rc = bnx2x_vfop_rxmode_cmd(bp, vf, &cmd, 1629 rc = bnx2x_vfop_rxmode_cmd(bp, vf, &cmd,
@@ -1710,6 +1714,21 @@ static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp,
1710 goto response; 1714 goto response;
1711 } 1715 }
1712 } 1716 }
1717 /* if vlan was set by hypervisor we don't allow guest to config vlan */
1718 if (bulletin->valid_bitmap & 1 << VLAN_VALID) {
1719 int i;
1720
1721 /* search for vlan filters */
1722 for (i = 0; i < filters->n_mac_vlan_filters; i++) {
1723 if (filters->filters[i].flags &
1724 VFPF_Q_FILTER_VLAN_TAG_VALID) {
1725 BNX2X_ERR("VF[%d] attempted to configure vlan but one was already set by Hypervisor. Aborting request\n",
1726 vf->abs_vfid);
1727 vf->op_rc = -EPERM;
1728 goto response;
1729 }
1730 }
1731 }
1713 1732
1714 /* verify vf_qid */ 1733 /* verify vf_qid */
1715 if (filters->vf_qid > vf_rxq_count(vf)) 1734 if (filters->vf_qid > vf_rxq_count(vf))
@@ -1805,6 +1824,9 @@ static void bnx2x_vf_mbx_update_rss(struct bnx2x *bp, struct bnx2x_virtf *vf,
1805 vf_op_params->rss_result_mask = rss_tlv->rss_result_mask; 1824 vf_op_params->rss_result_mask = rss_tlv->rss_result_mask;
1806 1825
1807 /* flags handled individually for backward/forward compatability */ 1826 /* flags handled individually for backward/forward compatability */
1827 vf_op_params->rss_flags = 0;
1828 vf_op_params->ramrod_flags = 0;
1829
1808 if (rss_tlv->rss_flags & VFPF_RSS_MODE_DISABLED) 1830 if (rss_tlv->rss_flags & VFPF_RSS_MODE_DISABLED)
1809 __set_bit(BNX2X_RSS_MODE_DISABLED, &vf_op_params->rss_flags); 1831 __set_bit(BNX2X_RSS_MODE_DISABLED, &vf_op_params->rss_flags);
1810 if (rss_tlv->rss_flags & VFPF_RSS_MODE_REGULAR) 1832 if (rss_tlv->rss_flags & VFPF_RSS_MODE_REGULAR)
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index a9e068423ba0..15a66e4b1f57 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -7622,7 +7622,7 @@ static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7622{ 7622{
7623 u32 base = (u32) mapping & 0xffffffff; 7623 u32 base = (u32) mapping & 0xffffffff;
7624 7624
7625 return (base > 0xffffdcc0) && (base + len + 8 < base); 7625 return base + len + 8 < base;
7626} 7626}
7627 7627
7628/* Test for TSO DMA buffers that cross into regions which are within MSS bytes 7628/* Test for TSO DMA buffers that cross into regions which are within MSS bytes
@@ -8932,6 +8932,9 @@ static int tg3_chip_reset(struct tg3 *tp)
8932 void (*write_op)(struct tg3 *, u32, u32); 8932 void (*write_op)(struct tg3 *, u32, u32);
8933 int i, err; 8933 int i, err;
8934 8934
8935 if (!pci_device_is_present(tp->pdev))
8936 return -ENODEV;
8937
8935 tg3_nvram_lock(tp); 8938 tg3_nvram_lock(tp);
8936 8939
8937 tg3_ape_lock(tp, TG3_APE_LOCK_GRC); 8940 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
@@ -10629,10 +10632,8 @@ static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10629static ssize_t tg3_show_temp(struct device *dev, 10632static ssize_t tg3_show_temp(struct device *dev,
10630 struct device_attribute *devattr, char *buf) 10633 struct device_attribute *devattr, char *buf)
10631{ 10634{
10632 struct pci_dev *pdev = to_pci_dev(dev);
10633 struct net_device *netdev = pci_get_drvdata(pdev);
10634 struct tg3 *tp = netdev_priv(netdev);
10635 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); 10635 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10636 struct tg3 *tp = dev_get_drvdata(dev);
10636 u32 temperature; 10637 u32 temperature;
10637 10638
10638 spin_lock_bh(&tp->lock); 10639 spin_lock_bh(&tp->lock);
@@ -10650,29 +10651,25 @@ static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
10650static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL, 10651static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
10651 TG3_TEMP_MAX_OFFSET); 10652 TG3_TEMP_MAX_OFFSET);
10652 10653
10653static struct attribute *tg3_attributes[] = { 10654static struct attribute *tg3_attrs[] = {
10654 &sensor_dev_attr_temp1_input.dev_attr.attr, 10655 &sensor_dev_attr_temp1_input.dev_attr.attr,
10655 &sensor_dev_attr_temp1_crit.dev_attr.attr, 10656 &sensor_dev_attr_temp1_crit.dev_attr.attr,
10656 &sensor_dev_attr_temp1_max.dev_attr.attr, 10657 &sensor_dev_attr_temp1_max.dev_attr.attr,
10657 NULL 10658 NULL
10658}; 10659};
10659 10660ATTRIBUTE_GROUPS(tg3);
10660static const struct attribute_group tg3_group = {
10661 .attrs = tg3_attributes,
10662};
10663 10661
10664static void tg3_hwmon_close(struct tg3 *tp) 10662static void tg3_hwmon_close(struct tg3 *tp)
10665{ 10663{
10666 if (tp->hwmon_dev) { 10664 if (tp->hwmon_dev) {
10667 hwmon_device_unregister(tp->hwmon_dev); 10665 hwmon_device_unregister(tp->hwmon_dev);
10668 tp->hwmon_dev = NULL; 10666 tp->hwmon_dev = NULL;
10669 sysfs_remove_group(&tp->pdev->dev.kobj, &tg3_group);
10670 } 10667 }
10671} 10668}
10672 10669
10673static void tg3_hwmon_open(struct tg3 *tp) 10670static void tg3_hwmon_open(struct tg3 *tp)
10674{ 10671{
10675 int i, err; 10672 int i;
10676 u32 size = 0; 10673 u32 size = 0;
10677 struct pci_dev *pdev = tp->pdev; 10674 struct pci_dev *pdev = tp->pdev;
10678 struct tg3_ocir ocirs[TG3_SD_NUM_RECS]; 10675 struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
@@ -10690,18 +10687,11 @@ static void tg3_hwmon_open(struct tg3 *tp)
10690 if (!size) 10687 if (!size)
10691 return; 10688 return;
10692 10689
10693 /* Register hwmon sysfs hooks */ 10690 tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3",
10694 err = sysfs_create_group(&pdev->dev.kobj, &tg3_group); 10691 tp, tg3_groups);
10695 if (err) {
10696 dev_err(&pdev->dev, "Cannot create sysfs group, aborting\n");
10697 return;
10698 }
10699
10700 tp->hwmon_dev = hwmon_device_register(&pdev->dev);
10701 if (IS_ERR(tp->hwmon_dev)) { 10692 if (IS_ERR(tp->hwmon_dev)) {
10702 tp->hwmon_dev = NULL; 10693 tp->hwmon_dev = NULL;
10703 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n"); 10694 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10704 sysfs_remove_group(&pdev->dev.kobj, &tg3_group);
10705 } 10695 }
10706} 10696}
10707 10697
@@ -11594,10 +11584,11 @@ static int tg3_close(struct net_device *dev)
11594 memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev)); 11584 memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
11595 memset(&tp->estats_prev, 0, sizeof(tp->estats_prev)); 11585 memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
11596 11586
11597 tg3_power_down_prepare(tp); 11587 if (pci_device_is_present(tp->pdev)) {
11598 11588 tg3_power_down_prepare(tp);
11599 tg3_carrier_off(tp);
11600 11589
11590 tg3_carrier_off(tp);
11591 }
11601 return 0; 11592 return 0;
11602} 11593}
11603 11594
@@ -16512,6 +16503,9 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
16512 /* Clear this out for sanity. */ 16503 /* Clear this out for sanity. */
16513 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0); 16504 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
16514 16505
16506 /* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */
16507 tw32(TG3PCI_REG_BASE_ADDR, 0);
16508
16515 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, 16509 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16516 &pci_state_reg); 16510 &pci_state_reg);
16517 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 && 16511 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
@@ -17739,10 +17733,12 @@ static int tg3_suspend(struct device *device)
17739 struct pci_dev *pdev = to_pci_dev(device); 17733 struct pci_dev *pdev = to_pci_dev(device);
17740 struct net_device *dev = pci_get_drvdata(pdev); 17734 struct net_device *dev = pci_get_drvdata(pdev);
17741 struct tg3 *tp = netdev_priv(dev); 17735 struct tg3 *tp = netdev_priv(dev);
17742 int err; 17736 int err = 0;
17737
17738 rtnl_lock();
17743 17739
17744 if (!netif_running(dev)) 17740 if (!netif_running(dev))
17745 return 0; 17741 goto unlock;
17746 17742
17747 tg3_reset_task_cancel(tp); 17743 tg3_reset_task_cancel(tp);
17748 tg3_phy_stop(tp); 17744 tg3_phy_stop(tp);
@@ -17784,6 +17780,8 @@ out:
17784 tg3_phy_start(tp); 17780 tg3_phy_start(tp);
17785 } 17781 }
17786 17782
17783unlock:
17784 rtnl_unlock();
17787 return err; 17785 return err;
17788} 17786}
17789 17787
@@ -17792,10 +17790,12 @@ static int tg3_resume(struct device *device)
17792 struct pci_dev *pdev = to_pci_dev(device); 17790 struct pci_dev *pdev = to_pci_dev(device);
17793 struct net_device *dev = pci_get_drvdata(pdev); 17791 struct net_device *dev = pci_get_drvdata(pdev);
17794 struct tg3 *tp = netdev_priv(dev); 17792 struct tg3 *tp = netdev_priv(dev);
17795 int err; 17793 int err = 0;
17794
17795 rtnl_lock();
17796 17796
17797 if (!netif_running(dev)) 17797 if (!netif_running(dev))
17798 return 0; 17798 goto unlock;
17799 17799
17800 netif_device_attach(dev); 17800 netif_device_attach(dev);
17801 17801
@@ -17819,6 +17819,8 @@ out:
17819 if (!err) 17819 if (!err)
17820 tg3_phy_start(tp); 17820 tg3_phy_start(tp);
17821 17821
17822unlock:
17823 rtnl_unlock();
17822 return err; 17824 return err;
17823} 17825}
17824#endif /* CONFIG_PM_SLEEP */ 17826#endif /* CONFIG_PM_SLEEP */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index ecd2fb3ef695..56e0415f8cdf 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -49,13 +49,15 @@
49#include <asm/io.h> 49#include <asm/io.h>
50#include "cxgb4_uld.h" 50#include "cxgb4_uld.h"
51 51
52#define FW_VERSION_MAJOR 1 52#define T4FW_VERSION_MAJOR 0x01
53#define FW_VERSION_MINOR 4 53#define T4FW_VERSION_MINOR 0x06
54#define FW_VERSION_MICRO 0 54#define T4FW_VERSION_MICRO 0x18
55#define T4FW_VERSION_BUILD 0x00
55 56
56#define FW_VERSION_MAJOR_T5 0 57#define T5FW_VERSION_MAJOR 0x01
57#define FW_VERSION_MINOR_T5 0 58#define T5FW_VERSION_MINOR 0x08
58#define FW_VERSION_MICRO_T5 0 59#define T5FW_VERSION_MICRO 0x1C
60#define T5FW_VERSION_BUILD 0x00
59 61
60#define CH_WARN(adap, fmt, ...) dev_warn(adap->pdev_dev, fmt, ## __VA_ARGS__) 62#define CH_WARN(adap, fmt, ...) dev_warn(adap->pdev_dev, fmt, ## __VA_ARGS__)
61 63
@@ -226,6 +228,25 @@ struct tp_params {
226 228
227 uint32_t dack_re; /* DACK timer resolution */ 229 uint32_t dack_re; /* DACK timer resolution */
228 unsigned short tx_modq[NCHAN]; /* channel to modulation queue map */ 230 unsigned short tx_modq[NCHAN]; /* channel to modulation queue map */
231
232 u32 vlan_pri_map; /* cached TP_VLAN_PRI_MAP */
233 u32 ingress_config; /* cached TP_INGRESS_CONFIG */
234
235 /* TP_VLAN_PRI_MAP Compressed Filter Tuple field offsets. This is a
236 * subset of the set of fields which may be present in the Compressed
237 * Filter Tuple portion of filters and TCP TCB connections. The
238 * fields which are present are controlled by the TP_VLAN_PRI_MAP.
239 * Since a variable number of fields may or may not be present, their
240 * shifted field positions within the Compressed Filter Tuple may
241 * vary, or not even be present if the field isn't selected in
242 * TP_VLAN_PRI_MAP. Since some of these fields are needed in various
243 * places we store their offsets here, or a -1 if the field isn't
244 * present.
245 */
246 int vlan_shift;
247 int vnic_shift;
248 int port_shift;
249 int protocol_shift;
229}; 250};
230 251
231struct vpd_params { 252struct vpd_params {
@@ -240,6 +261,26 @@ struct pci_params {
240 unsigned char width; 261 unsigned char width;
241}; 262};
242 263
264#define CHELSIO_CHIP_CODE(version, revision) (((version) << 4) | (revision))
265#define CHELSIO_CHIP_FPGA 0x100
266#define CHELSIO_CHIP_VERSION(code) (((code) >> 4) & 0xf)
267#define CHELSIO_CHIP_RELEASE(code) ((code) & 0xf)
268
269#define CHELSIO_T4 0x4
270#define CHELSIO_T5 0x5
271
272enum chip_type {
273 T4_A1 = CHELSIO_CHIP_CODE(CHELSIO_T4, 1),
274 T4_A2 = CHELSIO_CHIP_CODE(CHELSIO_T4, 2),
275 T4_FIRST_REV = T4_A1,
276 T4_LAST_REV = T4_A2,
277
278 T5_A0 = CHELSIO_CHIP_CODE(CHELSIO_T5, 0),
279 T5_A1 = CHELSIO_CHIP_CODE(CHELSIO_T5, 1),
280 T5_FIRST_REV = T5_A0,
281 T5_LAST_REV = T5_A1,
282};
283
243struct adapter_params { 284struct adapter_params {
244 struct tp_params tp; 285 struct tp_params tp;
245 struct vpd_params vpd; 286 struct vpd_params vpd;
@@ -259,7 +300,7 @@ struct adapter_params {
259 300
260 unsigned char nports; /* # of ethernet ports */ 301 unsigned char nports; /* # of ethernet ports */
261 unsigned char portvec; 302 unsigned char portvec;
262 unsigned char rev; /* chip revision */ 303 enum chip_type chip; /* chip code */
263 unsigned char offload; 304 unsigned char offload;
264 305
265 unsigned char bypass; 306 unsigned char bypass;
@@ -267,6 +308,23 @@ struct adapter_params {
267 unsigned int ofldq_wr_cred; 308 unsigned int ofldq_wr_cred;
268}; 309};
269 310
311#include "t4fw_api.h"
312
313#define FW_VERSION(chip) ( \
314 FW_HDR_FW_VER_MAJOR_GET(chip##FW_VERSION_MAJOR) | \
315 FW_HDR_FW_VER_MINOR_GET(chip##FW_VERSION_MINOR) | \
316 FW_HDR_FW_VER_MICRO_GET(chip##FW_VERSION_MICRO) | \
317 FW_HDR_FW_VER_BUILD_GET(chip##FW_VERSION_BUILD))
318#define FW_INTFVER(chip, intf) (FW_HDR_INTFVER_##intf)
319
320struct fw_info {
321 u8 chip;
322 char *fs_name;
323 char *fw_mod_name;
324 struct fw_hdr fw_hdr;
325};
326
327
270struct trace_params { 328struct trace_params {
271 u32 data[TRACE_LEN / 4]; 329 u32 data[TRACE_LEN / 4];
272 u32 mask[TRACE_LEN / 4]; 330 u32 mask[TRACE_LEN / 4];
@@ -512,25 +570,6 @@ struct sge {
512 570
513struct l2t_data; 571struct l2t_data;
514 572
515#define CHELSIO_CHIP_CODE(version, revision) (((version) << 4) | (revision))
516#define CHELSIO_CHIP_VERSION(code) ((code) >> 4)
517#define CHELSIO_CHIP_RELEASE(code) ((code) & 0xf)
518
519#define CHELSIO_T4 0x4
520#define CHELSIO_T5 0x5
521
522enum chip_type {
523 T4_A1 = CHELSIO_CHIP_CODE(CHELSIO_T4, 0),
524 T4_A2 = CHELSIO_CHIP_CODE(CHELSIO_T4, 1),
525 T4_A3 = CHELSIO_CHIP_CODE(CHELSIO_T4, 2),
526 T4_FIRST_REV = T4_A1,
527 T4_LAST_REV = T4_A3,
528
529 T5_A1 = CHELSIO_CHIP_CODE(CHELSIO_T5, 0),
530 T5_FIRST_REV = T5_A1,
531 T5_LAST_REV = T5_A1,
532};
533
534#ifdef CONFIG_PCI_IOV 573#ifdef CONFIG_PCI_IOV
535 574
536/* T4 supports SRIOV on PF0-3 and T5 on PF0-7. However, the Serial 575/* T4 supports SRIOV on PF0-3 and T5 on PF0-7. However, the Serial
@@ -715,12 +754,12 @@ enum {
715 754
716static inline int is_t5(enum chip_type chip) 755static inline int is_t5(enum chip_type chip)
717{ 756{
718 return (chip >= T5_FIRST_REV && chip <= T5_LAST_REV); 757 return CHELSIO_CHIP_VERSION(chip) == CHELSIO_T5;
719} 758}
720 759
721static inline int is_t4(enum chip_type chip) 760static inline int is_t4(enum chip_type chip)
722{ 761{
723 return (chip >= T4_FIRST_REV && chip <= T4_LAST_REV); 762 return CHELSIO_CHIP_VERSION(chip) == CHELSIO_T4;
724} 763}
725 764
726static inline u32 t4_read_reg(struct adapter *adap, u32 reg_addr) 765static inline u32 t4_read_reg(struct adapter *adap, u32 reg_addr)
@@ -900,8 +939,14 @@ int get_vpd_params(struct adapter *adapter, struct vpd_params *p);
900int t4_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size); 939int t4_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size);
901unsigned int t4_flash_cfg_addr(struct adapter *adapter); 940unsigned int t4_flash_cfg_addr(struct adapter *adapter);
902int t4_load_cfg(struct adapter *adapter, const u8 *cfg_data, unsigned int size); 941int t4_load_cfg(struct adapter *adapter, const u8 *cfg_data, unsigned int size);
903int t4_check_fw_version(struct adapter *adapter); 942int t4_get_fw_version(struct adapter *adapter, u32 *vers);
943int t4_get_tp_version(struct adapter *adapter, u32 *vers);
944int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
945 const u8 *fw_data, unsigned int fw_size,
946 struct fw_hdr *card_fw, enum dev_state state, int *reset);
904int t4_prep_adapter(struct adapter *adapter); 947int t4_prep_adapter(struct adapter *adapter);
948int t4_init_tp_params(struct adapter *adap);
949int t4_filter_field_shift(const struct adapter *adap, int filter_sel);
905int t4_port_init(struct adapter *adap, int mbox, int pf, int vf); 950int t4_port_init(struct adapter *adap, int mbox, int pf, int vf);
906void t4_fatal_err(struct adapter *adapter); 951void t4_fatal_err(struct adapter *adapter);
907int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid, 952int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 8b929eeecd2d..fff02ed1295e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -276,9 +276,9 @@ static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = {
276 { 0, } 276 { 0, }
277}; 277};
278 278
279#define FW_FNAME "cxgb4/t4fw.bin" 279#define FW4_FNAME "cxgb4/t4fw.bin"
280#define FW5_FNAME "cxgb4/t5fw.bin" 280#define FW5_FNAME "cxgb4/t5fw.bin"
281#define FW_CFNAME "cxgb4/t4-config.txt" 281#define FW4_CFNAME "cxgb4/t4-config.txt"
282#define FW5_CFNAME "cxgb4/t5-config.txt" 282#define FW5_CFNAME "cxgb4/t5-config.txt"
283 283
284MODULE_DESCRIPTION(DRV_DESC); 284MODULE_DESCRIPTION(DRV_DESC);
@@ -286,7 +286,7 @@ MODULE_AUTHOR("Chelsio Communications");
286MODULE_LICENSE("Dual BSD/GPL"); 286MODULE_LICENSE("Dual BSD/GPL");
287MODULE_VERSION(DRV_VERSION); 287MODULE_VERSION(DRV_VERSION);
288MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl); 288MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
289MODULE_FIRMWARE(FW_FNAME); 289MODULE_FIRMWARE(FW4_FNAME);
290MODULE_FIRMWARE(FW5_FNAME); 290MODULE_FIRMWARE(FW5_FNAME);
291 291
292/* 292/*
@@ -1071,72 +1071,6 @@ freeout: t4_free_sge_resources(adap);
1071} 1071}
1072 1072
1073/* 1073/*
1074 * Returns 0 if new FW was successfully loaded, a positive errno if a load was
1075 * started but failed, and a negative errno if flash load couldn't start.
1076 */
1077static int upgrade_fw(struct adapter *adap)
1078{
1079 int ret;
1080 u32 vers, exp_major;
1081 const struct fw_hdr *hdr;
1082 const struct firmware *fw;
1083 struct device *dev = adap->pdev_dev;
1084 char *fw_file_name;
1085
1086 switch (CHELSIO_CHIP_VERSION(adap->chip)) {
1087 case CHELSIO_T4:
1088 fw_file_name = FW_FNAME;
1089 exp_major = FW_VERSION_MAJOR;
1090 break;
1091 case CHELSIO_T5:
1092 fw_file_name = FW5_FNAME;
1093 exp_major = FW_VERSION_MAJOR_T5;
1094 break;
1095 default:
1096 dev_err(dev, "Unsupported chip type, %x\n", adap->chip);
1097 return -EINVAL;
1098 }
1099
1100 ret = request_firmware(&fw, fw_file_name, dev);
1101 if (ret < 0) {
1102 dev_err(dev, "unable to load firmware image %s, error %d\n",
1103 fw_file_name, ret);
1104 return ret;
1105 }
1106
1107 hdr = (const struct fw_hdr *)fw->data;
1108 vers = ntohl(hdr->fw_ver);
1109 if (FW_HDR_FW_VER_MAJOR_GET(vers) != exp_major) {
1110 ret = -EINVAL; /* wrong major version, won't do */
1111 goto out;
1112 }
1113
1114 /*
1115 * If the flash FW is unusable or we found something newer, load it.
1116 */
1117 if (FW_HDR_FW_VER_MAJOR_GET(adap->params.fw_vers) != exp_major ||
1118 vers > adap->params.fw_vers) {
1119 dev_info(dev, "upgrading firmware ...\n");
1120 ret = t4_fw_upgrade(adap, adap->mbox, fw->data, fw->size,
1121 /*force=*/false);
1122 if (!ret)
1123 dev_info(dev,
1124 "firmware upgraded to version %pI4 from %s\n",
1125 &hdr->fw_ver, fw_file_name);
1126 else
1127 dev_err(dev, "firmware upgrade failed! err=%d\n", -ret);
1128 } else {
1129 /*
1130 * Tell our caller that we didn't upgrade the firmware.
1131 */
1132 ret = -EINVAL;
1133 }
1134
1135out: release_firmware(fw);
1136 return ret;
1137}
1138
1139/*
1140 * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc. 1074 * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
1141 * The allocated memory is cleared. 1075 * The allocated memory is cleared.
1142 */ 1076 */
@@ -1415,7 +1349,7 @@ static int get_sset_count(struct net_device *dev, int sset)
1415static int get_regs_len(struct net_device *dev) 1349static int get_regs_len(struct net_device *dev)
1416{ 1350{
1417 struct adapter *adap = netdev2adap(dev); 1351 struct adapter *adap = netdev2adap(dev);
1418 if (is_t4(adap->chip)) 1352 if (is_t4(adap->params.chip))
1419 return T4_REGMAP_SIZE; 1353 return T4_REGMAP_SIZE;
1420 else 1354 else
1421 return T5_REGMAP_SIZE; 1355 return T5_REGMAP_SIZE;
@@ -1499,7 +1433,7 @@ static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
1499 data += sizeof(struct port_stats) / sizeof(u64); 1433 data += sizeof(struct port_stats) / sizeof(u64);
1500 collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data); 1434 collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
1501 data += sizeof(struct queue_port_stats) / sizeof(u64); 1435 data += sizeof(struct queue_port_stats) / sizeof(u64);
1502 if (!is_t4(adapter->chip)) { 1436 if (!is_t4(adapter->params.chip)) {
1503 t4_write_reg(adapter, SGE_STAT_CFG, STATSOURCE_T5(7)); 1437 t4_write_reg(adapter, SGE_STAT_CFG, STATSOURCE_T5(7));
1504 val1 = t4_read_reg(adapter, SGE_STAT_TOTAL); 1438 val1 = t4_read_reg(adapter, SGE_STAT_TOTAL);
1505 val2 = t4_read_reg(adapter, SGE_STAT_MATCH); 1439 val2 = t4_read_reg(adapter, SGE_STAT_MATCH);
@@ -1521,8 +1455,8 @@ static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
1521 */ 1455 */
1522static inline unsigned int mk_adap_vers(const struct adapter *ap) 1456static inline unsigned int mk_adap_vers(const struct adapter *ap)
1523{ 1457{
1524 return CHELSIO_CHIP_VERSION(ap->chip) | 1458 return CHELSIO_CHIP_VERSION(ap->params.chip) |
1525 (CHELSIO_CHIP_RELEASE(ap->chip) << 10) | (1 << 16); 1459 (CHELSIO_CHIP_RELEASE(ap->params.chip) << 10) | (1 << 16);
1526} 1460}
1527 1461
1528static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start, 1462static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start,
@@ -2189,7 +2123,7 @@ static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
2189 static const unsigned int *reg_ranges; 2123 static const unsigned int *reg_ranges;
2190 int arr_size = 0, buf_size = 0; 2124 int arr_size = 0, buf_size = 0;
2191 2125
2192 if (is_t4(ap->chip)) { 2126 if (is_t4(ap->params.chip)) {
2193 reg_ranges = &t4_reg_ranges[0]; 2127 reg_ranges = &t4_reg_ranges[0];
2194 arr_size = ARRAY_SIZE(t4_reg_ranges); 2128 arr_size = ARRAY_SIZE(t4_reg_ranges);
2195 buf_size = T4_REGMAP_SIZE; 2129 buf_size = T4_REGMAP_SIZE;
@@ -2967,7 +2901,7 @@ static int setup_debugfs(struct adapter *adap)
2967 size = t4_read_reg(adap, MA_EDRAM1_BAR); 2901 size = t4_read_reg(adap, MA_EDRAM1_BAR);
2968 add_debugfs_mem(adap, "edc1", MEM_EDC1, EDRAM_SIZE_GET(size)); 2902 add_debugfs_mem(adap, "edc1", MEM_EDC1, EDRAM_SIZE_GET(size));
2969 } 2903 }
2970 if (is_t4(adap->chip)) { 2904 if (is_t4(adap->params.chip)) {
2971 size = t4_read_reg(adap, MA_EXT_MEMORY_BAR); 2905 size = t4_read_reg(adap, MA_EXT_MEMORY_BAR);
2972 if (i & EXT_MEM_ENABLE) 2906 if (i & EXT_MEM_ENABLE)
2973 add_debugfs_mem(adap, "mc", MEM_MC, 2907 add_debugfs_mem(adap, "mc", MEM_MC,
@@ -3052,7 +2986,14 @@ int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
3052 if (stid >= 0) { 2986 if (stid >= 0) {
3053 t->stid_tab[stid].data = data; 2987 t->stid_tab[stid].data = data;
3054 stid += t->stid_base; 2988 stid += t->stid_base;
3055 t->stids_in_use++; 2989 /* IPv6 requires max of 520 bits or 16 cells in TCAM
2990 * This is equivalent to 4 TIDs. With CLIP enabled it
2991 * needs 2 TIDs.
2992 */
2993 if (family == PF_INET)
2994 t->stids_in_use++;
2995 else
2996 t->stids_in_use += 4;
3056 } 2997 }
3057 spin_unlock_bh(&t->stid_lock); 2998 spin_unlock_bh(&t->stid_lock);
3058 return stid; 2999 return stid;
@@ -3078,7 +3019,8 @@ int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data)
3078 } 3019 }
3079 if (stid >= 0) { 3020 if (stid >= 0) {
3080 t->stid_tab[stid].data = data; 3021 t->stid_tab[stid].data = data;
3081 stid += t->stid_base; 3022 stid -= t->nstids;
3023 stid += t->sftid_base;
3082 t->stids_in_use++; 3024 t->stids_in_use++;
3083 } 3025 }
3084 spin_unlock_bh(&t->stid_lock); 3026 spin_unlock_bh(&t->stid_lock);
@@ -3090,14 +3032,24 @@ EXPORT_SYMBOL(cxgb4_alloc_sftid);
3090 */ 3032 */
3091void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family) 3033void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
3092{ 3034{
3093 stid -= t->stid_base; 3035 /* Is it a server filter TID? */
3036 if (t->nsftids && (stid >= t->sftid_base)) {
3037 stid -= t->sftid_base;
3038 stid += t->nstids;
3039 } else {
3040 stid -= t->stid_base;
3041 }
3042
3094 spin_lock_bh(&t->stid_lock); 3043 spin_lock_bh(&t->stid_lock);
3095 if (family == PF_INET) 3044 if (family == PF_INET)
3096 __clear_bit(stid, t->stid_bmap); 3045 __clear_bit(stid, t->stid_bmap);
3097 else 3046 else
3098 bitmap_release_region(t->stid_bmap, stid, 2); 3047 bitmap_release_region(t->stid_bmap, stid, 2);
3099 t->stid_tab[stid].data = NULL; 3048 t->stid_tab[stid].data = NULL;
3100 t->stids_in_use--; 3049 if (family == PF_INET)
3050 t->stids_in_use--;
3051 else
3052 t->stids_in_use -= 4;
3101 spin_unlock_bh(&t->stid_lock); 3053 spin_unlock_bh(&t->stid_lock);
3102} 3054}
3103EXPORT_SYMBOL(cxgb4_free_stid); 3055EXPORT_SYMBOL(cxgb4_free_stid);
@@ -3200,6 +3152,7 @@ static int tid_init(struct tid_info *t)
3200 size_t size; 3152 size_t size;
3201 unsigned int stid_bmap_size; 3153 unsigned int stid_bmap_size;
3202 unsigned int natids = t->natids; 3154 unsigned int natids = t->natids;
3155 struct adapter *adap = container_of(t, struct adapter, tids);
3203 3156
3204 stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids); 3157 stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids);
3205 size = t->ntids * sizeof(*t->tid_tab) + 3158 size = t->ntids * sizeof(*t->tid_tab) +
@@ -3233,6 +3186,11 @@ static int tid_init(struct tid_info *t)
3233 t->afree = t->atid_tab; 3186 t->afree = t->atid_tab;
3234 } 3187 }
3235 bitmap_zero(t->stid_bmap, t->nstids + t->nsftids); 3188 bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
3189 /* Reserve stid 0 for T4/T5 adapters */
3190 if (!t->stid_base &&
3191 (is_t4(adap->params.chip) || is_t5(adap->params.chip)))
3192 __set_bit(0, t->stid_bmap);
3193
3236 return 0; 3194 return 0;
3237} 3195}
3238 3196
@@ -3419,7 +3377,7 @@ unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo)
3419 3377
3420 v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS); 3378 v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
3421 v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2); 3379 v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2);
3422 if (is_t4(adap->chip)) { 3380 if (is_t4(adap->params.chip)) {
3423 lp_count = G_LP_COUNT(v1); 3381 lp_count = G_LP_COUNT(v1);
3424 hp_count = G_HP_COUNT(v1); 3382 hp_count = G_HP_COUNT(v1);
3425 } else { 3383 } else {
@@ -3588,7 +3546,7 @@ static void drain_db_fifo(struct adapter *adap, int usecs)
3588 do { 3546 do {
3589 v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS); 3547 v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
3590 v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2); 3548 v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2);
3591 if (is_t4(adap->chip)) { 3549 if (is_t4(adap->params.chip)) {
3592 lp_count = G_LP_COUNT(v1); 3550 lp_count = G_LP_COUNT(v1);
3593 hp_count = G_HP_COUNT(v1); 3551 hp_count = G_HP_COUNT(v1);
3594 } else { 3552 } else {
@@ -3708,7 +3666,7 @@ static void process_db_drop(struct work_struct *work)
3708 3666
3709 adap = container_of(work, struct adapter, db_drop_task); 3667 adap = container_of(work, struct adapter, db_drop_task);
3710 3668
3711 if (is_t4(adap->chip)) { 3669 if (is_t4(adap->params.chip)) {
3712 disable_dbs(adap); 3670 disable_dbs(adap);
3713 notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP); 3671 notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP);
3714 drain_db_fifo(adap, 1); 3672 drain_db_fifo(adap, 1);
@@ -3753,7 +3711,7 @@ static void process_db_drop(struct work_struct *work)
3753 3711
3754void t4_db_full(struct adapter *adap) 3712void t4_db_full(struct adapter *adap)
3755{ 3713{
3756 if (is_t4(adap->chip)) { 3714 if (is_t4(adap->params.chip)) {
3757 t4_set_reg_field(adap, SGE_INT_ENABLE3, 3715 t4_set_reg_field(adap, SGE_INT_ENABLE3,
3758 DBFIFO_HP_INT | DBFIFO_LP_INT, 0); 3716 DBFIFO_HP_INT | DBFIFO_LP_INT, 0);
3759 queue_work(workq, &adap->db_full_task); 3717 queue_work(workq, &adap->db_full_task);
@@ -3762,7 +3720,7 @@ void t4_db_full(struct adapter *adap)
3762 3720
3763void t4_db_dropped(struct adapter *adap) 3721void t4_db_dropped(struct adapter *adap)
3764{ 3722{
3765 if (is_t4(adap->chip)) 3723 if (is_t4(adap->params.chip))
3766 queue_work(workq, &adap->db_drop_task); 3724 queue_work(workq, &adap->db_drop_task);
3767} 3725}
3768 3726
@@ -3789,7 +3747,7 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
3789 lli.nchan = adap->params.nports; 3747 lli.nchan = adap->params.nports;
3790 lli.nports = adap->params.nports; 3748 lli.nports = adap->params.nports;
3791 lli.wr_cred = adap->params.ofldq_wr_cred; 3749 lli.wr_cred = adap->params.ofldq_wr_cred;
3792 lli.adapter_type = adap->params.rev; 3750 lli.adapter_type = adap->params.chip;
3793 lli.iscsi_iolen = MAXRXDATA_GET(t4_read_reg(adap, TP_PARA_REG2)); 3751 lli.iscsi_iolen = MAXRXDATA_GET(t4_read_reg(adap, TP_PARA_REG2));
3794 lli.udb_density = 1 << QUEUESPERPAGEPF0_GET( 3752 lli.udb_density = 1 << QUEUESPERPAGEPF0_GET(
3795 t4_read_reg(adap, SGE_EGRESS_QUEUES_PER_PAGE_PF) >> 3753 t4_read_reg(adap, SGE_EGRESS_QUEUES_PER_PAGE_PF) >>
@@ -3797,7 +3755,7 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
3797 lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET( 3755 lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET(
3798 t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF) >> 3756 t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF) >>
3799 (adap->fn * 4)); 3757 (adap->fn * 4));
3800 lli.filt_mode = adap->filter_mode; 3758 lli.filt_mode = adap->params.tp.vlan_pri_map;
3801 /* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */ 3759 /* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
3802 for (i = 0; i < NCHAN; i++) 3760 for (i = 0; i < NCHAN; i++)
3803 lli.tx_modq[i] = i; 3761 lli.tx_modq[i] = i;
@@ -4245,7 +4203,7 @@ int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
4245 adap = netdev2adap(dev); 4203 adap = netdev2adap(dev);
4246 4204
4247 /* Adjust stid to correct filter index */ 4205 /* Adjust stid to correct filter index */
4248 stid -= adap->tids.nstids; 4206 stid -= adap->tids.sftid_base;
4249 stid += adap->tids.nftids; 4207 stid += adap->tids.nftids;
4250 4208
4251 /* Check to make sure the filter requested is writable ... 4209 /* Check to make sure the filter requested is writable ...
@@ -4271,12 +4229,17 @@ int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
4271 f->fs.val.lip[i] = val[i]; 4229 f->fs.val.lip[i] = val[i];
4272 f->fs.mask.lip[i] = ~0; 4230 f->fs.mask.lip[i] = ~0;
4273 } 4231 }
4274 if (adap->filter_mode & F_PORT) { 4232 if (adap->params.tp.vlan_pri_map & F_PORT) {
4275 f->fs.val.iport = port; 4233 f->fs.val.iport = port;
4276 f->fs.mask.iport = mask; 4234 f->fs.mask.iport = mask;
4277 } 4235 }
4278 } 4236 }
4279 4237
4238 if (adap->params.tp.vlan_pri_map & F_PROTOCOL) {
4239 f->fs.val.proto = IPPROTO_TCP;
4240 f->fs.mask.proto = ~0;
4241 }
4242
4280 f->fs.dirsteer = 1; 4243 f->fs.dirsteer = 1;
4281 f->fs.iq = queue; 4244 f->fs.iq = queue;
4282 /* Mark filter as locked */ 4245 /* Mark filter as locked */
@@ -4303,7 +4266,7 @@ int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
4303 adap = netdev2adap(dev); 4266 adap = netdev2adap(dev);
4304 4267
4305 /* Adjust stid to correct filter index */ 4268 /* Adjust stid to correct filter index */
4306 stid -= adap->tids.nstids; 4269 stid -= adap->tids.sftid_base;
4307 stid += adap->tids.nftids; 4270 stid += adap->tids.nftids;
4308 4271
4309 f = &adap->tids.ftid_tab[stid]; 4272 f = &adap->tids.ftid_tab[stid];
@@ -4483,7 +4446,7 @@ static void setup_memwin(struct adapter *adap)
4483 u32 bar0, mem_win0_base, mem_win1_base, mem_win2_base; 4446 u32 bar0, mem_win0_base, mem_win1_base, mem_win2_base;
4484 4447
4485 bar0 = pci_resource_start(adap->pdev, 0); /* truncation intentional */ 4448 bar0 = pci_resource_start(adap->pdev, 0); /* truncation intentional */
4486 if (is_t4(adap->chip)) { 4449 if (is_t4(adap->params.chip)) {
4487 mem_win0_base = bar0 + MEMWIN0_BASE; 4450 mem_win0_base = bar0 + MEMWIN0_BASE;
4488 mem_win1_base = bar0 + MEMWIN1_BASE; 4451 mem_win1_base = bar0 + MEMWIN1_BASE;
4489 mem_win2_base = bar0 + MEMWIN2_BASE; 4452 mem_win2_base = bar0 + MEMWIN2_BASE;
@@ -4668,8 +4631,10 @@ static int adap_init0_config(struct adapter *adapter, int reset)
4668 const struct firmware *cf; 4631 const struct firmware *cf;
4669 unsigned long mtype = 0, maddr = 0; 4632 unsigned long mtype = 0, maddr = 0;
4670 u32 finiver, finicsum, cfcsum; 4633 u32 finiver, finicsum, cfcsum;
4671 int ret, using_flash; 4634 int ret;
4635 int config_issued = 0;
4672 char *fw_config_file, fw_config_file_path[256]; 4636 char *fw_config_file, fw_config_file_path[256];
4637 char *config_name = NULL;
4673 4638
4674 /* 4639 /*
4675 * Reset device if necessary. 4640 * Reset device if necessary.
@@ -4686,9 +4651,9 @@ static int adap_init0_config(struct adapter *adapter, int reset)
4686 * then use that. Otherwise, use the configuration file stored 4651 * then use that. Otherwise, use the configuration file stored
4687 * in the adapter flash ... 4652 * in the adapter flash ...
4688 */ 4653 */
4689 switch (CHELSIO_CHIP_VERSION(adapter->chip)) { 4654 switch (CHELSIO_CHIP_VERSION(adapter->params.chip)) {
4690 case CHELSIO_T4: 4655 case CHELSIO_T4:
4691 fw_config_file = FW_CFNAME; 4656 fw_config_file = FW4_CFNAME;
4692 break; 4657 break;
4693 case CHELSIO_T5: 4658 case CHELSIO_T5:
4694 fw_config_file = FW5_CFNAME; 4659 fw_config_file = FW5_CFNAME;
@@ -4702,13 +4667,16 @@ static int adap_init0_config(struct adapter *adapter, int reset)
4702 4667
4703 ret = request_firmware(&cf, fw_config_file, adapter->pdev_dev); 4668 ret = request_firmware(&cf, fw_config_file, adapter->pdev_dev);
4704 if (ret < 0) { 4669 if (ret < 0) {
4705 using_flash = 1; 4670 config_name = "On FLASH";
4706 mtype = FW_MEMTYPE_CF_FLASH; 4671 mtype = FW_MEMTYPE_CF_FLASH;
4707 maddr = t4_flash_cfg_addr(adapter); 4672 maddr = t4_flash_cfg_addr(adapter);
4708 } else { 4673 } else {
4709 u32 params[7], val[7]; 4674 u32 params[7], val[7];
4710 4675
4711 using_flash = 0; 4676 sprintf(fw_config_file_path,
4677 "/lib/firmware/%s", fw_config_file);
4678 config_name = fw_config_file_path;
4679
4712 if (cf->size >= FLASH_CFG_MAX_SIZE) 4680 if (cf->size >= FLASH_CFG_MAX_SIZE)
4713 ret = -ENOMEM; 4681 ret = -ENOMEM;
4714 else { 4682 else {
@@ -4776,6 +4744,26 @@ static int adap_init0_config(struct adapter *adapter, int reset)
4776 FW_LEN16(caps_cmd)); 4744 FW_LEN16(caps_cmd));
4777 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd), 4745 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
4778 &caps_cmd); 4746 &caps_cmd);
4747
4748 /* If the CAPS_CONFIG failed with an ENOENT (for a Firmware
4749 * Configuration File in FLASH), our last gasp effort is to use the
4750 * Firmware Configuration File which is embedded in the firmware. A
4751 * very few early versions of the firmware didn't have one embedded
4752 * but we can ignore those.
4753 */
4754 if (ret == -ENOENT) {
4755 memset(&caps_cmd, 0, sizeof(caps_cmd));
4756 caps_cmd.op_to_write =
4757 htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4758 FW_CMD_REQUEST |
4759 FW_CMD_READ);
4760 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
4761 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd,
4762 sizeof(caps_cmd), &caps_cmd);
4763 config_name = "Firmware Default";
4764 }
4765
4766 config_issued = 1;
4779 if (ret < 0) 4767 if (ret < 0)
4780 goto bye; 4768 goto bye;
4781 4769
@@ -4816,7 +4804,6 @@ static int adap_init0_config(struct adapter *adapter, int reset)
4816 if (ret < 0) 4804 if (ret < 0)
4817 goto bye; 4805 goto bye;
4818 4806
4819 sprintf(fw_config_file_path, "/lib/firmware/%s", fw_config_file);
4820 /* 4807 /*
4821 * Return successfully and note that we're operating with parameters 4808 * Return successfully and note that we're operating with parameters
4822 * not supplied by the driver, rather than from hard-wired 4809 * not supplied by the driver, rather than from hard-wired
@@ -4824,11 +4811,8 @@ static int adap_init0_config(struct adapter *adapter, int reset)
4824 */ 4811 */
4825 adapter->flags |= USING_SOFT_PARAMS; 4812 adapter->flags |= USING_SOFT_PARAMS;
4826 dev_info(adapter->pdev_dev, "Successfully configured using Firmware "\ 4813 dev_info(adapter->pdev_dev, "Successfully configured using Firmware "\
4827 "Configuration File %s, version %#x, computed checksum %#x\n", 4814 "Configuration File \"%s\", version %#x, computed checksum %#x\n",
4828 (using_flash 4815 config_name, finiver, cfcsum);
4829 ? "in device FLASH"
4830 : fw_config_file_path),
4831 finiver, cfcsum);
4832 return 0; 4816 return 0;
4833 4817
4834 /* 4818 /*
@@ -4837,9 +4821,9 @@ static int adap_init0_config(struct adapter *adapter, int reset)
4837 * want to issue a warning since this is fairly common.) 4821 * want to issue a warning since this is fairly common.)
4838 */ 4822 */
4839bye: 4823bye:
4840 if (ret != -ENOENT) 4824 if (config_issued && ret != -ENOENT)
4841 dev_warn(adapter->pdev_dev, "Configuration file error %d\n", 4825 dev_warn(adapter->pdev_dev, "\"%s\" configuration file error %d\n",
4842 -ret); 4826 config_name, -ret);
4843 return ret; 4827 return ret;
4844} 4828}
4845 4829
@@ -5086,6 +5070,47 @@ bye:
5086 return ret; 5070 return ret;
5087} 5071}
5088 5072
5073static struct fw_info fw_info_array[] = {
5074 {
5075 .chip = CHELSIO_T4,
5076 .fs_name = FW4_CFNAME,
5077 .fw_mod_name = FW4_FNAME,
5078 .fw_hdr = {
5079 .chip = FW_HDR_CHIP_T4,
5080 .fw_ver = __cpu_to_be32(FW_VERSION(T4)),
5081 .intfver_nic = FW_INTFVER(T4, NIC),
5082 .intfver_vnic = FW_INTFVER(T4, VNIC),
5083 .intfver_ri = FW_INTFVER(T4, RI),
5084 .intfver_iscsi = FW_INTFVER(T4, ISCSI),
5085 .intfver_fcoe = FW_INTFVER(T4, FCOE),
5086 },
5087 }, {
5088 .chip = CHELSIO_T5,
5089 .fs_name = FW5_CFNAME,
5090 .fw_mod_name = FW5_FNAME,
5091 .fw_hdr = {
5092 .chip = FW_HDR_CHIP_T5,
5093 .fw_ver = __cpu_to_be32(FW_VERSION(T5)),
5094 .intfver_nic = FW_INTFVER(T5, NIC),
5095 .intfver_vnic = FW_INTFVER(T5, VNIC),
5096 .intfver_ri = FW_INTFVER(T5, RI),
5097 .intfver_iscsi = FW_INTFVER(T5, ISCSI),
5098 .intfver_fcoe = FW_INTFVER(T5, FCOE),
5099 },
5100 }
5101};
5102
5103static struct fw_info *find_fw_info(int chip)
5104{
5105 int i;
5106
5107 for (i = 0; i < ARRAY_SIZE(fw_info_array); i++) {
5108 if (fw_info_array[i].chip == chip)
5109 return &fw_info_array[i];
5110 }
5111 return NULL;
5112}
5113
5089/* 5114/*
5090 * Phase 0 of initialization: contact FW, obtain config, perform basic init. 5115 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
5091 */ 5116 */
@@ -5096,7 +5121,7 @@ static int adap_init0(struct adapter *adap)
5096 enum dev_state state; 5121 enum dev_state state;
5097 u32 params[7], val[7]; 5122 u32 params[7], val[7];
5098 struct fw_caps_config_cmd caps_cmd; 5123 struct fw_caps_config_cmd caps_cmd;
5099 int reset = 1, j; 5124 int reset = 1;
5100 5125
5101 /* 5126 /*
5102 * Contact FW, advertising Master capability (and potentially forcing 5127 * Contact FW, advertising Master capability (and potentially forcing
@@ -5123,44 +5148,54 @@ static int adap_init0(struct adapter *adap)
5123 * later reporting and B. to warn if the currently loaded firmware 5148 * later reporting and B. to warn if the currently loaded firmware
5124 * is excessively mismatched relative to the driver.) 5149 * is excessively mismatched relative to the driver.)
5125 */ 5150 */
5126 ret = t4_check_fw_version(adap); 5151 t4_get_fw_version(adap, &adap->params.fw_vers);
5127 5152 t4_get_tp_version(adap, &adap->params.tp_vers);
5128 /* The error code -EFAULT is returned by t4_check_fw_version() if
5129 * firmware on adapter < supported firmware. If firmware on adapter
5130 * is too old (not supported by driver) and we're the MASTER_PF set
5131 * adapter state to DEV_STATE_UNINIT to force firmware upgrade
5132 * and reinitialization.
5133 */
5134 if ((adap->flags & MASTER_PF) && ret == -EFAULT)
5135 state = DEV_STATE_UNINIT;
5136 if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) { 5153 if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) {
5137 if (ret == -EINVAL || ret == -EFAULT || ret > 0) { 5154 struct fw_info *fw_info;
5138 if (upgrade_fw(adap) >= 0) { 5155 struct fw_hdr *card_fw;
5139 /* 5156 const struct firmware *fw;
5140 * Note that the chip was reset as part of the 5157 const u8 *fw_data = NULL;
5141 * firmware upgrade so we don't reset it again 5158 unsigned int fw_size = 0;
5142 * below and grab the new firmware version. 5159
5143 */ 5160 /* This is the firmware whose headers the driver was compiled
5144 reset = 0; 5161 * against
5145 ret = t4_check_fw_version(adap); 5162 */
5146 } else 5163 fw_info = find_fw_info(CHELSIO_CHIP_VERSION(adap->params.chip));
5147 if (ret == -EFAULT) { 5164 if (fw_info == NULL) {
5148 /* 5165 dev_err(adap->pdev_dev,
5149 * Firmware is old but still might 5166 "unable to get firmware info for chip %d.\n",
5150 * work if we force reinitialization 5167 CHELSIO_CHIP_VERSION(adap->params.chip));
5151 * of the adapter. Ignoring FW upgrade 5168 return -EINVAL;
5152 * failure.
5153 */
5154 dev_warn(adap->pdev_dev,
5155 "Ignoring firmware upgrade "
5156 "failure, and forcing driver "
5157 "to reinitialize the "
5158 "adapter.\n");
5159 ret = 0;
5160 }
5161 } 5169 }
5170
5171 /* allocate memory to read the header of the firmware on the
5172 * card
5173 */
5174 card_fw = t4_alloc_mem(sizeof(*card_fw));
5175
5176 /* Get FW from /lib/firmware/ */
5177 ret = request_firmware(&fw, fw_info->fw_mod_name,
5178 adap->pdev_dev);
5179 if (ret < 0) {
5180 dev_err(adap->pdev_dev,
5181 "unable to load firmware image %s, error %d\n",
5182 fw_info->fw_mod_name, ret);
5183 } else {
5184 fw_data = fw->data;
5185 fw_size = fw->size;
5186 }
5187
5188 /* upgrade FW logic */
5189 ret = t4_prep_fw(adap, fw_info, fw_data, fw_size, card_fw,
5190 state, &reset);
5191
5192 /* Cleaning up */
5193 if (fw != NULL)
5194 release_firmware(fw);
5195 t4_free_mem(card_fw);
5196
5162 if (ret < 0) 5197 if (ret < 0)
5163 return ret; 5198 goto bye;
5164 } 5199 }
5165 5200
5166 /* 5201 /*
@@ -5245,7 +5280,7 @@ static int adap_init0(struct adapter *adap)
5245 if (ret == -ENOENT) { 5280 if (ret == -ENOENT) {
5246 dev_info(adap->pdev_dev, 5281 dev_info(adap->pdev_dev,
5247 "No Configuration File present " 5282 "No Configuration File present "
5248 "on adapter. Using hard-wired " 5283 "on adapter. Using hard-wired "
5249 "configuration parameters.\n"); 5284 "configuration parameters.\n");
5250 ret = adap_init0_no_config(adap, reset); 5285 ret = adap_init0_no_config(adap, reset);
5251 } 5286 }
@@ -5428,21 +5463,11 @@ static int adap_init0(struct adapter *adap)
5428 /* 5463 /*
5429 * These are finalized by FW initialization, load their values now. 5464 * These are finalized by FW initialization, load their values now.
5430 */ 5465 */
5431 v = t4_read_reg(adap, TP_TIMER_RESOLUTION);
5432 adap->params.tp.tre = TIMERRESOLUTION_GET(v);
5433 adap->params.tp.dack_re = DELAYEDACKRESOLUTION_GET(v);
5434 t4_read_mtu_tbl(adap, adap->params.mtus, NULL); 5466 t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
5435 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd, 5467 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
5436 adap->params.b_wnd); 5468 adap->params.b_wnd);
5437 5469
5438 /* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */ 5470 t4_init_tp_params(adap);
5439 for (j = 0; j < NCHAN; j++)
5440 adap->params.tp.tx_modq[j] = j;
5441
5442 t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
5443 &adap->filter_mode, 1,
5444 TP_VLAN_PRI_MAP);
5445
5446 adap->flags |= FW_OK; 5471 adap->flags |= FW_OK;
5447 return 0; 5472 return 0;
5448 5473
@@ -5787,7 +5812,7 @@ static void print_port_info(const struct net_device *dev)
5787 5812
5788 netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n", 5813 netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n",
5789 adap->params.vpd.id, 5814 adap->params.vpd.id,
5790 CHELSIO_CHIP_RELEASE(adap->params.rev), buf, 5815 CHELSIO_CHIP_RELEASE(adap->params.chip), buf,
5791 is_offload(adap) ? "R" : "", adap->params.pci.width, spd, 5816 is_offload(adap) ? "R" : "", adap->params.pci.width, spd,
5792 (adap->flags & USING_MSIX) ? " MSI-X" : 5817 (adap->flags & USING_MSIX) ? " MSI-X" :
5793 (adap->flags & USING_MSI) ? " MSI" : ""); 5818 (adap->flags & USING_MSI) ? " MSI" : "");
@@ -5910,7 +5935,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
5910 if (err) 5935 if (err)
5911 goto out_unmap_bar0; 5936 goto out_unmap_bar0;
5912 5937
5913 if (!is_t4(adapter->chip)) { 5938 if (!is_t4(adapter->params.chip)) {
5914 s_qpp = QUEUESPERPAGEPF1 * adapter->fn; 5939 s_qpp = QUEUESPERPAGEPF1 * adapter->fn;
5915 qpp = 1 << QUEUESPERPAGEPF0_GET(t4_read_reg(adapter, 5940 qpp = 1 << QUEUESPERPAGEPF0_GET(t4_read_reg(adapter,
5916 SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp); 5941 SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp);
@@ -6064,7 +6089,7 @@ sriov:
6064 out_free_dev: 6089 out_free_dev:
6065 free_some_resources(adapter); 6090 free_some_resources(adapter);
6066 out_unmap_bar: 6091 out_unmap_bar:
6067 if (!is_t4(adapter->chip)) 6092 if (!is_t4(adapter->params.chip))
6068 iounmap(adapter->bar2); 6093 iounmap(adapter->bar2);
6069 out_unmap_bar0: 6094 out_unmap_bar0:
6070 iounmap(adapter->regs); 6095 iounmap(adapter->regs);
@@ -6116,7 +6141,7 @@ static void remove_one(struct pci_dev *pdev)
6116 6141
6117 free_some_resources(adapter); 6142 free_some_resources(adapter);
6118 iounmap(adapter->regs); 6143 iounmap(adapter->regs);
6119 if (!is_t4(adapter->chip)) 6144 if (!is_t4(adapter->params.chip))
6120 iounmap(adapter->bar2); 6145 iounmap(adapter->bar2);
6121 kfree(adapter); 6146 kfree(adapter);
6122 pci_disable_pcie_error_reporting(pdev); 6147 pci_disable_pcie_error_reporting(pdev);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index 6f21f2451c30..4dd0a82533e4 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -131,7 +131,14 @@ static inline void *lookup_atid(const struct tid_info *t, unsigned int atid)
131 131
132static inline void *lookup_stid(const struct tid_info *t, unsigned int stid) 132static inline void *lookup_stid(const struct tid_info *t, unsigned int stid)
133{ 133{
134 stid -= t->stid_base; 134 /* Is it a server filter TID? */
135 if (t->nsftids && (stid >= t->sftid_base)) {
136 stid -= t->sftid_base;
137 stid += t->nstids;
138 } else {
139 stid -= t->stid_base;
140 }
141
135 return stid < (t->nstids + t->nsftids) ? t->stid_tab[stid].data : NULL; 142 return stid < (t->nstids + t->nsftids) ? t->stid_tab[stid].data : NULL;
136} 143}
137 144
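
The same server-filter TID remapping appears in several hunks of this patch (cxgb4_alloc_sftid(), cxgb4_free_stid(), lookup_stid() and the server-filter create/remove paths). A minimal standalone sketch of the index arithmetic, using hypothetical base/count values rather than anything read from a real adapter:

#include <stdio.h>

/* Hypothetical TID layout mirroring the fields used above: regular server
 * TIDs live in [stid_base, stid_base + nstids), server-filter TIDs in
 * [sftid_base, sftid_base + nsftids), and both share stid_tab[], with the
 * server-filter entries stored after the nstids regular ones.
 */
struct tid_layout {
	unsigned int stid_base;
	unsigned int nstids;
	unsigned int sftid_base;
	unsigned int nsftids;
};

/* Same mapping as the reworked lookup_stid()/cxgb4_free_stid() above */
static unsigned int stid_to_index(const struct tid_layout *t, unsigned int stid)
{
	if (t->nsftids && stid >= t->sftid_base)
		return stid - t->sftid_base + t->nstids;
	return stid - t->stid_base;
}

int main(void)
{
	struct tid_layout t = { .stid_base = 96, .nstids = 128,
				.sftid_base = 304, .nsftids = 4 };

	printf("regular stid 100       -> stid_tab[%u]\n", stid_to_index(&t, 100));
	printf("server-filter stid 305 -> stid_tab[%u]\n", stid_to_index(&t, 305));
	return 0;
}
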
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
index 29878098101e..cb05be905def 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
@@ -45,6 +45,7 @@
45#include "l2t.h" 45#include "l2t.h"
46#include "t4_msg.h" 46#include "t4_msg.h"
47#include "t4fw_api.h" 47#include "t4fw_api.h"
48#include "t4_regs.h"
48 49
49#define VLAN_NONE 0xfff 50#define VLAN_NONE 0xfff
50 51
@@ -411,6 +412,40 @@ done:
411} 412}
412EXPORT_SYMBOL(cxgb4_l2t_get); 413EXPORT_SYMBOL(cxgb4_l2t_get);
413 414
415u64 cxgb4_select_ntuple(struct net_device *dev,
416 const struct l2t_entry *l2t)
417{
418 struct adapter *adap = netdev2adap(dev);
419 struct tp_params *tp = &adap->params.tp;
420 u64 ntuple = 0;
421
422 /* Initialize each of the fields we care about that are present
423 * in the Compressed Filter Tuple.
424 */
425 if (tp->vlan_shift >= 0 && l2t->vlan != VLAN_NONE)
426 ntuple |= (F_FT_VLAN_VLD | l2t->vlan) << tp->vlan_shift;
427
428 if (tp->port_shift >= 0)
429 ntuple |= (u64)l2t->lport << tp->port_shift;
430
431 if (tp->protocol_shift >= 0)
432 ntuple |= (u64)IPPROTO_TCP << tp->protocol_shift;
433
434 if (tp->vnic_shift >= 0) {
435 u32 viid = cxgb4_port_viid(dev);
436 u32 vf = FW_VIID_VIN_GET(viid);
437 u32 pf = FW_VIID_PFN_GET(viid);
438 u32 vld = FW_VIID_VIVLD_GET(viid);
439
440 ntuple |= (u64)(V_FT_VNID_ID_VF(vf) |
441 V_FT_VNID_ID_PF(pf) |
442 V_FT_VNID_ID_VLD(vld)) << tp->vnic_shift;
443 }
444
445 return ntuple;
446}
447EXPORT_SYMBOL(cxgb4_select_ntuple);
448
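
A minimal sketch (not driver code) of the tuple packing cxgb4_select_ntuple() performs, assuming hypothetical shift values; on real hardware the shifts are the ones cached by t4_init_tp_params()/t4_filter_field_shift() further down in this patch:

#include <stdio.h>
#include <stdint.h>

#define EX_FT_VLAN_VLD	(1u << 16)	/* mirrors F_FT_VLAN_VLD */
#define EX_IPPROTO_TCP	6

int main(void)
{
	/* hypothetical shifts and l2t values, for illustration only */
	int port_shift = 0, vlan_shift = 3, protocol_shift = 20;
	unsigned int lport = 2, vlan = 100;
	uint64_t ntuple = 0;

	if (vlan_shift >= 0)		/* VLAN-valid bit plus VLAN ID */
		ntuple |= (uint64_t)(EX_FT_VLAN_VLD | vlan) << vlan_shift;
	if (port_shift >= 0)		/* ingress port */
		ntuple |= (uint64_t)lport << port_shift;
	if (protocol_shift >= 0)	/* IP protocol, always TCP for offload */
		ntuple |= (uint64_t)EX_IPPROTO_TCP << protocol_shift;
	/* vnic_shift is -1 in this example (VNIC_ID absent from the filter
	 * mode), so no VNIC field is added.
	 */

	printf("ntuple = %#llx\n", (unsigned long long)ntuple);
	return 0;
}
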
414/* 449/*
415 * Called when address resolution fails for an L2T entry to handle packets 450 * Called when address resolution fails for an L2T entry to handle packets
416 * on the arpq head. If a packet specifies a failure handler it is invoked, 451 * on the arpq head. If a packet specifies a failure handler it is invoked,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.h b/drivers/net/ethernet/chelsio/cxgb4/l2t.h
index 108c0f1fce1c..85eb5c71358d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.h
@@ -98,7 +98,8 @@ int cxgb4_l2t_send(struct net_device *dev, struct sk_buff *skb,
98struct l2t_entry *cxgb4_l2t_get(struct l2t_data *d, struct neighbour *neigh, 98struct l2t_entry *cxgb4_l2t_get(struct l2t_data *d, struct neighbour *neigh,
99 const struct net_device *physdev, 99 const struct net_device *physdev,
100 unsigned int priority); 100 unsigned int priority);
101 101u64 cxgb4_select_ntuple(struct net_device *dev,
102 const struct l2t_entry *l2t);
102void t4_l2t_update(struct adapter *adap, struct neighbour *neigh); 103void t4_l2t_update(struct adapter *adap, struct neighbour *neigh);
103struct l2t_entry *t4_l2t_alloc_switching(struct l2t_data *d); 104struct l2t_entry *t4_l2t_alloc_switching(struct l2t_data *d);
104int t4_l2t_set_switching(struct adapter *adap, struct l2t_entry *e, u16 vlan, 105int t4_l2t_set_switching(struct adapter *adap, struct l2t_entry *e, u16 vlan,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index ac311f5f3eb9..cc3511a5cd0c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -509,7 +509,7 @@ static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
509 u32 val; 509 u32 val;
510 if (q->pend_cred >= 8) { 510 if (q->pend_cred >= 8) {
511 val = PIDX(q->pend_cred / 8); 511 val = PIDX(q->pend_cred / 8);
512 if (!is_t4(adap->chip)) 512 if (!is_t4(adap->params.chip))
513 val |= DBTYPE(1); 513 val |= DBTYPE(1);
514 wmb(); 514 wmb();
515 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), DBPRIO(1) | 515 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), DBPRIO(1) |
@@ -847,7 +847,7 @@ static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
847 wmb(); /* write descriptors before telling HW */ 847 wmb(); /* write descriptors before telling HW */
848 spin_lock(&q->db_lock); 848 spin_lock(&q->db_lock);
849 if (!q->db_disabled) { 849 if (!q->db_disabled) {
850 if (is_t4(adap->chip)) { 850 if (is_t4(adap->params.chip)) {
851 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), 851 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
852 QID(q->cntxt_id) | PIDX(n)); 852 QID(q->cntxt_id) | PIDX(n));
853 } else { 853 } else {
@@ -1596,7 +1596,7 @@ static noinline int handle_trace_pkt(struct adapter *adap,
1596 return 0; 1596 return 0;
1597 } 1597 }
1598 1598
1599 if (is_t4(adap->chip)) 1599 if (is_t4(adap->params.chip))
1600 __skb_pull(skb, sizeof(struct cpl_trace_pkt)); 1600 __skb_pull(skb, sizeof(struct cpl_trace_pkt));
1601 else 1601 else
1602 __skb_pull(skb, sizeof(struct cpl_t5_trace_pkt)); 1602 __skb_pull(skb, sizeof(struct cpl_t5_trace_pkt));
@@ -1661,7 +1661,7 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
1661 const struct cpl_rx_pkt *pkt; 1661 const struct cpl_rx_pkt *pkt;
1662 struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq); 1662 struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
1663 struct sge *s = &q->adap->sge; 1663 struct sge *s = &q->adap->sge;
1664 int cpl_trace_pkt = is_t4(q->adap->chip) ? 1664 int cpl_trace_pkt = is_t4(q->adap->params.chip) ?
1665 CPL_TRACE_PKT : CPL_TRACE_PKT_T5; 1665 CPL_TRACE_PKT : CPL_TRACE_PKT_T5;
1666 1666
1667 if (unlikely(*(u8 *)rsp == cpl_trace_pkt)) 1667 if (unlikely(*(u8 *)rsp == cpl_trace_pkt))
@@ -2182,7 +2182,7 @@ err:
2182static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id) 2182static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id)
2183{ 2183{
2184 q->cntxt_id = id; 2184 q->cntxt_id = id;
2185 if (!is_t4(adap->chip)) { 2185 if (!is_t4(adap->params.chip)) {
2186 unsigned int s_qpp; 2186 unsigned int s_qpp;
2187 unsigned short udb_density; 2187 unsigned short udb_density;
2188 unsigned long qpshift; 2188 unsigned long qpshift;
@@ -2581,7 +2581,7 @@ static int t4_sge_init_soft(struct adapter *adap)
2581 #undef READ_FL_BUF 2581 #undef READ_FL_BUF
2582 2582
2583 if (fl_small_pg != PAGE_SIZE || 2583 if (fl_small_pg != PAGE_SIZE ||
2584 (fl_large_pg != 0 && (fl_large_pg <= fl_small_pg || 2584 (fl_large_pg != 0 && (fl_large_pg < fl_small_pg ||
2585 (fl_large_pg & (fl_large_pg-1)) != 0))) { 2585 (fl_large_pg & (fl_large_pg-1)) != 0))) {
2586 dev_err(adap->pdev_dev, "bad SGE FL page buffer sizes [%d, %d]\n", 2586 dev_err(adap->pdev_dev, "bad SGE FL page buffer sizes [%d, %d]\n",
2587 fl_small_pg, fl_large_pg); 2587 fl_small_pg, fl_large_pg);
@@ -2641,7 +2641,7 @@ static int t4_sge_init_hard(struct adapter *adap)
2641 * Set up to drop DOORBELL writes when the DOORBELL FIFO overflows 2641 * Set up to drop DOORBELL writes when the DOORBELL FIFO overflows
2642 * and generate an interrupt when this occurs so we can recover. 2642 * and generate an interrupt when this occurs so we can recover.
2643 */ 2643 */
2644 if (is_t4(adap->chip)) { 2644 if (is_t4(adap->params.chip)) {
2645 t4_set_reg_field(adap, A_SGE_DBFIFO_STATUS, 2645 t4_set_reg_field(adap, A_SGE_DBFIFO_STATUS,
2646 V_HP_INT_THRESH(M_HP_INT_THRESH) | 2646 V_HP_INT_THRESH(M_HP_INT_THRESH) |
2647 V_LP_INT_THRESH(M_LP_INT_THRESH), 2647 V_LP_INT_THRESH(M_LP_INT_THRESH),
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 4cbb2f9850be..e1413eacdbd2 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -296,7 +296,7 @@ int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
296 u32 mc_bist_cmd, mc_bist_cmd_addr, mc_bist_cmd_len; 296 u32 mc_bist_cmd, mc_bist_cmd_addr, mc_bist_cmd_len;
297 u32 mc_bist_status_rdata, mc_bist_data_pattern; 297 u32 mc_bist_status_rdata, mc_bist_data_pattern;
298 298
299 if (is_t4(adap->chip)) { 299 if (is_t4(adap->params.chip)) {
300 mc_bist_cmd = MC_BIST_CMD; 300 mc_bist_cmd = MC_BIST_CMD;
301 mc_bist_cmd_addr = MC_BIST_CMD_ADDR; 301 mc_bist_cmd_addr = MC_BIST_CMD_ADDR;
302 mc_bist_cmd_len = MC_BIST_CMD_LEN; 302 mc_bist_cmd_len = MC_BIST_CMD_LEN;
@@ -349,7 +349,7 @@ int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
349 u32 edc_bist_cmd, edc_bist_cmd_addr, edc_bist_cmd_len; 349 u32 edc_bist_cmd, edc_bist_cmd_addr, edc_bist_cmd_len;
350 u32 edc_bist_cmd_data_pattern, edc_bist_status_rdata; 350 u32 edc_bist_cmd_data_pattern, edc_bist_status_rdata;
351 351
352 if (is_t4(adap->chip)) { 352 if (is_t4(adap->params.chip)) {
353 edc_bist_cmd = EDC_REG(EDC_BIST_CMD, idx); 353 edc_bist_cmd = EDC_REG(EDC_BIST_CMD, idx);
354 edc_bist_cmd_addr = EDC_REG(EDC_BIST_CMD_ADDR, idx); 354 edc_bist_cmd_addr = EDC_REG(EDC_BIST_CMD_ADDR, idx);
355 edc_bist_cmd_len = EDC_REG(EDC_BIST_CMD_LEN, idx); 355 edc_bist_cmd_len = EDC_REG(EDC_BIST_CMD_LEN, idx);
@@ -402,7 +402,7 @@ int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
402static int t4_mem_win_rw(struct adapter *adap, u32 addr, __be32 *data, int dir) 402static int t4_mem_win_rw(struct adapter *adap, u32 addr, __be32 *data, int dir)
403{ 403{
404 int i; 404 int i;
405 u32 win_pf = is_t4(adap->chip) ? 0 : V_PFNUM(adap->fn); 405 u32 win_pf = is_t4(adap->params.chip) ? 0 : V_PFNUM(adap->fn);
406 406
407 /* 407 /*
408 * Setup offset into PCIE memory window. Address must be a 408 * Setup offset into PCIE memory window. Address must be a
@@ -863,104 +863,169 @@ unlock:
863} 863}
864 864
865/** 865/**
866 * get_fw_version - read the firmware version 866 * t4_get_fw_version - read the firmware version
867 * @adapter: the adapter 867 * @adapter: the adapter
868 * @vers: where to place the version 868 * @vers: where to place the version
869 * 869 *
870 * Reads the FW version from flash. 870 * Reads the FW version from flash.
871 */ 871 */
872static int get_fw_version(struct adapter *adapter, u32 *vers) 872int t4_get_fw_version(struct adapter *adapter, u32 *vers)
873{ 873{
874 return t4_read_flash(adapter, adapter->params.sf_fw_start + 874 return t4_read_flash(adapter, FLASH_FW_START +
875 offsetof(struct fw_hdr, fw_ver), 1, vers, 0); 875 offsetof(struct fw_hdr, fw_ver), 1,
876 vers, 0);
876} 877}
877 878
878/** 879/**
879 * get_tp_version - read the TP microcode version 880 * t4_get_tp_version - read the TP microcode version
880 * @adapter: the adapter 881 * @adapter: the adapter
881 * @vers: where to place the version 882 * @vers: where to place the version
882 * 883 *
883 * Reads the TP microcode version from flash. 884 * Reads the TP microcode version from flash.
884 */ 885 */
885static int get_tp_version(struct adapter *adapter, u32 *vers) 886int t4_get_tp_version(struct adapter *adapter, u32 *vers)
886{ 887{
887 return t4_read_flash(adapter, adapter->params.sf_fw_start + 888 return t4_read_flash(adapter, FLASH_FW_START +
888 offsetof(struct fw_hdr, tp_microcode_ver), 889 offsetof(struct fw_hdr, tp_microcode_ver),
889 1, vers, 0); 890 1, vers, 0);
890} 891}
891 892
892/** 893/* Is the given firmware API compatible with the one the driver was compiled
893 * t4_check_fw_version - check if the FW is compatible with this driver 894 * with?
894 * @adapter: the adapter
895 *
896 * Checks if an adapter's FW is compatible with the driver. Returns 0
897 * if there's exact match, a negative error if the version could not be
898 * read or there's a major version mismatch, and a positive value if the
899 * expected major version is found but there's a minor version mismatch.
900 */ 895 */
901int t4_check_fw_version(struct adapter *adapter) 896static int fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2)
902{ 897{
903 u32 api_vers[2];
904 int ret, major, minor, micro;
905 int exp_major, exp_minor, exp_micro;
906 898
907 ret = get_fw_version(adapter, &adapter->params.fw_vers); 899 /* short circuit if it's the exact same firmware version */
908 if (!ret) 900 if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver)
909 ret = get_tp_version(adapter, &adapter->params.tp_vers); 901 return 1;
910 if (!ret)
911 ret = t4_read_flash(adapter, adapter->params.sf_fw_start +
912 offsetof(struct fw_hdr, intfver_nic),
913 2, api_vers, 1);
914 if (ret)
915 return ret;
916 902
917 major = FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers); 903#define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x)
918 minor = FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers); 904 if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) &&
919 micro = FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers); 905 SAME_INTF(ri) && SAME_INTF(iscsi) && SAME_INTF(fcoe))
906 return 1;
907#undef SAME_INTF
920 908
921 switch (CHELSIO_CHIP_VERSION(adapter->chip)) { 909 return 0;
922 case CHELSIO_T4: 910}
923 exp_major = FW_VERSION_MAJOR;
924 exp_minor = FW_VERSION_MINOR;
925 exp_micro = FW_VERSION_MICRO;
926 break;
927 case CHELSIO_T5:
928 exp_major = FW_VERSION_MAJOR_T5;
929 exp_minor = FW_VERSION_MINOR_T5;
930 exp_micro = FW_VERSION_MICRO_T5;
931 break;
932 default:
933 dev_err(adapter->pdev_dev, "Unsupported chip type, %x\n",
934 adapter->chip);
935 return -EINVAL;
936 }
937 911
938 memcpy(adapter->params.api_vers, api_vers, 912/* The firmware in the filesystem is usable, but should it be installed?
939 sizeof(adapter->params.api_vers)); 913 * This routine explains itself in detail if it indicates the filesystem
914 * firmware should be installed.
915 */
916static int should_install_fs_fw(struct adapter *adap, int card_fw_usable,
917 int k, int c)
918{
919 const char *reason;
940 920
941 if (major < exp_major || (major == exp_major && minor < exp_minor) || 921 if (!card_fw_usable) {
942 (major == exp_major && minor == exp_minor && micro < exp_micro)) { 922 reason = "incompatible or unusable";
943 dev_err(adapter->pdev_dev, 923 goto install;
944 "Card has firmware version %u.%u.%u, minimum "
945 "supported firmware is %u.%u.%u.\n", major, minor,
946 micro, exp_major, exp_minor, exp_micro);
947 return -EFAULT;
948 } 924 }
949 925
950 if (major != exp_major) { /* major mismatch - fail */ 926 if (k > c) {
951 dev_err(adapter->pdev_dev, 927 reason = "older than the version supported with this driver";
952 "card FW has major version %u, driver wants %u\n", 928 goto install;
953 major, exp_major);
954 return -EINVAL;
955 } 929 }
956 930
957 if (minor == exp_minor && micro == exp_micro) 931 return 0;
958 return 0; /* perfect match */ 932
933install:
934 dev_err(adap->pdev_dev, "firmware on card (%u.%u.%u.%u) is %s, "
935 "installing firmware %u.%u.%u.%u on card.\n",
936 FW_HDR_FW_VER_MAJOR_GET(c), FW_HDR_FW_VER_MINOR_GET(c),
937 FW_HDR_FW_VER_MICRO_GET(c), FW_HDR_FW_VER_BUILD_GET(c), reason,
938 FW_HDR_FW_VER_MAJOR_GET(k), FW_HDR_FW_VER_MINOR_GET(k),
939 FW_HDR_FW_VER_MICRO_GET(k), FW_HDR_FW_VER_BUILD_GET(k));
959 940
960 /* Minor/micro version mismatch. Report it but often it's OK. */
961 return 1; 941 return 1;
962} 942}
963 943
944int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
945 const u8 *fw_data, unsigned int fw_size,
946 struct fw_hdr *card_fw, enum dev_state state,
947 int *reset)
948{
949 int ret, card_fw_usable, fs_fw_usable;
950 const struct fw_hdr *fs_fw;
951 const struct fw_hdr *drv_fw;
952
953 drv_fw = &fw_info->fw_hdr;
954
955 /* Read the header of the firmware on the card */
956 ret = -t4_read_flash(adap, FLASH_FW_START,
957 sizeof(*card_fw) / sizeof(uint32_t),
958 (uint32_t *)card_fw, 1);
959 if (ret == 0) {
960 card_fw_usable = fw_compatible(drv_fw, (const void *)card_fw);
961 } else {
962 dev_err(adap->pdev_dev,
963 "Unable to read card's firmware header: %d\n", ret);
964 card_fw_usable = 0;
965 }
966
967 if (fw_data != NULL) {
968 fs_fw = (const void *)fw_data;
969 fs_fw_usable = fw_compatible(drv_fw, fs_fw);
970 } else {
971 fs_fw = NULL;
972 fs_fw_usable = 0;
973 }
974
975 if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver &&
976 (!fs_fw_usable || fs_fw->fw_ver == drv_fw->fw_ver)) {
977 /* Common case: the firmware on the card is an exact match and
978 * the filesystem one is an exact match too, or the filesystem
979 * one is absent/incompatible.
980 */
981 } else if (fs_fw_usable && state == DEV_STATE_UNINIT &&
982 should_install_fs_fw(adap, card_fw_usable,
983 be32_to_cpu(fs_fw->fw_ver),
984 be32_to_cpu(card_fw->fw_ver))) {
985 ret = -t4_fw_upgrade(adap, adap->mbox, fw_data,
986 fw_size, 0);
987 if (ret != 0) {
988 dev_err(adap->pdev_dev,
989 "failed to install firmware: %d\n", ret);
990 goto bye;
991 }
992
993 /* Installed successfully, update the cached header too. */
994 memcpy(card_fw, fs_fw, sizeof(*card_fw));
995 card_fw_usable = 1;
996 *reset = 0; /* already reset as part of load_fw */
997 }
998
999 if (!card_fw_usable) {
1000 uint32_t d, c, k;
1001
1002 d = be32_to_cpu(drv_fw->fw_ver);
1003 c = be32_to_cpu(card_fw->fw_ver);
1004 k = fs_fw ? be32_to_cpu(fs_fw->fw_ver) : 0;
1005
1006 dev_err(adap->pdev_dev, "Cannot find a usable firmware: "
1007 "chip state %d, "
1008 "driver compiled with %d.%d.%d.%d, "
1009 "card has %d.%d.%d.%d, filesystem has %d.%d.%d.%d\n",
1010 state,
1011 FW_HDR_FW_VER_MAJOR_GET(d), FW_HDR_FW_VER_MINOR_GET(d),
1012 FW_HDR_FW_VER_MICRO_GET(d), FW_HDR_FW_VER_BUILD_GET(d),
1013 FW_HDR_FW_VER_MAJOR_GET(c), FW_HDR_FW_VER_MINOR_GET(c),
1014 FW_HDR_FW_VER_MICRO_GET(c), FW_HDR_FW_VER_BUILD_GET(c),
1015 FW_HDR_FW_VER_MAJOR_GET(k), FW_HDR_FW_VER_MINOR_GET(k),
1016 FW_HDR_FW_VER_MICRO_GET(k), FW_HDR_FW_VER_BUILD_GET(k));
1017 ret = EINVAL;
1018 goto bye;
1019 }
1020
1021 /* We're using whatever's on the card and it's known to be good. */
1022 adap->params.fw_vers = be32_to_cpu(card_fw->fw_ver);
1023 adap->params.tp_vers = be32_to_cpu(card_fw->tp_microcode_ver);
1024
1025bye:
1026 return ret;
1027}
1028
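
The selection policy t4_prep_fw() implements can be condensed as below. This is an illustrative sketch only (no flash reads, no mailbox traffic, simplified version comparison), following the order of the checks in the function above:

#include <stdio.h>

/* Illustrative only: which firmware image ends up being used */
enum fw_choice { FW_USE_CARD, FW_INSTALL_FS, FW_NONE_USABLE };

static enum fw_choice pick_firmware(int card_ok, int fs_ok,
				    unsigned int card_ver, unsigned int fs_ver,
				    unsigned int drv_ver, int dev_uninit)
{
	/* Common case: the card already runs exactly what the driver expects */
	if (card_ok && card_ver == drv_ver && (!fs_ok || fs_ver == drv_ver))
		return FW_USE_CARD;

	/* The /lib/firmware image may only be flashed while the device is
	 * still uninitialized, and only if the card copy is unusable or older.
	 */
	if (fs_ok && dev_uninit && (!card_ok || fs_ver > card_ver))
		return FW_INSTALL_FS;

	return card_ok ? FW_USE_CARD : FW_NONE_USABLE;
}

int main(void)
{
	/* card usable but older than the /lib/firmware image, device uninit */
	printf("%d\n", pick_firmware(1, 1, 0x01060002, 0x01090010, 0x01090010, 1));
	return 0;
}
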
964/** 1029/**
965 * t4_flash_erase_sectors - erase a range of flash sectors 1030 * t4_flash_erase_sectors - erase a range of flash sectors
966 * @adapter: the adapter 1031 * @adapter: the adapter
@@ -1368,7 +1433,7 @@ static void pcie_intr_handler(struct adapter *adapter)
1368 PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS, 1433 PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
1369 pcie_port_intr_info) + 1434 pcie_port_intr_info) +
1370 t4_handle_intr_status(adapter, PCIE_INT_CAUSE, 1435 t4_handle_intr_status(adapter, PCIE_INT_CAUSE,
1371 is_t4(adapter->chip) ? 1436 is_t4(adapter->params.chip) ?
1372 pcie_intr_info : t5_pcie_intr_info); 1437 pcie_intr_info : t5_pcie_intr_info);
1373 1438
1374 if (fat) 1439 if (fat)
@@ -1782,7 +1847,7 @@ static void xgmac_intr_handler(struct adapter *adap, int port)
1782{ 1847{
1783 u32 v, int_cause_reg; 1848 u32 v, int_cause_reg;
1784 1849
1785 if (is_t4(adap->chip)) 1850 if (is_t4(adap->params.chip))
1786 int_cause_reg = PORT_REG(port, XGMAC_PORT_INT_CAUSE); 1851 int_cause_reg = PORT_REG(port, XGMAC_PORT_INT_CAUSE);
1787 else 1852 else
1788 int_cause_reg = T5_PORT_REG(port, MAC_PORT_INT_CAUSE); 1853 int_cause_reg = T5_PORT_REG(port, MAC_PORT_INT_CAUSE);
@@ -2250,7 +2315,7 @@ void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
2250 2315
2251#define GET_STAT(name) \ 2316#define GET_STAT(name) \
2252 t4_read_reg64(adap, \ 2317 t4_read_reg64(adap, \
2253 (is_t4(adap->chip) ? PORT_REG(idx, MPS_PORT_STAT_##name##_L) : \ 2318 (is_t4(adap->params.chip) ? PORT_REG(idx, MPS_PORT_STAT_##name##_L) : \
2254 T5_PORT_REG(idx, MPS_PORT_STAT_##name##_L))) 2319 T5_PORT_REG(idx, MPS_PORT_STAT_##name##_L)))
2255#define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L) 2320#define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)
2256 2321
@@ -2332,7 +2397,7 @@ void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
2332{ 2397{
2333 u32 mag_id_reg_l, mag_id_reg_h, port_cfg_reg; 2398 u32 mag_id_reg_l, mag_id_reg_h, port_cfg_reg;
2334 2399
2335 if (is_t4(adap->chip)) { 2400 if (is_t4(adap->params.chip)) {
2336 mag_id_reg_l = PORT_REG(port, XGMAC_PORT_MAGIC_MACID_LO); 2401 mag_id_reg_l = PORT_REG(port, XGMAC_PORT_MAGIC_MACID_LO);
2337 mag_id_reg_h = PORT_REG(port, XGMAC_PORT_MAGIC_MACID_HI); 2402 mag_id_reg_h = PORT_REG(port, XGMAC_PORT_MAGIC_MACID_HI);
2338 port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2); 2403 port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2);
@@ -2374,7 +2439,7 @@ int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
2374 int i; 2439 int i;
2375 u32 port_cfg_reg; 2440 u32 port_cfg_reg;
2376 2441
2377 if (is_t4(adap->chip)) 2442 if (is_t4(adap->params.chip))
2378 port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2); 2443 port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2);
2379 else 2444 else
2380 port_cfg_reg = T5_PORT_REG(port, MAC_PORT_CFG2); 2445 port_cfg_reg = T5_PORT_REG(port, MAC_PORT_CFG2);
@@ -2387,7 +2452,7 @@ int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
2387 return -EINVAL; 2452 return -EINVAL;
2388 2453
2389#define EPIO_REG(name) \ 2454#define EPIO_REG(name) \
2390 (is_t4(adap->chip) ? PORT_REG(port, XGMAC_PORT_EPIO_##name) : \ 2455 (is_t4(adap->params.chip) ? PORT_REG(port, XGMAC_PORT_EPIO_##name) : \
2391 T5_PORT_REG(port, MAC_PORT_EPIO_##name)) 2456 T5_PORT_REG(port, MAC_PORT_EPIO_##name))
2392 2457
2393 t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32); 2458 t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
@@ -2474,7 +2539,7 @@ int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
2474int t4_mem_win_read_len(struct adapter *adap, u32 addr, __be32 *data, int len) 2539int t4_mem_win_read_len(struct adapter *adap, u32 addr, __be32 *data, int len)
2475{ 2540{
2476 int i, off; 2541 int i, off;
2477 u32 win_pf = is_t4(adap->chip) ? 0 : V_PFNUM(adap->fn); 2542 u32 win_pf = is_t4(adap->params.chip) ? 0 : V_PFNUM(adap->fn);
2478 2543
2479 /* Align on a 2KB boundary. 2544 /* Align on a 2KB boundary.
2480 */ 2545 */
@@ -3306,7 +3371,7 @@ int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
3306 int i, ret; 3371 int i, ret;
3307 struct fw_vi_mac_cmd c; 3372 struct fw_vi_mac_cmd c;
3308 struct fw_vi_mac_exact *p; 3373 struct fw_vi_mac_exact *p;
3309 unsigned int max_naddr = is_t4(adap->chip) ? 3374 unsigned int max_naddr = is_t4(adap->params.chip) ?
3310 NUM_MPS_CLS_SRAM_L_INSTANCES : 3375 NUM_MPS_CLS_SRAM_L_INSTANCES :
3311 NUM_MPS_T5_CLS_SRAM_L_INSTANCES; 3376 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
3312 3377
@@ -3368,7 +3433,7 @@ int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
3368 int ret, mode; 3433 int ret, mode;
3369 struct fw_vi_mac_cmd c; 3434 struct fw_vi_mac_cmd c;
3370 struct fw_vi_mac_exact *p = c.u.exact; 3435 struct fw_vi_mac_exact *p = c.u.exact;
3371 unsigned int max_mac_addr = is_t4(adap->chip) ? 3436 unsigned int max_mac_addr = is_t4(adap->params.chip) ?
3372 NUM_MPS_CLS_SRAM_L_INSTANCES : 3437 NUM_MPS_CLS_SRAM_L_INSTANCES :
3373 NUM_MPS_T5_CLS_SRAM_L_INSTANCES; 3438 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
3374 3439
@@ -3699,13 +3764,14 @@ int t4_prep_adapter(struct adapter *adapter)
3699{ 3764{
3700 int ret, ver; 3765 int ret, ver;
3701 uint16_t device_id; 3766 uint16_t device_id;
3767 u32 pl_rev;
3702 3768
3703 ret = t4_wait_dev_ready(adapter); 3769 ret = t4_wait_dev_ready(adapter);
3704 if (ret < 0) 3770 if (ret < 0)
3705 return ret; 3771 return ret;
3706 3772
3707 get_pci_mode(adapter, &adapter->params.pci); 3773 get_pci_mode(adapter, &adapter->params.pci);
3708 adapter->params.rev = t4_read_reg(adapter, PL_REV); 3774 pl_rev = G_REV(t4_read_reg(adapter, PL_REV));
3709 3775
3710 ret = get_flash_params(adapter); 3776 ret = get_flash_params(adapter);
3711 if (ret < 0) { 3777 if (ret < 0) {
@@ -3717,14 +3783,13 @@ int t4_prep_adapter(struct adapter *adapter)
3717 */ 3783 */
3718 pci_read_config_word(adapter->pdev, PCI_DEVICE_ID, &device_id); 3784 pci_read_config_word(adapter->pdev, PCI_DEVICE_ID, &device_id);
3719 ver = device_id >> 12; 3785 ver = device_id >> 12;
3786 adapter->params.chip = 0;
3720 switch (ver) { 3787 switch (ver) {
3721 case CHELSIO_T4: 3788 case CHELSIO_T4:
3722 adapter->chip = CHELSIO_CHIP_CODE(CHELSIO_T4, 3789 adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev);
3723 adapter->params.rev);
3724 break; 3790 break;
3725 case CHELSIO_T5: 3791 case CHELSIO_T5:
3726 adapter->chip = CHELSIO_CHIP_CODE(CHELSIO_T5, 3792 adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
3727 adapter->params.rev);
3728 break; 3793 break;
3729 default: 3794 default:
3730 dev_err(adapter->pdev_dev, "Device %d is not supported\n", 3795 dev_err(adapter->pdev_dev, "Device %d is not supported\n",
@@ -3732,9 +3797,6 @@ int t4_prep_adapter(struct adapter *adapter)
3732 return -EINVAL; 3797 return -EINVAL;
3733 } 3798 }
3734 3799
3735 /* Reassign the updated revision field */
3736 adapter->params.rev = adapter->chip;
3737
3738 init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd); 3800 init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
3739 3801
3740 /* 3802 /*
@@ -3746,6 +3808,109 @@ int t4_prep_adapter(struct adapter *adapter)
3746 return 0; 3808 return 0;
3747} 3809}
3748 3810
3811/**
3812 * t4_init_tp_params - initialize adap->params.tp
3813 * @adap: the adapter
3814 *
3815 * Initialize various fields of the adapter's TP Parameters structure.
3816 */
3817int t4_init_tp_params(struct adapter *adap)
3818{
3819 int chan;
3820 u32 v;
3821
3822 v = t4_read_reg(adap, TP_TIMER_RESOLUTION);
3823 adap->params.tp.tre = TIMERRESOLUTION_GET(v);
3824 adap->params.tp.dack_re = DELAYEDACKRESOLUTION_GET(v);
3825
3826 /* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
3827 for (chan = 0; chan < NCHAN; chan++)
3828 adap->params.tp.tx_modq[chan] = chan;
3829
3830 /* Cache the adapter's Compressed Filter Mode and global Ingress
3831 * Configuration.
3832 */
3833 t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
3834 &adap->params.tp.vlan_pri_map, 1,
3835 TP_VLAN_PRI_MAP);
3836 t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
3837 &adap->params.tp.ingress_config, 1,
3838 TP_INGRESS_CONFIG);
3839
3840 /* Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
3841 * shift positions of several elements of the Compressed Filter Tuple
3842 * for this adapter which we need frequently ...
3843 */
3844 adap->params.tp.vlan_shift = t4_filter_field_shift(adap, F_VLAN);
3845 adap->params.tp.vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID);
3846 adap->params.tp.port_shift = t4_filter_field_shift(adap, F_PORT);
3847 adap->params.tp.protocol_shift = t4_filter_field_shift(adap,
3848 F_PROTOCOL);
3849
3850 /* If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID
3851 * represents the presence of an Outer VLAN instead of a VNIC ID.
3852 */
3853 if ((adap->params.tp.ingress_config & F_VNIC) == 0)
3854 adap->params.tp.vnic_shift = -1;
3855
3856 return 0;
3857}
3858
3859/**
3860 * t4_filter_field_shift - calculate filter field shift
3861 * @adap: the adapter
3862 * @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
3863 *
3864 * Return the shift position of a filter field within the Compressed
3865 * Filter Tuple. The filter field is specified via its selection bit
3866 * within TP_VLAN_PRI_MAP (filter mode). E.g. F_VLAN.
3867 */
3868int t4_filter_field_shift(const struct adapter *adap, int filter_sel)
3869{
3870 unsigned int filter_mode = adap->params.tp.vlan_pri_map;
3871 unsigned int sel;
3872 int field_shift;
3873
3874 if ((filter_mode & filter_sel) == 0)
3875 return -1;
3876
3877 for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
3878 switch (filter_mode & sel) {
3879 case F_FCOE:
3880 field_shift += W_FT_FCOE;
3881 break;
3882 case F_PORT:
3883 field_shift += W_FT_PORT;
3884 break;
3885 case F_VNIC_ID:
3886 field_shift += W_FT_VNIC_ID;
3887 break;
3888 case F_VLAN:
3889 field_shift += W_FT_VLAN;
3890 break;
3891 case F_TOS:
3892 field_shift += W_FT_TOS;
3893 break;
3894 case F_PROTOCOL:
3895 field_shift += W_FT_PROTOCOL;
3896 break;
3897 case F_ETHERTYPE:
3898 field_shift += W_FT_ETHERTYPE;
3899 break;
3900 case F_MACMATCH:
3901 field_shift += W_FT_MACMATCH;
3902 break;
3903 case F_MPSHITTYPE:
3904 field_shift += W_FT_MPSHITTYPE;
3905 break;
3906 case F_FRAGMENTATION:
3907 field_shift += W_FT_FRAGMENTATION;
3908 break;
3909 }
3910 }
3911 return field_shift;
3912}
3913
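
A worked example of the walk above, with a hypothetical filter mode (nothing here is read from hardware); it reproduces the shift computation of t4_filter_field_shift() using the W_FT_* widths added to t4_regs.h later in this patch:

#include <stdio.h>

/* selection-bit order and field widths, mirroring the new t4_regs.h values */
enum { FT_FCOE, FT_PORT, FT_VNIC_ID, FT_VLAN, FT_TOS, FT_PROTOCOL,
       FT_ETHERTYPE, FT_MACMATCH, FT_MPSHITTYPE, FT_FRAGMENTATION, FT_NFIELDS };

static const int width[FT_NFIELDS] = { 1, 3, 17, 17, 8, 8, 16, 9, 3, 1 };

static int field_shift(unsigned int mode, int field)
{
	int f, shift = 0;

	if (!(mode & (1u << field)))
		return -1;			/* field not in the filter mode */
	for (f = 0; f < field; f++)
		if (mode & (1u << f))
			shift += width[f];	/* lower selected fields consume bits */
	return shift;
}

int main(void)
{
	unsigned int mode = (1u << FT_PORT) | (1u << FT_VLAN) | (1u << FT_PROTOCOL);

	printf("port %d vlan %d protocol %d tos %d\n",
	       field_shift(mode, FT_PORT),	/* 0            */
	       field_shift(mode, FT_VLAN),	/* 3  (= PORT)  */
	       field_shift(mode, FT_PROTOCOL),	/* 20 (= 3+17)  */
	       field_shift(mode, FT_TOS));	/* -1           */
	return 0;
}

With that mode, cxgb4_select_ntuple() would place the port field at bit 0, the VLAN field (including its valid bit) at bit 3 and the protocol at bit 20, while vnic_shift stays -1.
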
3749int t4_port_init(struct adapter *adap, int mbox, int pf, int vf) 3914int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
3750{ 3915{
3751 u8 addr[6]; 3916 u8 addr[6];
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
index ef146c0ba481..4082522d8140 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
@@ -1092,6 +1092,11 @@
1092 1092
1093#define PL_REV 0x1943c 1093#define PL_REV 0x1943c
1094 1094
1095#define S_REV 0
1096#define M_REV 0xfU
1097#define V_REV(x) ((x) << S_REV)
1098#define G_REV(x) (((x) >> S_REV) & M_REV)
1099
1095#define LE_DB_CONFIG 0x19c04 1100#define LE_DB_CONFIG 0x19c04
1096#define HASHEN 0x00100000U 1101#define HASHEN 0x00100000U
1097 1102
@@ -1166,10 +1171,50 @@
1166 1171
1167#define A_TP_TX_SCHED_PCMD 0x25 1172#define A_TP_TX_SCHED_PCMD 0x25
1168 1173
1174#define S_VNIC 11
1175#define V_VNIC(x) ((x) << S_VNIC)
1176#define F_VNIC V_VNIC(1U)
1177
1178#define S_FRAGMENTATION 9
1179#define V_FRAGMENTATION(x) ((x) << S_FRAGMENTATION)
1180#define F_FRAGMENTATION V_FRAGMENTATION(1U)
1181
1182#define S_MPSHITTYPE 8
1183#define V_MPSHITTYPE(x) ((x) << S_MPSHITTYPE)
1184#define F_MPSHITTYPE V_MPSHITTYPE(1U)
1185
1186#define S_MACMATCH 7
1187#define V_MACMATCH(x) ((x) << S_MACMATCH)
1188#define F_MACMATCH V_MACMATCH(1U)
1189
1190#define S_ETHERTYPE 6
1191#define V_ETHERTYPE(x) ((x) << S_ETHERTYPE)
1192#define F_ETHERTYPE V_ETHERTYPE(1U)
1193
1194#define S_PROTOCOL 5
1195#define V_PROTOCOL(x) ((x) << S_PROTOCOL)
1196#define F_PROTOCOL V_PROTOCOL(1U)
1197
1198#define S_TOS 4
1199#define V_TOS(x) ((x) << S_TOS)
1200#define F_TOS V_TOS(1U)
1201
1202#define S_VLAN 3
1203#define V_VLAN(x) ((x) << S_VLAN)
1204#define F_VLAN V_VLAN(1U)
1205
1206#define S_VNIC_ID 2
1207#define V_VNIC_ID(x) ((x) << S_VNIC_ID)
1208#define F_VNIC_ID V_VNIC_ID(1U)
1209
1169#define S_PORT 1 1210#define S_PORT 1
1170#define V_PORT(x) ((x) << S_PORT) 1211#define V_PORT(x) ((x) << S_PORT)
1171#define F_PORT V_PORT(1U) 1212#define F_PORT V_PORT(1U)
1172 1213
1214#define S_FCOE 0
1215#define V_FCOE(x) ((x) << S_FCOE)
1216#define F_FCOE V_FCOE(1U)
1217
1173#define NUM_MPS_CLS_SRAM_L_INSTANCES 336 1218#define NUM_MPS_CLS_SRAM_L_INSTANCES 336
1174#define NUM_MPS_T5_CLS_SRAM_L_INSTANCES 512 1219#define NUM_MPS_T5_CLS_SRAM_L_INSTANCES 512
1175 1220
@@ -1199,4 +1244,46 @@
1199#define EDC_STRIDE_T5 (EDC_T51_BASE_ADDR - EDC_T50_BASE_ADDR) 1244#define EDC_STRIDE_T5 (EDC_T51_BASE_ADDR - EDC_T50_BASE_ADDR)
1200#define EDC_REG_T5(reg, idx) (reg + EDC_STRIDE_T5 * idx) 1245#define EDC_REG_T5(reg, idx) (reg + EDC_STRIDE_T5 * idx)
1201 1246
1247#define A_PL_VF_REV 0x4
1248#define A_PL_VF_WHOAMI 0x0
1249#define A_PL_VF_REVISION 0x8
1250
1251#define S_CHIPID 4
1252#define M_CHIPID 0xfU
1253#define V_CHIPID(x) ((x) << S_CHIPID)
1254#define G_CHIPID(x) (((x) >> S_CHIPID) & M_CHIPID)
1255
1256/* TP_VLAN_PRI_MAP controls which subset of fields will be present in the
1257 * Compressed Filter Tuple for LE filters. Each bit set in TP_VLAN_PRI_MAP
1258 * selects for a particular field being present. These fields, when present
1259 * in the Compressed Filter Tuple, have the following widths in bits.
1260 */
1261#define W_FT_FCOE 1
1262#define W_FT_PORT 3
1263#define W_FT_VNIC_ID 17
1264#define W_FT_VLAN 17
1265#define W_FT_TOS 8
1266#define W_FT_PROTOCOL 8
1267#define W_FT_ETHERTYPE 16
1268#define W_FT_MACMATCH 9
1269#define W_FT_MPSHITTYPE 3
1270#define W_FT_FRAGMENTATION 1
1271
1272/* Some of the Compressed Filter Tuple fields have internal structure. These
1273 * bit shifts/masks describe those structures. All shifts are relative to the
1274 * base position of the fields within the Compressed Filter Tuple
1275 */
1276#define S_FT_VLAN_VLD 16
1277#define V_FT_VLAN_VLD(x) ((x) << S_FT_VLAN_VLD)
1278#define F_FT_VLAN_VLD V_FT_VLAN_VLD(1U)
1279
1280#define S_FT_VNID_ID_VF 0
1281#define V_FT_VNID_ID_VF(x) ((x) << S_FT_VNID_ID_VF)
1282
1283#define S_FT_VNID_ID_PF 7
1284#define V_FT_VNID_ID_PF(x) ((x) << S_FT_VNID_ID_PF)
1285
1286#define S_FT_VNID_ID_VLD 16
1287#define V_FT_VNID_ID_VLD(x) ((x) << S_FT_VNID_ID_VLD)
1288
1202#endif /* __T4_REGS_H */ 1289#endif /* __T4_REGS_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index 6f77ac487743..74fea74ce0aa 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -2157,7 +2157,7 @@ struct fw_debug_cmd {
2157 2157
2158struct fw_hdr { 2158struct fw_hdr {
2159 u8 ver; 2159 u8 ver;
2160 u8 reserved1; 2160 u8 chip; /* terminator chip type */
2161 __be16 len512; /* bin length in units of 512-bytes */ 2161 __be16 len512; /* bin length in units of 512-bytes */
2162 __be32 fw_ver; /* firmware version */ 2162 __be32 fw_ver; /* firmware version */
2163 __be32 tp_microcode_ver; 2163 __be32 tp_microcode_ver;
@@ -2176,6 +2176,11 @@ struct fw_hdr {
2176 __be32 reserved6[23]; 2176 __be32 reserved6[23];
2177}; 2177};
2178 2178
2179enum fw_hdr_chip {
2180 FW_HDR_CHIP_T4,
2181 FW_HDR_CHIP_T5
2182};
2183
2179#define FW_HDR_FW_VER_MAJOR_GET(x) (((x) >> 24) & 0xff) 2184#define FW_HDR_FW_VER_MAJOR_GET(x) (((x) >> 24) & 0xff)
2180#define FW_HDR_FW_VER_MINOR_GET(x) (((x) >> 16) & 0xff) 2185#define FW_HDR_FW_VER_MINOR_GET(x) (((x) >> 16) & 0xff)
2181#define FW_HDR_FW_VER_MICRO_GET(x) (((x) >> 8) & 0xff) 2186#define FW_HDR_FW_VER_MICRO_GET(x) (((x) >> 8) & 0xff)
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
index be5c7ef6ca93..68eaa9c88c7d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
@@ -344,7 +344,6 @@ struct adapter {
344 unsigned long registered_device_map; 344 unsigned long registered_device_map;
345 unsigned long open_device_map; 345 unsigned long open_device_map;
346 unsigned long flags; 346 unsigned long flags;
347 enum chip_type chip;
348 struct adapter_params params; 347 struct adapter_params params;
349 348
350 /* queue and interrupt resources */ 349 /* queue and interrupt resources */
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index 5f90ec5f7519..0899c0983594 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -1064,7 +1064,7 @@ static inline unsigned int mk_adap_vers(const struct adapter *adapter)
1064 /* 1064 /*
1065 * Chip version 4, revision 0x3f (cxgb4vf). 1065 * Chip version 4, revision 0x3f (cxgb4vf).
1066 */ 1066 */
1067 return CHELSIO_CHIP_VERSION(adapter->chip) | (0x3f << 10); 1067 return CHELSIO_CHIP_VERSION(adapter->params.chip) | (0x3f << 10);
1068} 1068}
1069 1069
1070/* 1070/*
@@ -1551,9 +1551,13 @@ static void cxgb4vf_get_regs(struct net_device *dev,
1551 reg_block_dump(adapter, regbuf, 1551 reg_block_dump(adapter, regbuf,
1552 T4VF_MPS_BASE_ADDR + T4VF_MOD_MAP_MPS_FIRST, 1552 T4VF_MPS_BASE_ADDR + T4VF_MOD_MAP_MPS_FIRST,
1553 T4VF_MPS_BASE_ADDR + T4VF_MOD_MAP_MPS_LAST); 1553 T4VF_MPS_BASE_ADDR + T4VF_MOD_MAP_MPS_LAST);
1554
1555 /* T5 adds new registers in the PL Register map.
1556 */
1554 reg_block_dump(adapter, regbuf, 1557 reg_block_dump(adapter, regbuf,
1555 T4VF_PL_BASE_ADDR + T4VF_MOD_MAP_PL_FIRST, 1558 T4VF_PL_BASE_ADDR + T4VF_MOD_MAP_PL_FIRST,
1556 T4VF_PL_BASE_ADDR + T4VF_MOD_MAP_PL_LAST); 1559 T4VF_PL_BASE_ADDR + (is_t4(adapter->params.chip)
1560 ? A_PL_VF_WHOAMI : A_PL_VF_REVISION));
1557 reg_block_dump(adapter, regbuf, 1561 reg_block_dump(adapter, regbuf,
1558 T4VF_CIM_BASE_ADDR + T4VF_MOD_MAP_CIM_FIRST, 1562 T4VF_CIM_BASE_ADDR + T4VF_MOD_MAP_CIM_FIRST,
1559 T4VF_CIM_BASE_ADDR + T4VF_MOD_MAP_CIM_LAST); 1563 T4VF_CIM_BASE_ADDR + T4VF_MOD_MAP_CIM_LAST);
@@ -2087,6 +2091,7 @@ static int adap_init0(struct adapter *adapter)
2087 unsigned int ethqsets; 2091 unsigned int ethqsets;
2088 int err; 2092 int err;
2089 u32 param, val = 0; 2093 u32 param, val = 0;
2094 unsigned int chipid;
2090 2095
2091 /* 2096 /*
2092 * Wait for the device to become ready before proceeding ... 2097 * Wait for the device to become ready before proceeding ...
@@ -2114,12 +2119,14 @@ static int adap_init0(struct adapter *adapter)
2114 return err; 2119 return err;
2115 } 2120 }
2116 2121
2122 adapter->params.chip = 0;
2117 switch (adapter->pdev->device >> 12) { 2123 switch (adapter->pdev->device >> 12) {
2118 case CHELSIO_T4: 2124 case CHELSIO_T4:
2119 adapter->chip = CHELSIO_CHIP_CODE(CHELSIO_T4, 0); 2125 adapter->params.chip = CHELSIO_CHIP_CODE(CHELSIO_T4, 0);
2120 break; 2126 break;
2121 case CHELSIO_T5: 2127 case CHELSIO_T5:
2122 adapter->chip = CHELSIO_CHIP_CODE(CHELSIO_T5, 0); 2128 chipid = G_REV(t4_read_reg(adapter, A_PL_VF_REV));
2129 adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, chipid);
2123 break; 2130 break;
2124 } 2131 }
2125 2132
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index 8475c4cda9e4..0a89963c48ce 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -537,7 +537,7 @@ static inline void ring_fl_db(struct adapter *adapter, struct sge_fl *fl)
537 */ 537 */
538 if (fl->pend_cred >= FL_PER_EQ_UNIT) { 538 if (fl->pend_cred >= FL_PER_EQ_UNIT) {
539 val = PIDX(fl->pend_cred / FL_PER_EQ_UNIT); 539 val = PIDX(fl->pend_cred / FL_PER_EQ_UNIT);
540 if (!is_t4(adapter->chip)) 540 if (!is_t4(adapter->params.chip))
541 val |= DBTYPE(1); 541 val |= DBTYPE(1);
542 wmb(); 542 wmb();
543 t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL, 543 t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL,
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
index 53cbfed21d0b..61362450d05b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
@@ -39,21 +39,28 @@
39#include "../cxgb4/t4fw_api.h" 39#include "../cxgb4/t4fw_api.h"
40 40
41#define CHELSIO_CHIP_CODE(version, revision) (((version) << 4) | (revision)) 41#define CHELSIO_CHIP_CODE(version, revision) (((version) << 4) | (revision))
42#define CHELSIO_CHIP_VERSION(code) ((code) >> 4) 42#define CHELSIO_CHIP_VERSION(code) (((code) >> 4) & 0xf)
43#define CHELSIO_CHIP_RELEASE(code) ((code) & 0xf) 43#define CHELSIO_CHIP_RELEASE(code) ((code) & 0xf)
44 44
45/* All T4 and later chips have their PCI-E Device IDs encoded as 0xVFPP where:
46 *
47 * V = "4" for T4; "5" for T5, etc. or
48 * = "a" for T4 FPGA; "b" for T4 FPGA, etc.
49 * F = "0" for PF 0..3; "4".."7" for PF4..7; and "8" for VFs
50 * PP = adapter product designation
51 */
45#define CHELSIO_T4 0x4 52#define CHELSIO_T4 0x4
46#define CHELSIO_T5 0x5 53#define CHELSIO_T5 0x5
47 54
48enum chip_type { 55enum chip_type {
49 T4_A1 = CHELSIO_CHIP_CODE(CHELSIO_T4, 0), 56 T4_A1 = CHELSIO_CHIP_CODE(CHELSIO_T4, 1),
50 T4_A2 = CHELSIO_CHIP_CODE(CHELSIO_T4, 1), 57 T4_A2 = CHELSIO_CHIP_CODE(CHELSIO_T4, 2),
51 T4_A3 = CHELSIO_CHIP_CODE(CHELSIO_T4, 2),
52 T4_FIRST_REV = T4_A1, 58 T4_FIRST_REV = T4_A1,
53 T4_LAST_REV = T4_A3, 59 T4_LAST_REV = T4_A2,
54 60
55 T5_A1 = CHELSIO_CHIP_CODE(CHELSIO_T5, 0), 61 T5_A0 = CHELSIO_CHIP_CODE(CHELSIO_T5, 0),
56 T5_FIRST_REV = T5_A1, 62 T5_A1 = CHELSIO_CHIP_CODE(CHELSIO_T5, 1),
63 T5_FIRST_REV = T5_A0,
57 T5_LAST_REV = T5_A1, 64 T5_LAST_REV = T5_A1,
58}; 65};
59 66
@@ -203,6 +210,7 @@ struct adapter_params {
203 struct vpd_params vpd; /* Vital Product Data */ 210 struct vpd_params vpd; /* Vital Product Data */
204 struct rss_params rss; /* Receive Side Scaling */ 211 struct rss_params rss; /* Receive Side Scaling */
205 struct vf_resources vfres; /* Virtual Function Resource limits */ 212 struct vf_resources vfres; /* Virtual Function Resource limits */
213 enum chip_type chip; /* chip code */
206 u8 nports; /* # of Ethernet "ports" */ 214 u8 nports; /* # of Ethernet "ports" */
207}; 215};
208 216
@@ -253,7 +261,7 @@ static inline int t4vf_wr_mbox_ns(struct adapter *adapter, const void *cmd,
253 261
254static inline int is_t4(enum chip_type chip) 262static inline int is_t4(enum chip_type chip)
255{ 263{
256 return (chip >= T4_FIRST_REV && chip <= T4_LAST_REV); 264 return CHELSIO_CHIP_VERSION(chip) == CHELSIO_T4;
257} 265}
258 266
259int t4vf_wait_dev_ready(struct adapter *); 267int t4vf_wait_dev_ready(struct adapter *);
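
    For orientation, the CHELSIO_CHIP_CODE() packing redefined above keeps the
    chip version in bits 7:4 and the revision in bits 3:0, which is why is_t4()
    can now compare versions instead of enum ranges. A small worked example
    under those macros:

        /* T5_A1 = CHELSIO_CHIP_CODE(CHELSIO_T5, 1) = (0x5 << 4) | 1 = 0x51 */
        enum chip_type code = T5_A1;

        CHELSIO_CHIP_VERSION(code);   /* (0x51 >> 4) & 0xf = 0x5, i.e. CHELSIO_T5 */
        CHELSIO_CHIP_RELEASE(code);   /*  0x51 & 0xf       = 0x1, i.e. rev A1     */
        is_t4(code);                  /* false: version is T5, whatever the rev   */
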
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
index 9f96dc3bb112..d958c44341b5 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
@@ -1027,7 +1027,7 @@ int t4vf_alloc_mac_filt(struct adapter *adapter, unsigned int viid, bool free,
1027 unsigned nfilters = 0; 1027 unsigned nfilters = 0;
1028 unsigned int rem = naddr; 1028 unsigned int rem = naddr;
1029 struct fw_vi_mac_cmd cmd, rpl; 1029 struct fw_vi_mac_cmd cmd, rpl;
1030 unsigned int max_naddr = is_t4(adapter->chip) ? 1030 unsigned int max_naddr = is_t4(adapter->params.chip) ?
1031 NUM_MPS_CLS_SRAM_L_INSTANCES : 1031 NUM_MPS_CLS_SRAM_L_INSTANCES :
1032 NUM_MPS_T5_CLS_SRAM_L_INSTANCES; 1032 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
1033 1033
@@ -1121,7 +1121,7 @@ int t4vf_change_mac(struct adapter *adapter, unsigned int viid,
1121 struct fw_vi_mac_exact *p = &cmd.u.exact[0]; 1121 struct fw_vi_mac_exact *p = &cmd.u.exact[0];
1122 size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd, 1122 size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
1123 u.exact[1]), 16); 1123 u.exact[1]), 16);
1124 unsigned int max_naddr = is_t4(adapter->chip) ? 1124 unsigned int max_naddr = is_t4(adapter->params.chip) ?
1125 NUM_MPS_CLS_SRAM_L_INSTANCES : 1125 NUM_MPS_CLS_SRAM_L_INSTANCES :
1126 NUM_MPS_T5_CLS_SRAM_L_INSTANCES; 1126 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
1127 1127
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index f4825db5d179..4ccaf9af6fc9 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -104,6 +104,7 @@ static inline char *nic_name(struct pci_dev *pdev)
104#define BE3_MAX_RSS_QS 16 104#define BE3_MAX_RSS_QS 16
105#define BE3_MAX_TX_QS 16 105#define BE3_MAX_TX_QS 16
106#define BE3_MAX_EVT_QS 16 106#define BE3_MAX_EVT_QS 16
107#define BE3_SRIOV_MAX_EVT_QS 8
107 108
108#define MAX_RX_QS 32 109#define MAX_RX_QS 32
109#define MAX_EVT_QS 32 110#define MAX_EVT_QS 32
@@ -480,7 +481,7 @@ struct be_adapter {
480 struct list_head entry; 481 struct list_head entry;
481 482
482 u32 flash_status; 483 u32 flash_status;
483 struct completion flash_compl; 484 struct completion et_cmd_compl;
484 485
485 struct be_resources res; /* resources available for the func */ 486 struct be_resources res; /* resources available for the func */
486 u16 num_vfs; /* Number of VFs provisioned by PF */ 487 u16 num_vfs; /* Number of VFs provisioned by PF */
@@ -503,6 +504,7 @@ struct be_adapter {
503}; 504};
504 505
505#define be_physfn(adapter) (!adapter->virtfn) 506#define be_physfn(adapter) (!adapter->virtfn)
507#define be_virtfn(adapter) (adapter->virtfn)
506#define sriov_enabled(adapter) (adapter->num_vfs > 0) 508#define sriov_enabled(adapter) (adapter->num_vfs > 0)
507#define sriov_want(adapter) (be_physfn(adapter) && \ 509#define sriov_want(adapter) (be_physfn(adapter) && \
508 (num_vfs || pci_num_vf(adapter->pdev))) 510 (num_vfs || pci_num_vf(adapter->pdev)))
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index dbcd5262c016..94c35c8d799d 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -141,11 +141,17 @@ static int be_mcc_compl_process(struct be_adapter *adapter,
141 subsystem = resp_hdr->subsystem; 141 subsystem = resp_hdr->subsystem;
142 } 142 }
143 143
144 if (opcode == OPCODE_LOWLEVEL_LOOPBACK_TEST &&
145 subsystem == CMD_SUBSYSTEM_LOWLEVEL) {
146 complete(&adapter->et_cmd_compl);
147 return 0;
148 }
149
144 if (((opcode == OPCODE_COMMON_WRITE_FLASHROM) || 150 if (((opcode == OPCODE_COMMON_WRITE_FLASHROM) ||
145 (opcode == OPCODE_COMMON_WRITE_OBJECT)) && 151 (opcode == OPCODE_COMMON_WRITE_OBJECT)) &&
146 (subsystem == CMD_SUBSYSTEM_COMMON)) { 152 (subsystem == CMD_SUBSYSTEM_COMMON)) {
147 adapter->flash_status = compl_status; 153 adapter->flash_status = compl_status;
148 complete(&adapter->flash_compl); 154 complete(&adapter->et_cmd_compl);
149 } 155 }
150 156
151 if (compl_status == MCC_STATUS_SUCCESS) { 157 if (compl_status == MCC_STATUS_SUCCESS) {
@@ -1032,6 +1038,13 @@ int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq,
1032 } else { 1038 } else {
1033 req->hdr.version = 2; 1039 req->hdr.version = 2;
1034 req->page_size = 1; /* 1 for 4K */ 1040 req->page_size = 1; /* 1 for 4K */
1041
1042 /* coalesce-wm field in this cmd is not relevant to Lancer.
1043 * Lancer uses COMMON_MODIFY_CQ to set this field
1044 */
1045 if (!lancer_chip(adapter))
1046 AMAP_SET_BITS(struct amap_cq_context_v2, coalescwm,
1047 ctxt, coalesce_wm);
1035 AMAP_SET_BITS(struct amap_cq_context_v2, nodelay, ctxt, 1048 AMAP_SET_BITS(struct amap_cq_context_v2, nodelay, ctxt,
1036 no_delay); 1049 no_delay);
1037 AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt, 1050 AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt,
@@ -2010,6 +2023,9 @@ int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
2010 0x3ea83c02, 0x4a110304}; 2023 0x3ea83c02, 0x4a110304};
2011 int status; 2024 int status;
2012 2025
2026 if (!(be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS))
2027 return 0;
2028
2013 if (mutex_lock_interruptible(&adapter->mbox_lock)) 2029 if (mutex_lock_interruptible(&adapter->mbox_lock))
2014 return -1; 2030 return -1;
2015 2031
@@ -2153,7 +2169,7 @@ int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
2153 be_mcc_notify(adapter); 2169 be_mcc_notify(adapter);
2154 spin_unlock_bh(&adapter->mcc_lock); 2170 spin_unlock_bh(&adapter->mcc_lock);
2155 2171
2156 if (!wait_for_completion_timeout(&adapter->flash_compl, 2172 if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
2157 msecs_to_jiffies(60000))) 2173 msecs_to_jiffies(60000)))
2158 status = -1; 2174 status = -1;
2159 else 2175 else
@@ -2248,8 +2264,8 @@ int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
2248 be_mcc_notify(adapter); 2264 be_mcc_notify(adapter);
2249 spin_unlock_bh(&adapter->mcc_lock); 2265 spin_unlock_bh(&adapter->mcc_lock);
2250 2266
2251 if (!wait_for_completion_timeout(&adapter->flash_compl, 2267 if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
2252 msecs_to_jiffies(40000))) 2268 msecs_to_jiffies(40000)))
2253 status = -1; 2269 status = -1;
2254 else 2270 else
2255 status = adapter->flash_status; 2271 status = adapter->flash_status;
@@ -2360,6 +2376,7 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
2360{ 2376{
2361 struct be_mcc_wrb *wrb; 2377 struct be_mcc_wrb *wrb;
2362 struct be_cmd_req_loopback_test *req; 2378 struct be_cmd_req_loopback_test *req;
2379 struct be_cmd_resp_loopback_test *resp;
2363 int status; 2380 int status;
2364 2381
2365 spin_lock_bh(&adapter->mcc_lock); 2382 spin_lock_bh(&adapter->mcc_lock);
@@ -2374,8 +2391,8 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
2374 2391
2375 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL, 2392 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
2376 OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req), wrb, NULL); 2393 OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req), wrb, NULL);
2377 req->hdr.timeout = cpu_to_le32(4);
2378 2394
2395 req->hdr.timeout = cpu_to_le32(15);
2379 req->pattern = cpu_to_le64(pattern); 2396 req->pattern = cpu_to_le64(pattern);
2380 req->src_port = cpu_to_le32(port_num); 2397 req->src_port = cpu_to_le32(port_num);
2381 req->dest_port = cpu_to_le32(port_num); 2398 req->dest_port = cpu_to_le32(port_num);
@@ -2383,12 +2400,15 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
2383 req->num_pkts = cpu_to_le32(num_pkts); 2400 req->num_pkts = cpu_to_le32(num_pkts);
2384 req->loopback_type = cpu_to_le32(loopback_type); 2401 req->loopback_type = cpu_to_le32(loopback_type);
2385 2402
2386 status = be_mcc_notify_wait(adapter); 2403 be_mcc_notify(adapter);
2387 if (!status) { 2404
2388 struct be_cmd_resp_loopback_test *resp = embedded_payload(wrb); 2405 spin_unlock_bh(&adapter->mcc_lock);
2389 status = le32_to_cpu(resp->status); 2406
2390 } 2407 wait_for_completion(&adapter->et_cmd_compl);
2408 resp = embedded_payload(wrb);
2409 status = le32_to_cpu(resp->status);
2391 2410
2411 return status;
2392err: 2412err:
2393 spin_unlock_bh(&adapter->mcc_lock); 2413 spin_unlock_bh(&adapter->mcc_lock);
2394 return status; 2414 return status;
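
    The loopback-test hunk above switches from the synchronous
    be_mcc_notify_wait() to an explicit completion: the command is posted, the
    MCC lock is dropped so the completion interrupt can run, and the response
    is only read once be_mcc_compl_process() signals et_cmd_compl. A minimal
    sketch of that handshake (the init and the complete() call are in the other
    hunks of this patch):

        init_completion(&adapter->et_cmd_compl);        /* once, in be_ctrl_init() */

        be_mcc_notify(adapter);                         /* post the WRB            */
        spin_unlock_bh(&adapter->mcc_lock);             /* never sleep under lock  */
        wait_for_completion(&adapter->et_cmd_compl);    /* fired from MCC compl    */
        resp = embedded_payload(wrb);
        status = le32_to_cpu(resp->status);
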
diff --git a/drivers/net/ethernet/emulex/benet/be_hw.h b/drivers/net/ethernet/emulex/benet/be_hw.h
index 3e2162121601..dc88782185f2 100644
--- a/drivers/net/ethernet/emulex/benet/be_hw.h
+++ b/drivers/net/ethernet/emulex/benet/be_hw.h
@@ -64,6 +64,9 @@
64#define SLIPORT_ERROR_NO_RESOURCE1 0x2 64#define SLIPORT_ERROR_NO_RESOURCE1 0x2
65#define SLIPORT_ERROR_NO_RESOURCE2 0x9 65#define SLIPORT_ERROR_NO_RESOURCE2 0x9
66 66
67#define SLIPORT_ERROR_FW_RESET1 0x2
68#define SLIPORT_ERROR_FW_RESET2 0x0
69
67/********* Memory BAR register ************/ 70/********* Memory BAR register ************/
68#define PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET 0xfc 71#define PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET 0xfc
69/* Host Interrupt Enable, if set interrupts are enabled although "PCI Interrupt 72/* Host Interrupt Enable, if set interrupts are enabled although "PCI Interrupt
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index abde97471636..bf40fdaecfa3 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -2464,8 +2464,16 @@ void be_detect_error(struct be_adapter *adapter)
2464 */ 2464 */
2465 if (sliport_status & SLIPORT_STATUS_ERR_MASK) { 2465 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2466 adapter->hw_error = true; 2466 adapter->hw_error = true;
2467 dev_err(&adapter->pdev->dev, 2467 /* Do not log error messages if it's a FW reset */
2468 "Error detected in the card\n"); 2468 if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
2469 sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
2470 dev_info(&adapter->pdev->dev,
2471 "Firmware update in progress\n");
2472 return;
2473 } else {
2474 dev_err(&adapter->pdev->dev,
2475 "Error detected in the card\n");
2476 }
2469 } 2477 }
2470 2478
2471 if (sliport_status & SLIPORT_STATUS_ERR_MASK) { 2479 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
@@ -2658,8 +2666,8 @@ static int be_close(struct net_device *netdev)
2658 2666
2659 be_roce_dev_close(adapter); 2667 be_roce_dev_close(adapter);
2660 2668
2661 for_all_evt_queues(adapter, eqo, i) { 2669 if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
2662 if (adapter->flags & BE_FLAGS_NAPI_ENABLED) { 2670 for_all_evt_queues(adapter, eqo, i) {
2663 napi_disable(&eqo->napi); 2671 napi_disable(&eqo->napi);
2664 be_disable_busy_poll(eqo); 2672 be_disable_busy_poll(eqo);
2665 } 2673 }
@@ -2736,13 +2744,16 @@ static int be_rx_qs_create(struct be_adapter *adapter)
2736 if (!BEx_chip(adapter)) 2744 if (!BEx_chip(adapter))
2737 adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 | 2745 adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 |
2738 RSS_ENABLE_UDP_IPV6; 2746 RSS_ENABLE_UDP_IPV6;
2747 } else {
2748 /* Disable RSS, if only default RX Q is created */
2749 adapter->rss_flags = RSS_ENABLE_NONE;
2750 }
2739 2751
2740 rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags, 2752 rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
2741 128); 2753 128);
2742 if (rc) { 2754 if (rc) {
2743 adapter->rss_flags = 0; 2755 adapter->rss_flags = RSS_ENABLE_NONE;
2744 return rc; 2756 return rc;
2745 }
2746 } 2757 }
2747 2758
2748 /* First time posting */ 2759 /* First time posting */
@@ -2932,28 +2943,35 @@ static void be_cancel_worker(struct be_adapter *adapter)
2932 } 2943 }
2933} 2944}
2934 2945
2935static int be_clear(struct be_adapter *adapter) 2946static void be_mac_clear(struct be_adapter *adapter)
2936{ 2947{
2937 int i; 2948 int i;
2938 2949
2950 if (adapter->pmac_id) {
2951 for (i = 0; i < (adapter->uc_macs + 1); i++)
2952 be_cmd_pmac_del(adapter, adapter->if_handle,
2953 adapter->pmac_id[i], 0);
2954 adapter->uc_macs = 0;
2955
2956 kfree(adapter->pmac_id);
2957 adapter->pmac_id = NULL;
2958 }
2959}
2960
2961static int be_clear(struct be_adapter *adapter)
2962{
2939 be_cancel_worker(adapter); 2963 be_cancel_worker(adapter);
2940 2964
2941 if (sriov_enabled(adapter)) 2965 if (sriov_enabled(adapter))
2942 be_vf_clear(adapter); 2966 be_vf_clear(adapter);
2943 2967
2944 /* delete the primary mac along with the uc-mac list */ 2968 /* delete the primary mac along with the uc-mac list */
2945 for (i = 0; i < (adapter->uc_macs + 1); i++) 2969 be_mac_clear(adapter);
2946 be_cmd_pmac_del(adapter, adapter->if_handle,
2947 adapter->pmac_id[i], 0);
2948 adapter->uc_macs = 0;
2949 2970
2950 be_cmd_if_destroy(adapter, adapter->if_handle, 0); 2971 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
2951 2972
2952 be_clear_queues(adapter); 2973 be_clear_queues(adapter);
2953 2974
2954 kfree(adapter->pmac_id);
2955 adapter->pmac_id = NULL;
2956
2957 be_msix_disable(adapter); 2975 be_msix_disable(adapter);
2958 return 0; 2976 return 0;
2959} 2977}
@@ -3109,11 +3127,11 @@ static void BEx_get_resources(struct be_adapter *adapter,
3109{ 3127{
3110 struct pci_dev *pdev = adapter->pdev; 3128 struct pci_dev *pdev = adapter->pdev;
3111 bool use_sriov = false; 3129 bool use_sriov = false;
3130 int max_vfs;
3112 3131
3113 if (BE3_chip(adapter) && sriov_want(adapter)) { 3132 max_vfs = pci_sriov_get_totalvfs(pdev);
3114 int max_vfs;
3115 3133
3116 max_vfs = pci_sriov_get_totalvfs(pdev); 3134 if (BE3_chip(adapter) && sriov_want(adapter)) {
3117 res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0; 3135 res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
3118 use_sriov = res->max_vfs; 3136 use_sriov = res->max_vfs;
3119 } 3137 }
@@ -3144,7 +3162,11 @@ static void BEx_get_resources(struct be_adapter *adapter,
3144 BE3_MAX_RSS_QS : BE2_MAX_RSS_QS; 3162 BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
3145 res->max_rx_qs = res->max_rss_qs + 1; 3163 res->max_rx_qs = res->max_rss_qs + 1;
3146 3164
3147 res->max_evt_qs = be_physfn(adapter) ? BE3_MAX_EVT_QS : 1; 3165 if (be_physfn(adapter))
3166 res->max_evt_qs = (max_vfs > 0) ?
3167 BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
3168 else
3169 res->max_evt_qs = 1;
3148 3170
3149 res->if_cap_flags = BE_IF_CAP_FLAGS_WANT; 3171 res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
3150 if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS)) 3172 if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
@@ -3253,12 +3275,10 @@ static int be_mac_setup(struct be_adapter *adapter)
3253 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN); 3275 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
3254 } 3276 }
3255 3277
3256 /* On BE3 VFs this cmd may fail due to lack of privilege. 3278 /* For BE3-R VFs, the PF programs the initial MAC address */
3257 * Ignore the failure as in this case pmac_id is fetched 3279 if (!(BEx_chip(adapter) && be_virtfn(adapter)))
3258 * in the IFACE_CREATE cmd. 3280 be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3259 */ 3281 &adapter->pmac_id[0], 0);
3260 be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3261 &adapter->pmac_id[0], 0);
3262 return 0; 3282 return 0;
3263} 3283}
3264 3284
@@ -3814,6 +3834,8 @@ static int lancer_fw_download(struct be_adapter *adapter,
3814 } 3834 }
3815 3835
3816 if (change_status == LANCER_FW_RESET_NEEDED) { 3836 if (change_status == LANCER_FW_RESET_NEEDED) {
3837 dev_info(&adapter->pdev->dev,
3838 "Resetting adapter to activate new FW\n");
3817 status = lancer_physdev_ctrl(adapter, 3839 status = lancer_physdev_ctrl(adapter,
3818 PHYSDEV_CONTROL_FW_RESET_MASK); 3840 PHYSDEV_CONTROL_FW_RESET_MASK);
3819 if (status) { 3841 if (status) {
@@ -4190,7 +4212,7 @@ static int be_ctrl_init(struct be_adapter *adapter)
4190 spin_lock_init(&adapter->mcc_lock); 4212 spin_lock_init(&adapter->mcc_lock);
4191 spin_lock_init(&adapter->mcc_cq_lock); 4213 spin_lock_init(&adapter->mcc_cq_lock);
4192 4214
4193 init_completion(&adapter->flash_compl); 4215 init_completion(&adapter->et_cmd_compl);
4194 pci_save_state(adapter->pdev); 4216 pci_save_state(adapter->pdev);
4195 return 0; 4217 return 0;
4196 4218
@@ -4365,13 +4387,13 @@ static int lancer_recover_func(struct be_adapter *adapter)
4365 goto err; 4387 goto err;
4366 } 4388 }
4367 4389
4368 dev_err(dev, "Error recovery successful\n"); 4390 dev_err(dev, "Adapter recovery successful\n");
4369 return 0; 4391 return 0;
4370err: 4392err:
4371 if (status == -EAGAIN) 4393 if (status == -EAGAIN)
4372 dev_err(dev, "Waiting for resource provisioning\n"); 4394 dev_err(dev, "Waiting for resource provisioning\n");
4373 else 4395 else
4374 dev_err(dev, "Error recovery failed\n"); 4396 dev_err(dev, "Adapter recovery failed\n");
4375 4397
4376 return status; 4398 return status;
4377} 4399}
@@ -4599,6 +4621,7 @@ static int be_suspend(struct pci_dev *pdev, pm_message_t state)
4599 if (adapter->wol) 4621 if (adapter->wol)
4600 be_setup_wol(adapter, true); 4622 be_setup_wol(adapter, true);
4601 4623
4624 be_intr_set(adapter, false);
4602 cancel_delayed_work_sync(&adapter->func_recovery_work); 4625 cancel_delayed_work_sync(&adapter->func_recovery_work);
4603 4626
4604 netif_device_detach(netdev); 4627 netif_device_detach(netdev);
@@ -4634,6 +4657,7 @@ static int be_resume(struct pci_dev *pdev)
4634 if (status) 4657 if (status)
4635 return status; 4658 return status;
4636 4659
4660 be_intr_set(adapter, true);
4637 /* tell fw we're ready to fire cmds */ 4661 /* tell fw we're ready to fire cmds */
4638 status = be_cmd_fw_init(adapter); 4662 status = be_cmd_fw_init(adapter);
4639 if (status) 4663 if (status)
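
    The be_mac_clear() factoring above is worth calling out: by NULLing pmac_id
    after kfree() and checking it on entry, the MAC teardown becomes idempotent
    and safe to reach from more than one unwind path. A trimmed sketch of that
    pattern, mirroring the hunk:

        static void be_mac_clear(struct be_adapter *adapter)
        {
                int i;

                if (!adapter->pmac_id)
                        return;                         /* already cleaned up */

                for (i = 0; i < adapter->uc_macs + 1; i++)
                        be_cmd_pmac_del(adapter, adapter->if_handle,
                                        adapter->pmac_id[i], 0);
                adapter->uc_macs = 0;

                kfree(adapter->pmac_id);
                adapter->pmac_id = NULL;                /* second call is a no-op */
        }
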
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 4cbebf3d80eb..50bb71c663e2 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -98,10 +98,6 @@ static void set_multicast_list(struct net_device *ndev);
98 * detected as not set during a prior frame transmission, then the 98 * detected as not set during a prior frame transmission, then the
99 * ENET_TDAR[TDAR] bit is cleared at a later time, even if additional TxBDs 99 * ENET_TDAR[TDAR] bit is cleared at a later time, even if additional TxBDs
100 * were added to the ring and the ENET_TDAR[TDAR] bit is set. This results in 100 * were added to the ring and the ENET_TDAR[TDAR] bit is set. This results in
101 * If the ready bit in the transmit buffer descriptor (TxBD[R]) is previously
102 * detected as not set during a prior frame transmission, then the
103 * ENET_TDAR[TDAR] bit is cleared at a later time, even if additional TxBDs
104 * were added to the ring and the ENET_TDAR[TDAR] bit is set. This results in
105 * frames not being transmitted until there is a 0-to-1 transition on 101 * frames not being transmitted until there is a 0-to-1 transition on
106 * ENET_TDAR[TDAR]. 102 * ENET_TDAR[TDAR].
107 */ 103 */
@@ -385,7 +381,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
385 * data. 381 * data.
386 */ 382 */
387 bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, bufaddr, 383 bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, bufaddr,
388 FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE); 384 skb->len, DMA_TO_DEVICE);
389 if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) { 385 if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) {
390 bdp->cbd_bufaddr = 0; 386 bdp->cbd_bufaddr = 0;
391 fep->tx_skbuff[index] = NULL; 387 fep->tx_skbuff[index] = NULL;
@@ -432,6 +428,8 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
432 /* If this was the last BD in the ring, start at the beginning again. */ 428 /* If this was the last BD in the ring, start at the beginning again. */
433 bdp = fec_enet_get_nextdesc(bdp, fep); 429 bdp = fec_enet_get_nextdesc(bdp, fep);
434 430
431 skb_tx_timestamp(skb);
432
435 fep->cur_tx = bdp; 433 fep->cur_tx = bdp;
436 434
437 if (fep->cur_tx == fep->dirty_tx) 435 if (fep->cur_tx == fep->dirty_tx)
@@ -440,8 +438,6 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
440 /* Trigger transmission start */ 438 /* Trigger transmission start */
441 writel(0, fep->hwp + FEC_X_DES_ACTIVE); 439 writel(0, fep->hwp + FEC_X_DES_ACTIVE);
442 440
443 skb_tx_timestamp(skb);
444
445 return NETDEV_TX_OK; 441 return NETDEV_TX_OK;
446} 442}
447 443
@@ -779,11 +775,10 @@ fec_enet_tx(struct net_device *ndev)
779 else 775 else
780 index = bdp - fep->tx_bd_base; 776 index = bdp - fep->tx_bd_base;
781 777
782 dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
783 FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE);
784 bdp->cbd_bufaddr = 0;
785
786 skb = fep->tx_skbuff[index]; 778 skb = fep->tx_skbuff[index];
779 dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, skb->len,
780 DMA_TO_DEVICE);
781 bdp->cbd_bufaddr = 0;
787 782
788 /* Check for errors. */ 783 /* Check for errors. */
789 if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC | 784 if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
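
    Two points in the fec hunks above: dma_unmap_single() must be given the same
    length that was passed to dma_map_single() (both sides now use skb->len
    instead of the fixed FEC_ENET_TX_FRSIZE), and skb_tx_timestamp() is taken
    before the TDAR doorbell so the skb cannot be completed and freed first.
    Sketch of the paired calls; the real driver may substitute an aligned bounce
    buffer for skb->data, omitted here:

        /* transmit path */
        bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, skb->data,
                                          skb->len, DMA_TO_DEVICE);
        skb_tx_timestamp(skb);                          /* before ringing TDAR */
        writel(0, fep->hwp + FEC_X_DES_ACTIVE);

        /* completion path: unmap with the same length used for the map */
        dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
                         skb->len, DMA_TO_DEVICE);
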
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index 2d1c6bdd3618..7628e0fd8455 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -3033,7 +3033,7 @@ static struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
3033 3033
3034 dev->hw_features = NETIF_F_SG | NETIF_F_TSO | 3034 dev->hw_features = NETIF_F_SG | NETIF_F_TSO |
3035 NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_CTAG_TX; 3035 NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_CTAG_TX;
3036 dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO | 3036 dev->features = NETIF_F_SG | NETIF_F_TSO |
3037 NETIF_F_HIGHDMA | NETIF_F_IP_CSUM | 3037 NETIF_F_HIGHDMA | NETIF_F_IP_CSUM |
3038 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | 3038 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
3039 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_RXCSUM; 3039 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_RXCSUM;
diff --git a/drivers/net/ethernet/intel/e1000/e1000.h b/drivers/net/ethernet/intel/e1000/e1000.h
index 58c147271a36..f9313b36c887 100644
--- a/drivers/net/ethernet/intel/e1000/e1000.h
+++ b/drivers/net/ethernet/intel/e1000/e1000.h
@@ -83,6 +83,11 @@ struct e1000_adapter;
83 83
84#define E1000_MAX_INTR 10 84#define E1000_MAX_INTR 10
85 85
86/*
87 * Count for polling __E1000_RESET condition every 10-20msec.
88 */
89#define E1000_CHECK_RESET_COUNT 50
90
86/* TX/RX descriptor defines */ 91/* TX/RX descriptor defines */
87#define E1000_DEFAULT_TXD 256 92#define E1000_DEFAULT_TXD 256
88#define E1000_MAX_TXD 256 93#define E1000_MAX_TXD 256
@@ -312,8 +317,6 @@ struct e1000_adapter {
312 struct delayed_work watchdog_task; 317 struct delayed_work watchdog_task;
313 struct delayed_work fifo_stall_task; 318 struct delayed_work fifo_stall_task;
314 struct delayed_work phy_info_task; 319 struct delayed_work phy_info_task;
315
316 struct mutex mutex;
317}; 320};
318 321
319enum e1000_state_t { 322enum e1000_state_t {
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index e38622825fa7..46e6544ed1b7 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -494,13 +494,20 @@ static void e1000_down_and_stop(struct e1000_adapter *adapter)
494{ 494{
495 set_bit(__E1000_DOWN, &adapter->flags); 495 set_bit(__E1000_DOWN, &adapter->flags);
496 496
497 /* Only kill reset task if adapter is not resetting */
498 if (!test_bit(__E1000_RESETTING, &adapter->flags))
499 cancel_work_sync(&adapter->reset_task);
500
501 cancel_delayed_work_sync(&adapter->watchdog_task); 497 cancel_delayed_work_sync(&adapter->watchdog_task);
498
499 /*
500 * Since the watchdog task can reschedule other tasks, we should cancel
501 * it first, otherwise we can run into the situation when a work is
502 * still running after the adapter has been turned down.
503 */
504
502 cancel_delayed_work_sync(&adapter->phy_info_task); 505 cancel_delayed_work_sync(&adapter->phy_info_task);
503 cancel_delayed_work_sync(&adapter->fifo_stall_task); 506 cancel_delayed_work_sync(&adapter->fifo_stall_task);
507
508 /* Only kill reset task if adapter is not resetting */
509 if (!test_bit(__E1000_RESETTING, &adapter->flags))
510 cancel_work_sync(&adapter->reset_task);
504} 511}
505 512
506void e1000_down(struct e1000_adapter *adapter) 513void e1000_down(struct e1000_adapter *adapter)
@@ -544,21 +551,8 @@ void e1000_down(struct e1000_adapter *adapter)
544 e1000_clean_all_rx_rings(adapter); 551 e1000_clean_all_rx_rings(adapter);
545} 552}
546 553
547static void e1000_reinit_safe(struct e1000_adapter *adapter)
548{
549 while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
550 msleep(1);
551 mutex_lock(&adapter->mutex);
552 e1000_down(adapter);
553 e1000_up(adapter);
554 mutex_unlock(&adapter->mutex);
555 clear_bit(__E1000_RESETTING, &adapter->flags);
556}
557
558void e1000_reinit_locked(struct e1000_adapter *adapter) 554void e1000_reinit_locked(struct e1000_adapter *adapter)
559{ 555{
560 /* if rtnl_lock is not held the call path is bogus */
561 ASSERT_RTNL();
562 WARN_ON(in_interrupt()); 556 WARN_ON(in_interrupt());
563 while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) 557 while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
564 msleep(1); 558 msleep(1);
@@ -1316,7 +1310,6 @@ static int e1000_sw_init(struct e1000_adapter *adapter)
1316 e1000_irq_disable(adapter); 1310 e1000_irq_disable(adapter);
1317 1311
1318 spin_lock_init(&adapter->stats_lock); 1312 spin_lock_init(&adapter->stats_lock);
1319 mutex_init(&adapter->mutex);
1320 1313
1321 set_bit(__E1000_DOWN, &adapter->flags); 1314 set_bit(__E1000_DOWN, &adapter->flags);
1322 1315
@@ -1440,6 +1433,10 @@ static int e1000_close(struct net_device *netdev)
1440{ 1433{
1441 struct e1000_adapter *adapter = netdev_priv(netdev); 1434 struct e1000_adapter *adapter = netdev_priv(netdev);
1442 struct e1000_hw *hw = &adapter->hw; 1435 struct e1000_hw *hw = &adapter->hw;
1436 int count = E1000_CHECK_RESET_COUNT;
1437
1438 while (test_bit(__E1000_RESETTING, &adapter->flags) && count--)
1439 usleep_range(10000, 20000);
1443 1440
1444 WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags)); 1441 WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
1445 e1000_down(adapter); 1442 e1000_down(adapter);
@@ -2325,11 +2322,8 @@ static void e1000_update_phy_info_task(struct work_struct *work)
2325 struct e1000_adapter *adapter = container_of(work, 2322 struct e1000_adapter *adapter = container_of(work,
2326 struct e1000_adapter, 2323 struct e1000_adapter,
2327 phy_info_task.work); 2324 phy_info_task.work);
2328 if (test_bit(__E1000_DOWN, &adapter->flags)) 2325
2329 return;
2330 mutex_lock(&adapter->mutex);
2331 e1000_phy_get_info(&adapter->hw, &adapter->phy_info); 2326 e1000_phy_get_info(&adapter->hw, &adapter->phy_info);
2332 mutex_unlock(&adapter->mutex);
2333} 2327}
2334 2328
2335/** 2329/**
@@ -2345,9 +2339,6 @@ static void e1000_82547_tx_fifo_stall_task(struct work_struct *work)
2345 struct net_device *netdev = adapter->netdev; 2339 struct net_device *netdev = adapter->netdev;
2346 u32 tctl; 2340 u32 tctl;
2347 2341
2348 if (test_bit(__E1000_DOWN, &adapter->flags))
2349 return;
2350 mutex_lock(&adapter->mutex);
2351 if (atomic_read(&adapter->tx_fifo_stall)) { 2342 if (atomic_read(&adapter->tx_fifo_stall)) {
2352 if ((er32(TDT) == er32(TDH)) && 2343 if ((er32(TDT) == er32(TDH)) &&
2353 (er32(TDFT) == er32(TDFH)) && 2344 (er32(TDFT) == er32(TDFH)) &&
@@ -2368,7 +2359,6 @@ static void e1000_82547_tx_fifo_stall_task(struct work_struct *work)
2368 schedule_delayed_work(&adapter->fifo_stall_task, 1); 2359 schedule_delayed_work(&adapter->fifo_stall_task, 1);
2369 } 2360 }
2370 } 2361 }
2371 mutex_unlock(&adapter->mutex);
2372} 2362}
2373 2363
2374bool e1000_has_link(struct e1000_adapter *adapter) 2364bool e1000_has_link(struct e1000_adapter *adapter)
@@ -2422,10 +2412,6 @@ static void e1000_watchdog(struct work_struct *work)
2422 struct e1000_tx_ring *txdr = adapter->tx_ring; 2412 struct e1000_tx_ring *txdr = adapter->tx_ring;
2423 u32 link, tctl; 2413 u32 link, tctl;
2424 2414
2425 if (test_bit(__E1000_DOWN, &adapter->flags))
2426 return;
2427
2428 mutex_lock(&adapter->mutex);
2429 link = e1000_has_link(adapter); 2415 link = e1000_has_link(adapter);
2430 if ((netif_carrier_ok(netdev)) && link) 2416 if ((netif_carrier_ok(netdev)) && link)
2431 goto link_up; 2417 goto link_up;
@@ -2516,7 +2502,7 @@ link_up:
2516 adapter->tx_timeout_count++; 2502 adapter->tx_timeout_count++;
2517 schedule_work(&adapter->reset_task); 2503 schedule_work(&adapter->reset_task);
2518 /* exit immediately since reset is imminent */ 2504 /* exit immediately since reset is imminent */
2519 goto unlock; 2505 return;
2520 } 2506 }
2521 } 2507 }
2522 2508
@@ -2544,9 +2530,6 @@ link_up:
2544 /* Reschedule the task */ 2530 /* Reschedule the task */
2545 if (!test_bit(__E1000_DOWN, &adapter->flags)) 2531 if (!test_bit(__E1000_DOWN, &adapter->flags))
2546 schedule_delayed_work(&adapter->watchdog_task, 2 * HZ); 2532 schedule_delayed_work(&adapter->watchdog_task, 2 * HZ);
2547
2548unlock:
2549 mutex_unlock(&adapter->mutex);
2550} 2533}
2551 2534
2552enum latency_range { 2535enum latency_range {
@@ -3495,10 +3478,8 @@ static void e1000_reset_task(struct work_struct *work)
3495 struct e1000_adapter *adapter = 3478 struct e1000_adapter *adapter =
3496 container_of(work, struct e1000_adapter, reset_task); 3479 container_of(work, struct e1000_adapter, reset_task);
3497 3480
3498 if (test_bit(__E1000_DOWN, &adapter->flags))
3499 return;
3500 e_err(drv, "Reset adapter\n"); 3481 e_err(drv, "Reset adapter\n");
3501 e1000_reinit_safe(adapter); 3482 e1000_reinit_locked(adapter);
3502} 3483}
3503 3484
3504/** 3485/**
@@ -4963,6 +4944,11 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
4963 netif_device_detach(netdev); 4944 netif_device_detach(netdev);
4964 4945
4965 if (netif_running(netdev)) { 4946 if (netif_running(netdev)) {
4947 int count = E1000_CHECK_RESET_COUNT;
4948
4949 while (test_bit(__E1000_RESETTING, &adapter->flags) && count--)
4950 usleep_range(10000, 20000);
4951
4966 WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags)); 4952 WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
4967 e1000_down(adapter); 4953 e1000_down(adapter);
4968 } 4954 }
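
    With adapter->mutex gone, the hunks above instead make e1000_close() and
    __e1000_shutdown() wait out any in-flight reset before tearing the adapter
    down. The bound is E1000_CHECK_RESET_COUNT polls of 10-20 ms each, i.e.
    roughly 0.5-1 s worst case:

        int count = E1000_CHECK_RESET_COUNT;            /* 50 polls */

        while (test_bit(__E1000_RESETTING, &adapter->flags) && count--)
                usleep_range(10000, 20000);             /* 10-20 ms per poll */

        WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
        e1000_down(adapter);
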
diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
index 895450e9bb3c..ff2d806eaef7 100644
--- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c
+++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
@@ -718,8 +718,11 @@ static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw)
718 e1000_release_phy_80003es2lan(hw); 718 e1000_release_phy_80003es2lan(hw);
719 719
720 /* Disable IBIST slave mode (far-end loopback) */ 720 /* Disable IBIST slave mode (far-end loopback) */
721 e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM, 721 ret_val =
722 &kum_reg_data); 722 e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
723 &kum_reg_data);
724 if (ret_val)
725 return ret_val;
723 kum_reg_data |= E1000_KMRNCTRLSTA_IBIST_DISABLE; 726 kum_reg_data |= E1000_KMRNCTRLSTA_IBIST_DISABLE;
724 e1000_write_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM, 727 e1000_write_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
725 kum_reg_data); 728 kum_reg_data);
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 8d3945ab7334..c30d41d6e426 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -6174,7 +6174,7 @@ static int __e1000_resume(struct pci_dev *pdev)
6174 return 0; 6174 return 0;
6175} 6175}
6176 6176
6177#ifdef CONFIG_PM_SLEEP 6177#ifdef CONFIG_PM
6178static int e1000_suspend(struct device *dev) 6178static int e1000_suspend(struct device *dev)
6179{ 6179{
6180 struct pci_dev *pdev = to_pci_dev(dev); 6180 struct pci_dev *pdev = to_pci_dev(dev);
@@ -6193,7 +6193,7 @@ static int e1000_resume(struct device *dev)
6193 6193
6194 return __e1000_resume(pdev); 6194 return __e1000_resume(pdev);
6195} 6195}
6196#endif /* CONFIG_PM_SLEEP */ 6196#endif /* CONFIG_PM */
6197 6197
6198#ifdef CONFIG_PM_RUNTIME 6198#ifdef CONFIG_PM_RUNTIME
6199static int e1000_runtime_suspend(struct device *dev) 6199static int e1000_runtime_suspend(struct device *dev)
diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c
index da2be59505c0..20e71f4ca426 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.c
+++ b/drivers/net/ethernet/intel/e1000e/phy.c
@@ -1757,19 +1757,23 @@ s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
1757 * it across the board. 1757 * it across the board.
1758 */ 1758 */
1759 ret_val = e1e_rphy(hw, MII_BMSR, &phy_status); 1759 ret_val = e1e_rphy(hw, MII_BMSR, &phy_status);
1760 if (ret_val) 1760 if (ret_val) {
1761 /* If the first read fails, another entity may have 1761 /* If the first read fails, another entity may have
1762 * ownership of the resources, wait and try again to 1762 * ownership of the resources, wait and try again to
1763 * see if they have relinquished the resources yet. 1763 * see if they have relinquished the resources yet.
1764 */ 1764 */
1765 udelay(usec_interval); 1765 if (usec_interval >= 1000)
1766 msleep(usec_interval / 1000);
1767 else
1768 udelay(usec_interval);
1769 }
1766 ret_val = e1e_rphy(hw, MII_BMSR, &phy_status); 1770 ret_val = e1e_rphy(hw, MII_BMSR, &phy_status);
1767 if (ret_val) 1771 if (ret_val)
1768 break; 1772 break;
1769 if (phy_status & BMSR_LSTATUS) 1773 if (phy_status & BMSR_LSTATUS)
1770 break; 1774 break;
1771 if (usec_interval >= 1000) 1775 if (usec_interval >= 1000)
1772 mdelay(usec_interval / 1000); 1776 msleep(usec_interval / 1000);
1773 else 1777 else
1774 udelay(usec_interval); 1778 udelay(usec_interval);
1775 } 1779 }
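
    The PHY polling change above picks the delay primitive by interval length:
    udelay() busy-waits and is only appropriate for sub-millisecond pauses, so
    millisecond-scale intervals now sleep and let other tasks run. The same
    selection, shown in isolation:

        if (usec_interval >= 1000)
                msleep(usec_interval / 1000);           /* can sleep            */
        else
                udelay(usec_interval);                  /* short busy-wait only */
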
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index be15938ba213..12b0932204ba 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -354,6 +354,9 @@ static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
354 struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi); 354 struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
355 int i; 355 int i;
356 356
357 if (!vsi->tx_rings)
358 return stats;
359
357 rcu_read_lock(); 360 rcu_read_lock();
358 for (i = 0; i < vsi->num_queue_pairs; i++) { 361 for (i = 0; i < vsi->num_queue_pairs; i++) {
359 struct i40e_ring *tx_ring, *rx_ring; 362 struct i40e_ring *tx_ring, *rx_ring;
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c
index c4c4fe332c7e..ad2b74d95138 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.c
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.c
@@ -1728,7 +1728,10 @@ s32 igb_phy_has_link(struct e1000_hw *hw, u32 iterations,
1728 * ownership of the resources, wait and try again to 1728 * ownership of the resources, wait and try again to
1729 * see if they have relinquished the resources yet. 1729 * see if they have relinquished the resources yet.
1730 */ 1730 */
1731 udelay(usec_interval); 1731 if (usec_interval >= 1000)
1732 mdelay(usec_interval/1000);
1733 else
1734 udelay(usec_interval);
1732 } 1735 }
1733 ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); 1736 ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
1734 if (ret_val) 1737 if (ret_val)
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index b0f3666b1d7f..c3143da497c8 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -2062,14 +2062,15 @@ static void igb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
2062{ 2062{
2063 struct igb_adapter *adapter = netdev_priv(netdev); 2063 struct igb_adapter *adapter = netdev_priv(netdev);
2064 2064
2065 wol->supported = WAKE_UCAST | WAKE_MCAST |
2066 WAKE_BCAST | WAKE_MAGIC |
2067 WAKE_PHY;
2068 wol->wolopts = 0; 2065 wol->wolopts = 0;
2069 2066
2070 if (!(adapter->flags & IGB_FLAG_WOL_SUPPORTED)) 2067 if (!(adapter->flags & IGB_FLAG_WOL_SUPPORTED))
2071 return; 2068 return;
2072 2069
2070 wol->supported = WAKE_UCAST | WAKE_MCAST |
2071 WAKE_BCAST | WAKE_MAGIC |
2072 WAKE_PHY;
2073
2073 /* apply any specific unsupported masks here */ 2074 /* apply any specific unsupported masks here */
2074 switch (adapter->hw.device_id) { 2075 switch (adapter->hw.device_id) {
2075 default: 2076 default:
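
    The igb_get_wol() reordering above matters because the early return must
    leave wol->supported at zero for adapters without Wake-on-LAN; the generic
    wake sources are only advertised once the capability flag has been checked.
    Condensed sketch (the explicit zeroing of supported is added here for
    clarity; the ethtool core hands in a zeroed struct):

        wol->wolopts = 0;
        wol->supported = 0;                             /* report nothing by default */

        if (!(adapter->flags & IGB_FLAG_WOL_SUPPORTED))
                return;

        wol->supported = WAKE_UCAST | WAKE_MCAST | WAKE_BCAST |
                         WAKE_MAGIC | WAKE_PHY;
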
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 0c55079ebee3..5bcc870f8367 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -4251,8 +4251,8 @@ static void ixgbe_disable_fwd_ring(struct ixgbe_fwd_adapter *vadapter,
4251 rx_ring->l2_accel_priv = NULL; 4251 rx_ring->l2_accel_priv = NULL;
4252} 4252}
4253 4253
4254int ixgbe_fwd_ring_down(struct net_device *vdev, 4254static int ixgbe_fwd_ring_down(struct net_device *vdev,
4255 struct ixgbe_fwd_adapter *accel) 4255 struct ixgbe_fwd_adapter *accel)
4256{ 4256{
4257 struct ixgbe_adapter *adapter = accel->real_adapter; 4257 struct ixgbe_adapter *adapter = accel->real_adapter;
4258 unsigned int rxbase = accel->rx_base_queue; 4258 unsigned int rxbase = accel->rx_base_queue;
@@ -6827,12 +6827,20 @@ static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
6827 return __ixgbe_maybe_stop_tx(tx_ring, size); 6827 return __ixgbe_maybe_stop_tx(tx_ring, size);
6828} 6828}
6829 6829
6830#ifdef IXGBE_FCOE 6830static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
6831static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb) 6831 void *accel_priv)
6832{ 6832{
6833 struct ixgbe_fwd_adapter *fwd_adapter = accel_priv;
6834#ifdef IXGBE_FCOE
6833 struct ixgbe_adapter *adapter; 6835 struct ixgbe_adapter *adapter;
6834 struct ixgbe_ring_feature *f; 6836 struct ixgbe_ring_feature *f;
6835 int txq; 6837 int txq;
6838#endif
6839
6840 if (fwd_adapter)
6841 return skb->queue_mapping + fwd_adapter->tx_base_queue;
6842
6843#ifdef IXGBE_FCOE
6836 6844
6837 /* 6845 /*
6838 * only execute the code below if protocol is FCoE 6846 * only execute the code below if protocol is FCoE
@@ -6858,9 +6866,11 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
6858 txq -= f->indices; 6866 txq -= f->indices;
6859 6867
6860 return txq + f->offset; 6868 return txq + f->offset;
6869#else
6870 return __netdev_pick_tx(dev, skb);
6871#endif
6861} 6872}
6862 6873
6863#endif
6864netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, 6874netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
6865 struct ixgbe_adapter *adapter, 6875 struct ixgbe_adapter *adapter,
6866 struct ixgbe_ring *tx_ring) 6876 struct ixgbe_ring *tx_ring)
@@ -7629,27 +7639,11 @@ static void ixgbe_fwd_del(struct net_device *pdev, void *priv)
7629 kfree(fwd_adapter); 7639 kfree(fwd_adapter);
7630} 7640}
7631 7641
7632static netdev_tx_t ixgbe_fwd_xmit(struct sk_buff *skb,
7633 struct net_device *dev,
7634 void *priv)
7635{
7636 struct ixgbe_fwd_adapter *fwd_adapter = priv;
7637 unsigned int queue;
7638 struct ixgbe_ring *tx_ring;
7639
7640 queue = skb->queue_mapping + fwd_adapter->tx_base_queue;
7641 tx_ring = fwd_adapter->real_adapter->tx_ring[queue];
7642
7643 return __ixgbe_xmit_frame(skb, dev, tx_ring);
7644}
7645
7646static const struct net_device_ops ixgbe_netdev_ops = { 7642static const struct net_device_ops ixgbe_netdev_ops = {
7647 .ndo_open = ixgbe_open, 7643 .ndo_open = ixgbe_open,
7648 .ndo_stop = ixgbe_close, 7644 .ndo_stop = ixgbe_close,
7649 .ndo_start_xmit = ixgbe_xmit_frame, 7645 .ndo_start_xmit = ixgbe_xmit_frame,
7650#ifdef IXGBE_FCOE
7651 .ndo_select_queue = ixgbe_select_queue, 7646 .ndo_select_queue = ixgbe_select_queue,
7652#endif
7653 .ndo_set_rx_mode = ixgbe_set_rx_mode, 7647 .ndo_set_rx_mode = ixgbe_set_rx_mode,
7654 .ndo_validate_addr = eth_validate_addr, 7648 .ndo_validate_addr = eth_validate_addr,
7655 .ndo_set_mac_address = ixgbe_set_mac, 7649 .ndo_set_mac_address = ixgbe_set_mac,
@@ -7689,7 +7683,6 @@ static const struct net_device_ops ixgbe_netdev_ops = {
7689 .ndo_bridge_getlink = ixgbe_ndo_bridge_getlink, 7683 .ndo_bridge_getlink = ixgbe_ndo_bridge_getlink,
7690 .ndo_dfwd_add_station = ixgbe_fwd_add, 7684 .ndo_dfwd_add_station = ixgbe_fwd_add,
7691 .ndo_dfwd_del_station = ixgbe_fwd_del, 7685 .ndo_dfwd_del_station = ixgbe_fwd_del,
7692 .ndo_dfwd_start_xmit = ixgbe_fwd_xmit,
7693}; 7686};
7694 7687
7695/** 7688/**
@@ -7986,10 +7979,9 @@ skip_sriov:
7986 NETIF_F_TSO | 7979 NETIF_F_TSO |
7987 NETIF_F_TSO6 | 7980 NETIF_F_TSO6 |
7988 NETIF_F_RXHASH | 7981 NETIF_F_RXHASH |
7989 NETIF_F_RXCSUM | 7982 NETIF_F_RXCSUM;
7990 NETIF_F_HW_L2FW_DOFFLOAD;
7991 7983
7992 netdev->hw_features = netdev->features; 7984 netdev->hw_features = netdev->features | NETIF_F_HW_L2FW_DOFFLOAD;
7993 7985
7994 switch (adapter->hw.mac.type) { 7986 switch (adapter->hw.mac.type) {
7995 case ixgbe_mac_82599EB: 7987 case ixgbe_mac_82599EB:
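
    The select_queue rework above follows the new ndo_select_queue signature,
    which passes the L2 forwarding accelerator context (a macvlan offload
    station) as accel_priv. A condensed sketch of the decision, using the
    structures named in the hunks; the function name here is hypothetical:

        static u16 sketch_select_queue(struct net_device *dev, struct sk_buff *skb,
                                       void *accel_priv)
        {
                struct ixgbe_fwd_adapter *fwd = accel_priv;

                if (fwd)        /* offloaded macvlan: use its reserved queue range */
                        return skb->queue_mapping + fwd->tx_base_queue;

                return __netdev_pick_tx(dev, skb);      /* stack default otherwise */
        }
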
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index e4c676006be9..39217e5ff7dc 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -46,6 +46,7 @@ static bool ixgbe_get_i2c_data(u32 *i2cctl);
46static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw); 46static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw);
47static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id); 47static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id);
48static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw); 48static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw);
49static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw);
49 50
50/** 51/**
51 * ixgbe_identify_phy_generic - Get physical layer module 52 * ixgbe_identify_phy_generic - Get physical layer module
@@ -1164,7 +1165,7 @@ err_read_i2c_eeprom:
1164 * 1165 *
1165 * Searches for and identifies the QSFP module and assigns appropriate PHY type 1166 * Searches for and identifies the QSFP module and assigns appropriate PHY type
1166 **/ 1167 **/
1167s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw) 1168static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw)
1168{ 1169{
1169 struct ixgbe_adapter *adapter = hw->back; 1170 struct ixgbe_adapter *adapter = hw->back;
1170 s32 status = IXGBE_ERR_PHY_ADDR_INVALID; 1171 s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
index aae900a256da..fffcbdd2bf0e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
@@ -145,7 +145,6 @@ s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw,
145s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw); 145s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw);
146s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw); 146s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw);
147s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw); 147s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw);
148s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw);
149s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw, 148s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
150 u16 *list_offset, 149 u16 *list_offset,
151 u16 *data_offset); 150 u16 *data_offset);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index d6f0c0d8cf11..72084f70adbb 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -291,7 +291,9 @@ static int ixgbe_pci_sriov_disable(struct pci_dev *dev)
291{ 291{
292 struct ixgbe_adapter *adapter = pci_get_drvdata(dev); 292 struct ixgbe_adapter *adapter = pci_get_drvdata(dev);
293 int err; 293 int err;
294#ifdef CONFIG_PCI_IOV
294 u32 current_flags = adapter->flags; 295 u32 current_flags = adapter->flags;
296#endif
295 297
296 err = ixgbe_disable_sriov(adapter); 298 err = ixgbe_disable_sriov(adapter);
297 299
diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
index 6a6c1f76d8e0..ec94a20d7099 100644
--- a/drivers/net/ethernet/lantiq_etop.c
+++ b/drivers/net/ethernet/lantiq_etop.c
@@ -619,7 +619,8 @@ ltq_etop_set_multicast_list(struct net_device *dev)
619} 619}
620 620
621static u16 621static u16
622ltq_etop_select_queue(struct net_device *dev, struct sk_buff *skb) 622ltq_etop_select_queue(struct net_device *dev, struct sk_buff *skb,
623 void *accel_priv)
623{ 624{
624 /* we are currently only using the first queue */ 625 /* we are currently only using the first queue */
625 return 0; 626 return 0;
diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c
index 7354960b583b..c4eeb69a5bee 100644
--- a/drivers/net/ethernet/marvell/mvmdio.c
+++ b/drivers/net/ethernet/marvell/mvmdio.c
@@ -92,6 +92,12 @@ static int orion_mdio_wait_ready(struct mii_bus *bus)
92 if (time_is_before_jiffies(end)) 92 if (time_is_before_jiffies(end))
93 ++timedout; 93 ++timedout;
94 } else { 94 } else {
95 /* wait_event_timeout does not guarantee a delay of at
96 * least one whole jiffie, so timeout must be no less
97 * than two.
98 */
99 if (timeout < 2)
100 timeout = 2;
95 wait_event_timeout(dev->smi_busy_wait, 101 wait_event_timeout(dev->smi_busy_wait,
96 orion_mdio_smi_is_done(dev), 102 orion_mdio_smi_is_done(dev),
97 timeout); 103 timeout);
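
    The clamp above exists because wait_event_timeout() counts in jiffies and
    gives no guarantee of a full jiffy of waiting when handed 1: with HZ=250,
    for instance, a 4 ms timeout is a single jiffy that may expire almost
    immediately if the next tick is imminent. Hence the minimum of 2:

        if (timeout < 2)                                /* never below 2 jiffies */
                timeout = 2;
        wait_event_timeout(dev->smi_busy_wait, orion_mdio_smi_is_done(dev),
                           timeout);
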
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index b8e232b4ea2d..d5f0d72e5e33 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -1378,7 +1378,7 @@ static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
1378 1378
1379 dev_kfree_skb_any(skb); 1379 dev_kfree_skb_any(skb);
1380 dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr, 1380 dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr,
1381 rx_desc->data_size, DMA_FROM_DEVICE); 1381 MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE);
1382 } 1382 }
1383 1383
1384 if (rx_done) 1384 if (rx_done)
@@ -1424,7 +1424,7 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
1424 } 1424 }
1425 1425
1426 dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr, 1426 dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr,
1427 rx_desc->data_size, DMA_FROM_DEVICE); 1427 MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE);
1428 1428
1429 rx_bytes = rx_desc->data_size - 1429 rx_bytes = rx_desc->data_size -
1430 (ETH_FCS_LEN + MVNETA_MH_SIZE); 1430 (ETH_FCS_LEN + MVNETA_MH_SIZE);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
index 40626690e8a8..c11d063473e5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
@@ -140,7 +140,6 @@ void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf)
140{ 140{
141 struct mlx4_en_priv *priv = netdev_priv(dev); 141 struct mlx4_en_priv *priv = netdev_priv(dev);
142 struct mlx4_en_dev *mdev = priv->mdev; 142 struct mlx4_en_dev *mdev = priv->mdev;
143 struct mlx4_en_tx_ring *tx_ring;
144 int i, carrier_ok; 143 int i, carrier_ok;
145 144
146 memset(buf, 0, sizeof(u64) * MLX4_EN_NUM_SELF_TEST); 145 memset(buf, 0, sizeof(u64) * MLX4_EN_NUM_SELF_TEST);
@@ -150,16 +149,10 @@ void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf)
150 carrier_ok = netif_carrier_ok(dev); 149 carrier_ok = netif_carrier_ok(dev);
151 150
152 netif_carrier_off(dev); 151 netif_carrier_off(dev);
153retry_tx:
154 /* Wait until all tx queues are empty. 152 /* Wait until all tx queues are empty.
155 * there should not be any additional incoming traffic 153 * there should not be any additional incoming traffic
156 * since we turned the carrier off */ 154 * since we turned the carrier off */
157 msleep(200); 155 msleep(200);
158 for (i = 0; i < priv->tx_ring_num && carrier_ok; i++) {
159 tx_ring = priv->tx_ring[i];
160 if (tx_ring->prod != (tx_ring->cons + tx_ring->last_nr_txbb))
161 goto retry_tx;
162 }
163 156
164 if (priv->mdev->dev->caps.flags & 157 if (priv->mdev->dev->caps.flags &
165 MLX4_DEV_CAP_FLAG_UC_LOOPBACK) { 158 MLX4_DEV_CAP_FLAG_UC_LOOPBACK) {
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index f54ebd5a1702..a7fcd593b2db 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -592,7 +592,8 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *sk
592 } 592 }
593} 593}
594 594
595u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb) 595u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
596 void *accel_priv)
596{ 597{
597 struct mlx4_en_priv *priv = netdev_priv(dev); 598 struct mlx4_en_priv *priv = netdev_priv(dev);
598 u16 rings_p_up = priv->num_tx_rings_p_up; 599 u16 rings_p_up = priv->num_tx_rings_p_up;
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 5789ea2c934d..01fc6515384d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -2635,6 +2635,8 @@ static int __init mlx4_init(void)
2635 return -ENOMEM; 2635 return -ENOMEM;
2636 2636
2637 ret = pci_register_driver(&mlx4_driver); 2637 ret = pci_register_driver(&mlx4_driver);
2638 if (ret < 0)
2639 destroy_workqueue(mlx4_wq);
2638 return ret < 0 ? ret : 0; 2640 return ret < 0 ? ret : 0;
2639} 2641}
2640 2642
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index f3758de59c05..d5758adceaa2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -714,7 +714,8 @@ int mlx4_en_set_cq_moder(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
714int mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq); 714int mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
715 715
716void mlx4_en_tx_irq(struct mlx4_cq *mcq); 716void mlx4_en_tx_irq(struct mlx4_cq *mcq);
717u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb); 717u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
718 void *accel_priv);
718netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev); 719netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
719 720
720int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, 721int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index 2d045be4b5cf..1e8b9514718b 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -5150,8 +5150,10 @@ static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64
5150{ 5150{
5151 struct fe_priv *np = netdev_priv(dev); 5151 struct fe_priv *np = netdev_priv(dev);
5152 u8 __iomem *base = get_hwbase(dev); 5152 u8 __iomem *base = get_hwbase(dev);
5153 int result; 5153 int result, count;
5154 memset(buffer, 0, nv_get_sset_count(dev, ETH_SS_TEST)*sizeof(u64)); 5154
5155 count = nv_get_sset_count(dev, ETH_SS_TEST);
5156 memset(buffer, 0, count * sizeof(u64));
5155 5157
5156 if (!nv_link_test(dev)) { 5158 if (!nv_link_test(dev)) {
5157 test->flags |= ETH_TEST_FL_FAILED; 5159 test->flags |= ETH_TEST_FL_FAILED;
@@ -5195,7 +5197,7 @@ static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64
5195 return; 5197 return;
5196 } 5198 }
5197 5199
5198 if (!nv_loopback_test(dev)) { 5200 if (count > NV_TEST_COUNT_BASE && !nv_loopback_test(dev)) {
5199 test->flags |= ETH_TEST_FL_FAILED; 5201 test->flags |= ETH_TEST_FL_FAILED;
5200 buffer[3] = 1; 5202 buffer[3] = 1;
5201 } 5203 }
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
index 7692dfd4f262..cc68657f0536 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
@@ -1604,13 +1604,13 @@ netxen_process_lro(struct netxen_adapter *adapter,
1604 u32 seq_number; 1604 u32 seq_number;
1605 u8 vhdr_len = 0; 1605 u8 vhdr_len = 0;
1606 1606
1607 if (unlikely(ring > adapter->max_rds_rings)) 1607 if (unlikely(ring >= adapter->max_rds_rings))
1608 return NULL; 1608 return NULL;
1609 1609
1610 rds_ring = &recv_ctx->rds_rings[ring]; 1610 rds_ring = &recv_ctx->rds_rings[ring];
1611 1611
1612 index = netxen_get_lro_sts_refhandle(sts_data0); 1612 index = netxen_get_lro_sts_refhandle(sts_data0);
1613 if (unlikely(index > rds_ring->num_desc)) 1613 if (unlikely(index >= rds_ring->num_desc))
1614 return NULL; 1614 return NULL;
1615 1615
1616 buffer = &rds_ring->rx_buf_arr[index]; 1616 buffer = &rds_ring->rx_buf_arr[index];
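
    Both checks above are classic off-by-one fixes: an array of N entries has
    valid indexes 0 .. N-1, so the reject condition needs ">=" rather than ">":

        if (unlikely(ring >= adapter->max_rds_rings))   /* ring == max is invalid   */
                return NULL;

        if (unlikely(index >= rds_ring->num_desc))      /* index == num_desc too    */
                return NULL;
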
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index 631ea0ac1cd8..f2a7c7166e24 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -487,6 +487,7 @@ struct qlcnic_hardware_context {
487 struct qlcnic_mailbox *mailbox; 487 struct qlcnic_mailbox *mailbox;
488 u8 extend_lb_time; 488 u8 extend_lb_time;
489 u8 phys_port_id[ETH_ALEN]; 489 u8 phys_port_id[ETH_ALEN];
490 u8 lb_mode;
490}; 491};
491 492
492struct qlcnic_adapter_stats { 493struct qlcnic_adapter_stats {
@@ -578,6 +579,8 @@ struct qlcnic_host_tx_ring {
578 dma_addr_t phys_addr; 579 dma_addr_t phys_addr;
579 dma_addr_t hw_cons_phys_addr; 580 dma_addr_t hw_cons_phys_addr;
580 struct netdev_queue *txq; 581 struct netdev_queue *txq;
582 /* Lock to protect Tx descriptors cleanup */
583 spinlock_t tx_clean_lock;
581} ____cacheline_internodealigned_in_smp; 584} ____cacheline_internodealigned_in_smp;
582 585
583/* 586/*
@@ -808,6 +811,7 @@ struct qlcnic_mac_list_s {
808 811
809#define QLCNIC_ILB_MODE 0x1 812#define QLCNIC_ILB_MODE 0x1
810#define QLCNIC_ELB_MODE 0x2 813#define QLCNIC_ELB_MODE 0x2
814#define QLCNIC_LB_MODE_MASK 0x3
811 815
812#define QLCNIC_LINKEVENT 0x1 816#define QLCNIC_LINKEVENT 0x1
813#define QLCNIC_LB_RESPONSE 0x2 817#define QLCNIC_LB_RESPONSE 0x2
@@ -1093,7 +1097,6 @@ struct qlcnic_adapter {
1093 struct qlcnic_filter_hash rx_fhash; 1097 struct qlcnic_filter_hash rx_fhash;
1094 struct list_head vf_mc_list; 1098 struct list_head vf_mc_list;
1095 1099
1096 spinlock_t tx_clean_lock;
1097 spinlock_t mac_learn_lock; 1100 spinlock_t mac_learn_lock;
1098 /* spinlock for catching rcv filters for eswitch traffic */ 1101 /* spinlock for catching rcv filters for eswitch traffic */
1099 spinlock_t rx_mac_learn_lock; 1102 spinlock_t rx_mac_learn_lock;
@@ -1708,6 +1711,7 @@ int qlcnic_83xx_init_mailbox_work(struct qlcnic_adapter *);
1708void qlcnic_83xx_detach_mailbox_work(struct qlcnic_adapter *); 1711void qlcnic_83xx_detach_mailbox_work(struct qlcnic_adapter *);
1709void qlcnic_83xx_reinit_mbx_work(struct qlcnic_mailbox *mbx); 1712void qlcnic_83xx_reinit_mbx_work(struct qlcnic_mailbox *mbx);
1710void qlcnic_83xx_free_mailbox(struct qlcnic_mailbox *mbx); 1713void qlcnic_83xx_free_mailbox(struct qlcnic_mailbox *mbx);
1714void qlcnic_update_stats(struct qlcnic_adapter *);
1711 1715
1712/* Adapter hardware abstraction */ 1716/* Adapter hardware abstraction */
1713struct qlcnic_hardware_ops { 1717struct qlcnic_hardware_ops {
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index b1cb0ffb15c7..f776f99f7915 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -447,8 +447,9 @@ irqreturn_t qlcnic_83xx_intr(int irq, void *data)
447 447
448 qlcnic_83xx_poll_process_aen(adapter); 448 qlcnic_83xx_poll_process_aen(adapter);
449 449
450 if (ahw->diag_test == QLCNIC_INTERRUPT_TEST) { 450 if (ahw->diag_test) {
451 ahw->diag_cnt++; 451 if (ahw->diag_test == QLCNIC_INTERRUPT_TEST)
452 ahw->diag_cnt++;
452 qlcnic_83xx_enable_legacy_msix_mbx_intr(adapter); 453 qlcnic_83xx_enable_legacy_msix_mbx_intr(adapter);
453 return IRQ_HANDLED; 454 return IRQ_HANDLED;
454 } 455 }
@@ -1345,11 +1346,6 @@ static int qlcnic_83xx_diag_alloc_res(struct net_device *netdev, int test,
1345 } 1346 }
1346 1347
1347 if (adapter->ahw->diag_test == QLCNIC_LOOPBACK_TEST) { 1348 if (adapter->ahw->diag_test == QLCNIC_LOOPBACK_TEST) {
1348 /* disable and free mailbox interrupt */
1349 if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) {
1350 qlcnic_83xx_enable_mbx_poll(adapter);
1351 qlcnic_83xx_free_mbx_intr(adapter);
1352 }
1353 adapter->ahw->loopback_state = 0; 1349 adapter->ahw->loopback_state = 0;
1354 adapter->ahw->hw_ops->setup_link_event(adapter, 1); 1350 adapter->ahw->hw_ops->setup_link_event(adapter, 1);
1355 } 1351 }
@@ -1363,33 +1359,20 @@ static void qlcnic_83xx_diag_free_res(struct net_device *netdev,
1363{ 1359{
1364 struct qlcnic_adapter *adapter = netdev_priv(netdev); 1360 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1365 struct qlcnic_host_sds_ring *sds_ring; 1361 struct qlcnic_host_sds_ring *sds_ring;
1366 int ring, err; 1362 int ring;
1367 1363
1368 clear_bit(__QLCNIC_DEV_UP, &adapter->state); 1364 clear_bit(__QLCNIC_DEV_UP, &adapter->state);
1369 if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) { 1365 if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) {
1370 for (ring = 0; ring < adapter->drv_sds_rings; ring++) { 1366 for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
1371 sds_ring = &adapter->recv_ctx->sds_rings[ring]; 1367 sds_ring = &adapter->recv_ctx->sds_rings[ring];
1372 qlcnic_83xx_disable_intr(adapter, sds_ring); 1368 if (adapter->flags & QLCNIC_MSIX_ENABLED)
1373 if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) 1369 qlcnic_83xx_disable_intr(adapter, sds_ring);
1374 qlcnic_83xx_enable_mbx_poll(adapter);
1375 } 1370 }
1376 } 1371 }
1377 1372
1378 qlcnic_fw_destroy_ctx(adapter); 1373 qlcnic_fw_destroy_ctx(adapter);
1379 qlcnic_detach(adapter); 1374 qlcnic_detach(adapter);
1380 1375
1381 if (adapter->ahw->diag_test == QLCNIC_LOOPBACK_TEST) {
1382 if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) {
1383 err = qlcnic_83xx_setup_mbx_intr(adapter);
1384 qlcnic_83xx_disable_mbx_poll(adapter);
1385 if (err) {
1386 dev_err(&adapter->pdev->dev,
1387 "%s: failed to setup mbx interrupt\n",
1388 __func__);
1389 goto out;
1390 }
1391 }
1392 }
1393 adapter->ahw->diag_test = 0; 1376 adapter->ahw->diag_test = 0;
1394 adapter->drv_sds_rings = drv_sds_rings; 1377 adapter->drv_sds_rings = drv_sds_rings;
1395 1378
@@ -1399,9 +1382,6 @@ static void qlcnic_83xx_diag_free_res(struct net_device *netdev,
1399 if (netif_running(netdev)) 1382 if (netif_running(netdev))
1400 __qlcnic_up(adapter, netdev); 1383 __qlcnic_up(adapter, netdev);
1401 1384
1402 if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST &&
1403 !(adapter->flags & QLCNIC_MSIX_ENABLED))
1404 qlcnic_83xx_disable_mbx_poll(adapter);
1405out: 1385out:
1406 netif_device_attach(netdev); 1386 netif_device_attach(netdev);
1407} 1387}
@@ -1704,12 +1684,6 @@ int qlcnic_83xx_loopback_test(struct net_device *netdev, u8 mode)
1704 } 1684 }
1705 } while ((adapter->ahw->linkup && ahw->has_link_events) != 1); 1685 } while ((adapter->ahw->linkup && ahw->has_link_events) != 1);
1706 1686
1707 /* Make sure carrier is off and queue is stopped during loopback */
1708 if (netif_running(netdev)) {
1709 netif_carrier_off(netdev);
1710 netif_tx_stop_all_queues(netdev);
1711 }
1712
1713 ret = qlcnic_do_lb_test(adapter, mode); 1687 ret = qlcnic_do_lb_test(adapter, mode);
1714 1688
1715 qlcnic_83xx_clear_lb_mode(adapter, mode); 1689 qlcnic_83xx_clear_lb_mode(adapter, mode);
@@ -2141,6 +2115,7 @@ static void qlcnic_83xx_handle_link_aen(struct qlcnic_adapter *adapter,
2141 ahw->link_autoneg = MSB(MSW(data[3])); 2115 ahw->link_autoneg = MSB(MSW(data[3]));
2142 ahw->module_type = MSB(LSW(data[3])); 2116 ahw->module_type = MSB(LSW(data[3]));
2143 ahw->has_link_events = 1; 2117 ahw->has_link_events = 1;
2118 ahw->lb_mode = data[4] & QLCNIC_LB_MODE_MASK;
2144 qlcnic_advert_link_change(adapter, link_status); 2119 qlcnic_advert_link_change(adapter, link_status);
2145} 2120}
2146 2121
@@ -3754,6 +3729,19 @@ static void qlcnic_83xx_decode_mbx_rsp(struct qlcnic_adapter *adapter,
3754 return; 3729 return;
3755} 3730}
3756 3731
3732static inline void qlcnic_dump_mailbox_registers(struct qlcnic_adapter *adapter)
3733{
3734 struct qlcnic_hardware_context *ahw = adapter->ahw;
3735 u32 offset;
3736
3737 offset = QLCRDX(ahw, QLCNIC_DEF_INT_MASK);
3738 dev_info(&adapter->pdev->dev, "Mbx interrupt mask=0x%x, Mbx interrupt enable=0x%x, Host mbx control=0x%x, Fw mbx control=0x%x",
3739 readl(ahw->pci_base0 + offset),
3740 QLCRDX(ahw, QLCNIC_MBX_INTR_ENBL),
3741 QLCRDX(ahw, QLCNIC_HOST_MBX_CTRL),
3742 QLCRDX(ahw, QLCNIC_FW_MBX_CTRL));
3743}
3744
3757static void qlcnic_83xx_mailbox_worker(struct work_struct *work) 3745static void qlcnic_83xx_mailbox_worker(struct work_struct *work)
3758{ 3746{
3759 struct qlcnic_mailbox *mbx = container_of(work, struct qlcnic_mailbox, 3747 struct qlcnic_mailbox *mbx = container_of(work, struct qlcnic_mailbox,
@@ -3798,6 +3786,8 @@ static void qlcnic_83xx_mailbox_worker(struct work_struct *work)
3798 __func__, cmd->cmd_op, cmd->type, ahw->pci_func, 3786 __func__, cmd->cmd_op, cmd->type, ahw->pci_func,
3799 ahw->op_mode); 3787 ahw->op_mode);
3800 clear_bit(QLC_83XX_MBX_READY, &mbx->status); 3788 clear_bit(QLC_83XX_MBX_READY, &mbx->status);
3789 qlcnic_dump_mailbox_registers(adapter);
3790 qlcnic_83xx_get_mbx_data(adapter, cmd);
3801 qlcnic_dump_mbx(adapter, cmd); 3791 qlcnic_dump_mbx(adapter, cmd);
3802 qlcnic_83xx_idc_request_reset(adapter, 3792 qlcnic_83xx_idc_request_reset(adapter,
3803 QLCNIC_FORCE_FW_DUMP_KEY); 3793 QLCNIC_FORCE_FW_DUMP_KEY);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
index 4cae6caa6bfa..a6a33508e401 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
@@ -662,4 +662,5 @@ pci_ers_result_t qlcnic_83xx_io_error_detected(struct pci_dev *,
662 pci_channel_state_t); 662 pci_channel_state_t);
663pci_ers_result_t qlcnic_83xx_io_slot_reset(struct pci_dev *); 663pci_ers_result_t qlcnic_83xx_io_slot_reset(struct pci_dev *);
664void qlcnic_83xx_io_resume(struct pci_dev *); 664void qlcnic_83xx_io_resume(struct pci_dev *);
665void qlcnic_83xx_stop_hw(struct qlcnic_adapter *);
665#endif 666#endif
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
index 89208e5b25d6..918e18ddf038 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -740,6 +740,7 @@ static int qlcnic_83xx_idc_unknown_state(struct qlcnic_adapter *adapter)
740 adapter->ahw->idc.err_code = -EIO; 740 adapter->ahw->idc.err_code = -EIO;
741 dev_err(&adapter->pdev->dev, 741 dev_err(&adapter->pdev->dev,
742 "%s: Device in unknown state\n", __func__); 742 "%s: Device in unknown state\n", __func__);
743 clear_bit(__QLCNIC_RESETTING, &adapter->state);
743 return 0; 744 return 0;
744} 745}
745 746
@@ -818,7 +819,6 @@ static int qlcnic_83xx_idc_ready_state(struct qlcnic_adapter *adapter)
818 struct qlcnic_hardware_context *ahw = adapter->ahw; 819 struct qlcnic_hardware_context *ahw = adapter->ahw;
819 struct qlcnic_mailbox *mbx = ahw->mailbox; 820 struct qlcnic_mailbox *mbx = ahw->mailbox;
820 int ret = 0; 821 int ret = 0;
821 u32 owner;
822 u32 val; 822 u32 val;
823 823
824 /* Perform NIC configuration based ready state entry actions */ 824 /* Perform NIC configuration based ready state entry actions */
@@ -848,9 +848,9 @@ static int qlcnic_83xx_idc_ready_state(struct qlcnic_adapter *adapter)
848 set_bit(__QLCNIC_RESETTING, &adapter->state); 848 set_bit(__QLCNIC_RESETTING, &adapter->state);
849 qlcnic_83xx_idc_enter_need_reset_state(adapter, 1); 849 qlcnic_83xx_idc_enter_need_reset_state(adapter, 1);
850 } else { 850 } else {
851 owner = qlcnic_83xx_idc_find_reset_owner_id(adapter); 851 netdev_info(adapter->netdev, "%s: Auto firmware recovery is disabled\n",
852 if (ahw->pci_func == owner) 852 __func__);
853 qlcnic_dump_fw(adapter); 853 qlcnic_83xx_idc_enter_failed_state(adapter, 1);
854 } 854 }
855 return -EIO; 855 return -EIO;
856 } 856 }
@@ -948,13 +948,26 @@ static int qlcnic_83xx_idc_need_quiesce_state(struct qlcnic_adapter *adapter)
948 return 0; 948 return 0;
949} 949}
950 950
951static int qlcnic_83xx_idc_failed_state(struct qlcnic_adapter *adapter) 951static void qlcnic_83xx_idc_failed_state(struct qlcnic_adapter *adapter)
952{ 952{
953 dev_err(&adapter->pdev->dev, "%s: please restart!!\n", __func__); 953 struct qlcnic_hardware_context *ahw = adapter->ahw;
954 u32 val, owner;
955
956 val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL);
957 if (val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY) {
958 owner = qlcnic_83xx_idc_find_reset_owner_id(adapter);
959 if (ahw->pci_func == owner) {
960 qlcnic_83xx_stop_hw(adapter);
961 qlcnic_dump_fw(adapter);
962 }
963 }
964
965 netdev_warn(adapter->netdev, "%s: Reboot will be required to recover the adapter!!\n",
966 __func__);
954 clear_bit(__QLCNIC_RESETTING, &adapter->state); 967 clear_bit(__QLCNIC_RESETTING, &adapter->state);
955 adapter->ahw->idc.err_code = -EIO; 968 ahw->idc.err_code = -EIO;
956 969
957 return 0; 970 return;
958} 971}
959 972
960static int qlcnic_83xx_idc_quiesce_state(struct qlcnic_adapter *adapter) 973static int qlcnic_83xx_idc_quiesce_state(struct qlcnic_adapter *adapter)
@@ -1063,12 +1076,6 @@ void qlcnic_83xx_idc_poll_dev_state(struct work_struct *work)
1063 adapter->ahw->idc.prev_state = adapter->ahw->idc.curr_state; 1076 adapter->ahw->idc.prev_state = adapter->ahw->idc.curr_state;
1064 qlcnic_83xx_periodic_tasks(adapter); 1077 qlcnic_83xx_periodic_tasks(adapter);
1065 1078
1066 /* Do not reschedule if firmaware is in hanged state and auto
1067 * recovery is disabled
1068 */
1069 if ((adapter->flags & QLCNIC_FW_HANG) && !qlcnic_auto_fw_reset)
1070 return;
1071
1072 /* Re-schedule the function */ 1079 /* Re-schedule the function */
1073 if (test_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status)) 1080 if (test_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status))
1074 qlcnic_schedule_work(adapter, qlcnic_83xx_idc_poll_dev_state, 1081 qlcnic_schedule_work(adapter, qlcnic_83xx_idc_poll_dev_state,
@@ -1219,10 +1226,10 @@ void qlcnic_83xx_idc_request_reset(struct qlcnic_adapter *adapter, u32 key)
1219 } 1226 }
1220 1227
1221 val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL); 1228 val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL);
1222 if ((val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY) || 1229 if (val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY) {
1223 !qlcnic_auto_fw_reset) { 1230 netdev_info(adapter->netdev, "%s: Auto firmware recovery is disabled\n",
1224 dev_err(&adapter->pdev->dev, 1231 __func__);
1225 "%s:failed, device in non reset mode\n", __func__); 1232 qlcnic_83xx_idc_enter_failed_state(adapter, 0);
1226 qlcnic_83xx_unlock_driver(adapter); 1233 qlcnic_83xx_unlock_driver(adapter);
1227 return; 1234 return;
1228 } 1235 }
@@ -1254,24 +1261,24 @@ static int qlcnic_83xx_copy_bootloader(struct qlcnic_adapter *adapter)
1254 if (size & 0xF) 1261 if (size & 0xF)
1255 size = (size + 16) & ~0xF; 1262 size = (size + 16) & ~0xF;
1256 1263
1257 p_cache = kzalloc(size, GFP_KERNEL); 1264 p_cache = vzalloc(size);
1258 if (p_cache == NULL) 1265 if (p_cache == NULL)
1259 return -ENOMEM; 1266 return -ENOMEM;
1260 1267
1261 ret = qlcnic_83xx_lockless_flash_read32(adapter, src, p_cache, 1268 ret = qlcnic_83xx_lockless_flash_read32(adapter, src, p_cache,
1262 size / sizeof(u32)); 1269 size / sizeof(u32));
1263 if (ret) { 1270 if (ret) {
1264 kfree(p_cache); 1271 vfree(p_cache);
1265 return ret; 1272 return ret;
1266 } 1273 }
1267 /* 16 byte write to MS memory */ 1274 /* 16 byte write to MS memory */
1268 ret = qlcnic_83xx_ms_mem_write128(adapter, dest, (u32 *)p_cache, 1275 ret = qlcnic_83xx_ms_mem_write128(adapter, dest, (u32 *)p_cache,
1269 size / 16); 1276 size / 16);
1270 if (ret) { 1277 if (ret) {
1271 kfree(p_cache); 1278 vfree(p_cache);
1272 return ret; 1279 return ret;
1273 } 1280 }
1274 kfree(p_cache); 1281 vfree(p_cache);
1275 1282
1276 return ret; 1283 return ret;
1277} 1284}
@@ -1939,7 +1946,7 @@ static void qlcnic_83xx_exec_template_cmd(struct qlcnic_adapter *p_dev,
1939 p_dev->ahw->reset.seq_index = index; 1946 p_dev->ahw->reset.seq_index = index;
1940} 1947}
1941 1948
1942static void qlcnic_83xx_stop_hw(struct qlcnic_adapter *p_dev) 1949void qlcnic_83xx_stop_hw(struct qlcnic_adapter *p_dev)
1943{ 1950{
1944 p_dev->ahw->reset.seq_index = 0; 1951 p_dev->ahw->reset.seq_index = 0;
1945 1952
@@ -1994,6 +2001,14 @@ static int qlcnic_83xx_restart_hw(struct qlcnic_adapter *adapter)
1994 val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL); 2001 val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL);
1995 if (!(val & QLC_83XX_IDC_GRACEFULL_RESET)) 2002 if (!(val & QLC_83XX_IDC_GRACEFULL_RESET))
1996 qlcnic_dump_fw(adapter); 2003 qlcnic_dump_fw(adapter);
2004
2005 if (val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY) {
2006 netdev_info(adapter->netdev, "%s: Auto firmware recovery is disabled\n",
2007 __func__);
2008 qlcnic_83xx_idc_enter_failed_state(adapter, 1);
2009 return err;
2010 }
2011
1997 qlcnic_83xx_init_hw(adapter); 2012 qlcnic_83xx_init_hw(adapter);
1998 2013
1999 if (qlcnic_83xx_copy_bootloader(adapter)) 2014 if (qlcnic_83xx_copy_bootloader(adapter))
@@ -2073,8 +2088,8 @@ int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter)
2073 ahw->nic_mode = QLCNIC_DEFAULT_MODE; 2088 ahw->nic_mode = QLCNIC_DEFAULT_MODE;
2074 adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver; 2089 adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
2075 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry; 2090 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
2076 adapter->max_sds_rings = ahw->max_rx_ques; 2091 adapter->max_sds_rings = QLCNIC_MAX_SDS_RINGS;
2077 adapter->max_tx_rings = ahw->max_tx_ques; 2092 adapter->max_tx_rings = QLCNIC_MAX_TX_RINGS;
2078 } else { 2093 } else {
2079 return -EIO; 2094 return -EIO;
2080 } 2095 }
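The bootloader-copy hunk in this file switches the staging buffer from kzalloc() to vzalloc(): the flash region being copied can be large enough that a physically contiguous allocation may fail under memory pressure, and the buffer is only touched by the CPU, so vmalloc space is sufficient. A minimal sketch of the pattern (illustrative fragment, error paths trimmed):

    #include <linux/errno.h>
    #include <linux/vmalloc.h>

    /* Illustrative only: stage a possibly large, CPU-only copy buffer. */
    static int stage_copy(size_t size)
    {
            void *p_cache = vzalloc(size);   /* virtually contiguous is enough */

            if (!p_cache)
                    return -ENOMEM;
            /* ... fill p_cache from flash, then push it to device memory ... */
            vfree(p_cache);
            return 0;
    }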
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index b36c02fafcfd..6b08194aa0d4 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -167,27 +167,35 @@ static const char qlcnic_gstrings_test[][ETH_GSTRING_LEN] = {
167 167
168#define QLCNIC_TEST_LEN ARRAY_SIZE(qlcnic_gstrings_test) 168#define QLCNIC_TEST_LEN ARRAY_SIZE(qlcnic_gstrings_test)
169 169
170static inline int qlcnic_82xx_statistics(void) 170static inline int qlcnic_82xx_statistics(struct qlcnic_adapter *adapter)
171{ 171{
172 return ARRAY_SIZE(qlcnic_device_gstrings_stats) + 172 return ARRAY_SIZE(qlcnic_gstrings_stats) +
173 ARRAY_SIZE(qlcnic_83xx_mac_stats_strings); 173 ARRAY_SIZE(qlcnic_83xx_mac_stats_strings) +
174 QLCNIC_TX_STATS_LEN * adapter->drv_tx_rings;
174} 175}
175 176
176static inline int qlcnic_83xx_statistics(void) 177static inline int qlcnic_83xx_statistics(struct qlcnic_adapter *adapter)
177{ 178{
178 return ARRAY_SIZE(qlcnic_83xx_tx_stats_strings) + 179 return ARRAY_SIZE(qlcnic_gstrings_stats) +
180 ARRAY_SIZE(qlcnic_83xx_tx_stats_strings) +
179 ARRAY_SIZE(qlcnic_83xx_mac_stats_strings) + 181 ARRAY_SIZE(qlcnic_83xx_mac_stats_strings) +
180 ARRAY_SIZE(qlcnic_83xx_rx_stats_strings); 182 ARRAY_SIZE(qlcnic_83xx_rx_stats_strings) +
183 QLCNIC_TX_STATS_LEN * adapter->drv_tx_rings;
181} 184}
182 185
183static int qlcnic_dev_statistics_len(struct qlcnic_adapter *adapter) 186static int qlcnic_dev_statistics_len(struct qlcnic_adapter *adapter)
184{ 187{
185 if (qlcnic_82xx_check(adapter)) 188 int len = -1;
186 return qlcnic_82xx_statistics(); 189
187 else if (qlcnic_83xx_check(adapter)) 190 if (qlcnic_82xx_check(adapter)) {
188 return qlcnic_83xx_statistics(); 191 len = qlcnic_82xx_statistics(adapter);
189 else 192 if (adapter->flags & QLCNIC_ESWITCH_ENABLED)
190 return -1; 193 len += ARRAY_SIZE(qlcnic_device_gstrings_stats);
194 } else if (qlcnic_83xx_check(adapter)) {
195 len = qlcnic_83xx_statistics(adapter);
196 }
197
198 return len;
191} 199}
192 200
193#define QLCNIC_TX_INTR_NOT_CONFIGURED 0X78563412 201#define QLCNIC_TX_INTR_NOT_CONFIGURED 0X78563412
@@ -667,30 +675,25 @@ qlcnic_set_ringparam(struct net_device *dev,
667static int qlcnic_validate_ring_count(struct qlcnic_adapter *adapter, 675static int qlcnic_validate_ring_count(struct qlcnic_adapter *adapter,
668 u8 rx_ring, u8 tx_ring) 676 u8 rx_ring, u8 tx_ring)
669{ 677{
678 if (rx_ring == 0 || tx_ring == 0)
679 return -EINVAL;
680
670 if (rx_ring != 0) { 681 if (rx_ring != 0) {
671 if (rx_ring > adapter->max_sds_rings) { 682 if (rx_ring > adapter->max_sds_rings) {
672 netdev_err(adapter->netdev, "Invalid ring count, SDS ring count %d should not be greater than max %d driver sds rings.\n", 683 netdev_err(adapter->netdev,
684 "Invalid ring count, SDS ring count %d should not be greater than max %d driver sds rings.\n",
673 rx_ring, adapter->max_sds_rings); 685 rx_ring, adapter->max_sds_rings);
674 return -EINVAL; 686 return -EINVAL;
675 } 687 }
676 } 688 }
677 689
678 if (tx_ring != 0) { 690 if (tx_ring != 0) {
679 if (qlcnic_82xx_check(adapter) && 691 if (tx_ring > adapter->max_tx_rings) {
680 (tx_ring > adapter->max_tx_rings)) {
681 netdev_err(adapter->netdev, 692 netdev_err(adapter->netdev,
682 "Invalid ring count, Tx ring count %d should not be greater than max %d driver Tx rings.\n", 693 "Invalid ring count, Tx ring count %d should not be greater than max %d driver Tx rings.\n",
683 tx_ring, adapter->max_tx_rings); 694 tx_ring, adapter->max_tx_rings);
684 return -EINVAL; 695 return -EINVAL;
685 } 696 }
686
687 if (qlcnic_83xx_check(adapter) &&
688 (tx_ring > QLCNIC_SINGLE_RING)) {
689 netdev_err(adapter->netdev,
690 "Invalid ring count, Tx ring count %d should not be greater than %d driver Tx rings.\n",
691 tx_ring, QLCNIC_SINGLE_RING);
692 return -EINVAL;
693 }
694 } 697 }
695 698
696 return 0; 699 return 0;
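With the new early return for a zero ring count, the remaining "rx_ring != 0" and "tx_ring != 0" guards in this function can no longer be false; a later cleanup could collapse the checks to something like the sketch below (illustrative only, error messages omitted, not part of this patch):

    /* Sketch: equivalent validation after the early-return change. */
    static int validate_ring_count(struct qlcnic_adapter *adapter,
                                   u8 rx_ring, u8 tx_ring)
    {
            if (rx_ring == 0 || tx_ring == 0)
                    return -EINVAL;
            if (rx_ring > adapter->max_sds_rings ||
                tx_ring > adapter->max_tx_rings)
                    return -EINVAL;
            return 0;
    }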
@@ -925,18 +928,13 @@ static int qlcnic_eeprom_test(struct net_device *dev)
925 928
926static int qlcnic_get_sset_count(struct net_device *dev, int sset) 929static int qlcnic_get_sset_count(struct net_device *dev, int sset)
927{ 930{
928 int len;
929 931
930 struct qlcnic_adapter *adapter = netdev_priv(dev); 932 struct qlcnic_adapter *adapter = netdev_priv(dev);
931 switch (sset) { 933 switch (sset) {
932 case ETH_SS_TEST: 934 case ETH_SS_TEST:
933 return QLCNIC_TEST_LEN; 935 return QLCNIC_TEST_LEN;
934 case ETH_SS_STATS: 936 case ETH_SS_STATS:
935 len = qlcnic_dev_statistics_len(adapter) + QLCNIC_STATS_LEN; 937 return qlcnic_dev_statistics_len(adapter);
936 if ((adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
937 qlcnic_83xx_check(adapter))
938 return len;
939 return qlcnic_82xx_statistics();
940 default: 938 default:
941 return -EOPNOTSUPP; 939 return -EOPNOTSUPP;
942 } 940 }
@@ -948,6 +946,7 @@ static int qlcnic_irq_test(struct net_device *netdev)
948 struct qlcnic_hardware_context *ahw = adapter->ahw; 946 struct qlcnic_hardware_context *ahw = adapter->ahw;
949 struct qlcnic_cmd_args cmd; 947 struct qlcnic_cmd_args cmd;
950 int ret, drv_sds_rings = adapter->drv_sds_rings; 948 int ret, drv_sds_rings = adapter->drv_sds_rings;
949 int drv_tx_rings = adapter->drv_tx_rings;
951 950
952 if (qlcnic_83xx_check(adapter)) 951 if (qlcnic_83xx_check(adapter))
953 return qlcnic_83xx_interrupt_test(netdev); 952 return qlcnic_83xx_interrupt_test(netdev);
@@ -980,6 +979,7 @@ free_diag_res:
980 979
981clear_diag_irq: 980clear_diag_irq:
982 adapter->drv_sds_rings = drv_sds_rings; 981 adapter->drv_sds_rings = drv_sds_rings;
982 adapter->drv_tx_rings = drv_tx_rings;
983 clear_bit(__QLCNIC_RESETTING, &adapter->state); 983 clear_bit(__QLCNIC_RESETTING, &adapter->state);
984 984
985 return ret; 985 return ret;
@@ -1270,7 +1270,7 @@ static u64 *qlcnic_fill_stats(u64 *data, void *stats, int type)
1270 return data; 1270 return data;
1271} 1271}
1272 1272
1273static void qlcnic_update_stats(struct qlcnic_adapter *adapter) 1273void qlcnic_update_stats(struct qlcnic_adapter *adapter)
1274{ 1274{
1275 struct qlcnic_host_tx_ring *tx_ring; 1275 struct qlcnic_host_tx_ring *tx_ring;
1276 int ring; 1276 int ring;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
index e9c21e5d0ca9..c4262c23ed7c 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
@@ -134,6 +134,8 @@ void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter,
134 struct qlcnic_skb_frag *buffrag; 134 struct qlcnic_skb_frag *buffrag;
135 int i, j; 135 int i, j;
136 136
137 spin_lock(&tx_ring->tx_clean_lock);
138
137 cmd_buf = tx_ring->cmd_buf_arr; 139 cmd_buf = tx_ring->cmd_buf_arr;
138 for (i = 0; i < tx_ring->num_desc; i++) { 140 for (i = 0; i < tx_ring->num_desc; i++) {
139 buffrag = cmd_buf->frag_array; 141 buffrag = cmd_buf->frag_array;
@@ -157,6 +159,8 @@ void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter,
157 } 159 }
158 cmd_buf++; 160 cmd_buf++;
159 } 161 }
162
163 spin_unlock(&tx_ring->tx_clean_lock);
160} 164}
161 165
162void qlcnic_free_sw_resources(struct qlcnic_adapter *adapter) 166void qlcnic_free_sw_resources(struct qlcnic_adapter *adapter)
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index 0149c9495347..ad1531ae3aa8 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -687,17 +687,15 @@ void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup)
687 if (adapter->ahw->linkup && !linkup) { 687 if (adapter->ahw->linkup && !linkup) {
688 netdev_info(netdev, "NIC Link is down\n"); 688 netdev_info(netdev, "NIC Link is down\n");
689 adapter->ahw->linkup = 0; 689 adapter->ahw->linkup = 0;
690 if (netif_running(netdev)) { 690 netif_carrier_off(netdev);
691 netif_carrier_off(netdev);
692 netif_tx_stop_all_queues(netdev);
693 }
694 } else if (!adapter->ahw->linkup && linkup) { 691 } else if (!adapter->ahw->linkup && linkup) {
692 /* Do not advertise Link up if the port is in loopback mode */
693 if (qlcnic_83xx_check(adapter) && adapter->ahw->lb_mode)
694 return;
695
695 netdev_info(netdev, "NIC Link is up\n"); 696 netdev_info(netdev, "NIC Link is up\n");
696 adapter->ahw->linkup = 1; 697 adapter->ahw->linkup = 1;
697 if (netif_running(netdev)) { 698 netif_carrier_on(netdev);
698 netif_carrier_on(netdev);
699 netif_wake_queue(netdev);
700 }
701 } 699 }
702} 700}
703 701
@@ -784,7 +782,7 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter,
784 struct net_device *netdev = adapter->netdev; 782 struct net_device *netdev = adapter->netdev;
785 struct qlcnic_skb_frag *frag; 783 struct qlcnic_skb_frag *frag;
786 784
787 if (!spin_trylock(&adapter->tx_clean_lock)) 785 if (!spin_trylock(&tx_ring->tx_clean_lock))
788 return 1; 786 return 1;
789 787
790 sw_consumer = tx_ring->sw_consumer; 788 sw_consumer = tx_ring->sw_consumer;
@@ -813,8 +811,9 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter,
813 break; 811 break;
814 } 812 }
815 813
814 tx_ring->sw_consumer = sw_consumer;
815
816 if (count && netif_running(netdev)) { 816 if (count && netif_running(netdev)) {
817 tx_ring->sw_consumer = sw_consumer;
818 smp_mb(); 817 smp_mb();
819 if (netif_tx_queue_stopped(tx_ring->txq) && 818 if (netif_tx_queue_stopped(tx_ring->txq) &&
820 netif_carrier_ok(netdev)) { 819 netif_carrier_ok(netdev)) {
@@ -840,7 +839,8 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter,
840 */ 839 */
841 hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer)); 840 hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
842 done = (sw_consumer == hw_consumer); 841 done = (sw_consumer == hw_consumer);
843 spin_unlock(&adapter->tx_clean_lock); 842
843 spin_unlock(&tx_ring->tx_clean_lock);
844 844
845 return done; 845 return done;
846} 846}
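Taken together with the qlcnic.h and qlcnic_init.c hunks, the Tx-cleanup lock now lives in each Tx ring rather than in the adapter, so cleaning one ring no longer serialises against the others, and sw_consumer is published before the netif_running() check. The shape of the per-ring pattern (illustrative fragment, field names as in this patch):

    /* At ring allocation time */
    spin_lock_init(&tx_ring->tx_clean_lock);

    /* Completion path: skip this ring if another context is already cleaning */
    if (!spin_trylock(&tx_ring->tx_clean_lock))
            return 1;
    /* ... reap completed descriptors, update tx_ring->sw_consumer ... */
    spin_unlock(&tx_ring->tx_clean_lock);

    /* Teardown path: wait out any in-flight cleanup before freeing buffers */
    spin_lock(&tx_ring->tx_clean_lock);
    /* ... release pending skbs and frags ... */
    spin_unlock(&tx_ring->tx_clean_lock);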
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 05c1eef8df13..550791b8fbae 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -1178,6 +1178,7 @@ qlcnic_initialize_nic(struct qlcnic_adapter *adapter)
1178 } else { 1178 } else {
1179 adapter->ahw->nic_mode = QLCNIC_DEFAULT_MODE; 1179 adapter->ahw->nic_mode = QLCNIC_DEFAULT_MODE;
1180 adapter->max_tx_rings = QLCNIC_MAX_HW_TX_RINGS; 1180 adapter->max_tx_rings = QLCNIC_MAX_HW_TX_RINGS;
1181 adapter->max_sds_rings = QLCNIC_MAX_SDS_RINGS;
1181 adapter->flags &= ~QLCNIC_ESWITCH_ENABLED; 1182 adapter->flags &= ~QLCNIC_ESWITCH_ENABLED;
1182 } 1183 }
1183 1184
@@ -1755,7 +1756,6 @@ void __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
1755 if (qlcnic_sriov_vf_check(adapter)) 1756 if (qlcnic_sriov_vf_check(adapter))
1756 qlcnic_sriov_cleanup_async_list(&adapter->ahw->sriov->bc); 1757 qlcnic_sriov_cleanup_async_list(&adapter->ahw->sriov->bc);
1757 smp_mb(); 1758 smp_mb();
1758 spin_lock(&adapter->tx_clean_lock);
1759 netif_carrier_off(netdev); 1759 netif_carrier_off(netdev);
1760 adapter->ahw->linkup = 0; 1760 adapter->ahw->linkup = 0;
1761 netif_tx_disable(netdev); 1761 netif_tx_disable(netdev);
@@ -1776,7 +1776,6 @@ void __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
1776 1776
1777 for (ring = 0; ring < adapter->drv_tx_rings; ring++) 1777 for (ring = 0; ring < adapter->drv_tx_rings; ring++)
1778 qlcnic_release_tx_buffers(adapter, &adapter->tx_ring[ring]); 1778 qlcnic_release_tx_buffers(adapter, &adapter->tx_ring[ring]);
1779 spin_unlock(&adapter->tx_clean_lock);
1780} 1779}
1781 1780
1782/* Usage: During suspend and firmware recovery module */ 1781/* Usage: During suspend and firmware recovery module */
@@ -1940,7 +1939,6 @@ int qlcnic_diag_alloc_res(struct net_device *netdev, int test)
1940 qlcnic_detach(adapter); 1939 qlcnic_detach(adapter);
1941 1940
1942 adapter->drv_sds_rings = QLCNIC_SINGLE_RING; 1941 adapter->drv_sds_rings = QLCNIC_SINGLE_RING;
1943 adapter->drv_tx_rings = QLCNIC_SINGLE_RING;
1944 adapter->ahw->diag_test = test; 1942 adapter->ahw->diag_test = test;
1945 adapter->ahw->linkup = 0; 1943 adapter->ahw->linkup = 0;
1946 1944
@@ -2172,6 +2170,7 @@ int qlcnic_alloc_tx_rings(struct qlcnic_adapter *adapter,
2172 } 2170 }
2173 memset(cmd_buf_arr, 0, TX_BUFF_RINGSIZE(tx_ring)); 2171 memset(cmd_buf_arr, 0, TX_BUFF_RINGSIZE(tx_ring));
2174 tx_ring->cmd_buf_arr = cmd_buf_arr; 2172 tx_ring->cmd_buf_arr = cmd_buf_arr;
2173 spin_lock_init(&tx_ring->tx_clean_lock);
2175 } 2174 }
2176 2175
2177 if (qlcnic_83xx_check(adapter) || 2176 if (qlcnic_83xx_check(adapter) ||
@@ -2299,7 +2298,6 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2299 rwlock_init(&adapter->ahw->crb_lock); 2298 rwlock_init(&adapter->ahw->crb_lock);
2300 mutex_init(&adapter->ahw->mem_lock); 2299 mutex_init(&adapter->ahw->mem_lock);
2301 2300
2302 spin_lock_init(&adapter->tx_clean_lock);
2303 INIT_LIST_HEAD(&adapter->mac_list); 2301 INIT_LIST_HEAD(&adapter->mac_list);
2304 2302
2305 qlcnic_register_dcb(adapter); 2303 qlcnic_register_dcb(adapter);
@@ -2782,6 +2780,9 @@ static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev)
2782 struct qlcnic_adapter *adapter = netdev_priv(netdev); 2780 struct qlcnic_adapter *adapter = netdev_priv(netdev);
2783 struct net_device_stats *stats = &netdev->stats; 2781 struct net_device_stats *stats = &netdev->stats;
2784 2782
2783 if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
2784 qlcnic_update_stats(adapter);
2785
2785 stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts; 2786 stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts;
2786 stats->tx_packets = adapter->stats.xmitfinished; 2787 stats->tx_packets = adapter->stats.xmitfinished;
2787 stats->rx_bytes = adapter->stats.rxbytes + adapter->stats.lrobytes; 2788 stats->rx_bytes = adapter->stats.rxbytes + adapter->stats.lrobytes;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
index 686f460b1502..024f8161d2fe 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
@@ -75,7 +75,6 @@ static int qlcnic_sriov_pf_cal_res_limit(struct qlcnic_adapter *adapter,
75 num_vfs = sriov->num_vfs; 75 num_vfs = sriov->num_vfs;
76 max = num_vfs + 1; 76 max = num_vfs + 1;
77 info->bit_offsets = 0xffff; 77 info->bit_offsets = 0xffff;
78 info->max_tx_ques = res->num_tx_queues / max;
79 info->max_rx_mcast_mac_filters = res->num_rx_mcast_mac_filters; 78 info->max_rx_mcast_mac_filters = res->num_rx_mcast_mac_filters;
80 num_vf_macs = QLCNIC_SRIOV_VF_MAX_MAC; 79 num_vf_macs = QLCNIC_SRIOV_VF_MAX_MAC;
81 80
@@ -86,6 +85,7 @@ static int qlcnic_sriov_pf_cal_res_limit(struct qlcnic_adapter *adapter,
86 info->max_tx_mac_filters = temp; 85 info->max_tx_mac_filters = temp;
87 info->min_tx_bw = 0; 86 info->min_tx_bw = 0;
88 info->max_tx_bw = MAX_BW; 87 info->max_tx_bw = MAX_BW;
88 info->max_tx_ques = res->num_tx_queues - sriov->num_vfs;
89 } else { 89 } else {
90 id = qlcnic_sriov_func_to_index(adapter, func); 90 id = qlcnic_sriov_func_to_index(adapter, func);
91 if (id < 0) 91 if (id < 0)
@@ -95,6 +95,7 @@ static int qlcnic_sriov_pf_cal_res_limit(struct qlcnic_adapter *adapter,
95 info->max_tx_bw = vp->max_tx_bw; 95 info->max_tx_bw = vp->max_tx_bw;
96 info->max_rx_ucast_mac_filters = num_vf_macs; 96 info->max_rx_ucast_mac_filters = num_vf_macs;
97 info->max_tx_mac_filters = num_vf_macs; 97 info->max_tx_mac_filters = num_vf_macs;
98 info->max_tx_ques = QLCNIC_SINGLE_RING;
98 } 99 }
99 100
100 info->max_rx_ip_addr = res->num_destip / max; 101 info->max_rx_ip_addr = res->num_destip / max;
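The SR-IOV hunk stops dividing Tx queues evenly across all functions: the PF now advertises whatever is left after reserving one queue per VF, and each VF is limited to a single ring. A toy computation of that split (numbers below are illustrative):

    #include <stdio.h>

    int main(void)
    {
            int num_tx_queues = 16;  /* illustrative hardware total */
            int num_vfs = 7;

            int pf_queues = num_tx_queues - num_vfs;  /* PF keeps the rest */
            int vf_queues = 1;                        /* one ring per VF */

            printf("PF: %d, per-VF: %d\n", pf_queues, vf_queues);
            return 0;
    }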
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge.h b/drivers/net/ethernet/qlogic/qlge/qlge.h
index 0c9c4e895595..03517478e589 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge.h
+++ b/drivers/net/ethernet/qlogic/qlge/qlge.h
@@ -18,7 +18,7 @@
18 */ 18 */
19#define DRV_NAME "qlge" 19#define DRV_NAME "qlge"
20#define DRV_STRING "QLogic 10 Gigabit PCI-E Ethernet Driver " 20#define DRV_STRING "QLogic 10 Gigabit PCI-E Ethernet Driver "
21#define DRV_VERSION "1.00.00.33" 21#define DRV_VERSION "1.00.00.34"
22 22
23#define WQ_ADDR_ALIGN 0x3 /* 4 byte alignment */ 23#define WQ_ADDR_ALIGN 0x3 /* 4 byte alignment */
24 24
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c b/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c
index 0780e039b271..8dee1beb9854 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c
@@ -181,6 +181,7 @@ static const char ql_gstrings_test[][ETH_GSTRING_LEN] = {
181}; 181};
182#define QLGE_TEST_LEN (sizeof(ql_gstrings_test) / ETH_GSTRING_LEN) 182#define QLGE_TEST_LEN (sizeof(ql_gstrings_test) / ETH_GSTRING_LEN)
183#define QLGE_STATS_LEN ARRAY_SIZE(ql_gstrings_stats) 183#define QLGE_STATS_LEN ARRAY_SIZE(ql_gstrings_stats)
184#define QLGE_RCV_MAC_ERR_STATS 7
184 185
185static int ql_update_ring_coalescing(struct ql_adapter *qdev) 186static int ql_update_ring_coalescing(struct ql_adapter *qdev)
186{ 187{
@@ -280,6 +281,9 @@ static void ql_update_stats(struct ql_adapter *qdev)
280 iter++; 281 iter++;
281 } 282 }
282 283
284 /* Update receive mac error statistics */
285 iter += QLGE_RCV_MAC_ERR_STATS;
286
283 /* 287 /*
284 * Get Per-priority TX pause frame counter statistics. 288 * Get Per-priority TX pause frame counter statistics.
285 */ 289 */
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index a245dc18d769..449f506d2e8f 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -2376,14 +2376,6 @@ static netdev_features_t qlge_fix_features(struct net_device *ndev,
2376 netdev_features_t features) 2376 netdev_features_t features)
2377{ 2377{
2378 int err; 2378 int err;
2379 /*
2380 * Since there is no support for separate rx/tx vlan accel
2381 * enable/disable make sure tx flag is always in same state as rx.
2382 */
2383 if (features & NETIF_F_HW_VLAN_CTAG_RX)
2384 features |= NETIF_F_HW_VLAN_CTAG_TX;
2385 else
2386 features &= ~NETIF_F_HW_VLAN_CTAG_TX;
2387 2379
2388 /* Update the behavior of vlan accel in the adapter */ 2380 /* Update the behavior of vlan accel in the adapter */
2389 err = qlge_update_hw_vlan_features(ndev, features); 2381 err = qlge_update_hw_vlan_features(ndev, features);
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index f2a2128165dd..737c1a881f78 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -678,9 +678,6 @@ static void cp_tx (struct cp_private *cp)
678 le32_to_cpu(txd->opts1) & 0xffff, 678 le32_to_cpu(txd->opts1) & 0xffff,
679 PCI_DMA_TODEVICE); 679 PCI_DMA_TODEVICE);
680 680
681 bytes_compl += skb->len;
682 pkts_compl++;
683
684 if (status & LastFrag) { 681 if (status & LastFrag) {
685 if (status & (TxError | TxFIFOUnder)) { 682 if (status & (TxError | TxFIFOUnder)) {
686 netif_dbg(cp, tx_err, cp->dev, 683 netif_dbg(cp, tx_err, cp->dev,
@@ -702,6 +699,8 @@ static void cp_tx (struct cp_private *cp)
702 netif_dbg(cp, tx_done, cp->dev, 699 netif_dbg(cp, tx_done, cp->dev,
703 "tx done, slot %d\n", tx_tail); 700 "tx done, slot %d\n", tx_tail);
704 } 701 }
702 bytes_compl += skb->len;
703 pkts_compl++;
705 dev_kfree_skb_irq(skb); 704 dev_kfree_skb_irq(skb);
706 } 705 }
707 706
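In cp_tx() the byte and packet completion counters now move next to dev_kfree_skb_irq(), so they are bumped once per completed skb (on the last fragment) instead of once per descriptor, and multi-fragment packets are no longer double-counted. Illustrative shape after the change:

    /* Count only when the skb is actually completed and freed. */
    if (status & LastFrag) {
            /* ... error / "tx done" handling ... */
            bytes_compl += skb->len;
            pkts_compl++;
            dev_kfree_skb_irq(skb);
    }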
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 799387570766..c737f0ea5de7 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -3465,6 +3465,11 @@ static void rtl8168g_1_hw_phy_config(struct rtl8169_private *tp)
3465 rtl_writephy(tp, 0x14, 0x9065); 3465 rtl_writephy(tp, 0x14, 0x9065);
3466 rtl_writephy(tp, 0x14, 0x1065); 3466 rtl_writephy(tp, 0x14, 0x1065);
3467 3467
3468 /* Check ALDPS bit, disable it if enabled */
3469 rtl_writephy(tp, 0x1f, 0x0a43);
3470 if (rtl_readphy(tp, 0x10) & 0x0004)
3471 rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0004);
3472
3468 rtl_writephy(tp, 0x1f, 0x0000); 3473 rtl_writephy(tp, 0x1f, 0x0000);
3469} 3474}
3470 3475
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 2e27837ce6a2..fd844b53e385 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -585,7 +585,7 @@ static void efx_start_datapath(struct efx_nic *efx)
585 EFX_MAX_FRAME_LEN(efx->net_dev->mtu) + 585 EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
586 efx->type->rx_buffer_padding); 586 efx->type->rx_buffer_padding);
587 rx_buf_len = (sizeof(struct efx_rx_page_state) + 587 rx_buf_len = (sizeof(struct efx_rx_page_state) +
588 NET_IP_ALIGN + efx->rx_dma_len); 588 efx->rx_ip_align + efx->rx_dma_len);
589 if (rx_buf_len <= PAGE_SIZE) { 589 if (rx_buf_len <= PAGE_SIZE) {
590 efx->rx_scatter = efx->type->always_rx_scatter; 590 efx->rx_scatter = efx->type->always_rx_scatter;
591 efx->rx_buffer_order = 0; 591 efx->rx_buffer_order = 0;
@@ -645,6 +645,8 @@ static void efx_start_datapath(struct efx_nic *efx)
645 WARN_ON(channel->rx_pkt_n_frags); 645 WARN_ON(channel->rx_pkt_n_frags);
646 } 646 }
647 647
648 efx_ptp_start_datapath(efx);
649
648 if (netif_device_present(efx->net_dev)) 650 if (netif_device_present(efx->net_dev))
649 netif_tx_wake_all_queues(efx->net_dev); 651 netif_tx_wake_all_queues(efx->net_dev);
650} 652}
@@ -659,6 +661,8 @@ static void efx_stop_datapath(struct efx_nic *efx)
659 EFX_ASSERT_RESET_SERIALISED(efx); 661 EFX_ASSERT_RESET_SERIALISED(efx);
660 BUG_ON(efx->port_enabled); 662 BUG_ON(efx->port_enabled);
661 663
664 efx_ptp_stop_datapath(efx);
665
662 /* Stop RX refill */ 666 /* Stop RX refill */
663 efx_for_each_channel(channel, efx) { 667 efx_for_each_channel(channel, efx) {
664 efx_for_each_channel_rx_queue(rx_queue, channel) 668 efx_for_each_channel_rx_queue(rx_queue, channel)
@@ -2540,6 +2544,8 @@ static int efx_init_struct(struct efx_nic *efx,
2540 2544
2541 efx->net_dev = net_dev; 2545 efx->net_dev = net_dev;
2542 efx->rx_prefix_size = efx->type->rx_prefix_size; 2546 efx->rx_prefix_size = efx->type->rx_prefix_size;
2547 efx->rx_ip_align =
2548 NET_IP_ALIGN ? (efx->rx_prefix_size + NET_IP_ALIGN) % 4 : 0;
2543 efx->rx_packet_hash_offset = 2549 efx->rx_packet_hash_offset =
2544 efx->type->rx_hash_offset - efx->type->rx_prefix_size; 2550 efx->type->rx_hash_offset - efx->type->rx_prefix_size;
2545 spin_lock_init(&efx->stats_lock); 2551 spin_lock_init(&efx->stats_lock);
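The new efx->rx_ip_align keeps the IP header aligned as NET_IP_ALIGN requests while accounting for the RX prefix the NIC prepends, and efx_start_datapath() then sizes buffers from it instead of NET_IP_ALIGN directly. The computation itself is tiny (values below are illustrative):

    #include <stdio.h>

    int main(void)
    {
            unsigned int net_ip_align  = 2;   /* illustrative NET_IP_ALIGN */
            unsigned int rx_prefix_size = 14; /* illustrative NIC RX prefix */

            unsigned int rx_ip_align =
                    net_ip_align ? (rx_prefix_size + net_ip_align) % 4 : 0;

            printf("rx_ip_align = %u\n", rx_ip_align);
            return 0;
    }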
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index 366c8e3e3784..4b0bd8a1514d 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -50,6 +50,7 @@ struct efx_mcdi_async_param {
50static void efx_mcdi_timeout_async(unsigned long context); 50static void efx_mcdi_timeout_async(unsigned long context);
51static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating, 51static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
52 bool *was_attached_out); 52 bool *was_attached_out);
53static bool efx_mcdi_poll_once(struct efx_nic *efx);
53 54
54static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx) 55static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx)
55{ 56{
@@ -237,6 +238,21 @@ static void efx_mcdi_read_response_header(struct efx_nic *efx)
237 } 238 }
238} 239}
239 240
241static bool efx_mcdi_poll_once(struct efx_nic *efx)
242{
243 struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
244
245 rmb();
246 if (!efx->type->mcdi_poll_response(efx))
247 return false;
248
249 spin_lock_bh(&mcdi->iface_lock);
250 efx_mcdi_read_response_header(efx);
251 spin_unlock_bh(&mcdi->iface_lock);
252
253 return true;
254}
255
240static int efx_mcdi_poll(struct efx_nic *efx) 256static int efx_mcdi_poll(struct efx_nic *efx)
241{ 257{
242 struct efx_mcdi_iface *mcdi = efx_mcdi(efx); 258 struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
@@ -272,18 +288,13 @@ static int efx_mcdi_poll(struct efx_nic *efx)
272 288
273 time = jiffies; 289 time = jiffies;
274 290
275 rmb(); 291 if (efx_mcdi_poll_once(efx))
276 if (efx->type->mcdi_poll_response(efx))
277 break; 292 break;
278 293
279 if (time_after(time, finish)) 294 if (time_after(time, finish))
280 return -ETIMEDOUT; 295 return -ETIMEDOUT;
281 } 296 }
282 297
283 spin_lock_bh(&mcdi->iface_lock);
284 efx_mcdi_read_response_header(efx);
285 spin_unlock_bh(&mcdi->iface_lock);
286
287 /* Return rc=0 like wait_event_timeout() */ 298 /* Return rc=0 like wait_event_timeout() */
288 return 0; 299 return 0;
289} 300}
@@ -619,6 +630,16 @@ int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
619 rc = efx_mcdi_await_completion(efx); 630 rc = efx_mcdi_await_completion(efx);
620 631
621 if (rc != 0) { 632 if (rc != 0) {
633 netif_err(efx, hw, efx->net_dev,
634 "MC command 0x%x inlen %d mode %d timed out\n",
635 cmd, (int)inlen, mcdi->mode);
636
637 if (mcdi->mode == MCDI_MODE_EVENTS && efx_mcdi_poll_once(efx)) {
638 netif_err(efx, hw, efx->net_dev,
639 "MCDI request was completed without an event\n");
640 rc = 0;
641 }
642
622 /* Close the race with efx_mcdi_ev_cpl() executing just too late 643 /* Close the race with efx_mcdi_ev_cpl() executing just too late
623 * and completing a request we've just cancelled, by ensuring 644 * and completing a request we've just cancelled, by ensuring
624 * that the seqno check therein fails. 645 * that the seqno check therein fails.
@@ -627,11 +648,9 @@ int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
627 ++mcdi->seqno; 648 ++mcdi->seqno;
628 ++mcdi->credits; 649 ++mcdi->credits;
629 spin_unlock_bh(&mcdi->iface_lock); 650 spin_unlock_bh(&mcdi->iface_lock);
651 }
630 652
631 netif_err(efx, hw, efx->net_dev, 653 if (rc == 0) {
632 "MC command 0x%x inlen %d mode %d timed out\n",
633 cmd, (int)inlen, mcdi->mode);
634 } else {
635 size_t hdr_len, data_len; 654 size_t hdr_len, data_len;
636 655
637 /* At the very least we need a memory barrier here to ensure 656 /* At the very least we need a memory barrier here to ensure
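Factoring the poll into efx_mcdi_poll_once() lets the timeout path in efx_mcdi_rpc_finish() make one final check: in event-driven mode the request may have completed even though the completion event was lost, in which case the call is treated as successful rather than timed out. A compact sketch of that recovery (arguments and logging elided):

    /* Sketch of the timeout recovery added above. */
    rc = efx_mcdi_await_completion(efx);
    if (rc != 0 && mcdi->mode == MCDI_MODE_EVENTS && efx_mcdi_poll_once(efx)) {
            /* The command actually finished; only the event was lost. */
            rc = 0;
    }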
diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h
index 656a3277c2b2..15816cacb548 100644
--- a/drivers/net/ethernet/sfc/mcdi.h
+++ b/drivers/net/ethernet/sfc/mcdi.h
@@ -75,6 +75,8 @@ struct efx_mcdi_mon {
75 unsigned long last_update; 75 unsigned long last_update;
76 struct device *device; 76 struct device *device;
77 struct efx_mcdi_mon_attribute *attrs; 77 struct efx_mcdi_mon_attribute *attrs;
78 struct attribute_group group;
79 const struct attribute_group *groups[2];
78 unsigned int n_attrs; 80 unsigned int n_attrs;
79}; 81};
80 82
diff --git a/drivers/net/ethernet/sfc/mcdi_mon.c b/drivers/net/ethernet/sfc/mcdi_mon.c
index 4cc5d95b2a5a..d72ad4fc3617 100644
--- a/drivers/net/ethernet/sfc/mcdi_mon.c
+++ b/drivers/net/ethernet/sfc/mcdi_mon.c
@@ -139,17 +139,10 @@ static int efx_mcdi_mon_update(struct efx_nic *efx)
139 return rc; 139 return rc;
140} 140}
141 141
142static ssize_t efx_mcdi_mon_show_name(struct device *dev,
143 struct device_attribute *attr,
144 char *buf)
145{
146 return sprintf(buf, "%s\n", KBUILD_MODNAME);
147}
148
149static int efx_mcdi_mon_get_entry(struct device *dev, unsigned int index, 142static int efx_mcdi_mon_get_entry(struct device *dev, unsigned int index,
150 efx_dword_t *entry) 143 efx_dword_t *entry)
151{ 144{
152 struct efx_nic *efx = dev_get_drvdata(dev); 145 struct efx_nic *efx = dev_get_drvdata(dev->parent);
153 struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx); 146 struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx);
154 int rc; 147 int rc;
155 148
@@ -263,7 +256,7 @@ static ssize_t efx_mcdi_mon_show_label(struct device *dev,
263 efx_mcdi_sensor_type[mon_attr->type].label); 256 efx_mcdi_sensor_type[mon_attr->type].label);
264} 257}
265 258
266static int 259static void
267efx_mcdi_mon_add_attr(struct efx_nic *efx, const char *name, 260efx_mcdi_mon_add_attr(struct efx_nic *efx, const char *name,
268 ssize_t (*reader)(struct device *, 261 ssize_t (*reader)(struct device *,
269 struct device_attribute *, char *), 262 struct device_attribute *, char *),
@@ -272,7 +265,6 @@ efx_mcdi_mon_add_attr(struct efx_nic *efx, const char *name,
272{ 265{
273 struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx); 266 struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx);
274 struct efx_mcdi_mon_attribute *attr = &hwmon->attrs[hwmon->n_attrs]; 267 struct efx_mcdi_mon_attribute *attr = &hwmon->attrs[hwmon->n_attrs];
275 int rc;
276 268
277 strlcpy(attr->name, name, sizeof(attr->name)); 269 strlcpy(attr->name, name, sizeof(attr->name));
278 attr->index = index; 270 attr->index = index;
@@ -286,10 +278,7 @@ efx_mcdi_mon_add_attr(struct efx_nic *efx, const char *name,
286 attr->dev_attr.attr.name = attr->name; 278 attr->dev_attr.attr.name = attr->name;
287 attr->dev_attr.attr.mode = S_IRUGO; 279 attr->dev_attr.attr.mode = S_IRUGO;
288 attr->dev_attr.show = reader; 280 attr->dev_attr.show = reader;
289 rc = device_create_file(&efx->pci_dev->dev, &attr->dev_attr); 281 hwmon->group.attrs[hwmon->n_attrs++] = &attr->dev_attr.attr;
290 if (rc == 0)
291 ++hwmon->n_attrs;
292 return rc;
293} 282}
294 283
295int efx_mcdi_mon_probe(struct efx_nic *efx) 284int efx_mcdi_mon_probe(struct efx_nic *efx)
@@ -338,26 +327,22 @@ int efx_mcdi_mon_probe(struct efx_nic *efx)
338 efx_mcdi_mon_update(efx); 327 efx_mcdi_mon_update(efx);
339 328
340 /* Allocate space for the maximum possible number of 329 /* Allocate space for the maximum possible number of
341 * attributes for this set of sensors: name of the driver plus 330 * attributes for this set of sensors:
342 * value, min, max, crit, alarm and label for each sensor. 331 * value, min, max, crit, alarm and label for each sensor.
343 */ 332 */
344 n_attrs = 1 + 6 * n_sensors; 333 n_attrs = 6 * n_sensors;
345 hwmon->attrs = kcalloc(n_attrs, sizeof(*hwmon->attrs), GFP_KERNEL); 334 hwmon->attrs = kcalloc(n_attrs, sizeof(*hwmon->attrs), GFP_KERNEL);
346 if (!hwmon->attrs) { 335 if (!hwmon->attrs) {
347 rc = -ENOMEM; 336 rc = -ENOMEM;
348 goto fail; 337 goto fail;
349 } 338 }
350 339 hwmon->group.attrs = kcalloc(n_attrs + 1, sizeof(struct attribute *),
351 hwmon->device = hwmon_device_register(&efx->pci_dev->dev); 340 GFP_KERNEL);
352 if (IS_ERR(hwmon->device)) { 341 if (!hwmon->group.attrs) {
353 rc = PTR_ERR(hwmon->device); 342 rc = -ENOMEM;
354 goto fail; 343 goto fail;
355 } 344 }
356 345
357 rc = efx_mcdi_mon_add_attr(efx, "name", efx_mcdi_mon_show_name, 0, 0, 0);
358 if (rc)
359 goto fail;
360
361 for (i = 0, j = -1, type = -1; ; i++) { 346 for (i = 0, j = -1, type = -1; ; i++) {
362 enum efx_hwmon_type hwmon_type; 347 enum efx_hwmon_type hwmon_type;
363 const char *hwmon_prefix; 348 const char *hwmon_prefix;
@@ -372,7 +357,7 @@ int efx_mcdi_mon_probe(struct efx_nic *efx)
372 page = type / 32; 357 page = type / 32;
373 j = -1; 358 j = -1;
374 if (page == n_pages) 359 if (page == n_pages)
375 return 0; 360 goto hwmon_register;
376 361
377 MCDI_SET_DWORD(inbuf, SENSOR_INFO_EXT_IN_PAGE, 362 MCDI_SET_DWORD(inbuf, SENSOR_INFO_EXT_IN_PAGE,
378 page); 363 page);
@@ -453,28 +438,22 @@ int efx_mcdi_mon_probe(struct efx_nic *efx)
453 if (min1 != max1) { 438 if (min1 != max1) {
454 snprintf(name, sizeof(name), "%s%u_input", 439 snprintf(name, sizeof(name), "%s%u_input",
455 hwmon_prefix, hwmon_index); 440 hwmon_prefix, hwmon_index);
456 rc = efx_mcdi_mon_add_attr( 441 efx_mcdi_mon_add_attr(
457 efx, name, efx_mcdi_mon_show_value, i, type, 0); 442 efx, name, efx_mcdi_mon_show_value, i, type, 0);
458 if (rc)
459 goto fail;
460 443
461 if (hwmon_type != EFX_HWMON_POWER) { 444 if (hwmon_type != EFX_HWMON_POWER) {
462 snprintf(name, sizeof(name), "%s%u_min", 445 snprintf(name, sizeof(name), "%s%u_min",
463 hwmon_prefix, hwmon_index); 446 hwmon_prefix, hwmon_index);
464 rc = efx_mcdi_mon_add_attr( 447 efx_mcdi_mon_add_attr(
465 efx, name, efx_mcdi_mon_show_limit, 448 efx, name, efx_mcdi_mon_show_limit,
466 i, type, min1); 449 i, type, min1);
467 if (rc)
468 goto fail;
469 } 450 }
470 451
471 snprintf(name, sizeof(name), "%s%u_max", 452 snprintf(name, sizeof(name), "%s%u_max",
472 hwmon_prefix, hwmon_index); 453 hwmon_prefix, hwmon_index);
473 rc = efx_mcdi_mon_add_attr( 454 efx_mcdi_mon_add_attr(
474 efx, name, efx_mcdi_mon_show_limit, 455 efx, name, efx_mcdi_mon_show_limit,
475 i, type, max1); 456 i, type, max1);
476 if (rc)
477 goto fail;
478 457
479 if (min2 != max2) { 458 if (min2 != max2) {
480 /* Assume max2 is critical value. 459 /* Assume max2 is critical value.
@@ -482,32 +461,38 @@ int efx_mcdi_mon_probe(struct efx_nic *efx)
482 */ 461 */
483 snprintf(name, sizeof(name), "%s%u_crit", 462 snprintf(name, sizeof(name), "%s%u_crit",
484 hwmon_prefix, hwmon_index); 463 hwmon_prefix, hwmon_index);
485 rc = efx_mcdi_mon_add_attr( 464 efx_mcdi_mon_add_attr(
486 efx, name, efx_mcdi_mon_show_limit, 465 efx, name, efx_mcdi_mon_show_limit,
487 i, type, max2); 466 i, type, max2);
488 if (rc)
489 goto fail;
490 } 467 }
491 } 468 }
492 469
493 snprintf(name, sizeof(name), "%s%u_alarm", 470 snprintf(name, sizeof(name), "%s%u_alarm",
494 hwmon_prefix, hwmon_index); 471 hwmon_prefix, hwmon_index);
495 rc = efx_mcdi_mon_add_attr( 472 efx_mcdi_mon_add_attr(
496 efx, name, efx_mcdi_mon_show_alarm, i, type, 0); 473 efx, name, efx_mcdi_mon_show_alarm, i, type, 0);
497 if (rc)
498 goto fail;
499 474
500 if (type < ARRAY_SIZE(efx_mcdi_sensor_type) && 475 if (type < ARRAY_SIZE(efx_mcdi_sensor_type) &&
501 efx_mcdi_sensor_type[type].label) { 476 efx_mcdi_sensor_type[type].label) {
502 snprintf(name, sizeof(name), "%s%u_label", 477 snprintf(name, sizeof(name), "%s%u_label",
503 hwmon_prefix, hwmon_index); 478 hwmon_prefix, hwmon_index);
504 rc = efx_mcdi_mon_add_attr( 479 efx_mcdi_mon_add_attr(
505 efx, name, efx_mcdi_mon_show_label, i, type, 0); 480 efx, name, efx_mcdi_mon_show_label, i, type, 0);
506 if (rc)
507 goto fail;
508 } 481 }
509 } 482 }
510 483
484hwmon_register:
485 hwmon->groups[0] = &hwmon->group;
486 hwmon->device = hwmon_device_register_with_groups(&efx->pci_dev->dev,
487 KBUILD_MODNAME, NULL,
488 hwmon->groups);
489 if (IS_ERR(hwmon->device)) {
490 rc = PTR_ERR(hwmon->device);
491 goto fail;
492 }
493
494 return 0;
495
511fail: 496fail:
512 efx_mcdi_mon_remove(efx); 497 efx_mcdi_mon_remove(efx);
513 return rc; 498 return rc;
@@ -516,14 +501,11 @@ fail:
516void efx_mcdi_mon_remove(struct efx_nic *efx) 501void efx_mcdi_mon_remove(struct efx_nic *efx)
517{ 502{
518 struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx); 503 struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx);
519 unsigned int i;
520 504
521 for (i = 0; i < hwmon->n_attrs; i++)
522 device_remove_file(&efx->pci_dev->dev,
523 &hwmon->attrs[i].dev_attr);
524 kfree(hwmon->attrs);
525 if (hwmon->device) 505 if (hwmon->device)
526 hwmon_device_unregister(hwmon->device); 506 hwmon_device_unregister(hwmon->device);
507 kfree(hwmon->attrs);
508 kfree(hwmon->group.attrs);
527 efx_nic_free_buffer(efx, &hwmon->dma_buf); 509 efx_nic_free_buffer(efx, &hwmon->dma_buf);
528} 510}
529 511
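mcdi_mon.c now collects all sensor attributes into one attribute_group and hands it to hwmon_device_register_with_groups(), which also provides the hwmon "name" attribute, so the hand-rolled name attribute and the per-attribute device_create_file()/device_remove_file() calls go away; the show callbacks switch to dev_get_drvdata(dev->parent) because the hwmon device is now a child of the PCI device. The registration pattern (illustrative fragment; both arrays must stay NULL-terminated, which the zeroed allocations above guarantee):

    /* n_attrs pointers + 1 NULL terminator; kcalloc() zeroes the array. */
    hwmon->group.attrs = kcalloc(n_attrs + 1, sizeof(struct attribute *),
                                 GFP_KERNEL);
    /* ... one slot filled per sensor attribute ... */
    hwmon->group.attrs[hwmon->n_attrs++] = &attr->dev_attr.attr;

    hwmon->groups[0] = &hwmon->group;            /* groups[1] remains NULL */
    hwmon->device = hwmon_device_register_with_groups(&efx->pci_dev->dev,
                                                       KBUILD_MODNAME, NULL,
                                                       hwmon->groups);
    if (IS_ERR(hwmon->device))
            return PTR_ERR(hwmon->device);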
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index b14a717ac3e8..542a0d252ae0 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -683,6 +683,8 @@ struct vfdi_status;
683 * @n_channels: Number of channels in use 683 * @n_channels: Number of channels in use
684 * @n_rx_channels: Number of channels used for RX (= number of RX queues) 684 * @n_rx_channels: Number of channels used for RX (= number of RX queues)
685 * @n_tx_channels: Number of channels used for TX 685 * @n_tx_channels: Number of channels used for TX
686 * @rx_ip_align: RX DMA address offset to have IP header aligned in
687 * in accordance with NET_IP_ALIGN
686 * @rx_dma_len: Current maximum RX DMA length 688 * @rx_dma_len: Current maximum RX DMA length
687 * @rx_buffer_order: Order (log2) of number of pages for each RX buffer 689 * @rx_buffer_order: Order (log2) of number of pages for each RX buffer
688 * @rx_buffer_truesize: Amortised allocation size of an RX buffer, 690 * @rx_buffer_truesize: Amortised allocation size of an RX buffer,
@@ -816,6 +818,7 @@ struct efx_nic {
816 unsigned rss_spread; 818 unsigned rss_spread;
817 unsigned tx_channel_offset; 819 unsigned tx_channel_offset;
818 unsigned n_tx_channels; 820 unsigned n_tx_channels;
821 unsigned int rx_ip_align;
819 unsigned int rx_dma_len; 822 unsigned int rx_dma_len;
820 unsigned int rx_buffer_order; 823 unsigned int rx_buffer_order;
821 unsigned int rx_buffer_truesize; 824 unsigned int rx_buffer_truesize;
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index 11b6112d9249..91c63ec79c5f 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -560,6 +560,8 @@ void efx_ptp_get_ts_info(struct efx_nic *efx, struct ethtool_ts_info *ts_info);
560bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb); 560bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
561int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb); 561int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
562void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev); 562void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev);
563void efx_ptp_start_datapath(struct efx_nic *efx);
564void efx_ptp_stop_datapath(struct efx_nic *efx);
563 565
564extern const struct efx_nic_type falcon_a1_nic_type; 566extern const struct efx_nic_type falcon_a1_nic_type;
565extern const struct efx_nic_type falcon_b0_nic_type; 567extern const struct efx_nic_type falcon_b0_nic_type;
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
index 03acf57df045..3dd39dcfe36b 100644
--- a/drivers/net/ethernet/sfc/ptp.c
+++ b/drivers/net/ethernet/sfc/ptp.c
@@ -220,6 +220,7 @@ struct efx_ptp_timeset {
220 * @evt_list: List of MC receive events awaiting packets 220 * @evt_list: List of MC receive events awaiting packets
221 * @evt_free_list: List of free events 221 * @evt_free_list: List of free events
222 * @evt_lock: Lock for manipulating evt_list and evt_free_list 222 * @evt_lock: Lock for manipulating evt_list and evt_free_list
223 * @evt_overflow: Boolean indicating that event list has overflowed
223 * @rx_evts: Instantiated events (on evt_list and evt_free_list) 224 * @rx_evts: Instantiated events (on evt_list and evt_free_list)
224 * @workwq: Work queue for processing pending PTP operations 225 * @workwq: Work queue for processing pending PTP operations
225 * @work: Work task 226 * @work: Work task
@@ -270,6 +271,7 @@ struct efx_ptp_data {
270 struct list_head evt_list; 271 struct list_head evt_list;
271 struct list_head evt_free_list; 272 struct list_head evt_free_list;
272 spinlock_t evt_lock; 273 spinlock_t evt_lock;
274 bool evt_overflow;
273 struct efx_ptp_event_rx rx_evts[MAX_RECEIVE_EVENTS]; 275 struct efx_ptp_event_rx rx_evts[MAX_RECEIVE_EVENTS];
274 struct workqueue_struct *workwq; 276 struct workqueue_struct *workwq;
275 struct work_struct work; 277 struct work_struct work;
@@ -635,6 +637,11 @@ static void efx_ptp_drop_time_expired_events(struct efx_nic *efx)
635 } 637 }
636 } 638 }
637 } 639 }
640 /* If the event overflow flag is set and the event list is now empty
641 * clear the flag to re-enable the overflow warning message.
642 */
643 if (ptp->evt_overflow && list_empty(&ptp->evt_list))
644 ptp->evt_overflow = false;
638 spin_unlock_bh(&ptp->evt_lock); 645 spin_unlock_bh(&ptp->evt_lock);
639} 646}
640 647
@@ -676,6 +683,11 @@ static enum ptp_packet_state efx_ptp_match_rx(struct efx_nic *efx,
676 break; 683 break;
677 } 684 }
678 } 685 }
686 /* If the event overflow flag is set and the event list is now empty
687 * clear the flag to re-enable the overflow warning message.
688 */
689 if (ptp->evt_overflow && list_empty(&ptp->evt_list))
690 ptp->evt_overflow = false;
679 spin_unlock_bh(&ptp->evt_lock); 691 spin_unlock_bh(&ptp->evt_lock);
680 692
681 return rc; 693 return rc;
@@ -705,8 +717,9 @@ static bool efx_ptp_process_events(struct efx_nic *efx, struct sk_buff_head *q)
705 __skb_queue_tail(q, skb); 717 __skb_queue_tail(q, skb);
706 } else if (time_after(jiffies, match->expiry)) { 718 } else if (time_after(jiffies, match->expiry)) {
707 match->state = PTP_PACKET_STATE_TIMED_OUT; 719 match->state = PTP_PACKET_STATE_TIMED_OUT;
708 netif_warn(efx, rx_err, efx->net_dev, 720 if (net_ratelimit())
709 "PTP packet - no timestamp seen\n"); 721 netif_warn(efx, rx_err, efx->net_dev,
722 "PTP packet - no timestamp seen\n");
710 __skb_queue_tail(q, skb); 723 __skb_queue_tail(q, skb);
711 } else { 724 } else {
712 /* Replace unprocessed entry and stop */ 725 /* Replace unprocessed entry and stop */
@@ -788,9 +801,14 @@ fail:
788static int efx_ptp_stop(struct efx_nic *efx) 801static int efx_ptp_stop(struct efx_nic *efx)
789{ 802{
790 struct efx_ptp_data *ptp = efx->ptp_data; 803 struct efx_ptp_data *ptp = efx->ptp_data;
791 int rc = efx_ptp_disable(efx);
792 struct list_head *cursor; 804 struct list_head *cursor;
793 struct list_head *next; 805 struct list_head *next;
806 int rc;
807
808 if (ptp == NULL)
809 return 0;
810
811 rc = efx_ptp_disable(efx);
794 812
795 if (ptp->rxfilter_installed) { 813 if (ptp->rxfilter_installed) {
796 efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED, 814 efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED,
@@ -809,11 +827,19 @@ static int efx_ptp_stop(struct efx_nic *efx)
809 list_for_each_safe(cursor, next, &efx->ptp_data->evt_list) { 827 list_for_each_safe(cursor, next, &efx->ptp_data->evt_list) {
810 list_move(cursor, &efx->ptp_data->evt_free_list); 828 list_move(cursor, &efx->ptp_data->evt_free_list);
811 } 829 }
830 ptp->evt_overflow = false;
812 spin_unlock_bh(&efx->ptp_data->evt_lock); 831 spin_unlock_bh(&efx->ptp_data->evt_lock);
813 832
814 return rc; 833 return rc;
815} 834}
816 835
836static int efx_ptp_restart(struct efx_nic *efx)
837{
838 if (efx->ptp_data && efx->ptp_data->enabled)
839 return efx_ptp_start(efx);
840 return 0;
841}
842
817static void efx_ptp_pps_worker(struct work_struct *work) 843static void efx_ptp_pps_worker(struct work_struct *work)
818{ 844{
819 struct efx_ptp_data *ptp = 845 struct efx_ptp_data *ptp =
@@ -901,6 +927,7 @@ static int efx_ptp_probe_channel(struct efx_channel *channel)
901 spin_lock_init(&ptp->evt_lock); 927 spin_lock_init(&ptp->evt_lock);
902 for (pos = 0; pos < MAX_RECEIVE_EVENTS; pos++) 928 for (pos = 0; pos < MAX_RECEIVE_EVENTS; pos++)
903 list_add(&ptp->rx_evts[pos].link, &ptp->evt_free_list); 929 list_add(&ptp->rx_evts[pos].link, &ptp->evt_free_list);
930 ptp->evt_overflow = false;
904 931
905 ptp->phc_clock_info.owner = THIS_MODULE; 932 ptp->phc_clock_info.owner = THIS_MODULE;
906 snprintf(ptp->phc_clock_info.name, 933 snprintf(ptp->phc_clock_info.name,
@@ -989,7 +1016,11 @@ bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb)
989 skb->len >= PTP_MIN_LENGTH && 1016 skb->len >= PTP_MIN_LENGTH &&
990 skb->len <= MC_CMD_PTP_IN_TRANSMIT_PACKET_MAXNUM && 1017 skb->len <= MC_CMD_PTP_IN_TRANSMIT_PACKET_MAXNUM &&
991 likely(skb->protocol == htons(ETH_P_IP)) && 1018 likely(skb->protocol == htons(ETH_P_IP)) &&
1019 skb_transport_header_was_set(skb) &&
1020 skb_network_header_len(skb) >= sizeof(struct iphdr) &&
992 ip_hdr(skb)->protocol == IPPROTO_UDP && 1021 ip_hdr(skb)->protocol == IPPROTO_UDP &&
1022 skb_headlen(skb) >=
1023 skb_transport_offset(skb) + sizeof(struct udphdr) &&
993 udp_hdr(skb)->dest == htons(PTP_EVENT_PORT); 1024 udp_hdr(skb)->dest == htons(PTP_EVENT_PORT);
994} 1025}
995 1026
@@ -1106,7 +1137,7 @@ static int efx_ptp_change_mode(struct efx_nic *efx, bool enable_wanted,
1106{ 1137{
1107 if ((enable_wanted != efx->ptp_data->enabled) || 1138 if ((enable_wanted != efx->ptp_data->enabled) ||
1108 (enable_wanted && (efx->ptp_data->mode != new_mode))) { 1139 (enable_wanted && (efx->ptp_data->mode != new_mode))) {
1109 int rc; 1140 int rc = 0;
1110 1141
1111 if (enable_wanted) { 1142 if (enable_wanted) {
1112 /* Change of mode requires disable */ 1143 /* Change of mode requires disable */
@@ -1123,7 +1154,8 @@ static int efx_ptp_change_mode(struct efx_nic *efx, bool enable_wanted,
1123 * succeed. 1154 * succeed.
1124 */ 1155 */
1125 efx->ptp_data->mode = new_mode; 1156 efx->ptp_data->mode = new_mode;
1126 rc = efx_ptp_start(efx); 1157 if (netif_running(efx->net_dev))
1158 rc = efx_ptp_start(efx);
1127 if (rc == 0) { 1159 if (rc == 0) {
1128 rc = efx_ptp_synchronize(efx, 1160 rc = efx_ptp_synchronize(efx,
1129 PTP_SYNC_ATTEMPTS * 2); 1161 PTP_SYNC_ATTEMPTS * 2);
@@ -1295,8 +1327,13 @@ static void ptp_event_rx(struct efx_nic *efx, struct efx_ptp_data *ptp)
1295 list_add_tail(&evt->link, &ptp->evt_list); 1327 list_add_tail(&evt->link, &ptp->evt_list);
1296 1328
1297 queue_work(ptp->workwq, &ptp->work); 1329 queue_work(ptp->workwq, &ptp->work);
1298 } else { 1330 } else if (!ptp->evt_overflow) {
1299 netif_err(efx, rx_err, efx->net_dev, "No free PTP event"); 1331 /* Log a warning message and set the event overflow flag.
1332 * The message won't be logged again until the event queue
1333 * becomes empty.
1334 */
1335 netif_err(efx, rx_err, efx->net_dev, "PTP event queue overflow\n");
1336 ptp->evt_overflow = true;
1300 } 1337 }
1301 spin_unlock_bh(&ptp->evt_lock); 1338 spin_unlock_bh(&ptp->evt_lock);
1302} 1339}
@@ -1389,7 +1426,7 @@ static int efx_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta)
1389 if (rc != 0) 1426 if (rc != 0)
1390 return rc; 1427 return rc;
1391 1428
1392 ptp_data->current_adjfreq = delta; 1429 ptp_data->current_adjfreq = adjustment_ns;
1393 return 0; 1430 return 0;
1394} 1431}
1395 1432
@@ -1404,7 +1441,7 @@ static int efx_phc_adjtime(struct ptp_clock_info *ptp, s64 delta)
1404 1441
1405 MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_ADJUST); 1442 MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_ADJUST);
1406 MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0); 1443 MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
1407 MCDI_SET_QWORD(inbuf, PTP_IN_ADJUST_FREQ, 0); 1444 MCDI_SET_QWORD(inbuf, PTP_IN_ADJUST_FREQ, ptp_data->current_adjfreq);
1408 MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_SECONDS, (u32)delta_ts.tv_sec); 1445 MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_SECONDS, (u32)delta_ts.tv_sec);
1409 MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_NANOSECONDS, (u32)delta_ts.tv_nsec); 1446 MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_NANOSECONDS, (u32)delta_ts.tv_nsec);
1410 return efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf), 1447 return efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
@@ -1491,3 +1528,14 @@ void efx_ptp_probe(struct efx_nic *efx)
1491 efx->extra_channel_type[EFX_EXTRA_CHANNEL_PTP] = 1528 efx->extra_channel_type[EFX_EXTRA_CHANNEL_PTP] =
1492 &efx_ptp_channel_type; 1529 &efx_ptp_channel_type;
1493} 1530}
1531
1532void efx_ptp_start_datapath(struct efx_nic *efx)
1533{
1534 if (efx_ptp_restart(efx))
1535 netif_err(efx, drv, efx->net_dev, "Failed to restart PTP.\n");
1536}
1537
1538void efx_ptp_stop_datapath(struct efx_nic *efx)
1539{
1540 efx_ptp_stop(efx);
1541}
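
The ptp.c changes above add an evt_overflow flag so the "PTP event queue overflow" message is logged once and only re-armed after the event list drains, and put the "no timestamp seen" warning behind net_ratelimit(). Below is a minimal user-space sketch of that one-shot flag pattern; the names (evt_pool, evt_push) are illustrative, not taken from the driver.

/* One-shot overflow warning, re-armed only after the pending list drains. */
#include <stdbool.h>
#include <stdio.h>

#define MAX_EVENTS 8

struct evt_pool {
	int pending;		/* events waiting to be matched */
	int free;		/* free event slots */
	bool overflow;		/* warning already issued */
};

static void evt_push(struct evt_pool *p)
{
	if (p->free > 0) {
		p->free--;
		p->pending++;
	} else if (!p->overflow) {
		fprintf(stderr, "event queue overflow\n");
		p->overflow = true;	/* suppress repeats */
	}
}

static void evt_drain_one(struct evt_pool *p)
{
	if (p->pending > 0) {
		p->pending--;
		p->free++;
	}
	if (p->overflow && p->pending == 0)
		p->overflow = false;	/* queue empty again: re-arm warning */
}

int main(void)
{
	struct evt_pool p = { .pending = 0, .free = MAX_EVENTS, .overflow = false };
	int i;

	for (i = 0; i < MAX_EVENTS + 3; i++)	/* overflows, warns exactly once */
		evt_push(&p);
	while (p.pending)
		evt_drain_one(&p);		/* draining re-arms the warning */
	evt_push(&p);
	return 0;
}
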
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index 8f09e686fc23..42488df1f4ec 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -94,7 +94,7 @@ static inline void efx_sync_rx_buffer(struct efx_nic *efx,
94 94
95void efx_rx_config_page_split(struct efx_nic *efx) 95void efx_rx_config_page_split(struct efx_nic *efx)
96{ 96{
97 efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + NET_IP_ALIGN, 97 efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + efx->rx_ip_align,
98 EFX_RX_BUF_ALIGNMENT); 98 EFX_RX_BUF_ALIGNMENT);
99 efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 : 99 efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 :
100 ((PAGE_SIZE - sizeof(struct efx_rx_page_state)) / 100 ((PAGE_SIZE - sizeof(struct efx_rx_page_state)) /
@@ -189,9 +189,9 @@ static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue)
189 do { 189 do {
190 index = rx_queue->added_count & rx_queue->ptr_mask; 190 index = rx_queue->added_count & rx_queue->ptr_mask;
191 rx_buf = efx_rx_buffer(rx_queue, index); 191 rx_buf = efx_rx_buffer(rx_queue, index);
192 rx_buf->dma_addr = dma_addr + NET_IP_ALIGN; 192 rx_buf->dma_addr = dma_addr + efx->rx_ip_align;
193 rx_buf->page = page; 193 rx_buf->page = page;
194 rx_buf->page_offset = page_offset + NET_IP_ALIGN; 194 rx_buf->page_offset = page_offset + efx->rx_ip_align;
195 rx_buf->len = efx->rx_dma_len; 195 rx_buf->len = efx->rx_dma_len;
196 rx_buf->flags = 0; 196 rx_buf->flags = 0;
197 ++rx_queue->added_count; 197 ++rx_queue->added_count;
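
The rx.c hunks replace the compile-time NET_IP_ALIGN constant with a per-device efx->rx_ip_align when laying out receive buffers. The short standalone sketch below shows the same computation; the 64-byte alignment, 4 KiB page size and field names are assumed values for illustration only.

/* Buffer step rounded up to the hardware alignment, with a runtime IP offset. */
#include <stdio.h>

#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))
#define BUF_ALIGNMENT	64
#define PAGE_SZ		4096

struct dev_cfg {
	unsigned int rx_dma_len;	/* DMA length per receive buffer */
	unsigned int rx_ip_align;	/* runtime alignment offset (0 or 2) */
};

static unsigned int rx_page_buf_step(const struct dev_cfg *cfg)
{
	/* each buffer reserves rx_ip_align bytes in front of the frame */
	return ALIGN(cfg->rx_dma_len + cfg->rx_ip_align, BUF_ALIGNMENT);
}

int main(void)
{
	struct dev_cfg cfg = { .rx_dma_len = 1536, .rx_ip_align = 2 };
	unsigned int step = rx_page_buf_step(&cfg);

	printf("step %u, buffers per page %u\n", step, PAGE_SZ / step);
	return 0;
}
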
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 0c9b5d94154f..8bf29eb4a5a0 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -82,6 +82,7 @@ static const char version[] =
82#include <linux/mii.h> 82#include <linux/mii.h>
83#include <linux/workqueue.h> 83#include <linux/workqueue.h>
84#include <linux/of.h> 84#include <linux/of.h>
85#include <linux/of_device.h>
85 86
86#include <linux/netdevice.h> 87#include <linux/netdevice.h>
87#include <linux/etherdevice.h> 88#include <linux/etherdevice.h>
@@ -2184,6 +2185,15 @@ static void smc_release_datacs(struct platform_device *pdev, struct net_device *
2184 } 2185 }
2185} 2186}
2186 2187
2188#if IS_BUILTIN(CONFIG_OF)
2189static const struct of_device_id smc91x_match[] = {
2190 { .compatible = "smsc,lan91c94", },
2191 { .compatible = "smsc,lan91c111", },
2192 {},
2193};
2194MODULE_DEVICE_TABLE(of, smc91x_match);
2195#endif
2196
2187/* 2197/*
2188 * smc_init(void) 2198 * smc_init(void)
2189 * Input parameters: 2199 * Input parameters:
@@ -2198,6 +2208,7 @@ static void smc_release_datacs(struct platform_device *pdev, struct net_device *
2198static int smc_drv_probe(struct platform_device *pdev) 2208static int smc_drv_probe(struct platform_device *pdev)
2199{ 2209{
2200 struct smc91x_platdata *pd = dev_get_platdata(&pdev->dev); 2210 struct smc91x_platdata *pd = dev_get_platdata(&pdev->dev);
2211 const struct of_device_id *match = NULL;
2201 struct smc_local *lp; 2212 struct smc_local *lp;
2202 struct net_device *ndev; 2213 struct net_device *ndev;
2203 struct resource *res, *ires; 2214 struct resource *res, *ires;
@@ -2217,11 +2228,34 @@ static int smc_drv_probe(struct platform_device *pdev)
2217 */ 2228 */
2218 2229
2219 lp = netdev_priv(ndev); 2230 lp = netdev_priv(ndev);
2231 lp->cfg.flags = 0;
2220 2232
2221 if (pd) { 2233 if (pd) {
2222 memcpy(&lp->cfg, pd, sizeof(lp->cfg)); 2234 memcpy(&lp->cfg, pd, sizeof(lp->cfg));
2223 lp->io_shift = SMC91X_IO_SHIFT(lp->cfg.flags); 2235 lp->io_shift = SMC91X_IO_SHIFT(lp->cfg.flags);
2224 } else { 2236 }
2237
2238#if IS_BUILTIN(CONFIG_OF)
2239 match = of_match_device(of_match_ptr(smc91x_match), &pdev->dev);
2240 if (match) {
2241 struct device_node *np = pdev->dev.of_node;
2242 u32 val;
2243
2244 /* Combination of IO widths supported, default to 16-bit */
2245 if (!of_property_read_u32(np, "reg-io-width", &val)) {
2246 if (val & 1)
2247 lp->cfg.flags |= SMC91X_USE_8BIT;
2248 if ((val == 0) || (val & 2))
2249 lp->cfg.flags |= SMC91X_USE_16BIT;
2250 if (val & 4)
2251 lp->cfg.flags |= SMC91X_USE_32BIT;
2252 } else {
2253 lp->cfg.flags |= SMC91X_USE_16BIT;
2254 }
2255 }
2256#endif
2257
2258 if (!pd && !match) {
2225 lp->cfg.flags |= (SMC_CAN_USE_8BIT) ? SMC91X_USE_8BIT : 0; 2259 lp->cfg.flags |= (SMC_CAN_USE_8BIT) ? SMC91X_USE_8BIT : 0;
2226 lp->cfg.flags |= (SMC_CAN_USE_16BIT) ? SMC91X_USE_16BIT : 0; 2260 lp->cfg.flags |= (SMC_CAN_USE_16BIT) ? SMC91X_USE_16BIT : 0;
2227 lp->cfg.flags |= (SMC_CAN_USE_32BIT) ? SMC91X_USE_32BIT : 0; 2261 lp->cfg.flags |= (SMC_CAN_USE_32BIT) ? SMC91X_USE_32BIT : 0;
@@ -2370,15 +2404,6 @@ static int smc_drv_resume(struct device *dev)
2370 return 0; 2404 return 0;
2371} 2405}
2372 2406
2373#ifdef CONFIG_OF
2374static const struct of_device_id smc91x_match[] = {
2375 { .compatible = "smsc,lan91c94", },
2376 { .compatible = "smsc,lan91c111", },
2377 {},
2378};
2379MODULE_DEVICE_TABLE(of, smc91x_match);
2380#endif
2381
2382static struct dev_pm_ops smc_drv_pm_ops = { 2407static struct dev_pm_ops smc_drv_pm_ops = {
2383 .suspend = smc_drv_suspend, 2408 .suspend = smc_drv_suspend,
2384 .resume = smc_drv_resume, 2409 .resume = smc_drv_resume,
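
The smc91x.c probe above now builds lp->cfg.flags from an optional reg-io-width device-tree property, treated as a bit mask (bit 0 = 8-bit, bit 1 = 16-bit, bit 2 = 32-bit) with a 16-bit fallback when the property is absent or zero. A standalone sketch of that decoding follows; the flag names are illustrative stand-ins for the driver's SMC91X_USE_* constants.

/* Decode an I/O width bit mask with a 16-bit default. */
#include <stdbool.h>
#include <stdio.h>

#define USE_8BIT	(1 << 0)
#define USE_16BIT	(1 << 1)
#define USE_32BIT	(1 << 2)

static unsigned int decode_io_width(bool present, unsigned int val)
{
	unsigned int flags = 0;

	if (!present)
		return USE_16BIT;	/* property missing: default width */

	if (val & 1)
		flags |= USE_8BIT;
	if ((val == 0) || (val & 2))
		flags |= USE_16BIT;	/* zero also falls back to 16-bit */
	if (val & 4)
		flags |= USE_32BIT;

	return flags;
}

int main(void)
{
	printf("width mask for 4: %u\n", decode_io_width(true, 4));
	printf("default:          %u\n", decode_io_width(false, 0));
	return 0;
}
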
diff --git a/drivers/net/ethernet/smsc/smc91x.h b/drivers/net/ethernet/smsc/smc91x.h
index c9d4c872e81d..749654b976bc 100644
--- a/drivers/net/ethernet/smsc/smc91x.h
+++ b/drivers/net/ethernet/smsc/smc91x.h
@@ -46,7 +46,8 @@
46 defined(CONFIG_MACH_LITTLETON) ||\ 46 defined(CONFIG_MACH_LITTLETON) ||\
47 defined(CONFIG_MACH_ZYLONITE2) ||\ 47 defined(CONFIG_MACH_ZYLONITE2) ||\
48 defined(CONFIG_ARCH_VIPER) ||\ 48 defined(CONFIG_ARCH_VIPER) ||\
49 defined(CONFIG_MACH_STARGATE2) 49 defined(CONFIG_MACH_STARGATE2) ||\
50 defined(CONFIG_ARCH_VERSATILE)
50 51
51#include <asm/mach-types.h> 52#include <asm/mach-types.h>
52 53
@@ -154,6 +155,8 @@ static inline void SMC_outw(u16 val, void __iomem *ioaddr, int reg)
154#define SMC_outl(v, a, r) writel(v, (a) + (r)) 155#define SMC_outl(v, a, r) writel(v, (a) + (r))
155#define SMC_insl(a, r, p, l) readsl((a) + (r), p, l) 156#define SMC_insl(a, r, p, l) readsl((a) + (r), p, l)
156#define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l) 157#define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l)
158#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l)
159#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l)
157#define SMC_IRQ_FLAGS (-1) /* from resource */ 160#define SMC_IRQ_FLAGS (-1) /* from resource */
158 161
159/* We actually can't write halfwords properly if not word aligned */ 162/* We actually can't write halfwords properly if not word aligned */
@@ -206,23 +209,6 @@ SMC_outw(u16 val, void __iomem *ioaddr, int reg)
206#define RPC_LSA_DEFAULT RPC_LED_TX_RX 209#define RPC_LSA_DEFAULT RPC_LED_TX_RX
207#define RPC_LSB_DEFAULT RPC_LED_100_10 210#define RPC_LSB_DEFAULT RPC_LED_100_10
208 211
209#elif defined(CONFIG_ARCH_VERSATILE)
210
211#define SMC_CAN_USE_8BIT 1
212#define SMC_CAN_USE_16BIT 1
213#define SMC_CAN_USE_32BIT 1
214#define SMC_NOWAIT 1
215
216#define SMC_inb(a, r) readb((a) + (r))
217#define SMC_inw(a, r) readw((a) + (r))
218#define SMC_inl(a, r) readl((a) + (r))
219#define SMC_outb(v, a, r) writeb(v, (a) + (r))
220#define SMC_outw(v, a, r) writew(v, (a) + (r))
221#define SMC_outl(v, a, r) writel(v, (a) + (r))
222#define SMC_insl(a, r, p, l) readsl((a) + (r), p, l)
223#define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l)
224#define SMC_IRQ_FLAGS (-1) /* from resource */
225
226#elif defined(CONFIG_MN10300) 212#elif defined(CONFIG_MN10300)
227 213
228/* 214/*
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 8a7a23a84ac5..797b56a0efc4 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -622,17 +622,15 @@ static int stmmac_init_ptp(struct stmmac_priv *priv)
622 if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp)) 622 if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
623 return -EOPNOTSUPP; 623 return -EOPNOTSUPP;
624 624
625 if (netif_msg_hw(priv)) { 625 priv->adv_ts = 0;
626 if (priv->dma_cap.time_stamp) { 626 if (priv->dma_cap.atime_stamp && priv->extend_desc)
627 pr_debug("IEEE 1588-2002 Time Stamp supported\n"); 627 priv->adv_ts = 1;
628 priv->adv_ts = 0; 628
629 } 629 if (netif_msg_hw(priv) && priv->dma_cap.time_stamp)
630 if (priv->dma_cap.atime_stamp && priv->extend_desc) { 630 pr_debug("IEEE 1588-2002 Time Stamp supported\n");
631 pr_debug 631
632 ("IEEE 1588-2008 Advanced Time Stamp supported\n"); 632 if (netif_msg_hw(priv) && priv->adv_ts)
633 priv->adv_ts = 1; 633 pr_debug("IEEE 1588-2008 Advanced Time Stamp supported\n");
634 }
635 }
636 634
637 priv->hw->ptp = &stmmac_ptp; 635 priv->hw->ptp = &stmmac_ptp;
638 priv->hwts_tx_en = 0; 636 priv->hwts_tx_en = 0;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
index b8b0eeed0f92..7680581ebe12 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
@@ -56,7 +56,7 @@ static int stmmac_adjust_freq(struct ptp_clock_info *ptp, s32 ppb)
56 56
57 priv->hw->ptp->config_addend(priv->ioaddr, addend); 57 priv->hw->ptp->config_addend(priv->ioaddr, addend);
58 58
59 spin_unlock_irqrestore(&priv->lock, flags); 59 spin_unlock_irqrestore(&priv->ptp_lock, flags);
60 60
61 return 0; 61 return 0;
62} 62}
@@ -91,7 +91,7 @@ static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta)
91 91
92 priv->hw->ptp->adjust_systime(priv->ioaddr, sec, nsec, neg_adj); 92 priv->hw->ptp->adjust_systime(priv->ioaddr, sec, nsec, neg_adj);
93 93
94 spin_unlock_irqrestore(&priv->lock, flags); 94 spin_unlock_irqrestore(&priv->ptp_lock, flags);
95 95
96 return 0; 96 return 0;
97} 97}
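
The stmmac_ptp.c fix makes the unlock calls name priv->ptp_lock, the lock that was actually taken at the top of each callback, instead of priv->lock. The sketch below restates that pairing rule with user-space pthread mutexes; the field names are made up for illustration.

/* The unlock must always name the lock that was taken. */
#include <pthread.h>
#include <stdio.h>

struct priv {
	pthread_mutex_t dev_lock;	/* protects general device state */
	pthread_mutex_t ptp_lock;	/* protects only the PTP state */
	long ptp_addend;
};

static void adjust_freq(struct priv *p, long addend)
{
	pthread_mutex_lock(&p->ptp_lock);
	p->ptp_addend = addend;			/* touch PTP state only */
	pthread_mutex_unlock(&p->ptp_lock);	/* same lock, not &p->dev_lock */
}

int main(void)
{
	struct priv p = {
		.dev_lock = PTHREAD_MUTEX_INITIALIZER,
		.ptp_lock = PTHREAD_MUTEX_INITIALIZER,
	};

	adjust_freq(&p, 42);
	printf("addend %ld\n", p.ptp_addend);
	return 0;
}
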
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index dd0dd6279b4e..4f1d2549130e 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -2019,7 +2019,6 @@ bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2019 ndev->features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO 2019 ndev->features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO
2020 | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | 2020 | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
2021 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_RXCSUM 2021 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_RXCSUM
2022 /*| NETIF_F_FRAGLIST */
2023 ; 2022 ;
2024 ndev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | 2023 ndev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
2025 NETIF_F_TSO | NETIF_F_HW_VLAN_CTAG_TX; 2024 NETIF_F_TSO | NETIF_F_HW_VLAN_CTAG_TX;
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 7536a4c01293..5330fd298705 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -740,6 +740,8 @@ static void _cpsw_adjust_link(struct cpsw_slave *slave,
740 /* set speed_in input in case RMII mode is used in 100Mbps */ 740 /* set speed_in input in case RMII mode is used in 100Mbps */
741 if (phy->speed == 100) 741 if (phy->speed == 100)
742 mac_control |= BIT(15); 742 mac_control |= BIT(15);
743 else if (phy->speed == 10)
744 mac_control |= BIT(18); /* In Band mode */
743 745
744 *link = true; 746 *link = true;
745 } else { 747 } else {
@@ -1151,6 +1153,12 @@ static int cpsw_ndo_open(struct net_device *ndev)
1151 * receive descs 1153 * receive descs
1152 */ 1154 */
1153 cpsw_info(priv, ifup, "submitted %d rx descriptors\n", i); 1155 cpsw_info(priv, ifup, "submitted %d rx descriptors\n", i);
1156
1157 if (cpts_register(&priv->pdev->dev, priv->cpts,
1158 priv->data.cpts_clock_mult,
1159 priv->data.cpts_clock_shift))
1160 dev_err(priv->dev, "error registering cpts device\n");
1161
1154 } 1162 }
1155 1163
1156 /* Enable Interrupt pacing if configured */ 1164 /* Enable Interrupt pacing if configured */
@@ -1197,6 +1205,7 @@ static int cpsw_ndo_stop(struct net_device *ndev)
1197 netif_carrier_off(priv->ndev); 1205 netif_carrier_off(priv->ndev);
1198 1206
1199 if (cpsw_common_res_usage_state(priv) <= 1) { 1207 if (cpsw_common_res_usage_state(priv) <= 1) {
1208 cpts_unregister(priv->cpts);
1200 cpsw_intr_disable(priv); 1209 cpsw_intr_disable(priv);
1201 cpdma_ctlr_int_ctrl(priv->dma, false); 1210 cpdma_ctlr_int_ctrl(priv->dma, false);
1202 cpdma_ctlr_stop(priv->dma); 1211 cpdma_ctlr_stop(priv->dma);
@@ -1816,6 +1825,8 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
1816 } 1825 }
1817 1826
1818 i++; 1827 i++;
1828 if (i == data->slaves)
1829 break;
1819 } 1830 }
1820 1831
1821 return 0; 1832 return 0;
@@ -1983,9 +1994,15 @@ static int cpsw_probe(struct platform_device *pdev)
1983 goto clean_runtime_disable_ret; 1994 goto clean_runtime_disable_ret;
1984 } 1995 }
1985 priv->regs = ss_regs; 1996 priv->regs = ss_regs;
1986 priv->version = __raw_readl(&priv->regs->id_ver);
1987 priv->host_port = HOST_PORT_NUM; 1997 priv->host_port = HOST_PORT_NUM;
1988 1998
1999 /* Need to enable clocks with runtime PM api to access module
2000 * registers
2001 */
2002 pm_runtime_get_sync(&pdev->dev);
2003 priv->version = readl(&priv->regs->id_ver);
2004 pm_runtime_put_sync(&pdev->dev);
2005
1989 res = platform_get_resource(pdev, IORESOURCE_MEM, 1); 2006 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1990 priv->wr_regs = devm_ioremap_resource(&pdev->dev, res); 2007 priv->wr_regs = devm_ioremap_resource(&pdev->dev, res);
1991 if (IS_ERR(priv->wr_regs)) { 2008 if (IS_ERR(priv->wr_regs)) {
@@ -2091,7 +2108,7 @@ static int cpsw_probe(struct platform_device *pdev)
2091 while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k))) { 2108 while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k))) {
2092 for (i = res->start; i <= res->end; i++) { 2109 for (i = res->start; i <= res->end; i++) {
2093 if (devm_request_irq(&pdev->dev, i, cpsw_interrupt, 0, 2110 if (devm_request_irq(&pdev->dev, i, cpsw_interrupt, 0,
2094 dev_name(priv->dev), priv)) { 2111 dev_name(&pdev->dev), priv)) {
2095 dev_err(priv->dev, "error attaching irq\n"); 2112 dev_err(priv->dev, "error attaching irq\n");
2096 goto clean_ale_ret; 2113 goto clean_ale_ret;
2097 } 2114 }
@@ -2155,8 +2172,6 @@ static int cpsw_remove(struct platform_device *pdev)
2155 unregister_netdev(cpsw_get_slave_ndev(priv, 1)); 2172 unregister_netdev(cpsw_get_slave_ndev(priv, 1));
2156 unregister_netdev(ndev); 2173 unregister_netdev(ndev);
2157 2174
2158 cpts_unregister(priv->cpts);
2159
2160 cpsw_ale_destroy(priv->ale); 2175 cpsw_ale_destroy(priv->ale);
2161 cpdma_chan_destroy(priv->txch); 2176 cpdma_chan_destroy(priv->txch);
2162 cpdma_chan_destroy(priv->rxch); 2177 cpdma_chan_destroy(priv->rxch);
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 41ba974bf37c..cd9b164a0434 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -61,6 +61,7 @@
61#include <linux/davinci_emac.h> 61#include <linux/davinci_emac.h>
62#include <linux/of.h> 62#include <linux/of.h>
63#include <linux/of_address.h> 63#include <linux/of_address.h>
64#include <linux/of_device.h>
64#include <linux/of_irq.h> 65#include <linux/of_irq.h>
65#include <linux/of_net.h> 66#include <linux/of_net.h>
66 67
@@ -1752,10 +1753,14 @@ static const struct net_device_ops emac_netdev_ops = {
1752#endif 1753#endif
1753}; 1754};
1754 1755
1756static const struct of_device_id davinci_emac_of_match[];
1757
1755static struct emac_platform_data * 1758static struct emac_platform_data *
1756davinci_emac_of_get_pdata(struct platform_device *pdev, struct emac_priv *priv) 1759davinci_emac_of_get_pdata(struct platform_device *pdev, struct emac_priv *priv)
1757{ 1760{
1758 struct device_node *np; 1761 struct device_node *np;
1762 const struct of_device_id *match;
1763 const struct emac_platform_data *auxdata;
1759 struct emac_platform_data *pdata = NULL; 1764 struct emac_platform_data *pdata = NULL;
1760 const u8 *mac_addr; 1765 const u8 *mac_addr;
1761 1766
@@ -1793,7 +1798,20 @@ davinci_emac_of_get_pdata(struct platform_device *pdev, struct emac_priv *priv)
1793 1798
1794 priv->phy_node = of_parse_phandle(np, "phy-handle", 0); 1799 priv->phy_node = of_parse_phandle(np, "phy-handle", 0);
1795 if (!priv->phy_node) 1800 if (!priv->phy_node)
1796 pdata->phy_id = ""; 1801 pdata->phy_id = NULL;
1802
1803 auxdata = pdev->dev.platform_data;
1804 if (auxdata) {
1805 pdata->interrupt_enable = auxdata->interrupt_enable;
1806 pdata->interrupt_disable = auxdata->interrupt_disable;
1807 }
1808
1809 match = of_match_device(davinci_emac_of_match, &pdev->dev);
1810 if (match && match->data) {
1811 auxdata = match->data;
1812 pdata->version = auxdata->version;
1813 pdata->hw_ram_addr = auxdata->hw_ram_addr;
1814 }
1797 1815
1798 pdev->dev.platform_data = pdata; 1816 pdev->dev.platform_data = pdata;
1799 1817
@@ -2020,8 +2038,14 @@ static const struct dev_pm_ops davinci_emac_pm_ops = {
2020}; 2038};
2021 2039
2022#if IS_ENABLED(CONFIG_OF) 2040#if IS_ENABLED(CONFIG_OF)
2041static const struct emac_platform_data am3517_emac_data = {
2042 .version = EMAC_VERSION_2,
2043 .hw_ram_addr = 0x01e20000,
2044};
2045
2023static const struct of_device_id davinci_emac_of_match[] = { 2046static const struct of_device_id davinci_emac_of_match[] = {
2024 {.compatible = "ti,davinci-dm6467-emac", }, 2047 {.compatible = "ti,davinci-dm6467-emac", },
2048 {.compatible = "ti,am3517-emac", .data = &am3517_emac_data, },
2025 {}, 2049 {},
2026}; 2050};
2027MODULE_DEVICE_TABLE(of, davinci_emac_of_match); 2051MODULE_DEVICE_TABLE(of, davinci_emac_of_match);
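
The davinci_emac.c change attaches per-SoC defaults (version, hw_ram_addr) to an of_device_id entry through its .data pointer and applies them after of_match_device() succeeds. The plain-C sketch below shows the same table-with-variant-data lookup; the compatible strings and defaults are invented for illustration.

/* Match table whose entries carry optional per-variant defaults. */
#include <stdio.h>
#include <string.h>

struct variant_data {
	int version;
	unsigned long hw_ram_addr;
};

struct match_entry {
	const char *compatible;
	const struct variant_data *data;	/* may be NULL */
};

static const struct variant_data example_soc_data = {
	.version = 2,
	.hw_ram_addr = 0x01e20000,
};

static const struct match_entry match_table[] = {
	{ .compatible = "vendor,plain-emac" },
	{ .compatible = "vendor,example-soc-emac", .data = &example_soc_data },
	{ NULL }
};

static const struct match_entry *match_device(const char *compatible)
{
	const struct match_entry *m;

	for (m = match_table; m->compatible; m++)
		if (!strcmp(m->compatible, compatible))
			return m;
	return NULL;
}

int main(void)
{
	const struct match_entry *m = match_device("vendor,example-soc-emac");

	if (m && m->data)
		printf("version %d, ram 0x%lx\n",
		       m->data->version, m->data->hw_ram_addr);
	return 0;
}
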
diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c
index 628b736e5ae7..0e9fb3301b11 100644
--- a/drivers/net/ethernet/tile/tilegx.c
+++ b/drivers/net/ethernet/tile/tilegx.c
@@ -2080,7 +2080,8 @@ static int tile_net_tx(struct sk_buff *skb, struct net_device *dev)
2080} 2080}
2081 2081
2082/* Return subqueue id on this core (one per core). */ 2082/* Return subqueue id on this core (one per core). */
2083static u16 tile_net_select_queue(struct net_device *dev, struct sk_buff *skb) 2083static u16 tile_net_select_queue(struct net_device *dev, struct sk_buff *skb,
2084 void *accel_priv)
2084{ 2085{
2085 return smp_processor_id(); 2086 return smp_processor_id();
2086} 2087}
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index d022bf936572..ad61d26a44f3 100644
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -2172,16 +2172,13 @@ static int velocity_poll(struct napi_struct *napi, int budget)
2172 unsigned int rx_done; 2172 unsigned int rx_done;
2173 unsigned long flags; 2173 unsigned long flags;
2174 2174
2175 spin_lock_irqsave(&vptr->lock, flags);
2176 /* 2175 /*
2177 * Do rx and tx twice for performance (taken from the VIA 2176 * Do rx and tx twice for performance (taken from the VIA
2178 * out-of-tree driver). 2177 * out-of-tree driver).
2179 */ 2178 */
2180 rx_done = velocity_rx_srv(vptr, budget / 2); 2179 rx_done = velocity_rx_srv(vptr, budget);
2181 velocity_tx_srv(vptr); 2180 spin_lock_irqsave(&vptr->lock, flags);
2182 rx_done += velocity_rx_srv(vptr, budget - rx_done);
2183 velocity_tx_srv(vptr); 2181 velocity_tx_srv(vptr);
2184
2185 /* If budget not fully consumed, exit the polling mode */ 2182 /* If budget not fully consumed, exit the polling mode */
2186 if (rx_done < budget) { 2183 if (rx_done < budget) {
2187 napi_complete(napi); 2184 napi_complete(napi);
@@ -2342,6 +2339,8 @@ static int velocity_change_mtu(struct net_device *dev, int new_mtu)
2342 if (ret < 0) 2339 if (ret < 0)
2343 goto out_free_tmp_vptr_1; 2340 goto out_free_tmp_vptr_1;
2344 2341
2342 napi_disable(&vptr->napi);
2343
2345 spin_lock_irqsave(&vptr->lock, flags); 2344 spin_lock_irqsave(&vptr->lock, flags);
2346 2345
2347 netif_stop_queue(dev); 2346 netif_stop_queue(dev);
@@ -2362,6 +2361,8 @@ static int velocity_change_mtu(struct net_device *dev, int new_mtu)
2362 2361
2363 velocity_give_many_rx_descs(vptr); 2362 velocity_give_many_rx_descs(vptr);
2364 2363
2364 napi_enable(&vptr->napi);
2365
2365 mac_enable_int(vptr->mac_regs); 2366 mac_enable_int(vptr->mac_regs);
2366 netif_start_queue(dev); 2367 netif_start_queue(dev);
2367 2368
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index 1f2364126323..2166e879a096 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -1017,7 +1017,7 @@ static int temac_of_probe(struct platform_device *op)
1017 platform_set_drvdata(op, ndev); 1017 platform_set_drvdata(op, ndev);
1018 SET_NETDEV_DEV(ndev, &op->dev); 1018 SET_NETDEV_DEV(ndev, &op->dev);
1019 ndev->flags &= ~IFF_MULTICAST; /* clear multicast */ 1019 ndev->flags &= ~IFF_MULTICAST; /* clear multicast */
1020 ndev->features = NETIF_F_SG | NETIF_F_FRAGLIST; 1020 ndev->features = NETIF_F_SG;
1021 ndev->netdev_ops = &temac_netdev_ops; 1021 ndev->netdev_ops = &temac_netdev_ops;
1022 ndev->ethtool_ops = &temac_ethtool_ops; 1022 ndev->ethtool_ops = &temac_ethtool_ops;
1023#if 0 1023#if 0
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index b2ff038d6d20..f9293da19e26 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -1486,7 +1486,7 @@ static int axienet_of_probe(struct platform_device *op)
1486 1486
1487 SET_NETDEV_DEV(ndev, &op->dev); 1487 SET_NETDEV_DEV(ndev, &op->dev);
1488 ndev->flags &= ~IFF_MULTICAST; /* clear multicast */ 1488 ndev->flags &= ~IFF_MULTICAST; /* clear multicast */
1489 ndev->features = NETIF_F_SG | NETIF_F_FRAGLIST; 1489 ndev->features = NETIF_F_SG;
1490 ndev->netdev_ops = &axienet_netdev_ops; 1490 ndev->netdev_ops = &axienet_netdev_ops;
1491 ndev->ethtool_ops = &axienet_ethtool_ops; 1491 ndev->ethtool_ops = &axienet_ethtool_ops;
1492 1492
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index 74234a51c851..fefb8cd5eb65 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -163,26 +163,9 @@ static void xemaclite_enable_interrupts(struct net_local *drvdata)
163 __raw_writel(reg_data | XEL_TSR_XMIT_IE_MASK, 163 __raw_writel(reg_data | XEL_TSR_XMIT_IE_MASK,
164 drvdata->base_addr + XEL_TSR_OFFSET); 164 drvdata->base_addr + XEL_TSR_OFFSET);
165 165
166 /* Enable the Tx interrupts for the second Buffer if
167 * configured in HW */
168 if (drvdata->tx_ping_pong != 0) {
169 reg_data = __raw_readl(drvdata->base_addr +
170 XEL_BUFFER_OFFSET + XEL_TSR_OFFSET);
171 __raw_writel(reg_data | XEL_TSR_XMIT_IE_MASK,
172 drvdata->base_addr + XEL_BUFFER_OFFSET +
173 XEL_TSR_OFFSET);
174 }
175
176 /* Enable the Rx interrupts for the first buffer */ 166 /* Enable the Rx interrupts for the first buffer */
177 __raw_writel(XEL_RSR_RECV_IE_MASK, drvdata->base_addr + XEL_RSR_OFFSET); 167 __raw_writel(XEL_RSR_RECV_IE_MASK, drvdata->base_addr + XEL_RSR_OFFSET);
178 168
179 /* Enable the Rx interrupts for the second Buffer if
180 * configured in HW */
181 if (drvdata->rx_ping_pong != 0) {
182 __raw_writel(XEL_RSR_RECV_IE_MASK, drvdata->base_addr +
183 XEL_BUFFER_OFFSET + XEL_RSR_OFFSET);
184 }
185
186 /* Enable the Global Interrupt Enable */ 169 /* Enable the Global Interrupt Enable */
187 __raw_writel(XEL_GIER_GIE_MASK, drvdata->base_addr + XEL_GIER_OFFSET); 170 __raw_writel(XEL_GIER_GIE_MASK, drvdata->base_addr + XEL_GIER_OFFSET);
188} 171}
@@ -206,31 +189,10 @@ static void xemaclite_disable_interrupts(struct net_local *drvdata)
206 __raw_writel(reg_data & (~XEL_TSR_XMIT_IE_MASK), 189 __raw_writel(reg_data & (~XEL_TSR_XMIT_IE_MASK),
207 drvdata->base_addr + XEL_TSR_OFFSET); 190 drvdata->base_addr + XEL_TSR_OFFSET);
208 191
209 /* Disable the Tx interrupts for the second Buffer
210 * if configured in HW */
211 if (drvdata->tx_ping_pong != 0) {
212 reg_data = __raw_readl(drvdata->base_addr + XEL_BUFFER_OFFSET +
213 XEL_TSR_OFFSET);
214 __raw_writel(reg_data & (~XEL_TSR_XMIT_IE_MASK),
215 drvdata->base_addr + XEL_BUFFER_OFFSET +
216 XEL_TSR_OFFSET);
217 }
218
219 /* Disable the Rx interrupts for the first buffer */ 192 /* Disable the Rx interrupts for the first buffer */
220 reg_data = __raw_readl(drvdata->base_addr + XEL_RSR_OFFSET); 193 reg_data = __raw_readl(drvdata->base_addr + XEL_RSR_OFFSET);
221 __raw_writel(reg_data & (~XEL_RSR_RECV_IE_MASK), 194 __raw_writel(reg_data & (~XEL_RSR_RECV_IE_MASK),
222 drvdata->base_addr + XEL_RSR_OFFSET); 195 drvdata->base_addr + XEL_RSR_OFFSET);
223
224 /* Disable the Rx interrupts for the second buffer
225 * if configured in HW */
226 if (drvdata->rx_ping_pong != 0) {
227
228 reg_data = __raw_readl(drvdata->base_addr + XEL_BUFFER_OFFSET +
229 XEL_RSR_OFFSET);
230 __raw_writel(reg_data & (~XEL_RSR_RECV_IE_MASK),
231 drvdata->base_addr + XEL_BUFFER_OFFSET +
232 XEL_RSR_OFFSET);
233 }
234} 196}
235 197
236/** 198/**
@@ -258,6 +220,13 @@ static void xemaclite_aligned_write(void *src_ptr, u32 *dest_ptr,
258 *to_u16_ptr++ = *from_u16_ptr++; 220 *to_u16_ptr++ = *from_u16_ptr++;
259 *to_u16_ptr++ = *from_u16_ptr++; 221 *to_u16_ptr++ = *from_u16_ptr++;
260 222
223 /* This barrier resolves occasional issues seen around
224 * cases where the data is not properly flushed out
225 * from the processor store buffers to the destination
226 * memory locations.
227 */
228 wmb();
229
261 /* Output a word */ 230 /* Output a word */
262 *to_u32_ptr++ = align_buffer; 231 *to_u32_ptr++ = align_buffer;
263 } 232 }
@@ -273,6 +242,12 @@ static void xemaclite_aligned_write(void *src_ptr, u32 *dest_ptr,
273 for (; length > 0; length--) 242 for (; length > 0; length--)
274 *to_u8_ptr++ = *from_u8_ptr++; 243 *to_u8_ptr++ = *from_u8_ptr++;
275 244
245 /* This barrier resolves occasional issues seen around
246 * cases where the data is not properly flushed out
247 * from the processor store buffers to the destination
248 * memory locations.
249 */
250 wmb();
276 *to_u32_ptr = align_buffer; 251 *to_u32_ptr = align_buffer;
277 } 252 }
278} 253}
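
The xilinx_emaclite change drops the second-buffer (ping-pong) interrupt handling and inserts wmb() so the assembled words are flushed from the store buffers before the aligned store the device will read. Below is a user-space analogue of that aligned-write helper, assuming a C11 release fence as a stand-in for the kernel's wmb(); the buffer names and sizes are illustrative, not driver code.

/* Assemble unaligned bytes into aligned words, fencing before each store. */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void aligned_write(const void *src, uint32_t *dst, unsigned int len)
{
	const uint8_t *from = src;
	uint32_t word;

	while (len >= 4) {
		memcpy(&word, from, 4);		/* assemble an aligned word */
		atomic_thread_fence(memory_order_release);
		*dst++ = word;			/* publish the whole word */
		from += 4;
		len -= 4;
	}
	if (len) {
		word = 0;
		memcpy(&word, from, len);	/* partial tail word */
		atomic_thread_fence(memory_order_release);
		*dst = word;
	}
}

int main(void)
{
	uint8_t frame[10] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 };
	uint32_t buf[3] = { 0 };

	aligned_write(frame + 1, buf, sizeof(frame) - 1);	/* unaligned source */
	printf("%08x %08x %08x\n",
	       (unsigned)buf[0], (unsigned)buf[1], (unsigned)buf[2]);
	return 0;
}
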
diff --git a/drivers/net/hamradio/hdlcdrv.c b/drivers/net/hamradio/hdlcdrv.c
index 3169252613fa..5d78c1d08abd 100644
--- a/drivers/net/hamradio/hdlcdrv.c
+++ b/drivers/net/hamradio/hdlcdrv.c
@@ -571,6 +571,8 @@ static int hdlcdrv_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
571 case HDLCDRVCTL_CALIBRATE: 571 case HDLCDRVCTL_CALIBRATE:
572 if(!capable(CAP_SYS_RAWIO)) 572 if(!capable(CAP_SYS_RAWIO))
573 return -EPERM; 573 return -EPERM;
574 if (bi.data.calibrate > INT_MAX / s->par.bitrate)
575 return -EINVAL;
574 s->hdlctx.calibrate = bi.data.calibrate * s->par.bitrate / 16; 576 s->hdlctx.calibrate = bi.data.calibrate * s->par.bitrate / 16;
575 return 0; 577 return 0;
576 578
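
The hdlcdrv.c hunk rejects user-supplied calibrate values that would overflow the calibrate * bitrate multiplication before it happens. A standalone sketch of that guard, with illustrative values, follows.

/* Bounds-check a user-supplied factor before multiplying. */
#include <limits.h>
#include <stdio.h>

static int set_calibrate(unsigned int calibrate, int bitrate, int *out)
{
	if (bitrate <= 0)
		return -1;
	if (calibrate > (unsigned int)(INT_MAX / bitrate))
		return -1;			/* product would overflow */
	*out = calibrate * bitrate / 16;
	return 0;
}

int main(void)
{
	int value;

	if (set_calibrate(100, 9600, &value) == 0)
		printf("calibrate %d\n", value);
	if (set_calibrate(4000000, 9600, &value) != 0)
		printf("rejected oversized request\n");
	return 0;
}
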
diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c
index 1971411574db..61dd2447e1bb 100644
--- a/drivers/net/hamradio/yam.c
+++ b/drivers/net/hamradio/yam.c
@@ -1057,6 +1057,7 @@ static int yam_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1057 break; 1057 break;
1058 1058
1059 case SIOCYAMGCFG: 1059 case SIOCYAMGCFG:
1060 memset(&yi, 0, sizeof(yi));
1060 yi.cfg.mask = 0xffffffff; 1061 yi.cfg.mask = 0xffffffff;
1061 yi.cfg.iobase = yp->iobase; 1062 yi.cfg.iobase = yp->iobase;
1062 yi.cfg.irq = yp->irq; 1063 yi.cfg.irq = yp->irq;
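
The yam.c hunk zeroes the yi structure before filling its fields, so structure padding and any unset members cannot leak stale kernel stack contents through SIOCYAMGCFG. The sketch below shows the same memset-before-fill pattern on an invented reply structure.

/* Clear the whole reply before filling it, so no uninitialised bytes escape. */
#include <stdio.h>
#include <string.h>

struct cfg_reply {
	unsigned int mask;
	unsigned int iobase;
	unsigned char irq;		/* padding bytes follow on most ABIs */
	unsigned int bitrate;
};

static void fill_reply(struct cfg_reply *out)
{
	memset(out, 0, sizeof(*out));	/* padding and unset fields become zero */
	out->mask = 0xffffffff;
	out->iobase = 0x220;
	out->irq = 7;
	out->bitrate = 9600;
}

int main(void)
{
	struct cfg_reply reply;

	fill_reply(&reply);
	printf("iobase 0x%x irq %u\n", reply.iobase, reply.irq);
	return 0;
}
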
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 524f713f6017..71baeb3ed905 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -261,9 +261,7 @@ int netvsc_recv_callback(struct hv_device *device_obj,
261 struct sk_buff *skb; 261 struct sk_buff *skb;
262 262
263 net = ((struct netvsc_device *)hv_get_drvdata(device_obj))->ndev; 263 net = ((struct netvsc_device *)hv_get_drvdata(device_obj))->ndev;
264 if (!net) { 264 if (!net || net->reg_state != NETREG_REGISTERED) {
265 netdev_err(net, "got receive callback but net device"
266 " not initialized yet\n");
267 packet->status = NVSP_STAT_FAIL; 265 packet->status = NVSP_STAT_FAIL;
268 return 0; 266 return 0;
269 } 267 }
@@ -327,7 +325,6 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
327 return -EINVAL; 325 return -EINVAL;
328 326
329 nvdev->start_remove = true; 327 nvdev->start_remove = true;
330 cancel_delayed_work_sync(&ndevctx->dwork);
331 cancel_work_sync(&ndevctx->work); 328 cancel_work_sync(&ndevctx->work);
332 netif_tx_disable(ndev); 329 netif_tx_disable(ndev);
333 rndis_filter_device_remove(hdev); 330 rndis_filter_device_remove(hdev);
@@ -436,19 +433,11 @@ static int netvsc_probe(struct hv_device *dev,
436 SET_ETHTOOL_OPS(net, &ethtool_ops); 433 SET_ETHTOOL_OPS(net, &ethtool_ops);
437 SET_NETDEV_DEV(net, &dev->device); 434 SET_NETDEV_DEV(net, &dev->device);
438 435
439 ret = register_netdev(net);
440 if (ret != 0) {
441 pr_err("Unable to register netdev.\n");
442 free_netdev(net);
443 goto out;
444 }
445
446 /* Notify the netvsc driver of the new device */ 436 /* Notify the netvsc driver of the new device */
447 device_info.ring_size = ring_size; 437 device_info.ring_size = ring_size;
448 ret = rndis_filter_device_add(dev, &device_info); 438 ret = rndis_filter_device_add(dev, &device_info);
449 if (ret != 0) { 439 if (ret != 0) {
450 netdev_err(net, "unable to add netvsc device (ret %d)\n", ret); 440 netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
451 unregister_netdev(net);
452 free_netdev(net); 441 free_netdev(net);
453 hv_set_drvdata(dev, NULL); 442 hv_set_drvdata(dev, NULL);
454 return ret; 443 return ret;
@@ -457,7 +446,13 @@ static int netvsc_probe(struct hv_device *dev,
457 446
458 netif_carrier_on(net); 447 netif_carrier_on(net);
459 448
460out: 449 ret = register_netdev(net);
450 if (ret != 0) {
451 pr_err("Unable to register netdev.\n");
452 rndis_filter_device_remove(dev);
453 free_netdev(net);
454 }
455
461 return ret; 456 return ret;
462} 457}
463 458
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index acf93798dc67..bc8faaec33f5 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -299,7 +299,7 @@ netdev_tx_t macvlan_start_xmit(struct sk_buff *skb,
299 299
300 if (vlan->fwd_priv) { 300 if (vlan->fwd_priv) {
301 skb->dev = vlan->lowerdev; 301 skb->dev = vlan->lowerdev;
302 ret = dev_hard_start_xmit(skb, skb->dev, NULL, vlan->fwd_priv); 302 ret = dev_queue_xmit_accel(skb, vlan->fwd_priv);
303 } else { 303 } else {
304 ret = macvlan_queue_xmit(skb, dev); 304 ret = macvlan_queue_xmit(skb, dev);
305 } 305 }
@@ -338,6 +338,8 @@ static const struct header_ops macvlan_hard_header_ops = {
338 .cache_update = eth_header_cache_update, 338 .cache_update = eth_header_cache_update,
339}; 339};
340 340
341static struct rtnl_link_ops macvlan_link_ops;
342
341static int macvlan_open(struct net_device *dev) 343static int macvlan_open(struct net_device *dev)
342{ 344{
343 struct macvlan_dev *vlan = netdev_priv(dev); 345 struct macvlan_dev *vlan = netdev_priv(dev);
@@ -353,7 +355,8 @@ static int macvlan_open(struct net_device *dev)
353 goto hash_add; 355 goto hash_add;
354 } 356 }
355 357
356 if (lowerdev->features & NETIF_F_HW_L2FW_DOFFLOAD) { 358 if (lowerdev->features & NETIF_F_HW_L2FW_DOFFLOAD &&
359 dev->rtnl_link_ops == &macvlan_link_ops) {
357 vlan->fwd_priv = 360 vlan->fwd_priv =
358 lowerdev->netdev_ops->ndo_dfwd_add_station(lowerdev, dev); 361 lowerdev->netdev_ops->ndo_dfwd_add_station(lowerdev, dev);
359 362
@@ -362,10 +365,8 @@ static int macvlan_open(struct net_device *dev)
362 */ 365 */
363 if (IS_ERR_OR_NULL(vlan->fwd_priv)) { 366 if (IS_ERR_OR_NULL(vlan->fwd_priv)) {
364 vlan->fwd_priv = NULL; 367 vlan->fwd_priv = NULL;
365 } else { 368 } else
366 dev->features &= ~NETIF_F_LLTX;
367 return 0; 369 return 0;
368 }
369 } 370 }
370 371
371 err = -EBUSY; 372 err = -EBUSY;
@@ -690,8 +691,18 @@ static netdev_features_t macvlan_fix_features(struct net_device *dev,
690 netdev_features_t features) 691 netdev_features_t features)
691{ 692{
692 struct macvlan_dev *vlan = netdev_priv(dev); 693 struct macvlan_dev *vlan = netdev_priv(dev);
694 netdev_features_t mask;
695
696 features |= NETIF_F_ALL_FOR_ALL;
697 features &= (vlan->set_features | ~MACVLAN_FEATURES);
698 mask = features;
699
700 features = netdev_increment_features(vlan->lowerdev->features,
701 features,
702 mask);
703 features |= NETIF_F_LLTX;
693 704
694 return features & (vlan->set_features | ~MACVLAN_FEATURES); 705 return features;
695} 706}
696 707
697static const struct ethtool_ops macvlan_ethtool_ops = { 708static const struct ethtool_ops macvlan_ethtool_ops = {
@@ -1019,9 +1030,8 @@ static int macvlan_device_event(struct notifier_block *unused,
1019 break; 1030 break;
1020 case NETDEV_FEAT_CHANGE: 1031 case NETDEV_FEAT_CHANGE:
1021 list_for_each_entry(vlan, &port->vlans, list) { 1032 list_for_each_entry(vlan, &port->vlans, list) {
1022 vlan->dev->features = dev->features & MACVLAN_FEATURES;
1023 vlan->dev->gso_max_size = dev->gso_max_size; 1033 vlan->dev->gso_max_size = dev->gso_max_size;
1024 netdev_features_change(vlan->dev); 1034 netdev_update_features(vlan->dev);
1025 } 1035 }
1026 break; 1036 break;
1027 case NETDEV_UNREGISTER: 1037 case NETDEV_UNREGISTER:
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index dc76670c2f2a..2a89da080317 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -744,7 +744,7 @@ err:
744 rcu_read_lock(); 744 rcu_read_lock();
745 vlan = rcu_dereference(q->vlan); 745 vlan = rcu_dereference(q->vlan);
746 if (vlan) 746 if (vlan)
747 vlan->dev->stats.tx_dropped++; 747 this_cpu_inc(vlan->pcpu_stats->tx_dropped);
748 rcu_read_unlock(); 748 rcu_read_unlock();
749 749
750 return err; 750 return err;
@@ -767,11 +767,10 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
767 const struct sk_buff *skb, 767 const struct sk_buff *skb,
768 const struct iovec *iv, int len) 768 const struct iovec *iv, int len)
769{ 769{
770 struct macvlan_dev *vlan;
771 int ret; 770 int ret;
772 int vnet_hdr_len = 0; 771 int vnet_hdr_len = 0;
773 int vlan_offset = 0; 772 int vlan_offset = 0;
774 int copied; 773 int copied, total;
775 774
776 if (q->flags & IFF_VNET_HDR) { 775 if (q->flags & IFF_VNET_HDR) {
777 struct virtio_net_hdr vnet_hdr; 776 struct virtio_net_hdr vnet_hdr;
@@ -786,7 +785,8 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
786 if (memcpy_toiovecend(iv, (void *)&vnet_hdr, 0, sizeof(vnet_hdr))) 785 if (memcpy_toiovecend(iv, (void *)&vnet_hdr, 0, sizeof(vnet_hdr)))
787 return -EFAULT; 786 return -EFAULT;
788 } 787 }
789 copied = vnet_hdr_len; 788 total = copied = vnet_hdr_len;
789 total += skb->len;
790 790
791 if (!vlan_tx_tag_present(skb)) 791 if (!vlan_tx_tag_present(skb))
792 len = min_t(int, skb->len, len); 792 len = min_t(int, skb->len, len);
@@ -801,6 +801,7 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
801 801
802 vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto); 802 vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
803 len = min_t(int, skb->len + VLAN_HLEN, len); 803 len = min_t(int, skb->len + VLAN_HLEN, len);
804 total += VLAN_HLEN;
804 805
805 copy = min_t(int, vlan_offset, len); 806 copy = min_t(int, vlan_offset, len);
806 ret = skb_copy_datagram_const_iovec(skb, 0, iv, copied, copy); 807 ret = skb_copy_datagram_const_iovec(skb, 0, iv, copied, copy);
@@ -818,19 +819,9 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
818 } 819 }
819 820
820 ret = skb_copy_datagram_const_iovec(skb, vlan_offset, iv, copied, len); 821 ret = skb_copy_datagram_const_iovec(skb, vlan_offset, iv, copied, len);
821 copied += len;
822 822
823done: 823done:
824 rcu_read_lock(); 824 return ret ? ret : total;
825 vlan = rcu_dereference(q->vlan);
826 if (vlan) {
827 preempt_disable();
828 macvlan_count_rx(vlan, copied - vnet_hdr_len, ret == 0, 0);
829 preempt_enable();
830 }
831 rcu_read_unlock();
832
833 return ret ? ret : copied;
834} 825}
835 826
836static ssize_t macvtap_do_read(struct macvtap_queue *q, struct kiocb *iocb, 827static ssize_t macvtap_do_read(struct macvtap_queue *q, struct kiocb *iocb,
@@ -885,7 +876,9 @@ static ssize_t macvtap_aio_read(struct kiocb *iocb, const struct iovec *iv,
885 } 876 }
886 877
887 ret = macvtap_do_read(q, iocb, iv, len, file->f_flags & O_NONBLOCK); 878 ret = macvtap_do_read(q, iocb, iv, len, file->f_flags & O_NONBLOCK);
888 ret = min_t(ssize_t, ret, len); /* XXX copied from tun.c. Why? */ 879 ret = min_t(ssize_t, ret, len);
880 if (ret > 0)
881 iocb->ki_pos = ret;
889out: 882out:
890 return ret; 883 return ret;
891} 884}
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 3ae28f420868..26fa05a472b4 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -336,6 +336,21 @@ static struct phy_driver ksphy_driver[] = {
336 .resume = genphy_resume, 336 .resume = genphy_resume,
337 .driver = { .owner = THIS_MODULE,}, 337 .driver = { .owner = THIS_MODULE,},
338}, { 338}, {
339 .phy_id = PHY_ID_KSZ8041RNLI,
340 .phy_id_mask = 0x00fffff0,
341 .name = "Micrel KSZ8041RNLI",
342 .features = PHY_BASIC_FEATURES |
343 SUPPORTED_Pause | SUPPORTED_Asym_Pause,
344 .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
345 .config_init = kszphy_config_init,
346 .config_aneg = genphy_config_aneg,
347 .read_status = genphy_read_status,
348 .ack_interrupt = kszphy_ack_interrupt,
349 .config_intr = kszphy_config_intr,
350 .suspend = genphy_suspend,
351 .resume = genphy_resume,
352 .driver = { .owner = THIS_MODULE,},
353}, {
339 .phy_id = PHY_ID_KSZ8051, 354 .phy_id = PHY_ID_KSZ8051,
340 .phy_id_mask = 0x00fffff0, 355 .phy_id_mask = 0x00fffff0,
341 .name = "Micrel KSZ8051", 356 .name = "Micrel KSZ8051",
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 36c6994436b7..98434b84f041 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -565,10 +565,8 @@ int phy_start_interrupts(struct phy_device *phydev)
565 int err = 0; 565 int err = 0;
566 566
567 atomic_set(&phydev->irq_disable, 0); 567 atomic_set(&phydev->irq_disable, 0);
568 if (request_irq(phydev->irq, phy_interrupt, 568 if (request_irq(phydev->irq, phy_interrupt, 0, "phy_interrupt",
569 IRQF_SHARED, 569 phydev) < 0) {
570 "phy_interrupt",
571 phydev) < 0) {
572 pr_warn("%s: Can't get IRQ %d (PHY)\n", 570 pr_warn("%s: Can't get IRQ %d (PHY)\n",
573 phydev->bus->name, phydev->irq); 571 phydev->bus->name, phydev->irq);
574 phydev->irq = PHY_POLL; 572 phydev->irq = PHY_POLL;
diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c
index 508e4359338b..14372c65a7e8 100644
--- a/drivers/net/phy/vitesse.c
+++ b/drivers/net/phy/vitesse.c
@@ -64,6 +64,7 @@
64 64
65#define PHY_ID_VSC8234 0x000fc620 65#define PHY_ID_VSC8234 0x000fc620
66#define PHY_ID_VSC8244 0x000fc6c0 66#define PHY_ID_VSC8244 0x000fc6c0
67#define PHY_ID_VSC8514 0x00070670
67#define PHY_ID_VSC8574 0x000704a0 68#define PHY_ID_VSC8574 0x000704a0
68#define PHY_ID_VSC8662 0x00070660 69#define PHY_ID_VSC8662 0x00070660
69#define PHY_ID_VSC8221 0x000fc550 70#define PHY_ID_VSC8221 0x000fc550
@@ -131,6 +132,7 @@ static int vsc82xx_config_intr(struct phy_device *phydev)
131 err = phy_write(phydev, MII_VSC8244_IMASK, 132 err = phy_write(phydev, MII_VSC8244_IMASK,
132 (phydev->drv->phy_id == PHY_ID_VSC8234 || 133 (phydev->drv->phy_id == PHY_ID_VSC8234 ||
133 phydev->drv->phy_id == PHY_ID_VSC8244 || 134 phydev->drv->phy_id == PHY_ID_VSC8244 ||
135 phydev->drv->phy_id == PHY_ID_VSC8514 ||
134 phydev->drv->phy_id == PHY_ID_VSC8574) ? 136 phydev->drv->phy_id == PHY_ID_VSC8574) ?
135 MII_VSC8244_IMASK_MASK : 137 MII_VSC8244_IMASK_MASK :
136 MII_VSC8221_IMASK_MASK); 138 MII_VSC8221_IMASK_MASK);
@@ -246,6 +248,18 @@ static struct phy_driver vsc82xx_driver[] = {
246 .config_intr = &vsc82xx_config_intr, 248 .config_intr = &vsc82xx_config_intr,
247 .driver = { .owner = THIS_MODULE,}, 249 .driver = { .owner = THIS_MODULE,},
248}, { 250}, {
251 .phy_id = PHY_ID_VSC8514,
252 .name = "Vitesse VSC8514",
253 .phy_id_mask = 0x000ffff0,
254 .features = PHY_GBIT_FEATURES,
255 .flags = PHY_HAS_INTERRUPT,
256 .config_init = &vsc824x_config_init,
257 .config_aneg = &vsc82x4_config_aneg,
258 .read_status = &genphy_read_status,
259 .ack_interrupt = &vsc824x_ack_interrupt,
260 .config_intr = &vsc82xx_config_intr,
261 .driver = { .owner = THIS_MODULE,},
262}, {
249 .phy_id = PHY_ID_VSC8574, 263 .phy_id = PHY_ID_VSC8574,
250 .name = "Vitesse VSC8574", 264 .name = "Vitesse VSC8574",
251 .phy_id_mask = 0x000ffff0, 265 .phy_id_mask = 0x000ffff0,
@@ -315,6 +329,7 @@ module_exit(vsc82xx_exit);
315static struct mdio_device_id __maybe_unused vitesse_tbl[] = { 329static struct mdio_device_id __maybe_unused vitesse_tbl[] = {
316 { PHY_ID_VSC8234, 0x000ffff0 }, 330 { PHY_ID_VSC8234, 0x000ffff0 },
317 { PHY_ID_VSC8244, 0x000fffc0 }, 331 { PHY_ID_VSC8244, 0x000fffc0 },
332 { PHY_ID_VSC8514, 0x000ffff0 },
318 { PHY_ID_VSC8574, 0x000ffff0 }, 333 { PHY_ID_VSC8574, 0x000ffff0 },
319 { PHY_ID_VSC8662, 0x000ffff0 }, 334 { PHY_ID_VSC8662, 0x000ffff0 },
320 { PHY_ID_VSC8221, 0x000ffff0 }, 335 { PHY_ID_VSC8221, 0x000ffff0 },
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 34b0de09d881..b75ae5bde673 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1366,6 +1366,8 @@ static int team_user_linkup_option_get(struct team *team,
1366 return 0; 1366 return 0;
1367} 1367}
1368 1368
1369static void __team_carrier_check(struct team *team);
1370
1369static int team_user_linkup_option_set(struct team *team, 1371static int team_user_linkup_option_set(struct team *team,
1370 struct team_gsetter_ctx *ctx) 1372 struct team_gsetter_ctx *ctx)
1371{ 1373{
@@ -1373,6 +1375,7 @@ static int team_user_linkup_option_set(struct team *team,
1373 1375
1374 port->user.linkup = ctx->data.bool_val; 1376 port->user.linkup = ctx->data.bool_val;
1375 team_refresh_port_linkup(port); 1377 team_refresh_port_linkup(port);
1378 __team_carrier_check(port->team);
1376 return 0; 1379 return 0;
1377} 1380}
1378 1381
@@ -1392,6 +1395,7 @@ static int team_user_linkup_en_option_set(struct team *team,
1392 1395
1393 port->user.linkup_enabled = ctx->data.bool_val; 1396 port->user.linkup_enabled = ctx->data.bool_val;
1394 team_refresh_port_linkup(port); 1397 team_refresh_port_linkup(port);
1398 __team_carrier_check(port->team);
1395 return 0; 1399 return 0;
1396} 1400}
1397 1401
@@ -1643,7 +1647,8 @@ static netdev_tx_t team_xmit(struct sk_buff *skb, struct net_device *dev)
1643 return NETDEV_TX_OK; 1647 return NETDEV_TX_OK;
1644} 1648}
1645 1649
1646static u16 team_select_queue(struct net_device *dev, struct sk_buff *skb) 1650static u16 team_select_queue(struct net_device *dev, struct sk_buff *skb,
1651 void *accel_priv)
1647{ 1652{
1648 /* 1653 /*
1649 * This helper function exists to help dev_pick_tx get the correct 1654 * This helper function exists to help dev_pick_tx get the correct
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 782e38bfc1ee..ecec8029c5e8 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -348,7 +348,8 @@ unlock:
348 * different rxq no. here. If we could not get rxhash, then we would 348 * different rxq no. here. If we could not get rxhash, then we would
349 * hope the rxq no. may help here. 349 * hope the rxq no. may help here.
350 */ 350 */
351static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb) 351static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
352 void *accel_priv)
352{ 353{
353 struct tun_struct *tun = netdev_priv(dev); 354 struct tun_struct *tun = netdev_priv(dev);
354 struct tun_flow_entry *e; 355 struct tun_flow_entry *e;
@@ -1184,7 +1185,7 @@ static ssize_t tun_put_user(struct tun_struct *tun,
1184{ 1185{
1185 struct tun_pi pi = { 0, skb->protocol }; 1186 struct tun_pi pi = { 0, skb->protocol };
1186 ssize_t total = 0; 1187 ssize_t total = 0;
1187 int vlan_offset = 0; 1188 int vlan_offset = 0, copied;
1188 1189
1189 if (!(tun->flags & TUN_NO_PI)) { 1190 if (!(tun->flags & TUN_NO_PI)) {
1190 if ((len -= sizeof(pi)) < 0) 1191 if ((len -= sizeof(pi)) < 0)
@@ -1248,6 +1249,8 @@ static ssize_t tun_put_user(struct tun_struct *tun,
1248 total += tun->vnet_hdr_sz; 1249 total += tun->vnet_hdr_sz;
1249 } 1250 }
1250 1251
1252 copied = total;
1253 total += skb->len;
1251 if (!vlan_tx_tag_present(skb)) { 1254 if (!vlan_tx_tag_present(skb)) {
1252 len = min_t(int, skb->len, len); 1255 len = min_t(int, skb->len, len);
1253 } else { 1256 } else {
@@ -1262,24 +1265,24 @@ static ssize_t tun_put_user(struct tun_struct *tun,
1262 1265
1263 vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto); 1266 vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
1264 len = min_t(int, skb->len + VLAN_HLEN, len); 1267 len = min_t(int, skb->len + VLAN_HLEN, len);
1268 total += VLAN_HLEN;
1265 1269
1266 copy = min_t(int, vlan_offset, len); 1270 copy = min_t(int, vlan_offset, len);
1267 ret = skb_copy_datagram_const_iovec(skb, 0, iv, total, copy); 1271 ret = skb_copy_datagram_const_iovec(skb, 0, iv, copied, copy);
1268 len -= copy; 1272 len -= copy;
1269 total += copy; 1273 copied += copy;
1270 if (ret || !len) 1274 if (ret || !len)
1271 goto done; 1275 goto done;
1272 1276
1273 copy = min_t(int, sizeof(veth), len); 1277 copy = min_t(int, sizeof(veth), len);
1274 ret = memcpy_toiovecend(iv, (void *)&veth, total, copy); 1278 ret = memcpy_toiovecend(iv, (void *)&veth, copied, copy);
1275 len -= copy; 1279 len -= copy;
1276 total += copy; 1280 copied += copy;
1277 if (ret || !len) 1281 if (ret || !len)
1278 goto done; 1282 goto done;
1279 } 1283 }
1280 1284
1281 skb_copy_datagram_const_iovec(skb, vlan_offset, iv, total, len); 1285 skb_copy_datagram_const_iovec(skb, vlan_offset, iv, copied, len);
1282 total += len;
1283 1286
1284done: 1287done:
1285 tun->dev->stats.tx_packets++; 1288 tun->dev->stats.tx_packets++;
@@ -1356,6 +1359,8 @@ static ssize_t tun_chr_aio_read(struct kiocb *iocb, const struct iovec *iv,
1356 ret = tun_do_read(tun, tfile, iocb, iv, len, 1359 ret = tun_do_read(tun, tfile, iocb, iv, len,
1357 file->f_flags & O_NONBLOCK); 1360 file->f_flags & O_NONBLOCK);
1358 ret = min_t(ssize_t, ret, len); 1361 ret = min_t(ssize_t, ret, len);
1362 if (ret > 0)
1363 iocb->ki_pos = ret;
1359out: 1364out:
1360 tun_put(tun); 1365 tun_put(tun);
1361 return ret; 1366 return ret;
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 85e4a01670f0..47b0f732b0b1 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -276,12 +276,12 @@ config USB_NET_CDC_MBIM
276 module will be called cdc_mbim. 276 module will be called cdc_mbim.
277 277
278config USB_NET_DM9601 278config USB_NET_DM9601
279 tristate "Davicom DM9601 based USB 1.1 10/100 ethernet devices" 279 tristate "Davicom DM96xx based USB 10/100 ethernet devices"
280 depends on USB_USBNET 280 depends on USB_USBNET
281 select CRC32 281 select CRC32
282 help 282 help
283 This option adds support for Davicom DM9601 based USB 1.1 283 This option adds support for Davicom DM9601/DM9620/DM9621A
284 10/100 Ethernet adapters. 284 based USB 10/100 Ethernet adapters.
285 285
286config USB_NET_SR9700 286config USB_NET_SR9700
287 tristate "CoreChip-sz SR9700 based USB 1.1 10/100 ethernet devices" 287 tristate "CoreChip-sz SR9700 based USB 1.1 10/100 ethernet devices"
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
index c6867f926cff..14aa48fa8d7e 100644
--- a/drivers/net/usb/dm9601.c
+++ b/drivers/net/usb/dm9601.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Davicom DM9601 USB 1.1 10/100Mbps ethernet devices 2 * Davicom DM96xx USB 10/100Mbps ethernet devices
3 * 3 *
4 * Peter Korsgaard <jacmet@sunsite.dk> 4 * Peter Korsgaard <jacmet@sunsite.dk>
5 * 5 *
@@ -364,7 +364,12 @@ static int dm9601_bind(struct usbnet *dev, struct usb_interface *intf)
364 dev->net->ethtool_ops = &dm9601_ethtool_ops; 364 dev->net->ethtool_ops = &dm9601_ethtool_ops;
365 dev->net->hard_header_len += DM_TX_OVERHEAD; 365 dev->net->hard_header_len += DM_TX_OVERHEAD;
366 dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len; 366 dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
367 dev->rx_urb_size = dev->net->mtu + ETH_HLEN + DM_RX_OVERHEAD; 367
368 /* dm9620/21a require room for 4 byte padding, even in dm9601
369 * mode, so we need +1 to be able to receive full size
370 * ethernet frames.
371 */
372 dev->rx_urb_size = dev->net->mtu + ETH_HLEN + DM_RX_OVERHEAD + 1;
368 373
369 dev->mii.dev = dev->net; 374 dev->mii.dev = dev->net;
370 dev->mii.mdio_read = dm9601_mdio_read; 375 dev->mii.mdio_read = dm9601_mdio_read;
@@ -468,7 +473,7 @@ static int dm9601_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
468static struct sk_buff *dm9601_tx_fixup(struct usbnet *dev, struct sk_buff *skb, 473static struct sk_buff *dm9601_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
469 gfp_t flags) 474 gfp_t flags)
470{ 475{
471 int len; 476 int len, pad;
472 477
473 /* format: 478 /* format:
474 b1: packet length low 479 b1: packet length low
@@ -476,12 +481,23 @@ static struct sk_buff *dm9601_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
476 b3..n: packet data 481 b3..n: packet data
477 */ 482 */
478 483
479 len = skb->len; 484 len = skb->len + DM_TX_OVERHEAD;
485
486 /* workaround for dm962x errata with tx fifo getting out of
487 * sync if a USB bulk transfer retry happens right after a
488 * packet with odd / maxpacket length by adding up to 3 bytes
489 * padding.
490 */
491 while ((len & 1) || !(len % dev->maxpacket))
492 len++;
480 493
481 if (skb_headroom(skb) < DM_TX_OVERHEAD) { 494 len -= DM_TX_OVERHEAD; /* hw header doesn't count as part of length */
495 pad = len - skb->len;
496
497 if (skb_headroom(skb) < DM_TX_OVERHEAD || skb_tailroom(skb) < pad) {
482 struct sk_buff *skb2; 498 struct sk_buff *skb2;
483 499
484 skb2 = skb_copy_expand(skb, DM_TX_OVERHEAD, 0, flags); 500 skb2 = skb_copy_expand(skb, DM_TX_OVERHEAD, pad, flags);
485 dev_kfree_skb_any(skb); 501 dev_kfree_skb_any(skb);
486 skb = skb2; 502 skb = skb2;
487 if (!skb) 503 if (!skb)
@@ -490,10 +506,10 @@ static struct sk_buff *dm9601_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
490 506
491 __skb_push(skb, DM_TX_OVERHEAD); 507 __skb_push(skb, DM_TX_OVERHEAD);
492 508
493 /* usbnet adds padding if length is a multiple of packet size 509 if (pad) {
494 if so, adjust length value in header */ 510 memset(skb->data + skb->len, 0, pad);
495 if ((skb->len % dev->maxpacket) == 0) 511 __skb_put(skb, pad);
496 len++; 512 }
497 513
498 skb->data[0] = len; 514 skb->data[0] = len;
499 skb->data[1] = len >> 8; 515 skb->data[1] = len >> 8;
@@ -543,7 +559,7 @@ static int dm9601_link_reset(struct usbnet *dev)
543} 559}
544 560
545static const struct driver_info dm9601_info = { 561static const struct driver_info dm9601_info = {
546 .description = "Davicom DM9601 USB Ethernet", 562 .description = "Davicom DM96xx USB 10/100 Ethernet",
547 .flags = FLAG_ETHER | FLAG_LINK_INTR, 563 .flags = FLAG_ETHER | FLAG_LINK_INTR,
548 .bind = dm9601_bind, 564 .bind = dm9601_bind,
549 .rx_fixup = dm9601_rx_fixup, 565 .rx_fixup = dm9601_rx_fixup,
@@ -594,6 +610,10 @@ static const struct usb_device_id products[] = {
594 USB_DEVICE(0x0a46, 0x9620), /* DM9620 USB to Fast Ethernet Adapter */ 610 USB_DEVICE(0x0a46, 0x9620), /* DM9620 USB to Fast Ethernet Adapter */
595 .driver_info = (unsigned long)&dm9601_info, 611 .driver_info = (unsigned long)&dm9601_info,
596 }, 612 },
613 {
614 USB_DEVICE(0x0a46, 0x9621), /* DM9621A USB to Fast Ethernet Adapter */
615 .driver_info = (unsigned long)&dm9601_info,
616 },
597 {}, // END 617 {}, // END
598}; 618};
599 619
@@ -612,5 +632,5 @@ static struct usb_driver dm9601_driver = {
612module_usb_driver(dm9601_driver); 632module_usb_driver(dm9601_driver);
613 633
614MODULE_AUTHOR("Peter Korsgaard <jacmet@sunsite.dk>"); 634MODULE_AUTHOR("Peter Korsgaard <jacmet@sunsite.dk>");
615MODULE_DESCRIPTION("Davicom DM9601 USB 1.1 ethernet devices"); 635MODULE_DESCRIPTION("Davicom DM96xx USB 10/100 ethernet devices");
616MODULE_LICENSE("GPL"); 636MODULE_LICENSE("GPL");
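The tx_fixup change above encodes the DM962x FIFO workaround as a length rule: the on-wire length (payload plus the 2-byte hardware header) must be even and must not land exactly on a USB bulk maxpacket boundary. A minimal stand-alone sketch of that rule, written as hypothetical user-space C rather than the driver code itself:

#include <stdio.h>

#define DM_TX_OVERHEAD 2	/* 2-byte length header, as in the driver */

/* Padded payload length under the DM962x workaround: the on-wire length
 * (payload + header) must be even and must not be an exact multiple of
 * the bulk endpoint's max packet size.
 */
static int dm96xx_padded_len(int payload_len, int maxpacket)
{
	int len = payload_len + DM_TX_OVERHEAD;

	while ((len & 1) || !(len % maxpacket))
		len++;

	return len - DM_TX_OVERHEAD;	/* header is not counted in the length field */
}

int main(void)
{
	/* 62-byte frame on a 64-byte endpoint: 62 + 2 hits the maxpacket
	 * boundary, so padding grows the reported length to 64 (2 pad bytes).
	 */
	printf("%d\n", dm96xx_padded_len(62, 64));
	return 0;
}

The driver then zero-fills the pad bytes at the tail of the skb, which is why the headroom check in the hunk also gained a tailroom condition.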
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index 86292e6aaf49..1a482344b3f5 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -185,7 +185,6 @@ enum rx_ctrl_state{
185#define BM_REQUEST_TYPE (0xa1) 185#define BM_REQUEST_TYPE (0xa1)
186#define B_NOTIFICATION (0x20) 186#define B_NOTIFICATION (0x20)
187#define W_VALUE (0x0) 187#define W_VALUE (0x0)
188#define W_INDEX (0x2)
189#define W_LENGTH (0x2) 188#define W_LENGTH (0x2)
190 189
191#define B_OVERRUN (0x1<<6) 190#define B_OVERRUN (0x1<<6)
@@ -1487,6 +1486,7 @@ static void tiocmget_intr_callback(struct urb *urb)
1487 struct uart_icount *icount; 1486 struct uart_icount *icount;
1488 struct hso_serial_state_notification *serial_state_notification; 1487 struct hso_serial_state_notification *serial_state_notification;
1489 struct usb_device *usb; 1488 struct usb_device *usb;
1489 int if_num;
1490 1490
1491 /* Sanity checks */ 1491 /* Sanity checks */
1492 if (!serial) 1492 if (!serial)
@@ -1495,15 +1495,24 @@ static void tiocmget_intr_callback(struct urb *urb)
1495 handle_usb_error(status, __func__, serial->parent); 1495 handle_usb_error(status, __func__, serial->parent);
1496 return; 1496 return;
1497 } 1497 }
1498
1499 /* tiocmget is only supported on HSO_PORT_MODEM */
1498 tiocmget = serial->tiocmget; 1500 tiocmget = serial->tiocmget;
1499 if (!tiocmget) 1501 if (!tiocmget)
1500 return; 1502 return;
1503 BUG_ON((serial->parent->port_spec & HSO_PORT_MASK) != HSO_PORT_MODEM);
1504
1501 usb = serial->parent->usb; 1505 usb = serial->parent->usb;
1506 if_num = serial->parent->interface->altsetting->desc.bInterfaceNumber;
1507
1508 /* wIndex should be the USB interface number of the port to which the
1509 * notification applies, which should always be the Modem port.
1510 */
1502 serial_state_notification = &tiocmget->serial_state_notification; 1511 serial_state_notification = &tiocmget->serial_state_notification;
1503 if (serial_state_notification->bmRequestType != BM_REQUEST_TYPE || 1512 if (serial_state_notification->bmRequestType != BM_REQUEST_TYPE ||
1504 serial_state_notification->bNotification != B_NOTIFICATION || 1513 serial_state_notification->bNotification != B_NOTIFICATION ||
1505 le16_to_cpu(serial_state_notification->wValue) != W_VALUE || 1514 le16_to_cpu(serial_state_notification->wValue) != W_VALUE ||
1506 le16_to_cpu(serial_state_notification->wIndex) != W_INDEX || 1515 le16_to_cpu(serial_state_notification->wIndex) != if_num ||
1507 le16_to_cpu(serial_state_notification->wLength) != W_LENGTH) { 1516 le16_to_cpu(serial_state_notification->wLength) != W_LENGTH) {
1508 dev_warn(&usb->dev, 1517 dev_warn(&usb->dev,
1509 "hso received invalid serial state notification\n"); 1518 "hso received invalid serial state notification\n");
diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c
index 03832d3780aa..f54637828574 100644
--- a/drivers/net/usb/mcs7830.c
+++ b/drivers/net/usb/mcs7830.c
@@ -117,7 +117,6 @@ enum {
117struct mcs7830_data { 117struct mcs7830_data {
118 u8 multi_filter[8]; 118 u8 multi_filter[8];
119 u8 config; 119 u8 config;
120 u8 link_counter;
121}; 120};
122 121
123static const char driver_name[] = "MOSCHIP usb-ethernet driver"; 122static const char driver_name[] = "MOSCHIP usb-ethernet driver";
@@ -561,26 +560,16 @@ static void mcs7830_status(struct usbnet *dev, struct urb *urb)
561{ 560{
562 u8 *buf = urb->transfer_buffer; 561 u8 *buf = urb->transfer_buffer;
563 bool link, link_changed; 562 bool link, link_changed;
564 struct mcs7830_data *data = mcs7830_get_data(dev);
565 563
566 if (urb->actual_length < 16) 564 if (urb->actual_length < 16)
567 return; 565 return;
568 566
569 link = !(buf[1] & 0x20); 567 link = !(buf[1] == 0x20);
570 link_changed = netif_carrier_ok(dev->net) != link; 568 link_changed = netif_carrier_ok(dev->net) != link;
571 if (link_changed) { 569 if (link_changed) {
572 data->link_counter++; 570 usbnet_link_change(dev, link, 0);
573 /* 571 netdev_dbg(dev->net, "Link Status is: %d\n", link);
574 track link state 20 times to guard against erroneous 572 }
575 link state changes reported sometimes by the chip
576 */
577 if (data->link_counter > 20) {
578 data->link_counter = 0;
579 usbnet_link_change(dev, link, 0);
580 netdev_dbg(dev->net, "Link Status is: %d\n", link);
581 }
582 } else
583 data->link_counter = 0;
584} 573}
585 574
586static const struct driver_info moschip_info = { 575static const struct driver_info moschip_info = {
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 7bab4de658a9..5d776447d9c3 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -299,35 +299,76 @@ static struct sk_buff *page_to_skb(struct receive_queue *rq,
299 return skb; 299 return skb;
300} 300}
301 301
302static int receive_mergeable(struct receive_queue *rq, struct sk_buff *head_skb) 302static struct sk_buff *receive_small(void *buf, unsigned int len)
303{ 303{
304 struct skb_vnet_hdr *hdr = skb_vnet_hdr(head_skb); 304 struct sk_buff * skb = buf;
305
306 len -= sizeof(struct virtio_net_hdr);
307 skb_trim(skb, len);
308
309 return skb;
310}
311
312static struct sk_buff *receive_big(struct net_device *dev,
313 struct receive_queue *rq,
314 void *buf,
315 unsigned int len)
316{
317 struct page *page = buf;
318 struct sk_buff *skb = page_to_skb(rq, page, 0, len, PAGE_SIZE);
319
320 if (unlikely(!skb))
321 goto err;
322
323 return skb;
324
325err:
326 dev->stats.rx_dropped++;
327 give_pages(rq, page);
328 return NULL;
329}
330
331static struct sk_buff *receive_mergeable(struct net_device *dev,
332 struct receive_queue *rq,
333 void *buf,
334 unsigned int len)
335{
336 struct skb_vnet_hdr *hdr = buf;
337 int num_buf = hdr->mhdr.num_buffers;
338 struct page *page = virt_to_head_page(buf);
339 int offset = buf - page_address(page);
340 struct sk_buff *head_skb = page_to_skb(rq, page, offset, len,
341 MERGE_BUFFER_LEN);
305 struct sk_buff *curr_skb = head_skb; 342 struct sk_buff *curr_skb = head_skb;
306 char *buf;
307 struct page *page;
308 int num_buf, len, offset;
309 343
310 num_buf = hdr->mhdr.num_buffers; 344 if (unlikely(!curr_skb))
345 goto err_skb;
346
311 while (--num_buf) { 347 while (--num_buf) {
312 int num_skb_frags = skb_shinfo(curr_skb)->nr_frags; 348 int num_skb_frags;
349
313 buf = virtqueue_get_buf(rq->vq, &len); 350 buf = virtqueue_get_buf(rq->vq, &len);
314 if (unlikely(!buf)) { 351 if (unlikely(!buf)) {
315 pr_debug("%s: rx error: %d buffers missing\n", 352 pr_debug("%s: rx error: %d buffers out of %d missing\n",
316 head_skb->dev->name, hdr->mhdr.num_buffers); 353 dev->name, num_buf, hdr->mhdr.num_buffers);
317 head_skb->dev->stats.rx_length_errors++; 354 dev->stats.rx_length_errors++;
318 return -EINVAL; 355 goto err_buf;
319 } 356 }
320 if (unlikely(len > MERGE_BUFFER_LEN)) { 357 if (unlikely(len > MERGE_BUFFER_LEN)) {
321 pr_debug("%s: rx error: merge buffer too long\n", 358 pr_debug("%s: rx error: merge buffer too long\n",
322 head_skb->dev->name); 359 dev->name);
323 len = MERGE_BUFFER_LEN; 360 len = MERGE_BUFFER_LEN;
324 } 361 }
362
363 page = virt_to_head_page(buf);
364 --rq->num;
365
366 num_skb_frags = skb_shinfo(curr_skb)->nr_frags;
325 if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) { 367 if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) {
326 struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC); 368 struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC);
327 if (unlikely(!nskb)) { 369
328 head_skb->dev->stats.rx_dropped++; 370 if (unlikely(!nskb))
329 return -ENOMEM; 371 goto err_skb;
330 }
331 if (curr_skb == head_skb) 372 if (curr_skb == head_skb)
332 skb_shinfo(curr_skb)->frag_list = nskb; 373 skb_shinfo(curr_skb)->frag_list = nskb;
333 else 374 else
@@ -341,8 +382,7 @@ static int receive_mergeable(struct receive_queue *rq, struct sk_buff *head_skb)
341 head_skb->len += len; 382 head_skb->len += len;
342 head_skb->truesize += MERGE_BUFFER_LEN; 383 head_skb->truesize += MERGE_BUFFER_LEN;
343 } 384 }
344 page = virt_to_head_page(buf); 385 offset = buf - page_address(page);
345 offset = buf - (char *)page_address(page);
346 if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) { 386 if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) {
347 put_page(page); 387 put_page(page);
348 skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1, 388 skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1,
@@ -351,9 +391,28 @@ static int receive_mergeable(struct receive_queue *rq, struct sk_buff *head_skb)
351 skb_add_rx_frag(curr_skb, num_skb_frags, page, 391 skb_add_rx_frag(curr_skb, num_skb_frags, page,
352 offset, len, MERGE_BUFFER_LEN); 392 offset, len, MERGE_BUFFER_LEN);
353 } 393 }
394 }
395
396 return head_skb;
397
398err_skb:
399 put_page(page);
400 while (--num_buf) {
401 buf = virtqueue_get_buf(rq->vq, &len);
402 if (unlikely(!buf)) {
403 pr_debug("%s: rx error: %d buffers missing\n",
404 dev->name, num_buf);
405 dev->stats.rx_length_errors++;
406 break;
407 }
408 page = virt_to_head_page(buf);
409 put_page(page);
354 --rq->num; 410 --rq->num;
355 } 411 }
356 return 0; 412err_buf:
413 dev->stats.rx_dropped++;
414 dev_kfree_skb(head_skb);
415 return NULL;
357} 416}
358 417
359static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len) 418static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
@@ -362,48 +421,29 @@ static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
362 struct net_device *dev = vi->dev; 421 struct net_device *dev = vi->dev;
363 struct virtnet_stats *stats = this_cpu_ptr(vi->stats); 422 struct virtnet_stats *stats = this_cpu_ptr(vi->stats);
364 struct sk_buff *skb; 423 struct sk_buff *skb;
365 struct page *page;
366 struct skb_vnet_hdr *hdr; 424 struct skb_vnet_hdr *hdr;
367 425
368 if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) { 426 if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) {
369 pr_debug("%s: short packet %i\n", dev->name, len); 427 pr_debug("%s: short packet %i\n", dev->name, len);
370 dev->stats.rx_length_errors++; 428 dev->stats.rx_length_errors++;
371 if (vi->big_packets) 429 if (vi->mergeable_rx_bufs)
372 give_pages(rq, buf);
373 else if (vi->mergeable_rx_bufs)
374 put_page(virt_to_head_page(buf)); 430 put_page(virt_to_head_page(buf));
431 else if (vi->big_packets)
432 give_pages(rq, buf);
375 else 433 else
376 dev_kfree_skb(buf); 434 dev_kfree_skb(buf);
377 return; 435 return;
378 } 436 }
379 437
380 if (!vi->mergeable_rx_bufs && !vi->big_packets) { 438 if (vi->mergeable_rx_bufs)
381 skb = buf; 439 skb = receive_mergeable(dev, rq, buf, len);
382 len -= sizeof(struct virtio_net_hdr); 440 else if (vi->big_packets)
383 skb_trim(skb, len); 441 skb = receive_big(dev, rq, buf, len);
384 } else if (vi->mergeable_rx_bufs) { 442 else
385 struct page *page = virt_to_head_page(buf); 443 skb = receive_small(buf, len);
386 skb = page_to_skb(rq, page, 444
387 (char *)buf - (char *)page_address(page), 445 if (unlikely(!skb))
388 len, MERGE_BUFFER_LEN); 446 return;
389 if (unlikely(!skb)) {
390 dev->stats.rx_dropped++;
391 put_page(page);
392 return;
393 }
394 if (receive_mergeable(rq, skb)) {
395 dev_kfree_skb(skb);
396 return;
397 }
398 } else {
399 page = buf;
400 skb = page_to_skb(rq, page, 0, len, PAGE_SIZE);
401 if (unlikely(!skb)) {
402 dev->stats.rx_dropped++;
403 give_pages(rq, page);
404 return;
405 }
406 }
407 447
408 hdr = skb_vnet_hdr(skb); 448 hdr = skb_vnet_hdr(skb);
409 449
@@ -1084,7 +1124,7 @@ static void virtnet_set_rx_mode(struct net_device *dev)
1084 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC, 1124 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
1085 VIRTIO_NET_CTRL_MAC_TABLE_SET, 1125 VIRTIO_NET_CTRL_MAC_TABLE_SET,
1086 sg, NULL)) 1126 sg, NULL))
1087 dev_warn(&dev->dev, "Failed to set MAC fitler table.\n"); 1127 dev_warn(&dev->dev, "Failed to set MAC filter table.\n");
1088 1128
1089 kfree(buf); 1129 kfree(buf);
1090} 1130}
@@ -1327,6 +1367,11 @@ static void virtnet_config_changed(struct virtio_device *vdev)
1327 1367
1328static void virtnet_free_queues(struct virtnet_info *vi) 1368static void virtnet_free_queues(struct virtnet_info *vi)
1329{ 1369{
1370 int i;
1371
1372 for (i = 0; i < vi->max_queue_pairs; i++)
1373 netif_napi_del(&vi->rq[i].napi);
1374
1330 kfree(vi->rq); 1375 kfree(vi->rq);
1331 kfree(vi->sq); 1376 kfree(vi->sq);
1332} 1377}
@@ -1356,10 +1401,10 @@ static void free_unused_bufs(struct virtnet_info *vi)
1356 struct virtqueue *vq = vi->rq[i].vq; 1401 struct virtqueue *vq = vi->rq[i].vq;
1357 1402
1358 while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) { 1403 while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
1359 if (vi->big_packets) 1404 if (vi->mergeable_rx_bufs)
1360 give_pages(&vi->rq[i], buf);
1361 else if (vi->mergeable_rx_bufs)
1362 put_page(virt_to_head_page(buf)); 1405 put_page(virt_to_head_page(buf));
1406 else if (vi->big_packets)
1407 give_pages(&vi->rq[i], buf);
1363 else 1408 else
1364 dev_kfree_skb(buf); 1409 dev_kfree_skb(buf);
1365 --vi->rq[i].num; 1410 --vi->rq[i].num;
@@ -1752,16 +1797,17 @@ static int virtnet_restore(struct virtio_device *vdev)
1752 if (err) 1797 if (err)
1753 return err; 1798 return err;
1754 1799
1755 if (netif_running(vi->dev)) 1800 if (netif_running(vi->dev)) {
1801 for (i = 0; i < vi->curr_queue_pairs; i++)
1802 if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
1803 schedule_delayed_work(&vi->refill, 0);
1804
1756 for (i = 0; i < vi->max_queue_pairs; i++) 1805 for (i = 0; i < vi->max_queue_pairs; i++)
1757 virtnet_napi_enable(&vi->rq[i]); 1806 virtnet_napi_enable(&vi->rq[i]);
1807 }
1758 1808
1759 netif_device_attach(vi->dev); 1809 netif_device_attach(vi->dev);
1760 1810
1761 for (i = 0; i < vi->curr_queue_pairs; i++)
1762 if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
1763 schedule_delayed_work(&vi->refill, 0);
1764
1765 mutex_lock(&vi->config_lock); 1811 mutex_lock(&vi->config_lock);
1766 vi->config_enable = true; 1812 vi->config_enable = true;
1767 mutex_unlock(&vi->config_lock); 1813 mutex_unlock(&vi->config_lock);
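The virtio_net refactor splits the old monolithic receive path into three helpers, one per buffer layout, and moves error accounting into each helper. A sketch of the resulting dispatch shape, with stub declarations standing in for the real driver types and signatures (the actual helpers also take the device and receive queue):

struct sk_buff;		/* opaque here; the real type comes from <linux/skbuff.h> */

enum rx_mode { RX_SMALL, RX_BIG, RX_MERGEABLE };

/* Stubs for the three per-layout receive helpers added by the patch. */
struct sk_buff *receive_small(void *buf, unsigned int len);
struct sk_buff *receive_big(void *buf, unsigned int len);
struct sk_buff *receive_mergeable(void *buf, unsigned int len);

/* Each helper owns its own error handling and returns NULL on failure,
 * so the caller only needs a single "did we get an skb" check.
 */
struct sk_buff *dispatch_rx(enum rx_mode mode, void *buf, unsigned int len)
{
	switch (mode) {
	case RX_MERGEABLE:
		return receive_mergeable(buf, len);	/* per-page merge buffers */
	case RX_BIG:
		return receive_big(buf, len);		/* chained full pages */
	default:
		return receive_small(buf, len);		/* skb posted directly */
	}
}

In the real code the mode is chosen from vi->mergeable_rx_bufs and vi->big_packets, and the mergeable helper additionally drains any remaining merge buffers from the virtqueue before dropping the packet, as the err_skb/err_buf path above shows.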
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 0358c07f7669..ed384fee76ac 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -1668,7 +1668,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
1668 netdev_dbg(dev, "circular route to %pI4\n", 1668 netdev_dbg(dev, "circular route to %pI4\n",
1669 &dst->sin.sin_addr.s_addr); 1669 &dst->sin.sin_addr.s_addr);
1670 dev->stats.collisions++; 1670 dev->stats.collisions++;
1671 goto tx_error; 1671 goto rt_tx_error;
1672 } 1672 }
1673 1673
1674 /* Bypass encapsulation if the destination is local */ 1674 /* Bypass encapsulation if the destination is local */
@@ -2440,7 +2440,8 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
2440 /* update header length based on lower device */ 2440 /* update header length based on lower device */
2441 dev->hard_header_len = lowerdev->hard_header_len + 2441 dev->hard_header_len = lowerdev->hard_header_len +
2442 (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM); 2442 (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);
2443 } 2443 } else if (use_ipv6)
2444 vxlan->flags |= VXLAN_F_IPV6;
2444 2445
2445 if (data[IFLA_VXLAN_TOS]) 2446 if (data[IFLA_VXLAN_TOS])
2446 vxlan->tos = nla_get_u8(data[IFLA_VXLAN_TOS]); 2447 vxlan->tos = nla_get_u8(data[IFLA_VXLAN_TOS]);
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
index 8d78253c26ce..a366d6b4626f 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
@@ -76,9 +76,16 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
76 mask2 |= ATH9K_INT_CST; 76 mask2 |= ATH9K_INT_CST;
77 if (isr2 & AR_ISR_S2_TSFOOR) 77 if (isr2 & AR_ISR_S2_TSFOOR)
78 mask2 |= ATH9K_INT_TSFOOR; 78 mask2 |= ATH9K_INT_TSFOOR;
79
80 if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) {
81 REG_WRITE(ah, AR_ISR_S2, isr2);
82 isr &= ~AR_ISR_BCNMISC;
83 }
79 } 84 }
80 85
81 isr = REG_READ(ah, AR_ISR_RAC); 86 if (pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)
87 isr = REG_READ(ah, AR_ISR_RAC);
88
82 if (isr == 0xffffffff) { 89 if (isr == 0xffffffff) {
83 *masked = 0; 90 *masked = 0;
84 return false; 91 return false;
@@ -97,11 +104,23 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
97 104
98 *masked |= ATH9K_INT_TX; 105 *masked |= ATH9K_INT_TX;
99 106
100 s0_s = REG_READ(ah, AR_ISR_S0_S); 107 if (pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED) {
108 s0_s = REG_READ(ah, AR_ISR_S0_S);
109 s1_s = REG_READ(ah, AR_ISR_S1_S);
110 } else {
111 s0_s = REG_READ(ah, AR_ISR_S0);
112 REG_WRITE(ah, AR_ISR_S0, s0_s);
113 s1_s = REG_READ(ah, AR_ISR_S1);
114 REG_WRITE(ah, AR_ISR_S1, s1_s);
115
116 isr &= ~(AR_ISR_TXOK |
117 AR_ISR_TXDESC |
118 AR_ISR_TXERR |
119 AR_ISR_TXEOL);
120 }
121
101 ah->intr_txqs |= MS(s0_s, AR_ISR_S0_QCU_TXOK); 122 ah->intr_txqs |= MS(s0_s, AR_ISR_S0_QCU_TXOK);
102 ah->intr_txqs |= MS(s0_s, AR_ISR_S0_QCU_TXDESC); 123 ah->intr_txqs |= MS(s0_s, AR_ISR_S0_QCU_TXDESC);
103
104 s1_s = REG_READ(ah, AR_ISR_S1_S);
105 ah->intr_txqs |= MS(s1_s, AR_ISR_S1_QCU_TXERR); 124 ah->intr_txqs |= MS(s1_s, AR_ISR_S1_QCU_TXERR);
106 ah->intr_txqs |= MS(s1_s, AR_ISR_S1_QCU_TXEOL); 125 ah->intr_txqs |= MS(s1_s, AR_ISR_S1_QCU_TXEOL);
107 } 126 }
@@ -114,13 +133,15 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
114 *masked |= mask2; 133 *masked |= mask2;
115 } 134 }
116 135
117 if (AR_SREV_9100(ah)) 136 if (!AR_SREV_9100(ah) && (isr & AR_ISR_GENTMR)) {
118 return true;
119
120 if (isr & AR_ISR_GENTMR) {
121 u32 s5_s; 137 u32 s5_s;
122 138
123 s5_s = REG_READ(ah, AR_ISR_S5_S); 139 if (pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED) {
140 s5_s = REG_READ(ah, AR_ISR_S5_S);
141 } else {
142 s5_s = REG_READ(ah, AR_ISR_S5);
143 }
144
124 ah->intr_gen_timer_trigger = 145 ah->intr_gen_timer_trigger =
125 MS(s5_s, AR_ISR_S5_GENTIMER_TRIG); 146 MS(s5_s, AR_ISR_S5_GENTIMER_TRIG);
126 147
@@ -133,8 +154,21 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
133 if ((s5_s & AR_ISR_S5_TIM_TIMER) && 154 if ((s5_s & AR_ISR_S5_TIM_TIMER) &&
134 !(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) 155 !(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
135 *masked |= ATH9K_INT_TIM_TIMER; 156 *masked |= ATH9K_INT_TIM_TIMER;
157
158 if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) {
159 REG_WRITE(ah, AR_ISR_S5, s5_s);
160 isr &= ~AR_ISR_GENTMR;
161 }
136 } 162 }
137 163
164 if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) {
165 REG_WRITE(ah, AR_ISR, isr);
166 REG_READ(ah, AR_ISR);
167 }
168
169 if (AR_SREV_9100(ah))
170 return true;
171
138 if (sync_cause) { 172 if (sync_cause) {
139 ath9k_debug_sync_cause(common, sync_cause); 173 ath9k_debug_sync_cause(common, sync_cause);
140 fatal_int = 174 fatal_int =
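The ar9002 interrupt rework distinguishes chips whose shadow ISR registers clear on read (RAC, "read and clear") from chips where the driver must write the observed bits back to acknowledge them. A rough sketch of the two acknowledge styles, with hypothetical register accessors standing in for REG_READ/REG_WRITE on the AR_ISR* registers:

#include <stdint.h>

uint32_t reg_read(unsigned int addr);
void reg_write(unsigned int addr, uint32_t val);

/* Sketch only: secondary status read for RAC vs. non-RAC parts. */
uint32_t read_secondary_isr(int have_rac, unsigned int reg, unsigned int shadow_reg)
{
	uint32_t val;

	if (have_rac)
		return reg_read(shadow_reg);	/* shadow copy clears on read */

	val = reg_read(reg);
	reg_write(reg, val);			/* write back to ack what we saw */
	return val;
}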
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index 1ec52356b5a1..130657db5c43 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -3984,18 +3984,20 @@ static void ar9003_hw_quick_drop_apply(struct ath_hw *ah, u16 freq)
3984 int quick_drop; 3984 int quick_drop;
3985 s32 t[3], f[3] = {5180, 5500, 5785}; 3985 s32 t[3], f[3] = {5180, 5500, 5785};
3986 3986
3987 if (!(pBase->miscConfiguration & BIT(1))) 3987 if (!(pBase->miscConfiguration & BIT(4)))
3988 return; 3988 return;
3989 3989
3990 if (freq < 4000) 3990 if (AR_SREV_9300(ah) || AR_SREV_9580(ah) || AR_SREV_9340(ah)) {
3991 quick_drop = eep->modalHeader2G.quick_drop; 3991 if (freq < 4000) {
3992 else { 3992 quick_drop = eep->modalHeader2G.quick_drop;
3993 t[0] = eep->base_ext1.quick_drop_low; 3993 } else {
3994 t[1] = eep->modalHeader5G.quick_drop; 3994 t[0] = eep->base_ext1.quick_drop_low;
3995 t[2] = eep->base_ext1.quick_drop_high; 3995 t[1] = eep->modalHeader5G.quick_drop;
3996 quick_drop = ar9003_hw_power_interpolate(freq, f, t, 3); 3996 t[2] = eep->base_ext1.quick_drop_high;
3997 quick_drop = ar9003_hw_power_interpolate(freq, f, t, 3);
3998 }
3999 REG_RMW_FIELD(ah, AR_PHY_AGC, AR_PHY_AGC_QUICK_DROP, quick_drop);
3997 } 4000 }
3998 REG_RMW_FIELD(ah, AR_PHY_AGC, AR_PHY_AGC_QUICK_DROP, quick_drop);
3999} 4001}
4000 4002
4001static void ar9003_hw_txend_to_xpa_off_apply(struct ath_hw *ah, bool is2ghz) 4003static void ar9003_hw_txend_to_xpa_off_apply(struct ath_hw *ah, bool is2ghz)
@@ -4035,7 +4037,7 @@ static void ar9003_hw_xlna_bias_strength_apply(struct ath_hw *ah, bool is2ghz)
4035 struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep; 4037 struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
4036 u8 bias; 4038 u8 bias;
4037 4039
4038 if (!(eep->baseEepHeader.featureEnable & 0x40)) 4040 if (!(eep->baseEepHeader.miscConfiguration & 0x40))
4039 return; 4041 return;
4040 4042
4041 if (!AR_SREV_9300(ah)) 4043 if (!AR_SREV_9300(ah))
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index 9a2657fdd9cc..608d739d1378 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -127,21 +127,26 @@ static void ath9k_htc_bssid_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
127 struct ath9k_vif_iter_data *iter_data = data; 127 struct ath9k_vif_iter_data *iter_data = data;
128 int i; 128 int i;
129 129
130 for (i = 0; i < ETH_ALEN; i++) 130 if (iter_data->hw_macaddr != NULL) {
131 iter_data->mask[i] &= ~(iter_data->hw_macaddr[i] ^ mac[i]); 131 for (i = 0; i < ETH_ALEN; i++)
132 iter_data->mask[i] &= ~(iter_data->hw_macaddr[i] ^ mac[i]);
133 } else {
134 iter_data->hw_macaddr = mac;
135 }
132} 136}
133 137
134static void ath9k_htc_set_bssid_mask(struct ath9k_htc_priv *priv, 138static void ath9k_htc_set_mac_bssid_mask(struct ath9k_htc_priv *priv,
135 struct ieee80211_vif *vif) 139 struct ieee80211_vif *vif)
136{ 140{
137 struct ath_common *common = ath9k_hw_common(priv->ah); 141 struct ath_common *common = ath9k_hw_common(priv->ah);
138 struct ath9k_vif_iter_data iter_data; 142 struct ath9k_vif_iter_data iter_data;
139 143
140 /* 144 /*
141 * Use the hardware MAC address as reference, the hardware uses it 145 * Pick the MAC address of the first interface as the new hardware
142 * together with the BSSID mask when matching addresses. 146 * MAC address. The hardware will use it together with the BSSID mask
147 * when matching addresses.
143 */ 148 */
144 iter_data.hw_macaddr = common->macaddr; 149 iter_data.hw_macaddr = NULL;
145 memset(&iter_data.mask, 0xff, ETH_ALEN); 150 memset(&iter_data.mask, 0xff, ETH_ALEN);
146 151
147 if (vif) 152 if (vif)
@@ -153,6 +158,10 @@ static void ath9k_htc_set_bssid_mask(struct ath9k_htc_priv *priv,
153 ath9k_htc_bssid_iter, &iter_data); 158 ath9k_htc_bssid_iter, &iter_data);
154 159
155 memcpy(common->bssidmask, iter_data.mask, ETH_ALEN); 160 memcpy(common->bssidmask, iter_data.mask, ETH_ALEN);
161
162 if (iter_data.hw_macaddr)
163 memcpy(common->macaddr, iter_data.hw_macaddr, ETH_ALEN);
164
156 ath_hw_setbssidmask(common); 165 ath_hw_setbssidmask(common);
157} 166}
158 167
@@ -1063,7 +1072,7 @@ static int ath9k_htc_add_interface(struct ieee80211_hw *hw,
1063 goto out; 1072 goto out;
1064 } 1073 }
1065 1074
1066 ath9k_htc_set_bssid_mask(priv, vif); 1075 ath9k_htc_set_mac_bssid_mask(priv, vif);
1067 1076
1068 priv->vif_slot |= (1 << avp->index); 1077 priv->vif_slot |= (1 << avp->index);
1069 priv->nvifs++; 1078 priv->nvifs++;
@@ -1128,7 +1137,7 @@ static void ath9k_htc_remove_interface(struct ieee80211_hw *hw,
1128 1137
1129 ath9k_htc_set_opmode(priv); 1138 ath9k_htc_set_opmode(priv);
1130 1139
1131 ath9k_htc_set_bssid_mask(priv, vif); 1140 ath9k_htc_set_mac_bssid_mask(priv, vif);
1132 1141
1133 /* 1142 /*
1134 * Stop ANI only if there are no associated station interfaces. 1143 * Stop ANI only if there are no associated station interfaces.
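Both the HTC and the main ath9k drivers now derive the hardware MAC address from the first interface and then widen the BSSID mask so that the remaining interface addresses still match. The mask arithmetic is simple enough to show as a stand-alone example (illustrative user-space C, not the driver code):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define ETH_ALEN 6

/* Build the BSSID mask: start from all-ones, then clear every bit in
 * which any active interface address differs from the reference
 * (first) address.
 */
static void build_bssid_mask(const uint8_t addrs[][ETH_ALEN], int n,
			     uint8_t ref[ETH_ALEN], uint8_t mask[ETH_ALEN])
{
	int i, j;

	memcpy(ref, addrs[0], ETH_ALEN);	/* first interface becomes the MAC */
	memset(mask, 0xff, ETH_ALEN);

	for (i = 1; i < n; i++)
		for (j = 0; j < ETH_ALEN; j++)
			mask[j] &= ~(ref[j] ^ addrs[i][j]);
}

int main(void)
{
	const uint8_t addrs[2][ETH_ALEN] = {
		{ 0x02, 0x11, 0x22, 0x33, 0x44, 0x00 },
		{ 0x02, 0x11, 0x22, 0x33, 0x44, 0x01 },
	};
	uint8_t ref[ETH_ALEN], mask[ETH_ALEN];
	int j;

	build_bssid_mask(addrs, 2, ref, mask);
	for (j = 0; j < ETH_ALEN; j++)
		printf("%02x%s", mask[j], j == ETH_ALEN - 1 ? "\n" : ":");
	return 0;
}

With two addresses differing only in the lowest bit of the last octet, the mask comes out as ff:ff:ff:ff:ff:fe, i.e. the hardware ignores exactly that bit when matching.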
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 54b04155e43b..8918035da3a3 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -146,10 +146,9 @@ static void ath9k_hw_set_clockrate(struct ath_hw *ah)
146 else 146 else
147 clockrate = ATH9K_CLOCK_RATE_5GHZ_OFDM; 147 clockrate = ATH9K_CLOCK_RATE_5GHZ_OFDM;
148 148
149 if (IS_CHAN_HT40(chan)) 149 if (chan) {
150 clockrate *= 2; 150 if (IS_CHAN_HT40(chan))
151 151 clockrate *= 2;
152 if (ah->curchan) {
153 if (IS_CHAN_HALF_RATE(chan)) 152 if (IS_CHAN_HALF_RATE(chan))
154 clockrate /= 2; 153 clockrate /= 2;
155 if (IS_CHAN_QUARTER_RATE(chan)) 154 if (IS_CHAN_QUARTER_RATE(chan))
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 74f452c7b166..21aa09e0e825 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -965,8 +965,9 @@ void ath9k_calculate_iter_data(struct ieee80211_hw *hw,
965 struct ath_common *common = ath9k_hw_common(ah); 965 struct ath_common *common = ath9k_hw_common(ah);
966 966
967 /* 967 /*
968 * Use the hardware MAC address as reference, the hardware uses it 968 * Pick the MAC address of the first interface as the new hardware
969 * together with the BSSID mask when matching addresses. 969 * MAC address. The hardware will use it together with the BSSID mask
970 * when matching addresses.
970 */ 971 */
971 memset(iter_data, 0, sizeof(*iter_data)); 972 memset(iter_data, 0, sizeof(*iter_data));
972 memset(&iter_data->mask, 0xff, ETH_ALEN); 973 memset(&iter_data->mask, 0xff, ETH_ALEN);
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 09cdbcd09739..b5a19e098f2d 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -1276,6 +1276,10 @@ static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf,
1276 if (!rts_thresh || (len > rts_thresh)) 1276 if (!rts_thresh || (len > rts_thresh))
1277 rts = true; 1277 rts = true;
1278 } 1278 }
1279
1280 if (!aggr)
1281 len = fi->framelen;
1282
1279 ath_buf_set_rate(sc, bf, &info, len, rts); 1283 ath_buf_set_rate(sc, bf, &info, len, rts);
1280 } 1284 }
1281 1285
diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c
index de9eb2cfbf4b..366339421d4f 100644
--- a/drivers/net/wireless/ath/wcn36xx/smd.c
+++ b/drivers/net/wireless/ath/wcn36xx/smd.c
@@ -2041,13 +2041,20 @@ static void wcn36xx_smd_rsp_process(struct wcn36xx *wcn, void *buf, size_t len)
2041 case WCN36XX_HAL_DELETE_STA_CONTEXT_IND: 2041 case WCN36XX_HAL_DELETE_STA_CONTEXT_IND:
2042 mutex_lock(&wcn->hal_ind_mutex); 2042 mutex_lock(&wcn->hal_ind_mutex);
2043 msg_ind = kmalloc(sizeof(*msg_ind), GFP_KERNEL); 2043 msg_ind = kmalloc(sizeof(*msg_ind), GFP_KERNEL);
2044 msg_ind->msg_len = len; 2044 if (msg_ind) {
2045 msg_ind->msg = kmalloc(len, GFP_KERNEL); 2045 msg_ind->msg_len = len;
2046 memcpy(msg_ind->msg, buf, len); 2046 msg_ind->msg = kmalloc(len, GFP_KERNEL);
2047 list_add_tail(&msg_ind->list, &wcn->hal_ind_queue); 2047 memcpy(msg_ind->msg, buf, len);
2048 queue_work(wcn->hal_ind_wq, &wcn->hal_ind_work); 2048 list_add_tail(&msg_ind->list, &wcn->hal_ind_queue);
2049 wcn36xx_dbg(WCN36XX_DBG_HAL, "indication arrived\n"); 2049 queue_work(wcn->hal_ind_wq, &wcn->hal_ind_work);
2050 wcn36xx_dbg(WCN36XX_DBG_HAL, "indication arrived\n");
2051 }
2050 mutex_unlock(&wcn->hal_ind_mutex); 2052 mutex_unlock(&wcn->hal_ind_mutex);
2053 if (msg_ind)
2054 break;
2055 /* FIXME: Do something smarter then just printing an error. */
2056 wcn36xx_err("Run out of memory while handling SMD_EVENT (%d)\n",
2057 msg_header->msg_type);
2051 break; 2058 break;
2052 default: 2059 default:
2053 wcn36xx_err("SMD_EVENT (%d) not supported\n", 2060 wcn36xx_err("SMD_EVENT (%d) not supported\n",
diff --git a/drivers/net/wireless/brcm80211/Kconfig b/drivers/net/wireless/brcm80211/Kconfig
index b00a7e92225f..54e36fcb3954 100644
--- a/drivers/net/wireless/brcm80211/Kconfig
+++ b/drivers/net/wireless/brcm80211/Kconfig
@@ -5,6 +5,8 @@ config BRCMSMAC
5 tristate "Broadcom IEEE802.11n PCIe SoftMAC WLAN driver" 5 tristate "Broadcom IEEE802.11n PCIe SoftMAC WLAN driver"
6 depends on MAC80211 6 depends on MAC80211
7 depends on BCMA 7 depends on BCMA
8 select NEW_LEDS if BCMA_DRIVER_GPIO
9 select LEDS_CLASS if BCMA_DRIVER_GPIO
8 select BRCMUTIL 10 select BRCMUTIL
9 select FW_LOADER 11 select FW_LOADER
10 select CRC_CCITT 12 select CRC_CCITT
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
index 905704e335d7..abc9ceca70f3 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
@@ -109,6 +109,8 @@ static inline int brcmf_sdioh_f0_write_byte(struct brcmf_sdio_dev *sdiodev,
109 brcmf_err("Disable F2 failed:%d\n", 109 brcmf_err("Disable F2 failed:%d\n",
110 err_ret); 110 err_ret);
111 } 111 }
112 } else {
113 err_ret = -ENOENT;
112 } 114 }
113 } else if ((regaddr == SDIO_CCCR_ABORT) || 115 } else if ((regaddr == SDIO_CCCR_ABORT) ||
114 (regaddr == SDIO_CCCR_IENx)) { 116 (regaddr == SDIO_CCCR_IENx)) {
diff --git a/drivers/net/wireless/iwlwifi/iwl-7000.c b/drivers/net/wireless/iwlwifi/iwl-7000.c
index 85879dbaa402..3c34a72a5d64 100644
--- a/drivers/net/wireless/iwlwifi/iwl-7000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-7000.c
@@ -67,8 +67,8 @@
67#include "iwl-agn-hw.h" 67#include "iwl-agn-hw.h"
68 68
69/* Highest firmware API version supported */ 69/* Highest firmware API version supported */
70#define IWL7260_UCODE_API_MAX 7 70#define IWL7260_UCODE_API_MAX 8
71#define IWL3160_UCODE_API_MAX 7 71#define IWL3160_UCODE_API_MAX 8
72 72
73/* Oldest version we won't warn about */ 73/* Oldest version we won't warn about */
74#define IWL7260_UCODE_API_OK 7 74#define IWL7260_UCODE_API_OK 7
@@ -130,6 +130,7 @@ const struct iwl_cfg iwl7260_2ac_cfg = {
130 .ht_params = &iwl7000_ht_params, 130 .ht_params = &iwl7000_ht_params,
131 .nvm_ver = IWL7260_NVM_VERSION, 131 .nvm_ver = IWL7260_NVM_VERSION,
132 .nvm_calib_ver = IWL7260_TX_POWER_VERSION, 132 .nvm_calib_ver = IWL7260_TX_POWER_VERSION,
133 .host_interrupt_operation_mode = true,
133}; 134};
134 135
135const struct iwl_cfg iwl7260_2ac_cfg_high_temp = { 136const struct iwl_cfg iwl7260_2ac_cfg_high_temp = {
@@ -140,6 +141,7 @@ const struct iwl_cfg iwl7260_2ac_cfg_high_temp = {
140 .nvm_ver = IWL7260_NVM_VERSION, 141 .nvm_ver = IWL7260_NVM_VERSION,
141 .nvm_calib_ver = IWL7260_TX_POWER_VERSION, 142 .nvm_calib_ver = IWL7260_TX_POWER_VERSION,
142 .high_temp = true, 143 .high_temp = true,
144 .host_interrupt_operation_mode = true,
143}; 145};
144 146
145const struct iwl_cfg iwl7260_2n_cfg = { 147const struct iwl_cfg iwl7260_2n_cfg = {
@@ -149,6 +151,7 @@ const struct iwl_cfg iwl7260_2n_cfg = {
149 .ht_params = &iwl7000_ht_params, 151 .ht_params = &iwl7000_ht_params,
150 .nvm_ver = IWL7260_NVM_VERSION, 152 .nvm_ver = IWL7260_NVM_VERSION,
151 .nvm_calib_ver = IWL7260_TX_POWER_VERSION, 153 .nvm_calib_ver = IWL7260_TX_POWER_VERSION,
154 .host_interrupt_operation_mode = true,
152}; 155};
153 156
154const struct iwl_cfg iwl7260_n_cfg = { 157const struct iwl_cfg iwl7260_n_cfg = {
@@ -158,6 +161,7 @@ const struct iwl_cfg iwl7260_n_cfg = {
158 .ht_params = &iwl7000_ht_params, 161 .ht_params = &iwl7000_ht_params,
159 .nvm_ver = IWL7260_NVM_VERSION, 162 .nvm_ver = IWL7260_NVM_VERSION,
160 .nvm_calib_ver = IWL7260_TX_POWER_VERSION, 163 .nvm_calib_ver = IWL7260_TX_POWER_VERSION,
164 .host_interrupt_operation_mode = true,
161}; 165};
162 166
163const struct iwl_cfg iwl3160_2ac_cfg = { 167const struct iwl_cfg iwl3160_2ac_cfg = {
@@ -167,6 +171,7 @@ const struct iwl_cfg iwl3160_2ac_cfg = {
167 .ht_params = &iwl7000_ht_params, 171 .ht_params = &iwl7000_ht_params,
168 .nvm_ver = IWL3160_NVM_VERSION, 172 .nvm_ver = IWL3160_NVM_VERSION,
169 .nvm_calib_ver = IWL3160_TX_POWER_VERSION, 173 .nvm_calib_ver = IWL3160_TX_POWER_VERSION,
174 .host_interrupt_operation_mode = true,
170}; 175};
171 176
172const struct iwl_cfg iwl3160_2n_cfg = { 177const struct iwl_cfg iwl3160_2n_cfg = {
@@ -176,6 +181,7 @@ const struct iwl_cfg iwl3160_2n_cfg = {
176 .ht_params = &iwl7000_ht_params, 181 .ht_params = &iwl7000_ht_params,
177 .nvm_ver = IWL3160_NVM_VERSION, 182 .nvm_ver = IWL3160_NVM_VERSION,
178 .nvm_calib_ver = IWL3160_TX_POWER_VERSION, 183 .nvm_calib_ver = IWL3160_TX_POWER_VERSION,
184 .host_interrupt_operation_mode = true,
179}; 185};
180 186
181const struct iwl_cfg iwl3160_n_cfg = { 187const struct iwl_cfg iwl3160_n_cfg = {
@@ -185,6 +191,7 @@ const struct iwl_cfg iwl3160_n_cfg = {
185 .ht_params = &iwl7000_ht_params, 191 .ht_params = &iwl7000_ht_params,
186 .nvm_ver = IWL3160_NVM_VERSION, 192 .nvm_ver = IWL3160_NVM_VERSION,
187 .nvm_calib_ver = IWL3160_TX_POWER_VERSION, 193 .nvm_calib_ver = IWL3160_TX_POWER_VERSION,
194 .host_interrupt_operation_mode = true,
188}; 195};
189 196
190const struct iwl_cfg iwl7265_2ac_cfg = { 197const struct iwl_cfg iwl7265_2ac_cfg = {
@@ -196,5 +203,23 @@ const struct iwl_cfg iwl7265_2ac_cfg = {
196 .nvm_calib_ver = IWL7265_TX_POWER_VERSION, 203 .nvm_calib_ver = IWL7265_TX_POWER_VERSION,
197}; 204};
198 205
206const struct iwl_cfg iwl7265_2n_cfg = {
207 .name = "Intel(R) Dual Band Wireless N 7265",
208 .fw_name_pre = IWL7265_FW_PRE,
209 IWL_DEVICE_7000,
210 .ht_params = &iwl7000_ht_params,
211 .nvm_ver = IWL7265_NVM_VERSION,
212 .nvm_calib_ver = IWL7265_TX_POWER_VERSION,
213};
214
215const struct iwl_cfg iwl7265_n_cfg = {
216 .name = "Intel(R) Wireless N 7265",
217 .fw_name_pre = IWL7265_FW_PRE,
218 IWL_DEVICE_7000,
219 .ht_params = &iwl7000_ht_params,
220 .nvm_ver = IWL7265_NVM_VERSION,
221 .nvm_calib_ver = IWL7265_TX_POWER_VERSION,
222};
223
199MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_OK)); 224MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
200MODULE_FIRMWARE(IWL3160_MODULE_FIRMWARE(IWL3160_UCODE_API_OK)); 225MODULE_FIRMWARE(IWL3160_MODULE_FIRMWARE(IWL3160_UCODE_API_OK));
diff --git a/drivers/net/wireless/iwlwifi/iwl-config.h b/drivers/net/wireless/iwlwifi/iwl-config.h
index 18f232e8e812..03fd9aa8bfda 100644
--- a/drivers/net/wireless/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/iwlwifi/iwl-config.h
@@ -207,6 +207,8 @@ struct iwl_eeprom_params {
207 * @rx_with_siso_diversity: 1x1 device with rx antenna diversity 207 * @rx_with_siso_diversity: 1x1 device with rx antenna diversity
208 * @internal_wimax_coex: internal wifi/wimax combo device 208 * @internal_wimax_coex: internal wifi/wimax combo device
209 * @high_temp: Is this NIC is designated to be in high temperature. 209 * @high_temp: Is this NIC is designated to be in high temperature.
210 * @host_interrupt_operation_mode: device needs host interrupt operation
211 * mode set
210 * 212 *
211 * We enable the driver to be backward compatible wrt. hardware features. 213 * We enable the driver to be backward compatible wrt. hardware features.
212 * API differences in uCode shouldn't be handled here but through TLVs 214 * API differences in uCode shouldn't be handled here but through TLVs
@@ -235,6 +237,7 @@ struct iwl_cfg {
235 enum iwl_led_mode led_mode; 237 enum iwl_led_mode led_mode;
236 const bool rx_with_siso_diversity; 238 const bool rx_with_siso_diversity;
237 const bool internal_wimax_coex; 239 const bool internal_wimax_coex;
240 const bool host_interrupt_operation_mode;
238 bool high_temp; 241 bool high_temp;
239}; 242};
240 243
@@ -294,6 +297,8 @@ extern const struct iwl_cfg iwl3160_2ac_cfg;
294extern const struct iwl_cfg iwl3160_2n_cfg; 297extern const struct iwl_cfg iwl3160_2n_cfg;
295extern const struct iwl_cfg iwl3160_n_cfg; 298extern const struct iwl_cfg iwl3160_n_cfg;
296extern const struct iwl_cfg iwl7265_2ac_cfg; 299extern const struct iwl_cfg iwl7265_2ac_cfg;
300extern const struct iwl_cfg iwl7265_2n_cfg;
301extern const struct iwl_cfg iwl7265_n_cfg;
297#endif /* CONFIG_IWLMVM */ 302#endif /* CONFIG_IWLMVM */
298 303
299#endif /* __IWL_CONFIG_H__ */ 304#endif /* __IWL_CONFIG_H__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-csr.h b/drivers/net/wireless/iwlwifi/iwl-csr.h
index 54a4fdc631b7..da4eca8b3007 100644
--- a/drivers/net/wireless/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/iwlwifi/iwl-csr.h
@@ -495,14 +495,11 @@ enum secure_load_status_reg {
495 * the CSR_INT_COALESCING is an 8 bit register in 32-usec unit 495 * the CSR_INT_COALESCING is an 8 bit register in 32-usec unit
496 * 496 *
497 * default interrupt coalescing timer is 64 x 32 = 2048 usecs 497 * default interrupt coalescing timer is 64 x 32 = 2048 usecs
498 * default interrupt coalescing calibration timer is 16 x 32 = 512 usecs
499 */ 498 */
500#define IWL_HOST_INT_TIMEOUT_MAX (0xFF) 499#define IWL_HOST_INT_TIMEOUT_MAX (0xFF)
501#define IWL_HOST_INT_TIMEOUT_DEF (0x40) 500#define IWL_HOST_INT_TIMEOUT_DEF (0x40)
502#define IWL_HOST_INT_TIMEOUT_MIN (0x0) 501#define IWL_HOST_INT_TIMEOUT_MIN (0x0)
503#define IWL_HOST_INT_CALIB_TIMEOUT_MAX (0xFF) 502#define IWL_HOST_INT_OPER_MODE BIT(31)
504#define IWL_HOST_INT_CALIB_TIMEOUT_DEF (0x10)
505#define IWL_HOST_INT_CALIB_TIMEOUT_MIN (0x0)
506 503
507/***************************************************************************** 504/*****************************************************************************
508 * 7000/3000 series SHR DTS addresses * 505 * 7000/3000 series SHR DTS addresses *
diff --git a/drivers/net/wireless/iwlwifi/mvm/bt-coex.c b/drivers/net/wireless/iwlwifi/mvm/bt-coex.c
index 5d066cbc5ac7..75b72a956552 100644
--- a/drivers/net/wireless/iwlwifi/mvm/bt-coex.c
+++ b/drivers/net/wireless/iwlwifi/mvm/bt-coex.c
@@ -391,7 +391,6 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
391 BT_VALID_LUT | 391 BT_VALID_LUT |
392 BT_VALID_WIFI_RX_SW_PRIO_BOOST | 392 BT_VALID_WIFI_RX_SW_PRIO_BOOST |
393 BT_VALID_WIFI_TX_SW_PRIO_BOOST | 393 BT_VALID_WIFI_TX_SW_PRIO_BOOST |
394 BT_VALID_MULTI_PRIO_LUT |
395 BT_VALID_CORUN_LUT_20 | 394 BT_VALID_CORUN_LUT_20 |
396 BT_VALID_CORUN_LUT_40 | 395 BT_VALID_CORUN_LUT_40 |
397 BT_VALID_ANT_ISOLATION | 396 BT_VALID_ANT_ISOLATION |
@@ -842,6 +841,11 @@ static void iwl_mvm_bt_rssi_iterator(void *_data, u8 *mac,
842 841
843 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id], 842 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id],
844 lockdep_is_held(&mvm->mutex)); 843 lockdep_is_held(&mvm->mutex));
844
845 /* This can happen if the station has been removed right now */
846 if (IS_ERR_OR_NULL(sta))
847 return;
848
845 mvmsta = (void *)sta->drv_priv; 849 mvmsta = (void *)sta->drv_priv;
846 850
847 data->num_bss_ifaces++; 851 data->num_bss_ifaces++;
diff --git a/drivers/net/wireless/iwlwifi/mvm/d3.c b/drivers/net/wireless/iwlwifi/mvm/d3.c
index 6f45966817bb..b9b81e881dd0 100644
--- a/drivers/net/wireless/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/iwlwifi/mvm/d3.c
@@ -895,7 +895,7 @@ static int iwl_mvm_get_last_nonqos_seq(struct iwl_mvm *mvm,
895 /* new API returns next, not last-used seqno */ 895 /* new API returns next, not last-used seqno */
896 if (mvm->fw->ucode_capa.flags & 896 if (mvm->fw->ucode_capa.flags &
897 IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API) 897 IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API)
898 err -= 0x10; 898 err = (u16) (err - 0x10);
899 } 899 }
900 900
901 iwl_free_resp(&cmd); 901 iwl_free_resp(&cmd);
@@ -1549,7 +1549,7 @@ static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm,
1549 if (gtkdata.unhandled_cipher) 1549 if (gtkdata.unhandled_cipher)
1550 return false; 1550 return false;
1551 if (!gtkdata.num_keys) 1551 if (!gtkdata.num_keys)
1552 return true; 1552 goto out;
1553 if (!gtkdata.last_gtk) 1553 if (!gtkdata.last_gtk)
1554 return false; 1554 return false;
1555 1555
@@ -1600,6 +1600,7 @@ static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm,
1600 (void *)&replay_ctr, GFP_KERNEL); 1600 (void *)&replay_ctr, GFP_KERNEL);
1601 } 1601 }
1602 1602
1603out:
1603 mvmvif->seqno_valid = true; 1604 mvmvif->seqno_valid = true;
1604 /* +0x10 because the set API expects next-to-use, not last-used */ 1605 /* +0x10 because the set API expects next-to-use, not last-used */
1605 mvmvif->seqno = le16_to_cpu(status->non_qos_seq_ctr) + 0x10; 1606 mvmvif->seqno = le16_to_cpu(status->non_qos_seq_ctr) + 0x10;
diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/iwlwifi/mvm/debugfs.c
index 9864d713eb2c..a8fe6b41f9a3 100644
--- a/drivers/net/wireless/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/debugfs.c
@@ -119,6 +119,10 @@ static ssize_t iwl_dbgfs_sta_drain_write(struct file *file,
119 119
120 if (sscanf(buf, "%d %d", &sta_id, &drain) != 2) 120 if (sscanf(buf, "%d %d", &sta_id, &drain) != 2)
121 return -EINVAL; 121 return -EINVAL;
122 if (sta_id < 0 || sta_id >= IWL_MVM_STATION_COUNT)
123 return -EINVAL;
124 if (drain < 0 || drain > 1)
125 return -EINVAL;
122 126
123 mutex_lock(&mvm->mutex); 127 mutex_lock(&mvm->mutex);
124 128
diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.c b/drivers/net/wireless/iwlwifi/mvm/time-event.c
index 33cf56fdfc41..95ce4b601fef 100644
--- a/drivers/net/wireless/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/iwlwifi/mvm/time-event.c
@@ -176,8 +176,11 @@ static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm,
176 * P2P Device discoveribility, while there are other higher priority 176 * P2P Device discoveribility, while there are other higher priority
177 * events in the system). 177 * events in the system).
178 */ 178 */
179 if (WARN_ONCE(!le32_to_cpu(notif->status), 179 if (!le32_to_cpu(notif->status)) {
180 "Failed to schedule time event\n")) { 180 bool start = le32_to_cpu(notif->action) &
181 TE_V2_NOTIF_HOST_EVENT_START;
182 IWL_WARN(mvm, "Time Event %s notification failure\n",
183 start ? "start" : "end");
181 if (iwl_mvm_te_check_disconnect(mvm, te_data->vif, NULL)) { 184 if (iwl_mvm_te_check_disconnect(mvm, te_data->vif, NULL)) {
182 iwl_mvm_te_clear_data(mvm, te_data); 185 iwl_mvm_te_clear_data(mvm, te_data);
183 return; 186 return;
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index 941c0c88f982..e6272546395a 100644
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -353,6 +353,33 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
353 353
354/* 7265 Series */ 354/* 7265 Series */
355 {IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)}, 355 {IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)},
356 {IWL_PCI_DEVICE(0x095A, 0x5110, iwl7265_2ac_cfg)},
357 {IWL_PCI_DEVICE(0x095B, 0x5310, iwl7265_2ac_cfg)},
358 {IWL_PCI_DEVICE(0x095B, 0x5302, iwl7265_2ac_cfg)},
359 {IWL_PCI_DEVICE(0x095B, 0x5210, iwl7265_2ac_cfg)},
360 {IWL_PCI_DEVICE(0x095A, 0x5012, iwl7265_2ac_cfg)},
361 {IWL_PCI_DEVICE(0x095A, 0x500A, iwl7265_2ac_cfg)},
362 {IWL_PCI_DEVICE(0x095A, 0x5410, iwl7265_2ac_cfg)},
363 {IWL_PCI_DEVICE(0x095A, 0x5400, iwl7265_2ac_cfg)},
364 {IWL_PCI_DEVICE(0x095A, 0x1010, iwl7265_2ac_cfg)},
365 {IWL_PCI_DEVICE(0x095A, 0x5000, iwl7265_2n_cfg)},
366 {IWL_PCI_DEVICE(0x095B, 0x5200, iwl7265_2n_cfg)},
367 {IWL_PCI_DEVICE(0x095A, 0x5002, iwl7265_n_cfg)},
368 {IWL_PCI_DEVICE(0x095B, 0x5202, iwl7265_n_cfg)},
369 {IWL_PCI_DEVICE(0x095A, 0x9010, iwl7265_2ac_cfg)},
370 {IWL_PCI_DEVICE(0x095A, 0x9110, iwl7265_2ac_cfg)},
371 {IWL_PCI_DEVICE(0x095A, 0x9210, iwl7265_2ac_cfg)},
372 {IWL_PCI_DEVICE(0x095A, 0x9510, iwl7265_2ac_cfg)},
373 {IWL_PCI_DEVICE(0x095A, 0x9310, iwl7265_2ac_cfg)},
374 {IWL_PCI_DEVICE(0x095A, 0x9410, iwl7265_2ac_cfg)},
375 {IWL_PCI_DEVICE(0x095A, 0x5020, iwl7265_2n_cfg)},
376 {IWL_PCI_DEVICE(0x095A, 0x502A, iwl7265_2n_cfg)},
377 {IWL_PCI_DEVICE(0x095A, 0x5420, iwl7265_2n_cfg)},
378 {IWL_PCI_DEVICE(0x095A, 0x5090, iwl7265_2ac_cfg)},
379 {IWL_PCI_DEVICE(0x095A, 0x5190, iwl7265_2ac_cfg)},
380 {IWL_PCI_DEVICE(0x095A, 0x5590, iwl7265_2ac_cfg)},
381 {IWL_PCI_DEVICE(0x095B, 0x5290, iwl7265_2ac_cfg)},
382 {IWL_PCI_DEVICE(0x095A, 0x5490, iwl7265_2ac_cfg)},
356#endif /* CONFIG_IWLMVM */ 383#endif /* CONFIG_IWLMVM */
357 384
358 {0} 385 {0}
diff --git a/drivers/net/wireless/iwlwifi/pcie/internal.h b/drivers/net/wireless/iwlwifi/pcie/internal.h
index fa22639b63c9..051268c037b1 100644
--- a/drivers/net/wireless/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/iwlwifi/pcie/internal.h
@@ -477,4 +477,12 @@ static inline bool iwl_is_rfkill_set(struct iwl_trans *trans)
477 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW); 477 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
478} 478}
479 479
480static inline void iwl_nic_error(struct iwl_trans *trans)
481{
482 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
483
484 set_bit(STATUS_FW_ERROR, &trans_pcie->status);
485 iwl_op_mode_nic_error(trans->op_mode);
486}
487
480#endif /* __iwl_trans_int_pcie_h__ */ 488#endif /* __iwl_trans_int_pcie_h__ */
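The new iwl_nic_error() inline bundles two steps that every error site previously had to perform separately: mark the transport as being in firmware-error state, then notify the op mode. The same "record the state before telling anyone" pattern in generic form, as a sketch with stand-in types rather than the iwlwifi structures:

struct transport {
	unsigned long status;		/* bit flags, stand-in for the real state word */
};

#define STATUS_FW_ERROR_BIT	(1UL << 0)

void notify_op_mode_error(struct transport *t);	/* stand-in for the op-mode callback */

/* Set the error flag first so every observer woken by the notification
 * already sees a consistent state.
 */
void nic_error(struct transport *t)
{
	t->status |= STATUS_FW_ERROR_BIT;
	notify_op_mode_error(t);
}

The rx.c and tx.c hunks that follow simply switch their error sites over to this wrapper.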
diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c
index 3f237b42eb36..be3995afa9d0 100644
--- a/drivers/net/wireless/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/rx.c
@@ -489,6 +489,10 @@ static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
489 489
490 /* Set interrupt coalescing timer to default (2048 usecs) */ 490 /* Set interrupt coalescing timer to default (2048 usecs) */
491 iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF); 491 iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
492
493 /* W/A for interrupt coalescing bug in 7260 and 3160 */
494 if (trans->cfg->host_interrupt_operation_mode)
495 iwl_set_bit(trans, CSR_INT_COALESCING, IWL_HOST_INT_OPER_MODE);
492} 496}
493 497
494static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq) 498static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
@@ -796,12 +800,13 @@ static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
796 iwl_pcie_dump_csr(trans); 800 iwl_pcie_dump_csr(trans);
797 iwl_dump_fh(trans, NULL); 801 iwl_dump_fh(trans, NULL);
798 802
803 /* set the ERROR bit before we wake up the caller */
799 set_bit(STATUS_FW_ERROR, &trans_pcie->status); 804 set_bit(STATUS_FW_ERROR, &trans_pcie->status);
800 clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status); 805 clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
801 wake_up(&trans_pcie->wait_command_queue); 806 wake_up(&trans_pcie->wait_command_queue);
802 807
803 local_bh_disable(); 808 local_bh_disable();
804 iwl_op_mode_nic_error(trans->op_mode); 809 iwl_nic_error(trans);
805 local_bh_enable(); 810 local_bh_enable();
806} 811}
807 812
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index 5d9337bec67a..cde9c16f6e4f 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -279,9 +279,6 @@ static int iwl_pcie_nic_init(struct iwl_trans *trans)
279 spin_lock_irqsave(&trans_pcie->irq_lock, flags); 279 spin_lock_irqsave(&trans_pcie->irq_lock, flags);
280 iwl_pcie_apm_init(trans); 280 iwl_pcie_apm_init(trans);
281 281
282 /* Set interrupt coalescing calibration timer to default (512 usecs) */
283 iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF);
284
285 spin_unlock_irqrestore(&trans_pcie->irq_lock, flags); 282 spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
286 283
287 iwl_pcie_set_pwr(trans, false); 284 iwl_pcie_set_pwr(trans, false);
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
index 059c5acad3a0..0adde919a258 100644
--- a/drivers/net/wireless/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/tx.c
@@ -207,7 +207,7 @@ static void iwl_pcie_txq_stuck_timer(unsigned long data)
207 IWL_ERR(trans, "scratch %d = 0x%08x\n", i, 207 IWL_ERR(trans, "scratch %d = 0x%08x\n", i,
208 le32_to_cpu(txq->scratchbufs[i].scratch)); 208 le32_to_cpu(txq->scratchbufs[i].scratch));
209 209
210 iwl_op_mode_nic_error(trans->op_mode); 210 iwl_nic_error(trans);
211} 211}
212 212
213/* 213/*
@@ -1023,7 +1023,7 @@ static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
1023 if (nfreed++ > 0) { 1023 if (nfreed++ > 0) {
1024 IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n", 1024 IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n",
1025 idx, q->write_ptr, q->read_ptr); 1025 idx, q->write_ptr, q->read_ptr);
1026 iwl_op_mode_nic_error(trans->op_mode); 1026 iwl_nic_error(trans);
1027 } 1027 }
1028 } 1028 }
1029 1029
@@ -1562,7 +1562,7 @@ static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
1562 get_cmd_string(trans_pcie, cmd->id)); 1562 get_cmd_string(trans_pcie, cmd->id));
1563 ret = -ETIMEDOUT; 1563 ret = -ETIMEDOUT;
1564 1564
1565 iwl_op_mode_nic_error(trans->op_mode); 1565 iwl_nic_error(trans);
1566 1566
1567 goto cancel; 1567 goto cancel;
1568 } 1568 }
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 9df7bc91a26f..a1b32ee9594a 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -383,6 +383,14 @@ struct hwsim_radiotap_hdr {
383 __le16 rt_chbitmask; 383 __le16 rt_chbitmask;
384} __packed; 384} __packed;
385 385
386struct hwsim_radiotap_ack_hdr {
387 struct ieee80211_radiotap_header hdr;
388 u8 rt_flags;
389 u8 pad;
390 __le16 rt_channel;
391 __le16 rt_chbitmask;
392} __packed;
393
386/* MAC80211_HWSIM netlinf family */ 394/* MAC80211_HWSIM netlinf family */
387static struct genl_family hwsim_genl_family = { 395static struct genl_family hwsim_genl_family = {
388 .id = GENL_ID_GENERATE, 396 .id = GENL_ID_GENERATE,
@@ -500,7 +508,7 @@ static void mac80211_hwsim_monitor_ack(struct ieee80211_channel *chan,
500 const u8 *addr) 508 const u8 *addr)
501{ 509{
502 struct sk_buff *skb; 510 struct sk_buff *skb;
503 struct hwsim_radiotap_hdr *hdr; 511 struct hwsim_radiotap_ack_hdr *hdr;
504 u16 flags; 512 u16 flags;
505 struct ieee80211_hdr *hdr11; 513 struct ieee80211_hdr *hdr11;
506 514
@@ -511,14 +519,14 @@ static void mac80211_hwsim_monitor_ack(struct ieee80211_channel *chan,
511 if (skb == NULL) 519 if (skb == NULL)
512 return; 520 return;
513 521
514 hdr = (struct hwsim_radiotap_hdr *) skb_put(skb, sizeof(*hdr)); 522 hdr = (struct hwsim_radiotap_ack_hdr *) skb_put(skb, sizeof(*hdr));
515 hdr->hdr.it_version = PKTHDR_RADIOTAP_VERSION; 523 hdr->hdr.it_version = PKTHDR_RADIOTAP_VERSION;
516 hdr->hdr.it_pad = 0; 524 hdr->hdr.it_pad = 0;
517 hdr->hdr.it_len = cpu_to_le16(sizeof(*hdr)); 525 hdr->hdr.it_len = cpu_to_le16(sizeof(*hdr));
518 hdr->hdr.it_present = cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) | 526 hdr->hdr.it_present = cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) |
519 (1 << IEEE80211_RADIOTAP_CHANNEL)); 527 (1 << IEEE80211_RADIOTAP_CHANNEL));
520 hdr->rt_flags = 0; 528 hdr->rt_flags = 0;
521 hdr->rt_rate = 0; 529 hdr->pad = 0;
522 hdr->rt_channel = cpu_to_le16(chan->center_freq); 530 hdr->rt_channel = cpu_to_le16(chan->center_freq);
523 flags = IEEE80211_CHAN_2GHZ; 531 flags = IEEE80211_CHAN_2GHZ;
524 hdr->rt_chbitmask = cpu_to_le16(flags); 532 hdr->rt_chbitmask = cpu_to_le16(flags);
@@ -1230,7 +1238,7 @@ static void mac80211_hwsim_bss_info_changed(struct ieee80211_hw *hw,
1230 HRTIMER_MODE_REL); 1238 HRTIMER_MODE_REL);
1231 } else if (!info->enable_beacon) { 1239 } else if (!info->enable_beacon) {
1232 unsigned int count = 0; 1240 unsigned int count = 0;
1233 ieee80211_iterate_active_interfaces( 1241 ieee80211_iterate_active_interfaces_atomic(
1234 data->hw, IEEE80211_IFACE_ITER_NORMAL, 1242 data->hw, IEEE80211_IFACE_ITER_NORMAL,
1235 mac80211_hwsim_bcn_en_iter, &count); 1243 mac80211_hwsim_bcn_en_iter, &count);
1236 wiphy_debug(hw->wiphy, " beaconing vifs remaining: %u", 1244 wiphy_debug(hw->wiphy, " beaconing vifs remaining: %u",
@@ -2003,7 +2011,7 @@ static int hwsim_tx_info_frame_received_nl(struct sk_buff *skb_2,
2003 (hwsim_flags & HWSIM_TX_STAT_ACK)) { 2011 (hwsim_flags & HWSIM_TX_STAT_ACK)) {
2004 if (skb->len >= 16) { 2012 if (skb->len >= 16) {
2005 hdr = (struct ieee80211_hdr *) skb->data; 2013 hdr = (struct ieee80211_hdr *) skb->data;
2006 mac80211_hwsim_monitor_ack(txi->rate_driver_data[0], 2014 mac80211_hwsim_monitor_ack(data2->channel,
2007 hdr->addr2); 2015 hdr->addr2);
2008 } 2016 }
2009 txi->flags |= IEEE80211_TX_STAT_ACK; 2017 txi->flags |= IEEE80211_TX_STAT_ACK;
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
index 78e8a6666cc6..8bb8988c435c 100644
--- a/drivers/net/wireless/mwifiex/main.c
+++ b/drivers/net/wireless/mwifiex/main.c
@@ -746,7 +746,8 @@ static struct net_device_stats *mwifiex_get_stats(struct net_device *dev)
746} 746}
747 747
748static u16 748static u16
749mwifiex_netdev_select_wmm_queue(struct net_device *dev, struct sk_buff *skb) 749mwifiex_netdev_select_wmm_queue(struct net_device *dev, struct sk_buff *skb,
750 void *accel_priv)
750{ 751{
751 skb->priority = cfg80211_classify8021d(skb); 752 skb->priority = cfg80211_classify8021d(skb);
752 return mwifiex_1d_to_wmm_queue[skb->priority]; 753 return mwifiex_1d_to_wmm_queue[skb->priority];
diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c
index c8e029df770e..a09398fe9e2a 100644
--- a/drivers/net/wireless/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/mwifiex/sta_ioctl.c
@@ -319,8 +319,8 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss,
319 if (bss_desc && bss_desc->ssid.ssid_len && 319 if (bss_desc && bss_desc->ssid.ssid_len &&
320 (!mwifiex_ssid_cmp(&priv->curr_bss_params.bss_descriptor. 320 (!mwifiex_ssid_cmp(&priv->curr_bss_params.bss_descriptor.
321 ssid, &bss_desc->ssid))) { 321 ssid, &bss_desc->ssid))) {
322 kfree(bss_desc); 322 ret = 0;
323 return 0; 323 goto done;
324 } 324 }
325 325
326 /* Exit Adhoc mode first */ 326 /* Exit Adhoc mode first */
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
index 0f494444bcd1..5a53195d016b 100644
--- a/drivers/net/wireless/rtlwifi/pci.c
+++ b/drivers/net/wireless/rtlwifi/pci.c
@@ -740,6 +740,8 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
740 }; 740 };
741 int index = rtlpci->rx_ring[rx_queue_idx].idx; 741 int index = rtlpci->rx_ring[rx_queue_idx].idx;
742 742
743 if (rtlpci->driver_is_goingto_unload)
744 return;
743 /*RX NORMAL PKT */ 745 /*RX NORMAL PKT */
744 while (count--) { 746 while (count--) {
745 /*rx descriptor */ 747 /*rx descriptor */
@@ -1636,6 +1638,7 @@ static void rtl_pci_stop(struct ieee80211_hw *hw)
1636 */ 1638 */
1637 set_hal_stop(rtlhal); 1639 set_hal_stop(rtlhal);
1638 1640
1641 rtlpci->driver_is_goingto_unload = true;
1639 rtlpriv->cfg->ops->disable_interrupt(hw); 1642 rtlpriv->cfg->ops->disable_interrupt(hw);
1640 cancel_work_sync(&rtlpriv->works.lps_change_work); 1643 cancel_work_sync(&rtlpriv->works.lps_change_work);
1641 1644
@@ -1653,7 +1656,6 @@ static void rtl_pci_stop(struct ieee80211_hw *hw)
1653 ppsc->rfchange_inprogress = true; 1656 ppsc->rfchange_inprogress = true;
1654 spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flags); 1657 spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flags);
1655 1658
1656 rtlpci->driver_is_goingto_unload = true;
1657 rtlpriv->cfg->ops->hw_disable(hw); 1659 rtlpriv->cfg->ops->hw_disable(hw);
1658 /* some things are not needed if firmware not available */ 1660 /* some things are not needed if firmware not available */
1659 if (!rtlpriv->max_fw_size) 1661 if (!rtlpriv->max_fw_size)
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 08ae01b41c83..c47794b9d42f 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -101,6 +101,13 @@ struct xenvif_rx_meta {
101 101
102#define MAX_PENDING_REQS 256 102#define MAX_PENDING_REQS 256
103 103
104/* It's possible for an skb to have a maximal number of frags
105 * but still be less than MAX_BUFFER_OFFSET in size. Thus the
106 * worst-case number of copy operations is MAX_SKB_FRAGS per
107 * ring slot.
108 */
109#define MAX_GRANT_COPY_OPS (MAX_SKB_FRAGS * XEN_NETIF_RX_RING_SIZE)
110
104struct xenvif { 111struct xenvif {
105 /* Unique identifier for this interface. */ 112 /* Unique identifier for this interface. */
106 domid_t domid; 113 domid_t domid;
@@ -143,13 +150,13 @@ struct xenvif {
143 */ 150 */
144 RING_IDX rx_req_cons_peek; 151 RING_IDX rx_req_cons_peek;
145 152
146 /* Given MAX_BUFFER_OFFSET of 4096 the worst case is that each 153 /* This array is allocated seperately as it is large */
147 * head/fragment page uses 2 copy operations because it 154 struct gnttab_copy *grant_copy_op;
148 * straddles two buffers in the frontend.
149 */
150 struct gnttab_copy grant_copy_op[2*XEN_NETIF_RX_RING_SIZE];
151 struct xenvif_rx_meta meta[2*XEN_NETIF_RX_RING_SIZE];
152 155
156 /* We create one meta structure per ring request we consume, so
157 * the maximum number is the same as the ring size.
158 */
159 struct xenvif_rx_meta meta[XEN_NETIF_RX_RING_SIZE];
153 160
154 u8 fe_dev_addr[6]; 161 u8 fe_dev_addr[6];
155 162
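The new comment motivates MAX_GRANT_COPY_OPS: each ring slot can require up to MAX_SKB_FRAGS copy operations, so the worst case for a whole batch is MAX_SKB_FRAGS * XEN_NETIF_RX_RING_SIZE entries. As a rough, stand-alone illustration of the arithmetic (not part of the patch; 17 frags, a 256-entry ring and the 56-byte struct size are assumed typical values, not taken from the code above):

    #include <stdio.h>

    #define MAX_SKB_FRAGS          17   /* assumed typical value (4 KiB pages) */
    #define XEN_NETIF_RX_RING_SIZE 256  /* assumed typical value (4 KiB pages) */
    #define MAX_GRANT_COPY_OPS     (MAX_SKB_FRAGS * XEN_NETIF_RX_RING_SIZE)

    struct gnttab_copy { unsigned char raw[56]; };  /* stand-in for the real layout */

    int main(void)
    {
            /* Roughly 4352 entries; at ~56 bytes each that is ~240 KiB, which is
             * why the array is now allocated separately rather than embedded in
             * struct xenvif. */
            printf("%d ops, %zu bytes\n", MAX_GRANT_COPY_OPS,
                   (size_t)MAX_GRANT_COPY_OPS * sizeof(struct gnttab_copy));
            return 0;
    }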
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 2329cccf1fa6..fff8cddfed81 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -34,6 +34,7 @@
34#include <linux/ethtool.h> 34#include <linux/ethtool.h>
35#include <linux/rtnetlink.h> 35#include <linux/rtnetlink.h>
36#include <linux/if_vlan.h> 36#include <linux/if_vlan.h>
37#include <linux/vmalloc.h>
37 38
38#include <xen/events.h> 39#include <xen/events.h>
39#include <asm/xen/hypercall.h> 40#include <asm/xen/hypercall.h>
@@ -307,6 +308,15 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
307 SET_NETDEV_DEV(dev, parent); 308 SET_NETDEV_DEV(dev, parent);
308 309
309 vif = netdev_priv(dev); 310 vif = netdev_priv(dev);
311
312 vif->grant_copy_op = vmalloc(sizeof(struct gnttab_copy) *
313 MAX_GRANT_COPY_OPS);
314 if (vif->grant_copy_op == NULL) {
315 pr_warn("Could not allocate grant copy space for %s\n", name);
316 free_netdev(dev);
317 return ERR_PTR(-ENOMEM);
318 }
319
310 vif->domid = domid; 320 vif->domid = domid;
311 vif->handle = handle; 321 vif->handle = handle;
312 vif->can_sg = 1; 322 vif->can_sg = 1;
@@ -368,11 +378,11 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
368 unsigned long rx_ring_ref, unsigned int tx_evtchn, 378 unsigned long rx_ring_ref, unsigned int tx_evtchn,
369 unsigned int rx_evtchn) 379 unsigned int rx_evtchn)
370{ 380{
381 struct task_struct *task;
371 int err = -ENOMEM; 382 int err = -ENOMEM;
372 383
373 /* Already connected through? */ 384 BUG_ON(vif->tx_irq);
374 if (vif->tx_irq) 385 BUG_ON(vif->task);
375 return 0;
376 386
377 err = xenvif_map_frontend_rings(vif, tx_ring_ref, rx_ring_ref); 387 err = xenvif_map_frontend_rings(vif, tx_ring_ref, rx_ring_ref);
378 if (err < 0) 388 if (err < 0)
@@ -411,14 +421,16 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
411 } 421 }
412 422
413 init_waitqueue_head(&vif->wq); 423 init_waitqueue_head(&vif->wq);
414 vif->task = kthread_create(xenvif_kthread, 424 task = kthread_create(xenvif_kthread,
415 (void *)vif, "%s", vif->dev->name); 425 (void *)vif, "%s", vif->dev->name);
416 if (IS_ERR(vif->task)) { 426 if (IS_ERR(task)) {
417 pr_warn("Could not allocate kthread for %s\n", vif->dev->name); 427 pr_warn("Could not allocate kthread for %s\n", vif->dev->name);
418 err = PTR_ERR(vif->task); 428 err = PTR_ERR(task);
419 goto err_rx_unbind; 429 goto err_rx_unbind;
420 } 430 }
421 431
432 vif->task = task;
433
422 rtnl_lock(); 434 rtnl_lock();
423 if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN) 435 if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN)
424 dev_set_mtu(vif->dev, ETH_DATA_LEN); 436 dev_set_mtu(vif->dev, ETH_DATA_LEN);
@@ -461,8 +473,10 @@ void xenvif_disconnect(struct xenvif *vif)
461 if (netif_carrier_ok(vif->dev)) 473 if (netif_carrier_ok(vif->dev))
462 xenvif_carrier_off(vif); 474 xenvif_carrier_off(vif);
463 475
464 if (vif->task) 476 if (vif->task) {
465 kthread_stop(vif->task); 477 kthread_stop(vif->task);
478 vif->task = NULL;
479 }
466 480
467 if (vif->tx_irq) { 481 if (vif->tx_irq) {
468 if (vif->tx_irq == vif->rx_irq) 482 if (vif->tx_irq == vif->rx_irq)
@@ -483,6 +497,7 @@ void xenvif_free(struct xenvif *vif)
483 497
484 unregister_netdev(vif->dev); 498 unregister_netdev(vif->dev);
485 499
500 vfree(vif->grant_copy_op);
486 free_netdev(vif->dev); 501 free_netdev(vif->dev);
487 502
488 module_put(THIS_MODULE); 503 module_put(THIS_MODULE);
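The interface.c changes follow a publish-only-on-success pattern: the large copy-op array is vmalloc()'d before the netdev is usable and vfree()'d in xenvif_free(), and the kthread pointer is stored in the vif only after kthread_create() has succeeded. A hedged sketch of that shape (the foo_* names and BIG_BUFFER_SIZE are illustrative, not from the driver):

    #include <linux/vmalloc.h>
    #include <linux/kthread.h>
    #include <linux/err.h>
    #include <linux/errno.h>

    #define BIG_BUFFER_SIZE (256 * 1024)    /* illustrative size only */

    struct foo {
            void *big_buffer;               /* stands in for vif->grant_copy_op */
            struct task_struct *task;       /* stands in for vif->task */
    };

    static int foo_thread(void *data)
    {
            return 0;                       /* placeholder thread body */
    }

    static int foo_setup(struct foo *f, const char *name)
    {
            struct task_struct *task;

            f->big_buffer = vmalloc(BIG_BUFFER_SIZE);
            if (!f->big_buffer)
                    return -ENOMEM;

            task = kthread_create(foo_thread, f, "%s", name);
            if (IS_ERR(task)) {
                    vfree(f->big_buffer);
                    return PTR_ERR(task);
            }

            /* Publish the pointer only once the thread is known to exist. */
            f->task = task;
            return 0;
    }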
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 919b6509455c..78425554a537 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -39,6 +39,7 @@
39#include <linux/udp.h> 39#include <linux/udp.h>
40 40
41#include <net/tcp.h> 41#include <net/tcp.h>
42#include <net/ip6_checksum.h>
42 43
43#include <xen/xen.h> 44#include <xen/xen.h>
44#include <xen/events.h> 45#include <xen/events.h>
@@ -451,7 +452,7 @@ static int xenvif_gop_skb(struct sk_buff *skb,
451 } 452 }
452 453
453 /* Set up a GSO prefix descriptor, if necessary */ 454 /* Set up a GSO prefix descriptor, if necessary */
454 if ((1 << skb_shinfo(skb)->gso_type) & vif->gso_prefix_mask) { 455 if ((1 << gso_type) & vif->gso_prefix_mask) {
455 req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++); 456 req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
456 meta = npo->meta + npo->meta_prod++; 457 meta = npo->meta + npo->meta_prod++;
457 meta->gso_type = gso_type; 458 meta->gso_type = gso_type;
@@ -607,7 +608,7 @@ void xenvif_rx_action(struct xenvif *vif)
607 if (!npo.copy_prod) 608 if (!npo.copy_prod)
608 return; 609 return;
609 610
610 BUG_ON(npo.copy_prod > ARRAY_SIZE(vif->grant_copy_op)); 611 BUG_ON(npo.copy_prod > MAX_GRANT_COPY_OPS);
611 gnttab_batch_copy(vif->grant_copy_op, npo.copy_prod); 612 gnttab_batch_copy(vif->grant_copy_op, npo.copy_prod);
612 613
613 while ((skb = __skb_dequeue(&rxq)) != NULL) { 614 while ((skb = __skb_dequeue(&rxq)) != NULL) {
@@ -1148,75 +1149,99 @@ static int xenvif_set_skb_gso(struct xenvif *vif,
1148 return 0; 1149 return 0;
1149} 1150}
1150 1151
1151static inline void maybe_pull_tail(struct sk_buff *skb, unsigned int len) 1152static inline int maybe_pull_tail(struct sk_buff *skb, unsigned int len,
1153 unsigned int max)
1152{ 1154{
1153 if (skb_is_nonlinear(skb) && skb_headlen(skb) < len) { 1155 if (skb_headlen(skb) >= len)
1154 /* If we need to pullup then pullup to the max, so we 1156 return 0;
1155 * won't need to do it again. 1157
1156 */ 1158 /* If we need to pullup then pullup to the max, so we
1157 int target = min_t(int, skb->len, MAX_TCP_HEADER); 1159 * won't need to do it again.
1158 __pskb_pull_tail(skb, target - skb_headlen(skb)); 1160 */
1159 } 1161 if (max > skb->len)
1162 max = skb->len;
1163
1164 if (__pskb_pull_tail(skb, max - skb_headlen(skb)) == NULL)
1165 return -ENOMEM;
1166
1167 if (skb_headlen(skb) < len)
1168 return -EPROTO;
1169
1170 return 0;
1160} 1171}
1161 1172
1173/* This value should be large enough to cover a tagged ethernet header plus
1174 * maximally sized IP and TCP or UDP headers.
1175 */
1176#define MAX_IP_HDR_LEN 128
1177
1162static int checksum_setup_ip(struct xenvif *vif, struct sk_buff *skb, 1178static int checksum_setup_ip(struct xenvif *vif, struct sk_buff *skb,
1163 int recalculate_partial_csum) 1179 int recalculate_partial_csum)
1164{ 1180{
1165 struct iphdr *iph = (void *)skb->data;
1166 unsigned int header_size;
1167 unsigned int off; 1181 unsigned int off;
1168 int err = -EPROTO; 1182 bool fragment;
1183 int err;
1169 1184
1170 off = sizeof(struct iphdr); 1185 fragment = false;
1171 1186
1172 header_size = skb->network_header + off + MAX_IPOPTLEN; 1187 err = maybe_pull_tail(skb,
1173 maybe_pull_tail(skb, header_size); 1188 sizeof(struct iphdr),
1189 MAX_IP_HDR_LEN);
1190 if (err < 0)
1191 goto out;
1174 1192
1175 off = iph->ihl * 4; 1193 if (ip_hdr(skb)->frag_off & htons(IP_OFFSET | IP_MF))
1194 fragment = true;
1176 1195
1177 switch (iph->protocol) { 1196 off = ip_hdrlen(skb);
1178 case IPPROTO_TCP:
1179 if (!skb_partial_csum_set(skb, off,
1180 offsetof(struct tcphdr, check)))
1181 goto out;
1182 1197
1183 if (recalculate_partial_csum) { 1198 err = -EPROTO;
1184 struct tcphdr *tcph = tcp_hdr(skb); 1199
1200 if (fragment)
1201 goto out;
1185 1202
1186 header_size = skb->network_header + 1203 switch (ip_hdr(skb)->protocol) {
1187 off + 1204 case IPPROTO_TCP:
1188 sizeof(struct tcphdr); 1205 err = maybe_pull_tail(skb,
1189 maybe_pull_tail(skb, header_size); 1206 off + sizeof(struct tcphdr),
1207 MAX_IP_HDR_LEN);
1208 if (err < 0)
1209 goto out;
1190 1210
1191 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 1211 if (!skb_partial_csum_set(skb, off,
1192 skb->len - off, 1212 offsetof(struct tcphdr, check))) {
1193 IPPROTO_TCP, 0); 1213 err = -EPROTO;
1214 goto out;
1194 } 1215 }
1216
1217 if (recalculate_partial_csum)
1218 tcp_hdr(skb)->check =
1219 ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
1220 ip_hdr(skb)->daddr,
1221 skb->len - off,
1222 IPPROTO_TCP, 0);
1195 break; 1223 break;
1196 case IPPROTO_UDP: 1224 case IPPROTO_UDP:
1197 if (!skb_partial_csum_set(skb, off, 1225 err = maybe_pull_tail(skb,
1198 offsetof(struct udphdr, check))) 1226 off + sizeof(struct udphdr),
1227 MAX_IP_HDR_LEN);
1228 if (err < 0)
1199 goto out; 1229 goto out;
1200 1230
1201 if (recalculate_partial_csum) { 1231 if (!skb_partial_csum_set(skb, off,
1202 struct udphdr *udph = udp_hdr(skb); 1232 offsetof(struct udphdr, check))) {
1203 1233 err = -EPROTO;
1204 header_size = skb->network_header + 1234 goto out;
1205 off +
1206 sizeof(struct udphdr);
1207 maybe_pull_tail(skb, header_size);
1208
1209 udph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
1210 skb->len - off,
1211 IPPROTO_UDP, 0);
1212 } 1235 }
1236
1237 if (recalculate_partial_csum)
1238 udp_hdr(skb)->check =
1239 ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
1240 ip_hdr(skb)->daddr,
1241 skb->len - off,
1242 IPPROTO_UDP, 0);
1213 break; 1243 break;
1214 default: 1244 default:
1215 if (net_ratelimit())
1216 netdev_err(vif->dev,
1217 "Attempting to checksum a non-TCP/UDP packet, "
1218 "dropping a protocol %d packet\n",
1219 iph->protocol);
1220 goto out; 1245 goto out;
1221 } 1246 }
1222 1247
@@ -1226,121 +1251,142 @@ out:
1226 return err; 1251 return err;
1227} 1252}
1228 1253
1254/* This value should be large enough to cover a tagged ethernet header plus
1255 * an IPv6 header, all options, and a maximal TCP or UDP header.
1256 */
1257#define MAX_IPV6_HDR_LEN 256
1258
1259#define OPT_HDR(type, skb, off) \
1260 (type *)(skb_network_header(skb) + (off))
1261
1229static int checksum_setup_ipv6(struct xenvif *vif, struct sk_buff *skb, 1262static int checksum_setup_ipv6(struct xenvif *vif, struct sk_buff *skb,
1230 int recalculate_partial_csum) 1263 int recalculate_partial_csum)
1231{ 1264{
1232 int err = -EPROTO; 1265 int err;
1233 struct ipv6hdr *ipv6h = (void *)skb->data;
1234 u8 nexthdr; 1266 u8 nexthdr;
1235 unsigned int header_size;
1236 unsigned int off; 1267 unsigned int off;
1268 unsigned int len;
1237 bool fragment; 1269 bool fragment;
1238 bool done; 1270 bool done;
1239 1271
1272 fragment = false;
1240 done = false; 1273 done = false;
1241 1274
1242 off = sizeof(struct ipv6hdr); 1275 off = sizeof(struct ipv6hdr);
1243 1276
1244 header_size = skb->network_header + off; 1277 err = maybe_pull_tail(skb, off, MAX_IPV6_HDR_LEN);
1245 maybe_pull_tail(skb, header_size); 1278 if (err < 0)
1279 goto out;
1246 1280
1247 nexthdr = ipv6h->nexthdr; 1281 nexthdr = ipv6_hdr(skb)->nexthdr;
1248 1282
1249 while ((off <= sizeof(struct ipv6hdr) + ntohs(ipv6h->payload_len)) && 1283 len = sizeof(struct ipv6hdr) + ntohs(ipv6_hdr(skb)->payload_len);
1250 !done) { 1284 while (off <= len && !done) {
1251 switch (nexthdr) { 1285 switch (nexthdr) {
1252 case IPPROTO_DSTOPTS: 1286 case IPPROTO_DSTOPTS:
1253 case IPPROTO_HOPOPTS: 1287 case IPPROTO_HOPOPTS:
1254 case IPPROTO_ROUTING: { 1288 case IPPROTO_ROUTING: {
1255 struct ipv6_opt_hdr *hp = (void *)(skb->data + off); 1289 struct ipv6_opt_hdr *hp;
1256 1290
1257 header_size = skb->network_header + 1291 err = maybe_pull_tail(skb,
1258 off + 1292 off +
1259 sizeof(struct ipv6_opt_hdr); 1293 sizeof(struct ipv6_opt_hdr),
1260 maybe_pull_tail(skb, header_size); 1294 MAX_IPV6_HDR_LEN);
1295 if (err < 0)
1296 goto out;
1261 1297
1298 hp = OPT_HDR(struct ipv6_opt_hdr, skb, off);
1262 nexthdr = hp->nexthdr; 1299 nexthdr = hp->nexthdr;
1263 off += ipv6_optlen(hp); 1300 off += ipv6_optlen(hp);
1264 break; 1301 break;
1265 } 1302 }
1266 case IPPROTO_AH: { 1303 case IPPROTO_AH: {
1267 struct ip_auth_hdr *hp = (void *)(skb->data + off); 1304 struct ip_auth_hdr *hp;
1268 1305
1269 header_size = skb->network_header + 1306 err = maybe_pull_tail(skb,
1270 off + 1307 off +
1271 sizeof(struct ip_auth_hdr); 1308 sizeof(struct ip_auth_hdr),
1272 maybe_pull_tail(skb, header_size); 1309 MAX_IPV6_HDR_LEN);
1310 if (err < 0)
1311 goto out;
1273 1312
1313 hp = OPT_HDR(struct ip_auth_hdr, skb, off);
1274 nexthdr = hp->nexthdr; 1314 nexthdr = hp->nexthdr;
1275 off += (hp->hdrlen+2)<<2; 1315 off += ipv6_authlen(hp);
1316 break;
1317 }
1318 case IPPROTO_FRAGMENT: {
1319 struct frag_hdr *hp;
1320
1321 err = maybe_pull_tail(skb,
1322 off +
1323 sizeof(struct frag_hdr),
1324 MAX_IPV6_HDR_LEN);
1325 if (err < 0)
1326 goto out;
1327
1328 hp = OPT_HDR(struct frag_hdr, skb, off);
1329
1330 if (hp->frag_off & htons(IP6_OFFSET | IP6_MF))
1331 fragment = true;
1332
1333 nexthdr = hp->nexthdr;
1334 off += sizeof(struct frag_hdr);
1276 break; 1335 break;
1277 } 1336 }
1278 case IPPROTO_FRAGMENT:
1279 fragment = true;
1280 /* fall through */
1281 default: 1337 default:
1282 done = true; 1338 done = true;
1283 break; 1339 break;
1284 } 1340 }
1285 } 1341 }
1286 1342
1287 if (!done) { 1343 err = -EPROTO;
1288 if (net_ratelimit())
1289 netdev_err(vif->dev, "Failed to parse packet header\n");
1290 goto out;
1291 }
1292 1344
1293 if (fragment) { 1345 if (!done || fragment)
1294 if (net_ratelimit())
1295 netdev_err(vif->dev, "Packet is a fragment!\n");
1296 goto out; 1346 goto out;
1297 }
1298 1347
1299 switch (nexthdr) { 1348 switch (nexthdr) {
1300 case IPPROTO_TCP: 1349 case IPPROTO_TCP:
1301 if (!skb_partial_csum_set(skb, off, 1350 err = maybe_pull_tail(skb,
1302 offsetof(struct tcphdr, check))) 1351 off + sizeof(struct tcphdr),
1352 MAX_IPV6_HDR_LEN);
1353 if (err < 0)
1303 goto out; 1354 goto out;
1304 1355
1305 if (recalculate_partial_csum) { 1356 if (!skb_partial_csum_set(skb, off,
1306 struct tcphdr *tcph = tcp_hdr(skb); 1357 offsetof(struct tcphdr, check))) {
1307 1358 err = -EPROTO;
1308 header_size = skb->network_header + 1359 goto out;
1309 off +
1310 sizeof(struct tcphdr);
1311 maybe_pull_tail(skb, header_size);
1312
1313 tcph->check = ~csum_ipv6_magic(&ipv6h->saddr,
1314 &ipv6h->daddr,
1315 skb->len - off,
1316 IPPROTO_TCP, 0);
1317 } 1360 }
1361
1362 if (recalculate_partial_csum)
1363 tcp_hdr(skb)->check =
1364 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
1365 &ipv6_hdr(skb)->daddr,
1366 skb->len - off,
1367 IPPROTO_TCP, 0);
1318 break; 1368 break;
1319 case IPPROTO_UDP: 1369 case IPPROTO_UDP:
1320 if (!skb_partial_csum_set(skb, off, 1370 err = maybe_pull_tail(skb,
1321 offsetof(struct udphdr, check))) 1371 off + sizeof(struct udphdr),
1372 MAX_IPV6_HDR_LEN);
1373 if (err < 0)
1322 goto out; 1374 goto out;
1323 1375
1324 if (recalculate_partial_csum) { 1376 if (!skb_partial_csum_set(skb, off,
1325 struct udphdr *udph = udp_hdr(skb); 1377 offsetof(struct udphdr, check))) {
1326 1378 err = -EPROTO;
1327 header_size = skb->network_header + 1379 goto out;
1328 off +
1329 sizeof(struct udphdr);
1330 maybe_pull_tail(skb, header_size);
1331
1332 udph->check = ~csum_ipv6_magic(&ipv6h->saddr,
1333 &ipv6h->daddr,
1334 skb->len - off,
1335 IPPROTO_UDP, 0);
1336 } 1380 }
1381
1382 if (recalculate_partial_csum)
1383 udp_hdr(skb)->check =
1384 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
1385 &ipv6_hdr(skb)->daddr,
1386 skb->len - off,
1387 IPPROTO_UDP, 0);
1337 break; 1388 break;
1338 default: 1389 default:
1339 if (net_ratelimit())
1340 netdev_err(vif->dev,
1341 "Attempting to checksum a non-TCP/UDP packet, "
1342 "dropping a protocol %d packet\n",
1343 nexthdr);
1344 goto out; 1390 goto out;
1345 } 1391 }
1346 1392
@@ -1410,14 +1456,15 @@ static bool tx_credit_exceeded(struct xenvif *vif, unsigned size)
1410 return false; 1456 return false;
1411} 1457}
1412 1458
1413static unsigned xenvif_tx_build_gops(struct xenvif *vif) 1459static unsigned xenvif_tx_build_gops(struct xenvif *vif, int budget)
1414{ 1460{
1415 struct gnttab_copy *gop = vif->tx_copy_ops, *request_gop; 1461 struct gnttab_copy *gop = vif->tx_copy_ops, *request_gop;
1416 struct sk_buff *skb; 1462 struct sk_buff *skb;
1417 int ret; 1463 int ret;
1418 1464
1419 while ((nr_pending_reqs(vif) + XEN_NETBK_LEGACY_SLOTS_MAX 1465 while ((nr_pending_reqs(vif) + XEN_NETBK_LEGACY_SLOTS_MAX
1420 < MAX_PENDING_REQS)) { 1466 < MAX_PENDING_REQS) &&
1467 (skb_queue_len(&vif->tx_queue) < budget)) {
1421 struct xen_netif_tx_request txreq; 1468 struct xen_netif_tx_request txreq;
1422 struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX]; 1469 struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX];
1423 struct page *page; 1470 struct page *page;
@@ -1439,7 +1486,7 @@ static unsigned xenvif_tx_build_gops(struct xenvif *vif)
1439 continue; 1486 continue;
1440 } 1487 }
1441 1488
1442 RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, work_to_do); 1489 work_to_do = RING_HAS_UNCONSUMED_REQUESTS(&vif->tx);
1443 if (!work_to_do) 1490 if (!work_to_do)
1444 break; 1491 break;
1445 1492
@@ -1579,14 +1626,13 @@ static unsigned xenvif_tx_build_gops(struct xenvif *vif)
1579} 1626}
1580 1627
1581 1628
1582static int xenvif_tx_submit(struct xenvif *vif, int budget) 1629static int xenvif_tx_submit(struct xenvif *vif)
1583{ 1630{
1584 struct gnttab_copy *gop = vif->tx_copy_ops; 1631 struct gnttab_copy *gop = vif->tx_copy_ops;
1585 struct sk_buff *skb; 1632 struct sk_buff *skb;
1586 int work_done = 0; 1633 int work_done = 0;
1587 1634
1588 while (work_done < budget && 1635 while ((skb = __skb_dequeue(&vif->tx_queue)) != NULL) {
1589 (skb = __skb_dequeue(&vif->tx_queue)) != NULL) {
1590 struct xen_netif_tx_request *txp; 1636 struct xen_netif_tx_request *txp;
1591 u16 pending_idx; 1637 u16 pending_idx;
1592 unsigned data_len; 1638 unsigned data_len;
@@ -1661,14 +1707,14 @@ int xenvif_tx_action(struct xenvif *vif, int budget)
1661 if (unlikely(!tx_work_todo(vif))) 1707 if (unlikely(!tx_work_todo(vif)))
1662 return 0; 1708 return 0;
1663 1709
1664 nr_gops = xenvif_tx_build_gops(vif); 1710 nr_gops = xenvif_tx_build_gops(vif, budget);
1665 1711
1666 if (nr_gops == 0) 1712 if (nr_gops == 0)
1667 return 0; 1713 return 0;
1668 1714
1669 gnttab_batch_copy(vif->tx_copy_ops, nr_gops); 1715 gnttab_batch_copy(vif->tx_copy_ops, nr_gops);
1670 1716
1671 work_done = xenvif_tx_submit(vif, nr_gops); 1717 work_done = xenvif_tx_submit(vif);
1672 1718
1673 return work_done; 1719 return work_done;
1674} 1720}
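The reworked maybe_pull_tail() now reports failure instead of silently doing nothing, and the checksum helpers re-read the header through ip_hdr()/ipv6_hdr() after every pull because pulling may move the header. A rough sketch of the calling convention, assuming it sits next to the helper and MAX_IP_HDR_LEN in netback.c (example_parse_ipv4() and its return convention are illustrative only):

    static int example_parse_ipv4(struct sk_buff *skb)
    {
            int err;

            /* Make at least the fixed IPv4 header linear, pulling up to
             * MAX_IP_HDR_LEN in one go so later pulls are usually free. */
            err = maybe_pull_tail(skb, sizeof(struct iphdr), MAX_IP_HDR_LEN);
            if (err < 0)
                    return err;             /* -ENOMEM or -EPROTO from the helper */

            /* Re-read the header through ip_hdr() after the pull. */
            if (ip_hdr(skb)->frag_off & htons(IP_OFFSET | IP_MF))
                    return -EPROTO;         /* fragments are not handled here */

            return ip_hdr(skb)->protocol;
    }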
diff --git a/drivers/ntb/ntb_hw.c b/drivers/ntb/ntb_hw.c
index 1cb6e51e6bda..170e8e60cdb7 100644
--- a/drivers/ntb/ntb_hw.c
+++ b/drivers/ntb/ntb_hw.c
@@ -141,6 +141,24 @@ void ntb_unregister_event_callback(struct ntb_device *ndev)
141 ndev->event_cb = NULL; 141 ndev->event_cb = NULL;
142} 142}
143 143
144static void ntb_irq_work(unsigned long data)
145{
146 struct ntb_db_cb *db_cb = (struct ntb_db_cb *)data;
147 int rc;
148
149 rc = db_cb->callback(db_cb->data, db_cb->db_num);
150 if (rc)
151 tasklet_schedule(&db_cb->irq_work);
152 else {
153 struct ntb_device *ndev = db_cb->ndev;
154 unsigned long mask;
155
156 mask = readw(ndev->reg_ofs.ldb_mask);
157 clear_bit(db_cb->db_num * ndev->bits_per_vector, &mask);
158 writew(mask, ndev->reg_ofs.ldb_mask);
159 }
160}
161
144/** 162/**
145 * ntb_register_db_callback() - register a callback for doorbell interrupt 163 * ntb_register_db_callback() - register a callback for doorbell interrupt
146 * @ndev: pointer to ntb_device instance 164 * @ndev: pointer to ntb_device instance
@@ -155,7 +173,7 @@ void ntb_unregister_event_callback(struct ntb_device *ndev)
155 * RETURNS: An appropriate -ERRNO error value on error, or zero for success. 173 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
156 */ 174 */
157int ntb_register_db_callback(struct ntb_device *ndev, unsigned int idx, 175int ntb_register_db_callback(struct ntb_device *ndev, unsigned int idx,
158 void *data, void (*func)(void *data, int db_num)) 176 void *data, int (*func)(void *data, int db_num))
159{ 177{
160 unsigned long mask; 178 unsigned long mask;
161 179
@@ -166,6 +184,10 @@ int ntb_register_db_callback(struct ntb_device *ndev, unsigned int idx,
166 184
167 ndev->db_cb[idx].callback = func; 185 ndev->db_cb[idx].callback = func;
168 ndev->db_cb[idx].data = data; 186 ndev->db_cb[idx].data = data;
187 ndev->db_cb[idx].ndev = ndev;
188
189 tasklet_init(&ndev->db_cb[idx].irq_work, ntb_irq_work,
190 (unsigned long) &ndev->db_cb[idx]);
169 191
170 /* unmask interrupt */ 192 /* unmask interrupt */
171 mask = readw(ndev->reg_ofs.ldb_mask); 193 mask = readw(ndev->reg_ofs.ldb_mask);
@@ -194,6 +216,8 @@ void ntb_unregister_db_callback(struct ntb_device *ndev, unsigned int idx)
194 set_bit(idx * ndev->bits_per_vector, &mask); 216 set_bit(idx * ndev->bits_per_vector, &mask);
195 writew(mask, ndev->reg_ofs.ldb_mask); 217 writew(mask, ndev->reg_ofs.ldb_mask);
196 218
219 tasklet_disable(&ndev->db_cb[idx].irq_work);
220
197 ndev->db_cb[idx].callback = NULL; 221 ndev->db_cb[idx].callback = NULL;
198} 222}
199 223
@@ -678,6 +702,7 @@ static int ntb_xeon_setup(struct ntb_device *ndev)
678 return -EINVAL; 702 return -EINVAL;
679 703
680 ndev->limits.max_mw = SNB_ERRATA_MAX_MW; 704 ndev->limits.max_mw = SNB_ERRATA_MAX_MW;
705 ndev->limits.max_db_bits = SNB_MAX_DB_BITS;
681 ndev->reg_ofs.spad_write = ndev->mw[1].vbase + 706 ndev->reg_ofs.spad_write = ndev->mw[1].vbase +
682 SNB_SPAD_OFFSET; 707 SNB_SPAD_OFFSET;
683 ndev->reg_ofs.rdb = ndev->mw[1].vbase + 708 ndev->reg_ofs.rdb = ndev->mw[1].vbase +
@@ -688,8 +713,21 @@ static int ntb_xeon_setup(struct ntb_device *ndev)
688 */ 713 */
689 writeq(ndev->mw[1].bar_sz + 0x1000, ndev->reg_base + 714 writeq(ndev->mw[1].bar_sz + 0x1000, ndev->reg_base +
690 SNB_PBAR4LMT_OFFSET); 715 SNB_PBAR4LMT_OFFSET);
716 /* HW errata on the Limit registers. They can only be
717 * written when the base register is 4GB aligned and
718 * < 32bit. This should already be the case based on the
719 * driver defaults, but write the Limit registers first
720 * just in case.
721 */
691 } else { 722 } else {
692 ndev->limits.max_mw = SNB_MAX_MW; 723 ndev->limits.max_mw = SNB_MAX_MW;
724
725 /* HW Errata on bit 14 of b2bdoorbell register. Writes
726 * will not be mirrored to the remote system. Shrink
727 * the number of bits by one, since bit 14 is the last
728 * bit.
729 */
730 ndev->limits.max_db_bits = SNB_MAX_DB_BITS - 1;
693 ndev->reg_ofs.spad_write = ndev->reg_base + 731 ndev->reg_ofs.spad_write = ndev->reg_base +
694 SNB_B2B_SPAD_OFFSET; 732 SNB_B2B_SPAD_OFFSET;
695 ndev->reg_ofs.rdb = ndev->reg_base + 733 ndev->reg_ofs.rdb = ndev->reg_base +
@@ -699,6 +737,12 @@ static int ntb_xeon_setup(struct ntb_device *ndev)
699 * something silly 737 * something silly
700 */ 738 */
701 writeq(0, ndev->reg_base + SNB_PBAR4LMT_OFFSET); 739 writeq(0, ndev->reg_base + SNB_PBAR4LMT_OFFSET);
740 /* HW errata on the Limit registers. They can only be
741 * written when the base register is 4GB aligned and
742 * < 32bit. This should already be the case based on the
743 * driver defaults, but write the Limit registers first
744 * just in case.
745 */
702 } 746 }
703 747
704 /* The Xeon errata workaround requires setting SBAR Base 748 /* The Xeon errata workaround requires setting SBAR Base
@@ -769,6 +813,7 @@ static int ntb_xeon_setup(struct ntb_device *ndev)
769 * have an equal amount. 813 * have an equal amount.
770 */ 814 */
771 ndev->limits.max_spads = SNB_MAX_COMPAT_SPADS / 2; 815 ndev->limits.max_spads = SNB_MAX_COMPAT_SPADS / 2;
816 ndev->limits.max_db_bits = SNB_MAX_DB_BITS;
772 /* Note: The SDOORBELL is the cause of the errata. You REALLY 817 /* Note: The SDOORBELL is the cause of the errata. You REALLY
773 * don't want to touch it. 818 * don't want to touch it.
774 */ 819 */
@@ -793,6 +838,7 @@ static int ntb_xeon_setup(struct ntb_device *ndev)
793 * have an equal amount. 838 * have an equal amount.
794 */ 839 */
795 ndev->limits.max_spads = SNB_MAX_COMPAT_SPADS / 2; 840 ndev->limits.max_spads = SNB_MAX_COMPAT_SPADS / 2;
841 ndev->limits.max_db_bits = SNB_MAX_DB_BITS;
796 ndev->reg_ofs.rdb = ndev->reg_base + SNB_PDOORBELL_OFFSET; 842 ndev->reg_ofs.rdb = ndev->reg_base + SNB_PDOORBELL_OFFSET;
797 ndev->reg_ofs.ldb = ndev->reg_base + SNB_SDOORBELL_OFFSET; 843 ndev->reg_ofs.ldb = ndev->reg_base + SNB_SDOORBELL_OFFSET;
798 ndev->reg_ofs.ldb_mask = ndev->reg_base + SNB_SDBMSK_OFFSET; 844 ndev->reg_ofs.ldb_mask = ndev->reg_base + SNB_SDBMSK_OFFSET;
@@ -819,7 +865,6 @@ static int ntb_xeon_setup(struct ntb_device *ndev)
819 ndev->reg_ofs.lnk_stat = ndev->reg_base + SNB_SLINK_STATUS_OFFSET; 865 ndev->reg_ofs.lnk_stat = ndev->reg_base + SNB_SLINK_STATUS_OFFSET;
820 ndev->reg_ofs.spci_cmd = ndev->reg_base + SNB_PCICMD_OFFSET; 866 ndev->reg_ofs.spci_cmd = ndev->reg_base + SNB_PCICMD_OFFSET;
821 867
822 ndev->limits.max_db_bits = SNB_MAX_DB_BITS;
823 ndev->limits.msix_cnt = SNB_MSIX_CNT; 868 ndev->limits.msix_cnt = SNB_MSIX_CNT;
824 ndev->bits_per_vector = SNB_DB_BITS_PER_VEC; 869 ndev->bits_per_vector = SNB_DB_BITS_PER_VEC;
825 870
@@ -934,12 +979,16 @@ static irqreturn_t bwd_callback_msix_irq(int irq, void *data)
934{ 979{
935 struct ntb_db_cb *db_cb = data; 980 struct ntb_db_cb *db_cb = data;
936 struct ntb_device *ndev = db_cb->ndev; 981 struct ntb_device *ndev = db_cb->ndev;
982 unsigned long mask;
937 983
938 dev_dbg(&ndev->pdev->dev, "MSI-X irq %d received for DB %d\n", irq, 984 dev_dbg(&ndev->pdev->dev, "MSI-X irq %d received for DB %d\n", irq,
939 db_cb->db_num); 985 db_cb->db_num);
940 986
941 if (db_cb->callback) 987 mask = readw(ndev->reg_ofs.ldb_mask);
942 db_cb->callback(db_cb->data, db_cb->db_num); 988 set_bit(db_cb->db_num * ndev->bits_per_vector, &mask);
989 writew(mask, ndev->reg_ofs.ldb_mask);
990
991 tasklet_schedule(&db_cb->irq_work);
943 992
944 /* No need to check for the specific HB irq, any interrupt means 993 /* No need to check for the specific HB irq, any interrupt means
945 * we're connected. 994 * we're connected.
@@ -955,12 +1004,16 @@ static irqreturn_t xeon_callback_msix_irq(int irq, void *data)
955{ 1004{
956 struct ntb_db_cb *db_cb = data; 1005 struct ntb_db_cb *db_cb = data;
957 struct ntb_device *ndev = db_cb->ndev; 1006 struct ntb_device *ndev = db_cb->ndev;
1007 unsigned long mask;
958 1008
959 dev_dbg(&ndev->pdev->dev, "MSI-X irq %d received for DB %d\n", irq, 1009 dev_dbg(&ndev->pdev->dev, "MSI-X irq %d received for DB %d\n", irq,
960 db_cb->db_num); 1010 db_cb->db_num);
961 1011
962 if (db_cb->callback) 1012 mask = readw(ndev->reg_ofs.ldb_mask);
963 db_cb->callback(db_cb->data, db_cb->db_num); 1013 set_bit(db_cb->db_num * ndev->bits_per_vector, &mask);
1014 writew(mask, ndev->reg_ofs.ldb_mask);
1015
1016 tasklet_schedule(&db_cb->irq_work);
964 1017
965 /* On Sandybridge, there are 16 bits in the interrupt register 1018 /* On Sandybridge, there are 16 bits in the interrupt register
966 * but only 4 vectors. So, 5 bits are assigned to the first 3 1019 * but only 4 vectors. So, 5 bits are assigned to the first 3
@@ -986,7 +1039,7 @@ static irqreturn_t xeon_event_msix_irq(int irq, void *dev)
986 dev_err(&ndev->pdev->dev, "Error determining link status\n"); 1039 dev_err(&ndev->pdev->dev, "Error determining link status\n");
987 1040
988 /* bit 15 is always the link bit */ 1041 /* bit 15 is always the link bit */
989 writew(1 << ndev->limits.max_db_bits, ndev->reg_ofs.ldb); 1042 writew(1 << SNB_LINK_DB, ndev->reg_ofs.ldb);
990 1043
991 return IRQ_HANDLED; 1044 return IRQ_HANDLED;
992} 1045}
@@ -1075,6 +1128,10 @@ static int ntb_setup_msix(struct ntb_device *ndev)
1075 "Only %d MSI-X vectors. Limiting the number of queues to that number.\n", 1128 "Only %d MSI-X vectors. Limiting the number of queues to that number.\n",
1076 rc); 1129 rc);
1077 msix_entries = rc; 1130 msix_entries = rc;
1131
1132 rc = pci_enable_msix(pdev, ndev->msix_entries, msix_entries);
1133 if (rc)
1134 goto err1;
1078 } 1135 }
1079 1136
1080 for (i = 0; i < msix_entries; i++) { 1137 for (i = 0; i < msix_entries; i++) {
@@ -1176,9 +1233,10 @@ static int ntb_setup_interrupts(struct ntb_device *ndev)
1176 */ 1233 */
1177 if (ndev->hw_type == BWD_HW) 1234 if (ndev->hw_type == BWD_HW)
1178 writeq(~0, ndev->reg_ofs.ldb_mask); 1235 writeq(~0, ndev->reg_ofs.ldb_mask);
1179 else 1236 else {
1180 writew(~(1 << ndev->limits.max_db_bits), 1237 u16 var = 1 << SNB_LINK_DB;
1181 ndev->reg_ofs.ldb_mask); 1238 writew(~var, ndev->reg_ofs.ldb_mask);
1239 }
1182 1240
1183 rc = ntb_setup_msix(ndev); 1241 rc = ntb_setup_msix(ndev);
1184 if (!rc) 1242 if (!rc)
@@ -1286,6 +1344,39 @@ static void ntb_free_debugfs(struct ntb_device *ndev)
1286 } 1344 }
1287} 1345}
1288 1346
1347static void ntb_hw_link_up(struct ntb_device *ndev)
1348{
1349 if (ndev->conn_type == NTB_CONN_TRANSPARENT)
1350 ntb_link_event(ndev, NTB_LINK_UP);
1351 else {
1352 u32 ntb_cntl;
1353
1354 /* Let's bring the NTB link up */
1355 ntb_cntl = readl(ndev->reg_ofs.lnk_cntl);
1356 ntb_cntl &= ~(NTB_CNTL_LINK_DISABLE | NTB_CNTL_CFG_LOCK);
1357 ntb_cntl |= NTB_CNTL_P2S_BAR23_SNOOP | NTB_CNTL_S2P_BAR23_SNOOP;
1358 ntb_cntl |= NTB_CNTL_P2S_BAR45_SNOOP | NTB_CNTL_S2P_BAR45_SNOOP;
1359 writel(ntb_cntl, ndev->reg_ofs.lnk_cntl);
1360 }
1361}
1362
1363static void ntb_hw_link_down(struct ntb_device *ndev)
1364{
1365 u32 ntb_cntl;
1366
1367 if (ndev->conn_type == NTB_CONN_TRANSPARENT) {
1368 ntb_link_event(ndev, NTB_LINK_DOWN);
1369 return;
1370 }
1371
1372 /* Bring NTB link down */
1373 ntb_cntl = readl(ndev->reg_ofs.lnk_cntl);
1374 ntb_cntl &= ~(NTB_CNTL_P2S_BAR23_SNOOP | NTB_CNTL_S2P_BAR23_SNOOP);
1375 ntb_cntl &= ~(NTB_CNTL_P2S_BAR45_SNOOP | NTB_CNTL_S2P_BAR45_SNOOP);
1376 ntb_cntl |= NTB_CNTL_LINK_DISABLE | NTB_CNTL_CFG_LOCK;
1377 writel(ntb_cntl, ndev->reg_ofs.lnk_cntl);
1378}
1379
1289static int ntb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) 1380static int ntb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1290{ 1381{
1291 struct ntb_device *ndev; 1382 struct ntb_device *ndev;
@@ -1374,9 +1465,7 @@ static int ntb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1374 if (rc) 1465 if (rc)
1375 goto err6; 1466 goto err6;
1376 1467
1377 /* Let's bring the NTB link up */ 1468 ntb_hw_link_up(ndev);
1378 writel(NTB_CNTL_BAR23_SNOOP | NTB_CNTL_BAR45_SNOOP,
1379 ndev->reg_ofs.lnk_cntl);
1380 1469
1381 return 0; 1470 return 0;
1382 1471
@@ -1406,12 +1495,8 @@ static void ntb_pci_remove(struct pci_dev *pdev)
1406{ 1495{
1407 struct ntb_device *ndev = pci_get_drvdata(pdev); 1496 struct ntb_device *ndev = pci_get_drvdata(pdev);
1408 int i; 1497 int i;
1409 u32 ntb_cntl;
1410 1498
1411 /* Bring NTB link down */ 1499 ntb_hw_link_down(ndev);
1412 ntb_cntl = readl(ndev->reg_ofs.lnk_cntl);
1413 ntb_cntl |= NTB_CNTL_LINK_DISABLE;
1414 writel(ntb_cntl, ndev->reg_ofs.lnk_cntl);
1415 1500
1416 ntb_transport_free(ndev->ntb_transport); 1501 ntb_transport_free(ndev->ntb_transport);
1417 1502
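The ntb_hw.c interrupt changes move the doorbell callback out of hard-IRQ context: the MSI-X handler masks the doorbell bit and schedules a tasklet, and the tasklet either reschedules itself (callback returned non-zero, more work pending) or unmasks the bit again. Reduced to its skeleton, with mask_db_bit()/unmask_db_bit() as assumed stand-ins for the readw/set_bit/clear_bit/writew sequences in the hunks above:

    static void db_irq_work(unsigned long data)
    {
            struct ntb_db_cb *db_cb = (struct ntb_db_cb *)data;

            if (db_cb->callback(db_cb->data, db_cb->db_num))
                    tasklet_schedule(&db_cb->irq_work); /* callback wants another pass */
            else
                    unmask_db_bit(db_cb);               /* assumed helper, see above */
    }

    static irqreturn_t db_msix_irq(int irq, void *data)
    {
            struct ntb_db_cb *db_cb = data;

            mask_db_bit(db_cb);                         /* assumed helper, see above */
            tasklet_schedule(&db_cb->irq_work);
            return IRQ_HANDLED;
    }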
diff --git a/drivers/ntb/ntb_hw.h b/drivers/ntb/ntb_hw.h
index 0a31cedae7d4..bbdb7edca10c 100644
--- a/drivers/ntb/ntb_hw.h
+++ b/drivers/ntb/ntb_hw.h
@@ -106,10 +106,11 @@ struct ntb_mw {
106}; 106};
107 107
108struct ntb_db_cb { 108struct ntb_db_cb {
109 void (*callback) (void *data, int db_num); 109 int (*callback)(void *data, int db_num);
110 unsigned int db_num; 110 unsigned int db_num;
111 void *data; 111 void *data;
112 struct ntb_device *ndev; 112 struct ntb_device *ndev;
113 struct tasklet_struct irq_work;
113}; 114};
114 115
115struct ntb_device { 116struct ntb_device {
@@ -228,8 +229,8 @@ struct ntb_device *ntb_register_transport(struct pci_dev *pdev,
228void ntb_unregister_transport(struct ntb_device *ndev); 229void ntb_unregister_transport(struct ntb_device *ndev);
229void ntb_set_mw_addr(struct ntb_device *ndev, unsigned int mw, u64 addr); 230void ntb_set_mw_addr(struct ntb_device *ndev, unsigned int mw, u64 addr);
230int ntb_register_db_callback(struct ntb_device *ndev, unsigned int idx, 231int ntb_register_db_callback(struct ntb_device *ndev, unsigned int idx,
231 void *data, void (*db_cb_func) (void *data, 232 void *data, int (*db_cb_func)(void *data,
232 int db_num)); 233 int db_num));
233void ntb_unregister_db_callback(struct ntb_device *ndev, unsigned int idx); 234void ntb_unregister_db_callback(struct ntb_device *ndev, unsigned int idx);
234int ntb_register_event_callback(struct ntb_device *ndev, 235int ntb_register_event_callback(struct ntb_device *ndev,
235 void (*event_cb_func) (void *handle, 236 void (*event_cb_func) (void *handle,
diff --git a/drivers/ntb/ntb_regs.h b/drivers/ntb/ntb_regs.h
index aa4bdd393c58..9774506419d7 100644
--- a/drivers/ntb/ntb_regs.h
+++ b/drivers/ntb/ntb_regs.h
@@ -55,6 +55,7 @@
55#define SNB_MAX_COMPAT_SPADS 16 55#define SNB_MAX_COMPAT_SPADS 16
56/* Reserve the uppermost bit for link interrupt */ 56/* Reserve the uppermost bit for link interrupt */
57#define SNB_MAX_DB_BITS 15 57#define SNB_MAX_DB_BITS 15
58#define SNB_LINK_DB 15
58#define SNB_DB_BITS_PER_VEC 5 59#define SNB_DB_BITS_PER_VEC 5
59#define SNB_MAX_MW 2 60#define SNB_MAX_MW 2
60#define SNB_ERRATA_MAX_MW 1 61#define SNB_ERRATA_MAX_MW 1
@@ -75,9 +76,6 @@
75#define SNB_SBAR2XLAT_OFFSET 0x0030 76#define SNB_SBAR2XLAT_OFFSET 0x0030
76#define SNB_SBAR4XLAT_OFFSET 0x0038 77#define SNB_SBAR4XLAT_OFFSET 0x0038
77#define SNB_SBAR0BASE_OFFSET 0x0040 78#define SNB_SBAR0BASE_OFFSET 0x0040
78#define SNB_SBAR0BASE_OFFSET 0x0040
79#define SNB_SBAR2BASE_OFFSET 0x0048
80#define SNB_SBAR4BASE_OFFSET 0x0050
81#define SNB_SBAR2BASE_OFFSET 0x0048 79#define SNB_SBAR2BASE_OFFSET 0x0048
82#define SNB_SBAR4BASE_OFFSET 0x0050 80#define SNB_SBAR4BASE_OFFSET 0x0050
83#define SNB_NTBCNTL_OFFSET 0x0058 81#define SNB_NTBCNTL_OFFSET 0x0058
@@ -145,11 +143,13 @@
145#define BWD_LTSSMSTATEJMP_FORCEDETECT (1 << 2) 143#define BWD_LTSSMSTATEJMP_FORCEDETECT (1 << 2)
146#define BWD_IBIST_ERR_OFLOW 0x7FFF7FFF 144#define BWD_IBIST_ERR_OFLOW 0x7FFF7FFF
147 145
148#define NTB_CNTL_CFG_LOCK (1 << 0) 146#define NTB_CNTL_CFG_LOCK (1 << 0)
149#define NTB_CNTL_LINK_DISABLE (1 << 1) 147#define NTB_CNTL_LINK_DISABLE (1 << 1)
150#define NTB_CNTL_BAR23_SNOOP (1 << 2) 148#define NTB_CNTL_S2P_BAR23_SNOOP (1 << 2)
151#define NTB_CNTL_BAR45_SNOOP (1 << 6) 149#define NTB_CNTL_P2S_BAR23_SNOOP (1 << 4)
152#define BWD_CNTL_LINK_DOWN (1 << 16) 150#define NTB_CNTL_S2P_BAR45_SNOOP (1 << 6)
151#define NTB_CNTL_P2S_BAR45_SNOOP (1 << 8)
152#define BWD_CNTL_LINK_DOWN (1 << 16)
153 153
154#define NTB_PPD_OFFSET 0x00D4 154#define NTB_PPD_OFFSET 0x00D4
155#define SNB_PPD_CONN_TYPE 0x0003 155#define SNB_PPD_CONN_TYPE 0x0003
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
index d0222f13d154..3217f394d45b 100644
--- a/drivers/ntb/ntb_transport.c
+++ b/drivers/ntb/ntb_transport.c
@@ -119,7 +119,6 @@ struct ntb_transport_qp {
119 119
120 void (*rx_handler) (struct ntb_transport_qp *qp, void *qp_data, 120 void (*rx_handler) (struct ntb_transport_qp *qp, void *qp_data,
121 void *data, int len); 121 void *data, int len);
122 struct tasklet_struct rx_work;
123 struct list_head rx_pend_q; 122 struct list_head rx_pend_q;
124 struct list_head rx_free_q; 123 struct list_head rx_free_q;
125 spinlock_t ntb_rx_pend_q_lock; 124 spinlock_t ntb_rx_pend_q_lock;
@@ -584,11 +583,8 @@ static int ntb_set_mw(struct ntb_transport *nt, int num_mw, unsigned int size)
584 return 0; 583 return 0;
585} 584}
586 585
587static void ntb_qp_link_cleanup(struct work_struct *work) 586static void ntb_qp_link_cleanup(struct ntb_transport_qp *qp)
588{ 587{
589 struct ntb_transport_qp *qp = container_of(work,
590 struct ntb_transport_qp,
591 link_cleanup);
592 struct ntb_transport *nt = qp->transport; 588 struct ntb_transport *nt = qp->transport;
593 struct pci_dev *pdev = ntb_query_pdev(nt->ndev); 589 struct pci_dev *pdev = ntb_query_pdev(nt->ndev);
594 590
@@ -602,6 +598,16 @@ static void ntb_qp_link_cleanup(struct work_struct *work)
602 598
603 dev_info(&pdev->dev, "qp %d: Link Down\n", qp->qp_num); 599 dev_info(&pdev->dev, "qp %d: Link Down\n", qp->qp_num);
604 qp->qp_link = NTB_LINK_DOWN; 600 qp->qp_link = NTB_LINK_DOWN;
601}
602
603static void ntb_qp_link_cleanup_work(struct work_struct *work)
604{
605 struct ntb_transport_qp *qp = container_of(work,
606 struct ntb_transport_qp,
607 link_cleanup);
608 struct ntb_transport *nt = qp->transport;
609
610 ntb_qp_link_cleanup(qp);
605 611
606 if (nt->transport_link == NTB_LINK_UP) 612 if (nt->transport_link == NTB_LINK_UP)
607 schedule_delayed_work(&qp->link_work, 613 schedule_delayed_work(&qp->link_work,
@@ -613,22 +619,20 @@ static void ntb_qp_link_down(struct ntb_transport_qp *qp)
613 schedule_work(&qp->link_cleanup); 619 schedule_work(&qp->link_cleanup);
614} 620}
615 621
616static void ntb_transport_link_cleanup(struct work_struct *work) 622static void ntb_transport_link_cleanup(struct ntb_transport *nt)
617{ 623{
618 struct ntb_transport *nt = container_of(work, struct ntb_transport,
619 link_cleanup);
620 int i; 624 int i;
621 625
626 /* Pass along the info to any clients */
627 for (i = 0; i < nt->max_qps; i++)
628 if (!test_bit(i, &nt->qp_bitmap))
629 ntb_qp_link_cleanup(&nt->qps[i]);
630
622 if (nt->transport_link == NTB_LINK_DOWN) 631 if (nt->transport_link == NTB_LINK_DOWN)
623 cancel_delayed_work_sync(&nt->link_work); 632 cancel_delayed_work_sync(&nt->link_work);
624 else 633 else
625 nt->transport_link = NTB_LINK_DOWN; 634 nt->transport_link = NTB_LINK_DOWN;
626 635
627 /* Pass along the info to any clients */
628 for (i = 0; i < nt->max_qps; i++)
629 if (!test_bit(i, &nt->qp_bitmap))
630 ntb_qp_link_down(&nt->qps[i]);
631
632 /* The scratchpad registers keep the values if the remote side 636 /* The scratchpad registers keep the values if the remote side
633 * goes down, blast them now to give them a sane value the next 637 * goes down, blast them now to give them a sane value the next
634 * time they are accessed 638 * time they are accessed
@@ -637,6 +641,14 @@ static void ntb_transport_link_cleanup(struct work_struct *work)
637 ntb_write_local_spad(nt->ndev, i, 0); 641 ntb_write_local_spad(nt->ndev, i, 0);
638} 642}
639 643
644static void ntb_transport_link_cleanup_work(struct work_struct *work)
645{
646 struct ntb_transport *nt = container_of(work, struct ntb_transport,
647 link_cleanup);
648
649 ntb_transport_link_cleanup(nt);
650}
651
640static void ntb_transport_event_callback(void *data, enum ntb_hw_event event) 652static void ntb_transport_event_callback(void *data, enum ntb_hw_event event)
641{ 653{
642 struct ntb_transport *nt = data; 654 struct ntb_transport *nt = data;
@@ -880,7 +892,7 @@ static int ntb_transport_init_queue(struct ntb_transport *nt,
880 } 892 }
881 893
882 INIT_DELAYED_WORK(&qp->link_work, ntb_qp_link_work); 894 INIT_DELAYED_WORK(&qp->link_work, ntb_qp_link_work);
883 INIT_WORK(&qp->link_cleanup, ntb_qp_link_cleanup); 895 INIT_WORK(&qp->link_cleanup, ntb_qp_link_cleanup_work);
884 896
885 spin_lock_init(&qp->ntb_rx_pend_q_lock); 897 spin_lock_init(&qp->ntb_rx_pend_q_lock);
886 spin_lock_init(&qp->ntb_rx_free_q_lock); 898 spin_lock_init(&qp->ntb_rx_free_q_lock);
@@ -936,7 +948,7 @@ int ntb_transport_init(struct pci_dev *pdev)
936 } 948 }
937 949
938 INIT_DELAYED_WORK(&nt->link_work, ntb_transport_link_work); 950 INIT_DELAYED_WORK(&nt->link_work, ntb_transport_link_work);
939 INIT_WORK(&nt->link_cleanup, ntb_transport_link_cleanup); 951 INIT_WORK(&nt->link_cleanup, ntb_transport_link_cleanup_work);
940 952
941 rc = ntb_register_event_callback(nt->ndev, 953 rc = ntb_register_event_callback(nt->ndev,
942 ntb_transport_event_callback); 954 ntb_transport_event_callback);
@@ -972,7 +984,7 @@ void ntb_transport_free(void *transport)
972 struct ntb_device *ndev = nt->ndev; 984 struct ntb_device *ndev = nt->ndev;
973 int i; 985 int i;
974 986
975 nt->transport_link = NTB_LINK_DOWN; 987 ntb_transport_link_cleanup(nt);
976 988
977 /* verify that all the qp's are freed */ 989 /* verify that all the qp's are freed */
978 for (i = 0; i < nt->max_qps; i++) { 990 for (i = 0; i < nt->max_qps; i++) {
@@ -1188,11 +1200,14 @@ err:
1188 goto out; 1200 goto out;
1189} 1201}
1190 1202
1191static void ntb_transport_rx(unsigned long data) 1203static int ntb_transport_rxc_db(void *data, int db_num)
1192{ 1204{
1193 struct ntb_transport_qp *qp = (struct ntb_transport_qp *)data; 1205 struct ntb_transport_qp *qp = data;
1194 int rc, i; 1206 int rc, i;
1195 1207
1208 dev_dbg(&ntb_query_pdev(qp->ndev)->dev, "%s: doorbell %d received\n",
1209 __func__, db_num);
1210
1196 /* Limit the number of packets processed in a single interrupt to 1211 /* Limit the number of packets processed in a single interrupt to
1197 * provide fairness to others 1212 * provide fairness to others
1198 */ 1213 */
@@ -1204,16 +1219,8 @@ static void ntb_transport_rx(unsigned long data)
1204 1219
1205 if (qp->dma_chan) 1220 if (qp->dma_chan)
1206 dma_async_issue_pending(qp->dma_chan); 1221 dma_async_issue_pending(qp->dma_chan);
1207}
1208
1209static void ntb_transport_rxc_db(void *data, int db_num)
1210{
1211 struct ntb_transport_qp *qp = data;
1212
1213 dev_dbg(&ntb_query_pdev(qp->ndev)->dev, "%s: doorbell %d received\n",
1214 __func__, db_num);
1215 1222
1216 tasklet_schedule(&qp->rx_work); 1223 return i;
1217} 1224}
1218 1225
1219static void ntb_tx_copy_callback(void *data) 1226static void ntb_tx_copy_callback(void *data)
@@ -1432,11 +1439,12 @@ ntb_transport_create_queue(void *data, struct pci_dev *pdev,
1432 qp->tx_handler = handlers->tx_handler; 1439 qp->tx_handler = handlers->tx_handler;
1433 qp->event_handler = handlers->event_handler; 1440 qp->event_handler = handlers->event_handler;
1434 1441
1442 dmaengine_get();
1435 qp->dma_chan = dma_find_channel(DMA_MEMCPY); 1443 qp->dma_chan = dma_find_channel(DMA_MEMCPY);
1436 if (!qp->dma_chan) 1444 if (!qp->dma_chan) {
1445 dmaengine_put();
1437 dev_info(&pdev->dev, "Unable to allocate DMA channel, using CPU instead\n"); 1446 dev_info(&pdev->dev, "Unable to allocate DMA channel, using CPU instead\n");
1438 else 1447 }
1439 dmaengine_get();
1440 1448
1441 for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) { 1449 for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
1442 entry = kzalloc(sizeof(struct ntb_queue_entry), GFP_ATOMIC); 1450 entry = kzalloc(sizeof(struct ntb_queue_entry), GFP_ATOMIC);
@@ -1458,25 +1466,23 @@ ntb_transport_create_queue(void *data, struct pci_dev *pdev,
1458 &qp->tx_free_q); 1466 &qp->tx_free_q);
1459 } 1467 }
1460 1468
1461 tasklet_init(&qp->rx_work, ntb_transport_rx, (unsigned long) qp);
1462
1463 rc = ntb_register_db_callback(qp->ndev, free_queue, qp, 1469 rc = ntb_register_db_callback(qp->ndev, free_queue, qp,
1464 ntb_transport_rxc_db); 1470 ntb_transport_rxc_db);
1465 if (rc) 1471 if (rc)
1466 goto err3; 1472 goto err2;
1467 1473
1468 dev_info(&pdev->dev, "NTB Transport QP %d created\n", qp->qp_num); 1474 dev_info(&pdev->dev, "NTB Transport QP %d created\n", qp->qp_num);
1469 1475
1470 return qp; 1476 return qp;
1471 1477
1472err3:
1473 tasklet_disable(&qp->rx_work);
1474err2: 1478err2:
1475 while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q))) 1479 while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
1476 kfree(entry); 1480 kfree(entry);
1477err1: 1481err1:
1478 while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q))) 1482 while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q)))
1479 kfree(entry); 1483 kfree(entry);
1484 if (qp->dma_chan)
1485 dmaengine_put();
1480 set_bit(free_queue, &nt->qp_bitmap); 1486 set_bit(free_queue, &nt->qp_bitmap);
1481err: 1487err:
1482 return NULL; 1488 return NULL;
@@ -1515,7 +1521,6 @@ void ntb_transport_free_queue(struct ntb_transport_qp *qp)
1515 } 1521 }
1516 1522
1517 ntb_unregister_db_callback(qp->ndev, qp->qp_num); 1523 ntb_unregister_db_callback(qp->ndev, qp->qp_num);
1518 tasklet_disable(&qp->rx_work);
1519 1524
1520 cancel_delayed_work_sync(&qp->link_work); 1525 cancel_delayed_work_sync(&qp->link_work);
1521 1526
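Two threads of the ntb_transport.c change are worth noting: the link-cleanup logic is split into a plain function plus a *_work wrapper so ntb_transport_free() can run the cleanup synchronously, and dmaengine_get() is now taken before dma_find_channel() and dropped again on every path that ends up without a channel. A small sketch of the latter (grab_memcpy_chan() is an illustrative name, not a driver function):

    static struct dma_chan *grab_memcpy_chan(void)
    {
            struct dma_chan *chan;

            dmaengine_get();                    /* take the client reference first */
            chan = dma_find_channel(DMA_MEMCPY);
            if (!chan)
                    dmaengine_put();            /* no channel found: drop it again */

            return chan;                        /* caller drops the reference later */
    }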
diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig
index de6f8990246f..c6973f101a3e 100644
--- a/drivers/of/Kconfig
+++ b/drivers/of/Kconfig
@@ -20,7 +20,7 @@ config OF_SELFTEST
20 depends on OF_IRQ 20 depends on OF_IRQ
21 help 21 help
22 This option builds in test cases for the device tree infrastructure 22 This option builds in test cases for the device tree infrastructure
23 that are executed one at boot time, and the results dumped to the 23 that are executed once at boot time, and the results dumped to the
24 console. 24 console.
25 25
26 If unsure, say N here, but this option is safe to enable. 26 If unsure, say N here, but this option is safe to enable.
diff --git a/drivers/of/address.c b/drivers/of/address.c
index 4b9317bdb81c..d3dd41c840f1 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -69,14 +69,6 @@ static u64 of_bus_default_map(__be32 *addr, const __be32 *range,
69 (unsigned long long)cp, (unsigned long long)s, 69 (unsigned long long)cp, (unsigned long long)s,
70 (unsigned long long)da); 70 (unsigned long long)da);
71 71
72 /*
73 * If the number of address cells is larger than 2 we assume the
74 * mapping doesn't specify a physical address. Rather, the address
75 * specifies an identifier that must match exactly.
76 */
77 if (na > 2 && memcmp(range, addr, na * 4) != 0)
78 return OF_BAD_ADDR;
79
80 if (da < cp || da >= (cp + s)) 72 if (da < cp || da >= (cp + s))
81 return OF_BAD_ADDR; 73 return OF_BAD_ADDR;
82 return da - cp; 74 return da - cp;
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 2fa024b97c43..758b4f8b30b7 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -922,8 +922,16 @@ void __init unflatten_device_tree(void)
922 */ 922 */
923void __init unflatten_and_copy_device_tree(void) 923void __init unflatten_and_copy_device_tree(void)
924{ 924{
925 int size = __be32_to_cpu(initial_boot_params->totalsize); 925 int size;
926 void *dt = early_init_dt_alloc_memory_arch(size, 926 void *dt;
927
928 if (!initial_boot_params) {
929 pr_warn("No valid device tree found, continuing without\n");
930 return;
931 }
932
933 size = __be32_to_cpu(initial_boot_params->totalsize);
934 dt = early_init_dt_alloc_memory_arch(size,
927 __alignof__(struct boot_param_header)); 935 __alignof__(struct boot_param_header));
928 936
929 if (dt) { 937 if (dt) {
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index 786b0b47fae4..27212402c532 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -165,7 +165,6 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq)
165 if (of_get_property(ipar, "interrupt-controller", NULL) != 165 if (of_get_property(ipar, "interrupt-controller", NULL) !=
166 NULL) { 166 NULL) {
167 pr_debug(" -> got it !\n"); 167 pr_debug(" -> got it !\n");
168 of_node_put(old);
169 return 0; 168 return 0;
170 } 169 }
171 170
@@ -250,8 +249,7 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq)
250 * Successfully parsed an interrrupt-map translation; copy new 249 * Successfully parsed an interrrupt-map translation; copy new
251 * interrupt specifier into the out_irq structure 250 * interrupt specifier into the out_irq structure
252 */ 251 */
253 of_node_put(out_irq->np); 252 out_irq->np = newpar;
254 out_irq->np = of_node_get(newpar);
255 253
256 match_array = imap - newaddrsize - newintsize; 254 match_array = imap - newaddrsize - newintsize;
257 for (i = 0; i < newintsize; i++) 255 for (i = 0; i < newintsize; i++)
@@ -268,7 +266,6 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq)
268 } 266 }
269 fail: 267 fail:
270 of_node_put(ipar); 268 of_node_put(ipar);
271 of_node_put(out_irq->np);
272 of_node_put(newpar); 269 of_node_put(newpar);
273 270
274 return -EINVAL; 271 return -EINVAL;
diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c
index c269e430c760..2aa7b77c7c88 100644
--- a/drivers/pci/host/pci-mvebu.c
+++ b/drivers/pci/host/pci-mvebu.c
@@ -447,6 +447,11 @@ static int mvebu_sw_pci_bridge_read(struct mvebu_pcie_port *port,
447 *value = 0; 447 *value = 0;
448 break; 448 break;
449 449
450 case PCI_INTERRUPT_LINE:
451 /* LINE PIN MIN_GNT MAX_LAT */
452 *value = 0;
453 break;
454
450 default: 455 default:
451 *value = 0xffffffff; 456 *value = 0xffffffff;
452 return PCIBIOS_BAD_REGISTER_NUMBER; 457 return PCIBIOS_BAD_REGISTER_NUMBER;
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index 1cf605f67673..e86439283a5d 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -279,7 +279,9 @@ static acpi_status register_slot(acpi_handle handle, u32 lvl, void *data,
279 279
280 status = acpi_evaluate_integer(handle, "_ADR", NULL, &adr); 280 status = acpi_evaluate_integer(handle, "_ADR", NULL, &adr);
281 if (ACPI_FAILURE(status)) { 281 if (ACPI_FAILURE(status)) {
282 acpi_handle_warn(handle, "can't evaluate _ADR (%#x)\n", status); 282 if (status != AE_NOT_FOUND)
283 acpi_handle_warn(handle,
284 "can't evaluate _ADR (%#x)\n", status);
283 return AE_OK; 285 return AE_OK;
284 } 286 }
285 287
@@ -643,6 +645,24 @@ static void disable_slot(struct acpiphp_slot *slot)
643 slot->flags &= (~SLOT_ENABLED); 645 slot->flags &= (~SLOT_ENABLED);
644} 646}
645 647
648static bool acpiphp_no_hotplug(acpi_handle handle)
649{
650 struct acpi_device *adev = NULL;
651
652 acpi_bus_get_device(handle, &adev);
653 return adev && adev->flags.no_hotplug;
654}
655
656static bool slot_no_hotplug(struct acpiphp_slot *slot)
657{
658 struct acpiphp_func *func;
659
660 list_for_each_entry(func, &slot->funcs, sibling)
661 if (acpiphp_no_hotplug(func_to_handle(func)))
662 return true;
663
664 return false;
665}
646 666
647/** 667/**
648 * get_slot_status - get ACPI slot status 668 * get_slot_status - get ACPI slot status
@@ -701,7 +721,8 @@ static void trim_stale_devices(struct pci_dev *dev)
701 unsigned long long sta; 721 unsigned long long sta;
702 722
703 status = acpi_evaluate_integer(handle, "_STA", NULL, &sta); 723 status = acpi_evaluate_integer(handle, "_STA", NULL, &sta);
704 alive = ACPI_SUCCESS(status) && sta == ACPI_STA_ALL; 724 alive = (ACPI_SUCCESS(status) && sta == ACPI_STA_ALL)
725 || acpiphp_no_hotplug(handle);
705 } 726 }
706 if (!alive) { 727 if (!alive) {
707 u32 v; 728 u32 v;
@@ -741,8 +762,9 @@ static void acpiphp_check_bridge(struct acpiphp_bridge *bridge)
741 struct pci_dev *dev, *tmp; 762 struct pci_dev *dev, *tmp;
742 763
743 mutex_lock(&slot->crit_sect); 764 mutex_lock(&slot->crit_sect);
744 /* wake up all functions */ 765 if (slot_no_hotplug(slot)) {
745 if (get_slot_status(slot) == ACPI_STA_ALL) { 766 ; /* do nothing */
767 } else if (get_slot_status(slot) == ACPI_STA_ALL) {
746 /* remove stale devices if any */ 768 /* remove stale devices if any */
747 list_for_each_entry_safe(dev, tmp, &bus->devices, 769 list_for_each_entry_safe(dev, tmp, &bus->devices,
748 bus_list) 770 bus_list)
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 577074efbe62..f7ebdba14bde 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -330,29 +330,32 @@ static int acpi_pci_find_device(struct device *dev, acpi_handle *handle)
330static void pci_acpi_setup(struct device *dev) 330static void pci_acpi_setup(struct device *dev)
331{ 331{
332 struct pci_dev *pci_dev = to_pci_dev(dev); 332 struct pci_dev *pci_dev = to_pci_dev(dev);
333 acpi_handle handle = ACPI_HANDLE(dev); 333 struct acpi_device *adev = ACPI_COMPANION(dev);
334 struct acpi_device *adev;
335 334
336 if (acpi_bus_get_device(handle, &adev) || !adev->wakeup.flags.valid) 335 if (!adev)
336 return;
337
338 pci_acpi_add_pm_notifier(adev, pci_dev);
339 if (!adev->wakeup.flags.valid)
337 return; 340 return;
338 341
339 device_set_wakeup_capable(dev, true); 342 device_set_wakeup_capable(dev, true);
340 acpi_pci_sleep_wake(pci_dev, false); 343 acpi_pci_sleep_wake(pci_dev, false);
341
342 pci_acpi_add_pm_notifier(adev, pci_dev);
343 if (adev->wakeup.flags.run_wake) 344 if (adev->wakeup.flags.run_wake)
344 device_set_run_wake(dev, true); 345 device_set_run_wake(dev, true);
345} 346}
346 347
347static void pci_acpi_cleanup(struct device *dev) 348static void pci_acpi_cleanup(struct device *dev)
348{ 349{
349 acpi_handle handle = ACPI_HANDLE(dev); 350 struct acpi_device *adev = ACPI_COMPANION(dev);
350 struct acpi_device *adev; 351
352 if (!adev)
353 return;
351 354
352 if (!acpi_bus_get_device(handle, &adev) && adev->wakeup.flags.valid) { 355 pci_acpi_remove_pm_notifier(adev);
356 if (adev->wakeup.flags.valid) {
353 device_set_wakeup_capable(dev, false); 357 device_set_wakeup_capable(dev, false);
354 device_set_run_wake(dev, false); 358 device_set_run_wake(dev, false);
355 pci_acpi_remove_pm_notifier(adev);
356 } 359 }
357} 360}
358 361
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 9042fdbd7244..25f0bc659164 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -19,6 +19,7 @@
19#include <linux/cpu.h> 19#include <linux/cpu.h>
20#include <linux/pm_runtime.h> 20#include <linux/pm_runtime.h>
21#include <linux/suspend.h> 21#include <linux/suspend.h>
22#include <linux/kexec.h>
22#include "pci.h" 23#include "pci.h"
23 24
24struct pci_dynid { 25struct pci_dynid {
@@ -288,12 +289,27 @@ static int pci_call_probe(struct pci_driver *drv, struct pci_dev *dev,
288 int error, node; 289 int error, node;
289 struct drv_dev_and_id ddi = { drv, dev, id }; 290 struct drv_dev_and_id ddi = { drv, dev, id };
290 291
291 /* Execute driver initialization on node where the device's 292 /*
292 bus is attached to. This way the driver likely allocates 293 * Execute driver initialization on node where the device is
293 its local memory on the right node without any need to 294 * attached. This way the driver likely allocates its local memory
294 change it. */ 295 * on the right node.
296 */
295 node = dev_to_node(&dev->dev); 297 node = dev_to_node(&dev->dev);
296 if (node >= 0) { 298
299 /*
300 * On NUMA systems, we are likely to call a PF probe function using
301 * work_on_cpu(). If that probe calls pci_enable_sriov() (which
302 * adds the VF devices via pci_bus_add_device()), we may re-enter
303 * this function to call the VF probe function. Calling
304 * work_on_cpu() again will cause a lockdep warning. Since VFs are
305 * always on the same node as the PF, we can work around this by
306 * avoiding work_on_cpu() when we're already on the correct node.
307 *
308 * Preemption is enabled, so it's theoretically unsafe to use
309 * numa_node_id(), but even if we run the probe function on the
310 * wrong node, it should be functionally correct.
311 */
312 if (node >= 0 && node != numa_node_id()) {
297 int cpu; 313 int cpu;
298 314
299 get_online_cpus(); 315 get_online_cpus();
@@ -305,6 +321,7 @@ static int pci_call_probe(struct pci_driver *drv, struct pci_dev *dev,
305 put_online_cpus(); 321 put_online_cpus();
306 } else 322 } else
307 error = local_pci_probe(&ddi); 323 error = local_pci_probe(&ddi);
324
308 return error; 325 return error;
309} 326}
310 327
@@ -399,12 +416,17 @@ static void pci_device_shutdown(struct device *dev)
399 pci_msi_shutdown(pci_dev); 416 pci_msi_shutdown(pci_dev);
400 pci_msix_shutdown(pci_dev); 417 pci_msix_shutdown(pci_dev);
401 418
419#ifdef CONFIG_KEXEC
402 /* 420 /*
403 * Turn off Bus Master bit on the device to tell it to not 421 * If this is a kexec reboot, turn off Bus Master bit on the
404 * continue to do DMA. Don't touch devices in D3cold or unknown states. 422 * device to tell it to not continue to do DMA. Don't touch
423 * devices in D3cold or unknown states.
424 * If it is not a kexec reboot, firmware will hit the PCI
425 * devices with big hammer and stop their DMA any way.
405 */ 426 */
406 if (pci_dev->current_state <= PCI_D3hot) 427 if (kexec_in_progress && (pci_dev->current_state <= PCI_D3hot))
407 pci_clear_master(pci_dev); 428 pci_clear_master(pci_dev);
429#endif
408} 430}
409 431
410#ifdef CONFIG_PM 432#ifdef CONFIG_PM
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 33120d156668..07369f32e8bb 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -4165,6 +4165,14 @@ int pci_set_vga_state(struct pci_dev *dev, bool decode,
4165 return 0; 4165 return 0;
4166} 4166}
4167 4167
4168bool pci_device_is_present(struct pci_dev *pdev)
4169{
4170 u32 v;
4171
4172 return pci_bus_read_dev_vendor_id(pdev->bus, pdev->devfn, &v, 0);
4173}
4174EXPORT_SYMBOL_GPL(pci_device_is_present);
4175
4168#define RESOURCE_ALIGNMENT_PARAM_SIZE COMMAND_LINE_SIZE 4176#define RESOURCE_ALIGNMENT_PARAM_SIZE COMMAND_LINE_SIZE
4169static char resource_alignment_param[RESOURCE_ALIGNMENT_PARAM_SIZE] = {0}; 4177static char resource_alignment_param[RESOURCE_ALIGNMENT_PARAM_SIZE] = {0};
4170static DEFINE_SPINLOCK(resource_alignment_lock); 4178static DEFINE_SPINLOCK(resource_alignment_lock);
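
pci_device_is_present() above simply re-reads the vendor ID over config space. A hedged example of how a driver might use the new helper on its remove path (my_driver_remove() is hypothetical):

    #include <linux/pci.h>

    /* hypothetical remove path: skip register writes once the device is
     * physically gone and config reads come back as all-ones */
    static void my_driver_remove(struct pci_dev *pdev)
    {
            if (!pci_device_is_present(pdev))
                    return;

            /* ... normal teardown that touches device registers ... */
    }
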
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index b3b1b9aa8863..3a02717473ad 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -9,10 +9,6 @@
9 * 9 *
10 * Init/reset quirks for USB host controllers should be in the 10 * Init/reset quirks for USB host controllers should be in the
11 * USB quirks file, where their drivers can access reuse it. 11 * USB quirks file, where their drivers can access reuse it.
12 *
13 * The bridge optimization stuff has been removed. If you really
14 * have a silly BIOS which is unable to set your host bridge right,
15 * use the PowerTweak utility (see http://powertweak.sourceforge.net).
16 */ 12 */
17 13
18#include <linux/types.h> 14#include <linux/types.h>
diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c
index 1576851028db..cc9337a71529 100644
--- a/drivers/pci/remove.c
+++ b/drivers/pci/remove.c
@@ -24,7 +24,7 @@ static void pci_stop_dev(struct pci_dev *dev)
24 if (dev->is_added) { 24 if (dev->is_added) {
25 pci_proc_detach_device(dev); 25 pci_proc_detach_device(dev);
26 pci_remove_sysfs_dev_files(dev); 26 pci_remove_sysfs_dev_files(dev);
27 device_del(&dev->dev); 27 device_release_driver(&dev->dev);
28 dev->is_added = 0; 28 dev->is_added = 0;
29 } 29 }
30 30
@@ -34,6 +34,8 @@ static void pci_stop_dev(struct pci_dev *dev)
34 34
35static void pci_destroy_dev(struct pci_dev *dev) 35static void pci_destroy_dev(struct pci_dev *dev)
36{ 36{
37 device_del(&dev->dev);
38
37 down_write(&pci_bus_sem); 39 down_write(&pci_bus_sem);
38 list_del(&dev->bus_list); 40 list_del(&dev->bus_list);
39 up_write(&pci_bus_sem); 41 up_write(&pci_bus_sem);
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
index a344f3d52361..330ef2d06567 100644
--- a/drivers/phy/Kconfig
+++ b/drivers/phy/Kconfig
@@ -24,8 +24,8 @@ config PHY_EXYNOS_MIPI_VIDEO
24config OMAP_USB2 24config OMAP_USB2
25 tristate "OMAP USB2 PHY Driver" 25 tristate "OMAP USB2 PHY Driver"
26 depends on ARCH_OMAP2PLUS 26 depends on ARCH_OMAP2PLUS
27 depends on USB_PHY
27 select GENERIC_PHY 28 select GENERIC_PHY
28 select USB_PHY
29 select OMAP_CONTROL_USB 29 select OMAP_CONTROL_USB
30 help 30 help
31 Enable this to support the transceiver that is part of SOC. This 31 Enable this to support the transceiver that is part of SOC. This
@@ -36,8 +36,8 @@ config OMAP_USB2
36config TWL4030_USB 36config TWL4030_USB
37 tristate "TWL4030 USB Transceiver Driver" 37 tristate "TWL4030 USB Transceiver Driver"
38 depends on TWL4030_CORE && REGULATOR_TWL4030 && USB_MUSB_OMAP2PLUS 38 depends on TWL4030_CORE && REGULATOR_TWL4030 && USB_MUSB_OMAP2PLUS
39 depends on USB_PHY
39 select GENERIC_PHY 40 select GENERIC_PHY
40 select USB_PHY
41 help 41 help
42 Enable this to support the USB OTG transceiver on TWL4030 42 Enable this to support the USB OTG transceiver on TWL4030
43 family chips (including the TWL5030 and TPS659x0 devices). 43 family chips (including the TWL5030 and TPS659x0 devices).
diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c
index 03cf8fb81554..58e0e9739028 100644
--- a/drivers/phy/phy-core.c
+++ b/drivers/phy/phy-core.c
@@ -437,23 +437,18 @@ struct phy *phy_create(struct device *dev, const struct phy_ops *ops,
437 int id; 437 int id;
438 struct phy *phy; 438 struct phy *phy;
439 439
440 if (!dev) { 440 if (WARN_ON(!dev))
441 dev_WARN(dev, "no device provided for PHY\n"); 441 return ERR_PTR(-EINVAL);
442 ret = -EINVAL;
443 goto err0;
444 }
445 442
446 phy = kzalloc(sizeof(*phy), GFP_KERNEL); 443 phy = kzalloc(sizeof(*phy), GFP_KERNEL);
447 if (!phy) { 444 if (!phy)
448 ret = -ENOMEM; 445 return ERR_PTR(-ENOMEM);
449 goto err0;
450 }
451 446
452 id = ida_simple_get(&phy_ida, 0, 0, GFP_KERNEL); 447 id = ida_simple_get(&phy_ida, 0, 0, GFP_KERNEL);
453 if (id < 0) { 448 if (id < 0) {
454 dev_err(dev, "unable to get id\n"); 449 dev_err(dev, "unable to get id\n");
455 ret = id; 450 ret = id;
456 goto err0; 451 goto free_phy;
457 } 452 }
458 453
459 device_initialize(&phy->dev); 454 device_initialize(&phy->dev);
@@ -468,11 +463,11 @@ struct phy *phy_create(struct device *dev, const struct phy_ops *ops,
468 463
469 ret = dev_set_name(&phy->dev, "phy-%s.%d", dev_name(dev), id); 464 ret = dev_set_name(&phy->dev, "phy-%s.%d", dev_name(dev), id);
470 if (ret) 465 if (ret)
471 goto err1; 466 goto put_dev;
472 467
473 ret = device_add(&phy->dev); 468 ret = device_add(&phy->dev);
474 if (ret) 469 if (ret)
475 goto err1; 470 goto put_dev;
476 471
477 if (pm_runtime_enabled(dev)) { 472 if (pm_runtime_enabled(dev)) {
478 pm_runtime_enable(&phy->dev); 473 pm_runtime_enable(&phy->dev);
@@ -481,12 +476,11 @@ struct phy *phy_create(struct device *dev, const struct phy_ops *ops,
481 476
482 return phy; 477 return phy;
483 478
484err1: 479put_dev:
485 ida_remove(&phy_ida, phy->id);
486 put_device(&phy->dev); 480 put_device(&phy->dev);
481 ida_remove(&phy_ida, phy->id);
482free_phy:
487 kfree(phy); 483 kfree(phy);
488
489err0:
490 return ERR_PTR(ret); 484 return ERR_PTR(ret);
491} 485}
492EXPORT_SYMBOL_GPL(phy_create); 486EXPORT_SYMBOL_GPL(phy_create);
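
The relabelled error path above follows the usual unwind convention: each label undoes only what succeeded before the failure, in reverse order. A generic sketch of that shape (step_a/step_b/step_c and their teardown helpers are hypothetical):

    static int setup_example(void)
    {
            int ret;

            ret = step_a();
            if (ret)
                    return ret;

            ret = step_b();
            if (ret)
                    goto undo_a;

            ret = step_c();
            if (ret)
                    goto undo_b;

            return 0;

    undo_b:
            teardown_b();
    undo_a:
            teardown_a();
            return ret;
    }
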
diff --git a/drivers/pinctrl/pinctrl-abx500.c b/drivers/pinctrl/pinctrl-abx500.c
index 4780959e11d4..5183e7bb8de3 100644
--- a/drivers/pinctrl/pinctrl-abx500.c
+++ b/drivers/pinctrl/pinctrl-abx500.c
@@ -418,7 +418,7 @@ static int abx500_set_mode(struct pinctrl_dev *pctldev, struct gpio_chip *chip,
418 ret = abx500_gpio_set_bits(chip, 418 ret = abx500_gpio_set_bits(chip,
419 AB8500_GPIO_ALTFUN_REG, 419 AB8500_GPIO_ALTFUN_REG,
420 af.alt_bit1, 420 af.alt_bit1,
421 !!(af.alta_val && BIT(0))); 421 !!(af.alta_val & BIT(0)));
422 if (ret < 0) 422 if (ret < 0)
423 goto out; 423 goto out;
424 424
@@ -439,7 +439,7 @@ static int abx500_set_mode(struct pinctrl_dev *pctldev, struct gpio_chip *chip,
439 goto out; 439 goto out;
440 440
441 ret = abx500_gpio_set_bits(chip, AB8500_GPIO_ALTFUN_REG, 441 ret = abx500_gpio_set_bits(chip, AB8500_GPIO_ALTFUN_REG,
442 af.alt_bit1, !!(af.altb_val && BIT(0))); 442 af.alt_bit1, !!(af.altb_val & BIT(0)));
443 if (ret < 0) 443 if (ret < 0)
444 goto out; 444 goto out;
445 445
@@ -462,7 +462,7 @@ static int abx500_set_mode(struct pinctrl_dev *pctldev, struct gpio_chip *chip,
462 goto out; 462 goto out;
463 463
464 ret = abx500_gpio_set_bits(chip, AB8500_GPIO_ALTFUN_REG, 464 ret = abx500_gpio_set_bits(chip, AB8500_GPIO_ALTFUN_REG,
465 af.alt_bit2, !!(af.altc_val && BIT(1))); 465 af.alt_bit2, !!(af.altc_val & BIT(1)));
466 break; 466 break;
467 467
468 default: 468 default:
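
The three hunks above change a logical AND into a bitwise AND: !!(val && BIT(0)) evaluates to 1 whenever val is non-zero at all, whereas !!(val & BIT(0)) tests bit 0 as intended. A small standalone illustration:

    #include <stdio.h>

    #define BIT(n) (1U << (n))

    int main(void)
    {
            unsigned int val = 0x2;  /* bit 1 set, bit 0 clear */

            printf("logical &&: %u\n", !!(val && BIT(0))); /* prints 1 - wrong */
            printf("bitwise &:  %u\n", !!(val & BIT(0)));  /* prints 0 - bit 0 clear */
            return 0;
    }
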
diff --git a/drivers/pinctrl/pinctrl-abx500.h b/drivers/pinctrl/pinctrl-abx500.h
index eeca8f973999..82293806e842 100644
--- a/drivers/pinctrl/pinctrl-abx500.h
+++ b/drivers/pinctrl/pinctrl-abx500.h
@@ -1,4 +1,4 @@
1#ifndef PINCTRL_PINCTRL_ABx5O0_H 1#ifndef PINCTRL_PINCTRL_ABx500_H
2#define PINCTRL_PINCTRL_ABx500_H 2#define PINCTRL_PINCTRL_ABx500_H
3 3
4/* Package definitions */ 4/* Package definitions */
diff --git a/drivers/pinctrl/pinctrl-baytrail.c b/drivers/pinctrl/pinctrl-baytrail.c
index 2832576d8b12..114f5ef4b73a 100644
--- a/drivers/pinctrl/pinctrl-baytrail.c
+++ b/drivers/pinctrl/pinctrl-baytrail.c
@@ -512,6 +512,7 @@ static const struct dev_pm_ops byt_gpio_pm_ops = {
512 512
513static const struct acpi_device_id byt_gpio_acpi_match[] = { 513static const struct acpi_device_id byt_gpio_acpi_match[] = {
514 { "INT33B2", 0 }, 514 { "INT33B2", 0 },
515 { "INT33FC", 0 },
515 { } 516 { }
516}; 517};
517MODULE_DEVICE_TABLE(acpi, byt_gpio_acpi_match); 518MODULE_DEVICE_TABLE(acpi, byt_gpio_acpi_match);
diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c
index e939c28cbf1f..46dddc159286 100644
--- a/drivers/pinctrl/pinctrl-rockchip.c
+++ b/drivers/pinctrl/pinctrl-rockchip.c
@@ -504,6 +504,7 @@ static int rockchip_set_pull(struct rockchip_pin_bank *bank,
504 data |= (3 << bit); 504 data |= (3 << bit);
505 break; 505 break;
506 default: 506 default:
507 spin_unlock_irqrestore(&bank->slock, flags);
507 dev_err(info->dev, "unsupported pull setting %d\n", 508 dev_err(info->dev, "unsupported pull setting %d\n",
508 pull); 509 pull);
509 return -EINVAL; 510 return -EINVAL;
@@ -1453,8 +1454,8 @@ static int rockchip_pinctrl_probe(struct platform_device *pdev)
1453 if (ctrl->type == RK3188) { 1454 if (ctrl->type == RK3188) {
1454 res = platform_get_resource(pdev, IORESOURCE_MEM, 1); 1455 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1455 info->reg_pull = devm_ioremap_resource(&pdev->dev, res); 1456 info->reg_pull = devm_ioremap_resource(&pdev->dev, res);
1456 if (IS_ERR(info->reg_base)) 1457 if (IS_ERR(info->reg_pull))
1457 return PTR_ERR(info->reg_base); 1458 return PTR_ERR(info->reg_pull);
1458 } 1459 }
1459 1460
1460 ret = rockchip_gpiolib_register(pdev, info); 1461 ret = rockchip_gpiolib_register(pdev, info);
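
The first rockchip hunk adds the missing unlock on the error path. A sketch of the rule it enforces: every return taken after spin_lock_irqsave() must be paired with spin_unlock_irqrestore() (the struct and register write here are made up for illustration):

    #include <linux/spinlock.h>
    #include <linux/io.h>
    #include <linux/errno.h>

    struct example_bank {
            spinlock_t slock;
            void __iomem *reg;
    };

    static int set_pull_sketch(struct example_bank *bank, int pull)
    {
            unsigned long flags;
            u32 data;

            spin_lock_irqsave(&bank->slock, flags);

            switch (pull) {
            case 0:
                    data = 0;
                    break;
            case 1:
                    data = 1;
                    break;
            default:
                    /* unlock before every early return */
                    spin_unlock_irqrestore(&bank->slock, flags);
                    return -EINVAL;
            }

            writel(data, bank->reg);
            spin_unlock_irqrestore(&bank->slock, flags);
            return 0;
    }
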
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7740.c b/drivers/pinctrl/sh-pfc/pfc-r8a7740.c
index 009174d07767..bc5eb453a45c 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7740.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7740.c
@@ -3720,7 +3720,7 @@ static void __iomem *r8a7740_pinmux_portcr(struct sh_pfc *pfc, unsigned int pin)
3720 const struct r8a7740_portcr_group *group = 3720 const struct r8a7740_portcr_group *group =
3721 &r8a7740_portcr_offsets[i]; 3721 &r8a7740_portcr_offsets[i];
3722 3722
3723 if (i <= group->end_pin) 3723 if (pin <= group->end_pin)
3724 return pfc->window->virt + group->offset + pin; 3724 return pfc->window->virt + group->offset + pin;
3725 } 3725 }
3726 3726
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7372.c b/drivers/pinctrl/sh-pfc/pfc-sh7372.c
index 70b522d34821..cc097b693820 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh7372.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7372.c
@@ -2584,7 +2584,7 @@ static void __iomem *sh7372_pinmux_portcr(struct sh_pfc *pfc, unsigned int pin)
2584 const struct sh7372_portcr_group *group = 2584 const struct sh7372_portcr_group *group =
2585 &sh7372_portcr_offsets[i]; 2585 &sh7372_portcr_offsets[i];
2586 2586
2587 if (i <= group->end_pin) 2587 if (pin <= group->end_pin)
2588 return pfc->window->virt + group->offset + pin; 2588 return pfc->window->virt + group->offset + pin;
2589 } 2589 }
2590 2590
diff --git a/drivers/pinctrl/sh-pfc/sh_pfc.h b/drivers/pinctrl/sh-pfc/sh_pfc.h
index 11bd0d970a52..e2142956a8e5 100644
--- a/drivers/pinctrl/sh-pfc/sh_pfc.h
+++ b/drivers/pinctrl/sh-pfc/sh_pfc.h
@@ -254,7 +254,7 @@ struct sh_pfc_soc_info {
254#define PINMUX_GPIO(_pin) \ 254#define PINMUX_GPIO(_pin) \
255 [GPIO_##_pin] = { \ 255 [GPIO_##_pin] = { \
256 .pin = (u16)-1, \ 256 .pin = (u16)-1, \
257 .name = __stringify(name), \ 257 .name = __stringify(GPIO_##_pin), \
258 .enum_id = _pin##_DATA, \ 258 .enum_id = _pin##_DATA, \
259 } 259 }
260 260
diff --git a/drivers/platform/Kconfig b/drivers/platform/Kconfig
index 69616aeaa966..09fde58b12e0 100644
--- a/drivers/platform/Kconfig
+++ b/drivers/platform/Kconfig
@@ -5,3 +5,4 @@ if GOLDFISH
5source "drivers/platform/goldfish/Kconfig" 5source "drivers/platform/goldfish/Kconfig"
6endif 6endif
7 7
8source "drivers/platform/chrome/Kconfig"
diff --git a/drivers/platform/Makefile b/drivers/platform/Makefile
index 8a44a4cd6d1e..3656b7b17b99 100644
--- a/drivers/platform/Makefile
+++ b/drivers/platform/Makefile
@@ -5,3 +5,4 @@
5obj-$(CONFIG_X86) += x86/ 5obj-$(CONFIG_X86) += x86/
6obj-$(CONFIG_OLPC) += olpc/ 6obj-$(CONFIG_OLPC) += olpc/
7obj-$(CONFIG_GOLDFISH) += goldfish/ 7obj-$(CONFIG_GOLDFISH) += goldfish/
8obj-$(CONFIG_CHROME_PLATFORMS) += chrome/
diff --git a/drivers/platform/chrome/Kconfig b/drivers/platform/chrome/Kconfig
new file mode 100644
index 000000000000..b13303e75a34
--- /dev/null
+++ b/drivers/platform/chrome/Kconfig
@@ -0,0 +1,28 @@
1#
2# Platform support for Chrome OS hardware (Chromebooks and Chromeboxes)
3#
4
5menuconfig CHROME_PLATFORMS
6 bool "Platform support for Chrome hardware"
7 depends on X86
8 ---help---
9 Say Y here to get to see options for platform support for
10 various Chromebooks and Chromeboxes. This option alone does
11 not add any kernel code.
12
13 If you say N, all options in this submenu will be skipped and disabled.
14
15if CHROME_PLATFORMS
16
17config CHROMEOS_LAPTOP
18 tristate "Chrome OS Laptop"
19 depends on I2C
20 depends on DMI
21 ---help---
22 This driver instantiates i2c and smbus devices such as
23 light sensors and touchpads.
24
25 If you have a supported Chromebook, choose Y or M here.
26 The module will be called chromeos_laptop.
27
28endif # CHROME_PLATFORMS
diff --git a/drivers/platform/chrome/Makefile b/drivers/platform/chrome/Makefile
new file mode 100644
index 000000000000..015e9195e226
--- /dev/null
+++ b/drivers/platform/chrome/Makefile
@@ -0,0 +1,2 @@
1
2obj-$(CONFIG_CHROMEOS_LAPTOP) += chromeos_laptop.o
diff --git a/drivers/platform/x86/chromeos_laptop.c b/drivers/platform/chrome/chromeos_laptop.c
index 3e5b4497a1d0..3e5b4497a1d0 100644
--- a/drivers/platform/x86/chromeos_laptop.c
+++ b/drivers/platform/chrome/chromeos_laptop.c
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index b51a7460cc49..d9dcd37b5a52 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -79,17 +79,6 @@ config ASUS_LAPTOP
79 79
80 If you have an ACPI-compatible ASUS laptop, say Y or M here. 80 If you have an ACPI-compatible ASUS laptop, say Y or M here.
81 81
82config CHROMEOS_LAPTOP
83 tristate "Chrome OS Laptop"
84 depends on I2C
85 depends on DMI
86 ---help---
87 This driver instantiates i2c and smbus devices such as
88 light sensors and touchpads.
89
90 If you have a supported Chromebook, choose Y or M here.
91 The module will be called chromeos_laptop.
92
93config DELL_LAPTOP 82config DELL_LAPTOP
94 tristate "Dell Laptop Extras" 83 tristate "Dell Laptop Extras"
95 depends on X86 84 depends on X86
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
index 5dbe19324351..f0e6aa407ffb 100644
--- a/drivers/platform/x86/Makefile
+++ b/drivers/platform/x86/Makefile
@@ -50,7 +50,6 @@ obj-$(CONFIG_INTEL_MID_POWER_BUTTON) += intel_mid_powerbtn.o
50obj-$(CONFIG_INTEL_OAKTRAIL) += intel_oaktrail.o 50obj-$(CONFIG_INTEL_OAKTRAIL) += intel_oaktrail.o
51obj-$(CONFIG_SAMSUNG_Q10) += samsung-q10.o 51obj-$(CONFIG_SAMSUNG_Q10) += samsung-q10.o
52obj-$(CONFIG_APPLE_GMUX) += apple-gmux.o 52obj-$(CONFIG_APPLE_GMUX) += apple-gmux.o
53obj-$(CONFIG_CHROMEOS_LAPTOP) += chromeos_laptop.o
54obj-$(CONFIG_INTEL_RST) += intel-rst.o 53obj-$(CONFIG_INTEL_RST) += intel-rst.o
55obj-$(CONFIG_INTEL_SMARTCONNECT) += intel-smartconnect.o 54obj-$(CONFIG_INTEL_SMARTCONNECT) += intel-smartconnect.o
56 55
diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
index 0e9c169b42f8..594323a926cf 100644
--- a/drivers/platform/x86/asus-laptop.c
+++ b/drivers/platform/x86/asus-laptop.c
@@ -1494,10 +1494,9 @@ static int asus_input_init(struct asus_laptop *asus)
1494 int error; 1494 int error;
1495 1495
1496 input = input_allocate_device(); 1496 input = input_allocate_device();
1497 if (!input) { 1497 if (!input)
1498 pr_warn("Unable to allocate input device\n");
1499 return -ENOMEM; 1498 return -ENOMEM;
1500 } 1499
1501 input->name = "Asus Laptop extra buttons"; 1500 input->name = "Asus Laptop extra buttons";
1502 input->phys = ASUS_LAPTOP_FILE "/input0"; 1501 input->phys = ASUS_LAPTOP_FILE "/input0";
1503 input->id.bustype = BUS_HOST; 1502 input->id.bustype = BUS_HOST;
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
index bb77e18b3dd4..c608b1d33f4a 100644
--- a/drivers/platform/x86/dell-laptop.c
+++ b/drivers/platform/x86/dell-laptop.c
@@ -21,6 +21,7 @@
21#include <linux/err.h> 21#include <linux/err.h>
22#include <linux/dmi.h> 22#include <linux/dmi.h>
23#include <linux/io.h> 23#include <linux/io.h>
24#include <linux/rfkill.h>
24#include <linux/power_supply.h> 25#include <linux/power_supply.h>
25#include <linux/acpi.h> 26#include <linux/acpi.h>
26#include <linux/mm.h> 27#include <linux/mm.h>
@@ -89,6 +90,13 @@ static struct platform_driver platform_driver = {
89 90
90static struct platform_device *platform_device; 91static struct platform_device *platform_device;
91static struct backlight_device *dell_backlight_device; 92static struct backlight_device *dell_backlight_device;
93static struct rfkill *wifi_rfkill;
94static struct rfkill *bluetooth_rfkill;
95static struct rfkill *wwan_rfkill;
96static bool force_rfkill;
97
98module_param(force_rfkill, bool, 0444);
99MODULE_PARM_DESC(force_rfkill, "enable rfkill on non whitelisted models");
92 100
93static const struct dmi_system_id dell_device_table[] __initconst = { 101static const struct dmi_system_id dell_device_table[] __initconst = {
94 { 102 {
@@ -355,6 +363,108 @@ dell_send_request(struct calling_interface_buffer *buffer, int class,
355 return buffer; 363 return buffer;
356} 364}
357 365
366/* Derived from information in DellWirelessCtl.cpp:
367 Class 17, select 11 is radio control. It returns an array of 32-bit values.
368
369 Input byte 0 = 0: Wireless information
370
371 result[0]: return code
372 result[1]:
373 Bit 0: Hardware switch supported
374 Bit 1: Wifi locator supported
375 Bit 2: Wifi is supported
376 Bit 3: Bluetooth is supported
377 Bit 4: WWAN is supported
378 Bit 5: Wireless keyboard supported
379 Bits 6-7: Reserved
380 Bit 8: Wifi is installed
381 Bit 9: Bluetooth is installed
382 Bit 10: WWAN is installed
383 Bits 11-15: Reserved
384 Bit 16: Hardware switch is on
385 Bit 17: Wifi is blocked
386 Bit 18: Bluetooth is blocked
387 Bit 19: WWAN is blocked
388 Bits 20-31: Reserved
389 result[2]: NVRAM size in bytes
390 result[3]: NVRAM format version number
391
392 Input byte 0 = 2: Wireless switch configuration
393 result[0]: return code
394 result[1]:
395 Bit 0: Wifi controlled by switch
396 Bit 1: Bluetooth controlled by switch
397 Bit 2: WWAN controlled by switch
398 Bits 3-6: Reserved
399 Bit 7: Wireless switch config locked
400 Bit 8: Wifi locator enabled
401 Bits 9-14: Reserved
402 Bit 15: Wifi locator setting locked
403 Bits 16-31: Reserved
404*/
405
406static int dell_rfkill_set(void *data, bool blocked)
407{
408 int disable = blocked ? 1 : 0;
409 unsigned long radio = (unsigned long)data;
410 int hwswitch_bit = (unsigned long)data - 1;
411
412 get_buffer();
413 dell_send_request(buffer, 17, 11);
414
415 /* If the hardware switch controls this radio, and the hardware
416 switch is disabled, always disable the radio */
417 if ((hwswitch_state & BIT(hwswitch_bit)) &&
418 !(buffer->output[1] & BIT(16)))
419 disable = 1;
420
421 buffer->input[0] = (1 | (radio<<8) | (disable << 16));
422 dell_send_request(buffer, 17, 11);
423
424 release_buffer();
425 return 0;
426}
427
428/* Must be called with the buffer held */
429static void dell_rfkill_update_sw_state(struct rfkill *rfkill, int radio,
430 int status)
431{
432 if (status & BIT(0)) {
433 /* Has hw-switch, sync sw_state to BIOS */
434 int block = rfkill_blocked(rfkill);
435 buffer->input[0] = (1 | (radio << 8) | (block << 16));
436 dell_send_request(buffer, 17, 11);
437 } else {
438 /* No hw-switch, sync BIOS state to sw_state */
439 rfkill_set_sw_state(rfkill, !!(status & BIT(radio + 16)));
440 }
441}
442
443static void dell_rfkill_update_hw_state(struct rfkill *rfkill, int radio,
444 int status)
445{
446 if (hwswitch_state & (BIT(radio - 1)))
447 rfkill_set_hw_state(rfkill, !(status & BIT(16)));
448}
449
450static void dell_rfkill_query(struct rfkill *rfkill, void *data)
451{
452 int status;
453
454 get_buffer();
455 dell_send_request(buffer, 17, 11);
456 status = buffer->output[1];
457
458 dell_rfkill_update_hw_state(rfkill, (unsigned long)data, status);
459
460 release_buffer();
461}
462
463static const struct rfkill_ops dell_rfkill_ops = {
464 .set_block = dell_rfkill_set,
465 .query = dell_rfkill_query,
466};
467
358static struct dentry *dell_laptop_dir; 468static struct dentry *dell_laptop_dir;
359 469
360static int dell_debugfs_show(struct seq_file *s, void *data) 470static int dell_debugfs_show(struct seq_file *s, void *data)
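
The long comment above documents the class 17, select 11 radio status word. A hedged sketch that decodes a few of those bits (the helper name and pr_info output are illustrative only; status corresponds to buffer->output[1]):

    #include <linux/kernel.h>
    #include <linux/bitops.h>

    static void decode_dell_radio_status(u32 status)
    {
            bool hwswitch_on  = status & BIT(16);
            bool wifi_present = (status & (BIT(2) | BIT(8))) ==
                                (BIT(2) | BIT(8)); /* supported and installed */
            bool wifi_blocked = status & BIT(17);

            pr_info("hwswitch %s, wifi %s%s\n",
                    hwswitch_on ? "on" : "off",
                    wifi_present ? "present" : "absent",
                    wifi_blocked ? ", blocked" : "");
    }
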
@@ -424,6 +534,136 @@ static const struct file_operations dell_debugfs_fops = {
424 .release = single_release, 534 .release = single_release,
425}; 535};
426 536
537static void dell_update_rfkill(struct work_struct *ignored)
538{
539 int status;
540
541 get_buffer();
542 dell_send_request(buffer, 17, 11);
543 status = buffer->output[1];
544
545 if (wifi_rfkill) {
546 dell_rfkill_update_hw_state(wifi_rfkill, 1, status);
547 dell_rfkill_update_sw_state(wifi_rfkill, 1, status);
548 }
549 if (bluetooth_rfkill) {
550 dell_rfkill_update_hw_state(bluetooth_rfkill, 2, status);
551 dell_rfkill_update_sw_state(bluetooth_rfkill, 2, status);
552 }
553 if (wwan_rfkill) {
554 dell_rfkill_update_hw_state(wwan_rfkill, 3, status);
555 dell_rfkill_update_sw_state(wwan_rfkill, 3, status);
556 }
557
558 release_buffer();
559}
560static DECLARE_DELAYED_WORK(dell_rfkill_work, dell_update_rfkill);
561
562
563static int __init dell_setup_rfkill(void)
564{
565 int status;
566 int ret;
567 const char *product;
568
569 /*
 570 * rfkill causes trouble on various non-Latitude models; according to
 571 * Dell, rfkill functionality is only actually tested on Latitudes.
572 */
573 product = dmi_get_system_info(DMI_PRODUCT_NAME);
574 if (!force_rfkill && (!product || strncmp(product, "Latitude", 8)))
575 return 0;
576
577 get_buffer();
578 dell_send_request(buffer, 17, 11);
579 status = buffer->output[1];
580 buffer->input[0] = 0x2;
581 dell_send_request(buffer, 17, 11);
582 hwswitch_state = buffer->output[1];
583 release_buffer();
584
585 if (!(status & BIT(0))) {
586 if (force_rfkill) {
 587 /* No hwswitch, clear all hw-controlled bits */
588 hwswitch_state &= ~7;
589 } else {
590 /* rfkill is only tested on laptops with a hwswitch */
591 return 0;
592 }
593 }
594
595 if ((status & (1<<2|1<<8)) == (1<<2|1<<8)) {
596 wifi_rfkill = rfkill_alloc("dell-wifi", &platform_device->dev,
597 RFKILL_TYPE_WLAN,
598 &dell_rfkill_ops, (void *) 1);
599 if (!wifi_rfkill) {
600 ret = -ENOMEM;
601 goto err_wifi;
602 }
603 ret = rfkill_register(wifi_rfkill);
604 if (ret)
605 goto err_wifi;
606 }
607
608 if ((status & (1<<3|1<<9)) == (1<<3|1<<9)) {
609 bluetooth_rfkill = rfkill_alloc("dell-bluetooth",
610 &platform_device->dev,
611 RFKILL_TYPE_BLUETOOTH,
612 &dell_rfkill_ops, (void *) 2);
613 if (!bluetooth_rfkill) {
614 ret = -ENOMEM;
615 goto err_bluetooth;
616 }
617 ret = rfkill_register(bluetooth_rfkill);
618 if (ret)
619 goto err_bluetooth;
620 }
621
622 if ((status & (1<<4|1<<10)) == (1<<4|1<<10)) {
623 wwan_rfkill = rfkill_alloc("dell-wwan",
624 &platform_device->dev,
625 RFKILL_TYPE_WWAN,
626 &dell_rfkill_ops, (void *) 3);
627 if (!wwan_rfkill) {
628 ret = -ENOMEM;
629 goto err_wwan;
630 }
631 ret = rfkill_register(wwan_rfkill);
632 if (ret)
633 goto err_wwan;
634 }
635
636 return 0;
637err_wwan:
638 rfkill_destroy(wwan_rfkill);
639 if (bluetooth_rfkill)
640 rfkill_unregister(bluetooth_rfkill);
641err_bluetooth:
642 rfkill_destroy(bluetooth_rfkill);
643 if (wifi_rfkill)
644 rfkill_unregister(wifi_rfkill);
645err_wifi:
646 rfkill_destroy(wifi_rfkill);
647
648 return ret;
649}
650
651static void dell_cleanup_rfkill(void)
652{
653 if (wifi_rfkill) {
654 rfkill_unregister(wifi_rfkill);
655 rfkill_destroy(wifi_rfkill);
656 }
657 if (bluetooth_rfkill) {
658 rfkill_unregister(bluetooth_rfkill);
659 rfkill_destroy(bluetooth_rfkill);
660 }
661 if (wwan_rfkill) {
662 rfkill_unregister(wwan_rfkill);
663 rfkill_destroy(wwan_rfkill);
664 }
665}
666
427static int dell_send_intensity(struct backlight_device *bd) 667static int dell_send_intensity(struct backlight_device *bd)
428{ 668{
429 int ret = 0; 669 int ret = 0;
@@ -515,6 +755,30 @@ static void touchpad_led_exit(void)
515 led_classdev_unregister(&touchpad_led); 755 led_classdev_unregister(&touchpad_led);
516} 756}
517 757
758static bool dell_laptop_i8042_filter(unsigned char data, unsigned char str,
759 struct serio *port)
760{
761 static bool extended;
762
763 if (str & 0x20)
764 return false;
765
766 if (unlikely(data == 0xe0)) {
767 extended = true;
768 return false;
769 } else if (unlikely(extended)) {
770 switch (data) {
771 case 0x8:
772 schedule_delayed_work(&dell_rfkill_work,
773 round_jiffies_relative(HZ / 4));
774 break;
775 }
776 extended = false;
777 }
778
779 return false;
780}
781
518static int __init dell_init(void) 782static int __init dell_init(void)
519{ 783{
520 int max_intensity = 0; 784 int max_intensity = 0;
@@ -557,10 +821,26 @@ static int __init dell_init(void)
557 } 821 }
558 buffer = page_address(bufferpage); 822 buffer = page_address(bufferpage);
559 823
824 ret = dell_setup_rfkill();
825
826 if (ret) {
827 pr_warn("Unable to setup rfkill\n");
828 goto fail_rfkill;
829 }
830
831 ret = i8042_install_filter(dell_laptop_i8042_filter);
832 if (ret) {
833 pr_warn("Unable to install key filter\n");
834 goto fail_filter;
835 }
836
560 if (quirks && quirks->touchpad_led) 837 if (quirks && quirks->touchpad_led)
561 touchpad_led_init(&platform_device->dev); 838 touchpad_led_init(&platform_device->dev);
562 839
563 dell_laptop_dir = debugfs_create_dir("dell_laptop", NULL); 840 dell_laptop_dir = debugfs_create_dir("dell_laptop", NULL);
841 if (dell_laptop_dir != NULL)
842 debugfs_create_file("rfkill", 0444, dell_laptop_dir, NULL,
843 &dell_debugfs_fops);
564 844
565#ifdef CONFIG_ACPI 845#ifdef CONFIG_ACPI
566 /* In the event of an ACPI backlight being available, don't 846 /* In the event of an ACPI backlight being available, don't
@@ -603,6 +883,11 @@ static int __init dell_init(void)
603 return 0; 883 return 0;
604 884
605fail_backlight: 885fail_backlight:
886 i8042_remove_filter(dell_laptop_i8042_filter);
887 cancel_delayed_work_sync(&dell_rfkill_work);
888fail_filter:
889 dell_cleanup_rfkill();
890fail_rfkill:
606 free_page((unsigned long)bufferpage); 891 free_page((unsigned long)bufferpage);
607fail_buffer: 892fail_buffer:
608 platform_device_del(platform_device); 893 platform_device_del(platform_device);
@@ -620,7 +905,10 @@ static void __exit dell_exit(void)
620 debugfs_remove_recursive(dell_laptop_dir); 905 debugfs_remove_recursive(dell_laptop_dir);
621 if (quirks && quirks->touchpad_led) 906 if (quirks && quirks->touchpad_led)
622 touchpad_led_exit(); 907 touchpad_led_exit();
908 i8042_remove_filter(dell_laptop_i8042_filter);
909 cancel_delayed_work_sync(&dell_rfkill_work);
623 backlight_device_unregister(dell_backlight_device); 910 backlight_device_unregister(dell_backlight_device);
911 dell_cleanup_rfkill();
624 if (platform_device) { 912 if (platform_device) {
625 platform_device_unregister(platform_device); 913 platform_device_unregister(platform_device);
626 platform_driver_unregister(&platform_driver); 914 platform_driver_unregister(&platform_driver);
diff --git a/drivers/platform/x86/dell-wmi.c b/drivers/platform/x86/dell-wmi.c
index fa9a2171cc13..60e0900bc117 100644
--- a/drivers/platform/x86/dell-wmi.c
+++ b/drivers/platform/x86/dell-wmi.c
@@ -130,7 +130,8 @@ static const u16 bios_to_linux_keycode[256] __initconst = {
130 KEY_BRIGHTNESSUP, KEY_UNKNOWN, KEY_KBDILLUMTOGGLE, 130 KEY_BRIGHTNESSUP, KEY_UNKNOWN, KEY_KBDILLUMTOGGLE,
131 KEY_UNKNOWN, KEY_SWITCHVIDEOMODE, KEY_UNKNOWN, KEY_UNKNOWN, 131 KEY_UNKNOWN, KEY_SWITCHVIDEOMODE, KEY_UNKNOWN, KEY_UNKNOWN,
132 KEY_SWITCHVIDEOMODE, KEY_UNKNOWN, KEY_UNKNOWN, KEY_PROG2, 132 KEY_SWITCHVIDEOMODE, KEY_UNKNOWN, KEY_UNKNOWN, KEY_PROG2,
133 KEY_UNKNOWN, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 133 KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,
134 KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_MICMUTE,
134 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 135 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
135 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 136 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
136 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 137 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
@@ -139,8 +140,8 @@ static const u16 bios_to_linux_keycode[256] __initconst = {
139 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 140 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
140 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 141 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
141 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 142 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
142 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 143 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
143 KEY_PROG3 144 0, 0, 0, 0, 0, 0, 0, 0, 0, KEY_PROG3
144}; 145};
145 146
146static struct input_dev *dell_wmi_input_dev; 147static struct input_dev *dell_wmi_input_dev;
diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c
index aefcc32e5634..dec68e7a99c7 100644
--- a/drivers/platform/x86/eeepc-laptop.c
+++ b/drivers/platform/x86/eeepc-laptop.c
@@ -1203,10 +1203,8 @@ static int eeepc_input_init(struct eeepc_laptop *eeepc)
1203 int error; 1203 int error;
1204 1204
1205 input = input_allocate_device(); 1205 input = input_allocate_device();
1206 if (!input) { 1206 if (!input)
1207 pr_info("Unable to allocate input device\n");
1208 return -ENOMEM; 1207 return -ENOMEM;
1209 }
1210 1208
1211 input->name = "Asus EeePC extra buttons"; 1209 input->name = "Asus EeePC extra buttons";
1212 input->phys = EEEPC_LAPTOP_FILE "/input0"; 1210 input->phys = EEEPC_LAPTOP_FILE "/input0";
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
index 1c86fa0857c8..8ba8956b5a48 100644
--- a/drivers/platform/x86/hp-wmi.c
+++ b/drivers/platform/x86/hp-wmi.c
@@ -54,6 +54,7 @@ MODULE_ALIAS("wmi:5FB7F034-2C63-45e9-BE91-3D44E2C707E4");
54#define HPWMI_HARDWARE_QUERY 0x4 54#define HPWMI_HARDWARE_QUERY 0x4
55#define HPWMI_WIRELESS_QUERY 0x5 55#define HPWMI_WIRELESS_QUERY 0x5
56#define HPWMI_HOTKEY_QUERY 0xc 56#define HPWMI_HOTKEY_QUERY 0xc
57#define HPWMI_FEATURE_QUERY 0xd
57#define HPWMI_WIRELESS2_QUERY 0x1b 58#define HPWMI_WIRELESS2_QUERY 0x1b
58#define HPWMI_POSTCODEERROR_QUERY 0x2a 59#define HPWMI_POSTCODEERROR_QUERY 0x2a
59 60
@@ -292,6 +293,17 @@ static int hp_wmi_tablet_state(void)
292 return (state & 0x4) ? 1 : 0; 293 return (state & 0x4) ? 1 : 0;
293} 294}
294 295
296static int hp_wmi_bios_2009_later(void)
297{
298 int state = 0;
299 int ret = hp_wmi_perform_query(HPWMI_FEATURE_QUERY, 0, &state,
300 sizeof(state), sizeof(state));
301 if (ret)
302 return ret;
303
304 return (state & 0x10) ? 1 : 0;
305}
306
295static int hp_wmi_set_block(void *data, bool blocked) 307static int hp_wmi_set_block(void *data, bool blocked)
296{ 308{
297 enum hp_wmi_radio r = (enum hp_wmi_radio) data; 309 enum hp_wmi_radio r = (enum hp_wmi_radio) data;
@@ -871,7 +883,7 @@ static int __init hp_wmi_bios_setup(struct platform_device *device)
871 gps_rfkill = NULL; 883 gps_rfkill = NULL;
872 rfkill2_count = 0; 884 rfkill2_count = 0;
873 885
874 if (hp_wmi_rfkill_setup(device)) 886 if (hp_wmi_bios_2009_later() || hp_wmi_rfkill_setup(device))
875 hp_wmi_rfkill2_setup(device); 887 hp_wmi_rfkill2_setup(device);
876 888
877 err = device_create_file(&device->dev, &dev_attr_display); 889 err = device_create_file(&device->dev, &dev_attr_display);
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index 6788acc22ab9..19ec95147f69 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -570,10 +570,8 @@ static int ideapad_input_init(struct ideapad_private *priv)
570 int error; 570 int error;
571 571
572 inputdev = input_allocate_device(); 572 inputdev = input_allocate_device();
573 if (!inputdev) { 573 if (!inputdev)
574 pr_info("Unable to allocate input device\n");
575 return -ENOMEM; 574 return -ENOMEM;
576 }
577 575
578 inputdev->name = "Ideapad extra buttons"; 576 inputdev->name = "Ideapad extra buttons";
579 inputdev->phys = "ideapad/input0"; 577 inputdev->phys = "ideapad/input0";
diff --git a/drivers/platform/x86/intel_mid_powerbtn.c b/drivers/platform/x86/intel_mid_powerbtn.c
index 6b18aba82cfa..8d6775266d66 100644
--- a/drivers/platform/x86/intel_mid_powerbtn.c
+++ b/drivers/platform/x86/intel_mid_powerbtn.c
@@ -66,10 +66,8 @@ static int mfld_pb_probe(struct platform_device *pdev)
66 return -EINVAL; 66 return -EINVAL;
67 67
68 input = input_allocate_device(); 68 input = input_allocate_device();
69 if (!input) { 69 if (!input)
70 dev_err(&pdev->dev, "Input device allocation error\n");
71 return -ENOMEM; 70 return -ENOMEM;
72 }
73 71
74 input->name = pdev->name; 72 input->name = pdev->name;
75 input->phys = "power-button/input0"; 73 input->phys = "power-button/input0";
diff --git a/drivers/platform/x86/intel_scu_ipc.c b/drivers/platform/x86/intel_scu_ipc.c
index d654f831410d..60ea476a9130 100644
--- a/drivers/platform/x86/intel_scu_ipc.c
+++ b/drivers/platform/x86/intel_scu_ipc.c
@@ -58,12 +58,56 @@
58 * message handler is called within firmware. 58 * message handler is called within firmware.
59 */ 59 */
60 60
61#define IPC_BASE_ADDR 0xFF11C000 /* IPC1 base register address */
62#define IPC_MAX_ADDR 0x100 /* Maximum IPC regisers */
63#define IPC_WWBUF_SIZE 20 /* IPC Write buffer Size */ 61#define IPC_WWBUF_SIZE 20 /* IPC Write buffer Size */
64#define IPC_RWBUF_SIZE 20 /* IPC Read buffer Size */ 62#define IPC_RWBUF_SIZE 20 /* IPC Read buffer Size */
65#define IPC_I2C_BASE 0xFF12B000 /* I2C control register base address */ 63#define IPC_IOC 0x100 /* IPC command register IOC bit */
66#define IPC_I2C_MAX_ADDR 0x10 /* Maximum I2C regisers */ 64
65enum {
66 SCU_IPC_LINCROFT,
67 SCU_IPC_PENWELL,
68 SCU_IPC_CLOVERVIEW,
69 SCU_IPC_TANGIER,
70};
71
72/* intel scu ipc driver data*/
73struct intel_scu_ipc_pdata_t {
74 u32 ipc_base;
75 u32 i2c_base;
76 u32 ipc_len;
77 u32 i2c_len;
78 u8 irq_mode;
79};
80
81static struct intel_scu_ipc_pdata_t intel_scu_ipc_pdata[] = {
82 [SCU_IPC_LINCROFT] = {
83 .ipc_base = 0xff11c000,
84 .i2c_base = 0xff12b000,
85 .ipc_len = 0x100,
86 .i2c_len = 0x10,
87 .irq_mode = 0,
88 },
89 [SCU_IPC_PENWELL] = {
90 .ipc_base = 0xff11c000,
91 .i2c_base = 0xff12b000,
92 .ipc_len = 0x100,
93 .i2c_len = 0x10,
94 .irq_mode = 1,
95 },
96 [SCU_IPC_CLOVERVIEW] = {
97 .ipc_base = 0xff11c000,
98 .i2c_base = 0xff12b000,
99 .ipc_len = 0x100,
100 .i2c_len = 0x10,
101 .irq_mode = 1,
102 },
103 [SCU_IPC_TANGIER] = {
104 .ipc_base = 0xff009000,
105 .i2c_base = 0xff00d000,
106 .ipc_len = 0x100,
107 .i2c_len = 0x10,
108 .irq_mode = 0,
109 },
110};
67 111
68static int ipc_probe(struct pci_dev *dev, const struct pci_device_id *id); 112static int ipc_probe(struct pci_dev *dev, const struct pci_device_id *id);
69static void ipc_remove(struct pci_dev *pdev); 113static void ipc_remove(struct pci_dev *pdev);
@@ -72,6 +116,8 @@ struct intel_scu_ipc_dev {
72 struct pci_dev *pdev; 116 struct pci_dev *pdev;
73 void __iomem *ipc_base; 117 void __iomem *ipc_base;
74 void __iomem *i2c_base; 118 void __iomem *i2c_base;
119 struct completion cmd_complete;
120 u8 irq_mode;
75}; 121};
76 122
77static struct intel_scu_ipc_dev ipcdev; /* Only one for now */ 123static struct intel_scu_ipc_dev ipcdev; /* Only one for now */
@@ -98,6 +144,10 @@ static DEFINE_MUTEX(ipclock); /* lock used to prevent multiple call to SCU */
98 */ 144 */
99static inline void ipc_command(u32 cmd) /* Send ipc command */ 145static inline void ipc_command(u32 cmd) /* Send ipc command */
100{ 146{
147 if (ipcdev.irq_mode) {
148 reinit_completion(&ipcdev.cmd_complete);
149 writel(cmd | IPC_IOC, ipcdev.ipc_base);
150 }
101 writel(cmd, ipcdev.ipc_base); 151 writel(cmd, ipcdev.ipc_base);
102} 152}
103 153
@@ -156,6 +206,30 @@ static inline int busy_loop(void) /* Wait till scu status is busy */
156 return 0; 206 return 0;
157} 207}
158 208
 209/* Wait till ipc ioc interrupt is received or timeout after 3 seconds (3 * HZ) */
210static inline int ipc_wait_for_interrupt(void)
211{
212 int status;
213
214 if (!wait_for_completion_timeout(&ipcdev.cmd_complete, 3 * HZ)) {
215 struct device *dev = &ipcdev.pdev->dev;
216 dev_err(dev, "IPC timed out\n");
217 return -ETIMEDOUT;
218 }
219
220 status = ipc_read_status();
221
222 if ((status >> 1) & 1)
223 return -EIO;
224
225 return 0;
226}
227
228int intel_scu_ipc_check_status(void)
229{
230 return ipcdev.irq_mode ? ipc_wait_for_interrupt() : busy_loop();
231}
232
159/* Read/Write power control(PMIC in Langwell, MSIC in PenWell) registers */ 233/* Read/Write power control(PMIC in Langwell, MSIC in PenWell) registers */
160static int pwr_reg_rdwr(u16 *addr, u8 *data, u32 count, u32 op, u32 id) 234static int pwr_reg_rdwr(u16 *addr, u8 *data, u32 count, u32 op, u32 id)
161{ 235{
@@ -196,8 +270,8 @@ static int pwr_reg_rdwr(u16 *addr, u8 *data, u32 count, u32 op, u32 id)
196 ipc_command(4 << 16 | id << 12 | 0 << 8 | op); 270 ipc_command(4 << 16 | id << 12 | 0 << 8 | op);
197 } 271 }
198 272
199 err = busy_loop(); 273 err = intel_scu_ipc_check_status();
200 if (id == IPC_CMD_PCNTRL_R) { /* Read rbuf */ 274 if (!err && id == IPC_CMD_PCNTRL_R) { /* Read rbuf */
201 /* Workaround: values are read as 0 without memcpy_fromio */ 275 /* Workaround: values are read as 0 without memcpy_fromio */
202 memcpy_fromio(cbuf, ipcdev.ipc_base + 0x90, 16); 276 memcpy_fromio(cbuf, ipcdev.ipc_base + 0x90, 16);
203 for (nc = 0; nc < count; nc++) 277 for (nc = 0; nc < count; nc++)
@@ -391,7 +465,7 @@ int intel_scu_ipc_simple_command(int cmd, int sub)
391 return -ENODEV; 465 return -ENODEV;
392 } 466 }
393 ipc_command(sub << 12 | cmd); 467 ipc_command(sub << 12 | cmd);
394 err = busy_loop(); 468 err = intel_scu_ipc_check_status();
395 mutex_unlock(&ipclock); 469 mutex_unlock(&ipclock);
396 return err; 470 return err;
397} 471}
@@ -425,10 +499,12 @@ int intel_scu_ipc_command(int cmd, int sub, u32 *in, int inlen,
425 ipc_data_writel(*in++, 4 * i); 499 ipc_data_writel(*in++, 4 * i);
426 500
427 ipc_command((inlen << 16) | (sub << 12) | cmd); 501 ipc_command((inlen << 16) | (sub << 12) | cmd);
428 err = busy_loop(); 502 err = intel_scu_ipc_check_status();
429 503
430 for (i = 0; i < outlen; i++) 504 if (!err) {
431 *out++ = ipc_data_readl(4 * i); 505 for (i = 0; i < outlen; i++)
506 *out++ = ipc_data_readl(4 * i);
507 }
432 508
433 mutex_unlock(&ipclock); 509 mutex_unlock(&ipclock);
434 return err; 510 return err;
@@ -491,6 +567,9 @@ EXPORT_SYMBOL(intel_scu_ipc_i2c_cntrl);
491 */ 567 */
492static irqreturn_t ioc(int irq, void *dev_id) 568static irqreturn_t ioc(int irq, void *dev_id)
493{ 569{
570 if (ipcdev.irq_mode)
571 complete(&ipcdev.cmd_complete);
572
494 return IRQ_HANDLED; 573 return IRQ_HANDLED;
495} 574}
496 575
@@ -504,13 +583,18 @@ static irqreturn_t ioc(int irq, void *dev_id)
504 */ 583 */
505static int ipc_probe(struct pci_dev *dev, const struct pci_device_id *id) 584static int ipc_probe(struct pci_dev *dev, const struct pci_device_id *id)
506{ 585{
507 int err; 586 int err, pid;
587 struct intel_scu_ipc_pdata_t *pdata;
508 resource_size_t pci_resource; 588 resource_size_t pci_resource;
509 589
510 if (ipcdev.pdev) /* We support only one SCU */ 590 if (ipcdev.pdev) /* We support only one SCU */
511 return -EBUSY; 591 return -EBUSY;
512 592
593 pid = id->driver_data;
594 pdata = &intel_scu_ipc_pdata[pid];
595
513 ipcdev.pdev = pci_dev_get(dev); 596 ipcdev.pdev = pci_dev_get(dev);
597 ipcdev.irq_mode = pdata->irq_mode;
514 598
515 err = pci_enable_device(dev); 599 err = pci_enable_device(dev);
516 if (err) 600 if (err)
@@ -524,14 +608,16 @@ static int ipc_probe(struct pci_dev *dev, const struct pci_device_id *id)
524 if (!pci_resource) 608 if (!pci_resource)
525 return -ENOMEM; 609 return -ENOMEM;
526 610
611 init_completion(&ipcdev.cmd_complete);
612
527 if (request_irq(dev->irq, ioc, 0, "intel_scu_ipc", &ipcdev)) 613 if (request_irq(dev->irq, ioc, 0, "intel_scu_ipc", &ipcdev))
528 return -EBUSY; 614 return -EBUSY;
529 615
530 ipcdev.ipc_base = ioremap_nocache(IPC_BASE_ADDR, IPC_MAX_ADDR); 616 ipcdev.ipc_base = ioremap_nocache(pdata->ipc_base, pdata->ipc_len);
531 if (!ipcdev.ipc_base) 617 if (!ipcdev.ipc_base)
532 return -ENOMEM; 618 return -ENOMEM;
533 619
534 ipcdev.i2c_base = ioremap_nocache(IPC_I2C_BASE, IPC_I2C_MAX_ADDR); 620 ipcdev.i2c_base = ioremap_nocache(pdata->i2c_base, pdata->i2c_len);
535 if (!ipcdev.i2c_base) { 621 if (!ipcdev.i2c_base) {
536 iounmap(ipcdev.ipc_base); 622 iounmap(ipcdev.ipc_base);
537 return -ENOMEM; 623 return -ENOMEM;
@@ -564,7 +650,10 @@ static void ipc_remove(struct pci_dev *pdev)
564} 650}
565 651
566static DEFINE_PCI_DEVICE_TABLE(pci_ids) = { 652static DEFINE_PCI_DEVICE_TABLE(pci_ids) = {
567 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x082a)}, 653 {PCI_VDEVICE(INTEL, 0x082a), SCU_IPC_LINCROFT},
654 {PCI_VDEVICE(INTEL, 0x080e), SCU_IPC_PENWELL},
655 {PCI_VDEVICE(INTEL, 0x08ea), SCU_IPC_CLOVERVIEW},
656 {PCI_VDEVICE(INTEL, 0x11a0), SCU_IPC_TANGIER},
568 { 0,} 657 { 0,}
569}; 658};
570MODULE_DEVICE_TABLE(pci, pci_ids); 659MODULE_DEVICE_TABLE(pci, pci_ids);
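
The SCU IPC changes above switch, on irq_mode platforms, from busy-polling the status register to sleeping on a completion that the IOC interrupt signals. A reduced sketch of that pattern (my_dev, my_issue_command() and my_poll_status() are hypothetical):

    #include <linux/completion.h>
    #include <linux/interrupt.h>
    #include <linux/jiffies.h>
    #include <linux/errno.h>

    struct my_dev {
            struct completion cmd_complete;
            bool irq_mode;
    };

    static irqreturn_t my_irq_handler(int irq, void *dev_id)
    {
            struct my_dev *d = dev_id;

            if (d->irq_mode)
                    complete(&d->cmd_complete);
            return IRQ_HANDLED;
    }

    static int my_send_and_wait(struct my_dev *d, u32 cmd)
    {
            if (d->irq_mode)
                    reinit_completion(&d->cmd_complete);

            my_issue_command(d, cmd);               /* hypothetical register write */

            if (!d->irq_mode)
                    return my_poll_status(d);       /* hypothetical busy-wait */

            if (!wait_for_completion_timeout(&d->cmd_complete, 3 * HZ))
                    return -ETIMEDOUT;
            return 0;
    }
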
diff --git a/drivers/platform/x86/panasonic-laptop.c b/drivers/platform/x86/panasonic-laptop.c
index 10d12b221601..3008fd20572e 100644
--- a/drivers/platform/x86/panasonic-laptop.c
+++ b/drivers/platform/x86/panasonic-laptop.c
@@ -490,11 +490,8 @@ static int acpi_pcc_init_input(struct pcc_acpi *pcc)
490 int error; 490 int error;
491 491
492 input_dev = input_allocate_device(); 492 input_dev = input_allocate_device();
493 if (!input_dev) { 493 if (!input_dev)
494 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
495 "Couldn't allocate input device for hotkey"));
496 return -ENOMEM; 494 return -ENOMEM;
497 }
498 495
499 input_dev->name = ACPI_PCC_DRIVER_NAME; 496 input_dev->name = ACPI_PCC_DRIVER_NAME;
500 input_dev->phys = ACPI_PCC_INPUT_PHYS; 497 input_dev->phys = ACPI_PCC_INPUT_PHYS;
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index 47caab0ea7a1..fb233ae7bb0e 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -140,12 +140,12 @@ MODULE_PARM_DESC(kbd_backlight_timeout,
140 "on the model (default: no change from current value)"); 140 "on the model (default: no change from current value)");
141 141
142#ifdef CONFIG_PM_SLEEP 142#ifdef CONFIG_PM_SLEEP
143static void sony_nc_kbd_backlight_resume(void);
144static void sony_nc_thermal_resume(void); 143static void sony_nc_thermal_resume(void);
145#endif 144#endif
146static int sony_nc_kbd_backlight_setup(struct platform_device *pd, 145static int sony_nc_kbd_backlight_setup(struct platform_device *pd,
147 unsigned int handle); 146 unsigned int handle);
148static void sony_nc_kbd_backlight_cleanup(struct platform_device *pd); 147static void sony_nc_kbd_backlight_cleanup(struct platform_device *pd,
148 unsigned int handle);
149 149
150static int sony_nc_battery_care_setup(struct platform_device *pd, 150static int sony_nc_battery_care_setup(struct platform_device *pd,
151 unsigned int handle); 151 unsigned int handle);
@@ -304,8 +304,8 @@ static int sony_laptop_input_keycode_map[] = {
304 KEY_FN_F10, /* 14 SONYPI_EVENT_FNKEY_F10 */ 304 KEY_FN_F10, /* 14 SONYPI_EVENT_FNKEY_F10 */
305 KEY_FN_F11, /* 15 SONYPI_EVENT_FNKEY_F11 */ 305 KEY_FN_F11, /* 15 SONYPI_EVENT_FNKEY_F11 */
306 KEY_FN_F12, /* 16 SONYPI_EVENT_FNKEY_F12 */ 306 KEY_FN_F12, /* 16 SONYPI_EVENT_FNKEY_F12 */
307 KEY_FN_F1, /* 17 SONYPI_EVENT_FNKEY_1 */ 307 KEY_FN_1, /* 17 SONYPI_EVENT_FNKEY_1 */
308 KEY_FN_F2, /* 18 SONYPI_EVENT_FNKEY_2 */ 308 KEY_FN_2, /* 18 SONYPI_EVENT_FNKEY_2 */
309 KEY_FN_D, /* 19 SONYPI_EVENT_FNKEY_D */ 309 KEY_FN_D, /* 19 SONYPI_EVENT_FNKEY_D */
310 KEY_FN_E, /* 20 SONYPI_EVENT_FNKEY_E */ 310 KEY_FN_E, /* 20 SONYPI_EVENT_FNKEY_E */
311 KEY_FN_F, /* 21 SONYPI_EVENT_FNKEY_F */ 311 KEY_FN_F, /* 21 SONYPI_EVENT_FNKEY_F */
@@ -1444,7 +1444,7 @@ static void sony_nc_function_cleanup(struct platform_device *pd)
1444 case 0x014b: 1444 case 0x014b:
1445 case 0x014c: 1445 case 0x014c:
1446 case 0x0163: 1446 case 0x0163:
1447 sony_nc_kbd_backlight_cleanup(pd); 1447 sony_nc_kbd_backlight_cleanup(pd, handle);
1448 break; 1448 break;
1449 default: 1449 default:
1450 continue; 1450 continue;
@@ -1486,13 +1486,6 @@ static void sony_nc_function_resume(void)
1486 case 0x0135: 1486 case 0x0135:
1487 sony_nc_rfkill_update(); 1487 sony_nc_rfkill_update();
1488 break; 1488 break;
1489 case 0x0137:
1490 case 0x0143:
1491 case 0x014b:
1492 case 0x014c:
1493 case 0x0163:
1494 sony_nc_kbd_backlight_resume();
1495 break;
1496 default: 1489 default:
1497 continue; 1490 continue;
1498 } 1491 }
@@ -1822,6 +1815,12 @@ static int sony_nc_kbd_backlight_setup(struct platform_device *pd,
1822 int result; 1815 int result;
1823 int ret = 0; 1816 int ret = 0;
1824 1817
1818 if (kbdbl_ctl) {
1819 pr_warn("handle 0x%.4x: keyboard backlight setup already done for 0x%.4x\n",
1820 handle, kbdbl_ctl->handle);
1821 return -EBUSY;
1822 }
1823
1825 /* verify the kbd backlight presence, these handles are not used for 1824 /* verify the kbd backlight presence, these handles are not used for
1826 * keyboard backlight only 1825 * keyboard backlight only
1827 */ 1826 */
@@ -1881,9 +1880,10 @@ outkzalloc:
1881 return ret; 1880 return ret;
1882} 1881}
1883 1882
1884static void sony_nc_kbd_backlight_cleanup(struct platform_device *pd) 1883static void sony_nc_kbd_backlight_cleanup(struct platform_device *pd,
1884 unsigned int handle)
1885{ 1885{
1886 if (kbdbl_ctl) { 1886 if (kbdbl_ctl && handle == kbdbl_ctl->handle) {
1887 device_remove_file(&pd->dev, &kbdbl_ctl->mode_attr); 1887 device_remove_file(&pd->dev, &kbdbl_ctl->mode_attr);
1888 device_remove_file(&pd->dev, &kbdbl_ctl->timeout_attr); 1888 device_remove_file(&pd->dev, &kbdbl_ctl->timeout_attr);
1889 kfree(kbdbl_ctl); 1889 kfree(kbdbl_ctl);
@@ -1891,25 +1891,6 @@ static void sony_nc_kbd_backlight_cleanup(struct platform_device *pd)
1891 } 1891 }
1892} 1892}
1893 1893
1894#ifdef CONFIG_PM_SLEEP
1895static void sony_nc_kbd_backlight_resume(void)
1896{
1897 int ignore = 0;
1898
1899 if (!kbdbl_ctl)
1900 return;
1901
1902 if (kbdbl_ctl->mode == 0)
1903 sony_call_snc_handle(kbdbl_ctl->handle, kbdbl_ctl->base,
1904 &ignore);
1905
1906 if (kbdbl_ctl->timeout != 0)
1907 sony_call_snc_handle(kbdbl_ctl->handle,
1908 (kbdbl_ctl->base + 0x200) |
1909 (kbdbl_ctl->timeout << 0x10), &ignore);
1910}
1911#endif
1912
1913struct battery_care_control { 1894struct battery_care_control {
1914 struct device_attribute attrs[2]; 1895 struct device_attribute attrs[2];
1915 unsigned int handle; 1896 unsigned int handle;
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 05e046aa5e31..58b0274d24cc 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -6438,7 +6438,12 @@ static struct ibm_struct brightness_driver_data = {
6438#define TPACPI_ALSA_SHRTNAME "ThinkPad Console Audio Control" 6438#define TPACPI_ALSA_SHRTNAME "ThinkPad Console Audio Control"
6439#define TPACPI_ALSA_MIXERNAME TPACPI_ALSA_SHRTNAME 6439#define TPACPI_ALSA_MIXERNAME TPACPI_ALSA_SHRTNAME
6440 6440
6441static int alsa_index = ~((1 << (SNDRV_CARDS - 3)) - 1); /* last three slots */ 6441#if SNDRV_CARDS <= 32
6442#define DEFAULT_ALSA_IDX ~((1 << (SNDRV_CARDS - 3)) - 1)
6443#else
6444#define DEFAULT_ALSA_IDX ~((1 << (32 - 3)) - 1)
6445#endif
6446static int alsa_index = DEFAULT_ALSA_IDX; /* last three slots */
6442static char *alsa_id = "ThinkPadEC"; 6447static char *alsa_id = "ThinkPadEC";
6443static bool alsa_enable = SNDRV_DEFAULT_ENABLE1; 6448static bool alsa_enable = SNDRV_DEFAULT_ENABLE1;
6444 6449
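
The #if added above caps the shift because 1 << (SNDRV_CARDS - 3) can exceed the width of int once SNDRV_CARDS is configured above 32. A quick userspace check of the "last three slots" mask with 32 cards:

    #include <stdio.h>

    int main(void)
    {
            int cards = 32;  /* stands in for SNDRV_CARDS */
            unsigned int idx = ~((1U << (cards - 3)) - 1);

            /* top three bits set: indexes 29, 30 and 31 only */
            printf("last-three-slots mask: 0x%08x\n", idx);  /* 0xe0000000 */
            return 0;
    }
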
@@ -9163,7 +9168,6 @@ static int __init thinkpad_acpi_module_init(void)
9163 mutex_init(&tpacpi_inputdev_send_mutex); 9168 mutex_init(&tpacpi_inputdev_send_mutex);
9164 tpacpi_inputdev = input_allocate_device(); 9169 tpacpi_inputdev = input_allocate_device();
9165 if (!tpacpi_inputdev) { 9170 if (!tpacpi_inputdev) {
9166 pr_err("unable to allocate input device\n");
9167 thinkpad_acpi_module_exit(); 9171 thinkpad_acpi_module_exit();
9168 return -ENOMEM; 9172 return -ENOMEM;
9169 } else { 9173 } else {
diff --git a/drivers/platform/x86/topstar-laptop.c b/drivers/platform/x86/topstar-laptop.c
index 67897c8740ba..e597de05e6c2 100644
--- a/drivers/platform/x86/topstar-laptop.c
+++ b/drivers/platform/x86/topstar-laptop.c
@@ -97,10 +97,8 @@ static int acpi_topstar_init_hkey(struct topstar_hkey *hkey)
97 int error; 97 int error;
98 98
99 input = input_allocate_device(); 99 input = input_allocate_device();
100 if (!input) { 100 if (!input)
101 pr_err("Unable to allocate input device\n");
102 return -ENOMEM; 101 return -ENOMEM;
103 }
104 102
105 input->name = "Topstar Laptop extra buttons"; 103 input->name = "Topstar Laptop extra buttons";
106 input->phys = "topstar/input0"; 104 input->phys = "topstar/input0";
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index 0cfadb65f7c6..7fce391818d3 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -975,10 +975,8 @@ static int toshiba_acpi_setup_keyboard(struct toshiba_acpi_dev *dev)
975 u32 hci_result; 975 u32 hci_result;
976 976
977 dev->hotkey_dev = input_allocate_device(); 977 dev->hotkey_dev = input_allocate_device();
978 if (!dev->hotkey_dev) { 978 if (!dev->hotkey_dev)
979 pr_info("Unable to register input device\n");
980 return -ENOMEM; 979 return -ENOMEM;
981 }
982 980
983 dev->hotkey_dev->name = "Toshiba input device"; 981 dev->hotkey_dev->name = "Toshiba input device";
984 dev->hotkey_dev->phys = "toshiba_acpi/input0"; 982 dev->hotkey_dev->phys = "toshiba_acpi/input0";
diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c
index 62e8c221d01e..c2e7b2657aeb 100644
--- a/drivers/platform/x86/wmi.c
+++ b/drivers/platform/x86/wmi.c
@@ -672,8 +672,10 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
672 struct wmi_block *wblock; 672 struct wmi_block *wblock;
673 673
674 wblock = dev_get_drvdata(dev); 674 wblock = dev_get_drvdata(dev);
675 if (!wblock) 675 if (!wblock) {
676 return -ENOMEM; 676 strcat(buf, "\n");
677 return strlen(buf);
678 }
677 679
678 wmi_gtoa(wblock->gblock.guid, guid_string); 680 wmi_gtoa(wblock->gblock.guid, guid_string);
679 681
diff --git a/drivers/pnp/driver.c b/drivers/pnp/driver.c
index 6936e0acedcd..f748cc8cbb03 100644
--- a/drivers/pnp/driver.c
+++ b/drivers/pnp/driver.c
@@ -197,6 +197,11 @@ static int pnp_bus_freeze(struct device *dev)
197 return __pnp_bus_suspend(dev, PMSG_FREEZE); 197 return __pnp_bus_suspend(dev, PMSG_FREEZE);
198} 198}
199 199
200static int pnp_bus_poweroff(struct device *dev)
201{
202 return __pnp_bus_suspend(dev, PMSG_HIBERNATE);
203}
204
200static int pnp_bus_resume(struct device *dev) 205static int pnp_bus_resume(struct device *dev)
201{ 206{
202 struct pnp_dev *pnp_dev = to_pnp_dev(dev); 207 struct pnp_dev *pnp_dev = to_pnp_dev(dev);
@@ -234,9 +239,14 @@ static int pnp_bus_resume(struct device *dev)
234} 239}
235 240
236static const struct dev_pm_ops pnp_bus_dev_pm_ops = { 241static const struct dev_pm_ops pnp_bus_dev_pm_ops = {
242 /* Suspend callbacks */
237 .suspend = pnp_bus_suspend, 243 .suspend = pnp_bus_suspend,
238 .freeze = pnp_bus_freeze,
239 .resume = pnp_bus_resume, 244 .resume = pnp_bus_resume,
245 /* Hibernate callbacks */
246 .freeze = pnp_bus_freeze,
247 .thaw = pnp_bus_resume,
248 .poweroff = pnp_bus_poweroff,
249 .restore = pnp_bus_resume,
240}; 250};
241 251
242struct bus_type pnp_bus_type = { 252struct bus_type pnp_bus_type = {
diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig
index 5e2054afe840..85ad58c6da17 100644
--- a/drivers/power/Kconfig
+++ b/drivers/power/Kconfig
@@ -196,6 +196,7 @@ config BATTERY_MAX17040
196config BATTERY_MAX17042 196config BATTERY_MAX17042
197 tristate "Maxim MAX17042/17047/17050/8997/8966 Fuel Gauge" 197 tristate "Maxim MAX17042/17047/17050/8997/8966 Fuel Gauge"
198 depends on I2C 198 depends on I2C
199 select REGMAP_I2C
199 help 200 help
200 MAX17042 is fuel-gauge systems for lithium-ion (Li+) batteries 201 MAX17042 is fuel-gauge systems for lithium-ion (Li+) batteries
201 in handheld and portable equipment. The MAX17042 is configured 202 in handheld and portable equipment. The MAX17042 is configured
diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
index 00e667296360..557af943b2f5 100644
--- a/drivers/power/power_supply_core.c
+++ b/drivers/power/power_supply_core.c
@@ -511,6 +511,10 @@ int power_supply_register(struct device *parent, struct power_supply *psy)
511 dev_set_drvdata(dev, psy); 511 dev_set_drvdata(dev, psy);
512 psy->dev = dev; 512 psy->dev = dev;
513 513
514 rc = dev_set_name(dev, "%s", psy->name);
515 if (rc)
516 goto dev_set_name_failed;
517
514 INIT_WORK(&psy->changed_work, power_supply_changed_work); 518 INIT_WORK(&psy->changed_work, power_supply_changed_work);
515 519
516 rc = power_supply_check_supplies(psy); 520 rc = power_supply_check_supplies(psy);
@@ -524,10 +528,6 @@ int power_supply_register(struct device *parent, struct power_supply *psy)
524 if (rc) 528 if (rc)
525 goto wakeup_init_failed; 529 goto wakeup_init_failed;
526 530
527 rc = kobject_set_name(&dev->kobj, "%s", psy->name);
528 if (rc)
529 goto kobject_set_name_failed;
530
531 rc = device_add(dev); 531 rc = device_add(dev);
532 if (rc) 532 if (rc)
533 goto device_add_failed; 533 goto device_add_failed;
@@ -553,11 +553,11 @@ create_triggers_failed:
553register_cooler_failed: 553register_cooler_failed:
554 psy_unregister_thermal(psy); 554 psy_unregister_thermal(psy);
555register_thermal_failed: 555register_thermal_failed:
556wakeup_init_failed:
557 device_del(dev); 556 device_del(dev);
558kobject_set_name_failed:
559device_add_failed: 557device_add_failed:
558wakeup_init_failed:
560check_supplies_failed: 559check_supplies_failed:
560dev_set_name_failed:
561 put_device(dev); 561 put_device(dev);
562success: 562success:
563 return rc; 563 return rc;
diff --git a/drivers/powercap/intel_rapl.c b/drivers/powercap/intel_rapl.c
index 2a786c504460..3c6768378a94 100644
--- a/drivers/powercap/intel_rapl.c
+++ b/drivers/powercap/intel_rapl.c
@@ -833,6 +833,11 @@ static int rapl_write_data_raw(struct rapl_domain *rd,
833 return 0; 833 return 0;
834} 834}
835 835
836static const struct x86_cpu_id energy_unit_quirk_ids[] = {
837 { X86_VENDOR_INTEL, 6, 0x37},/* VLV */
838 {}
839};
840
836static int rapl_check_unit(struct rapl_package *rp, int cpu) 841static int rapl_check_unit(struct rapl_package *rp, int cpu)
837{ 842{
838 u64 msr_val; 843 u64 msr_val;
@@ -853,8 +858,11 @@ static int rapl_check_unit(struct rapl_package *rp, int cpu)
853 * time unit: 1/time_unit_divisor Seconds 858 * time unit: 1/time_unit_divisor Seconds
854 */ 859 */
855 value = (msr_val & ENERGY_UNIT_MASK) >> ENERGY_UNIT_OFFSET; 860 value = (msr_val & ENERGY_UNIT_MASK) >> ENERGY_UNIT_OFFSET;
856 rp->energy_unit_divisor = 1 << value; 861 /* some CPUs have different way to calculate energy unit */
857 862 if (x86_match_cpu(energy_unit_quirk_ids))
863 rp->energy_unit_divisor = 1000000 / (1 << value);
864 else
865 rp->energy_unit_divisor = 1 << value;
858 866
859 value = (msr_val & POWER_UNIT_MASK) >> POWER_UNIT_OFFSET; 867 value = (msr_val & POWER_UNIT_MASK) >> POWER_UNIT_OFFSET;
860 rp->power_unit_divisor = 1 << value; 868 rp->power_unit_divisor = 1 << value;
@@ -941,6 +949,7 @@ static void package_power_limit_irq_restore(int package_id)
941static const struct x86_cpu_id rapl_ids[] = { 949static const struct x86_cpu_id rapl_ids[] = {
942 { X86_VENDOR_INTEL, 6, 0x2a},/* SNB */ 950 { X86_VENDOR_INTEL, 6, 0x2a},/* SNB */
943 { X86_VENDOR_INTEL, 6, 0x2d},/* SNB EP */ 951 { X86_VENDOR_INTEL, 6, 0x2d},/* SNB EP */
952 { X86_VENDOR_INTEL, 6, 0x37},/* VLV */
944 { X86_VENDOR_INTEL, 6, 0x3a},/* IVB */ 953 { X86_VENDOR_INTEL, 6, 0x3a},/* IVB */
945 { X86_VENDOR_INTEL, 6, 0x45},/* HSW */ 954 { X86_VENDOR_INTEL, 6, 0x45},/* HSW */
946 /* TODO: Add more CPU IDs after testing */ 955 /* TODO: Add more CPU IDs after testing */
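
The quirk table above gives Valleyview (model 0x37) an energy-unit divisor of 1000000 / (1 << value) instead of the default 1 << value. The arithmetic, for an example ENERGY_UNIT field of 5 (the value is hypothetical, chosen only for illustration):

    #include <stdio.h>

    int main(void)
    {
            unsigned int value = 5;  /* example ENERGY_UNIT field from the MSR */

            printf("default divisor: %u\n", 1U << value);               /* 32 */
            printf("VLV divisor:     %u\n", 1000000U / (1U << value));  /* 31250 */
            return 0;
    }
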
diff --git a/drivers/powercap/powercap_sys.c b/drivers/powercap/powercap_sys.c
index 8d0fe431dbdd..84419af16f77 100644
--- a/drivers/powercap/powercap_sys.c
+++ b/drivers/powercap/powercap_sys.c
@@ -377,9 +377,14 @@ static void create_power_zone_common_attributes(
 	if (power_zone->ops->get_max_energy_range_uj)
 		power_zone->zone_dev_attrs[count++] =
 					&dev_attr_max_energy_range_uj.attr;
-	if (power_zone->ops->get_energy_uj)
+	if (power_zone->ops->get_energy_uj) {
+		if (power_zone->ops->reset_energy_uj)
+			dev_attr_energy_uj.attr.mode = S_IWUSR | S_IRUGO;
+		else
+			dev_attr_energy_uj.attr.mode = S_IRUGO;
 		power_zone->zone_dev_attrs[count++] =
 					&dev_attr_energy_uj.attr;
+	}
 	if (power_zone->ops->get_power_uw)
 		power_zone->zone_dev_attrs[count++] =
 					&dev_attr_power_uw.attr;
diff --git a/drivers/regulator/arizona-micsupp.c b/drivers/regulator/arizona-micsupp.c
index 724706a97dc4..fd3154d86901 100644
--- a/drivers/regulator/arizona-micsupp.c
+++ b/drivers/regulator/arizona-micsupp.c
@@ -174,6 +174,33 @@ static const struct regulator_desc arizona_micsupp = {
 	.owner = THIS_MODULE,
 };
 
+static const struct regulator_linear_range arizona_micsupp_ext_ranges[] = {
+	REGULATOR_LINEAR_RANGE(900000, 0, 0x14, 25000),
+	REGULATOR_LINEAR_RANGE(1500000, 0x15, 0x27, 100000),
+};
+
+static const struct regulator_desc arizona_micsupp_ext = {
+	.name = "MICVDD",
+	.supply_name = "CPVDD",
+	.type = REGULATOR_VOLTAGE,
+	.n_voltages = 40,
+	.ops = &arizona_micsupp_ops,
+
+	.vsel_reg = ARIZONA_LDO2_CONTROL_1,
+	.vsel_mask = ARIZONA_LDO2_VSEL_MASK,
+	.enable_reg = ARIZONA_MIC_CHARGE_PUMP_1,
+	.enable_mask = ARIZONA_CPMIC_ENA,
+	.bypass_reg = ARIZONA_MIC_CHARGE_PUMP_1,
+	.bypass_mask = ARIZONA_CPMIC_BYPASS,
+
+	.linear_ranges = arizona_micsupp_ext_ranges,
+	.n_linear_ranges = ARRAY_SIZE(arizona_micsupp_ext_ranges),
+
+	.enable_time = 3000,
+
+	.owner = THIS_MODULE,
+};
+
 static const struct regulator_init_data arizona_micsupp_default = {
 	.constraints = {
 		.valid_ops_mask = REGULATOR_CHANGE_STATUS |
@@ -186,9 +213,22 @@ static const struct regulator_init_data arizona_micsupp_default = {
 	.num_consumer_supplies = 1,
 };
 
+static const struct regulator_init_data arizona_micsupp_ext_default = {
+	.constraints = {
+		.valid_ops_mask = REGULATOR_CHANGE_STATUS |
+				REGULATOR_CHANGE_VOLTAGE |
+				REGULATOR_CHANGE_BYPASS,
+		.min_uV = 900000,
+		.max_uV = 3300000,
+	},
+
+	.num_consumer_supplies = 1,
+};
+
 static int arizona_micsupp_probe(struct platform_device *pdev)
 {
 	struct arizona *arizona = dev_get_drvdata(pdev->dev.parent);
+	const struct regulator_desc *desc;
 	struct regulator_config config = { };
 	struct arizona_micsupp *micsupp;
 	int ret;
@@ -207,7 +247,17 @@ static int arizona_micsupp_probe(struct platform_device *pdev)
 	 * default init_data for it. This will be overridden with
 	 * platform data if provided.
 	 */
-	micsupp->init_data = arizona_micsupp_default;
+	switch (arizona->type) {
+	case WM5110:
+		desc = &arizona_micsupp_ext;
+		micsupp->init_data = arizona_micsupp_ext_default;
+		break;
+	default:
+		desc = &arizona_micsupp;
+		micsupp->init_data = arizona_micsupp_default;
+		break;
+	}
+
 	micsupp->init_data.consumer_supplies = &micsupp->supply;
 	micsupp->supply.supply = "MICVDD";
 	micsupp->supply.dev_name = dev_name(arizona->dev);
@@ -226,7 +276,7 @@ static int arizona_micsupp_probe(struct platform_device *pdev)
 					ARIZONA_CPMIC_BYPASS, 0);
 
 	micsupp->regulator = devm_regulator_register(&pdev->dev,
-						&arizona_micsupp,
+						desc,
 						&config);
 	if (IS_ERR(micsupp->regulator)) {
 		ret = PTR_ERR(micsupp->regulator);
diff --git a/drivers/regulator/as3722-regulator.c b/drivers/regulator/as3722-regulator.c
index 5917fe3dc983..b9f1d24c6812 100644
--- a/drivers/regulator/as3722-regulator.c
+++ b/drivers/regulator/as3722-regulator.c
@@ -590,8 +590,8 @@ static int as3722_sd016_set_current_limit(struct regulator_dev *rdev,
 	default:
 		return -EINVAL;
 	}
+	ret <<= ffs(mask) - 1;
 	val = ret & mask;
-	val <<= ffs(mask) - 1;
 	return as3722_update_bits(as3722, reg, mask, val);
 }
 
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 6382f0af353b..d85f31385b24 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -119,6 +119,11 @@ static const char *rdev_get_name(struct regulator_dev *rdev)
 	return "";
 }
 
+static bool have_full_constraints(void)
+{
+	return has_full_constraints || of_have_populated_dt();
+}
+
 /**
  * of_get_regulator - get a regulator device node based on supply name
  * @dev: Device pointer for the consumer (of regulator) device
@@ -1340,7 +1345,7 @@ static struct regulator *_regulator_get(struct device *dev, const char *id,
 	 * Assume that a regulator is physically present and enabled
 	 * even if it isn't hooked up and just provide a dummy.
 	 */
-	if (has_full_constraints && allow_dummy) {
+	if (have_full_constraints() && allow_dummy) {
 		pr_warn("%s supply %s not found, using dummy regulator\n",
 			devname, id);
 
@@ -2184,6 +2189,9 @@ int regulator_list_voltage(struct regulator *regulator, unsigned selector)
 	struct regulator_ops *ops = rdev->desc->ops;
 	int ret;
 
+	if (rdev->desc->fixed_uV && rdev->desc->n_voltages == 1 && !selector)
+		return rdev->desc->fixed_uV;
+
 	if (!ops->list_voltage || selector >= rdev->desc->n_voltages)
 		return -EINVAL;
 
@@ -3624,7 +3632,7 @@ int regulator_suspend_finish(void)
 			if (error)
 				ret = error;
 		} else {
-			if (!has_full_constraints)
+			if (!have_full_constraints())
 				goto unlock;
 			if (!ops->disable)
 				goto unlock;
@@ -3822,7 +3830,7 @@ static int __init regulator_init_complete(void)
 		if (!enabled)
 			goto unlock;
 
-		if (has_full_constraints) {
+		if (have_full_constraints()) {
 			/* We log since this may kill the system if it
 			 * goes wrong. */
 			rdev_info(rdev, "disabling\n");
diff --git a/drivers/regulator/gpio-regulator.c b/drivers/regulator/gpio-regulator.c
index 04406a918c04..234960dc9607 100644
--- a/drivers/regulator/gpio-regulator.c
+++ b/drivers/regulator/gpio-regulator.c
@@ -139,6 +139,7 @@ of_get_gpio_regulator_config(struct device *dev, struct device_node *np)
 	struct property *prop;
 	const char *regtype;
 	int proplen, gpio, i;
+	int ret;
 
 	config = devm_kzalloc(dev,
 			sizeof(struct gpio_regulator_config),
@@ -202,7 +203,11 @@ of_get_gpio_regulator_config(struct device *dev, struct device_node *np)
 	}
 	config->nr_states = i;
 
-	of_property_read_string(np, "regulator-type", &regtype);
+	ret = of_property_read_string(np, "regulator-type", &regtype);
+	if (ret < 0) {
+		dev_err(dev, "Missing 'regulator-type' property\n");
+		return ERR_PTR(-EINVAL);
+	}
 
 	if (!strncmp("voltage", regtype, 7))
 		config->type = REGULATOR_VOLTAGE;
diff --git a/drivers/regulator/pfuze100-regulator.c b/drivers/regulator/pfuze100-regulator.c
index ba67b2c4e2e7..8b5e4c712a01 100644
--- a/drivers/regulator/pfuze100-regulator.c
+++ b/drivers/regulator/pfuze100-regulator.c
@@ -38,7 +38,7 @@
 
 #define PFUZE100_DEVICEID 0x0
 #define PFUZE100_REVID 0x3
-#define PFUZE100_FABID 0x3
+#define PFUZE100_FABID 0x4
 
 #define PFUZE100_SW1ABVOL 0x20
 #define PFUZE100_SW1CVOL 0x2e
@@ -308,9 +308,15 @@ static int pfuze_identify(struct pfuze_chip *pfuze_chip)
 	if (ret)
 		return ret;
 
-	if (value & 0x0f) {
-		dev_warn(pfuze_chip->dev, "Illegal ID: %x\n", value);
-		return -ENODEV;
+	switch (value & 0x0f) {
+	/* Freescale misprogrammed 1-3% of parts prior to week 8 of 2013 as ID=8 */
+	case 0x8:
+		dev_info(pfuze_chip->dev, "Assuming misprogrammed ID=0x8");
+	case 0x0:
+		break;
+	default:
+		dev_warn(pfuze_chip->dev, "Illegal ID: %x\n", value);
+		return -ENODEV;
 	}
 
 	ret = regmap_read(pfuze_chip->regmap, PFUZE100_REVID, &value);
diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c
index 333677d68d0e..9e61922d8230 100644
--- a/drivers/regulator/s2mps11.c
+++ b/drivers/regulator/s2mps11.c
@@ -438,7 +438,7 @@ common_reg:
 	platform_set_drvdata(pdev, s2mps11);
 
 	config.dev = &pdev->dev;
-	config.regmap = iodev->regmap;
+	config.regmap = iodev->regmap_pmic;
 	config.driver_data = s2mps11;
 	for (i = 0; i < S2MPS11_REGULATOR_MAX; i++) {
 		if (!reg_np) {
diff --git a/drivers/regulator/s5m8767.c b/drivers/regulator/s5m8767.c
index cbf91e25cf7f..aeb40aad0ae7 100644
--- a/drivers/regulator/s5m8767.c
+++ b/drivers/regulator/s5m8767.c
@@ -925,7 +925,7 @@ static int s5m8767_pmic_probe(struct platform_device *pdev)
 		config.dev = s5m8767->dev;
 		config.init_data = pdata->regulators[i].initdata;
 		config.driver_data = s5m8767;
-		config.regmap = iodev->regmap;
+		config.regmap = iodev->regmap_pmic;
 		config.of_node = pdata->regulators[i].reg_node;
 
 		rdev[i] = devm_regulator_register(&pdev->dev, &regulators[id],
diff --git a/drivers/rtc/rtc-at91rm9200.c b/drivers/rtc/rtc-at91rm9200.c
index c0da95e95702..3281c90691c3 100644
--- a/drivers/rtc/rtc-at91rm9200.c
+++ b/drivers/rtc/rtc-at91rm9200.c
@@ -220,6 +220,8 @@ static int at91_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
 
 	at91_alarm_year = tm.tm_year;
 
+	tm.tm_mon = alrm->time.tm_mon;
+	tm.tm_mday = alrm->time.tm_mday;
 	tm.tm_hour = alrm->time.tm_hour;
 	tm.tm_min = alrm->time.tm_min;
 	tm.tm_sec = alrm->time.tm_sec;
diff --git a/drivers/rtc/rtc-s5m.c b/drivers/rtc/rtc-s5m.c
index b7fd02bc0a14..ae8119dc2846 100644
--- a/drivers/rtc/rtc-s5m.c
+++ b/drivers/rtc/rtc-s5m.c
@@ -28,10 +28,20 @@
 #include <linux/mfd/samsung/irq.h>
 #include <linux/mfd/samsung/rtc.h>
 
+/*
+ * Maximum number of retries for checking changes in UDR field
+ * of SEC_RTC_UDR_CON register (to limit possible endless loop).
+ *
+ * After writing to RTC registers (setting time or alarm) read the UDR field
+ * in SEC_RTC_UDR_CON register. UDR is auto-cleared when data have
+ * been transferred.
+ */
+#define UDR_READ_RETRY_CNT 5
+
 struct s5m_rtc_info {
 	struct device *dev;
 	struct sec_pmic_dev *s5m87xx;
-	struct regmap *rtc;
+	struct regmap *regmap;
 	struct rtc_device *rtc_dev;
 	int irq;
 	int device_type;
@@ -84,12 +94,31 @@ static int s5m8767_tm_to_data(struct rtc_time *tm, u8 *data)
 	}
 }
 
+/*
+ * Read RTC_UDR_CON register and wait till UDR field is cleared.
+ * This indicates that time/alarm update ended.
+ */
+static inline int s5m8767_wait_for_udr_update(struct s5m_rtc_info *info)
+{
+	int ret, retry = UDR_READ_RETRY_CNT;
+	unsigned int data;
+
+	do {
+		ret = regmap_read(info->regmap, SEC_RTC_UDR_CON, &data);
+	} while (--retry && (data & RTC_UDR_MASK) && !ret);
+
+	if (!retry)
+		dev_err(info->dev, "waiting for UDR update, reached max number of retries\n");
+
+	return ret;
+}
+
 static inline int s5m8767_rtc_set_time_reg(struct s5m_rtc_info *info)
 {
 	int ret;
 	unsigned int data;
 
-	ret = regmap_read(info->rtc, SEC_RTC_UDR_CON, &data);
+	ret = regmap_read(info->regmap, SEC_RTC_UDR_CON, &data);
 	if (ret < 0) {
 		dev_err(info->dev, "failed to read update reg(%d)\n", ret);
 		return ret;
@@ -98,15 +127,13 @@ static inline int s5m8767_rtc_set_time_reg(struct s5m_rtc_info *info)
 	data |= RTC_TIME_EN_MASK;
 	data |= RTC_UDR_MASK;
 
-	ret = regmap_write(info->rtc, SEC_RTC_UDR_CON, data);
+	ret = regmap_write(info->regmap, SEC_RTC_UDR_CON, data);
 	if (ret < 0) {
 		dev_err(info->dev, "failed to write update reg(%d)\n", ret);
 		return ret;
 	}
 
-	do {
-		ret = regmap_read(info->rtc, SEC_RTC_UDR_CON, &data);
-	} while ((data & RTC_UDR_MASK) && !ret);
+	ret = s5m8767_wait_for_udr_update(info);
 
 	return ret;
 }
@@ -116,7 +143,7 @@ static inline int s5m8767_rtc_set_alarm_reg(struct s5m_rtc_info *info)
 	int ret;
 	unsigned int data;
 
-	ret = regmap_read(info->rtc, SEC_RTC_UDR_CON, &data);
+	ret = regmap_read(info->regmap, SEC_RTC_UDR_CON, &data);
 	if (ret < 0) {
 		dev_err(info->dev, "%s: fail to read update reg(%d)\n",
 			__func__, ret);
@@ -126,16 +153,14 @@ static inline int s5m8767_rtc_set_alarm_reg(struct s5m_rtc_info *info)
 	data &= ~RTC_TIME_EN_MASK;
 	data |= RTC_UDR_MASK;
 
-	ret = regmap_write(info->rtc, SEC_RTC_UDR_CON, data);
+	ret = regmap_write(info->regmap, SEC_RTC_UDR_CON, data);
 	if (ret < 0) {
 		dev_err(info->dev, "%s: fail to write update reg(%d)\n",
 			__func__, ret);
 		return ret;
 	}
 
-	do {
-		ret = regmap_read(info->rtc, SEC_RTC_UDR_CON, &data);
-	} while ((data & RTC_UDR_MASK) && !ret);
+	ret = s5m8767_wait_for_udr_update(info);
 
 	return ret;
 }
@@ -178,7 +203,7 @@ static int s5m_rtc_read_time(struct device *dev, struct rtc_time *tm)
 	u8 data[8];
 	int ret;
 
-	ret = regmap_bulk_read(info->rtc, SEC_RTC_SEC, data, 8);
+	ret = regmap_bulk_read(info->regmap, SEC_RTC_SEC, data, 8);
 	if (ret < 0)
 		return ret;
 
@@ -226,7 +251,7 @@ static int s5m_rtc_set_time(struct device *dev, struct rtc_time *tm)
 		1900 + tm->tm_year, 1 + tm->tm_mon, tm->tm_mday,
 		tm->tm_hour, tm->tm_min, tm->tm_sec, tm->tm_wday);
 
-	ret = regmap_raw_write(info->rtc, SEC_RTC_SEC, data, 8);
+	ret = regmap_raw_write(info->regmap, SEC_RTC_SEC, data, 8);
 	if (ret < 0)
 		return ret;
 
@@ -242,20 +267,20 @@ static int s5m_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
 	unsigned int val;
 	int ret, i;
 
-	ret = regmap_bulk_read(info->rtc, SEC_ALARM0_SEC, data, 8);
+	ret = regmap_bulk_read(info->regmap, SEC_ALARM0_SEC, data, 8);
 	if (ret < 0)
 		return ret;
 
 	switch (info->device_type) {
 	case S5M8763X:
 		s5m8763_data_to_tm(data, &alrm->time);
-		ret = regmap_read(info->rtc, SEC_ALARM0_CONF, &val);
+		ret = regmap_read(info->regmap, SEC_ALARM0_CONF, &val);
 		if (ret < 0)
 			return ret;
 
 		alrm->enabled = !!val;
 
-		ret = regmap_read(info->rtc, SEC_RTC_STATUS, &val);
+		ret = regmap_read(info->regmap, SEC_RTC_STATUS, &val);
 		if (ret < 0)
 			return ret;
 
@@ -278,7 +303,7 @@ static int s5m_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
 	}
 
 	alrm->pending = 0;
-	ret = regmap_read(info->rtc, SEC_RTC_STATUS, &val);
+	ret = regmap_read(info->regmap, SEC_RTC_STATUS, &val);
 	if (ret < 0)
 		return ret;
 	break;
@@ -301,7 +326,7 @@ static int s5m_rtc_stop_alarm(struct s5m_rtc_info *info)
 	int ret, i;
 	struct rtc_time tm;
 
-	ret = regmap_bulk_read(info->rtc, SEC_ALARM0_SEC, data, 8);
+	ret = regmap_bulk_read(info->regmap, SEC_ALARM0_SEC, data, 8);
 	if (ret < 0)
 		return ret;
 
@@ -312,14 +337,14 @@ static int s5m_rtc_stop_alarm(struct s5m_rtc_info *info)
 
 	switch (info->device_type) {
 	case S5M8763X:
-		ret = regmap_write(info->rtc, SEC_ALARM0_CONF, 0);
+		ret = regmap_write(info->regmap, SEC_ALARM0_CONF, 0);
 		break;
 
 	case S5M8767X:
 		for (i = 0; i < 7; i++)
 			data[i] &= ~ALARM_ENABLE_MASK;
 
-		ret = regmap_raw_write(info->rtc, SEC_ALARM0_SEC, data, 8);
+		ret = regmap_raw_write(info->regmap, SEC_ALARM0_SEC, data, 8);
 		if (ret < 0)
 			return ret;
 
@@ -341,7 +366,7 @@ static int s5m_rtc_start_alarm(struct s5m_rtc_info *info)
 	u8 alarm0_conf;
 	struct rtc_time tm;
 
-	ret = regmap_bulk_read(info->rtc, SEC_ALARM0_SEC, data, 8);
+	ret = regmap_bulk_read(info->regmap, SEC_ALARM0_SEC, data, 8);
 	if (ret < 0)
 		return ret;
 
@@ -353,7 +378,7 @@ static int s5m_rtc_start_alarm(struct s5m_rtc_info *info)
 	switch (info->device_type) {
 	case S5M8763X:
 		alarm0_conf = 0x77;
-		ret = regmap_write(info->rtc, SEC_ALARM0_CONF, alarm0_conf);
+		ret = regmap_write(info->regmap, SEC_ALARM0_CONF, alarm0_conf);
 		break;
 
 	case S5M8767X:
@@ -368,7 +393,7 @@ static int s5m_rtc_start_alarm(struct s5m_rtc_info *info)
 		if (data[RTC_YEAR1] & 0x7f)
 			data[RTC_YEAR1] |= ALARM_ENABLE_MASK;
 
-		ret = regmap_raw_write(info->rtc, SEC_ALARM0_SEC, data, 8);
+		ret = regmap_raw_write(info->regmap, SEC_ALARM0_SEC, data, 8);
 		if (ret < 0)
 			return ret;
 		ret = s5m8767_rtc_set_alarm_reg(info);
@@ -410,7 +435,7 @@ static int s5m_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
 	if (ret < 0)
 		return ret;
 
-	ret = regmap_raw_write(info->rtc, SEC_ALARM0_SEC, data, 8);
+	ret = regmap_raw_write(info->regmap, SEC_ALARM0_SEC, data, 8);
 	if (ret < 0)
 		return ret;
 
@@ -455,7 +480,7 @@ static const struct rtc_class_ops s5m_rtc_ops = {
 static void s5m_rtc_enable_wtsr(struct s5m_rtc_info *info, bool enable)
 {
 	int ret;
-	ret = regmap_update_bits(info->rtc, SEC_WTSR_SMPL_CNTL,
+	ret = regmap_update_bits(info->regmap, SEC_WTSR_SMPL_CNTL,
 			WTSR_ENABLE_MASK,
 			enable ? WTSR_ENABLE_MASK : 0);
 	if (ret < 0)
@@ -466,7 +491,7 @@ static void s5m_rtc_enable_wtsr(struct s5m_rtc_info *info, bool enable)
 static void s5m_rtc_enable_smpl(struct s5m_rtc_info *info, bool enable)
 {
 	int ret;
-	ret = regmap_update_bits(info->rtc, SEC_WTSR_SMPL_CNTL,
+	ret = regmap_update_bits(info->regmap, SEC_WTSR_SMPL_CNTL,
 			SMPL_ENABLE_MASK,
 			enable ? SMPL_ENABLE_MASK : 0);
 	if (ret < 0)
@@ -481,7 +506,7 @@ static int s5m8767_rtc_init_reg(struct s5m_rtc_info *info)
 	int ret;
 	struct rtc_time tm;
 
-	ret = regmap_read(info->rtc, SEC_RTC_UDR_CON, &tp_read);
+	ret = regmap_read(info->regmap, SEC_RTC_UDR_CON, &tp_read);
 	if (ret < 0) {
 		dev_err(info->dev, "%s: fail to read control reg(%d)\n",
 			__func__, ret);
@@ -493,7 +518,7 @@ static int s5m8767_rtc_init_reg(struct s5m_rtc_info *info)
 	data[1] = (0 << BCD_EN_SHIFT) | (1 << MODEL24_SHIFT);
 
 	info->rtc_24hr_mode = 1;
-	ret = regmap_raw_write(info->rtc, SEC_ALARM0_CONF, data, 2);
+	ret = regmap_raw_write(info->regmap, SEC_ALARM0_CONF, data, 2);
 	if (ret < 0) {
 		dev_err(info->dev, "%s: fail to write controlm reg(%d)\n",
 			__func__, ret);
@@ -515,7 +540,7 @@ static int s5m8767_rtc_init_reg(struct s5m_rtc_info *info)
 		ret = s5m_rtc_set_time(info->dev, &tm);
 	}
 
-	ret = regmap_update_bits(info->rtc, SEC_RTC_UDR_CON,
+	ret = regmap_update_bits(info->regmap, SEC_RTC_UDR_CON,
 			RTC_TCON_MASK, tp_read | RTC_TCON_MASK);
 	if (ret < 0)
 		dev_err(info->dev, "%s: fail to update TCON reg(%d)\n",
@@ -542,17 +567,19 @@ static int s5m_rtc_probe(struct platform_device *pdev)
 
 	info->dev = &pdev->dev;
 	info->s5m87xx = s5m87xx;
-	info->rtc = s5m87xx->rtc;
+	info->regmap = s5m87xx->regmap_rtc;
 	info->device_type = s5m87xx->device_type;
 	info->wtsr_smpl = s5m87xx->wtsr_smpl;
 
 	switch (pdata->device_type) {
 	case S5M8763X:
-		info->irq = s5m87xx->irq_base + S5M8763_IRQ_ALARM0;
+		info->irq = regmap_irq_get_virq(s5m87xx->irq_data,
+				S5M8763_IRQ_ALARM0);
 		break;
 
 	case S5M8767X:
-		info->irq = s5m87xx->irq_base + S5M8767_IRQ_RTCA1;
+		info->irq = regmap_irq_get_virq(s5m87xx->irq_data,
+				S5M8767_IRQ_RTCA1);
 		break;
 
 	default:
@@ -596,7 +623,7 @@ static void s5m_rtc_shutdown(struct platform_device *pdev)
 	if (info->wtsr_smpl) {
 		for (i = 0; i < 3; i++) {
 			s5m_rtc_enable_wtsr(info, false);
-			regmap_read(info->rtc, SEC_WTSR_SMPL_CNTL, &val);
+			regmap_read(info->regmap, SEC_WTSR_SMPL_CNTL, &val);
 			pr_debug("%s: WTSR_SMPL reg(0x%02x)\n", __func__, val);
 			if (val & WTSR_ENABLE_MASK)
 				pr_emerg("%s: fail to disable WTSR\n",
@@ -612,6 +639,30 @@ static void s5m_rtc_shutdown(struct platform_device *pdev)
 	s5m_rtc_enable_smpl(info, false);
 }
 
+static int s5m_rtc_resume(struct device *dev)
+{
+	struct s5m_rtc_info *info = dev_get_drvdata(dev);
+	int ret = 0;
+
+	if (device_may_wakeup(dev))
+		ret = disable_irq_wake(info->irq);
+
+	return ret;
+}
+
+static int s5m_rtc_suspend(struct device *dev)
+{
+	struct s5m_rtc_info *info = dev_get_drvdata(dev);
+	int ret = 0;
+
+	if (device_may_wakeup(dev))
+		ret = enable_irq_wake(info->irq);
+
+	return ret;
+}
+
+static SIMPLE_DEV_PM_OPS(s5m_rtc_pm_ops, s5m_rtc_suspend, s5m_rtc_resume);
+
 static const struct platform_device_id s5m_rtc_id[] = {
 	{ "s5m-rtc", 0 },
 };
@@ -620,6 +671,7 @@ static struct platform_driver s5m_rtc_driver = {
 	.driver = {
 		.name = "s5m-rtc",
 		.owner = THIS_MODULE,
+		.pm = &s5m_rtc_pm_ops,
 	},
 	.probe = s5m_rtc_probe,
 	.shutdown = s5m_rtc_shutdown,
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index cee7e2708a1f..95e45782692f 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -3224,6 +3224,8 @@ static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev,
 
 	fcx_multitrack = private->features.feature[40] & 0x20;
 	data_size = blk_rq_bytes(req);
+	if (data_size % blksize)
+		return ERR_PTR(-EINVAL);
 	/* tpm write request add CBC data on each track boundary */
 	if (rq_data_dir(req) == WRITE)
 		data_size += (last_trk - first_trk) * 4;
diff --git a/drivers/s390/block/dasd_genhd.c b/drivers/s390/block/dasd_genhd.c
index f64921756ad6..f224d59c4b6b 100644
--- a/drivers/s390/block/dasd_genhd.c
+++ b/drivers/s390/block/dasd_genhd.c
@@ -87,7 +87,6 @@ void dasd_gendisk_free(struct dasd_block *block)
 {
 	if (block->gdp) {
 		del_gendisk(block->gdp);
-		block->gdp->queue = NULL;
 		block->gdp->private_data = NULL;
 		put_disk(block->gdp);
 		block->gdp = NULL;
diff --git a/drivers/s390/char/sclp_early.c b/drivers/s390/char/sclp_early.c
index f7aa080e9b28..1465e9563101 100644
--- a/drivers/s390/char/sclp_early.c
+++ b/drivers/s390/char/sclp_early.c
@@ -35,7 +35,6 @@ struct read_info_sccb {
 	u8 _reserved5[4096 - 112]; /* 112-4095 */
 } __packed __aligned(PAGE_SIZE);
 
-static __initdata struct init_sccb early_event_mask_sccb __aligned(PAGE_SIZE);
 static __initdata struct read_info_sccb early_read_info_sccb;
 static __initdata char sccb_early[PAGE_SIZE] __aligned(PAGE_SIZE);
 static unsigned long sclp_hsa_size;
@@ -113,7 +112,7 @@ static void __init sclp_facilities_detect(void)
 
 bool __init sclp_has_linemode(void)
 {
-	struct init_sccb *sccb = &early_event_mask_sccb;
+	struct init_sccb *sccb = (void *) &sccb_early;
 
 	if (sccb->header.response_code != 0x20)
 		return 0;
@@ -126,7 +125,7 @@ bool __init sclp_has_linemode(void)
 
 bool __init sclp_has_vt220(void)
 {
-	struct init_sccb *sccb = &early_event_mask_sccb;
+	struct init_sccb *sccb = (void *) &sccb_early;
 
 	if (sccb->header.response_code != 0x20)
 		return 0;
diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c
index 3f4ca4e09a4c..34629ea913d4 100644
--- a/drivers/s390/char/tty3270.c
+++ b/drivers/s390/char/tty3270.c
@@ -942,7 +942,7 @@ static int tty3270_install(struct tty_driver *driver, struct tty_struct *tty)
 		return rc;
 	}
 
-	tp->screen = tty3270_alloc_screen(tp->view.cols, tp->view.rows);
+	tp->screen = tty3270_alloc_screen(tp->view.rows, tp->view.cols);
 	if (IS_ERR(tp->screen)) {
 		rc = PTR_ERR(tp->screen);
 		raw3270_put_view(&tp->view);
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index 5e1e12c0cf42..0a7325361d29 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -2025,7 +2025,8 @@ static struct scsi_host_template driver_template = {
 	.cmd_per_lun = TW_MAX_CMDS_PER_LUN,
 	.use_clustering = ENABLE_CLUSTERING,
 	.shost_attrs = twa_host_attrs,
-	.emulated = 1
+	.emulated = 1,
+	.no_write_same = 1,
 };
 
 /* This function will probe and initialize a card */
diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c
index c845bdbeb6c0..4de346017e9f 100644
--- a/drivers/scsi/3w-sas.c
+++ b/drivers/scsi/3w-sas.c
@@ -1600,7 +1600,8 @@ static struct scsi_host_template driver_template = {
 	.cmd_per_lun = TW_MAX_CMDS_PER_LUN,
 	.use_clustering = ENABLE_CLUSTERING,
 	.shost_attrs = twl_host_attrs,
-	.emulated = 1
+	.emulated = 1,
+	.no_write_same = 1,
 };
 
 /* This function will probe and initialize a card */
diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
index b9276d10b25c..752624e6bc00 100644
--- a/drivers/scsi/3w-xxxx.c
+++ b/drivers/scsi/3w-xxxx.c
@@ -2279,7 +2279,8 @@ static struct scsi_host_template driver_template = {
 	.cmd_per_lun = TW_MAX_CMDS_PER_LUN,
 	.use_clustering = ENABLE_CLUSTERING,
 	.shost_attrs = tw_host_attrs,
-	.emulated = 1
+	.emulated = 1,
+	.no_write_same = 1,
 };
 
 /* This function will probe and initialize a card */
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index f0d432c139d0..4921ed19a027 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -1081,6 +1081,7 @@ static struct scsi_host_template aac_driver_template = {
 #endif
 	.use_clustering = ENABLE_CLUSTERING,
 	.emulated = 1,
+	.no_write_same = 1,
 };
 
 static void __aac_shutdown(struct aac_dev * aac)
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index 97fd450aff09..4f6a30b8e5f9 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -137,6 +137,7 @@ static struct scsi_host_template arcmsr_scsi_host_template = {
 	.cmd_per_lun = ARCMSR_MAX_CMD_PERLUN,
 	.use_clustering = ENABLE_CLUSTERING,
 	.shost_attrs = arcmsr_host_attrs,
+	.no_write_same = 1,
 };
 static struct pci_device_id arcmsr_device_id_table[] = {
 	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1110)},
diff --git a/drivers/scsi/bfa/bfa_fcs.h b/drivers/scsi/bfa/bfa_fcs.h
index 94d5d0102f7d..42bcb970445a 100644
--- a/drivers/scsi/bfa/bfa_fcs.h
+++ b/drivers/scsi/bfa/bfa_fcs.h
@@ -296,6 +296,7 @@ wwn_t bfa_fcs_lport_get_rport(struct bfa_fcs_lport_s *port, wwn_t wwn,
 struct bfa_fcs_lport_s *bfa_fcs_lookup_port(struct bfa_fcs_s *fcs,
 		u16 vf_id, wwn_t lpwwn);
 
+void bfa_fcs_lport_set_symname(struct bfa_fcs_lport_s *port, char *symname);
 void bfa_fcs_lport_get_info(struct bfa_fcs_lport_s *port,
 		struct bfa_lport_info_s *port_info);
 void bfa_fcs_lport_get_attr(struct bfa_fcs_lport_s *port,
diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c
index 2f61a5af3658..f5e4e61a0fd7 100644
--- a/drivers/scsi/bfa/bfa_fcs_lport.c
+++ b/drivers/scsi/bfa/bfa_fcs_lport.c
@@ -1097,6 +1097,17 @@ bfa_fcs_lport_init(struct bfa_fcs_lport_s *lport,
 	bfa_sm_send_event(lport, BFA_FCS_PORT_SM_CREATE);
 }
 
+void
+bfa_fcs_lport_set_symname(struct bfa_fcs_lport_s *port,
+				char *symname)
+{
+	strcpy(port->port_cfg.sym_name.symname, symname);
+
+	if (bfa_sm_cmp_state(port, bfa_fcs_lport_sm_online))
+		bfa_fcs_lport_ns_util_send_rspn_id(
+			BFA_FCS_GET_NS_FROM_PORT(port), NULL);
+}
+
 /*
  * fcs_lport_api
  */
@@ -5140,9 +5151,6 @@ bfa_fcs_lport_ns_util_send_rspn_id(void *cbarg, struct bfa_fcxp_s *fcxp_alloced)
 	u8 *psymbl = &symbl[0];
 	int len;
 
-	if (!bfa_sm_cmp_state(port, bfa_fcs_lport_sm_online))
-		return;
-
 	/* Avoid sending RSPN in the following states. */
 	if (bfa_sm_cmp_state(ns, bfa_fcs_lport_ns_sm_offline) ||
 		bfa_sm_cmp_state(ns, bfa_fcs_lport_ns_sm_plogi_sending) ||
diff --git a/drivers/scsi/bfa/bfad_attr.c b/drivers/scsi/bfa/bfad_attr.c
index e9a681d31223..40be670a1cbc 100644
--- a/drivers/scsi/bfa/bfad_attr.c
+++ b/drivers/scsi/bfa/bfad_attr.c
@@ -593,11 +593,8 @@ bfad_im_vport_set_symbolic_name(struct fc_vport *fc_vport)
 		return;
 
 	spin_lock_irqsave(&bfad->bfad_lock, flags);
-	if (strlen(sym_name) > 0) {
-		strcpy(fcs_vport->lport.port_cfg.sym_name.symname, sym_name);
-		bfa_fcs_lport_ns_util_send_rspn_id(
-			BFA_FCS_GET_NS_FROM_PORT((&fcs_vport->lport)), NULL);
-	}
+	if (strlen(sym_name) > 0)
+		bfa_fcs_lport_set_symname(&fcs_vport->lport, sym_name);
 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
 }
 
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
index ee4fa40a50b1..ce5ef0190bad 100644
--- a/drivers/scsi/gdth.c
+++ b/drivers/scsi/gdth.c
@@ -4684,6 +4684,7 @@ static struct scsi_host_template gdth_template = {
 	.cmd_per_lun = GDTH_MAXC_P_L,
 	.unchecked_isa_dma = 1,
 	.use_clustering = ENABLE_CLUSTERING,
+	.no_write_same = 1,
 };
 
 #ifdef CONFIG_ISA
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index f334859024c0..f2c5005f312a 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -395,6 +395,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
 	shost->use_clustering = sht->use_clustering;
 	shost->ordered_tag = sht->ordered_tag;
 	shost->eh_deadline = shost_eh_deadline * HZ;
+	shost->no_write_same = sht->no_write_same;
 
 	if (sht->supported_mode == MODE_UNKNOWN)
 		/* means we didn't set it ... default to INITIATOR */
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 22f6432eb475..20a5e6ecf945 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -561,6 +561,7 @@ static struct scsi_host_template hpsa_driver_template = {
 	.sdev_attrs = hpsa_sdev_attrs,
 	.shost_attrs = hpsa_shost_attrs,
 	.max_sectors = 8192,
+	.no_write_same = 1,
 };
 
 
@@ -1288,7 +1289,7 @@ static void complete_scsi_command(struct CommandList *cp)
 				"has check condition: aborted command: "
 				"ASC: 0x%x, ASCQ: 0x%x\n",
 				cp, asc, ascq);
-			cmd->result = DID_SOFT_ERROR << 16;
+			cmd->result |= DID_SOFT_ERROR << 16;
 			break;
 		}
 		/* Must be some other type of check condition */
@@ -4925,7 +4926,7 @@ reinit_after_soft_reset:
 	hpsa_hba_inquiry(h);
 	hpsa_register_scsi(h);	/* hook ourselves into SCSI subsystem */
 	start_controller_lockup_detector(h);
-	return 1;
+	return 0;
 
 clean4:
 	hpsa_free_sg_chain_blocks(h);
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 36ac1c34ce97..573f4128b6b6 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -6305,7 +6305,8 @@ static struct scsi_host_template driver_template = {
 	.use_clustering = ENABLE_CLUSTERING,
 	.shost_attrs = ipr_ioa_attrs,
 	.sdev_attrs = ipr_dev_attrs,
-	.proc_name = IPR_NAME
+	.proc_name = IPR_NAME,
+	.no_write_same = 1,
 };
 
 /**
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
index 8d5ea8a1e5a6..52a216f21ae5 100644
--- a/drivers/scsi/ips.c
+++ b/drivers/scsi/ips.c
@@ -374,6 +374,7 @@ static struct scsi_host_template ips_driver_template = {
 	.sg_tablesize = IPS_MAX_SG,
 	.cmd_per_lun = 3,
 	.use_clustering = ENABLE_CLUSTERING,
+	.no_write_same = 1,
 };
 
 
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 161c98efade9..d2895836f9fa 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -211,7 +211,7 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc)
 		qc->tf.nsect = 0;
 	}
 
-	ata_tf_to_fis(&qc->tf, 1, 0, (u8*)&task->ata_task.fis);
+	ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, (u8 *)&task->ata_task.fis);
 	task->uldd_task = qc;
 	if (ata_is_atapi(qc->tf.protocol)) {
 		memcpy(task->ata_task.atapi_packet, qc->cdb, qc->dev->cdb_len);
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index 90c95a3385d1..816db12ef5d5 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -4244,6 +4244,7 @@ static struct scsi_host_template megaraid_template = {
 	.eh_device_reset_handler = megaraid_reset,
 	.eh_bus_reset_handler = megaraid_reset,
 	.eh_host_reset_handler = megaraid_reset,
+	.no_write_same = 1,
 };
 
 static int
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
index d1a4b82836ea..e2237a97cb9d 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.c
+++ b/drivers/scsi/megaraid/megaraid_mbox.c
@@ -367,6 +367,7 @@ static struct scsi_host_template megaraid_template_g = {
 	.eh_host_reset_handler = megaraid_reset_handler,
 	.change_queue_depth = megaraid_change_queue_depth,
 	.use_clustering = ENABLE_CLUSTERING,
+	.no_write_same = 1,
 	.sdev_attrs = megaraid_sdev_attrs,
 	.shost_attrs = megaraid_shost_attrs,
 };
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 0a743a5d1647..c99812bf2a73 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -2148,6 +2148,7 @@ static struct scsi_host_template megasas_template = {
 	.bios_param = megasas_bios_param,
 	.use_clustering = ENABLE_CLUSTERING,
 	.change_queue_depth = megasas_change_queue_depth,
+	.no_write_same = 1,
 };
 
 /**
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index f16ece91b94a..0a1296a87d66 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -3403,6 +3403,7 @@ hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
 	unsigned long flags;
 	u8 deviceType = pPayload->sas_identify.dev_type;
 	port->port_state = portstate;
+	phy->phy_state = PHY_STATE_LINK_UP_SPC;
 	PM8001_MSG_DBG(pm8001_ha,
 		pm8001_printk("HW_EVENT_SAS_PHY_UP port id = %d, phy id = %d\n",
 		port_id, phy_id));
@@ -3483,6 +3484,7 @@ hw_event_sata_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
 		pm8001_printk("HW_EVENT_SATA_PHY_UP port id = %d,"
 			" phy id = %d\n", port_id, phy_id));
 	port->port_state = portstate;
+	phy->phy_state = PHY_STATE_LINK_UP_SPC;
 	port->port_attached = 1;
 	pm8001_get_lrate_mode(phy, link_rate);
 	phy->phy_type |= PORT_TYPE_SATA;
diff --git a/drivers/scsi/pm8001/pm8001_hwi.h b/drivers/scsi/pm8001/pm8001_hwi.h
index 6d91e2446542..e4867e690c84 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.h
+++ b/drivers/scsi/pm8001/pm8001_hwi.h
@@ -131,6 +131,10 @@
 #define LINKRATE_30 (0x02 << 8)
 #define LINKRATE_60 (0x04 << 8)
 
+/* for phy state */
+
+#define PHY_STATE_LINK_UP_SPC 0x1
+
 /* for new SPC controllers MEMBASE III is shared between BIOS and DATA */
 #define GSM_SM_BASE 0x4F0000
 struct mpi_msg_hdr{
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index 34f5f5ffef05..73a120d81b4d 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -175,20 +175,16 @@ static void pm8001_free(struct pm8001_hba_info *pm8001_ha)
 static void pm8001_tasklet(unsigned long opaque)
 {
 	struct pm8001_hba_info *pm8001_ha;
-	u32 vec;
-	pm8001_ha = (struct pm8001_hba_info *)opaque;
+	struct isr_param *irq_vector;
+
+	irq_vector = (struct isr_param *)opaque;
+	pm8001_ha = irq_vector->drv_inst;
 	if (unlikely(!pm8001_ha))
 		BUG_ON(1);
-	vec = pm8001_ha->int_vector;
-	PM8001_CHIP_DISP->isr(pm8001_ha, vec);
+	PM8001_CHIP_DISP->isr(pm8001_ha, irq_vector->irq_id);
 }
 #endif
 
-static struct pm8001_hba_info *outq_to_hba(u8 *outq)
-{
-	return container_of((outq - *outq), struct pm8001_hba_info, outq[0]);
-}
-
 /**
  * pm8001_interrupt_handler_msix - main MSIX interrupt handler.
  * It obtains the vector number and calls the equivalent bottom
@@ -198,18 +194,20 @@ static struct pm8001_hba_info *outq_to_hba(u8 *outq)
  */
 static irqreturn_t pm8001_interrupt_handler_msix(int irq, void *opaque)
 {
-	struct pm8001_hba_info *pm8001_ha = outq_to_hba(opaque);
-	u8 outq = *(u8 *)opaque;
+	struct isr_param *irq_vector;
+	struct pm8001_hba_info *pm8001_ha;
 	irqreturn_t ret = IRQ_HANDLED;
+	irq_vector = (struct isr_param *)opaque;
+	pm8001_ha = irq_vector->drv_inst;
+
 	if (unlikely(!pm8001_ha))
 		return IRQ_NONE;
 	if (!PM8001_CHIP_DISP->is_our_interupt(pm8001_ha))
 		return IRQ_NONE;
-	pm8001_ha->int_vector = outq;
 #ifdef PM8001_USE_TASKLET
-	tasklet_schedule(&pm8001_ha->tasklet);
+	tasklet_schedule(&pm8001_ha->tasklet[irq_vector->irq_id]);
 #else
-	ret = PM8001_CHIP_DISP->isr(pm8001_ha, outq);
+	ret = PM8001_CHIP_DISP->isr(pm8001_ha, irq_vector->irq_id);
 #endif
 	return ret;
 }
@@ -230,9 +228,8 @@ static irqreturn_t pm8001_interrupt_handler_intx(int irq, void *dev_id)
 	if (!PM8001_CHIP_DISP->is_our_interupt(pm8001_ha))
 		return IRQ_NONE;
 
-	pm8001_ha->int_vector = 0;
 #ifdef PM8001_USE_TASKLET
-	tasklet_schedule(&pm8001_ha->tasklet);
+	tasklet_schedule(&pm8001_ha->tasklet[0]);
 #else
 	ret = PM8001_CHIP_DISP->isr(pm8001_ha, 0);
 #endif
@@ -457,7 +454,7 @@ static struct pm8001_hba_info *pm8001_pci_alloc(struct pci_dev *pdev,
 {
 	struct pm8001_hba_info *pm8001_ha;
 	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
-
+	int j;
 
 	pm8001_ha = sha->lldd_ha;
 	if (!pm8001_ha)
@@ -480,12 +477,14 @@ static struct pm8001_hba_info *pm8001_pci_alloc(struct pci_dev *pdev,
 	pm8001_ha->iomb_size = IOMB_SIZE_SPC;
 
 #ifdef PM8001_USE_TASKLET
-	/**
-	 * default tasklet for non msi-x interrupt handler/first msi-x
-	 * interrupt handler
-	 **/
-	tasklet_init(&pm8001_ha->tasklet, pm8001_tasklet,
-			(unsigned long)pm8001_ha);
+	/* Tasklet for non msi-x interrupt handler */
+	if ((!pdev->msix_cap) || (pm8001_ha->chip_id == chip_8001))
+		tasklet_init(&pm8001_ha->tasklet[0], pm8001_tasklet,
+			(unsigned long)&(pm8001_ha->irq_vector[0]));
+	else
+		for (j = 0; j < PM8001_MAX_MSIX_VEC; j++)
+			tasklet_init(&pm8001_ha->tasklet[j], pm8001_tasklet,
+				(unsigned long)&(pm8001_ha->irq_vector[j]));
 #endif
 	pm8001_ioremap(pm8001_ha);
 	if (!pm8001_alloc(pm8001_ha, ent))
@@ -733,19 +732,20 @@ static u32 pm8001_setup_msix(struct pm8001_hba_info *pm8001_ha)
 		"pci_enable_msix request ret:%d no of intr %d\n",
 		rc, pm8001_ha->number_of_intr));
 
-	for (i = 0; i < number_of_intr; i++)
-		pm8001_ha->outq[i] = i;
 
 	for (i = 0; i < number_of_intr; i++) {
 		snprintf(intr_drvname[i], sizeof(intr_drvname[0]),
 			DRV_NAME"%d", i);
+		pm8001_ha->irq_vector[i].irq_id = i;
+		pm8001_ha->irq_vector[i].drv_inst = pm8001_ha;
+
 		if (request_irq(pm8001_ha->msix_entries[i].vector,
 			pm8001_interrupt_handler_msix, flag,
-			intr_drvname[i], &pm8001_ha->outq[i])) {
+			intr_drvname[i], &(pm8001_ha->irq_vector[i]))) {
 			for (j = 0; j < i; j++)
 				free_irq(
 					pm8001_ha->msix_entries[j].vector,
-					&pm8001_ha->outq[j]);
+					&(pm8001_ha->irq_vector[i]));
 			pci_disable_msix(pm8001_ha->pdev);
 			break;
 		}
@@ -907,7 +907,7 @@ static void pm8001_pci_remove(struct pci_dev *pdev)
 {
 	struct sas_ha_struct *sha = pci_get_drvdata(pdev);
 	struct pm8001_hba_info *pm8001_ha;
-	int i;
+	int i, j;
 	pm8001_ha = sha->lldd_ha;
 	sas_unregister_ha(sha);
 	sas_remove_host(pm8001_ha->shost);
@@ -921,13 +921,18 @@ static void pm8001_pci_remove(struct pci_dev *pdev)
 		synchronize_irq(pm8001_ha->msix_entries[i].vector);
 	for (i = 0; i < pm8001_ha->number_of_intr; i++)
 		free_irq(pm8001_ha->msix_entries[i].vector,
-				&pm8001_ha->outq[i]);
+				&(pm8001_ha->irq_vector[i]));
 	pci_disable_msix(pdev);
 #else
 	free_irq(pm8001_ha->irq, sha);
 #endif
 #ifdef PM8001_USE_TASKLET
-	tasklet_kill(&pm8001_ha->tasklet);
+	/* For non-msix and msix interrupts */
+	if ((!pdev->msix_cap) || (pm8001_ha->chip_id == chip_8001))
+		tasklet_kill(&pm8001_ha->tasklet[0]);
+	else
+		for (j = 0; j < PM8001_MAX_MSIX_VEC; j++)
+			tasklet_kill(&pm8001_ha->tasklet[j]);
 #endif
 	pm8001_free(pm8001_ha);
 	kfree(sha->sas_phy);
@@ -948,7 +953,7 @@ static int pm8001_pci_suspend(struct pci_dev *pdev, pm_message_t state)
 {
 	struct sas_ha_struct *sha = pci_get_drvdata(pdev);
 	struct pm8001_hba_info *pm8001_ha;
-	int i;
+	int i, j;
 	u32 device_state;
 	pm8001_ha = sha->lldd_ha;
 	flush_workqueue(pm8001_wq);
@@ -964,13 +969,18 @@ static int pm8001_pci_suspend(struct pci_dev *pdev, pm_message_t state)
 		synchronize_irq(pm8001_ha->msix_entries[i].vector);
 	for (i = 0; i < pm8001_ha->number_of_intr; i++)
 		free_irq(pm8001_ha->msix_entries[i].vector,
-				&pm8001_ha->outq[i]);
+				&(pm8001_ha->irq_vector[i]));
 	pci_disable_msix(pdev);
 #else
 	free_irq(pm8001_ha->irq, sha);
 #endif
 #ifdef PM8001_USE_TASKLET
-	tasklet_kill(&pm8001_ha->tasklet);
+	/* For non-msix and msix interrupts */
+	if ((!pdev->msix_cap) || (pm8001_ha->chip_id == chip_8001))
+		tasklet_kill(&pm8001_ha->tasklet[0]);
+	else
+		for (j = 0; j < PM8001_MAX_MSIX_VEC; j++)
+			tasklet_kill(&pm8001_ha->tasklet[j]);
 #endif
 	device_state = pci_choose_state(pdev, state);
 	pm8001_printk("pdev=0x%p, slot=%s, entering "
@@ -993,7 +1003,7 @@ static int pm8001_pci_resume(struct pci_dev *pdev)
 	struct sas_ha_struct *sha = pci_get_drvdata(pdev);
 	struct pm8001_hba_info *pm8001_ha;
 	int rc;
-	u8 i = 0;
+	u8 i = 0, j;
 	u32 device_state;
 	pm8001_ha = sha->lldd_ha;
 	device_state = pdev->current_state;
@@ -1033,10 +1043,14 @@ static int pm8001_pci_resume(struct pci_dev *pdev)
 	if (rc)
 		goto err_out_disable;
 #ifdef PM8001_USE_TASKLET
-	/* default tasklet for non msi-x interrupt handler/first msi-x
-	 * interrupt handler */
-	tasklet_init(&pm8001_ha->tasklet, pm8001_tasklet,
-			(unsigned long)pm8001_ha);
+	/* Tasklet for non msi-x interrupt handler */
+	if ((!pdev->msix_cap) || (pm8001_ha->chip_id == chip_8001))
+		tasklet_init(&pm8001_ha->tasklet[0], pm8001_tasklet,
+			(unsigned long)&(pm8001_ha->irq_vector[0]));
+	else
+		for (j = 0; j < PM8001_MAX_MSIX_VEC; j++)
+			tasklet_init(&pm8001_ha->tasklet[j], pm8001_tasklet,
+				(unsigned long)&(pm8001_ha->irq_vector[j]));
 #endif
 	PM8001_CHIP_DISP->interrupt_enable(pm8001_ha, 0);
 	if (pm8001_ha->chip_id != chip_8001) {
@@ -1169,6 +1183,7 @@ module_exit(pm8001_exit);
 MODULE_AUTHOR("Jack Wang <jack_wang@usish.com>");
 MODULE_AUTHOR("Anand Kumar Santhanam <AnandKumar.Santhanam@pmcs.com>");
 MODULE_AUTHOR("Sangeetha Gnanasekaran <Sangeetha.Gnanasekaran@pmcs.com>");
+MODULE_AUTHOR("Nikith Ganigarakoppal <Nikith.Ganigarakoppal@pmcs.com>");
 MODULE_DESCRIPTION(
 	"PMC-Sierra PM8001/8081/8088/8089/8074/8076/8077 "
 	"SAS/SATA controller driver");
diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
index f4eb18e51631..f50ac44b950e 100644
--- a/drivers/scsi/pm8001/pm8001_sas.c
+++ b/drivers/scsi/pm8001/pm8001_sas.c
@@ -1098,15 +1098,17 @@ int pm8001_lu_reset(struct domain_device *dev, u8 *lun)
1098 struct pm8001_tmf_task tmf_task; 1098 struct pm8001_tmf_task tmf_task;
1099 struct pm8001_device *pm8001_dev = dev->lldd_dev; 1099 struct pm8001_device *pm8001_dev = dev->lldd_dev;
1100 struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev); 1100 struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev);
1101 DECLARE_COMPLETION_ONSTACK(completion_setstate);
1101 if (dev_is_sata(dev)) { 1102 if (dev_is_sata(dev)) {
1102 struct sas_phy *phy = sas_get_local_phy(dev); 1103 struct sas_phy *phy = sas_get_local_phy(dev);
1103 rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev , 1104 rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev ,
1104 dev, 1, 0); 1105 dev, 1, 0);
1105 rc = sas_phy_reset(phy, 1); 1106 rc = sas_phy_reset(phy, 1);
1106 sas_put_local_phy(phy); 1107 sas_put_local_phy(phy);
1108 pm8001_dev->setds_completion = &completion_setstate;
1107 rc = PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha, 1109 rc = PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
1108 pm8001_dev, 0x01); 1110 pm8001_dev, 0x01);
1109 msleep(2000); 1111 wait_for_completion(&completion_setstate);
1110 } else { 1112 } else {
1111 tmf_task.tmf = TMF_LU_RESET; 1113 tmf_task.tmf = TMF_LU_RESET;
1112 rc = pm8001_issue_ssp_tmf(dev, lun, &tmf_task); 1114 rc = pm8001_issue_ssp_tmf(dev, lun, &tmf_task);
diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h
index 6037d477a183..6c5fd5ee22d3 100644
--- a/drivers/scsi/pm8001/pm8001_sas.h
+++ b/drivers/scsi/pm8001/pm8001_sas.h
@@ -466,6 +466,10 @@ struct pm8001_hba_memspace {
466 u64 membase; 466 u64 membase;
467 u32 memsize; 467 u32 memsize;
468}; 468};
469struct isr_param {
470 struct pm8001_hba_info *drv_inst;
471 u32 irq_id;
472};
469struct pm8001_hba_info { 473struct pm8001_hba_info {
470 char name[PM8001_NAME_LENGTH]; 474 char name[PM8001_NAME_LENGTH];
471 struct list_head list; 475 struct list_head list;
@@ -519,14 +523,13 @@ struct pm8001_hba_info {
519 int number_of_intr;/*will be used in remove()*/ 523 int number_of_intr;/*will be used in remove()*/
520#endif 524#endif
521#ifdef PM8001_USE_TASKLET 525#ifdef PM8001_USE_TASKLET
522 struct tasklet_struct tasklet; 526 struct tasklet_struct tasklet[PM8001_MAX_MSIX_VEC];
523#endif 527#endif
524 u32 logging_level; 528 u32 logging_level;
525 u32 fw_status; 529 u32 fw_status;
526 u32 smp_exp_mode; 530 u32 smp_exp_mode;
527 u32 int_vector;
528 const struct firmware *fw_image; 531 const struct firmware *fw_image;
529 u8 outq[PM8001_MAX_MSIX_VEC]; 532 struct isr_param irq_vector[PM8001_MAX_MSIX_VEC];
530}; 533};
531 534
532struct pm8001_work { 535struct pm8001_work {
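The pm8001 hunks above replace the single tasklet with one tasklet per MSI-X vector, and each tasklet's data argument is the address of an isr_param entry carrying both the owning HBA and the vector index, so the bottom half can tell which outbound queue it is servicing from a single pointer. Below is a minimal userspace sketch of that "one context struct per vector" idea; the struct and field names mirror the diff, but the tasklet_fn/main scaffolding is purely illustrative, not the driver's real API.

#include <stdio.h>

#define MAX_VEC 4

struct hba;			/* forward declaration */

struct isr_param {		/* one context per interrupt vector */
	struct hba *drv_inst;
	unsigned int irq_id;
};

struct hba {
	const char *name;
	struct isr_param irq_vector[MAX_VEC];
};

/* Bottom-half handler: one pointer recovers both the HBA and the vector. */
static void tasklet_fn(unsigned long data)
{
	struct isr_param *p = (struct isr_param *)data;

	printf("%s: servicing outbound queue %u\n",
	       p->drv_inst->name, p->irq_id);
}

int main(void)
{
	struct hba ha = { .name = "pm8001-demo" };
	unsigned int j;

	for (j = 0; j < MAX_VEC; j++) {
		ha.irq_vector[j].drv_inst = &ha;
		ha.irq_vector[j].irq_id = j;
		tasklet_fn((unsigned long)&ha.irq_vector[j]);
	}
	return 0;
}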
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
index 8987b1706216..c950dc5c9943 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.c
+++ b/drivers/scsi/pm8001/pm80xx_hwi.c
@@ -2894,6 +2894,7 @@ hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
2894 unsigned long flags; 2894 unsigned long flags;
2895 u8 deviceType = pPayload->sas_identify.dev_type; 2895 u8 deviceType = pPayload->sas_identify.dev_type;
2896 port->port_state = portstate; 2896 port->port_state = portstate;
2897 phy->phy_state = PHY_STATE_LINK_UP_SPCV;
2897 PM8001_MSG_DBG(pm8001_ha, pm8001_printk( 2898 PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
2898 "portid:%d; phyid:%d; linkrate:%d; " 2899 "portid:%d; phyid:%d; linkrate:%d; "
2899 "portstate:%x; devicetype:%x\n", 2900 "portstate:%x; devicetype:%x\n",
@@ -2978,6 +2979,7 @@ hw_event_sata_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
2978 port_id, phy_id, link_rate, portstate)); 2979 port_id, phy_id, link_rate, portstate));
2979 2980
2980 port->port_state = portstate; 2981 port->port_state = portstate;
2982 phy->phy_state = PHY_STATE_LINK_UP_SPCV;
2981 port->port_attached = 1; 2983 port->port_attached = 1;
2982 pm8001_get_lrate_mode(phy, link_rate); 2984 pm8001_get_lrate_mode(phy, link_rate);
2983 phy->phy_type |= PORT_TYPE_SATA; 2985 phy->phy_type |= PORT_TYPE_SATA;
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.h b/drivers/scsi/pm8001/pm80xx_hwi.h
index c86816bea424..9970a385795d 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.h
+++ b/drivers/scsi/pm8001/pm80xx_hwi.h
@@ -215,6 +215,8 @@
215#define SAS_DOPNRJT_RTRY_TMO 128 215#define SAS_DOPNRJT_RTRY_TMO 128
216#define SAS_COPNRJT_RTRY_TMO 128 216#define SAS_COPNRJT_RTRY_TMO 128
217 217
218/* for phy state */
219#define PHY_STATE_LINK_UP_SPCV 0x2
218/* 220/*
219 Making ORR bigger than IT NEXUS LOSS which is 2000000us = 2 second. 221 Making ORR bigger than IT NEXUS LOSS which is 2000000us = 2 second.
220 Assuming a bigger value 3 second, 3000000/128 = 23437.5 where 128 222 Assuming a bigger value 3 second, 3000000/128 = 23437.5 where 128
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index bd6f743d87a7..be8ce54f99b2 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -1404,11 +1404,22 @@ enum {
1404}; 1404};
1405#define PMCRAID_AEN_CMD_MAX (__PMCRAID_AEN_CMD_MAX - 1) 1405#define PMCRAID_AEN_CMD_MAX (__PMCRAID_AEN_CMD_MAX - 1)
1406 1406
1407static struct genl_multicast_group pmcraid_mcgrps[] = {
1408 { .name = "events", /* not really used - see ID discussion below */ },
1409};
1410
1407static struct genl_family pmcraid_event_family = { 1411static struct genl_family pmcraid_event_family = {
1408 .id = GENL_ID_GENERATE, 1412 /*
1413 * Due to prior multicast group abuse (the code having assumed that
1414 * the family ID can be used as a multicast group ID) we need to
1415 * statically allocate a family (and thus group) ID.
1416 */
1417 .id = GENL_ID_PMCRAID,
1409 .name = "pmcraid", 1418 .name = "pmcraid",
1410 .version = 1, 1419 .version = 1,
1411 .maxattr = PMCRAID_AEN_ATTR_MAX 1420 .maxattr = PMCRAID_AEN_ATTR_MAX,
1421 .mcgrps = pmcraid_mcgrps,
1422 .n_mcgrps = ARRAY_SIZE(pmcraid_mcgrps),
1412}; 1423};
1413 1424
1414/** 1425/**
@@ -1511,9 +1522,8 @@ static int pmcraid_notify_aen(
1511 return result; 1522 return result;
1512 } 1523 }
1513 1524
1514 result = 1525 result = genlmsg_multicast(&pmcraid_event_family, skb,
1515 genlmsg_multicast(&pmcraid_event_family, skb, 0, 1526 0, 0, GFP_ATOMIC);
1516 pmcraid_event_family.id, GFP_ATOMIC);
1517 1527
1518 /* If there are no listeners, genlmsg_multicast may return non-zero 1528 /* If there are no listeners, genlmsg_multicast may return non-zero
1519 * value. 1529 * value.
@@ -4315,6 +4325,7 @@ static struct scsi_host_template pmcraid_host_template = {
4315 .this_id = -1, 4325 .this_id = -1,
4316 .sg_tablesize = PMCRAID_MAX_IOADLS, 4326 .sg_tablesize = PMCRAID_MAX_IOADLS,
4317 .max_sectors = PMCRAID_IOA_MAX_SECTORS, 4327 .max_sectors = PMCRAID_IOA_MAX_SECTORS,
4328 .no_write_same = 1,
4318 .cmd_per_lun = PMCRAID_MAX_CMD_PER_LUN, 4329 .cmd_per_lun = PMCRAID_MAX_CMD_PER_LUN,
4319 .use_clustering = ENABLE_CLUSTERING, 4330 .use_clustering = ENABLE_CLUSTERING,
4320 .shost_attrs = pmcraid_host_attrs, 4331 .shost_attrs = pmcraid_host_attrs,
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 596480022b0a..38a1257e76e1 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -471,7 +471,7 @@ static void qlt_schedule_sess_for_deletion(struct qla_tgt_sess *sess,
471 schedule_delayed_work(&tgt->sess_del_work, 0); 471 schedule_delayed_work(&tgt->sess_del_work, 0);
472 else 472 else
473 schedule_delayed_work(&tgt->sess_del_work, 473 schedule_delayed_work(&tgt->sess_del_work,
474 jiffies - sess->expires); 474 sess->expires - jiffies);
475} 475}
476 476
477/* ha->hardware_lock supposed to be held on entry */ 477/* ha->hardware_lock supposed to be held on entry */
@@ -550,13 +550,14 @@ static void qlt_del_sess_work_fn(struct delayed_work *work)
550 struct scsi_qla_host *vha = tgt->vha; 550 struct scsi_qla_host *vha = tgt->vha;
551 struct qla_hw_data *ha = vha->hw; 551 struct qla_hw_data *ha = vha->hw;
552 struct qla_tgt_sess *sess; 552 struct qla_tgt_sess *sess;
553 unsigned long flags; 553 unsigned long flags, elapsed;
554 554
555 spin_lock_irqsave(&ha->hardware_lock, flags); 555 spin_lock_irqsave(&ha->hardware_lock, flags);
556 while (!list_empty(&tgt->del_sess_list)) { 556 while (!list_empty(&tgt->del_sess_list)) {
557 sess = list_entry(tgt->del_sess_list.next, typeof(*sess), 557 sess = list_entry(tgt->del_sess_list.next, typeof(*sess),
558 del_list_entry); 558 del_list_entry);
559 if (time_after_eq(jiffies, sess->expires)) { 559 elapsed = jiffies;
560 if (time_after_eq(elapsed, sess->expires)) {
560 qlt_undelete_sess(sess); 561 qlt_undelete_sess(sess);
561 562
562 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004, 563 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004,
@@ -566,7 +567,7 @@ static void qlt_del_sess_work_fn(struct delayed_work *work)
566 ha->tgt.tgt_ops->put_sess(sess); 567 ha->tgt.tgt_ops->put_sess(sess);
567 } else { 568 } else {
568 schedule_delayed_work(&tgt->sess_del_work, 569 schedule_delayed_work(&tgt->sess_del_work,
569 jiffies - sess->expires); 570 sess->expires - elapsed);
570 break; 571 break;
571 } 572 }
572 } 573 }
@@ -4290,6 +4291,7 @@ int qlt_lport_register(struct qla_tgt_func_tmpl *qla_tgt_ops, u64 wwpn,
4290 if (rc != 0) { 4291 if (rc != 0) {
4291 ha->tgt.tgt_ops = NULL; 4292 ha->tgt.tgt_ops = NULL;
4292 ha->tgt.target_lport_ptr = NULL; 4293 ha->tgt.target_lport_ptr = NULL;
4294 scsi_host_put(host);
4293 } 4295 }
4294 mutex_unlock(&qla_tgt_mutex); 4296 mutex_unlock(&qla_tgt_mutex);
4295 return rc; 4297 return rc;
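The qla2xxx hunks fix the delayed-work timeout arithmetic: the remaining delay until a session expires is expires - jiffies, not jiffies - expires, which is negative (and therefore a huge unsigned value) until the deadline has actually passed. Because the subtraction is done on unsigned longs, the fixed form also stays correct across counter wraparound. A small self-contained sketch of the idiom; time_after_eq() is re-implemented locally just for the demo.

#include <stdio.h>
#include <limits.h>

/* Wrap-safe comparison, same idea as the kernel's time_after_eq(). */
#define time_after_eq(a, b)	((long)((a) - (b)) >= 0)

int main(void)
{
	unsigned long jiffies = ULONG_MAX - 5;	/* counter about to wrap */
	unsigned long expires = jiffies + 20;	/* deadline 20 ticks away */

	if (!time_after_eq(jiffies, expires))
		printf("remaining delay: %lu ticks\n", expires - jiffies);

	/* The old code scheduled with jiffies - expires instead: */
	printf("wrong delay would have been: %lu ticks\n", jiffies - expires);
	return 0;
}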
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index e6c4bff04339..69725f7c32c1 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -2659,6 +2659,12 @@ static void sd_read_write_same(struct scsi_disk *sdkp, unsigned char *buffer)
2659{ 2659{
2660 struct scsi_device *sdev = sdkp->device; 2660 struct scsi_device *sdev = sdkp->device;
2661 2661
2662 if (sdev->host->no_write_same) {
2663 sdev->no_write_same = 1;
2664
2665 return;
2666 }
2667
2662 if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, INQUIRY) < 0) { 2668 if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, INQUIRY) < 0) {
2663 /* too large values might cause issues with arcmsr */ 2669 /* too large values might cause issues with arcmsr */
2664 int vpd_buf_len = 64; 2670 int vpd_buf_len = 64;
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 1a28f5632797..17d740427240 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -1697,6 +1697,7 @@ static struct scsi_host_template scsi_driver = {
1697 .use_clustering = DISABLE_CLUSTERING, 1697 .use_clustering = DISABLE_CLUSTERING,
1698 /* Make sure we dont get a sg segment crosses a page boundary */ 1698 /* Make sure we dont get a sg segment crosses a page boundary */
1699 .dma_boundary = PAGE_SIZE-1, 1699 .dma_boundary = PAGE_SIZE-1,
1700 .no_write_same = 1,
1700}; 1701};
1701 1702
1702enum { 1703enum {
diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c
index 3ed666fe840a..9025edd7dc45 100644
--- a/drivers/spi/spi-bcm2835.c
+++ b/drivers/spi/spi-bcm2835.c
@@ -377,7 +377,7 @@ out_master_put:
377 377
378static int bcm2835_spi_remove(struct platform_device *pdev) 378static int bcm2835_spi_remove(struct platform_device *pdev)
379{ 379{
380 struct spi_master *master = spi_master_get(platform_get_drvdata(pdev)); 380 struct spi_master *master = platform_get_drvdata(pdev);
381 struct bcm2835_spi *bs = spi_master_get_devdata(master); 381 struct bcm2835_spi *bs = spi_master_get_devdata(master);
382 382
383 free_irq(bs->irq, master); 383 free_irq(bs->irq, master);
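This hunk, and the matching ones in the bcm63xx, mpc512x-psc, mxs, rspi and txx9 drivers below, all drop a spi_master_get() call in the remove path: platform_get_drvdata() simply returns the stored pointer, while spi_master_get() also takes an extra reference that nothing released, so the master was leaked on every unbind. The toy refcount model below shows the shape of the bug under simplified get/put semantics rather than the real driver-core kobject machinery.

#include <stdio.h>
#include <stdlib.h>

struct master {
	int refcount;
};

static struct master *master_get(struct master *m)
{
	m->refcount++;		/* caller now owns an extra reference */
	return m;
}

static void master_put(struct master *m)
{
	if (--m->refcount == 0) {
		printf("master freed\n");
		free(m);
	}
}

int main(void)
{
	struct master *m = calloc(1, sizeof(*m));

	m->refcount = 1;	/* reference held by registration */

	/* Old remove(): takes a reference it never drops. */
	struct master *leaked = master_get(m);
	(void)leaked;
	master_put(m);		/* unregister drops the original ref... */
	printf("refcount still %d -> leak\n", m->refcount);

	/* Fixed remove(): use the drvdata pointer directly, no extra get. */
	return 0;
}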
diff --git a/drivers/spi/spi-bcm63xx.c b/drivers/spi/spi-bcm63xx.c
index 80d56b214eb5..469ecd876358 100644
--- a/drivers/spi/spi-bcm63xx.c
+++ b/drivers/spi/spi-bcm63xx.c
@@ -435,7 +435,7 @@ out:
435 435
436static int bcm63xx_spi_remove(struct platform_device *pdev) 436static int bcm63xx_spi_remove(struct platform_device *pdev)
437{ 437{
438 struct spi_master *master = spi_master_get(platform_get_drvdata(pdev)); 438 struct spi_master *master = platform_get_drvdata(pdev);
439 struct bcm63xx_spi *bs = spi_master_get_devdata(master); 439 struct bcm63xx_spi *bs = spi_master_get_devdata(master);
440 440
441 /* reset spi block */ 441 /* reset spi block */
diff --git a/drivers/spi/spi-mpc512x-psc.c b/drivers/spi/spi-mpc512x-psc.c
index 9602bbd8d7ea..87676587d783 100644
--- a/drivers/spi/spi-mpc512x-psc.c
+++ b/drivers/spi/spi-mpc512x-psc.c
@@ -557,7 +557,7 @@ free_master:
557 557
558static int mpc512x_psc_spi_do_remove(struct device *dev) 558static int mpc512x_psc_spi_do_remove(struct device *dev)
559{ 559{
560 struct spi_master *master = spi_master_get(dev_get_drvdata(dev)); 560 struct spi_master *master = dev_get_drvdata(dev);
561 struct mpc512x_psc_spi *mps = spi_master_get_devdata(master); 561 struct mpc512x_psc_spi *mps = spi_master_get_devdata(master);
562 562
563 clk_disable_unprepare(mps->clk_mclk); 563 clk_disable_unprepare(mps->clk_mclk);
diff --git a/drivers/spi/spi-mxs.c b/drivers/spi/spi-mxs.c
index 73afb56c08cc..3adebfa22e3d 100644
--- a/drivers/spi/spi-mxs.c
+++ b/drivers/spi/spi-mxs.c
@@ -565,7 +565,7 @@ static int mxs_spi_remove(struct platform_device *pdev)
565 struct mxs_spi *spi; 565 struct mxs_spi *spi;
566 struct mxs_ssp *ssp; 566 struct mxs_ssp *ssp;
567 567
568 master = spi_master_get(platform_get_drvdata(pdev)); 568 master = platform_get_drvdata(pdev);
569 spi = spi_master_get_devdata(master); 569 spi = spi_master_get_devdata(master);
570 ssp = &spi->ssp; 570 ssp = &spi->ssp;
571 571
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index cb0e1f1137ad..7765b1999537 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -1073,6 +1073,8 @@ pxa2xx_spi_acpi_get_pdata(struct platform_device *pdev)
1073static struct acpi_device_id pxa2xx_spi_acpi_match[] = { 1073static struct acpi_device_id pxa2xx_spi_acpi_match[] = {
1074 { "INT33C0", 0 }, 1074 { "INT33C0", 0 },
1075 { "INT33C1", 0 }, 1075 { "INT33C1", 0 },
1076 { "INT3430", 0 },
1077 { "INT3431", 0 },
1076 { "80860F0E", 0 }, 1078 { "80860F0E", 0 },
1077 { }, 1079 { },
1078}; 1080};
@@ -1291,6 +1293,9 @@ static int pxa2xx_spi_resume(struct device *dev)
1291 /* Enable the SSP clock */ 1293 /* Enable the SSP clock */
1292 clk_prepare_enable(ssp->clk); 1294 clk_prepare_enable(ssp->clk);
1293 1295
1296 /* Restore LPSS private register bits */
1297 lpss_ssp_setup(drv_data);
1298
1294 /* Start the queue running */ 1299 /* Start the queue running */
1295 status = spi_master_resume(drv_data->master); 1300 status = spi_master_resume(drv_data->master);
1296 if (status != 0) { 1301 if (status != 0) {
diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c
index 58449ad4ad0d..9e829cee7357 100644
--- a/drivers/spi/spi-rspi.c
+++ b/drivers/spi/spi-rspi.c
@@ -885,14 +885,13 @@ static void rspi_release_dma(struct rspi_data *rspi)
885 885
886static int rspi_remove(struct platform_device *pdev) 886static int rspi_remove(struct platform_device *pdev)
887{ 887{
888 struct rspi_data *rspi = spi_master_get(platform_get_drvdata(pdev)); 888 struct rspi_data *rspi = platform_get_drvdata(pdev);
889 889
890 spi_unregister_master(rspi->master); 890 spi_unregister_master(rspi->master);
891 rspi_release_dma(rspi); 891 rspi_release_dma(rspi);
892 free_irq(platform_get_irq(pdev, 0), rspi); 892 free_irq(platform_get_irq(pdev, 0), rspi);
893 clk_put(rspi->clk); 893 clk_put(rspi->clk);
894 iounmap(rspi->addr); 894 iounmap(rspi->addr);
895 spi_master_put(rspi->master);
896 895
897 return 0; 896 return 0;
898} 897}
diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c
index 0b71270fbf67..4396bd448540 100644
--- a/drivers/spi/spi-ti-qspi.c
+++ b/drivers/spi/spi-ti-qspi.c
@@ -161,7 +161,7 @@ static int ti_qspi_setup(struct spi_device *spi)
161 qspi->spi_max_frequency, clk_div); 161 qspi->spi_max_frequency, clk_div);
162 162
163 ret = pm_runtime_get_sync(qspi->dev); 163 ret = pm_runtime_get_sync(qspi->dev);
164 if (ret) { 164 if (ret < 0) {
165 dev_err(qspi->dev, "pm_runtime_get_sync() failed\n"); 165 dev_err(qspi->dev, "pm_runtime_get_sync() failed\n");
166 return ret; 166 return ret;
167 } 167 }
@@ -459,11 +459,10 @@ static int ti_qspi_probe(struct platform_device *pdev)
459 if (!of_property_read_u32(np, "num-cs", &num_cs)) 459 if (!of_property_read_u32(np, "num-cs", &num_cs))
460 master->num_chipselect = num_cs; 460 master->num_chipselect = num_cs;
461 461
462 platform_set_drvdata(pdev, master);
463
464 qspi = spi_master_get_devdata(master); 462 qspi = spi_master_get_devdata(master);
465 qspi->master = master; 463 qspi->master = master;
466 qspi->dev = &pdev->dev; 464 qspi->dev = &pdev->dev;
465 platform_set_drvdata(pdev, qspi);
467 466
468 r = platform_get_resource(pdev, IORESOURCE_MEM, 0); 467 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
469 468
@@ -517,10 +516,26 @@ free_master:
517 516
518static int ti_qspi_remove(struct platform_device *pdev) 517static int ti_qspi_remove(struct platform_device *pdev)
519{ 518{
520 struct ti_qspi *qspi = platform_get_drvdata(pdev); 519 struct spi_master *master;
520 struct ti_qspi *qspi;
521 int ret;
522
523 master = platform_get_drvdata(pdev);
524 qspi = spi_master_get_devdata(master);
525
526 ret = pm_runtime_get_sync(qspi->dev);
527 if (ret < 0) {
528 dev_err(qspi->dev, "pm_runtime_get_sync() failed\n");
529 return ret;
530 }
521 531
522 ti_qspi_write(qspi, QSPI_WC_INT_DISABLE, QSPI_INTR_ENABLE_CLEAR_REG); 532 ti_qspi_write(qspi, QSPI_WC_INT_DISABLE, QSPI_INTR_ENABLE_CLEAR_REG);
523 533
534 pm_runtime_put(qspi->dev);
535 pm_runtime_disable(&pdev->dev);
536
537 spi_unregister_master(master);
538
524 return 0; 539 return 0;
525} 540}
526 541
diff --git a/drivers/spi/spi-txx9.c b/drivers/spi/spi-txx9.c
index 637cce2b8bdd..18c9bb2b5f39 100644
--- a/drivers/spi/spi-txx9.c
+++ b/drivers/spi/spi-txx9.c
@@ -425,7 +425,7 @@ exit:
425 425
426static int txx9spi_remove(struct platform_device *dev) 426static int txx9spi_remove(struct platform_device *dev)
427{ 427{
428 struct spi_master *master = spi_master_get(platform_get_drvdata(dev)); 428 struct spi_master *master = platform_get_drvdata(dev);
429 struct txx9spi *c = spi_master_get_devdata(master); 429 struct txx9spi *c = spi_master_get_devdata(master);
430 430
431 destroy_workqueue(c->workqueue); 431 destroy_workqueue(c->workqueue);
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 9f26797e4319..63613a96233c 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -370,6 +370,17 @@ static void spi_dev_set_name(struct spi_device *spi)
370 spi->chip_select); 370 spi->chip_select);
371} 371}
372 372
373static int spi_dev_check(struct device *dev, void *data)
374{
375 struct spi_device *spi = to_spi_device(dev);
376 struct spi_device *new_spi = data;
377
378 if (spi->master == new_spi->master &&
379 spi->chip_select == new_spi->chip_select)
380 return -EBUSY;
381 return 0;
382}
383
373/** 384/**
374 * spi_add_device - Add spi_device allocated with spi_alloc_device 385 * spi_add_device - Add spi_device allocated with spi_alloc_device
375 * @spi: spi_device to register 386 * @spi: spi_device to register
@@ -384,7 +395,6 @@ int spi_add_device(struct spi_device *spi)
384 static DEFINE_MUTEX(spi_add_lock); 395 static DEFINE_MUTEX(spi_add_lock);
385 struct spi_master *master = spi->master; 396 struct spi_master *master = spi->master;
386 struct device *dev = master->dev.parent; 397 struct device *dev = master->dev.parent;
387 struct device *d;
388 int status; 398 int status;
389 399
390 /* Chipselects are numbered 0..max; validate. */ 400 /* Chipselects are numbered 0..max; validate. */
@@ -404,12 +414,10 @@ int spi_add_device(struct spi_device *spi)
404 */ 414 */
405 mutex_lock(&spi_add_lock); 415 mutex_lock(&spi_add_lock);
406 416
407 d = bus_find_device_by_name(&spi_bus_type, NULL, dev_name(&spi->dev)); 417 status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
408 if (d != NULL) { 418 if (status) {
409 dev_err(dev, "chipselect %d already in use\n", 419 dev_err(dev, "chipselect %d already in use\n",
410 spi->chip_select); 420 spi->chip_select);
411 put_device(d);
412 status = -EBUSY;
413 goto done; 421 goto done;
414 } 422 }
415 423
@@ -591,8 +599,10 @@ static int spi_transfer_one_message(struct spi_master *master,
591 goto out; 599 goto out;
592 } 600 }
593 601
594 if (ret > 0) 602 if (ret > 0) {
603 ret = 0;
595 wait_for_completion(&master->xfer_completion); 604 wait_for_completion(&master->xfer_completion);
605 }
596 606
597 trace_spi_transfer_stop(msg, xfer); 607 trace_spi_transfer_stop(msg, xfer);
598 608
@@ -632,7 +642,7 @@ out:
632 * 642 *
633 * Called by SPI drivers using the core transfer_one_message() 643 * Called by SPI drivers using the core transfer_one_message()
634 * implementation to notify it that the current interrupt driven 644 * implementation to notify it that the current interrupt driven
635 * transfer has finised and the next one may be scheduled. 645 * transfer has finished and the next one may be scheduled.
636 */ 646 */
637void spi_finalize_current_transfer(struct spi_master *master) 647void spi_finalize_current_transfer(struct spi_master *master)
638{ 648{
@@ -735,7 +745,9 @@ static void spi_pump_messages(struct kthread_work *work)
735 ret = master->transfer_one_message(master, master->cur_msg); 745 ret = master->transfer_one_message(master, master->cur_msg);
736 if (ret) { 746 if (ret) {
737 dev_err(&master->dev, 747 dev_err(&master->dev,
738 "failed to transfer one message from queue\n"); 748 "failed to transfer one message from queue: %d\n", ret);
749 master->cur_msg->status = ret;
750 spi_finalize_current_message(master);
739 return; 751 return;
740 } 752 }
741} 753}
@@ -1412,7 +1424,7 @@ int devm_spi_register_master(struct device *dev, struct spi_master *master)
1412 return -ENOMEM; 1424 return -ENOMEM;
1413 1425
1414 ret = spi_register_master(master); 1426 ret = spi_register_master(master);
1415 if (ret != 0) { 1427 if (!ret) {
1416 *ptr = master; 1428 *ptr = master;
1417 devres_add(dev, ptr); 1429 devres_add(dev, ptr);
1418 } else { 1430 } else {
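Two spi core changes in the hunks above are worth spelling out: the duplicate-chipselect test now walks the SPI bus with a predicate callback (iteration stops as soon as the callback returns non-zero and that value is propagated), and devm_spi_register_master() now stashes the devres pointer only when registration succeeded (!ret), where it previously did so on failure. The sketch below shows the "stop on first non-zero return" iteration contract with made-up device data, not the real driver core.

#include <stdio.h>

struct spi_dev {
	int bus_id;
	int chip_select;
};

/* Walk all devices; stop and return the first non-zero callback result. */
static int for_each_dev(struct spi_dev *devs, int n, void *data,
			int (*fn)(struct spi_dev *, void *))
{
	int i, ret;

	for (i = 0; i < n; i++) {
		ret = fn(&devs[i], data);
		if (ret)
			return ret;
	}
	return 0;
}

static int dev_check(struct spi_dev *dev, void *data)
{
	struct spi_dev *new_dev = data;

	if (dev->bus_id == new_dev->bus_id &&
	    dev->chip_select == new_dev->chip_select)
		return -16;	/* -EBUSY */
	return 0;
}

int main(void)
{
	struct spi_dev existing[] = { { 0, 0 }, { 0, 1 } };
	struct spi_dev new_dev = { 0, 1 };

	if (for_each_dev(existing, 2, &new_dev, dev_check))
		printf("chipselect %d already in use\n", new_dev.chip_select);
	return 0;
}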
diff --git a/drivers/staging/bcm/Bcmnet.c b/drivers/staging/bcm/Bcmnet.c
index 53fee2f9a498..8dfdd2732bdc 100644
--- a/drivers/staging/bcm/Bcmnet.c
+++ b/drivers/staging/bcm/Bcmnet.c
@@ -39,7 +39,8 @@ static INT bcm_close(struct net_device *dev)
39 return 0; 39 return 0;
40} 40}
41 41
42static u16 bcm_select_queue(struct net_device *dev, struct sk_buff *skb) 42static u16 bcm_select_queue(struct net_device *dev, struct sk_buff *skb,
43 void *accel_priv)
43{ 44{
44 return ClassifyPacket(netdev_priv(dev), skb); 45 return ClassifyPacket(netdev_priv(dev), skb);
45} 46}
diff --git a/drivers/staging/btmtk_usb/btmtk_usb.c b/drivers/staging/btmtk_usb/btmtk_usb.c
index 7a9bf3b57810..9a5ebd6cc512 100644
--- a/drivers/staging/btmtk_usb/btmtk_usb.c
+++ b/drivers/staging/btmtk_usb/btmtk_usb.c
@@ -1284,9 +1284,8 @@ done:
1284 kfree_skb(skb); 1284 kfree_skb(skb);
1285} 1285}
1286 1286
1287static int btmtk_usb_send_frame(struct sk_buff *skb) 1287static int btmtk_usb_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
1288{ 1288{
1289 struct hci_dev *hdev = (struct hci_dev *)skb->dev;
1290 struct btmtk_usb_data *data = hci_get_drvdata(hdev); 1289 struct btmtk_usb_data *data = hci_get_drvdata(hdev);
1291 struct usb_ctrlrequest *dr; 1290 struct usb_ctrlrequest *dr;
1292 struct urb *urb; 1291 struct urb *urb;
diff --git a/drivers/staging/comedi/drivers.c b/drivers/staging/comedi/drivers.c
index 8f02bf66e20b..4964d2a2fc7d 100644
--- a/drivers/staging/comedi/drivers.c
+++ b/drivers/staging/comedi/drivers.c
@@ -446,7 +446,7 @@ int comedi_load_firmware(struct comedi_device *dev,
446 release_firmware(fw); 446 release_firmware(fw);
447 } 447 }
448 448
449 return ret; 449 return ret < 0 ? ret : 0;
450} 450}
451EXPORT_SYMBOL_GPL(comedi_load_firmware); 451EXPORT_SYMBOL_GPL(comedi_load_firmware);
452 452
diff --git a/drivers/staging/comedi/drivers/8255_pci.c b/drivers/staging/comedi/drivers/8255_pci.c
index 432e3f9c3301..c55f234b29e6 100644
--- a/drivers/staging/comedi/drivers/8255_pci.c
+++ b/drivers/staging/comedi/drivers/8255_pci.c
@@ -63,7 +63,8 @@ enum pci_8255_boardid {
63 BOARD_ADLINK_PCI7296, 63 BOARD_ADLINK_PCI7296,
64 BOARD_CB_PCIDIO24, 64 BOARD_CB_PCIDIO24,
65 BOARD_CB_PCIDIO24H, 65 BOARD_CB_PCIDIO24H,
66 BOARD_CB_PCIDIO48H, 66 BOARD_CB_PCIDIO48H_OLD,
67 BOARD_CB_PCIDIO48H_NEW,
67 BOARD_CB_PCIDIO96H, 68 BOARD_CB_PCIDIO96H,
68 BOARD_NI_PCIDIO96, 69 BOARD_NI_PCIDIO96,
69 BOARD_NI_PCIDIO96B, 70 BOARD_NI_PCIDIO96B,
@@ -106,11 +107,16 @@ static const struct pci_8255_boardinfo pci_8255_boards[] = {
106 .dio_badr = 2, 107 .dio_badr = 2,
107 .n_8255 = 1, 108 .n_8255 = 1,
108 }, 109 },
109 [BOARD_CB_PCIDIO48H] = { 110 [BOARD_CB_PCIDIO48H_OLD] = {
110 .name = "cb_pci-dio48h", 111 .name = "cb_pci-dio48h",
111 .dio_badr = 1, 112 .dio_badr = 1,
112 .n_8255 = 2, 113 .n_8255 = 2,
113 }, 114 },
115 [BOARD_CB_PCIDIO48H_NEW] = {
116 .name = "cb_pci-dio48h",
117 .dio_badr = 2,
118 .n_8255 = 2,
119 },
114 [BOARD_CB_PCIDIO96H] = { 120 [BOARD_CB_PCIDIO96H] = {
115 .name = "cb_pci-dio96h", 121 .name = "cb_pci-dio96h",
116 .dio_badr = 2, 122 .dio_badr = 2,
@@ -263,7 +269,10 @@ static DEFINE_PCI_DEVICE_TABLE(pci_8255_pci_table) = {
263 { PCI_VDEVICE(ADLINK, 0x7296), BOARD_ADLINK_PCI7296 }, 269 { PCI_VDEVICE(ADLINK, 0x7296), BOARD_ADLINK_PCI7296 },
264 { PCI_VDEVICE(CB, 0x0028), BOARD_CB_PCIDIO24 }, 270 { PCI_VDEVICE(CB, 0x0028), BOARD_CB_PCIDIO24 },
265 { PCI_VDEVICE(CB, 0x0014), BOARD_CB_PCIDIO24H }, 271 { PCI_VDEVICE(CB, 0x0014), BOARD_CB_PCIDIO24H },
266 { PCI_VDEVICE(CB, 0x000b), BOARD_CB_PCIDIO48H }, 272 { PCI_DEVICE_SUB(PCI_VENDOR_ID_CB, 0x000b, 0x0000, 0x0000),
273 .driver_data = BOARD_CB_PCIDIO48H_OLD },
274 { PCI_DEVICE_SUB(PCI_VENDOR_ID_CB, 0x000b, PCI_VENDOR_ID_CB, 0x000b),
275 .driver_data = BOARD_CB_PCIDIO48H_NEW },
267 { PCI_VDEVICE(CB, 0x0017), BOARD_CB_PCIDIO96H }, 276 { PCI_VDEVICE(CB, 0x0017), BOARD_CB_PCIDIO96H },
268 { PCI_VDEVICE(NI, 0x0160), BOARD_NI_PCIDIO96 }, 277 { PCI_VDEVICE(NI, 0x0160), BOARD_NI_PCIDIO96 },
269 { PCI_VDEVICE(NI, 0x1630), BOARD_NI_PCIDIO96B }, 278 { PCI_VDEVICE(NI, 0x1630), BOARD_NI_PCIDIO96B },
diff --git a/drivers/staging/comedi/drivers/pcl730.c b/drivers/staging/comedi/drivers/pcl730.c
index d041b714db29..2baaf1db6fbf 100644
--- a/drivers/staging/comedi/drivers/pcl730.c
+++ b/drivers/staging/comedi/drivers/pcl730.c
@@ -173,11 +173,11 @@ static int pcl730_do_insn_bits(struct comedi_device *dev,
173 if (mask) { 173 if (mask) {
174 if (mask & 0x00ff) 174 if (mask & 0x00ff)
175 outb(s->state & 0xff, dev->iobase + reg); 175 outb(s->state & 0xff, dev->iobase + reg);
176 if ((mask & 0xff00) & (s->n_chan > 8)) 176 if ((mask & 0xff00) && (s->n_chan > 8))
177 outb((s->state >> 8) & 0xff, dev->iobase + reg + 1); 177 outb((s->state >> 8) & 0xff, dev->iobase + reg + 1);
178 if ((mask & 0xff0000) & (s->n_chan > 16)) 178 if ((mask & 0xff0000) && (s->n_chan > 16))
179 outb((s->state >> 16) & 0xff, dev->iobase + reg + 2); 179 outb((s->state >> 16) & 0xff, dev->iobase + reg + 2);
180 if ((mask & 0xff000000) & (s->n_chan > 24)) 180 if ((mask & 0xff000000) && (s->n_chan > 24))
181 outb((s->state >> 24) & 0xff, dev->iobase + reg + 3); 181 outb((s->state >> 24) & 0xff, dev->iobase + reg + 3);
182 } 182 }
183 183
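The pcl730 fix replaces a bitwise & between a port mask test and a channel-count comparison with a logical &&. The comparison yields 0 or 1, and a value like (mask & 0xff00) never has bit 0 set, so the bitwise form was always 0 and the upper ports were never written. A two-line demonstration:

#include <stdio.h>

int main(void)
{
	unsigned int mask = 0xff00;
	int n_chan = 16;

	printf("bitwise : %u\n", (mask & 0xff00) & (n_chan > 8));  /* 0 */
	printf("logical : %d\n", (mask & 0xff00) && (n_chan > 8)); /* 1 */
	return 0;
}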
diff --git a/drivers/staging/comedi/drivers/s626.c b/drivers/staging/comedi/drivers/s626.c
index 6815cfe2664e..b486099b543d 100644
--- a/drivers/staging/comedi/drivers/s626.c
+++ b/drivers/staging/comedi/drivers/s626.c
@@ -494,7 +494,7 @@ static void s626_send_dac(struct comedi_device *dev, uint32_t val)
494 * Private helper function: Write setpoint to an application DAC channel. 494 * Private helper function: Write setpoint to an application DAC channel.
495 */ 495 */
496static void s626_set_dac(struct comedi_device *dev, uint16_t chan, 496static void s626_set_dac(struct comedi_device *dev, uint16_t chan,
497 unsigned short dacdata) 497 int16_t dacdata)
498{ 498{
499 struct s626_private *devpriv = dev->private; 499 struct s626_private *devpriv = dev->private;
500 uint16_t signmask; 500 uint16_t signmask;
diff --git a/drivers/staging/comedi/drivers/vmk80xx.c b/drivers/staging/comedi/drivers/vmk80xx.c
index 933b01a0f03d..0adf3cffddb0 100644
--- a/drivers/staging/comedi/drivers/vmk80xx.c
+++ b/drivers/staging/comedi/drivers/vmk80xx.c
@@ -465,7 +465,7 @@ static int vmk80xx_do_insn_bits(struct comedi_device *dev,
465 unsigned char *rx_buf = devpriv->usb_rx_buf; 465 unsigned char *rx_buf = devpriv->usb_rx_buf;
466 unsigned char *tx_buf = devpriv->usb_tx_buf; 466 unsigned char *tx_buf = devpriv->usb_tx_buf;
467 int reg, cmd; 467 int reg, cmd;
468 int ret; 468 int ret = 0;
469 469
470 if (devpriv->model == VMK8061_MODEL) { 470 if (devpriv->model == VMK8061_MODEL) {
471 reg = VMK8061_DO_REG; 471 reg = VMK8061_DO_REG;
diff --git a/drivers/staging/ft1000/ft1000-usb/ft1000_download.c b/drivers/staging/ft1000/ft1000-usb/ft1000_download.c
index 68ded17c0f5c..12f333fa59b5 100644
--- a/drivers/staging/ft1000/ft1000-usb/ft1000_download.c
+++ b/drivers/staging/ft1000/ft1000-usb/ft1000_download.c
@@ -578,7 +578,7 @@ static int request_code_segment(struct ft1000_usb *ft1000dev, u16 **s_file,
578 u8 **c_file, const u8 *endpoint, bool boot_case) 578 u8 **c_file, const u8 *endpoint, bool boot_case)
579{ 579{
580 long word_length; 580 long word_length;
581 int status; 581 int status = 0;
582 582
583 /*DEBUG("FT1000:REQUEST_CODE_SEGMENT\n");i*/ 583 /*DEBUG("FT1000:REQUEST_CODE_SEGMENT\n");i*/
584 word_length = get_request_value(ft1000dev); 584 word_length = get_request_value(ft1000dev);
@@ -1074,4 +1074,3 @@ int scram_dnldr(struct ft1000_usb *ft1000dev, void *pFileStart,
1074 1074
1075 return status; 1075 return status;
1076} 1076}
1077
diff --git a/drivers/staging/iio/magnetometer/Kconfig b/drivers/staging/iio/magnetometer/Kconfig
index a3ea69e9d800..34634da1f9f7 100644
--- a/drivers/staging/iio/magnetometer/Kconfig
+++ b/drivers/staging/iio/magnetometer/Kconfig
@@ -6,6 +6,8 @@ menu "Magnetometer sensors"
6config SENSORS_HMC5843 6config SENSORS_HMC5843
7 tristate "Honeywell HMC5843/5883/5883L 3-Axis Magnetometer" 7 tristate "Honeywell HMC5843/5883/5883L 3-Axis Magnetometer"
8 depends on I2C 8 depends on I2C
9 select IIO_BUFFER
10 select IIO_TRIGGERED_BUFFER
9 help 11 help
10 Say Y here to add support for the Honeywell HMC5843, HMC5883 and 12 Say Y here to add support for the Honeywell HMC5843, HMC5883 and
11 HMC5883L 3-Axis Magnetometer (digital compass). 13 HMC5883L 3-Axis Magnetometer (digital compass).
diff --git a/drivers/staging/iio/magnetometer/hmc5843.c b/drivers/staging/iio/magnetometer/hmc5843.c
index 99421f90d189..0485d7f39867 100644
--- a/drivers/staging/iio/magnetometer/hmc5843.c
+++ b/drivers/staging/iio/magnetometer/hmc5843.c
@@ -451,7 +451,12 @@ done:
451 .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | \ 451 .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | \
452 BIT(IIO_CHAN_INFO_SAMP_FREQ), \ 452 BIT(IIO_CHAN_INFO_SAMP_FREQ), \
453 .scan_index = idx, \ 453 .scan_index = idx, \
454 .scan_type = IIO_ST('s', 16, 16, IIO_BE), \ 454 .scan_type = { \
455 .sign = 's', \
456 .realbits = 16, \
457 .storagebits = 16, \
458 .endianness = IIO_BE, \
459 }, \
455 } 460 }
456 461
457static const struct iio_chan_spec hmc5843_channels[] = { 462static const struct iio_chan_spec hmc5843_channels[] = {
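The hmc5843 change expands the removed IIO_ST() helper macro into an explicit designated initializer for the scan_type member, so sign, real bits, storage bits and endianness are each named at the point of use. A small standalone illustration of the nested designated-initializer form, using stand-in types rather than the real iio_chan_spec:

#include <stdio.h>

struct scan_type {
	char sign;
	unsigned char realbits;
	unsigned char storagebits;
	int endianness;		/* 0 = cpu order, 1 = big endian (demo values) */
};

struct chan_spec {
	int scan_index;
	struct scan_type scan_type;
};

static const struct chan_spec chan = {
	.scan_index = 0,
	.scan_type = {
		.sign = 's',
		.realbits = 16,
		.storagebits = 16,
		.endianness = 1,
	},
};

int main(void)
{
	printf("%c%d/%d\n", chan.scan_type.sign,
	       chan.scan_type.realbits, chan.scan_type.storagebits);
	return 0;
}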
diff --git a/drivers/staging/imx-drm/Makefile b/drivers/staging/imx-drm/Makefile
index 2c3a9e178fb5..8742432d7b01 100644
--- a/drivers/staging/imx-drm/Makefile
+++ b/drivers/staging/imx-drm/Makefile
@@ -8,4 +8,6 @@ obj-$(CONFIG_DRM_IMX_TVE) += imx-tve.o
8obj-$(CONFIG_DRM_IMX_LDB) += imx-ldb.o 8obj-$(CONFIG_DRM_IMX_LDB) += imx-ldb.o
9obj-$(CONFIG_DRM_IMX_FB_HELPER) += imx-fbdev.o 9obj-$(CONFIG_DRM_IMX_FB_HELPER) += imx-fbdev.o
10obj-$(CONFIG_DRM_IMX_IPUV3_CORE) += ipu-v3/ 10obj-$(CONFIG_DRM_IMX_IPUV3_CORE) += ipu-v3/
11obj-$(CONFIG_DRM_IMX_IPUV3) += ipuv3-crtc.o ipuv3-plane.o 11
12imx-ipuv3-crtc-objs := ipuv3-crtc.o ipuv3-plane.o
13obj-$(CONFIG_DRM_IMX_IPUV3) += imx-ipuv3-crtc.o
diff --git a/drivers/staging/imx-drm/imx-drm-core.c b/drivers/staging/imx-drm/imx-drm-core.c
index 51aa9772f959..96e4eee344ef 100644
--- a/drivers/staging/imx-drm/imx-drm-core.c
+++ b/drivers/staging/imx-drm/imx-drm-core.c
@@ -72,6 +72,7 @@ int imx_drm_crtc_id(struct imx_drm_crtc *crtc)
72{ 72{
73 return crtc->pipe; 73 return crtc->pipe;
74} 74}
75EXPORT_SYMBOL_GPL(imx_drm_crtc_id);
75 76
76static void imx_drm_driver_lastclose(struct drm_device *drm) 77static void imx_drm_driver_lastclose(struct drm_device *drm)
77{ 78{
@@ -87,8 +88,9 @@ static int imx_drm_driver_unload(struct drm_device *drm)
87 88
88 imx_drm_device_put(); 89 imx_drm_device_put();
89 90
90 drm_mode_config_cleanup(imxdrm->drm); 91 drm_vblank_cleanup(imxdrm->drm);
91 drm_kms_helper_poll_fini(imxdrm->drm); 92 drm_kms_helper_poll_fini(imxdrm->drm);
93 drm_mode_config_cleanup(imxdrm->drm);
92 94
93 return 0; 95 return 0;
94} 96}
@@ -198,8 +200,8 @@ static void imx_drm_driver_preclose(struct drm_device *drm,
198 if (!file->is_master) 200 if (!file->is_master)
199 return; 201 return;
200 202
201 for (i = 0; i < 4; i++) 203 for (i = 0; i < MAX_CRTC; i++)
202 imx_drm_disable_vblank(drm , i); 204 imx_drm_disable_vblank(drm, i);
203} 205}
204 206
205static const struct file_operations imx_drm_driver_fops = { 207static const struct file_operations imx_drm_driver_fops = {
@@ -375,8 +377,6 @@ static int imx_drm_crtc_register(struct imx_drm_crtc *imx_drm_crtc)
375 struct imx_drm_device *imxdrm = __imx_drm_device(); 377 struct imx_drm_device *imxdrm = __imx_drm_device();
376 int ret; 378 int ret;
377 379
378 drm_crtc_init(imxdrm->drm, imx_drm_crtc->crtc,
379 imx_drm_crtc->imx_drm_helper_funcs.crtc_funcs);
380 ret = drm_mode_crtc_set_gamma_size(imx_drm_crtc->crtc, 256); 380 ret = drm_mode_crtc_set_gamma_size(imx_drm_crtc->crtc, 256);
381 if (ret) 381 if (ret)
382 return ret; 382 return ret;
@@ -384,6 +384,9 @@ static int imx_drm_crtc_register(struct imx_drm_crtc *imx_drm_crtc)
384 drm_crtc_helper_add(imx_drm_crtc->crtc, 384 drm_crtc_helper_add(imx_drm_crtc->crtc,
385 imx_drm_crtc->imx_drm_helper_funcs.crtc_helper_funcs); 385 imx_drm_crtc->imx_drm_helper_funcs.crtc_helper_funcs);
386 386
387 drm_crtc_init(imxdrm->drm, imx_drm_crtc->crtc,
388 imx_drm_crtc->imx_drm_helper_funcs.crtc_funcs);
389
387 drm_mode_group_reinit(imxdrm->drm); 390 drm_mode_group_reinit(imxdrm->drm);
388 391
389 return 0; 392 return 0;
@@ -427,11 +430,11 @@ static int imx_drm_driver_load(struct drm_device *drm, unsigned long flags)
427 ret = drm_mode_group_init_legacy_group(imxdrm->drm, 430 ret = drm_mode_group_init_legacy_group(imxdrm->drm,
428 &imxdrm->drm->primary->mode_group); 431 &imxdrm->drm->primary->mode_group);
429 if (ret) 432 if (ret)
430 goto err_init; 433 goto err_kms;
431 434
432 ret = drm_vblank_init(imxdrm->drm, MAX_CRTC); 435 ret = drm_vblank_init(imxdrm->drm, MAX_CRTC);
433 if (ret) 436 if (ret)
434 goto err_init; 437 goto err_kms;
435 438
436 /* 439 /*
437 * with vblank_disable_allowed = true, vblank interrupt will be disabled 440 * with vblank_disable_allowed = true, vblank interrupt will be disabled
@@ -440,12 +443,19 @@ static int imx_drm_driver_load(struct drm_device *drm, unsigned long flags)
440 */ 443 */
441 imxdrm->drm->vblank_disable_allowed = true; 444 imxdrm->drm->vblank_disable_allowed = true;
442 445
443 if (!imx_drm_device_get()) 446 if (!imx_drm_device_get()) {
444 ret = -EINVAL; 447 ret = -EINVAL;
448 goto err_vblank;
449 }
445 450
446 ret = 0; 451 mutex_unlock(&imxdrm->mutex);
452 return 0;
447 453
448err_init: 454err_vblank:
455 drm_vblank_cleanup(drm);
456err_kms:
457 drm_kms_helper_poll_fini(drm);
458 drm_mode_config_cleanup(drm);
449 mutex_unlock(&imxdrm->mutex); 459 mutex_unlock(&imxdrm->mutex);
450 460
451 return ret; 461 return ret;
@@ -491,6 +501,15 @@ int imx_drm_add_crtc(struct drm_crtc *crtc,
491 501
492 mutex_lock(&imxdrm->mutex); 502 mutex_lock(&imxdrm->mutex);
493 503
504 /*
505 * The vblank arrays are dimensioned by MAX_CRTC - we can't
506 * pass IDs greater than this to those functions.
507 */
508 if (imxdrm->pipes >= MAX_CRTC) {
509 ret = -EINVAL;
510 goto err_busy;
511 }
512
494 if (imxdrm->drm->open_count) { 513 if (imxdrm->drm->open_count) {
495 ret = -EBUSY; 514 ret = -EBUSY;
496 goto err_busy; 515 goto err_busy;
@@ -527,6 +546,7 @@ int imx_drm_add_crtc(struct drm_crtc *crtc,
527 return 0; 546 return 0;
528 547
529err_register: 548err_register:
549 list_del(&imx_drm_crtc->list);
530 kfree(imx_drm_crtc); 550 kfree(imx_drm_crtc);
531err_alloc: 551err_alloc:
532err_busy: 552err_busy:
diff --git a/drivers/staging/imx-drm/imx-tve.c b/drivers/staging/imx-drm/imx-tve.c
index 680f4c8fa081..2c44fef8d58b 100644
--- a/drivers/staging/imx-drm/imx-tve.c
+++ b/drivers/staging/imx-drm/imx-tve.c
@@ -114,7 +114,6 @@ struct imx_tve {
114 struct drm_encoder encoder; 114 struct drm_encoder encoder;
115 struct imx_drm_encoder *imx_drm_encoder; 115 struct imx_drm_encoder *imx_drm_encoder;
116 struct device *dev; 116 struct device *dev;
117 spinlock_t enable_lock; /* serializes tve_enable/disable */
118 spinlock_t lock; /* register lock */ 117 spinlock_t lock; /* register lock */
119 bool enabled; 118 bool enabled;
120 int mode; 119 int mode;
@@ -146,10 +145,8 @@ __releases(&tve->lock)
146 145
147static void tve_enable(struct imx_tve *tve) 146static void tve_enable(struct imx_tve *tve)
148{ 147{
149 unsigned long flags;
150 int ret; 148 int ret;
151 149
152 spin_lock_irqsave(&tve->enable_lock, flags);
153 if (!tve->enabled) { 150 if (!tve->enabled) {
154 tve->enabled = true; 151 tve->enabled = true;
155 clk_prepare_enable(tve->clk); 152 clk_prepare_enable(tve->clk);
@@ -169,23 +166,18 @@ static void tve_enable(struct imx_tve *tve)
169 TVE_CD_SM_IEN | 166 TVE_CD_SM_IEN |
170 TVE_CD_LM_IEN | 167 TVE_CD_LM_IEN |
171 TVE_CD_MON_END_IEN); 168 TVE_CD_MON_END_IEN);
172
173 spin_unlock_irqrestore(&tve->enable_lock, flags);
174} 169}
175 170
176static void tve_disable(struct imx_tve *tve) 171static void tve_disable(struct imx_tve *tve)
177{ 172{
178 unsigned long flags;
179 int ret; 173 int ret;
180 174
181 spin_lock_irqsave(&tve->enable_lock, flags);
182 if (tve->enabled) { 175 if (tve->enabled) {
183 tve->enabled = false; 176 tve->enabled = false;
184 ret = regmap_update_bits(tve->regmap, TVE_COM_CONF_REG, 177 ret = regmap_update_bits(tve->regmap, TVE_COM_CONF_REG,
185 TVE_IPU_CLK_EN | TVE_EN, 0); 178 TVE_IPU_CLK_EN | TVE_EN, 0);
186 clk_disable_unprepare(tve->clk); 179 clk_disable_unprepare(tve->clk);
187 } 180 }
188 spin_unlock_irqrestore(&tve->enable_lock, flags);
189} 181}
190 182
191static int tve_setup_tvout(struct imx_tve *tve) 183static int tve_setup_tvout(struct imx_tve *tve)
@@ -601,7 +593,6 @@ static int imx_tve_probe(struct platform_device *pdev)
601 593
602 tve->dev = &pdev->dev; 594 tve->dev = &pdev->dev;
603 spin_lock_init(&tve->lock); 595 spin_lock_init(&tve->lock);
604 spin_lock_init(&tve->enable_lock);
605 596
606 ddc_node = of_parse_phandle(np, "ddc", 0); 597 ddc_node = of_parse_phandle(np, "ddc", 0);
607 if (ddc_node) { 598 if (ddc_node) {
diff --git a/drivers/staging/imx-drm/ipu-v3/ipu-common.c b/drivers/staging/imx-drm/ipu-v3/ipu-common.c
index 7a22ce619ed2..97ca6924dbb3 100644
--- a/drivers/staging/imx-drm/ipu-v3/ipu-common.c
+++ b/drivers/staging/imx-drm/ipu-v3/ipu-common.c
@@ -996,35 +996,35 @@ static const struct ipu_platform_reg client_reg[] = {
996 }, 996 },
997}; 997};
998 998
999static DEFINE_MUTEX(ipu_client_id_mutex);
999static int ipu_client_id; 1000static int ipu_client_id;
1000 1001
1001static int ipu_add_subdevice_pdata(struct device *dev,
1002 const struct ipu_platform_reg *reg)
1003{
1004 struct platform_device *pdev;
1005
1006 pdev = platform_device_register_data(dev, reg->name, ipu_client_id++,
1007 &reg->pdata, sizeof(struct ipu_platform_reg));
1008
1009 return PTR_ERR_OR_ZERO(pdev);
1010}
1011
1012static int ipu_add_client_devices(struct ipu_soc *ipu) 1002static int ipu_add_client_devices(struct ipu_soc *ipu)
1013{ 1003{
1014 int ret; 1004 struct device *dev = ipu->dev;
1015 int i; 1005 unsigned i;
1006 int id, ret;
1007
1008 mutex_lock(&ipu_client_id_mutex);
1009 id = ipu_client_id;
1010 ipu_client_id += ARRAY_SIZE(client_reg);
1011 mutex_unlock(&ipu_client_id_mutex);
1016 1012
1017 for (i = 0; i < ARRAY_SIZE(client_reg); i++) { 1013 for (i = 0; i < ARRAY_SIZE(client_reg); i++) {
1018 const struct ipu_platform_reg *reg = &client_reg[i]; 1014 const struct ipu_platform_reg *reg = &client_reg[i];
1019 ret = ipu_add_subdevice_pdata(ipu->dev, reg); 1015 struct platform_device *pdev;
1020 if (ret) 1016
1017 pdev = platform_device_register_data(dev, reg->name,
1018 id++, &reg->pdata, sizeof(reg->pdata));
1019
1020 if (IS_ERR(pdev))
1021 goto err_register; 1021 goto err_register;
1022 } 1022 }
1023 1023
1024 return 0; 1024 return 0;
1025 1025
1026err_register: 1026err_register:
1027 platform_device_unregister_children(to_platform_device(ipu->dev)); 1027 platform_device_unregister_children(to_platform_device(dev));
1028 1028
1029 return ret; 1029 return ret;
1030} 1030}
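The ipu-common change reserves a whole block of client IDs under a mutex up front and then registers the sub-devices afterwards, so concurrent probes cannot race on the shared ipu_client_id counter. The pthread sketch below shows the same "reserve a contiguous range under a lock, consume it lock-free afterwards" shape; all names are illustrative.

#include <stdio.h>
#include <pthread.h>

#define CLIENTS_PER_IPU 4

static pthread_mutex_t id_mutex = PTHREAD_MUTEX_INITIALIZER;
static int next_client_id;

static void *probe_ipu(void *arg)
{
	const char *name = arg;
	int id, i;

	/* Reserve a contiguous ID block atomically... */
	pthread_mutex_lock(&id_mutex);
	id = next_client_id;
	next_client_id += CLIENTS_PER_IPU;
	pthread_mutex_unlock(&id_mutex);

	/* ...then register the clients without holding the lock. */
	for (i = 0; i < CLIENTS_PER_IPU; i++)
		printf("%s: register client id %d\n", name, id + i);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, probe_ipu, (void *)"ipu0");
	pthread_create(&b, NULL, probe_ipu, (void *)"ipu1");
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}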
diff --git a/drivers/staging/lustre/lustre/ptlrpc/pinger.c b/drivers/staging/lustre/lustre/ptlrpc/pinger.c
index 5dec771d70ee..4d340f4a2198 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/pinger.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/pinger.c
@@ -409,8 +409,8 @@ int ptlrpc_stop_pinger(void)
409 struct l_wait_info lwi = { 0 }; 409 struct l_wait_info lwi = { 0 };
410 int rc = 0; 410 int rc = 0;
411 411
412 if (!thread_is_init(&pinger_thread) && 412 if (thread_is_init(&pinger_thread) ||
413 !thread_is_stopped(&pinger_thread)) 413 thread_is_stopped(&pinger_thread))
414 return -EALREADY; 414 return -EALREADY;
415 415
416 ptlrpc_pinger_remove_timeouts(); 416 ptlrpc_pinger_remove_timeouts();
diff --git a/drivers/staging/media/go7007/go7007-usb.c b/drivers/staging/media/go7007/go7007-usb.c
index 58684da45e6c..b658c2316df3 100644
--- a/drivers/staging/media/go7007/go7007-usb.c
+++ b/drivers/staging/media/go7007/go7007-usb.c
@@ -15,6 +15,8 @@
15 * Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 15 * Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
16 */ 16 */
17 17
18#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
18#include <linux/module.h> 20#include <linux/module.h>
19#include <linux/kernel.h> 21#include <linux/kernel.h>
20#include <linux/init.h> 22#include <linux/init.h>
@@ -661,7 +663,7 @@ static int go7007_usb_interface_reset(struct go7007 *go)
661 663
662 if (usb->board->flags & GO7007_USB_EZUSB) { 664 if (usb->board->flags & GO7007_USB_EZUSB) {
663 /* Reset buffer in EZ-USB */ 665 /* Reset buffer in EZ-USB */
664 dev_dbg(go->dev, "resetting EZ-USB buffers\n"); 666 pr_debug("resetting EZ-USB buffers\n");
665 if (go7007_usb_vendor_request(go, 0x10, 0, 0, NULL, 0, 0) < 0 || 667 if (go7007_usb_vendor_request(go, 0x10, 0, 0, NULL, 0, 0) < 0 ||
666 go7007_usb_vendor_request(go, 0x10, 0, 0, NULL, 0, 0) < 0) 668 go7007_usb_vendor_request(go, 0x10, 0, 0, NULL, 0, 0) < 0)
667 return -1; 669 return -1;
@@ -689,7 +691,7 @@ static int go7007_usb_ezusb_write_interrupt(struct go7007 *go,
689 u16 status_reg = 0; 691 u16 status_reg = 0;
690 int timeout = 500; 692 int timeout = 500;
691 693
692 dev_dbg(go->dev, "WriteInterrupt: %04x %04x\n", addr, data); 694 pr_debug("WriteInterrupt: %04x %04x\n", addr, data);
693 695
694 for (i = 0; i < 100; ++i) { 696 for (i = 0; i < 100; ++i) {
695 r = usb_control_msg(usb->usbdev, 697 r = usb_control_msg(usb->usbdev,
@@ -734,7 +736,7 @@ static int go7007_usb_onboard_write_interrupt(struct go7007 *go,
734 int r; 736 int r;
735 int timeout = 500; 737 int timeout = 500;
736 738
737 dev_dbg(go->dev, "WriteInterrupt: %04x %04x\n", addr, data); 739 pr_debug("WriteInterrupt: %04x %04x\n", addr, data);
738 740
739 go->usb_buf[0] = data & 0xff; 741 go->usb_buf[0] = data & 0xff;
740 go->usb_buf[1] = data >> 8; 742 go->usb_buf[1] = data >> 8;
@@ -771,7 +773,7 @@ static void go7007_usb_readinterrupt_complete(struct urb *urb)
771 go->interrupt_available = 1; 773 go->interrupt_available = 1;
772 go->interrupt_data = __le16_to_cpu(regs[0]); 774 go->interrupt_data = __le16_to_cpu(regs[0]);
773 go->interrupt_value = __le16_to_cpu(regs[1]); 775 go->interrupt_value = __le16_to_cpu(regs[1]);
774 dev_dbg(go->dev, "ReadInterrupt: %04x %04x\n", 776 pr_debug("ReadInterrupt: %04x %04x\n",
775 go->interrupt_value, go->interrupt_data); 777 go->interrupt_value, go->interrupt_data);
776 } 778 }
777 779
@@ -891,7 +893,7 @@ static int go7007_usb_send_firmware(struct go7007 *go, u8 *data, int len)
891 int transferred, pipe; 893 int transferred, pipe;
892 int timeout = 500; 894 int timeout = 500;
893 895
894 dev_dbg(go->dev, "DownloadBuffer sending %d bytes\n", len); 896 pr_debug("DownloadBuffer sending %d bytes\n", len);
895 897
896 if (usb->board->flags & GO7007_USB_EZUSB) 898 if (usb->board->flags & GO7007_USB_EZUSB)
897 pipe = usb_sndbulkpipe(usb->usbdev, 2); 899 pipe = usb_sndbulkpipe(usb->usbdev, 2);
@@ -977,7 +979,7 @@ static int go7007_usb_i2c_master_xfer(struct i2c_adapter *adapter,
977 !(msgs[i].flags & I2C_M_RD) && 979 !(msgs[i].flags & I2C_M_RD) &&
978 (msgs[i + 1].flags & I2C_M_RD)) { 980 (msgs[i + 1].flags & I2C_M_RD)) {
979#ifdef GO7007_I2C_DEBUG 981#ifdef GO7007_I2C_DEBUG
980 dev_dbg(go->dev, "i2c write/read %d/%d bytes on %02x\n", 982 pr_debug("i2c write/read %d/%d bytes on %02x\n",
981 msgs[i].len, msgs[i + 1].len, msgs[i].addr); 983 msgs[i].len, msgs[i + 1].len, msgs[i].addr);
982#endif 984#endif
983 buf[0] = 0x01; 985 buf[0] = 0x01;
@@ -988,7 +990,7 @@ static int go7007_usb_i2c_master_xfer(struct i2c_adapter *adapter,
988 buf[buf_len++] = msgs[++i].len; 990 buf[buf_len++] = msgs[++i].len;
989 } else if (msgs[i].flags & I2C_M_RD) { 991 } else if (msgs[i].flags & I2C_M_RD) {
990#ifdef GO7007_I2C_DEBUG 992#ifdef GO7007_I2C_DEBUG
991 dev_dbg(go->dev, "i2c read %d bytes on %02x\n", 993 pr_debug("i2c read %d bytes on %02x\n",
992 msgs[i].len, msgs[i].addr); 994 msgs[i].len, msgs[i].addr);
993#endif 995#endif
994 buf[0] = 0x01; 996 buf[0] = 0x01;
@@ -998,7 +1000,7 @@ static int go7007_usb_i2c_master_xfer(struct i2c_adapter *adapter,
998 buf_len = 4; 1000 buf_len = 4;
999 } else { 1001 } else {
1000#ifdef GO7007_I2C_DEBUG 1002#ifdef GO7007_I2C_DEBUG
1001 dev_dbg(go->dev, "i2c write %d bytes on %02x\n", 1003 pr_debug("i2c write %d bytes on %02x\n",
1002 msgs[i].len, msgs[i].addr); 1004 msgs[i].len, msgs[i].addr);
1003#endif 1005#endif
1004 buf[0] = 0x00; 1006 buf[0] = 0x00;
@@ -1057,7 +1059,7 @@ static int go7007_usb_probe(struct usb_interface *intf,
1057 char *name; 1059 char *name;
1058 int video_pipe, i, v_urb_len; 1060 int video_pipe, i, v_urb_len;
1059 1061
1060 dev_dbg(go->dev, "probing new GO7007 USB board\n"); 1062 pr_debug("probing new GO7007 USB board\n");
1061 1063
1062 switch (id->driver_info) { 1064 switch (id->driver_info) {
1063 case GO7007_BOARDID_MATRIX_II: 1065 case GO7007_BOARDID_MATRIX_II:
@@ -1097,13 +1099,13 @@ static int go7007_usb_probe(struct usb_interface *intf,
1097 board = &board_px_tv402u; 1099 board = &board_px_tv402u;
1098 break; 1100 break;
1099 case GO7007_BOARDID_LIFEVIEW_LR192: 1101 case GO7007_BOARDID_LIFEVIEW_LR192:
1100 dev_err(go->dev, "The Lifeview TV Walker Ultra is not supported. Sorry!\n"); 1102 dev_err(&intf->dev, "The Lifeview TV Walker Ultra is not supported. Sorry!\n");
1101 return -ENODEV; 1103 return -ENODEV;
1102 name = "Lifeview TV Walker Ultra"; 1104 name = "Lifeview TV Walker Ultra";
1103 board = &board_lifeview_lr192; 1105 board = &board_lifeview_lr192;
1104 break; 1106 break;
1105 case GO7007_BOARDID_SENSORAY_2250: 1107 case GO7007_BOARDID_SENSORAY_2250:
1106 dev_info(go->dev, "Sensoray 2250 found\n"); 1108 dev_info(&intf->dev, "Sensoray 2250 found\n");
1107 name = "Sensoray 2250/2251"; 1109 name = "Sensoray 2250/2251";
1108 board = &board_sensoray_2250; 1110 board = &board_sensoray_2250;
1109 break; 1111 break;
@@ -1112,7 +1114,7 @@ static int go7007_usb_probe(struct usb_interface *intf,
1112 board = &board_ads_usbav_709; 1114 board = &board_ads_usbav_709;
1113 break; 1115 break;
1114 default: 1116 default:
1115 dev_err(go->dev, "unknown board ID %d!\n", 1117 dev_err(&intf->dev, "unknown board ID %d!\n",
1116 (unsigned int)id->driver_info); 1118 (unsigned int)id->driver_info);
1117 return -ENODEV; 1119 return -ENODEV;
1118 } 1120 }
@@ -1247,7 +1249,7 @@ static int go7007_usb_probe(struct usb_interface *intf,
1247 sizeof(go->name)); 1249 sizeof(go->name));
1248 break; 1250 break;
1249 default: 1251 default:
1250 dev_dbg(go->dev, "unable to detect tuner type!\n"); 1252 pr_debug("unable to detect tuner type!\n");
1251 break; 1253 break;
1252 } 1254 }
1253 /* Configure tuner mode selection inputs connected 1255 /* Configure tuner mode selection inputs connected
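The go7007-usb hunks switch debug output from dev_dbg(go->dev, ...) to pr_debug(), presumably because the struct device used by dev_dbg() is not yet usable at some of these points (most visibly in the probe path), while the new pr_fmt() definition at the top of the file keeps the module name in every pr_* message. A userspace approximation of that prefixing idiom; the fprintf wrapper is purely illustrative.

#include <stdio.h>

#define MODNAME		"go7007-usb"
#define pr_fmt(fmt)	MODNAME ": " fmt
#define pr_debug(fmt, ...)	fprintf(stderr, pr_fmt(fmt), ##__VA_ARGS__)

int main(void)
{
	pr_debug("WriteInterrupt: %04x %04x\n", 0x5678, 0x0001);
	return 0;
}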
diff --git a/drivers/staging/netlogic/xlr_net.c b/drivers/staging/netlogic/xlr_net.c
index 235d2b1ec593..eedffed17e39 100644
--- a/drivers/staging/netlogic/xlr_net.c
+++ b/drivers/staging/netlogic/xlr_net.c
@@ -306,7 +306,8 @@ static netdev_tx_t xlr_net_start_xmit(struct sk_buff *skb,
306 return NETDEV_TX_OK; 306 return NETDEV_TX_OK;
307} 307}
308 308
309static u16 xlr_net_select_queue(struct net_device *ndev, struct sk_buff *skb) 309static u16 xlr_net_select_queue(struct net_device *ndev, struct sk_buff *skb,
310 void *accel_priv)
310{ 311{
311 return (u16)smp_processor_id(); 312 return (u16)smp_processor_id();
312} 313}
diff --git a/drivers/staging/nvec/nvec.c b/drivers/staging/nvec/nvec.c
index 3066ee2e753b..49ea76b3435d 100644
--- a/drivers/staging/nvec/nvec.c
+++ b/drivers/staging/nvec/nvec.c
@@ -681,7 +681,8 @@ static irqreturn_t nvec_interrupt(int irq, void *dev)
681 dev_err(nvec->dev, 681 dev_err(nvec->dev,
682 "RX buffer overflow on %p: " 682 "RX buffer overflow on %p: "
683 "Trying to write byte %u of %u\n", 683 "Trying to write byte %u of %u\n",
684 nvec->rx, nvec->rx->pos, NVEC_MSG_SIZE); 684 nvec->rx, nvec->rx ? nvec->rx->pos : 0,
685 NVEC_MSG_SIZE);
685 break; 686 break;
686 default: 687 default:
687 nvec->state = 0; 688 nvec->state = 0;
diff --git a/drivers/staging/rtl8188eu/core/rtw_ap.c b/drivers/staging/rtl8188eu/core/rtw_ap.c
index 2c678f409573..2f548ebada59 100644
--- a/drivers/staging/rtl8188eu/core/rtw_ap.c
+++ b/drivers/staging/rtl8188eu/core/rtw_ap.c
@@ -1115,6 +1115,9 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len)
1115 return _FAIL; 1115 return _FAIL;
1116 } 1116 }
1117 1117
1118 /* fix bug of flush_cam_entry at STOP AP mode */
1119 psta->state |= WIFI_AP_STATE;
1120 rtw_indicate_connect(padapter);
1118 pmlmepriv->cur_network.join_res = true;/* for check if already set beacon */ 1121 pmlmepriv->cur_network.join_res = true;/* for check if already set beacon */
1119 return ret; 1122 return ret;
1120} 1123}
diff --git a/drivers/staging/rtl8188eu/os_dep/os_intfs.c b/drivers/staging/rtl8188eu/os_dep/os_intfs.c
index 17659bb04bef..dd69e344e409 100644
--- a/drivers/staging/rtl8188eu/os_dep/os_intfs.c
+++ b/drivers/staging/rtl8188eu/os_dep/os_intfs.c
@@ -652,7 +652,8 @@ static unsigned int rtw_classify8021d(struct sk_buff *skb)
652 return dscp >> 5; 652 return dscp >> 5;
653} 653}
654 654
655static u16 rtw_select_queue(struct net_device *dev, struct sk_buff *skb) 655static u16 rtw_select_queue(struct net_device *dev, struct sk_buff *skb,
656 void *accel_priv)
656{ 657{
657 struct adapter *padapter = rtw_netdev_priv(dev); 658 struct adapter *padapter = rtw_netdev_priv(dev);
658 struct mlme_priv *pmlmepriv = &padapter->mlmepriv; 659 struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
diff --git a/drivers/staging/tidspbridge/Kconfig b/drivers/staging/tidspbridge/Kconfig
index 165b918b8171..1b6d581c438b 100644
--- a/drivers/staging/tidspbridge/Kconfig
+++ b/drivers/staging/tidspbridge/Kconfig
@@ -4,7 +4,7 @@
4 4
5menuconfig TIDSPBRIDGE 5menuconfig TIDSPBRIDGE
6 tristate "DSP Bridge driver" 6 tristate "DSP Bridge driver"
7 depends on ARCH_OMAP3 && !ARCH_MULTIPLATFORM 7 depends on ARCH_OMAP3 && !ARCH_MULTIPLATFORM && BROKEN
8 select MAILBOX 8 select MAILBOX
9 select OMAP2PLUS_MBOX 9 select OMAP2PLUS_MBOX
10 help 10 help
diff --git a/drivers/staging/tidspbridge/rmgr/drv_interface.c b/drivers/staging/tidspbridge/rmgr/drv_interface.c
index 1aa4a3fd0f1b..56e355b3e7fa 100644
--- a/drivers/staging/tidspbridge/rmgr/drv_interface.c
+++ b/drivers/staging/tidspbridge/rmgr/drv_interface.c
@@ -258,7 +258,8 @@ err:
258/* This function maps kernel space memory to user space memory. */ 258/* This function maps kernel space memory to user space memory. */
259static int bridge_mmap(struct file *filp, struct vm_area_struct *vma) 259static int bridge_mmap(struct file *filp, struct vm_area_struct *vma)
260{ 260{
261 u32 status; 261 struct omap_dsp_platform_data *pdata =
262 omap_dspbridge_dev->dev.platform_data;
262 263
263 /* VM_IO | VM_DONTEXPAND | VM_DONTDUMP are set by remap_pfn_range() */ 264 /* VM_IO | VM_DONTEXPAND | VM_DONTDUMP are set by remap_pfn_range() */
264 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); 265 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
@@ -268,13 +269,9 @@ static int bridge_mmap(struct file *filp, struct vm_area_struct *vma)
268 vma->vm_start, vma->vm_end, vma->vm_page_prot, 269 vma->vm_start, vma->vm_end, vma->vm_page_prot,
269 vma->vm_flags); 270 vma->vm_flags);
270 271
271 status = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, 272 return vm_iomap_memory(vma,
272 vma->vm_end - vma->vm_start, 273 pdata->phys_mempool_base,
273 vma->vm_page_prot); 274 pdata->phys_mempool_size);
274 if (status != 0)
275 status = -EAGAIN;
276
277 return status;
278} 275}
279 276
280static const struct file_operations bridge_fops = { 277static const struct file_operations bridge_fops = {
diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
index aab0012bba92..ab8b2ba6eedd 100644
--- a/drivers/staging/vt6655/hostap.c
+++ b/drivers/staging/vt6655/hostap.c
@@ -143,7 +143,8 @@ static int hostap_disable_hostapd(PSDevice pDevice, int rtnl_locked)
143 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Netdevice %s unregistered\n", 143 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Netdevice %s unregistered\n",
144 pDevice->dev->name, pDevice->apdev->name); 144 pDevice->dev->name, pDevice->apdev->name);
145 } 145 }
146 free_netdev(pDevice->apdev); 146 if (pDevice->apdev)
147 free_netdev(pDevice->apdev);
147 pDevice->apdev = NULL; 148 pDevice->apdev = NULL;
148 pDevice->bEnable8021x = false; 149 pDevice->bEnable8021x = false;
149 pDevice->bEnableHostWEP = false; 150 pDevice->bEnableHostWEP = false;
diff --git a/drivers/staging/vt6656/baseband.c b/drivers/staging/vt6656/baseband.c
index 1e8b8412e67e..4aa5ef54b683 100644
--- a/drivers/staging/vt6656/baseband.c
+++ b/drivers/staging/vt6656/baseband.c
@@ -939,6 +939,7 @@ int BBbVT3184Init(struct vnt_private *pDevice)
939 u8 * pbyAgc; 939 u8 * pbyAgc;
940 u16 wLengthAgc; 940 u16 wLengthAgc;
941 u8 abyArray[256]; 941 u8 abyArray[256];
942 u8 data;
942 943
943 ntStatus = CONTROLnsRequestIn(pDevice, 944 ntStatus = CONTROLnsRequestIn(pDevice,
944 MESSAGE_TYPE_READ, 945 MESSAGE_TYPE_READ,
@@ -1104,6 +1105,16 @@ else {
1104 ControlvWriteByte(pDevice,MESSAGE_REQUEST_BBREG,0x0D,0x01); 1105 ControlvWriteByte(pDevice,MESSAGE_REQUEST_BBREG,0x0D,0x01);
1105 1106
1106 RFbRFTableDownload(pDevice); 1107 RFbRFTableDownload(pDevice);
1108
1109 /* Fix for TX USB resets from vendors driver */
1110 CONTROLnsRequestIn(pDevice, MESSAGE_TYPE_READ, USB_REG4,
1111 MESSAGE_REQUEST_MEM, sizeof(data), &data);
1112
1113 data |= 0x2;
1114
1115 CONTROLnsRequestOut(pDevice, MESSAGE_TYPE_WRITE, USB_REG4,
1116 MESSAGE_REQUEST_MEM, sizeof(data), &data);
1117
1107 return true;//ntStatus; 1118 return true;//ntStatus;
1108} 1119}
1109 1120
diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c
index ae1676d190c5..67ba48b9a8d9 100644
--- a/drivers/staging/vt6656/hostap.c
+++ b/drivers/staging/vt6656/hostap.c
@@ -133,7 +133,8 @@ static int hostap_disable_hostapd(struct vnt_private *pDevice, int rtnl_locked)
133 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Netdevice %s unregistered\n", 133 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Netdevice %s unregistered\n",
134 pDevice->dev->name, pDevice->apdev->name); 134 pDevice->dev->name, pDevice->apdev->name);
135 } 135 }
136 free_netdev(pDevice->apdev); 136 if (pDevice->apdev)
137 free_netdev(pDevice->apdev);
137 pDevice->apdev = NULL; 138 pDevice->apdev = NULL;
138 pDevice->bEnable8021x = false; 139 pDevice->bEnable8021x = false;
139 pDevice->bEnableHostWEP = false; 140 pDevice->bEnableHostWEP = false;
diff --git a/drivers/staging/vt6656/rndis.h b/drivers/staging/vt6656/rndis.h
index 5e073062017a..5cf5e732a36f 100644
--- a/drivers/staging/vt6656/rndis.h
+++ b/drivers/staging/vt6656/rndis.h
@@ -66,6 +66,8 @@
66 66
67#define VIAUSB20_PACKET_HEADER 0x04 67#define VIAUSB20_PACKET_HEADER 0x04
68 68
69#define USB_REG4 0x604
70
69typedef struct _CMD_MESSAGE 71typedef struct _CMD_MESSAGE
70{ 72{
71 u8 byData[256]; 73 u8 byData[256];
diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c
index 79ce363b2ea9..3277d9838f4e 100644
--- a/drivers/staging/zram/zram_drv.c
+++ b/drivers/staging/zram/zram_drv.c
@@ -652,21 +652,30 @@ static ssize_t reset_store(struct device *dev,
652 return -ENOMEM; 652 return -ENOMEM;
653 653
654 /* Do not reset an active device! */ 654 /* Do not reset an active device! */
655 if (bdev->bd_holders) 655 if (bdev->bd_holders) {
656 return -EBUSY; 656 ret = -EBUSY;
657 goto out;
658 }
657 659
658 ret = kstrtou16(buf, 10, &do_reset); 660 ret = kstrtou16(buf, 10, &do_reset);
659 if (ret) 661 if (ret)
660 return ret; 662 goto out;
661 663
662 if (!do_reset) 664 if (!do_reset) {
663 return -EINVAL; 665 ret = -EINVAL;
666 goto out;
667 }
664 668
665 /* Make sure all pending I/O is finished */ 669 /* Make sure all pending I/O is finished */
666 fsync_bdev(bdev); 670 fsync_bdev(bdev);
671 bdput(bdev);
667 672
668 zram_reset_device(zram, true); 673 zram_reset_device(zram, true);
669 return len; 674 return len;
675
676out:
677 bdput(bdev);
678 return ret;
670} 679}
671 680
672static void __zram_make_request(struct zram *zram, struct bio *bio, int rw) 681static void __zram_make_request(struct zram *zram, struct bio *bio, int rw)
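Note: the reset_store() rework above is the usual single-exit cleanup idiom — every early return that used to leak the block-device reference now jumps to a common label that calls bdput(). A generic sketch of the shape (my_get_bdev(), busy() and do_work() are illustrative placeholders; bdput() is the real release call):

static ssize_t my_store(struct device *dev, const char *buf, size_t len)
{
	struct block_device *bdev = my_get_bdev(dev);	/* takes a reference */
	ssize_t ret;

	if (busy(bdev)) {
		ret = -EBUSY;
		goto out;			/* still drops the reference */
	}

	ret = do_work(bdev) ? -EINVAL : len;
out:
	bdput(bdev);				/* single place that releases it */
	return ret;
}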
diff --git a/drivers/staging/zsmalloc/zsmalloc-main.c b/drivers/staging/zsmalloc/zsmalloc-main.c
index 1a67537dbc56..3b950e5a918f 100644
--- a/drivers/staging/zsmalloc/zsmalloc-main.c
+++ b/drivers/staging/zsmalloc/zsmalloc-main.c
@@ -430,7 +430,12 @@ static struct page *get_next_page(struct page *page)
430 return next; 430 return next;
431} 431}
432 432
433/* Encode <page, obj_idx> as a single handle value */ 433/*
434 * Encode <page, obj_idx> as a single handle value.
435 * On hardware platforms with physical memory starting at 0x0 the pfn
436 * could be 0 so we ensure that the handle will never be 0 by adjusting the
437 * encoded obj_idx value before encoding.
438 */
434static void *obj_location_to_handle(struct page *page, unsigned long obj_idx) 439static void *obj_location_to_handle(struct page *page, unsigned long obj_idx)
435{ 440{
436 unsigned long handle; 441 unsigned long handle;
@@ -441,17 +446,21 @@ static void *obj_location_to_handle(struct page *page, unsigned long obj_idx)
441 } 446 }
442 447
443 handle = page_to_pfn(page) << OBJ_INDEX_BITS; 448 handle = page_to_pfn(page) << OBJ_INDEX_BITS;
444 handle |= (obj_idx & OBJ_INDEX_MASK); 449 handle |= ((obj_idx + 1) & OBJ_INDEX_MASK);
445 450
446 return (void *)handle; 451 return (void *)handle;
447} 452}
448 453
449/* Decode <page, obj_idx> pair from the given object handle */ 454/*
455 * Decode <page, obj_idx> pair from the given object handle. We adjust the
456 * decoded obj_idx back to its original value since it was adjusted in
457 * obj_location_to_handle().
458 */
450static void obj_handle_to_location(unsigned long handle, struct page **page, 459static void obj_handle_to_location(unsigned long handle, struct page **page,
451 unsigned long *obj_idx) 460 unsigned long *obj_idx)
452{ 461{
453 *page = pfn_to_page(handle >> OBJ_INDEX_BITS); 462 *page = pfn_to_page(handle >> OBJ_INDEX_BITS);
454 *obj_idx = handle & OBJ_INDEX_MASK; 463 *obj_idx = (handle & OBJ_INDEX_MASK) - 1;
455} 464}
456 465
457static unsigned long obj_idx_to_offset(struct page *page, 466static unsigned long obj_idx_to_offset(struct page *page,
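Note: the +1/-1 adjustment above exists because a pfn of 0 combined with obj_idx 0 would otherwise encode to a handle of 0, which the allocator treats as "no object". A small userspace sketch of the round-trip — OBJ_INDEX_BITS is given an illustrative value here, the real one depends on the page size:

#include <assert.h>
#include <stdio.h>

#define OBJ_INDEX_BITS	12UL	/* illustrative, not the kernel's value */
#define OBJ_INDEX_MASK	((1UL << OBJ_INDEX_BITS) - 1)

static unsigned long encode(unsigned long pfn, unsigned long obj_idx)
{
	/* obj_idx is stored off by one so pfn == 0, idx == 0 never yields 0 */
	return (pfn << OBJ_INDEX_BITS) | ((obj_idx + 1) & OBJ_INDEX_MASK);
}

static void decode(unsigned long handle, unsigned long *pfn, unsigned long *obj_idx)
{
	*pfn = handle >> OBJ_INDEX_BITS;
	*obj_idx = (handle & OBJ_INDEX_MASK) - 1;
}

int main(void)
{
	unsigned long pfn, idx;

	assert(encode(0, 0) != 0);		/* the case the patch is about */
	decode(encode(0, 0), &pfn, &idx);
	assert(pfn == 0 && idx == 0);		/* round-trips correctly */
	printf("handle for pfn=0, idx=0: %#lx\n", encode(0, 0));
	return 0;
}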
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index d70e9119e906..00867190413c 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -465,6 +465,7 @@ int iscsit_del_np(struct iscsi_np *np)
465 */ 465 */
466 send_sig(SIGINT, np->np_thread, 1); 466 send_sig(SIGINT, np->np_thread, 1);
467 kthread_stop(np->np_thread); 467 kthread_stop(np->np_thread);
468 np->np_thread = NULL;
468 } 469 }
469 470
470 np->np_transport->iscsit_free_np(np); 471 np->np_transport->iscsit_free_np(np);
@@ -823,24 +824,22 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
823 if (((hdr->flags & ISCSI_FLAG_CMD_READ) || 824 if (((hdr->flags & ISCSI_FLAG_CMD_READ) ||
824 (hdr->flags & ISCSI_FLAG_CMD_WRITE)) && !hdr->data_length) { 825 (hdr->flags & ISCSI_FLAG_CMD_WRITE)) && !hdr->data_length) {
825 /* 826 /*
826 * Vmware ESX v3.0 uses a modified Cisco Initiator (v3.4.2) 827 * From RFC-3720 Section 10.3.1:
827 * that adds support for RESERVE/RELEASE. There is a bug 828 *
828 * add with this new functionality that sets R/W bits when 829 * "Either or both of R and W MAY be 1 when either the
829 * neither CDB carries any READ or WRITE datapayloads. 830 * Expected Data Transfer Length and/or Bidirectional Read
831 * Expected Data Transfer Length are 0"
832 *
 833 * For this case, go ahead and clear the unnecessary bits
834 * to avoid any confusion with ->data_direction.
830 */ 835 */
831 if ((hdr->cdb[0] == 0x16) || (hdr->cdb[0] == 0x17)) { 836 hdr->flags &= ~ISCSI_FLAG_CMD_READ;
832 hdr->flags &= ~ISCSI_FLAG_CMD_READ; 837 hdr->flags &= ~ISCSI_FLAG_CMD_WRITE;
833 hdr->flags &= ~ISCSI_FLAG_CMD_WRITE;
834 goto done;
835 }
836 838
837 pr_err("ISCSI_FLAG_CMD_READ or ISCSI_FLAG_CMD_WRITE" 839 pr_warn("ISCSI_FLAG_CMD_READ or ISCSI_FLAG_CMD_WRITE"
838 " set when Expected Data Transfer Length is 0 for" 840 " set when Expected Data Transfer Length is 0 for"
839 " CDB: 0x%02x. Bad iSCSI Initiator.\n", hdr->cdb[0]); 841 " CDB: 0x%02x, Fixing up flags\n", hdr->cdb[0]);
840 return iscsit_add_reject_cmd(cmd,
841 ISCSI_REASON_BOOKMARK_INVALID, buf);
842 } 842 }
843done:
844 843
845 if (!(hdr->flags & ISCSI_FLAG_CMD_READ) && 844 if (!(hdr->flags & ISCSI_FLAG_CMD_READ) &&
846 !(hdr->flags & ISCSI_FLAG_CMD_WRITE) && (hdr->data_length != 0)) { 845 !(hdr->flags & ISCSI_FLAG_CMD_WRITE) && (hdr->data_length != 0)) {
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index e3318edb233d..1c0088fe9e99 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -474,7 +474,8 @@ static ssize_t __iscsi_##prefix##_store_##name( \
474 \ 474 \
475 if (!capable(CAP_SYS_ADMIN)) \ 475 if (!capable(CAP_SYS_ADMIN)) \
476 return -EPERM; \ 476 return -EPERM; \
477 \ 477 if (count >= sizeof(auth->name)) \
478 return -EINVAL; \
478 snprintf(auth->name, sizeof(auth->name), "%s", page); \ 479 snprintf(auth->name, sizeof(auth->name), "%s", page); \
479 if (!strncmp("NULL", auth->name, 4)) \ 480 if (!strncmp("NULL", auth->name, 4)) \
480 auth->naf_flags &= ~flags; \ 481 auth->naf_flags &= ~flags; \
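Note: the added length check matters because snprintf() never fails on truncation — it silently cuts the string to fit and returns the length it would have written. Rejecting count >= sizeof(auth->name) up front keeps an over-long CHAP value from being stored silently truncated. A tiny userspace demonstration of that snprintf() behaviour:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char name[8];
	const char *page = "averylongsecret";

	/* snprintf truncates silently; the return value is the untruncated length */
	int would_write = snprintf(name, sizeof(name), "%s", page);

	printf("stored \"%s\" (%zu bytes), wanted %d bytes\n",
	       name, strlen(name), would_write);
	/* prints: stored "averylon" (7 bytes), wanted 15 bytes */
	return 0;
}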
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index 4eb93b2b6473..e29279e6b577 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -1403,11 +1403,6 @@ old_sess_out:
1403 1403
1404out: 1404out:
1405 stop = kthread_should_stop(); 1405 stop = kthread_should_stop();
1406 if (!stop && signal_pending(current)) {
1407 spin_lock_bh(&np->np_thread_lock);
1408 stop = (np->np_thread_state == ISCSI_NP_THREAD_SHUTDOWN);
1409 spin_unlock_bh(&np->np_thread_lock);
1410 }
1411 /* Wait for another socket.. */ 1406 /* Wait for another socket.. */
1412 if (!stop) 1407 if (!stop)
1413 return 1; 1408 return 1;
@@ -1415,7 +1410,6 @@ exit:
1415 iscsi_stop_login_thread_timer(np); 1410 iscsi_stop_login_thread_timer(np);
1416 spin_lock_bh(&np->np_thread_lock); 1411 spin_lock_bh(&np->np_thread_lock);
1417 np->np_thread_state = ISCSI_NP_THREAD_EXIT; 1412 np->np_thread_state = ISCSI_NP_THREAD_EXIT;
1418 np->np_thread = NULL;
1419 spin_unlock_bh(&np->np_thread_lock); 1413 spin_unlock_bh(&np->np_thread_lock);
1420 1414
1421 return 0; 1415 return 0;
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 207b340498a3..d06de84b069b 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -1106,6 +1106,11 @@ int se_dev_set_block_size(struct se_device *dev, u32 block_size)
1106 dev->dev_attrib.block_size = block_size; 1106 dev->dev_attrib.block_size = block_size;
1107 pr_debug("dev[%p]: SE Device block_size changed to %u\n", 1107 pr_debug("dev[%p]: SE Device block_size changed to %u\n",
1108 dev, block_size); 1108 dev, block_size);
1109
1110 if (dev->dev_attrib.max_bytes_per_io)
1111 dev->dev_attrib.hw_max_sectors =
1112 dev->dev_attrib.max_bytes_per_io / block_size;
1113
1109 return 0; 1114 return 0;
1110} 1115}
1111 1116
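Note: the added recalculation keeps hw_max_sectors consistent with the backend's byte limit when the logical block size is changed after configuration. With the FILEIO limit introduced below (max_bytes_per_io = 8388608 bytes), a 512-byte block size gives 8388608 / 512 = 16384 sectors, while switching to 4096-byte blocks drops hw_max_sectors to 2048.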
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index 0e34cda3271e..78241a53b555 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -66,9 +66,8 @@ static int fd_attach_hba(struct se_hba *hba, u32 host_id)
66 pr_debug("CORE_HBA[%d] - TCM FILEIO HBA Driver %s on Generic" 66 pr_debug("CORE_HBA[%d] - TCM FILEIO HBA Driver %s on Generic"
67 " Target Core Stack %s\n", hba->hba_id, FD_VERSION, 67 " Target Core Stack %s\n", hba->hba_id, FD_VERSION,
68 TARGET_CORE_MOD_VERSION); 68 TARGET_CORE_MOD_VERSION);
69 pr_debug("CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic" 69 pr_debug("CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic\n",
70 " MaxSectors: %u\n", 70 hba->hba_id, fd_host->fd_host_id);
71 hba->hba_id, fd_host->fd_host_id, FD_MAX_SECTORS);
72 71
73 return 0; 72 return 0;
74} 73}
@@ -220,7 +219,8 @@ static int fd_configure_device(struct se_device *dev)
220 } 219 }
221 220
222 dev->dev_attrib.hw_block_size = fd_dev->fd_block_size; 221 dev->dev_attrib.hw_block_size = fd_dev->fd_block_size;
223 dev->dev_attrib.hw_max_sectors = FD_MAX_SECTORS; 222 dev->dev_attrib.max_bytes_per_io = FD_MAX_BYTES;
223 dev->dev_attrib.hw_max_sectors = FD_MAX_BYTES / fd_dev->fd_block_size;
224 dev->dev_attrib.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH; 224 dev->dev_attrib.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH;
225 225
226 if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) { 226 if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) {
diff --git a/drivers/target/target_core_file.h b/drivers/target/target_core_file.h
index 37ffc5bd2399..d7772c167685 100644
--- a/drivers/target/target_core_file.h
+++ b/drivers/target/target_core_file.h
@@ -7,7 +7,10 @@
7#define FD_DEVICE_QUEUE_DEPTH 32 7#define FD_DEVICE_QUEUE_DEPTH 32
8#define FD_MAX_DEVICE_QUEUE_DEPTH 128 8#define FD_MAX_DEVICE_QUEUE_DEPTH 128
9#define FD_BLOCKSIZE 512 9#define FD_BLOCKSIZE 512
10#define FD_MAX_SECTORS 2048 10/*
11 * Limited by the number of iovecs (2048) per vfs_[writev,readv] call
12 */
13#define FD_MAX_BYTES 8388608
11 14
12#define RRF_EMULATE_CDB 0x01 15#define RRF_EMULATE_CDB 0x01
13#define RRF_GOT_LBA 0x02 16#define RRF_GOT_LBA 0x02
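Note: the 8388608-byte figure follows directly from the iovec limit stated in the comment — assuming one 4 KiB page per iovec, 2048 iovecs x 4096 bytes = 8388608 bytes (8 MiB), which is why fd_configure_device() above derives hw_max_sectors as FD_MAX_BYTES / fd_block_size instead of using a fixed sector count.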
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index f697f8baec54..2a573de19a9f 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -278,7 +278,6 @@ struct se_node_acl *core_tpg_check_initiator_node_acl(
278 snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname); 278 snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
279 acl->se_tpg = tpg; 279 acl->se_tpg = tpg;
280 acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX); 280 acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
281 spin_lock_init(&acl->stats_lock);
282 acl->dynamic_node_acl = 1; 281 acl->dynamic_node_acl = 1;
283 282
284 tpg->se_tpg_tfo->set_default_node_attributes(acl); 283 tpg->se_tpg_tfo->set_default_node_attributes(acl);
@@ -406,7 +405,6 @@ struct se_node_acl *core_tpg_add_initiator_node_acl(
406 snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname); 405 snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
407 acl->se_tpg = tpg; 406 acl->se_tpg = tpg;
408 acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX); 407 acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
409 spin_lock_init(&acl->stats_lock);
410 408
411 tpg->se_tpg_tfo->set_default_node_attributes(acl); 409 tpg->se_tpg_tfo->set_default_node_attributes(acl);
412 410
@@ -658,15 +656,9 @@ static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg)
658 spin_lock_init(&lun->lun_sep_lock); 656 spin_lock_init(&lun->lun_sep_lock);
659 init_completion(&lun->lun_ref_comp); 657 init_completion(&lun->lun_ref_comp);
660 658
661 ret = percpu_ref_init(&lun->lun_ref, core_tpg_lun_ref_release);
662 if (ret < 0)
663 return ret;
664
665 ret = core_tpg_post_addlun(se_tpg, lun, lun_access, dev); 659 ret = core_tpg_post_addlun(se_tpg, lun, lun_access, dev);
666 if (ret < 0) { 660 if (ret < 0)
667 percpu_ref_cancel_init(&lun->lun_ref);
668 return ret; 661 return ret;
669 }
670 662
671 return 0; 663 return 0;
672} 664}
diff --git a/drivers/tty/amiserial.c b/drivers/tty/amiserial.c
index 2b86f8e0fb58..71630a2af42c 100644
--- a/drivers/tty/amiserial.c
+++ b/drivers/tty/amiserial.c
@@ -1855,6 +1855,9 @@ static struct console sercons = {
1855 */ 1855 */
1856static int __init amiserial_console_init(void) 1856static int __init amiserial_console_init(void)
1857{ 1857{
1858 if (!MACH_IS_AMIGA)
1859 return -ENODEV;
1860
1858 register_console(&sercons); 1861 register_console(&sercons);
1859 return 0; 1862 return 0;
1860} 1863}
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index 7cdd1eb9406c..34aacaaae14a 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -93,6 +93,7 @@ struct n_tty_data {
93 size_t canon_head; 93 size_t canon_head;
94 size_t echo_head; 94 size_t echo_head;
95 size_t echo_commit; 95 size_t echo_commit;
96 size_t echo_mark;
96 DECLARE_BITMAP(char_map, 256); 97 DECLARE_BITMAP(char_map, 256);
97 98
98 /* private to n_tty_receive_overrun (single-threaded) */ 99 /* private to n_tty_receive_overrun (single-threaded) */
@@ -336,6 +337,7 @@ static void reset_buffer_flags(struct n_tty_data *ldata)
336{ 337{
337 ldata->read_head = ldata->canon_head = ldata->read_tail = 0; 338 ldata->read_head = ldata->canon_head = ldata->read_tail = 0;
338 ldata->echo_head = ldata->echo_tail = ldata->echo_commit = 0; 339 ldata->echo_head = ldata->echo_tail = ldata->echo_commit = 0;
340 ldata->echo_mark = 0;
339 ldata->line_start = 0; 341 ldata->line_start = 0;
340 342
341 ldata->erasing = 0; 343 ldata->erasing = 0;
@@ -768,7 +770,7 @@ static size_t __process_echoes(struct tty_struct *tty)
768 * data at the tail to prevent a subsequent overrun */ 770 * data at the tail to prevent a subsequent overrun */
769 while (ldata->echo_commit - tail >= ECHO_DISCARD_WATERMARK) { 771 while (ldata->echo_commit - tail >= ECHO_DISCARD_WATERMARK) {
770 if (echo_buf(ldata, tail) == ECHO_OP_START) { 772 if (echo_buf(ldata, tail) == ECHO_OP_START) {
771 if (echo_buf(ldata, tail) == ECHO_OP_ERASE_TAB) 773 if (echo_buf(ldata, tail + 1) == ECHO_OP_ERASE_TAB)
772 tail += 3; 774 tail += 3;
773 else 775 else
774 tail += 2; 776 tail += 2;
@@ -787,6 +789,7 @@ static void commit_echoes(struct tty_struct *tty)
787 size_t head; 789 size_t head;
788 790
789 head = ldata->echo_head; 791 head = ldata->echo_head;
792 ldata->echo_mark = head;
790 old = ldata->echo_commit - ldata->echo_tail; 793 old = ldata->echo_commit - ldata->echo_tail;
791 794
792 /* Process committed echoes if the accumulated # of bytes 795 /* Process committed echoes if the accumulated # of bytes
@@ -810,10 +813,12 @@ static void process_echoes(struct tty_struct *tty)
810 struct n_tty_data *ldata = tty->disc_data; 813 struct n_tty_data *ldata = tty->disc_data;
811 size_t echoed; 814 size_t echoed;
812 815
813 if (!L_ECHO(tty) || ldata->echo_commit == ldata->echo_tail) 816 if ((!L_ECHO(tty) && !L_ECHONL(tty)) ||
817 ldata->echo_mark == ldata->echo_tail)
814 return; 818 return;
815 819
816 mutex_lock(&ldata->output_lock); 820 mutex_lock(&ldata->output_lock);
821 ldata->echo_commit = ldata->echo_mark;
817 echoed = __process_echoes(tty); 822 echoed = __process_echoes(tty);
818 mutex_unlock(&ldata->output_lock); 823 mutex_unlock(&ldata->output_lock);
819 824
@@ -821,11 +826,13 @@ static void process_echoes(struct tty_struct *tty)
821 tty->ops->flush_chars(tty); 826 tty->ops->flush_chars(tty);
822} 827}
823 828
829/* NB: echo_mark and echo_head should be equivalent here */
824static void flush_echoes(struct tty_struct *tty) 830static void flush_echoes(struct tty_struct *tty)
825{ 831{
826 struct n_tty_data *ldata = tty->disc_data; 832 struct n_tty_data *ldata = tty->disc_data;
827 833
828 if (!L_ECHO(tty) || ldata->echo_commit == ldata->echo_head) 834 if ((!L_ECHO(tty) && !L_ECHONL(tty)) ||
835 ldata->echo_commit == ldata->echo_head)
829 return; 836 return;
830 837
831 mutex_lock(&ldata->output_lock); 838 mutex_lock(&ldata->output_lock);
@@ -1998,7 +2005,10 @@ static int canon_copy_from_read_buf(struct tty_struct *tty,
1998 found = 1; 2005 found = 1;
1999 2006
2000 size = N_TTY_BUF_SIZE - tail; 2007 size = N_TTY_BUF_SIZE - tail;
2001 n = (found + eol + size) & (N_TTY_BUF_SIZE - 1); 2008 n = eol - tail;
2009 if (n > 4096)
2010 n += 4096;
2011 n += found;
2002 c = n; 2012 c = n;
2003 2013
2004 if (found && read_buf(ldata, eol) == __DISABLED_CHAR) { 2014 if (found && read_buf(ldata, eol) == __DISABLED_CHAR) {
@@ -2243,18 +2253,19 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
2243 if (time) 2253 if (time)
2244 timeout = time; 2254 timeout = time;
2245 } 2255 }
2246 mutex_unlock(&ldata->atomic_read_lock); 2256 n_tty_set_room(tty);
2247 remove_wait_queue(&tty->read_wait, &wait); 2257 up_read(&tty->termios_rwsem);
2248 2258
2259 remove_wait_queue(&tty->read_wait, &wait);
2249 if (!waitqueue_active(&tty->read_wait)) 2260 if (!waitqueue_active(&tty->read_wait))
2250 ldata->minimum_to_wake = minimum; 2261 ldata->minimum_to_wake = minimum;
2251 2262
2263 mutex_unlock(&ldata->atomic_read_lock);
2264
2252 __set_current_state(TASK_RUNNING); 2265 __set_current_state(TASK_RUNNING);
2253 if (b - buf) 2266 if (b - buf)
2254 retval = b - buf; 2267 retval = b - buf;
2255 2268
2256 n_tty_set_room(tty);
2257 up_read(&tty->termios_rwsem);
2258 return retval; 2269 return retval;
2259} 2270}
2260 2271
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
index 4658e3e0ec42..06525f10e364 100644
--- a/drivers/tty/serial/8250/8250_dw.c
+++ b/drivers/tty/serial/8250/8250_dw.c
@@ -96,7 +96,8 @@ static void dw8250_serial_out(struct uart_port *p, int offset, int value)
96 if (offset == UART_LCR) { 96 if (offset == UART_LCR) {
97 int tries = 1000; 97 int tries = 1000;
98 while (tries--) { 98 while (tries--) {
99 if (value == p->serial_in(p, UART_LCR)) 99 unsigned int lcr = p->serial_in(p, UART_LCR);
100 if ((value & ~UART_LCR_SPAR) == (lcr & ~UART_LCR_SPAR))
100 return; 101 return;
101 dw8250_force_idle(p); 102 dw8250_force_idle(p);
102 writeb(value, p->membase + (UART_LCR << p->regshift)); 103 writeb(value, p->membase + (UART_LCR << p->regshift));
@@ -132,7 +133,8 @@ static void dw8250_serial_out32(struct uart_port *p, int offset, int value)
132 if (offset == UART_LCR) { 133 if (offset == UART_LCR) {
133 int tries = 1000; 134 int tries = 1000;
134 while (tries--) { 135 while (tries--) {
135 if (value == p->serial_in(p, UART_LCR)) 136 unsigned int lcr = p->serial_in(p, UART_LCR);
137 if ((value & ~UART_LCR_SPAR) == (lcr & ~UART_LCR_SPAR))
136 return; 138 return;
137 dw8250_force_idle(p); 139 dw8250_force_idle(p);
138 writel(value, p->membase + (UART_LCR << p->regshift)); 140 writel(value, p->membase + (UART_LCR << p->regshift));
@@ -455,6 +457,8 @@ MODULE_DEVICE_TABLE(of, dw8250_of_match);
455static const struct acpi_device_id dw8250_acpi_match[] = { 457static const struct acpi_device_id dw8250_acpi_match[] = {
456 { "INT33C4", 0 }, 458 { "INT33C4", 0 },
457 { "INT33C5", 0 }, 459 { "INT33C5", 0 },
460 { "INT3434", 0 },
461 { "INT3435", 0 },
458 { "80860F0A", 0 }, 462 { "80860F0A", 0 },
459 { }, 463 { },
460}; 464};
diff --git a/drivers/tty/serial/8250/Kconfig b/drivers/tty/serial/8250/Kconfig
index f3b306efaa59..23329918f229 100644
--- a/drivers/tty/serial/8250/Kconfig
+++ b/drivers/tty/serial/8250/Kconfig
@@ -41,7 +41,7 @@ config SERIAL_8250_DEPRECATED_OPTIONS
41 accept kernel parameters in both forms like 8250_core.nr_uarts=4 and 41 accept kernel parameters in both forms like 8250_core.nr_uarts=4 and
42 8250.nr_uarts=4. We now renamed the module back to 8250, but if 42 8250.nr_uarts=4. We now renamed the module back to 8250, but if
43 anybody noticed in 3.7 and changed their userspace we still have to 43 anybody noticed in 3.7 and changed their userspace we still have to
44 keep the 8350_core.* options around until they revert the changes 44 keep the 8250_core.* options around until they revert the changes
45 they already did. 45 they already did.
46 46
47 If 8250 is built as a module, this adds 8250_core alias instead. 47 If 8250 is built as a module, this adds 8250_core alias instead.
diff --git a/drivers/tty/serial/pmac_zilog.c b/drivers/tty/serial/pmac_zilog.c
index 481b781b26e3..e9d420ff3931 100644
--- a/drivers/tty/serial/pmac_zilog.c
+++ b/drivers/tty/serial/pmac_zilog.c
@@ -2052,6 +2052,9 @@ static int __init pmz_console_init(void)
2052 /* Probe ports */ 2052 /* Probe ports */
2053 pmz_probe(); 2053 pmz_probe();
2054 2054
2055 if (pmz_ports_count == 0)
2056 return -ENODEV;
2057
2055 /* TODO: Autoprobe console based on OF */ 2058 /* TODO: Autoprobe console based on OF */
2056 /* pmz_console.index = i; */ 2059 /* pmz_console.index = i; */
2057 register_console(&pmz_console); 2060 register_console(&pmz_console);
diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
index e46e9f3f19b9..f619ad5b5eae 100644
--- a/drivers/tty/serial/xilinx_uartps.c
+++ b/drivers/tty/serial/xilinx_uartps.c
@@ -240,6 +240,7 @@ static irqreturn_t xuartps_isr(int irq, void *dev_id)
240 continue; 240 continue;
241 } 241 }
242 242
243#ifdef SUPPORT_SYSRQ
243 /* 244 /*
244 * uart_handle_sysrq_char() doesn't work if 245 * uart_handle_sysrq_char() doesn't work if
245 * spinlocked, for some reason 246 * spinlocked, for some reason
@@ -253,6 +254,7 @@ static irqreturn_t xuartps_isr(int irq, void *dev_id)
253 } 254 }
254 spin_lock(&port->lock); 255 spin_lock(&port->lock);
255 } 256 }
257#endif
256 258
257 port->icount.rx++; 259 port->icount.rx++;
258 260
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 3a1a01af9a80..c74a00ad7add 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -2086,6 +2086,7 @@ retry_open:
2086 filp->f_op = &tty_fops; 2086 filp->f_op = &tty_fops;
2087 goto retry_open; 2087 goto retry_open;
2088 } 2088 }
2089 clear_bit(TTY_HUPPED, &tty->flags);
2089 tty_unlock(tty); 2090 tty_unlock(tty);
2090 2091
2091 2092
diff --git a/drivers/tty/tty_ldsem.c b/drivers/tty/tty_ldsem.c
index 22fad8ad5ac2..d8a55e87877f 100644
--- a/drivers/tty/tty_ldsem.c
+++ b/drivers/tty/tty_ldsem.c
@@ -86,11 +86,21 @@ static inline long ldsem_atomic_update(long delta, struct ld_semaphore *sem)
86 return atomic_long_add_return(delta, (atomic_long_t *)&sem->count); 86 return atomic_long_add_return(delta, (atomic_long_t *)&sem->count);
87} 87}
88 88
89/*
90 * ldsem_cmpxchg() updates @*old with the last-known sem->count value.
91 * Returns 1 if count was successfully changed; @*old will have @new value.
92 * Returns 0 if count was not changed; @*old will have most recent sem->count
93 */
89static inline int ldsem_cmpxchg(long *old, long new, struct ld_semaphore *sem) 94static inline int ldsem_cmpxchg(long *old, long new, struct ld_semaphore *sem)
90{ 95{
91 long tmp = *old; 96 long tmp = atomic_long_cmpxchg(&sem->count, *old, new);
92 *old = atomic_long_cmpxchg(&sem->count, *old, new); 97 if (tmp == *old) {
93 return *old == tmp; 98 *old = new;
99 return 1;
100 } else {
101 *old = tmp;
102 return 0;
103 }
94} 104}
95 105
96/* 106/*
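Note: the rewritten ldsem_cmpxchg() now has the familiar compare-and-swap-loop contract — on failure the caller's cached value is refreshed with the count that was actually observed, so a retry can recompute its target without an extra read. C11's atomic_compare_exchange_strong() behaves the same way, as this small userspace sketch shows (the decrement is illustrative, not the ldsem arithmetic):

#include <stdatomic.h>
#include <stdio.h>

int main(void)
{
	atomic_long count = 4;	/* stands in for sem->count */
	long old = 10;		/* deliberately stale snapshot */

	/* fails because count != old; old is refreshed to the real value (4) */
	if (!atomic_compare_exchange_strong(&count, &old, old - 1))
		printf("first try failed, old refreshed to %ld\n", old);

	/* retry with the refreshed snapshot; this one succeeds */
	if (atomic_compare_exchange_strong(&count, &old, old - 1))
		printf("count is now %ld\n", atomic_load(&count));

	return 0;
}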
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
index 67beb8444930..f7beb6eb40c7 100644
--- a/drivers/uio/uio.c
+++ b/drivers/uio/uio.c
@@ -653,6 +653,8 @@ static int uio_mmap_physical(struct vm_area_struct *vma)
653 return -EINVAL; 653 return -EINVAL;
654 mem = idev->info->mem + mi; 654 mem = idev->info->mem + mi;
655 655
656 if (mem->addr & ~PAGE_MASK)
657 return -ENODEV;
656 if (vma->vm_end - vma->vm_start > mem->size) 658 if (vma->vm_end - vma->vm_start > mem->size)
657 return -EINVAL; 659 return -EINVAL;
658 660
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index 5d8981c5235e..6e73f8cd60e5 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -642,6 +642,10 @@ static int ci_hdrc_probe(struct platform_device *pdev)
642 : CI_ROLE_GADGET; 642 : CI_ROLE_GADGET;
643 } 643 }
644 644
645 /* only update vbus status for peripheral */
646 if (ci->role == CI_ROLE_GADGET)
647 ci_handle_vbus_change(ci);
648
645 ret = ci_role_start(ci, ci->role); 649 ret = ci_role_start(ci, ci->role);
646 if (ret) { 650 if (ret) {
647 dev_err(dev, "can't start %s role\n", ci_role(ci)->name); 651 dev_err(dev, "can't start %s role\n", ci_role(ci)->name);
diff --git a/drivers/usb/chipidea/host.c b/drivers/usb/chipidea/host.c
index 59e6020ea753..526cd77563d8 100644
--- a/drivers/usb/chipidea/host.c
+++ b/drivers/usb/chipidea/host.c
@@ -88,7 +88,8 @@ static int host_start(struct ci_hdrc *ci)
88 return ret; 88 return ret;
89 89
90disable_reg: 90disable_reg:
91 regulator_disable(ci->platdata->reg_vbus); 91 if (ci->platdata->reg_vbus)
92 regulator_disable(ci->platdata->reg_vbus);
92 93
93put_hcd: 94put_hcd:
94 usb_put_hcd(hcd); 95 usb_put_hcd(hcd);
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
index b34c81969cba..69d20fbb38a2 100644
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -1795,9 +1795,6 @@ static int udc_start(struct ci_hdrc *ci)
1795 pm_runtime_no_callbacks(&ci->gadget.dev); 1795 pm_runtime_no_callbacks(&ci->gadget.dev);
1796 pm_runtime_enable(&ci->gadget.dev); 1796 pm_runtime_enable(&ci->gadget.dev);
1797 1797
1798 /* Update ci->vbus_active */
1799 ci_handle_vbus_change(ci);
1800
1801 return retval; 1798 return retval;
1802 1799
1803destroy_eps: 1800destroy_eps:
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 3e7560f004f8..e8404319ca68 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1515,6 +1515,8 @@ static int acm_reset_resume(struct usb_interface *intf)
1515 1515
1516static const struct usb_device_id acm_ids[] = { 1516static const struct usb_device_id acm_ids[] = {
1517 /* quirky and broken devices */ 1517 /* quirky and broken devices */
1518 { USB_DEVICE(0x17ef, 0x7000), /* Lenovo USB modem */
1519 .driver_info = NO_UNION_NORMAL, },/* has no union descriptor */
1518 { USB_DEVICE(0x0870, 0x0001), /* Metricom GS Modem */ 1520 { USB_DEVICE(0x0870, 0x0001), /* Metricom GS Modem */
1519 .driver_info = NO_UNION_NORMAL, /* has no union descriptor */ 1521 .driver_info = NO_UNION_NORMAL, /* has no union descriptor */
1520 }, 1522 },
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index 4d387596f3f0..0b23a8639311 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -854,13 +854,11 @@ static int wdm_manage_power(struct usb_interface *intf, int on)
854{ 854{
855 /* need autopm_get/put here to ensure the usbcore sees the new value */ 855 /* need autopm_get/put here to ensure the usbcore sees the new value */
856 int rv = usb_autopm_get_interface(intf); 856 int rv = usb_autopm_get_interface(intf);
857 if (rv < 0)
858 goto err;
859 857
860 intf->needs_remote_wakeup = on; 858 intf->needs_remote_wakeup = on;
861 usb_autopm_put_interface(intf); 859 if (!rv)
862err: 860 usb_autopm_put_interface(intf);
863 return rv; 861 return 0;
864} 862}
865 863
866static int wdm_probe(struct usb_interface *intf, const struct usb_device_id *id) 864static int wdm_probe(struct usb_interface *intf, const struct usb_device_id *id)
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index a7c04e24ca48..bd9dc3504b51 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -4832,8 +4832,9 @@ static void hub_events(void)
4832 hub->ports[i - 1]->child; 4832 hub->ports[i - 1]->child;
4833 4833
4834 dev_dbg(hub_dev, "warm reset port %d\n", i); 4834 dev_dbg(hub_dev, "warm reset port %d\n", i);
4835 if (!udev || !(portstatus & 4835 if (!udev ||
4836 USB_PORT_STAT_CONNECTION)) { 4836 !(portstatus & USB_PORT_STAT_CONNECTION) ||
4837 udev->state == USB_STATE_NOTATTACHED) {
4837 status = hub_port_reset(hub, i, 4838 status = hub_port_reset(hub, i,
4838 NULL, HUB_BH_RESET_TIME, 4839 NULL, HUB_BH_RESET_TIME,
4839 true); 4840 true);
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 74f9cf02da07..a49217ae3533 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -455,9 +455,6 @@ static int dwc3_probe(struct platform_device *pdev)
455 if (IS_ERR(regs)) 455 if (IS_ERR(regs))
456 return PTR_ERR(regs); 456 return PTR_ERR(regs);
457 457
458 usb_phy_set_suspend(dwc->usb2_phy, 0);
459 usb_phy_set_suspend(dwc->usb3_phy, 0);
460
461 spin_lock_init(&dwc->lock); 458 spin_lock_init(&dwc->lock);
462 platform_set_drvdata(pdev, dwc); 459 platform_set_drvdata(pdev, dwc);
463 460
@@ -488,6 +485,9 @@ static int dwc3_probe(struct platform_device *pdev)
488 goto err0; 485 goto err0;
489 } 486 }
490 487
488 usb_phy_set_suspend(dwc->usb2_phy, 0);
489 usb_phy_set_suspend(dwc->usb3_phy, 0);
490
491 ret = dwc3_event_buffers_setup(dwc); 491 ret = dwc3_event_buffers_setup(dwc);
492 if (ret) { 492 if (ret) {
493 dev_err(dwc->dev, "failed to setup event buffers\n"); 493 dev_err(dwc->dev, "failed to setup event buffers\n");
@@ -569,6 +569,8 @@ err2:
569 dwc3_event_buffers_cleanup(dwc); 569 dwc3_event_buffers_cleanup(dwc);
570 570
571err1: 571err1:
572 usb_phy_set_suspend(dwc->usb2_phy, 1);
573 usb_phy_set_suspend(dwc->usb3_phy, 1);
572 dwc3_core_exit(dwc); 574 dwc3_core_exit(dwc);
573 575
574err0: 576err0:
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index 95f7649c71a7..21a352079bc2 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -459,6 +459,8 @@ static int dwc3_ep0_handle_feature(struct dwc3 *dwc,
459 dep = dwc3_wIndex_to_dep(dwc, wIndex); 459 dep = dwc3_wIndex_to_dep(dwc, wIndex);
460 if (!dep) 460 if (!dep)
461 return -EINVAL; 461 return -EINVAL;
462 if (set == 0 && (dep->flags & DWC3_EP_WEDGE))
463 break;
462 ret = __dwc3_gadget_ep_set_halt(dep, set); 464 ret = __dwc3_gadget_ep_set_halt(dep, set);
463 if (ret) 465 if (ret)
464 return -EINVAL; 466 return -EINVAL;
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 5452c0fce360..02e44fcaf205 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -1200,9 +1200,6 @@ int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value)
1200 else 1200 else
1201 dep->flags |= DWC3_EP_STALL; 1201 dep->flags |= DWC3_EP_STALL;
1202 } else { 1202 } else {
1203 if (dep->flags & DWC3_EP_WEDGE)
1204 return 0;
1205
1206 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, 1203 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
1207 DWC3_DEPCMD_CLEARSTALL, &params); 1204 DWC3_DEPCMD_CLEARSTALL, &params);
1208 if (ret) 1205 if (ret)
@@ -1210,7 +1207,7 @@ int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value)
1210 value ? "set" : "clear", 1207 value ? "set" : "clear",
1211 dep->name); 1208 dep->name);
1212 else 1209 else
1213 dep->flags &= ~DWC3_EP_STALL; 1210 dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE);
1214 } 1211 }
1215 1212
1216 return ret; 1213 return ret;
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index a91e6422f930..f66d96ad1f51 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -682,6 +682,7 @@ config USB_CONFIGFS_PHONET
682config USB_CONFIGFS_MASS_STORAGE 682config USB_CONFIGFS_MASS_STORAGE
683 boolean "Mass storage" 683 boolean "Mass storage"
684 depends on USB_CONFIGFS 684 depends on USB_CONFIGFS
685 depends on BLOCK
685 select USB_F_MASS_STORAGE 686 select USB_F_MASS_STORAGE
686 help 687 help
687 The Mass Storage Gadget acts as a USB Mass Storage disk drive. 688 The Mass Storage Gadget acts as a USB Mass Storage disk drive.
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index 3e7ae707f691..2018ba1a2172 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -593,6 +593,7 @@ static void reset_config(struct usb_composite_dev *cdev)
593 bitmap_zero(f->endpoints, 32); 593 bitmap_zero(f->endpoints, 32);
594 } 594 }
595 cdev->config = NULL; 595 cdev->config = NULL;
596 cdev->delayed_status = 0;
596} 597}
597 598
598static int set_config(struct usb_composite_dev *cdev, 599static int set_config(struct usb_composite_dev *cdev,
diff --git a/drivers/usb/gadget/f_fs.c b/drivers/usb/gadget/f_fs.c
index 774e8b89cdb5..241fc873ffa4 100644
--- a/drivers/usb/gadget/f_fs.c
+++ b/drivers/usb/gadget/f_fs.c
@@ -1304,7 +1304,7 @@ static struct ffs_data *ffs_data_new(void)
1304{ 1304{
1305 struct ffs_data *ffs = kzalloc(sizeof *ffs, GFP_KERNEL); 1305 struct ffs_data *ffs = kzalloc(sizeof *ffs, GFP_KERNEL);
1306 if (unlikely(!ffs)) 1306 if (unlikely(!ffs))
1307 return 0; 1307 return NULL;
1308 1308
1309 ENTER(); 1309 ENTER();
1310 1310
diff --git a/drivers/usb/gadget/f_mass_storage.c b/drivers/usb/gadget/f_mass_storage.c
index a03ba2c83589..b96393908860 100644
--- a/drivers/usb/gadget/f_mass_storage.c
+++ b/drivers/usb/gadget/f_mass_storage.c
@@ -523,7 +523,7 @@ static int fsg_setup(struct usb_function *f,
523 */ 523 */
524 DBG(fsg, "bulk reset request\n"); 524 DBG(fsg, "bulk reset request\n");
525 raise_exception(fsg->common, FSG_STATE_RESET); 525 raise_exception(fsg->common, FSG_STATE_RESET);
526 return DELAYED_STATUS; 526 return USB_GADGET_DELAYED_STATUS;
527 527
528 case US_BULK_GET_MAX_LUN: 528 case US_BULK_GET_MAX_LUN:
529 if (ctrl->bRequestType != 529 if (ctrl->bRequestType !=
@@ -602,13 +602,14 @@ static bool start_out_transfer(struct fsg_common *common, struct fsg_buffhd *bh)
602 return true; 602 return true;
603} 603}
604 604
605static int sleep_thread(struct fsg_common *common) 605static int sleep_thread(struct fsg_common *common, bool can_freeze)
606{ 606{
607 int rc = 0; 607 int rc = 0;
608 608
609 /* Wait until a signal arrives or we are woken up */ 609 /* Wait until a signal arrives or we are woken up */
610 for (;;) { 610 for (;;) {
611 try_to_freeze(); 611 if (can_freeze)
612 try_to_freeze();
612 set_current_state(TASK_INTERRUPTIBLE); 613 set_current_state(TASK_INTERRUPTIBLE);
613 if (signal_pending(current)) { 614 if (signal_pending(current)) {
614 rc = -EINTR; 615 rc = -EINTR;
@@ -682,7 +683,7 @@ static int do_read(struct fsg_common *common)
682 /* Wait for the next buffer to become available */ 683 /* Wait for the next buffer to become available */
683 bh = common->next_buffhd_to_fill; 684 bh = common->next_buffhd_to_fill;
684 while (bh->state != BUF_STATE_EMPTY) { 685 while (bh->state != BUF_STATE_EMPTY) {
685 rc = sleep_thread(common); 686 rc = sleep_thread(common, false);
686 if (rc) 687 if (rc)
687 return rc; 688 return rc;
688 } 689 }
@@ -937,7 +938,7 @@ static int do_write(struct fsg_common *common)
937 } 938 }
938 939
939 /* Wait for something to happen */ 940 /* Wait for something to happen */
940 rc = sleep_thread(common); 941 rc = sleep_thread(common, false);
941 if (rc) 942 if (rc)
942 return rc; 943 return rc;
943 } 944 }
@@ -1504,7 +1505,7 @@ static int throw_away_data(struct fsg_common *common)
1504 } 1505 }
1505 1506
1506 /* Otherwise wait for something to happen */ 1507 /* Otherwise wait for something to happen */
1507 rc = sleep_thread(common); 1508 rc = sleep_thread(common, true);
1508 if (rc) 1509 if (rc)
1509 return rc; 1510 return rc;
1510 } 1511 }
@@ -1625,7 +1626,7 @@ static int send_status(struct fsg_common *common)
1625 /* Wait for the next buffer to become available */ 1626 /* Wait for the next buffer to become available */
1626 bh = common->next_buffhd_to_fill; 1627 bh = common->next_buffhd_to_fill;
1627 while (bh->state != BUF_STATE_EMPTY) { 1628 while (bh->state != BUF_STATE_EMPTY) {
1628 rc = sleep_thread(common); 1629 rc = sleep_thread(common, true);
1629 if (rc) 1630 if (rc)
1630 return rc; 1631 return rc;
1631 } 1632 }
@@ -1828,7 +1829,7 @@ static int do_scsi_command(struct fsg_common *common)
1828 bh = common->next_buffhd_to_fill; 1829 bh = common->next_buffhd_to_fill;
1829 common->next_buffhd_to_drain = bh; 1830 common->next_buffhd_to_drain = bh;
1830 while (bh->state != BUF_STATE_EMPTY) { 1831 while (bh->state != BUF_STATE_EMPTY) {
1831 rc = sleep_thread(common); 1832 rc = sleep_thread(common, true);
1832 if (rc) 1833 if (rc)
1833 return rc; 1834 return rc;
1834 } 1835 }
@@ -2174,7 +2175,7 @@ static int get_next_command(struct fsg_common *common)
2174 /* Wait for the next buffer to become available */ 2175 /* Wait for the next buffer to become available */
2175 bh = common->next_buffhd_to_fill; 2176 bh = common->next_buffhd_to_fill;
2176 while (bh->state != BUF_STATE_EMPTY) { 2177 while (bh->state != BUF_STATE_EMPTY) {
2177 rc = sleep_thread(common); 2178 rc = sleep_thread(common, true);
2178 if (rc) 2179 if (rc)
2179 return rc; 2180 return rc;
2180 } 2181 }
@@ -2193,7 +2194,7 @@ static int get_next_command(struct fsg_common *common)
2193 2194
2194 /* Wait for the CBW to arrive */ 2195 /* Wait for the CBW to arrive */
2195 while (bh->state != BUF_STATE_FULL) { 2196 while (bh->state != BUF_STATE_FULL) {
2196 rc = sleep_thread(common); 2197 rc = sleep_thread(common, true);
2197 if (rc) 2198 if (rc)
2198 return rc; 2199 return rc;
2199 } 2200 }
@@ -2379,7 +2380,7 @@ static void handle_exception(struct fsg_common *common)
2379 } 2380 }
2380 if (num_active == 0) 2381 if (num_active == 0)
2381 break; 2382 break;
2382 if (sleep_thread(common)) 2383 if (sleep_thread(common, true))
2383 return; 2384 return;
2384 } 2385 }
2385 2386
@@ -2516,7 +2517,7 @@ static int fsg_main_thread(void *common_)
2516 } 2517 }
2517 2518
2518 if (!common->running) { 2519 if (!common->running) {
2519 sleep_thread(common); 2520 sleep_thread(common, true);
2520 continue; 2521 continue;
2521 } 2522 }
2522 2523
@@ -3111,7 +3112,7 @@ static int fsg_bind(struct usb_configuration *c, struct usb_function *f)
3111 fsg->common->can_stall); 3112 fsg->common->can_stall);
3112 if (ret) 3113 if (ret)
3113 return ret; 3114 return ret;
3114 fsg_common_set_inquiry_string(fsg->common, 0, 0); 3115 fsg_common_set_inquiry_string(fsg->common, NULL, NULL);
3115 ret = fsg_common_run_thread(fsg->common); 3116 ret = fsg_common_run_thread(fsg->common);
3116 if (ret) 3117 if (ret)
3117 return ret; 3118 return ret;
diff --git a/drivers/usb/gadget/pxa25x_udc.c b/drivers/usb/gadget/pxa25x_udc.c
index 0ac6064aa3b8..409a3c45a36a 100644
--- a/drivers/usb/gadget/pxa25x_udc.c
+++ b/drivers/usb/gadget/pxa25x_udc.c
@@ -54,6 +54,7 @@
54 */ 54 */
55#ifdef CONFIG_ARCH_PXA 55#ifdef CONFIG_ARCH_PXA
56#include <mach/pxa25x-udc.h> 56#include <mach/pxa25x-udc.h>
57#include <mach/hardware.h>
57#endif 58#endif
58 59
59#ifdef CONFIG_ARCH_LUBBOCK 60#ifdef CONFIG_ARCH_LUBBOCK
diff --git a/drivers/usb/gadget/s3c-hsotg.c b/drivers/usb/gadget/s3c-hsotg.c
index 9875d9c0823f..e20bc109fdd7 100644
--- a/drivers/usb/gadget/s3c-hsotg.c
+++ b/drivers/usb/gadget/s3c-hsotg.c
@@ -1180,6 +1180,7 @@ static int s3c_hsotg_process_req_feature(struct s3c_hsotg *hsotg,
1180} 1180}
1181 1181
1182static void s3c_hsotg_enqueue_setup(struct s3c_hsotg *hsotg); 1182static void s3c_hsotg_enqueue_setup(struct s3c_hsotg *hsotg);
1183static void s3c_hsotg_disconnect(struct s3c_hsotg *hsotg);
1183 1184
1184/** 1185/**
1185 * s3c_hsotg_process_control - process a control request 1186 * s3c_hsotg_process_control - process a control request
@@ -1221,6 +1222,7 @@ static void s3c_hsotg_process_control(struct s3c_hsotg *hsotg,
1221 if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) { 1222 if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
1222 switch (ctrl->bRequest) { 1223 switch (ctrl->bRequest) {
1223 case USB_REQ_SET_ADDRESS: 1224 case USB_REQ_SET_ADDRESS:
1225 s3c_hsotg_disconnect(hsotg);
1224 dcfg = readl(hsotg->regs + DCFG); 1226 dcfg = readl(hsotg->regs + DCFG);
1225 dcfg &= ~DCFG_DevAddr_MASK; 1227 dcfg &= ~DCFG_DevAddr_MASK;
1226 dcfg |= ctrl->wValue << DCFG_DevAddr_SHIFT; 1228 dcfg |= ctrl->wValue << DCFG_DevAddr_SHIFT;
@@ -1245,7 +1247,9 @@ static void s3c_hsotg_process_control(struct s3c_hsotg *hsotg,
1245 /* as a fallback, try delivering it to the driver to deal with */ 1247 /* as a fallback, try delivering it to the driver to deal with */
1246 1248
1247 if (ret == 0 && hsotg->driver) { 1249 if (ret == 0 && hsotg->driver) {
1250 spin_unlock(&hsotg->lock);
1248 ret = hsotg->driver->setup(&hsotg->gadget, ctrl); 1251 ret = hsotg->driver->setup(&hsotg->gadget, ctrl);
1252 spin_lock(&hsotg->lock);
1249 if (ret < 0) 1253 if (ret < 0)
1250 dev_dbg(hsotg->dev, "driver->setup() ret %d\n", ret); 1254 dev_dbg(hsotg->dev, "driver->setup() ret %d\n", ret);
1251 } 1255 }
@@ -1308,10 +1312,12 @@ static void s3c_hsotg_complete_setup(struct usb_ep *ep,
1308 return; 1312 return;
1309 } 1313 }
1310 1314
1315 spin_lock(&hsotg->lock);
1311 if (req->actual == 0) 1316 if (req->actual == 0)
1312 s3c_hsotg_enqueue_setup(hsotg); 1317 s3c_hsotg_enqueue_setup(hsotg);
1313 else 1318 else
1314 s3c_hsotg_process_control(hsotg, req->buf); 1319 s3c_hsotg_process_control(hsotg, req->buf);
1320 spin_unlock(&hsotg->lock);
1315} 1321}
1316 1322
1317/** 1323/**
@@ -2533,7 +2539,6 @@ irq_retry:
2533 writel(GINTSTS_USBSusp, hsotg->regs + GINTSTS); 2539 writel(GINTSTS_USBSusp, hsotg->regs + GINTSTS);
2534 2540
2535 call_gadget(hsotg, suspend); 2541 call_gadget(hsotg, suspend);
2536 s3c_hsotg_disconnect(hsotg);
2537 } 2542 }
2538 2543
2539 if (gintsts & GINTSTS_WkUpInt) { 2544 if (gintsts & GINTSTS_WkUpInt) {
diff --git a/drivers/usb/gadget/storage_common.h b/drivers/usb/gadget/storage_common.h
index c74c2fdbd56e..70c891469f57 100644
--- a/drivers/usb/gadget/storage_common.h
+++ b/drivers/usb/gadget/storage_common.h
@@ -119,10 +119,6 @@ static inline bool fsg_lun_is_open(struct fsg_lun *curlun)
119 return curlun->filp != NULL; 119 return curlun->filp != NULL;
120} 120}
121 121
122/* Big enough to hold our biggest descriptor */
123#define EP0_BUFSIZE 256
124#define DELAYED_STATUS (EP0_BUFSIZE + 999) /* An impossibly large value */
125
126/* Default size of buffer length. */ 122/* Default size of buffer length. */
127#define FSG_BUFLEN ((u32)16384) 123#define FSG_BUFLEN ((u32)16384)
128 124
diff --git a/drivers/usb/gadget/tcm_usb_gadget.c b/drivers/usb/gadget/tcm_usb_gadget.c
index 6c3d7950d2a9..0f8aad78b54f 100644
--- a/drivers/usb/gadget/tcm_usb_gadget.c
+++ b/drivers/usb/gadget/tcm_usb_gadget.c
@@ -370,7 +370,7 @@ err:
370 return -ENOMEM; 370 return -ENOMEM;
371} 371}
372 372
373void bot_cleanup_old_alt(struct f_uas *fu) 373static void bot_cleanup_old_alt(struct f_uas *fu)
374{ 374{
375 if (!(fu->flags & USBG_ENABLED)) 375 if (!(fu->flags & USBG_ENABLED))
376 return; 376 return;
diff --git a/drivers/usb/gadget/zero.c b/drivers/usb/gadget/zero.c
index 0dd07ae1555d..f49b0b61ecc8 100644
--- a/drivers/usb/gadget/zero.c
+++ b/drivers/usb/gadget/zero.c
@@ -91,17 +91,17 @@ static struct usb_zero_options gzero_options = {
91 * functional coverage for the "USBCV" test harness from USB-IF. 91 * functional coverage for the "USBCV" test harness from USB-IF.
92 * It's always set if OTG mode is enabled. 92 * It's always set if OTG mode is enabled.
93 */ 93 */
94unsigned autoresume = DEFAULT_AUTORESUME; 94static unsigned autoresume = DEFAULT_AUTORESUME;
95module_param(autoresume, uint, S_IRUGO); 95module_param(autoresume, uint, S_IRUGO);
96MODULE_PARM_DESC(autoresume, "zero, or seconds before remote wakeup"); 96MODULE_PARM_DESC(autoresume, "zero, or seconds before remote wakeup");
97 97
98/* Maximum Autoresume time */ 98/* Maximum Autoresume time */
99unsigned max_autoresume; 99static unsigned max_autoresume;
100module_param(max_autoresume, uint, S_IRUGO); 100module_param(max_autoresume, uint, S_IRUGO);
101MODULE_PARM_DESC(max_autoresume, "maximum seconds before remote wakeup"); 101MODULE_PARM_DESC(max_autoresume, "maximum seconds before remote wakeup");
102 102
103/* Interval between two remote wakeups */ 103/* Interval between two remote wakeups */
104unsigned autoresume_interval_ms; 104static unsigned autoresume_interval_ms;
105module_param(autoresume_interval_ms, uint, S_IRUGO); 105module_param(autoresume_interval_ms, uint, S_IRUGO);
106MODULE_PARM_DESC(autoresume_interval_ms, 106MODULE_PARM_DESC(autoresume_interval_ms,
107 "milliseconds to increase successive wakeup delays"); 107 "milliseconds to increase successive wakeup delays");
diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c
index 418444ebb1b8..8c356af79409 100644
--- a/drivers/usb/host/ohci-at91.c
+++ b/drivers/usb/host/ohci-at91.c
@@ -136,23 +136,27 @@ static int usb_hcd_at91_probe(const struct hc_driver *driver,
136 struct ohci_hcd *ohci; 136 struct ohci_hcd *ohci;
137 int retval; 137 int retval;
138 struct usb_hcd *hcd = NULL; 138 struct usb_hcd *hcd = NULL;
139 139 struct device *dev = &pdev->dev;
140 if (pdev->num_resources != 2) { 140 struct resource *res;
141 pr_debug("hcd probe: invalid num_resources"); 141 int irq;
142 return -ENODEV; 142
143 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
144 if (!res) {
145 dev_dbg(dev, "hcd probe: missing memory resource\n");
146 return -ENXIO;
143 } 147 }
144 148
145 if ((pdev->resource[0].flags != IORESOURCE_MEM) 149 irq = platform_get_irq(pdev, 0);
146 || (pdev->resource[1].flags != IORESOURCE_IRQ)) { 150 if (irq < 0) {
147 pr_debug("hcd probe: invalid resource type\n"); 151 dev_dbg(dev, "hcd probe: missing irq resource\n");
148 return -ENODEV; 152 return irq;
149 } 153 }
150 154
151 hcd = usb_create_hcd(driver, &pdev->dev, "at91"); 155 hcd = usb_create_hcd(driver, &pdev->dev, "at91");
152 if (!hcd) 156 if (!hcd)
153 return -ENOMEM; 157 return -ENOMEM;
154 hcd->rsrc_start = pdev->resource[0].start; 158 hcd->rsrc_start = res->start;
155 hcd->rsrc_len = resource_size(&pdev->resource[0]); 159 hcd->rsrc_len = resource_size(res);
156 160
157 if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) { 161 if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
158 pr_debug("request_mem_region failed\n"); 162 pr_debug("request_mem_region failed\n");
@@ -199,7 +203,7 @@ static int usb_hcd_at91_probe(const struct hc_driver *driver,
199 ohci->num_ports = board->ports; 203 ohci->num_ports = board->ports;
200 at91_start_hc(pdev); 204 at91_start_hc(pdev);
201 205
202 retval = usb_add_hcd(hcd, pdev->resource[1].start, IRQF_SHARED); 206 retval = usb_add_hcd(hcd, irq, IRQF_SHARED);
203 if (retval == 0) 207 if (retval == 0)
204 return retval; 208 return retval;
205 209
diff --git a/drivers/usb/host/ohci-pxa27x.c b/drivers/usb/host/ohci-pxa27x.c
index e89ac4d4b87e..9b7435f0dcd6 100644
--- a/drivers/usb/host/ohci-pxa27x.c
+++ b/drivers/usb/host/ohci-pxa27x.c
@@ -21,6 +21,7 @@
21 21
22#include <linux/clk.h> 22#include <linux/clk.h>
23#include <linux/device.h> 23#include <linux/device.h>
24#include <linux/dma-mapping.h>
24#include <linux/io.h> 25#include <linux/io.h>
25#include <linux/kernel.h> 26#include <linux/kernel.h>
26#include <linux/module.h> 27#include <linux/module.h>
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index b8dffd59eb25..73f5208714a4 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -128,7 +128,12 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
128 * any other sleep) on Haswell machines with LPT and LPT-LP 128 * any other sleep) on Haswell machines with LPT and LPT-LP
129 * with the new Intel BIOS 129 * with the new Intel BIOS
130 */ 130 */
131 xhci->quirks |= XHCI_SPURIOUS_WAKEUP; 131 /* Limit the quirk to only known vendors, as this triggers
132 * yet another BIOS bug on some other machines
133 * https://bugzilla.kernel.org/show_bug.cgi?id=66171
134 */
135 if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP)
136 xhci->quirks |= XHCI_SPURIOUS_WAKEUP;
132 } 137 }
133 if (pdev->vendor == PCI_VENDOR_ID_ETRON && 138 if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
134 pdev->device == PCI_DEVICE_ID_ASROCK_P67) { 139 pdev->device == PCI_DEVICE_ID_ASROCK_P67) {
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 1e2f3f495843..53c2e296467f 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -2973,8 +2973,58 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
2973 } 2973 }
2974 2974
2975 while (1) { 2975 while (1) {
2976 if (room_on_ring(xhci, ep_ring, num_trbs)) 2976 if (room_on_ring(xhci, ep_ring, num_trbs)) {
2977 break; 2977 union xhci_trb *trb = ep_ring->enqueue;
2978 unsigned int usable = ep_ring->enq_seg->trbs +
2979 TRBS_PER_SEGMENT - 1 - trb;
2980 u32 nop_cmd;
2981
2982 /*
2983 * Section 4.11.7.1 TD Fragments states that a link
2984 * TRB must only occur at the boundary between
2985 * data bursts (eg 512 bytes for 480M).
2986 * While it is possible to split a large fragment
2987 * we don't know the size yet.
2988 * Simplest solution is to fill the trb before the
2989 * LINK with nop commands.
2990 */
2991 if (num_trbs == 1 || num_trbs <= usable || usable == 0)
2992 break;
2993
2994 if (ep_ring->type != TYPE_BULK)
2995 /*
2996 * While isoc transfers might have a buffer that
2997 * crosses a 64k boundary it is unlikely.
2998 * Since we can't add NOPs without generating
2999 * gaps in the traffic just hope it never
3000 * happens at the end of the ring.
3001 * This could be fixed by writing a LINK TRB
3002 * instead of the first NOP - however the
3003 * TRB_TYPE_LINK_LE32() calls would all need
3004 * changing to check the ring length.
3005 */
3006 break;
3007
3008 if (num_trbs >= TRBS_PER_SEGMENT) {
3009 xhci_err(xhci, "Too many fragments %d, max %d\n",
3010 num_trbs, TRBS_PER_SEGMENT - 1);
3011 return -ENOMEM;
3012 }
3013
3014 nop_cmd = cpu_to_le32(TRB_TYPE(TRB_TR_NOOP) |
3015 ep_ring->cycle_state);
3016 ep_ring->num_trbs_free -= usable;
3017 do {
3018 trb->generic.field[0] = 0;
3019 trb->generic.field[1] = 0;
3020 trb->generic.field[2] = 0;
3021 trb->generic.field[3] = nop_cmd;
3022 trb++;
3023 } while (--usable);
3024 ep_ring->enqueue = trb;
3025 if (room_on_ring(xhci, ep_ring, num_trbs))
3026 break;
3027 }
2978 3028
2979 if (ep_ring == xhci->cmd_ring) { 3029 if (ep_ring == xhci->cmd_ring) {
2980 xhci_err(xhci, "Do not support expand command ring\n"); 3030 xhci_err(xhci, "Do not support expand command ring\n");
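Note (worked example for the hunk above): if a bulk TD needs num_trbs = 3 but only 2 usable slots remain before the segment's link TRB, those 2 slots are written as NOP TRBs carrying the current cycle state and the enqueue pointer is left at the link TRB, so the whole TD is queued from the start of the next segment and no link TRB lands in the middle of it. Non-bulk rings are left alone, and a TD needing more TRBs than a whole segment holds is rejected with -ENOMEM.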
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 0a43329569d1..4d4499b80449 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -1809,7 +1809,6 @@ static void musb_free(struct musb *musb)
1809 disable_irq_wake(musb->nIrq); 1809 disable_irq_wake(musb->nIrq);
1810 free_irq(musb->nIrq, musb); 1810 free_irq(musb->nIrq, musb);
1811 } 1811 }
1812 cancel_work_sync(&musb->irq_work);
1813 1812
1814 musb_host_free(musb); 1813 musb_host_free(musb);
1815} 1814}
@@ -1896,6 +1895,9 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
1896 musb_platform_disable(musb); 1895 musb_platform_disable(musb);
1897 musb_generic_disable(musb); 1896 musb_generic_disable(musb);
1898 1897
1898 /* Init IRQ workqueue before request_irq */
1899 INIT_WORK(&musb->irq_work, musb_irq_work);
1900
1899 /* setup musb parts of the core (especially endpoints) */ 1901 /* setup musb parts of the core (especially endpoints) */
1900 status = musb_core_init(plat->config->multipoint 1902 status = musb_core_init(plat->config->multipoint
1901 ? MUSB_CONTROLLER_MHDRC 1903 ? MUSB_CONTROLLER_MHDRC
@@ -1905,9 +1907,6 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
1905 1907
1906 setup_timer(&musb->otg_timer, musb_otg_timer_func, (unsigned long) musb); 1908 setup_timer(&musb->otg_timer, musb_otg_timer_func, (unsigned long) musb);
1907 1909
1908 /* Init IRQ workqueue before request_irq */
1909 INIT_WORK(&musb->irq_work, musb_irq_work);
1910
1911 /* attach to the IRQ */ 1910 /* attach to the IRQ */
1912 if (request_irq(nIrq, musb->isr, 0, dev_name(dev), musb)) { 1911 if (request_irq(nIrq, musb->isr, 0, dev_name(dev), musb)) {
1913 dev_err(dev, "request_irq %d failed!\n", nIrq); 1912 dev_err(dev, "request_irq %d failed!\n", nIrq);
@@ -1981,6 +1980,7 @@ fail4:
1981 musb_host_cleanup(musb); 1980 musb_host_cleanup(musb);
1982 1981
1983fail3: 1982fail3:
1983 cancel_work_sync(&musb->irq_work);
1984 if (musb->dma_controller) 1984 if (musb->dma_controller)
1985 dma_controller_destroy(musb->dma_controller); 1985 dma_controller_destroy(musb->dma_controller);
1986fail2_5: 1986fail2_5:
@@ -2043,6 +2043,7 @@ static int musb_remove(struct platform_device *pdev)
2043 if (musb->dma_controller) 2043 if (musb->dma_controller)
2044 dma_controller_destroy(musb->dma_controller); 2044 dma_controller_destroy(musb->dma_controller);
2045 2045
2046 cancel_work_sync(&musb->irq_work);
2046 musb_free(musb); 2047 musb_free(musb);
2047 device_init_wakeup(dev, 0); 2048 device_init_wakeup(dev, 0);
2048 return 0; 2049 return 0;
diff --git a/drivers/usb/musb/musb_cppi41.c b/drivers/usb/musb/musb_cppi41.c
index ff9d6de2b746..a12bd30401e0 100644
--- a/drivers/usb/musb/musb_cppi41.c
+++ b/drivers/usb/musb/musb_cppi41.c
@@ -38,6 +38,7 @@ struct cppi41_dma_channel {
38 u32 prog_len; 38 u32 prog_len;
39 u32 transferred; 39 u32 transferred;
40 u32 packet_sz; 40 u32 packet_sz;
41 struct list_head tx_check;
41}; 42};
42 43
43#define MUSB_DMA_NUM_CHANNELS 15 44#define MUSB_DMA_NUM_CHANNELS 15
@@ -47,6 +48,8 @@ struct cppi41_dma_controller {
47 struct cppi41_dma_channel rx_channel[MUSB_DMA_NUM_CHANNELS]; 48 struct cppi41_dma_channel rx_channel[MUSB_DMA_NUM_CHANNELS];
48 struct cppi41_dma_channel tx_channel[MUSB_DMA_NUM_CHANNELS]; 49 struct cppi41_dma_channel tx_channel[MUSB_DMA_NUM_CHANNELS];
49 struct musb *musb; 50 struct musb *musb;
51 struct hrtimer early_tx;
52 struct list_head early_tx_list;
50 u32 rx_mode; 53 u32 rx_mode;
51 u32 tx_mode; 54 u32 tx_mode;
52 u32 auto_req; 55 u32 auto_req;
@@ -96,31 +99,27 @@ static void update_rx_toggle(struct cppi41_dma_channel *cppi41_channel)
96 cppi41_channel->usb_toggle = toggle; 99 cppi41_channel->usb_toggle = toggle;
97} 100}
98 101
99static void cppi41_dma_callback(void *private_data) 102static bool musb_is_tx_fifo_empty(struct musb_hw_ep *hw_ep)
100{ 103{
101 struct dma_channel *channel = private_data; 104 u8 epnum = hw_ep->epnum;
102 struct cppi41_dma_channel *cppi41_channel = channel->private_data; 105 struct musb *musb = hw_ep->musb;
103 struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep; 106 void __iomem *epio = musb->endpoints[epnum].regs;
104 struct musb *musb = hw_ep->musb; 107 u16 csr;
105 unsigned long flags;
106 struct dma_tx_state txstate;
107 u32 transferred;
108 108
109 spin_lock_irqsave(&musb->lock, flags); 109 csr = musb_readw(epio, MUSB_TXCSR);
110 if (csr & MUSB_TXCSR_TXPKTRDY)
111 return false;
112 return true;
113}
110 114
111 dmaengine_tx_status(cppi41_channel->dc, cppi41_channel->cookie, 115static void cppi41_dma_callback(void *private_data);
112 &txstate);
113 transferred = cppi41_channel->prog_len - txstate.residue;
114 cppi41_channel->transferred += transferred;
115 116
116 dev_dbg(musb->controller, "DMA transfer done on hw_ep=%d bytes=%d/%d\n", 117static void cppi41_trans_done(struct cppi41_dma_channel *cppi41_channel)
117 hw_ep->epnum, cppi41_channel->transferred, 118{
118 cppi41_channel->total_len); 119 struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;
120 struct musb *musb = hw_ep->musb;
119 121
120 update_rx_toggle(cppi41_channel); 122 if (!cppi41_channel->prog_len) {
121
122 if (cppi41_channel->transferred == cppi41_channel->total_len ||
123 transferred < cppi41_channel->packet_sz) {
124 123
125 /* done, complete */ 124 /* done, complete */
126 cppi41_channel->channel.actual_len = 125 cppi41_channel->channel.actual_len =
@@ -150,13 +149,11 @@ static void cppi41_dma_callback(void *private_data)
150 remain_bytes, 149 remain_bytes,
151 direction, 150 direction,
152 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 151 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
153 if (WARN_ON(!dma_desc)) { 152 if (WARN_ON(!dma_desc))
154 spin_unlock_irqrestore(&musb->lock, flags);
155 return; 153 return;
156 }
157 154
158 dma_desc->callback = cppi41_dma_callback; 155 dma_desc->callback = cppi41_dma_callback;
159 dma_desc->callback_param = channel; 156 dma_desc->callback_param = &cppi41_channel->channel;
160 cppi41_channel->cookie = dma_desc->tx_submit(dma_desc); 157 cppi41_channel->cookie = dma_desc->tx_submit(dma_desc);
161 dma_async_issue_pending(dc); 158 dma_async_issue_pending(dc);
162 159
@@ -166,6 +163,117 @@ static void cppi41_dma_callback(void *private_data)
166 musb_writew(epio, MUSB_RXCSR, csr); 163 musb_writew(epio, MUSB_RXCSR, csr);
167 } 164 }
168 } 165 }
166}
167
168static enum hrtimer_restart cppi41_recheck_tx_req(struct hrtimer *timer)
169{
170 struct cppi41_dma_controller *controller;
171 struct cppi41_dma_channel *cppi41_channel, *n;
172 struct musb *musb;
173 unsigned long flags;
174 enum hrtimer_restart ret = HRTIMER_NORESTART;
175
176 controller = container_of(timer, struct cppi41_dma_controller,
177 early_tx);
178 musb = controller->musb;
179
180 spin_lock_irqsave(&musb->lock, flags);
181 list_for_each_entry_safe(cppi41_channel, n, &controller->early_tx_list,
182 tx_check) {
183 bool empty;
184 struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;
185
186 empty = musb_is_tx_fifo_empty(hw_ep);
187 if (empty) {
188 list_del_init(&cppi41_channel->tx_check);
189 cppi41_trans_done(cppi41_channel);
190 }
191 }
192
193 if (!list_empty(&controller->early_tx_list)) {
194 ret = HRTIMER_RESTART;
195 hrtimer_forward_now(&controller->early_tx,
196 ktime_set(0, 150 * NSEC_PER_USEC));
197 }
198
199 spin_unlock_irqrestore(&musb->lock, flags);
200 return ret;
201}
202
203static void cppi41_dma_callback(void *private_data)
204{
205 struct dma_channel *channel = private_data;
206 struct cppi41_dma_channel *cppi41_channel = channel->private_data;
207 struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;
208 struct musb *musb = hw_ep->musb;
209 unsigned long flags;
210 struct dma_tx_state txstate;
211 u32 transferred;
212 bool empty;
213
214 spin_lock_irqsave(&musb->lock, flags);
215
216 dmaengine_tx_status(cppi41_channel->dc, cppi41_channel->cookie,
217 &txstate);
218 transferred = cppi41_channel->prog_len - txstate.residue;
219 cppi41_channel->transferred += transferred;
220
221 dev_dbg(musb->controller, "DMA transfer done on hw_ep=%d bytes=%d/%d\n",
222 hw_ep->epnum, cppi41_channel->transferred,
223 cppi41_channel->total_len);
224
225 update_rx_toggle(cppi41_channel);
226
227 if (cppi41_channel->transferred == cppi41_channel->total_len ||
228 transferred < cppi41_channel->packet_sz)
229 cppi41_channel->prog_len = 0;
230
231 empty = musb_is_tx_fifo_empty(hw_ep);
232 if (empty) {
233 cppi41_trans_done(cppi41_channel);
234 } else {
235 struct cppi41_dma_controller *controller;
236 /*
237 * On AM335x it has been observed that the TX interrupt fires
 238	 * too early, meaning the TXFIFO is not yet empty but the DMA
239 * engine says that it is done with the transfer. We don't
240 * receive a FIFO empty interrupt so the only thing we can do is
241 * to poll for the bit. On HS it usually takes 2us, on FS around
242 * 110us - 150us depending on the transfer size.
 243	 * We spin on HS (no longer than 25us) and set up a timer on
244 * FS to check for the bit and complete the transfer.
245 */
246 controller = cppi41_channel->controller;
247
248 if (musb->g.speed == USB_SPEED_HIGH) {
249 unsigned wait = 25;
250
251 do {
252 empty = musb_is_tx_fifo_empty(hw_ep);
253 if (empty)
254 break;
255 wait--;
256 if (!wait)
257 break;
258 udelay(1);
259 } while (1);
260
261 empty = musb_is_tx_fifo_empty(hw_ep);
262 if (empty) {
263 cppi41_trans_done(cppi41_channel);
264 goto out;
265 }
266 }
267 list_add_tail(&cppi41_channel->tx_check,
268 &controller->early_tx_list);
269 if (!hrtimer_active(&controller->early_tx)) {
270 hrtimer_start_range_ns(&controller->early_tx,
271 ktime_set(0, 140 * NSEC_PER_USEC),
272 40 * NSEC_PER_USEC,
273 HRTIMER_MODE_REL);
274 }
275 }
276out:
169 spin_unlock_irqrestore(&musb->lock, flags); 277 spin_unlock_irqrestore(&musb->lock, flags);
170} 278}
171 279
@@ -364,6 +472,8 @@ static int cppi41_is_compatible(struct dma_channel *channel, u16 maxpacket,
364 WARN_ON(1); 472 WARN_ON(1);
365 return 1; 473 return 1;
366 } 474 }
475 if (cppi41_channel->hw_ep->ep_in.type != USB_ENDPOINT_XFER_BULK)
476 return 0;
367 if (cppi41_channel->is_tx) 477 if (cppi41_channel->is_tx)
368 return 1; 478 return 1;
369 /* AM335x Advisory 1.0.13. No workaround for device RX mode */ 479 /* AM335x Advisory 1.0.13. No workaround for device RX mode */
@@ -388,6 +498,7 @@ static int cppi41_dma_channel_abort(struct dma_channel *channel)
388 if (cppi41_channel->channel.status == MUSB_DMA_STATUS_FREE) 498 if (cppi41_channel->channel.status == MUSB_DMA_STATUS_FREE)
389 return 0; 499 return 0;
390 500
501 list_del_init(&cppi41_channel->tx_check);
391 if (is_tx) { 502 if (is_tx) {
392 csr = musb_readw(epio, MUSB_TXCSR); 503 csr = musb_readw(epio, MUSB_TXCSR);
393 csr &= ~MUSB_TXCSR_DMAENAB; 504 csr &= ~MUSB_TXCSR_DMAENAB;
@@ -495,6 +606,7 @@ static int cppi41_dma_controller_start(struct cppi41_dma_controller *controller)
495 cppi41_channel->controller = controller; 606 cppi41_channel->controller = controller;
496 cppi41_channel->port_num = port; 607 cppi41_channel->port_num = port;
497 cppi41_channel->is_tx = is_tx; 608 cppi41_channel->is_tx = is_tx;
609 INIT_LIST_HEAD(&cppi41_channel->tx_check);
498 610
499 musb_dma = &cppi41_channel->channel; 611 musb_dma = &cppi41_channel->channel;
500 musb_dma->private_data = cppi41_channel; 612 musb_dma->private_data = cppi41_channel;
@@ -520,6 +632,7 @@ void dma_controller_destroy(struct dma_controller *c)
520 struct cppi41_dma_controller *controller = container_of(c, 632 struct cppi41_dma_controller *controller = container_of(c,
521 struct cppi41_dma_controller, controller); 633 struct cppi41_dma_controller, controller);
522 634
635 hrtimer_cancel(&controller->early_tx);
523 cppi41_dma_controller_stop(controller); 636 cppi41_dma_controller_stop(controller);
524 kfree(controller); 637 kfree(controller);
525} 638}
@@ -539,6 +652,9 @@ struct dma_controller *dma_controller_create(struct musb *musb,
539 if (!controller) 652 if (!controller)
540 goto kzalloc_fail; 653 goto kzalloc_fail;
541 654
655 hrtimer_init(&controller->early_tx, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
656 controller->early_tx.function = cppi41_recheck_tx_req;
657 INIT_LIST_HEAD(&controller->early_tx_list);
542 controller->musb = musb; 658 controller->musb = musb;
543 659
544 controller->controller.channel_alloc = cppi41_dma_channel_allocate; 660 controller->controller.channel_alloc = cppi41_dma_channel_allocate;
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index d2d3a173b315..32fb057c03f5 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -1796,7 +1796,11 @@ int musb_gadget_setup(struct musb *musb)
1796 1796
1797 /* this "gadget" abstracts/virtualizes the controller */ 1797 /* this "gadget" abstracts/virtualizes the controller */
1798 musb->g.name = musb_driver_name; 1798 musb->g.name = musb_driver_name;
1799#if IS_ENABLED(CONFIG_USB_MUSB_DUAL_ROLE)
1799 musb->g.is_otg = 1; 1800 musb->g.is_otg = 1;
1801#elif IS_ENABLED(CONFIG_USB_MUSB_GADGET)
1802 musb->g.is_otg = 0;
1803#endif
1800 1804
1801 musb_g_init_endpoints(musb); 1805 musb_g_init_endpoints(musb);
1802 1806
diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig
index 08e2f39027ec..2b41c636a52a 100644
--- a/drivers/usb/phy/Kconfig
+++ b/drivers/usb/phy/Kconfig
@@ -19,8 +19,9 @@ config AB8500_USB
19 in host mode, low speed. 19 in host mode, low speed.
20 20
21config FSL_USB2_OTG 21config FSL_USB2_OTG
22 bool "Freescale USB OTG Transceiver Driver" 22 tristate "Freescale USB OTG Transceiver Driver"
23 depends on USB_EHCI_FSL && USB_FSL_USB2 && PM_RUNTIME 23 depends on USB_EHCI_FSL && USB_FSL_USB2 && PM_RUNTIME
24 depends on USB
24 select USB_OTG 25 select USB_OTG
25 select USB_PHY 26 select USB_PHY
26 help 27 help
@@ -29,6 +30,7 @@ config FSL_USB2_OTG
29config ISP1301_OMAP 30config ISP1301_OMAP
30 tristate "Philips ISP1301 with OMAP OTG" 31 tristate "Philips ISP1301 with OMAP OTG"
31 depends on I2C && ARCH_OMAP_OTG 32 depends on I2C && ARCH_OMAP_OTG
33 depends on USB
32 select USB_PHY 34 select USB_PHY
33 help 35 help
34 If you say yes here you get support for the Philips ISP1301 36 If you say yes here you get support for the Philips ISP1301
diff --git a/drivers/usb/phy/phy-am335x.c b/drivers/usb/phy/phy-am335x.c
index 6370e50649d7..0e3c60cb669a 100644
--- a/drivers/usb/phy/phy-am335x.c
+++ b/drivers/usb/phy/phy-am335x.c
@@ -52,8 +52,7 @@ static int am335x_phy_probe(struct platform_device *pdev)
52 return am_phy->id; 52 return am_phy->id;
53 } 53 }
54 54
55 ret = usb_phy_gen_create_phy(dev, &am_phy->usb_phy_gen, 55 ret = usb_phy_gen_create_phy(dev, &am_phy->usb_phy_gen, NULL);
56 USB_PHY_TYPE_USB2, 0, false);
57 if (ret) 56 if (ret)
58 return ret; 57 return ret;
59 58
@@ -66,8 +65,6 @@ static int am335x_phy_probe(struct platform_device *pdev)
66 platform_set_drvdata(pdev, am_phy); 65 platform_set_drvdata(pdev, am_phy);
67 66
68 return 0; 67 return 0;
69
70 return ret;
71} 68}
72 69
73static int am335x_phy_remove(struct platform_device *pdev) 70static int am335x_phy_remove(struct platform_device *pdev)
diff --git a/drivers/usb/phy/phy-generic.c b/drivers/usb/phy/phy-generic.c
index fce3a9e9bb5d..aa6d37b3378a 100644
--- a/drivers/usb/phy/phy-generic.c
+++ b/drivers/usb/phy/phy-generic.c
@@ -48,8 +48,9 @@ void usb_nop_xceiv_register(void)
48 if (pd) 48 if (pd)
49 return; 49 return;
50 pd = platform_device_register_simple("usb_phy_gen_xceiv", -1, NULL, 0); 50 pd = platform_device_register_simple("usb_phy_gen_xceiv", -1, NULL, 0);
51 if (!pd) { 51 if (IS_ERR(pd)) {
52 pr_err("Unable to register generic usb transceiver\n"); 52 pr_err("Unable to register generic usb transceiver\n");
53 pd = NULL;
53 return; 54 return;
54 } 55 }
55} 56}
@@ -150,10 +151,40 @@ static int nop_set_host(struct usb_otg *otg, struct usb_bus *host)
150} 151}
151 152
152int usb_phy_gen_create_phy(struct device *dev, struct usb_phy_gen_xceiv *nop, 153int usb_phy_gen_create_phy(struct device *dev, struct usb_phy_gen_xceiv *nop,
153 enum usb_phy_type type, u32 clk_rate, bool needs_vcc) 154 struct usb_phy_gen_xceiv_platform_data *pdata)
154{ 155{
156 enum usb_phy_type type = USB_PHY_TYPE_USB2;
155 int err; 157 int err;
156 158
159 u32 clk_rate = 0;
160 bool needs_vcc = false;
161
162 nop->reset_active_low = true; /* default behaviour */
163
164 if (dev->of_node) {
165 struct device_node *node = dev->of_node;
166 enum of_gpio_flags flags = 0;
167
168 if (of_property_read_u32(node, "clock-frequency", &clk_rate))
169 clk_rate = 0;
170
171 needs_vcc = of_property_read_bool(node, "vcc-supply");
172 nop->gpio_reset = of_get_named_gpio_flags(node, "reset-gpios",
173 0, &flags);
174 if (nop->gpio_reset == -EPROBE_DEFER)
175 return -EPROBE_DEFER;
176
177 nop->reset_active_low = flags & OF_GPIO_ACTIVE_LOW;
178
179 } else if (pdata) {
180 type = pdata->type;
181 clk_rate = pdata->clk_rate;
182 needs_vcc = pdata->needs_vcc;
183 nop->gpio_reset = pdata->gpio_reset;
184 } else {
185 nop->gpio_reset = -1;
186 }
187
157 nop->phy.otg = devm_kzalloc(dev, sizeof(*nop->phy.otg), 188 nop->phy.otg = devm_kzalloc(dev, sizeof(*nop->phy.otg),
158 GFP_KERNEL); 189 GFP_KERNEL);
159 if (!nop->phy.otg) 190 if (!nop->phy.otg)
@@ -218,43 +249,14 @@ EXPORT_SYMBOL_GPL(usb_phy_gen_create_phy);
218static int usb_phy_gen_xceiv_probe(struct platform_device *pdev) 249static int usb_phy_gen_xceiv_probe(struct platform_device *pdev)
219{ 250{
220 struct device *dev = &pdev->dev; 251 struct device *dev = &pdev->dev;
221 struct usb_phy_gen_xceiv_platform_data *pdata =
222 dev_get_platdata(&pdev->dev);
223 struct usb_phy_gen_xceiv *nop; 252 struct usb_phy_gen_xceiv *nop;
224 enum usb_phy_type type = USB_PHY_TYPE_USB2;
225 int err; 253 int err;
226 u32 clk_rate = 0;
227 bool needs_vcc = false;
228 254
229 nop = devm_kzalloc(dev, sizeof(*nop), GFP_KERNEL); 255 nop = devm_kzalloc(dev, sizeof(*nop), GFP_KERNEL);
230 if (!nop) 256 if (!nop)
231 return -ENOMEM; 257 return -ENOMEM;
232 258
233 nop->reset_active_low = true; /* default behaviour */ 259 err = usb_phy_gen_create_phy(dev, nop, dev_get_platdata(&pdev->dev));
234
235 if (dev->of_node) {
236 struct device_node *node = dev->of_node;
237 enum of_gpio_flags flags;
238
239 if (of_property_read_u32(node, "clock-frequency", &clk_rate))
240 clk_rate = 0;
241
242 needs_vcc = of_property_read_bool(node, "vcc-supply");
243 nop->gpio_reset = of_get_named_gpio_flags(node, "reset-gpios",
244 0, &flags);
245 if (nop->gpio_reset == -EPROBE_DEFER)
246 return -EPROBE_DEFER;
247
248 nop->reset_active_low = flags & OF_GPIO_ACTIVE_LOW;
249
250 } else if (pdata) {
251 type = pdata->type;
252 clk_rate = pdata->clk_rate;
253 needs_vcc = pdata->needs_vcc;
254 nop->gpio_reset = pdata->gpio_reset;
255 }
256
257 err = usb_phy_gen_create_phy(dev, nop, type, clk_rate, needs_vcc);
258 if (err) 260 if (err)
259 return err; 261 return err;
260 262
@@ -271,8 +273,6 @@ static int usb_phy_gen_xceiv_probe(struct platform_device *pdev)
271 platform_set_drvdata(pdev, nop); 273 platform_set_drvdata(pdev, nop);
272 274
273 return 0; 275 return 0;
274
275 return err;
276} 276}
277 277
278static int usb_phy_gen_xceiv_remove(struct platform_device *pdev) 278static int usb_phy_gen_xceiv_remove(struct platform_device *pdev)
diff --git a/drivers/usb/phy/phy-generic.h b/drivers/usb/phy/phy-generic.h
index d2a220d81734..38a81f307b82 100644
--- a/drivers/usb/phy/phy-generic.h
+++ b/drivers/usb/phy/phy-generic.h
@@ -1,6 +1,8 @@
1#ifndef _PHY_GENERIC_H_ 1#ifndef _PHY_GENERIC_H_
2#define _PHY_GENERIC_H_ 2#define _PHY_GENERIC_H_
3 3
4#include <linux/usb/usb_phy_gen_xceiv.h>
5
4struct usb_phy_gen_xceiv { 6struct usb_phy_gen_xceiv {
5 struct usb_phy phy; 7 struct usb_phy phy;
6 struct device *dev; 8 struct device *dev;
@@ -14,6 +16,6 @@ int usb_gen_phy_init(struct usb_phy *phy);
14void usb_gen_phy_shutdown(struct usb_phy *phy); 16void usb_gen_phy_shutdown(struct usb_phy *phy);
15 17
16int usb_phy_gen_create_phy(struct device *dev, struct usb_phy_gen_xceiv *nop, 18int usb_phy_gen_create_phy(struct device *dev, struct usb_phy_gen_xceiv *nop,
17 enum usb_phy_type type, u32 clk_rate, bool needs_vcc); 19 struct usb_phy_gen_xceiv_platform_data *pdata);
18 20
19#endif 21#endif
diff --git a/drivers/usb/phy/phy-mxs-usb.c b/drivers/usb/phy/phy-mxs-usb.c
index fdd33b44dbd3..545844b7e796 100644
--- a/drivers/usb/phy/phy-mxs-usb.c
+++ b/drivers/usb/phy/phy-mxs-usb.c
@@ -164,7 +164,7 @@ static int mxs_phy_probe(struct platform_device *pdev)
164 164
165 mxs_phy->clk = clk; 165 mxs_phy->clk = clk;
166 166
167 platform_set_drvdata(pdev, &mxs_phy->phy); 167 platform_set_drvdata(pdev, mxs_phy);
168 168
169 ret = usb_add_phy_dev(&mxs_phy->phy); 169 ret = usb_add_phy_dev(&mxs_phy->phy);
170 if (ret) 170 if (ret)
diff --git a/drivers/usb/phy/phy-rcar-gen2-usb.c b/drivers/usb/phy/phy-rcar-gen2-usb.c
index a99a6953f11c..db3ab34cddb4 100644
--- a/drivers/usb/phy/phy-rcar-gen2-usb.c
+++ b/drivers/usb/phy/phy-rcar-gen2-usb.c
@@ -107,10 +107,10 @@ static void __rcar_gen2_usb_phy_init(struct rcar_gen2_usb_phy_priv *priv)
107 clk_prepare_enable(priv->clk); 107 clk_prepare_enable(priv->clk);
108 108
109 /* Set USB channels in the USBHS UGCTRL2 register */ 109 /* Set USB channels in the USBHS UGCTRL2 register */
110 val = ioread32(priv->base); 110 val = ioread32(priv->base + USBHS_UGCTRL2_REG);
111 val &= ~(USBHS_UGCTRL2_USB0_HS | USBHS_UGCTRL2_USB2_SS); 111 val &= ~(USBHS_UGCTRL2_USB0_HS | USBHS_UGCTRL2_USB2_SS);
112 val |= priv->ugctrl2; 112 val |= priv->ugctrl2;
113 iowrite32(val, priv->base); 113 iowrite32(val, priv->base + USBHS_UGCTRL2_REG);
114} 114}
115 115
116/* Shutdown USB channels */ 116/* Shutdown USB channels */
diff --git a/drivers/usb/phy/phy-tegra-usb.c b/drivers/usb/phy/phy-tegra-usb.c
index 82232acf1ab6..bbe4f8e6e8d7 100644
--- a/drivers/usb/phy/phy-tegra-usb.c
+++ b/drivers/usb/phy/phy-tegra-usb.c
@@ -876,7 +876,7 @@ static int utmi_phy_probe(struct tegra_usb_phy *tegra_phy,
876 876
877 tegra_phy->pad_regs = devm_ioremap(&pdev->dev, res->start, 877 tegra_phy->pad_regs = devm_ioremap(&pdev->dev, res->start,
878 resource_size(res)); 878 resource_size(res));
879 if (!tegra_phy->regs) { 879 if (!tegra_phy->pad_regs) {
880 dev_err(&pdev->dev, "Failed to remap UTMI Pad regs\n"); 880 dev_err(&pdev->dev, "Failed to remap UTMI Pad regs\n");
881 return -ENOMEM; 881 return -ENOMEM;
882 } 882 }
diff --git a/drivers/usb/phy/phy-twl6030-usb.c b/drivers/usb/phy/phy-twl6030-usb.c
index 30e8a61552d4..bad57ce77ba5 100644
--- a/drivers/usb/phy/phy-twl6030-usb.c
+++ b/drivers/usb/phy/phy-twl6030-usb.c
@@ -127,7 +127,8 @@ static inline int twl6030_writeb(struct twl6030_usb *twl, u8 module,
127 127
128static inline u8 twl6030_readb(struct twl6030_usb *twl, u8 module, u8 address) 128static inline u8 twl6030_readb(struct twl6030_usb *twl, u8 module, u8 address)
129{ 129{
130 u8 data, ret = 0; 130 u8 data;
131 int ret;
131 132
132 ret = twl_i2c_read_u8(module, &data, address); 133 ret = twl_i2c_read_u8(module, &data, address);
133 if (ret >= 0) 134 if (ret >= 0)
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 9ced8937a8f3..fb0d537435eb 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -2123,6 +2123,20 @@ static void ftdi_set_termios(struct tty_struct *tty,
2123 termios->c_cflag |= CRTSCTS; 2123 termios->c_cflag |= CRTSCTS;
2124 } 2124 }
2125 2125
2126 /*
2127 * All FTDI UART chips are limited to CS7/8. We won't pretend to
2128 * support CS5/6 and revert the CSIZE setting instead.
2129 */
2130 if ((C_CSIZE(tty) != CS8) && (C_CSIZE(tty) != CS7)) {
2131 dev_warn(ddev, "requested CSIZE setting not supported\n");
2132
2133 termios->c_cflag &= ~CSIZE;
2134 if (old_termios)
2135 termios->c_cflag |= old_termios->c_cflag & CSIZE;
2136 else
2137 termios->c_cflag |= CS8;
2138 }
2139
2126 cflag = termios->c_cflag; 2140 cflag = termios->c_cflag;
2127 2141
2128 if (!old_termios) 2142 if (!old_termios)
@@ -2159,19 +2173,16 @@ no_skip:
2159 } else { 2173 } else {
2160 urb_value |= FTDI_SIO_SET_DATA_PARITY_NONE; 2174 urb_value |= FTDI_SIO_SET_DATA_PARITY_NONE;
2161 } 2175 }
2162 if (cflag & CSIZE) { 2176 switch (cflag & CSIZE) {
2163 switch (cflag & CSIZE) { 2177 case CS7:
2164 case CS7: 2178 urb_value |= 7;
2165 urb_value |= 7; 2179 dev_dbg(ddev, "Setting CS7\n");
2166 dev_dbg(ddev, "Setting CS7\n"); 2180 break;
2167 break; 2181 default:
2168 case CS8: 2182 case CS8:
2169 urb_value |= 8; 2183 urb_value |= 8;
2170 dev_dbg(ddev, "Setting CS8\n"); 2184 dev_dbg(ddev, "Setting CS8\n");
2171 break; 2185 break;
2172 default:
2173 dev_err(ddev, "CSIZE was set but not CS7-CS8\n");
2174 }
2175 } 2186 }
2176 2187
2177 /* This is needed by the break command since it uses the same command 2188 /* This is needed by the break command since it uses the same command
diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c
index 2b01ec8651c2..b63ce023f96f 100644
--- a/drivers/usb/serial/generic.c
+++ b/drivers/usb/serial/generic.c
@@ -173,16 +173,8 @@ retry:
173 clear_bit_unlock(USB_SERIAL_WRITE_BUSY, &port->flags); 173 clear_bit_unlock(USB_SERIAL_WRITE_BUSY, &port->flags);
174 return result; 174 return result;
175 } 175 }
176 /*
177 * Try sending off another urb, unless called from completion handler
178 * (in which case there will be no free urb or no data).
179 */
180 if (mem_flags != GFP_ATOMIC)
181 goto retry;
182 176
183 clear_bit_unlock(USB_SERIAL_WRITE_BUSY, &port->flags); 177 goto retry; /* try sending off another urb */
184
185 return 0;
186} 178}
187EXPORT_SYMBOL_GPL(usb_serial_generic_write_start); 179EXPORT_SYMBOL_GPL(usb_serial_generic_write_start);
188 180
@@ -208,7 +200,7 @@ int usb_serial_generic_write(struct tty_struct *tty,
208 return 0; 200 return 0;
209 201
210 count = kfifo_in_locked(&port->write_fifo, buf, count, &port->lock); 202 count = kfifo_in_locked(&port->write_fifo, buf, count, &port->lock);
211 result = usb_serial_generic_write_start(port, GFP_KERNEL); 203 result = usb_serial_generic_write_start(port, GFP_ATOMIC);
212 if (result) 204 if (result)
213 return result; 205 return result;
214 206
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index e5bdd987b9e8..a69da83604c0 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -1813,25 +1813,25 @@ static void mos7840_change_port_settings(struct tty_struct *tty,
1813 iflag = tty->termios.c_iflag; 1813 iflag = tty->termios.c_iflag;
1814 1814
1815 /* Change the number of bits */ 1815 /* Change the number of bits */
1816 if (cflag & CSIZE) { 1816 switch (cflag & CSIZE) {
1817 switch (cflag & CSIZE) { 1817 case CS5:
1818 case CS5: 1818 lData = LCR_BITS_5;
1819 lData = LCR_BITS_5; 1819 break;
1820 break;
1821 1820
1822 case CS6: 1821 case CS6:
1823 lData = LCR_BITS_6; 1822 lData = LCR_BITS_6;
1824 break; 1823 break;
1825 1824
1826 case CS7: 1825 case CS7:
1827 lData = LCR_BITS_7; 1826 lData = LCR_BITS_7;
1828 break; 1827 break;
1829 default: 1828
1830 case CS8: 1829 default:
1831 lData = LCR_BITS_8; 1830 case CS8:
1832 break; 1831 lData = LCR_BITS_8;
1833 } 1832 break;
1834 } 1833 }
1834
1835 /* Change the Parity bit */ 1835 /* Change the Parity bit */
1836 if (cflag & PARENB) { 1836 if (cflag & PARENB) {
1837 if (cflag & PARODD) { 1837 if (cflag & PARODD) {
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index c3d94853b4ab..cc7a24154490 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -85,6 +85,7 @@ static void option_instat_callback(struct urb *urb);
85#define HUAWEI_PRODUCT_K4505 0x1464 85#define HUAWEI_PRODUCT_K4505 0x1464
86#define HUAWEI_PRODUCT_K3765 0x1465 86#define HUAWEI_PRODUCT_K3765 0x1465
87#define HUAWEI_PRODUCT_K4605 0x14C6 87#define HUAWEI_PRODUCT_K4605 0x14C6
88#define HUAWEI_PRODUCT_E173S6 0x1C07
88 89
89#define QUANTA_VENDOR_ID 0x0408 90#define QUANTA_VENDOR_ID 0x0408
90#define QUANTA_PRODUCT_Q101 0xEA02 91#define QUANTA_PRODUCT_Q101 0xEA02
@@ -250,6 +251,7 @@ static void option_instat_callback(struct urb *urb);
250#define ZTE_PRODUCT_MF628 0x0015 251#define ZTE_PRODUCT_MF628 0x0015
251#define ZTE_PRODUCT_MF626 0x0031 252#define ZTE_PRODUCT_MF626 0x0031
252#define ZTE_PRODUCT_MC2718 0xffe8 253#define ZTE_PRODUCT_MC2718 0xffe8
254#define ZTE_PRODUCT_AC2726 0xfff1
253 255
254#define BENQ_VENDOR_ID 0x04a5 256#define BENQ_VENDOR_ID 0x04a5
255#define BENQ_PRODUCT_H10 0x4068 257#define BENQ_PRODUCT_H10 0x4068
@@ -572,6 +574,8 @@ static const struct usb_device_id option_ids[] = {
572 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1c23, USB_CLASS_COMM, 0x02, 0xff) }, 574 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1c23, USB_CLASS_COMM, 0x02, 0xff) },
573 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E173, 0xff, 0xff, 0xff), 575 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E173, 0xff, 0xff, 0xff),
574 .driver_info = (kernel_ulong_t) &net_intf1_blacklist }, 576 .driver_info = (kernel_ulong_t) &net_intf1_blacklist },
577 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E173S6, 0xff, 0xff, 0xff),
578 .driver_info = (kernel_ulong_t) &net_intf1_blacklist },
575 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1750, 0xff, 0xff, 0xff), 579 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1750, 0xff, 0xff, 0xff),
576 .driver_info = (kernel_ulong_t) &net_intf2_blacklist }, 580 .driver_info = (kernel_ulong_t) &net_intf2_blacklist },
577 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1441, USB_CLASS_COMM, 0x02, 0xff) }, 581 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1441, USB_CLASS_COMM, 0x02, 0xff) },
@@ -634,6 +638,10 @@ static const struct usb_device_id option_ids[] = {
634 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x6D) }, 638 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x6D) },
635 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x6E) }, 639 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x6E) },
636 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x6F) }, 640 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x6F) },
641 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x72) },
642 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x73) },
643 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x74) },
644 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x75) },
637 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x78) }, 645 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x78) },
638 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x79) }, 646 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x79) },
639 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x7A) }, 647 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x7A) },
@@ -688,6 +696,10 @@ static const struct usb_device_id option_ids[] = {
688 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x6D) }, 696 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x6D) },
689 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x6E) }, 697 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x6E) },
690 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x6F) }, 698 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x6F) },
699 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x72) },
700 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x73) },
701 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x74) },
702 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x75) },
691 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x78) }, 703 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x78) },
692 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x79) }, 704 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x79) },
693 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x7A) }, 705 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x7A) },
@@ -742,6 +754,10 @@ static const struct usb_device_id option_ids[] = {
742 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6D) }, 754 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6D) },
743 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6E) }, 755 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6E) },
744 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6F) }, 756 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6F) },
757 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x72) },
758 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x73) },
759 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x74) },
760 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x75) },
745 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x78) }, 761 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x78) },
746 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x79) }, 762 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x79) },
747 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x7A) }, 763 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x7A) },
@@ -796,6 +812,10 @@ static const struct usb_device_id option_ids[] = {
796 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6D) }, 812 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6D) },
797 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6E) }, 813 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6E) },
798 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6F) }, 814 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6F) },
815 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x72) },
816 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x73) },
817 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x74) },
818 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x75) },
799 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x78) }, 819 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x78) },
800 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x79) }, 820 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x79) },
801 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x7A) }, 821 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x7A) },
@@ -850,6 +870,10 @@ static const struct usb_device_id option_ids[] = {
850 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6D) }, 870 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6D) },
851 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6E) }, 871 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6E) },
852 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6F) }, 872 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6F) },
873 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x72) },
874 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x73) },
875 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x74) },
876 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x75) },
853 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x78) }, 877 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x78) },
854 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x79) }, 878 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x79) },
855 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x7A) }, 879 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x7A) },
@@ -904,6 +928,10 @@ static const struct usb_device_id option_ids[] = {
904 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6D) }, 928 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6D) },
905 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6E) }, 929 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6E) },
906 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6F) }, 930 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6F) },
931 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x72) },
932 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x73) },
933 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x74) },
934 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x75) },
907 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x78) }, 935 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x78) },
908 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x79) }, 936 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x79) },
909 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x7A) }, 937 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x7A) },
@@ -1426,6 +1454,7 @@ static const struct usb_device_id option_ids[] = {
1426 { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x01) }, 1454 { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x01) },
1427 { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x05) }, 1455 { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x05) },
1428 { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x86, 0x10) }, 1456 { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x86, 0x10) },
1457 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC2726, 0xff, 0xff, 0xff) },
1429 1458
1430 { USB_DEVICE(BENQ_VENDOR_ID, BENQ_PRODUCT_H10) }, 1459 { USB_DEVICE(BENQ_VENDOR_ID, BENQ_PRODUCT_H10) },
1431 { USB_DEVICE(DLINK_VENDOR_ID, DLINK_PRODUCT_DWM_652) }, 1460 { USB_DEVICE(DLINK_VENDOR_ID, DLINK_PRODUCT_DWM_652) },
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index 1e6de4cd079d..1e3318dfa1cb 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -361,23 +361,21 @@ static void pl2303_set_termios(struct tty_struct *tty,
361 0, 0, buf, 7, 100); 361 0, 0, buf, 7, 100);
362 dev_dbg(&port->dev, "0xa1:0x21:0:0 %d - %7ph\n", i, buf); 362 dev_dbg(&port->dev, "0xa1:0x21:0:0 %d - %7ph\n", i, buf);
363 363
364 if (C_CSIZE(tty)) { 364 switch (C_CSIZE(tty)) {
365 switch (C_CSIZE(tty)) { 365 case CS5:
366 case CS5: 366 buf[6] = 5;
367 buf[6] = 5; 367 break;
368 break; 368 case CS6:
369 case CS6: 369 buf[6] = 6;
370 buf[6] = 6; 370 break;
371 break; 371 case CS7:
372 case CS7: 372 buf[6] = 7;
373 buf[6] = 7; 373 break;
374 break; 374 default:
375 default: 375 case CS8:
376 case CS8: 376 buf[6] = 8;
377 buf[6] = 8;
378 }
379 dev_dbg(&port->dev, "data bits = %d\n", buf[6]);
380 } 377 }
378 dev_dbg(&port->dev, "data bits = %d\n", buf[6]);
381 379
382 /* For reference buf[0]:buf[3] baud rate value */ 380 /* For reference buf[0]:buf[3] baud rate value */
383 pl2303_encode_baudrate(tty, port, &buf[0]); 381 pl2303_encode_baudrate(tty, port, &buf[0]);
diff --git a/drivers/usb/serial/spcp8x5.c b/drivers/usb/serial/spcp8x5.c
index 4abac28b5992..5b793c352267 100644
--- a/drivers/usb/serial/spcp8x5.c
+++ b/drivers/usb/serial/spcp8x5.c
@@ -348,22 +348,20 @@ static void spcp8x5_set_termios(struct tty_struct *tty,
348 } 348 }
349 349
350 /* Set Data Length : 00:5bit, 01:6bit, 10:7bit, 11:8bit */ 350 /* Set Data Length : 00:5bit, 01:6bit, 10:7bit, 11:8bit */
351 if (cflag & CSIZE) { 351 switch (cflag & CSIZE) {
352 switch (cflag & CSIZE) { 352 case CS5:
353 case CS5: 353 buf[1] |= SET_UART_FORMAT_SIZE_5;
354 buf[1] |= SET_UART_FORMAT_SIZE_5; 354 break;
355 break; 355 case CS6:
356 case CS6: 356 buf[1] |= SET_UART_FORMAT_SIZE_6;
357 buf[1] |= SET_UART_FORMAT_SIZE_6; 357 break;
358 break; 358 case CS7:
359 case CS7: 359 buf[1] |= SET_UART_FORMAT_SIZE_7;
360 buf[1] |= SET_UART_FORMAT_SIZE_7; 360 break;
361 break; 361 default:
362 default: 362 case CS8:
363 case CS8: 363 buf[1] |= SET_UART_FORMAT_SIZE_8;
364 buf[1] |= SET_UART_FORMAT_SIZE_8; 364 break;
365 break;
366 }
367 } 365 }
368 366
369 /* Set Stop bit2 : 0:1bit 1:2bit */ 367 /* Set Stop bit2 : 0:1bit 1:2bit */
diff --git a/drivers/usb/serial/zte_ev.c b/drivers/usb/serial/zte_ev.c
index fca4c752a4ed..eae2c873b39f 100644
--- a/drivers/usb/serial/zte_ev.c
+++ b/drivers/usb/serial/zte_ev.c
@@ -281,8 +281,7 @@ static const struct usb_device_id id_table[] = {
281 { USB_DEVICE(0x19d2, 0xfffd) }, 281 { USB_DEVICE(0x19d2, 0xfffd) },
282 { USB_DEVICE(0x19d2, 0xfffc) }, 282 { USB_DEVICE(0x19d2, 0xfffc) },
283 { USB_DEVICE(0x19d2, 0xfffb) }, 283 { USB_DEVICE(0x19d2, 0xfffb) },
284 /* AC2726, AC8710_V3 */ 284 /* AC8710_V3 */
285 { USB_DEVICE_AND_INTERFACE_INFO(0x19d2, 0xfff1, 0xff, 0xff, 0xff) },
286 { USB_DEVICE(0x19d2, 0xfff6) }, 285 { USB_DEVICE(0x19d2, 0xfff6) },
287 { USB_DEVICE(0x19d2, 0xfff7) }, 286 { USB_DEVICE(0x19d2, 0xfff7) },
288 { USB_DEVICE(0x19d2, 0xfff8) }, 287 { USB_DEVICE(0x19d2, 0xfff8) },
diff --git a/drivers/usb/wusbcore/devconnect.c b/drivers/usb/wusbcore/devconnect.c
index e538b72c4e3a..f14e7929ba22 100644
--- a/drivers/usb/wusbcore/devconnect.c
+++ b/drivers/usb/wusbcore/devconnect.c
@@ -97,18 +97,12 @@ static void wusbhc_devconnect_acked_work(struct work_struct *work);
97 97
98static void wusb_dev_free(struct wusb_dev *wusb_dev) 98static void wusb_dev_free(struct wusb_dev *wusb_dev)
99{ 99{
100 if (wusb_dev) { 100 kfree(wusb_dev);
101 kfree(wusb_dev->set_gtk_req);
102 usb_free_urb(wusb_dev->set_gtk_urb);
103 kfree(wusb_dev);
104 }
105} 101}
106 102
107static struct wusb_dev *wusb_dev_alloc(struct wusbhc *wusbhc) 103static struct wusb_dev *wusb_dev_alloc(struct wusbhc *wusbhc)
108{ 104{
109 struct wusb_dev *wusb_dev; 105 struct wusb_dev *wusb_dev;
110 struct urb *urb;
111 struct usb_ctrlrequest *req;
112 106
113 wusb_dev = kzalloc(sizeof(*wusb_dev), GFP_KERNEL); 107 wusb_dev = kzalloc(sizeof(*wusb_dev), GFP_KERNEL);
114 if (wusb_dev == NULL) 108 if (wusb_dev == NULL)
@@ -118,22 +112,6 @@ static struct wusb_dev *wusb_dev_alloc(struct wusbhc *wusbhc)
118 112
119 INIT_WORK(&wusb_dev->devconnect_acked_work, wusbhc_devconnect_acked_work); 113 INIT_WORK(&wusb_dev->devconnect_acked_work, wusbhc_devconnect_acked_work);
120 114
121 urb = usb_alloc_urb(0, GFP_KERNEL);
122 if (urb == NULL)
123 goto err;
124 wusb_dev->set_gtk_urb = urb;
125
126 req = kmalloc(sizeof(*req), GFP_KERNEL);
127 if (req == NULL)
128 goto err;
129 wusb_dev->set_gtk_req = req;
130
131 req->bRequestType = USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE;
132 req->bRequest = USB_REQ_SET_DESCRIPTOR;
133 req->wValue = cpu_to_le16(USB_DT_KEY << 8 | wusbhc->gtk_index);
134 req->wIndex = 0;
135 req->wLength = cpu_to_le16(wusbhc->gtk.descr.bLength);
136
137 return wusb_dev; 115 return wusb_dev;
138err: 116err:
139 wusb_dev_free(wusb_dev); 117 wusb_dev_free(wusb_dev);
@@ -411,9 +389,6 @@ static void __wusbhc_dev_disconnect(struct wusbhc *wusbhc,
411/* 389/*
412 * Refresh the list of keep alives to emit in the MMC 390 * Refresh the list of keep alives to emit in the MMC
413 * 391 *
414 * Some devices don't respond to keep alives unless they've been
415 * authenticated, so skip unauthenticated devices.
416 *
417 * We only publish the first four devices that have a coming timeout 392 * We only publish the first four devices that have a coming timeout
418 * condition. Then when we are done processing those, we go for the 393 * condition. Then when we are done processing those, we go for the
419 * next ones. We ignore the ones that have timed out already (they'll 394 * next ones. We ignore the ones that have timed out already (they'll
@@ -448,7 +423,7 @@ static void __wusbhc_keep_alive(struct wusbhc *wusbhc)
448 423
449 if (wusb_dev == NULL) 424 if (wusb_dev == NULL)
450 continue; 425 continue;
451 if (wusb_dev->usb_dev == NULL || !wusb_dev->usb_dev->authenticated) 426 if (wusb_dev->usb_dev == NULL)
452 continue; 427 continue;
453 428
454 if (time_after(jiffies, wusb_dev->entry_ts + tt)) { 429 if (time_after(jiffies, wusb_dev->entry_ts + tt)) {
@@ -524,11 +499,19 @@ static struct wusb_dev *wusbhc_find_dev_by_addr(struct wusbhc *wusbhc, u8 addr)
524 * 499 *
525 * @wusbhc shall be referenced and unlocked 500 * @wusbhc shall be referenced and unlocked
526 */ 501 */
527static void wusbhc_handle_dn_alive(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev) 502static void wusbhc_handle_dn_alive(struct wusbhc *wusbhc, u8 srcaddr)
528{ 503{
504 struct wusb_dev *wusb_dev;
505
529 mutex_lock(&wusbhc->mutex); 506 mutex_lock(&wusbhc->mutex);
530 wusb_dev->entry_ts = jiffies; 507 wusb_dev = wusbhc_find_dev_by_addr(wusbhc, srcaddr);
531 __wusbhc_keep_alive(wusbhc); 508 if (wusb_dev == NULL) {
509 dev_dbg(wusbhc->dev, "ignoring DN_Alive from unconnected device %02x\n",
510 srcaddr);
511 } else {
512 wusb_dev->entry_ts = jiffies;
513 __wusbhc_keep_alive(wusbhc);
514 }
532 mutex_unlock(&wusbhc->mutex); 515 mutex_unlock(&wusbhc->mutex);
533} 516}
534 517
@@ -582,14 +565,22 @@ static void wusbhc_handle_dn_connect(struct wusbhc *wusbhc,
582 * 565 *
583 * @wusbhc shall be referenced and unlocked 566 * @wusbhc shall be referenced and unlocked
584 */ 567 */
585static void wusbhc_handle_dn_disconnect(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev) 568static void wusbhc_handle_dn_disconnect(struct wusbhc *wusbhc, u8 srcaddr)
586{ 569{
587 struct device *dev = wusbhc->dev; 570 struct device *dev = wusbhc->dev;
588 571 struct wusb_dev *wusb_dev;
589 dev_info(dev, "DN DISCONNECT: device 0x%02x going down\n", wusb_dev->addr);
590 572
591 mutex_lock(&wusbhc->mutex); 573 mutex_lock(&wusbhc->mutex);
592 __wusbhc_dev_disconnect(wusbhc, wusb_port_by_idx(wusbhc, wusb_dev->port_idx)); 574 wusb_dev = wusbhc_find_dev_by_addr(wusbhc, srcaddr);
575 if (wusb_dev == NULL) {
576 dev_dbg(dev, "ignoring DN DISCONNECT from unconnected device %02x\n",
577 srcaddr);
578 } else {
579 dev_info(dev, "DN DISCONNECT: device 0x%02x going down\n",
580 wusb_dev->addr);
581 __wusbhc_dev_disconnect(wusbhc, wusb_port_by_idx(wusbhc,
582 wusb_dev->port_idx));
583 }
593 mutex_unlock(&wusbhc->mutex); 584 mutex_unlock(&wusbhc->mutex);
594} 585}
595 586
@@ -611,30 +602,21 @@ void wusbhc_handle_dn(struct wusbhc *wusbhc, u8 srcaddr,
611 struct wusb_dn_hdr *dn_hdr, size_t size) 602 struct wusb_dn_hdr *dn_hdr, size_t size)
612{ 603{
613 struct device *dev = wusbhc->dev; 604 struct device *dev = wusbhc->dev;
614 struct wusb_dev *wusb_dev;
615 605
616 if (size < sizeof(struct wusb_dn_hdr)) { 606 if (size < sizeof(struct wusb_dn_hdr)) {
617 dev_err(dev, "DN data shorter than DN header (%d < %d)\n", 607 dev_err(dev, "DN data shorter than DN header (%d < %d)\n",
618 (int)size, (int)sizeof(struct wusb_dn_hdr)); 608 (int)size, (int)sizeof(struct wusb_dn_hdr));
619 return; 609 return;
620 } 610 }
621
622 wusb_dev = wusbhc_find_dev_by_addr(wusbhc, srcaddr);
623 if (wusb_dev == NULL && dn_hdr->bType != WUSB_DN_CONNECT) {
624 dev_dbg(dev, "ignoring DN %d from unconnected device %02x\n",
625 dn_hdr->bType, srcaddr);
626 return;
627 }
628
629 switch (dn_hdr->bType) { 611 switch (dn_hdr->bType) {
630 case WUSB_DN_CONNECT: 612 case WUSB_DN_CONNECT:
631 wusbhc_handle_dn_connect(wusbhc, dn_hdr, size); 613 wusbhc_handle_dn_connect(wusbhc, dn_hdr, size);
632 break; 614 break;
633 case WUSB_DN_ALIVE: 615 case WUSB_DN_ALIVE:
634 wusbhc_handle_dn_alive(wusbhc, wusb_dev); 616 wusbhc_handle_dn_alive(wusbhc, srcaddr);
635 break; 617 break;
636 case WUSB_DN_DISCONNECT: 618 case WUSB_DN_DISCONNECT:
637 wusbhc_handle_dn_disconnect(wusbhc, wusb_dev); 619 wusbhc_handle_dn_disconnect(wusbhc, srcaddr);
638 break; 620 break;
639 case WUSB_DN_MASAVAILCHANGED: 621 case WUSB_DN_MASAVAILCHANGED:
640 case WUSB_DN_RWAKE: 622 case WUSB_DN_RWAKE:
diff --git a/drivers/usb/wusbcore/security.c b/drivers/usb/wusbcore/security.c
index dd88441c8f78..4c40d0dbf53d 100644
--- a/drivers/usb/wusbcore/security.c
+++ b/drivers/usb/wusbcore/security.c
@@ -29,19 +29,16 @@
29#include <linux/export.h> 29#include <linux/export.h>
30#include "wusbhc.h" 30#include "wusbhc.h"
31 31
32static void wusbhc_set_gtk_callback(struct urb *urb); 32static void wusbhc_gtk_rekey_work(struct work_struct *work);
33static void wusbhc_gtk_rekey_done_work(struct work_struct *work);
34 33
35int wusbhc_sec_create(struct wusbhc *wusbhc) 34int wusbhc_sec_create(struct wusbhc *wusbhc)
36{ 35{
37 wusbhc->gtk.descr.bLength = sizeof(wusbhc->gtk.descr) + sizeof(wusbhc->gtk.data); 36 wusbhc->gtk.descr.bLength = sizeof(wusbhc->gtk.descr) + sizeof(wusbhc->gtk.data);
38 wusbhc->gtk.descr.bDescriptorType = USB_DT_KEY; 37 wusbhc->gtk.descr.bDescriptorType = USB_DT_KEY;
39 wusbhc->gtk.descr.bReserved = 0; 38 wusbhc->gtk.descr.bReserved = 0;
39 wusbhc->gtk_index = 0;
40 40
41 wusbhc->gtk_index = wusb_key_index(0, WUSB_KEY_INDEX_TYPE_GTK, 41 INIT_WORK(&wusbhc->gtk_rekey_work, wusbhc_gtk_rekey_work);
42 WUSB_KEY_INDEX_ORIGINATOR_HOST);
43
44 INIT_WORK(&wusbhc->gtk_rekey_done_work, wusbhc_gtk_rekey_done_work);
45 42
46 return 0; 43 return 0;
47} 44}
@@ -113,7 +110,7 @@ int wusbhc_sec_start(struct wusbhc *wusbhc)
113 wusbhc_generate_gtk(wusbhc); 110 wusbhc_generate_gtk(wusbhc);
114 111
115 result = wusbhc->set_gtk(wusbhc, wusbhc->gtk_tkid, 112 result = wusbhc->set_gtk(wusbhc, wusbhc->gtk_tkid,
116 &wusbhc->gtk.descr.bKeyData, key_size); 113 &wusbhc->gtk.descr.bKeyData, key_size);
117 if (result < 0) 114 if (result < 0)
118 dev_err(wusbhc->dev, "cannot set GTK for the host: %d\n", 115 dev_err(wusbhc->dev, "cannot set GTK for the host: %d\n",
119 result); 116 result);
@@ -129,7 +126,7 @@ int wusbhc_sec_start(struct wusbhc *wusbhc)
129 */ 126 */
130void wusbhc_sec_stop(struct wusbhc *wusbhc) 127void wusbhc_sec_stop(struct wusbhc *wusbhc)
131{ 128{
132 cancel_work_sync(&wusbhc->gtk_rekey_done_work); 129 cancel_work_sync(&wusbhc->gtk_rekey_work);
133} 130}
134 131
135 132
@@ -185,12 +182,14 @@ static int wusb_dev_set_encryption(struct usb_device *usb_dev, int value)
185static int wusb_dev_set_gtk(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev) 182static int wusb_dev_set_gtk(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev)
186{ 183{
187 struct usb_device *usb_dev = wusb_dev->usb_dev; 184 struct usb_device *usb_dev = wusb_dev->usb_dev;
185 u8 key_index = wusb_key_index(wusbhc->gtk_index,
186 WUSB_KEY_INDEX_TYPE_GTK, WUSB_KEY_INDEX_ORIGINATOR_HOST);
188 187
189 return usb_control_msg( 188 return usb_control_msg(
190 usb_dev, usb_sndctrlpipe(usb_dev, 0), 189 usb_dev, usb_sndctrlpipe(usb_dev, 0),
191 USB_REQ_SET_DESCRIPTOR, 190 USB_REQ_SET_DESCRIPTOR,
192 USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE, 191 USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
193 USB_DT_KEY << 8 | wusbhc->gtk_index, 0, 192 USB_DT_KEY << 8 | key_index, 0,
194 &wusbhc->gtk.descr, wusbhc->gtk.descr.bLength, 193 &wusbhc->gtk.descr, wusbhc->gtk.descr.bLength,
195 1000); 194 1000);
196} 195}
@@ -520,24 +519,55 @@ error_kzalloc:
520 * Once all connected and authenticated devices have received the new 519 * Once all connected and authenticated devices have received the new
521 * GTK, switch the host to using it. 520 * GTK, switch the host to using it.
522 */ 521 */
523static void wusbhc_gtk_rekey_done_work(struct work_struct *work) 522static void wusbhc_gtk_rekey_work(struct work_struct *work)
524{ 523{
525 struct wusbhc *wusbhc = container_of(work, struct wusbhc, gtk_rekey_done_work); 524 struct wusbhc *wusbhc = container_of(work,
525 struct wusbhc, gtk_rekey_work);
526 size_t key_size = sizeof(wusbhc->gtk.data); 526 size_t key_size = sizeof(wusbhc->gtk.data);
527 int port_idx;
528 struct wusb_dev *wusb_dev, *wusb_dev_next;
529 LIST_HEAD(rekey_list);
527 530
528 mutex_lock(&wusbhc->mutex); 531 mutex_lock(&wusbhc->mutex);
532 /* generate the new key */
533 wusbhc_generate_gtk(wusbhc);
534 /* roll the gtk index. */
535 wusbhc->gtk_index = (wusbhc->gtk_index + 1) % (WUSB_KEY_INDEX_MAX + 1);
536 /*
537 * Save all connected devices on a list while holding wusbhc->mutex and
538 * take a reference to each one. Then submit the set key request to
539 * them after releasing the lock in order to avoid a deadlock.
540 */
541 for (port_idx = 0; port_idx < wusbhc->ports_max; port_idx++) {
542 wusb_dev = wusbhc->port[port_idx].wusb_dev;
543 if (!wusb_dev || !wusb_dev->usb_dev
544 || !wusb_dev->usb_dev->authenticated)
545 continue;
529 546
530 if (--wusbhc->pending_set_gtks == 0) 547 wusb_dev_get(wusb_dev);
531 wusbhc->set_gtk(wusbhc, wusbhc->gtk_tkid, &wusbhc->gtk.descr.bKeyData, key_size); 548 list_add_tail(&wusb_dev->rekey_node, &rekey_list);
532 549 }
533 mutex_unlock(&wusbhc->mutex); 550 mutex_unlock(&wusbhc->mutex);
534}
535 551
536static void wusbhc_set_gtk_callback(struct urb *urb) 552 /* Submit the rekey requests without holding wusbhc->mutex. */
537{ 553 list_for_each_entry_safe(wusb_dev, wusb_dev_next, &rekey_list,
538 struct wusbhc *wusbhc = urb->context; 554 rekey_node) {
555 list_del_init(&wusb_dev->rekey_node);
556 dev_dbg(&wusb_dev->usb_dev->dev, "%s: rekey device at port %d\n",
557 __func__, wusb_dev->port_idx);
558
559 if (wusb_dev_set_gtk(wusbhc, wusb_dev) < 0) {
560 dev_err(&wusb_dev->usb_dev->dev, "%s: rekey device at port %d failed\n",
561 __func__, wusb_dev->port_idx);
562 }
563 wusb_dev_put(wusb_dev);
564 }
539 565
540 queue_work(wusbd, &wusbhc->gtk_rekey_done_work); 566 /* Switch the host controller to use the new GTK. */
567 mutex_lock(&wusbhc->mutex);
568 wusbhc->set_gtk(wusbhc, wusbhc->gtk_tkid,
569 &wusbhc->gtk.descr.bKeyData, key_size);
570 mutex_unlock(&wusbhc->mutex);
541} 571}
542 572
543/** 573/**
@@ -553,26 +583,12 @@ static void wusbhc_set_gtk_callback(struct urb *urb)
553 */ 583 */
554void wusbhc_gtk_rekey(struct wusbhc *wusbhc) 584void wusbhc_gtk_rekey(struct wusbhc *wusbhc)
555{ 585{
556 static const size_t key_size = sizeof(wusbhc->gtk.data); 586 /*
557 int p; 587 * We need to submit a URB to the downstream WUSB devices in order to
558 588 * change the group key. This can't be done while holding the
559 wusbhc_generate_gtk(wusbhc); 589 * wusbhc->mutex since that is also taken in the urb_enqueue routine
560 590 * and will cause a deadlock. Instead, queue a work item to do
 561	 for (p = 0; p < wusbhc->ports_max; p++) {	 591	 * it when the lock is not held.
562 struct wusb_dev *wusb_dev; 592 */
563 593 queue_work(wusbd, &wusbhc->gtk_rekey_work);
564 wusb_dev = wusbhc->port[p].wusb_dev;
565 if (!wusb_dev || !wusb_dev->usb_dev || !wusb_dev->usb_dev->authenticated)
566 continue;
567
568 usb_fill_control_urb(wusb_dev->set_gtk_urb, wusb_dev->usb_dev,
569 usb_sndctrlpipe(wusb_dev->usb_dev, 0),
570 (void *)wusb_dev->set_gtk_req,
571 &wusbhc->gtk.descr, wusbhc->gtk.descr.bLength,
572 wusbhc_set_gtk_callback, wusbhc);
573 if (usb_submit_urb(wusb_dev->set_gtk_urb, GFP_KERNEL) == 0)
574 wusbhc->pending_set_gtks++;
575 }
576 if (wusbhc->pending_set_gtks == 0)
577 wusbhc->set_gtk(wusbhc, wusbhc->gtk_tkid, &wusbhc->gtk.descr.bKeyData, key_size);
578} 594}
diff --git a/drivers/usb/wusbcore/wusbhc.h b/drivers/usb/wusbcore/wusbhc.h
index 711b1952b114..6bd3b819a6b5 100644
--- a/drivers/usb/wusbcore/wusbhc.h
+++ b/drivers/usb/wusbcore/wusbhc.h
@@ -97,6 +97,7 @@ struct wusb_dev {
97 struct kref refcnt; 97 struct kref refcnt;
98 struct wusbhc *wusbhc; 98 struct wusbhc *wusbhc;
99 struct list_head cack_node; /* Connect-Ack list */ 99 struct list_head cack_node; /* Connect-Ack list */
100 struct list_head rekey_node; /* GTK rekey list */
100 u8 port_idx; 101 u8 port_idx;
101 u8 addr; 102 u8 addr;
102 u8 beacon_type:4; 103 u8 beacon_type:4;
@@ -107,8 +108,6 @@ struct wusb_dev {
107 struct usb_wireless_cap_descriptor *wusb_cap_descr; 108 struct usb_wireless_cap_descriptor *wusb_cap_descr;
108 struct uwb_mas_bm availability; 109 struct uwb_mas_bm availability;
109 struct work_struct devconnect_acked_work; 110 struct work_struct devconnect_acked_work;
110 struct urb *set_gtk_urb;
111 struct usb_ctrlrequest *set_gtk_req;
112 struct usb_device *usb_dev; 111 struct usb_device *usb_dev;
113}; 112};
114 113
@@ -296,8 +295,7 @@ struct wusbhc {
296 } __attribute__((packed)) gtk; 295 } __attribute__((packed)) gtk;
297 u8 gtk_index; 296 u8 gtk_index;
298 u32 gtk_tkid; 297 u32 gtk_tkid;
299 struct work_struct gtk_rekey_done_work; 298 struct work_struct gtk_rekey_work;
300 int pending_set_gtks;
301 299
302 struct usb_encryption_descriptor *ccm1_etd; 300 struct usb_encryption_descriptor *ccm1_etd;
303}; 301};
diff --git a/drivers/video/atmel_lcdfb.c b/drivers/video/atmel_lcdfb.c
index 8521051cf946..cd961622f9c1 100644
--- a/drivers/video/atmel_lcdfb.c
+++ b/drivers/video/atmel_lcdfb.c
@@ -131,6 +131,7 @@ static const struct platform_device_id atmel_lcdfb_devtypes[] = {
131 /* terminator */ 131 /* terminator */
132 } 132 }
133}; 133};
134MODULE_DEVICE_TABLE(platform, atmel_lcdfb_devtypes);
134 135
135static struct atmel_lcdfb_config * 136static struct atmel_lcdfb_config *
136atmel_lcdfb_get_config(struct platform_device *pdev) 137atmel_lcdfb_get_config(struct platform_device *pdev)
diff --git a/drivers/video/kyro/fbdev.c b/drivers/video/kyro/fbdev.c
index 50c857477e4f..65041e15fd59 100644
--- a/drivers/video/kyro/fbdev.c
+++ b/drivers/video/kyro/fbdev.c
@@ -624,15 +624,15 @@ static int kyrofb_ioctl(struct fb_info *info,
624 return -EINVAL; 624 return -EINVAL;
625 } 625 }
626 case KYRO_IOCTL_UVSTRIDE: 626 case KYRO_IOCTL_UVSTRIDE:
627 if (copy_to_user(argp, &deviceInfo.ulOverlayUVStride, sizeof(unsigned long))) 627 if (copy_to_user(argp, &deviceInfo.ulOverlayUVStride, sizeof(deviceInfo.ulOverlayUVStride)))
628 return -EFAULT; 628 return -EFAULT;
629 break; 629 break;
630 case KYRO_IOCTL_STRIDE: 630 case KYRO_IOCTL_STRIDE:
631 if (copy_to_user(argp, &deviceInfo.ulOverlayStride, sizeof(unsigned long))) 631 if (copy_to_user(argp, &deviceInfo.ulOverlayStride, sizeof(deviceInfo.ulOverlayStride)))
632 return -EFAULT; 632 return -EFAULT;
633 break; 633 break;
634 case KYRO_IOCTL_OVERLAY_OFFSET: 634 case KYRO_IOCTL_OVERLAY_OFFSET:
635 if (copy_to_user(argp, &deviceInfo.ulOverlayOffset, sizeof(unsigned long))) 635 if (copy_to_user(argp, &deviceInfo.ulOverlayOffset, sizeof(deviceInfo.ulOverlayOffset)))
636 return -EFAULT; 636 return -EFAULT;
637 break; 637 break;
638 } 638 }
diff --git a/drivers/video/offb.c b/drivers/video/offb.c
index 9dbea2223401..7d44d669d5b6 100644
--- a/drivers/video/offb.c
+++ b/drivers/video/offb.c
@@ -91,6 +91,15 @@ extern boot_infos_t *boot_infos;
91#define AVIVO_DC_LUTB_WHITE_OFFSET_GREEN 0x6cd4 91#define AVIVO_DC_LUTB_WHITE_OFFSET_GREEN 0x6cd4
92#define AVIVO_DC_LUTB_WHITE_OFFSET_RED 0x6cd8 92#define AVIVO_DC_LUTB_WHITE_OFFSET_RED 0x6cd8
93 93
94#define FB_RIGHT_POS(p, bpp) (fb_be_math(p) ? 0 : (32 - (bpp)))
95
96static inline u32 offb_cmap_byteswap(struct fb_info *info, u32 value)
97{
98 u32 bpp = info->var.bits_per_pixel;
99
100 return cpu_to_be32(value) >> FB_RIGHT_POS(info, bpp);
101}
102
94 /* 103 /*
95 * Set a single color register. The values supplied are already 104 * Set a single color register. The values supplied are already
96 * rounded down to the hardware's capabilities (according to the 105 * rounded down to the hardware's capabilities (according to the
@@ -120,7 +129,7 @@ static int offb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
120 mask <<= info->var.transp.offset; 129 mask <<= info->var.transp.offset;
121 value |= mask; 130 value |= mask;
122 } 131 }
123 pal[regno] = value; 132 pal[regno] = offb_cmap_byteswap(info, value);
124 return 0; 133 return 0;
125 } 134 }
126 135
@@ -301,7 +310,7 @@ static struct fb_ops offb_ops = {
301static void __iomem *offb_map_reg(struct device_node *np, int index, 310static void __iomem *offb_map_reg(struct device_node *np, int index,
302 unsigned long offset, unsigned long size) 311 unsigned long offset, unsigned long size)
303{ 312{
304 const u32 *addrp; 313 const __be32 *addrp;
305 u64 asize, taddr; 314 u64 asize, taddr;
306 unsigned int flags; 315 unsigned int flags;
307 316
@@ -369,7 +378,11 @@ static void offb_init_palette_hacks(struct fb_info *info, struct device_node *dp
369 } 378 }
370 of_node_put(pciparent); 379 of_node_put(pciparent);
371 } else if (dp && of_device_is_compatible(dp, "qemu,std-vga")) { 380 } else if (dp && of_device_is_compatible(dp, "qemu,std-vga")) {
372 const u32 io_of_addr[3] = { 0x01000000, 0x0, 0x0 }; 381#ifdef __BIG_ENDIAN
382 const __be32 io_of_addr[3] = { 0x01000000, 0x0, 0x0 };
383#else
384 const __be32 io_of_addr[3] = { 0x00000001, 0x0, 0x0 };
385#endif
373 u64 io_addr = of_translate_address(dp, io_of_addr); 386 u64 io_addr = of_translate_address(dp, io_of_addr);
374 if (io_addr != OF_BAD_ADDR) { 387 if (io_addr != OF_BAD_ADDR) {
375 par->cmap_adr = ioremap(io_addr + 0x3c8, 2); 388 par->cmap_adr = ioremap(io_addr + 0x3c8, 2);
@@ -535,7 +548,7 @@ static void __init offb_init_nodriver(struct device_node *dp, int no_real_node)
535 unsigned int flags, rsize, addr_prop = 0; 548 unsigned int flags, rsize, addr_prop = 0;
536 unsigned long max_size = 0; 549 unsigned long max_size = 0;
537 u64 rstart, address = OF_BAD_ADDR; 550 u64 rstart, address = OF_BAD_ADDR;
538 const u32 *pp, *addrp, *up; 551 const __be32 *pp, *addrp, *up;
539 u64 asize; 552 u64 asize;
540 int foreign_endian = 0; 553 int foreign_endian = 0;
541 554
@@ -551,25 +564,25 @@ static void __init offb_init_nodriver(struct device_node *dp, int no_real_node)
551 if (pp == NULL) 564 if (pp == NULL)
552 pp = of_get_property(dp, "depth", &len); 565 pp = of_get_property(dp, "depth", &len);
553 if (pp && len == sizeof(u32)) 566 if (pp && len == sizeof(u32))
554 depth = *pp; 567 depth = be32_to_cpup(pp);
555 568
556 pp = of_get_property(dp, "linux,bootx-width", &len); 569 pp = of_get_property(dp, "linux,bootx-width", &len);
557 if (pp == NULL) 570 if (pp == NULL)
558 pp = of_get_property(dp, "width", &len); 571 pp = of_get_property(dp, "width", &len);
559 if (pp && len == sizeof(u32)) 572 if (pp && len == sizeof(u32))
560 width = *pp; 573 width = be32_to_cpup(pp);
561 574
562 pp = of_get_property(dp, "linux,bootx-height", &len); 575 pp = of_get_property(dp, "linux,bootx-height", &len);
563 if (pp == NULL) 576 if (pp == NULL)
564 pp = of_get_property(dp, "height", &len); 577 pp = of_get_property(dp, "height", &len);
565 if (pp && len == sizeof(u32)) 578 if (pp && len == sizeof(u32))
566 height = *pp; 579 height = be32_to_cpup(pp);
567 580
568 pp = of_get_property(dp, "linux,bootx-linebytes", &len); 581 pp = of_get_property(dp, "linux,bootx-linebytes", &len);
569 if (pp == NULL) 582 if (pp == NULL)
570 pp = of_get_property(dp, "linebytes", &len); 583 pp = of_get_property(dp, "linebytes", &len);
571 if (pp && len == sizeof(u32) && (*pp != 0xffffffffu)) 584 if (pp && len == sizeof(u32) && (*pp != 0xffffffffu))
572 pitch = *pp; 585 pitch = be32_to_cpup(pp);
573 else 586 else
574 pitch = width * ((depth + 7) / 8); 587 pitch = width * ((depth + 7) / 8);
575 588
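
A hedged aside on the offb hunks above, which switch device-tree reads from plain u32 dereferences to __be32 plus be32_to_cpup(): DT property cells are stored big-endian regardless of the host, so a raw dereference only happens to work on big-endian machines, which is the failure mode the patch guards against. A minimal userspace sketch of the same conversion, using be32toh()/htobe32() from <endian.h> as stand-ins for the kernel helpers:

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

/* Equivalent of be32_to_cpup(): one 32-bit big-endian cell to host order. */
static uint32_t read_dt_cell(const uint32_t *cell)
{
        return be32toh(*cell);
}

int main(void)
{
        /* A DT cell as it sits in memory: big-endian, here "width = 800". */
        uint32_t width_cell = htobe32(800);

        printf("width = %u\n", read_dt_cell(&width_cell)); /* 800 on any host */
        return 0;
}
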
diff --git a/drivers/video/omap2/displays-new/panel-sony-acx565akm.c b/drivers/video/omap2/displays-new/panel-sony-acx565akm.c
index e6d56f714ae4..d94f35dbd536 100644
--- a/drivers/video/omap2/displays-new/panel-sony-acx565akm.c
+++ b/drivers/video/omap2/displays-new/panel-sony-acx565akm.c
@@ -526,6 +526,8 @@ static int acx565akm_panel_power_on(struct omap_dss_device *dssdev)
526 struct omap_dss_device *in = ddata->in; 526 struct omap_dss_device *in = ddata->in;
527 int r; 527 int r;
528 528
529 mutex_lock(&ddata->mutex);
530
529 dev_dbg(&ddata->spi->dev, "%s\n", __func__); 531 dev_dbg(&ddata->spi->dev, "%s\n", __func__);
530 532
531 in->ops.sdi->set_timings(in, &ddata->videomode); 533 in->ops.sdi->set_timings(in, &ddata->videomode);
@@ -614,10 +616,7 @@ static int acx565akm_enable(struct omap_dss_device *dssdev)
614 if (omapdss_device_is_enabled(dssdev)) 616 if (omapdss_device_is_enabled(dssdev))
615 return 0; 617 return 0;
616 618
617 mutex_lock(&ddata->mutex);
618 r = acx565akm_panel_power_on(dssdev); 619 r = acx565akm_panel_power_on(dssdev);
619 mutex_unlock(&ddata->mutex);
620
621 if (r) 620 if (r)
622 return r; 621 return r;
623 622
diff --git a/drivers/video/sh_mobile_meram.c b/drivers/video/sh_mobile_meram.c
index e0f098562a74..a297de5cc859 100644
--- a/drivers/video/sh_mobile_meram.c
+++ b/drivers/video/sh_mobile_meram.c
@@ -569,6 +569,7 @@ EXPORT_SYMBOL_GPL(sh_mobile_meram_cache_update);
569 * Power management 569 * Power management
570 */ 570 */
571 571
572#if defined(CONFIG_PM_SLEEP) || defined(CONFIG_PM_RUNTIME)
572static int sh_mobile_meram_suspend(struct device *dev) 573static int sh_mobile_meram_suspend(struct device *dev)
573{ 574{
574 struct platform_device *pdev = to_platform_device(dev); 575 struct platform_device *pdev = to_platform_device(dev);
@@ -611,6 +612,7 @@ static int sh_mobile_meram_resume(struct device *dev)
611 meram_write_reg(priv->base, common_regs[i], priv->regs[i]); 612 meram_write_reg(priv->base, common_regs[i], priv->regs[i]);
612 return 0; 613 return 0;
613} 614}
615#endif /* CONFIG_PM_SLEEP || CONFIG_PM_RUNTIME */
614 616
615static UNIVERSAL_DEV_PM_OPS(sh_mobile_meram_dev_pm_ops, 617static UNIVERSAL_DEV_PM_OPS(sh_mobile_meram_dev_pm_ops,
616 sh_mobile_meram_suspend, 618 sh_mobile_meram_suspend,
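
A hedged note on the #if guard added above: UNIVERSAL_DEV_PM_OPS only references the suspend/resume callbacks when the corresponding PM config options are enabled, so without the guard the two static functions presumably end up defined but unused and trip compiler warnings. A toy illustration of the pattern, where the CONFIG_* names are stand-ins rather than real Kconfig symbols:

#include <stdio.h>

/* Flip this on to pull the callbacks back into the build. */
/* #define CONFIG_PM_SLEEP 1 */

#if defined(CONFIG_PM_SLEEP) || defined(CONFIG_PM_RUNTIME)
static int demo_suspend(void) { puts("suspend"); return 0; }
static int demo_resume(void)  { puts("resume");  return 0; }
#define DEMO_PM_OPS { demo_suspend, demo_resume }
#else
/* Nothing references the callbacks in this configuration, so they are
 * compiled out together with the code that would have used them. */
#define DEMO_PM_OPS { NULL, NULL }
#endif

typedef int (*pm_cb)(void);
static const pm_cb demo_pm_ops[2] = DEMO_PM_OPS;

int main(void)
{
        if (demo_pm_ops[0])
                demo_pm_ops[0]();
        else
                puts("PM callbacks compiled out");
        return 0;
}
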
diff --git a/drivers/video/vt8500lcdfb.c b/drivers/video/vt8500lcdfb.c
index b30e5a439d1f..a8f2b280f796 100644
--- a/drivers/video/vt8500lcdfb.c
+++ b/drivers/video/vt8500lcdfb.c
@@ -293,8 +293,7 @@ static int vt8500lcd_probe(struct platform_device *pdev)
293 + sizeof(u32) * 16, GFP_KERNEL); 293 + sizeof(u32) * 16, GFP_KERNEL);
294 if (!fbi) { 294 if (!fbi) {
295 dev_err(&pdev->dev, "Failed to initialize framebuffer device\n"); 295 dev_err(&pdev->dev, "Failed to initialize framebuffer device\n");
296 ret = -ENOMEM; 296 return -ENOMEM;
297 goto failed;
298 } 297 }
299 298
300 strcpy(fbi->fb.fix.id, "VT8500 LCD"); 299 strcpy(fbi->fb.fix.id, "VT8500 LCD");
@@ -327,15 +326,13 @@ static int vt8500lcd_probe(struct platform_device *pdev)
327 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 326 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
328 if (res == NULL) { 327 if (res == NULL) {
329 dev_err(&pdev->dev, "no I/O memory resource defined\n"); 328 dev_err(&pdev->dev, "no I/O memory resource defined\n");
330 ret = -ENODEV; 329 return -ENODEV;
331 goto failed_fbi;
332 } 330 }
333 331
334 res = request_mem_region(res->start, resource_size(res), "vt8500lcd"); 332 res = request_mem_region(res->start, resource_size(res), "vt8500lcd");
335 if (res == NULL) { 333 if (res == NULL) {
336 dev_err(&pdev->dev, "failed to request I/O memory\n"); 334 dev_err(&pdev->dev, "failed to request I/O memory\n");
337 ret = -EBUSY; 335 return -EBUSY;
338 goto failed_fbi;
339 } 336 }
340 337
341 fbi->regbase = ioremap(res->start, resource_size(res)); 338 fbi->regbase = ioremap(res->start, resource_size(res));
@@ -346,17 +343,19 @@ static int vt8500lcd_probe(struct platform_device *pdev)
346 } 343 }
347 344
348 disp_timing = of_get_display_timings(pdev->dev.of_node); 345 disp_timing = of_get_display_timings(pdev->dev.of_node);
349 if (!disp_timing) 346 if (!disp_timing) {
350 return -EINVAL; 347 ret = -EINVAL;
348 goto failed_free_io;
349 }
351 350
352 ret = of_get_fb_videomode(pdev->dev.of_node, &of_mode, 351 ret = of_get_fb_videomode(pdev->dev.of_node, &of_mode,
353 OF_USE_NATIVE_MODE); 352 OF_USE_NATIVE_MODE);
354 if (ret) 353 if (ret)
355 return ret; 354 goto failed_free_io;
356 355
357 ret = of_property_read_u32(pdev->dev.of_node, "bits-per-pixel", &bpp); 356 ret = of_property_read_u32(pdev->dev.of_node, "bits-per-pixel", &bpp);
358 if (ret) 357 if (ret)
359 return ret; 358 goto failed_free_io;
360 359
361 /* try allocating the framebuffer */ 360 /* try allocating the framebuffer */
362 fb_mem_len = of_mode.xres * of_mode.yres * 2 * (bpp / 8); 361 fb_mem_len = of_mode.xres * of_mode.yres * 2 * (bpp / 8);
@@ -364,7 +363,8 @@ static int vt8500lcd_probe(struct platform_device *pdev)
364 GFP_KERNEL); 363 GFP_KERNEL);
365 if (!fb_mem_virt) { 364 if (!fb_mem_virt) {
366 pr_err("%s: Failed to allocate framebuffer\n", __func__); 365 pr_err("%s: Failed to allocate framebuffer\n", __func__);
367 return -ENOMEM; 366 ret = -ENOMEM;
367 goto failed_free_io;
368 } 368 }
369 369
370 fbi->fb.fix.smem_start = fb_mem_phys; 370 fbi->fb.fix.smem_start = fb_mem_phys;
@@ -447,9 +447,6 @@ failed_free_io:
447 iounmap(fbi->regbase); 447 iounmap(fbi->regbase);
448failed_free_res: 448failed_free_res:
449 release_mem_region(res->start, resource_size(res)); 449 release_mem_region(res->start, resource_size(res));
450failed_fbi:
451 kfree(fbi);
452failed:
453 return ret; 450 return ret;
454} 451}
455 452
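
The vt8500lcdfb changes above tighten the probe error paths: early failures with nothing to unwind now return directly (the old failed/failed_fbi labels and kfree(fbi) are gone, suggesting that allocation is managed elsewhere), while later failures jump to failed_free_io so the remapped registers and the memory region are released. A generic sketch of that goto-unwind shape, assuming nothing about the driver itself and using malloc()/free() in place of the kernel resources:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int demo_probe(void)
{
        int ret;
        char *regs, *fb;

        regs = malloc(64);                     /* stand-in for ioremap() */
        if (!regs)
                return -ENOMEM;                /* nothing to unwind yet */

        fb = malloc(4096);                     /* stand-in for the framebuffer */
        if (!fb) {
                ret = -ENOMEM;
                goto failed_free_io;           /* must release regs first */
        }

        memset(fb, 0, 4096);
        free(fb);
        free(regs);
        return 0;

failed_free_io:
        free(regs);
        return ret;
}

int main(void)
{
        printf("probe: %d\n", demo_probe());
        return 0;
}
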
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index c444654fc33f..5c4a95b516cf 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -285,7 +285,7 @@ static void update_balloon_size(struct virtio_balloon *vb)
285{ 285{
286 __le32 actual = cpu_to_le32(vb->num_pages); 286 __le32 actual = cpu_to_le32(vb->num_pages);
287 287
288 virtio_cwrite(vb->vdev, struct virtio_balloon_config, num_pages, 288 virtio_cwrite(vb->vdev, struct virtio_balloon_config, actual,
289 &actual); 289 &actual);
290} 290}
291 291
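
On the virtio_balloon fix above: config accessors of this kind locate the field by its name within the config struct, so passing num_pages where actual was meant writes the guest's reply to the wrong offset even though the types match and the code compiles. A hedged userspace sketch of that mechanism; cfg_write() is a made-up stand-in, not the virtio_cwrite() implementation:

#include <inttypes.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct balloon_config {
        uint32_t num_pages;   /* target size, set by the host */
        uint32_t actual;      /* current size, reported by the guest */
};

/* Hypothetical helper: write one field, located by offsetof(type, field). */
#define cfg_write(cfg, type, field, valp) \
        memcpy((char *)(cfg) + offsetof(type, field), (valp), \
               sizeof(((type *)0)->field))

int main(void)
{
        struct balloon_config cfg = { .num_pages = 256, .actual = 0 };
        uint32_t pages = 128;

        /* Naming the wrong member here would silently clobber num_pages. */
        cfg_write(&cfg, struct balloon_config, actual, &pages);
        printf("num_pages=%" PRIu32 " actual=%" PRIu32 "\n",
               cfg.num_pages, cfg.actual);
        return 0;
}
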
diff --git a/drivers/watchdog/bcm2835_wdt.c b/drivers/watchdog/bcm2835_wdt.c
index a6a2cebb2587..cafa973c43be 100644
--- a/drivers/watchdog/bcm2835_wdt.c
+++ b/drivers/watchdog/bcm2835_wdt.c
@@ -19,7 +19,6 @@
19#include <linux/watchdog.h> 19#include <linux/watchdog.h>
20#include <linux/platform_device.h> 20#include <linux/platform_device.h>
21#include <linux/of_address.h> 21#include <linux/of_address.h>
22#include <linux/miscdevice.h>
23 22
24#define PM_RSTC 0x1c 23#define PM_RSTC 0x1c
25#define PM_WDOG 0x24 24#define PM_WDOG 0x24
diff --git a/drivers/watchdog/ep93xx_wdt.c b/drivers/watchdog/ep93xx_wdt.c
index 833e81311848..d1d07f2f69df 100644
--- a/drivers/watchdog/ep93xx_wdt.c
+++ b/drivers/watchdog/ep93xx_wdt.c
@@ -28,7 +28,6 @@
28 28
29#include <linux/platform_device.h> 29#include <linux/platform_device.h>
30#include <linux/module.h> 30#include <linux/module.h>
31#include <linux/miscdevice.h>
32#include <linux/watchdog.h> 31#include <linux/watchdog.h>
33#include <linux/timer.h> 32#include <linux/timer.h>
34#include <linux/io.h> 33#include <linux/io.h>
diff --git a/drivers/watchdog/ie6xx_wdt.c b/drivers/watchdog/ie6xx_wdt.c
index 70a240297c6d..07f88f54e5c0 100644
--- a/drivers/watchdog/ie6xx_wdt.c
+++ b/drivers/watchdog/ie6xx_wdt.c
@@ -28,7 +28,6 @@
28#include <linux/kernel.h> 28#include <linux/kernel.h>
29#include <linux/types.h> 29#include <linux/types.h>
30#include <linux/watchdog.h> 30#include <linux/watchdog.h>
31#include <linux/miscdevice.h>
32#include <linux/seq_file.h> 31#include <linux/seq_file.h>
33#include <linux/debugfs.h> 32#include <linux/debugfs.h>
34#include <linux/uaccess.h> 33#include <linux/uaccess.h>
diff --git a/drivers/watchdog/jz4740_wdt.c b/drivers/watchdog/jz4740_wdt.c
index 2de486a7eea1..3aa50cfa335f 100644
--- a/drivers/watchdog/jz4740_wdt.c
+++ b/drivers/watchdog/jz4740_wdt.c
@@ -17,7 +17,6 @@
17#include <linux/moduleparam.h> 17#include <linux/moduleparam.h>
18#include <linux/types.h> 18#include <linux/types.h>
19#include <linux/kernel.h> 19#include <linux/kernel.h>
20#include <linux/miscdevice.h>
21#include <linux/watchdog.h> 20#include <linux/watchdog.h>
22#include <linux/init.h> 21#include <linux/init.h>
23#include <linux/platform_device.h> 22#include <linux/platform_device.h>
diff --git a/drivers/watchdog/kempld_wdt.c b/drivers/watchdog/kempld_wdt.c
index a1a3638c579c..20dc73844737 100644
--- a/drivers/watchdog/kempld_wdt.c
+++ b/drivers/watchdog/kempld_wdt.c
@@ -26,7 +26,6 @@
26 26
27#include <linux/module.h> 27#include <linux/module.h>
28#include <linux/moduleparam.h> 28#include <linux/moduleparam.h>
29#include <linux/miscdevice.h>
30#include <linux/uaccess.h> 29#include <linux/uaccess.h>
31#include <linux/watchdog.h> 30#include <linux/watchdog.h>
32#include <linux/platform_device.h> 31#include <linux/platform_device.h>
diff --git a/drivers/watchdog/max63xx_wdt.c b/drivers/watchdog/max63xx_wdt.c
index 6d4f3998e1f6..bdb3f4a5b27c 100644
--- a/drivers/watchdog/max63xx_wdt.c
+++ b/drivers/watchdog/max63xx_wdt.c
@@ -19,7 +19,6 @@
19#include <linux/moduleparam.h> 19#include <linux/moduleparam.h>
20#include <linux/types.h> 20#include <linux/types.h>
21#include <linux/kernel.h> 21#include <linux/kernel.h>
22#include <linux/miscdevice.h>
23#include <linux/watchdog.h> 22#include <linux/watchdog.h>
24#include <linux/init.h> 23#include <linux/init.h>
25#include <linux/bitops.h> 24#include <linux/bitops.h>
diff --git a/drivers/watchdog/orion_wdt.c b/drivers/watchdog/orion_wdt.c
index 44edca66d564..f7722a424676 100644
--- a/drivers/watchdog/orion_wdt.c
+++ b/drivers/watchdog/orion_wdt.c
@@ -16,7 +16,6 @@
16#include <linux/moduleparam.h> 16#include <linux/moduleparam.h>
17#include <linux/types.h> 17#include <linux/types.h>
18#include <linux/kernel.h> 18#include <linux/kernel.h>
19#include <linux/miscdevice.h>
20#include <linux/platform_device.h> 19#include <linux/platform_device.h>
21#include <linux/watchdog.h> 20#include <linux/watchdog.h>
22#include <linux/init.h> 21#include <linux/init.h>
diff --git a/drivers/watchdog/pnx4008_wdt.c b/drivers/watchdog/pnx4008_wdt.c
index 1bdcc313e1d9..5bec20f5dc2d 100644
--- a/drivers/watchdog/pnx4008_wdt.c
+++ b/drivers/watchdog/pnx4008_wdt.c
@@ -23,7 +23,6 @@
23#include <linux/moduleparam.h> 23#include <linux/moduleparam.h>
24#include <linux/types.h> 24#include <linux/types.h>
25#include <linux/kernel.h> 25#include <linux/kernel.h>
26#include <linux/miscdevice.h>
27#include <linux/watchdog.h> 26#include <linux/watchdog.h>
28#include <linux/init.h> 27#include <linux/init.h>
29#include <linux/platform_device.h> 28#include <linux/platform_device.h>
diff --git a/drivers/watchdog/rt2880_wdt.c b/drivers/watchdog/rt2880_wdt.c
index 53d37fea183e..d92c2d5859ce 100644
--- a/drivers/watchdog/rt2880_wdt.c
+++ b/drivers/watchdog/rt2880_wdt.c
@@ -16,7 +16,6 @@
16#include <linux/module.h> 16#include <linux/module.h>
17#include <linux/kernel.h> 17#include <linux/kernel.h>
18#include <linux/watchdog.h> 18#include <linux/watchdog.h>
19#include <linux/miscdevice.h>
20#include <linux/moduleparam.h> 19#include <linux/moduleparam.h>
21#include <linux/platform_device.h> 20#include <linux/platform_device.h>
22 21
diff --git a/drivers/watchdog/sc1200wdt.c b/drivers/watchdog/sc1200wdt.c
index 3b9fff9dcf65..131193a7acdf 100644
--- a/drivers/watchdog/sc1200wdt.c
+++ b/drivers/watchdog/sc1200wdt.c
@@ -409,8 +409,9 @@ static int __init sc1200wdt_init(void)
409#if defined CONFIG_PNP 409#if defined CONFIG_PNP
410 /* now that the user has specified an IO port and we haven't detected 410 /* now that the user has specified an IO port and we haven't detected
411 * any devices, disable pnp support */ 411 * any devices, disable pnp support */
412 if (isapnp)
413 pnp_unregister_driver(&scl200wdt_pnp_driver);
412 isapnp = 0; 414 isapnp = 0;
413 pnp_unregister_driver(&scl200wdt_pnp_driver);
414#endif 415#endif
415 416
416 if (!request_region(io, io_len, SC1200_MODULE_NAME)) { 417 if (!request_region(io, io_len, SC1200_MODULE_NAME)) {
diff --git a/drivers/watchdog/shwdt.c b/drivers/watchdog/shwdt.c
index f9b8e06f3558..af3528f84d65 100644
--- a/drivers/watchdog/shwdt.c
+++ b/drivers/watchdog/shwdt.c
@@ -26,7 +26,6 @@
26#include <linux/init.h> 26#include <linux/init.h>
27#include <linux/types.h> 27#include <linux/types.h>
28#include <linux/spinlock.h> 28#include <linux/spinlock.h>
29#include <linux/miscdevice.h>
30#include <linux/watchdog.h> 29#include <linux/watchdog.h>
31#include <linux/pm_runtime.h> 30#include <linux/pm_runtime.h>
32#include <linux/fs.h> 31#include <linux/fs.h>
diff --git a/drivers/watchdog/softdog.c b/drivers/watchdog/softdog.c
index ef2638fee4a8..c04a1aa158e2 100644
--- a/drivers/watchdog/softdog.c
+++ b/drivers/watchdog/softdog.c
@@ -42,7 +42,6 @@
42#include <linux/moduleparam.h> 42#include <linux/moduleparam.h>
43#include <linux/types.h> 43#include <linux/types.h>
44#include <linux/timer.h> 44#include <linux/timer.h>
45#include <linux/miscdevice.h>
46#include <linux/watchdog.h> 45#include <linux/watchdog.h>
47#include <linux/notifier.h> 46#include <linux/notifier.h>
48#include <linux/reboot.h> 47#include <linux/reboot.h>
diff --git a/drivers/watchdog/stmp3xxx_rtc_wdt.c b/drivers/watchdog/stmp3xxx_rtc_wdt.c
index d667f6b51d35..bb64ae3f47da 100644
--- a/drivers/watchdog/stmp3xxx_rtc_wdt.c
+++ b/drivers/watchdog/stmp3xxx_rtc_wdt.c
@@ -12,7 +12,6 @@
12#include <linux/init.h> 12#include <linux/init.h>
13#include <linux/kernel.h> 13#include <linux/kernel.h>
14#include <linux/module.h> 14#include <linux/module.h>
15#include <linux/miscdevice.h>
16#include <linux/watchdog.h> 15#include <linux/watchdog.h>
17#include <linux/platform_device.h> 16#include <linux/platform_device.h>
18#include <linux/stmp3xxx_rtc_wdt.h> 17#include <linux/stmp3xxx_rtc_wdt.h>
diff --git a/drivers/watchdog/txx9wdt.c b/drivers/watchdog/txx9wdt.c
index 0fd0e8ae62a8..6a447e321dd0 100644
--- a/drivers/watchdog/txx9wdt.c
+++ b/drivers/watchdog/txx9wdt.c
@@ -13,7 +13,6 @@
13#include <linux/module.h> 13#include <linux/module.h>
14#include <linux/moduleparam.h> 14#include <linux/moduleparam.h>
15#include <linux/types.h> 15#include <linux/types.h>
16#include <linux/miscdevice.h>
17#include <linux/watchdog.h> 16#include <linux/watchdog.h>
18#include <linux/init.h> 17#include <linux/init.h>
19#include <linux/platform_device.h> 18#include <linux/platform_device.h>
diff --git a/drivers/watchdog/ux500_wdt.c b/drivers/watchdog/ux500_wdt.c
index e029b5768f2c..5aed9d7ad47e 100644
--- a/drivers/watchdog/ux500_wdt.c
+++ b/drivers/watchdog/ux500_wdt.c
@@ -12,7 +12,6 @@
12#include <linux/module.h> 12#include <linux/module.h>
13#include <linux/kernel.h> 13#include <linux/kernel.h>
14#include <linux/moduleparam.h> 14#include <linux/moduleparam.h>
15#include <linux/miscdevice.h>
16#include <linux/err.h> 15#include <linux/err.h>
17#include <linux/uaccess.h> 16#include <linux/uaccess.h>
18#include <linux/watchdog.h> 17#include <linux/watchdog.h>
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 55ea73f7c70b..4c02e2b94103 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -350,17 +350,19 @@ static enum bp_state increase_reservation(unsigned long nr_pages)
350 350
351 pfn = page_to_pfn(page); 351 pfn = page_to_pfn(page);
352 352
353 set_phys_to_machine(pfn, frame_list[i]);
354
355#ifdef CONFIG_XEN_HAVE_PVMMU 353#ifdef CONFIG_XEN_HAVE_PVMMU
356 /* Link back into the page tables if not highmem. */ 354 if (!xen_feature(XENFEAT_auto_translated_physmap)) {
357 if (xen_pv_domain() && !PageHighMem(page)) { 355 set_phys_to_machine(pfn, frame_list[i]);
358 int ret; 356
359 ret = HYPERVISOR_update_va_mapping( 357 /* Link back into the page tables if not highmem. */
360 (unsigned long)__va(pfn << PAGE_SHIFT), 358 if (!PageHighMem(page)) {
361 mfn_pte(frame_list[i], PAGE_KERNEL), 359 int ret;
362 0); 360 ret = HYPERVISOR_update_va_mapping(
363 BUG_ON(ret); 361 (unsigned long)__va(pfn << PAGE_SHIFT),
362 mfn_pte(frame_list[i], PAGE_KERNEL),
363 0);
364 BUG_ON(ret);
365 }
364 } 366 }
365#endif 367#endif
366 368
@@ -378,7 +380,6 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
378 enum bp_state state = BP_DONE; 380 enum bp_state state = BP_DONE;
379 unsigned long pfn, i; 381 unsigned long pfn, i;
380 struct page *page; 382 struct page *page;
381 struct page *scratch_page;
382 int ret; 383 int ret;
383 struct xen_memory_reservation reservation = { 384 struct xen_memory_reservation reservation = {
384 .address_bits = 0, 385 .address_bits = 0,
@@ -411,27 +412,29 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
411 412
412 scrub_page(page); 413 scrub_page(page);
413 414
415#ifdef CONFIG_XEN_HAVE_PVMMU
414 /* 416 /*
415 * Ballooned out frames are effectively replaced with 417 * Ballooned out frames are effectively replaced with
416 * a scratch frame. Ensure direct mappings and the 418 * a scratch frame. Ensure direct mappings and the
417 * p2m are consistent. 419 * p2m are consistent.
418 */ 420 */
419 scratch_page = get_balloon_scratch_page();
420#ifdef CONFIG_XEN_HAVE_PVMMU
421 if (xen_pv_domain() && !PageHighMem(page)) {
422 ret = HYPERVISOR_update_va_mapping(
423 (unsigned long)__va(pfn << PAGE_SHIFT),
424 pfn_pte(page_to_pfn(scratch_page),
425 PAGE_KERNEL_RO), 0);
426 BUG_ON(ret);
427 }
428#endif
429 if (!xen_feature(XENFEAT_auto_translated_physmap)) { 421 if (!xen_feature(XENFEAT_auto_translated_physmap)) {
430 unsigned long p; 422 unsigned long p;
423 struct page *scratch_page = get_balloon_scratch_page();
424
425 if (!PageHighMem(page)) {
426 ret = HYPERVISOR_update_va_mapping(
427 (unsigned long)__va(pfn << PAGE_SHIFT),
428 pfn_pte(page_to_pfn(scratch_page),
429 PAGE_KERNEL_RO), 0);
430 BUG_ON(ret);
431 }
431 p = page_to_pfn(scratch_page); 432 p = page_to_pfn(scratch_page);
432 __set_phys_to_machine(pfn, pfn_to_mfn(p)); 433 __set_phys_to_machine(pfn, pfn_to_mfn(p));
434
435 put_balloon_scratch_page();
433 } 436 }
434 put_balloon_scratch_page(); 437#endif
435 438
436 balloon_append(pfn_to_page(pfn)); 439 balloon_append(pfn_to_page(pfn));
437 } 440 }
@@ -627,15 +630,17 @@ static int __init balloon_init(void)
627 if (!xen_domain()) 630 if (!xen_domain())
628 return -ENODEV; 631 return -ENODEV;
629 632
630 for_each_online_cpu(cpu) 633 if (!xen_feature(XENFEAT_auto_translated_physmap)) {
631 { 634 for_each_online_cpu(cpu)
632 per_cpu(balloon_scratch_page, cpu) = alloc_page(GFP_KERNEL); 635 {
633 if (per_cpu(balloon_scratch_page, cpu) == NULL) { 636 per_cpu(balloon_scratch_page, cpu) = alloc_page(GFP_KERNEL);
634 pr_warn("Failed to allocate balloon_scratch_page for cpu %d\n", cpu); 637 if (per_cpu(balloon_scratch_page, cpu) == NULL) {
635 return -ENOMEM; 638 pr_warn("Failed to allocate balloon_scratch_page for cpu %d\n", cpu);
639 return -ENOMEM;
640 }
636 } 641 }
642 register_cpu_notifier(&balloon_cpu_notifier);
637 } 643 }
638 register_cpu_notifier(&balloon_cpu_notifier);
639 644
640 pr_info("Initialising balloon driver\n"); 645 pr_info("Initialising balloon driver\n");
641 646
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index 62ccf5424ba8..aa846a48f400 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -930,9 +930,10 @@ int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
930 ret = m2p_add_override(mfn, pages[i], kmap_ops ? 930 ret = m2p_add_override(mfn, pages[i], kmap_ops ?
931 &kmap_ops[i] : NULL); 931 &kmap_ops[i] : NULL);
932 if (ret) 932 if (ret)
933 return ret; 933 goto out;
934 } 934 }
935 935
936 out:
936 if (lazy) 937 if (lazy)
937 arch_leave_lazy_mmu_mode(); 938 arch_leave_lazy_mmu_mode();
938 939
@@ -969,9 +970,10 @@ int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
969 ret = m2p_remove_override(pages[i], kmap_ops ? 970 ret = m2p_remove_override(pages[i], kmap_ops ?
970 &kmap_ops[i] : NULL); 971 &kmap_ops[i] : NULL);
971 if (ret) 972 if (ret)
972 return ret; 973 goto out;
973 } 974 }
974 975
976 out:
975 if (lazy) 977 if (lazy)
976 arch_leave_lazy_mmu_mode(); 978 arch_leave_lazy_mmu_mode();
977 979
@@ -1174,7 +1176,8 @@ static int gnttab_setup(void)
1174 gnttab_shared.addr = xen_remap(xen_hvm_resume_frames, 1176 gnttab_shared.addr = xen_remap(xen_hvm_resume_frames,
1175 PAGE_SIZE * max_nr_gframes); 1177 PAGE_SIZE * max_nr_gframes);
1176 if (gnttab_shared.addr == NULL) { 1178 if (gnttab_shared.addr == NULL) {
1177 pr_warn("Failed to ioremap gnttab share frames!\n"); 1179 pr_warn("Failed to ioremap gnttab share frames (addr=0x%08lx)!\n",
1180 xen_hvm_resume_frames);
1178 return -ENOMEM; 1181 return -ENOMEM;
1179 } 1182 }
1180 } 1183 }
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index 8e74590fa1bb..569a13b9e856 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -533,12 +533,17 @@ static void privcmd_close(struct vm_area_struct *vma)
533{ 533{
534 struct page **pages = vma->vm_private_data; 534 struct page **pages = vma->vm_private_data;
535 int numpgs = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; 535 int numpgs = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
536 int rc;
536 537
537 if (!xen_feature(XENFEAT_auto_translated_physmap) || !numpgs || !pages) 538 if (!xen_feature(XENFEAT_auto_translated_physmap) || !numpgs || !pages)
538 return; 539 return;
539 540
540 xen_unmap_domain_mfn_range(vma, numpgs, pages); 541 rc = xen_unmap_domain_mfn_range(vma, numpgs, pages);
541 free_xenballooned_pages(numpgs, pages); 542 if (rc == 0)
543 free_xenballooned_pages(numpgs, pages);
544 else
545 pr_crit("unable to unmap MFN range: leaking %d pages. rc=%d\n",
546 numpgs, rc);
542 kfree(pages); 547 kfree(pages);
543} 548}
544 549
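
The privcmd_close() change above frees the ballooned pages only when the unmap call reports success, and otherwise logs and deliberately leaks them, on the reasoning that freeing memory which may still be mapped is worse than a bounded leak. The same rule in plain POSIX terms, as a hedged analogy rather than the Xen code:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
        size_t len = 4096;
        void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (buf == MAP_FAILED)
                return 1;

        if (munmap(buf, len) == 0) {
                /* Unmap succeeded: safe to release related bookkeeping. */
                printf("unmapped %zu bytes\n", len);
        } else {
                /* Keep the bookkeeping alive rather than free a live mapping. */
                fprintf(stderr, "unmap failed: %s; leaking on purpose\n",
                        strerror(errno));
        }
        return 0;
}
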
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index a224bc74b6b9..1eac0731c349 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -555,6 +555,11 @@ xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
555 sg_dma_len(sgl) = 0; 555 sg_dma_len(sgl) = 0;
556 return 0; 556 return 0;
557 } 557 }
558 xen_dma_map_page(hwdev, pfn_to_page(map >> PAGE_SHIFT),
559 map & ~PAGE_MASK,
560 sg->length,
561 dir,
562 attrs);
558 sg->dma_address = xen_phys_to_bus(map); 563 sg->dma_address = xen_phys_to_bus(map);
559 } else { 564 } else {
560 /* we are not interested in the dma_addr returned by 565 /* we are not interested in the dma_addr returned by
diff --git a/fs/affs/Changes b/fs/affs/Changes
index a29409c1ffe0..b41c2c9792ff 100644
--- a/fs/affs/Changes
+++ b/fs/affs/Changes
@@ -91,7 +91,7 @@ more 2.4 fixes: [Roman Zippel]
91Version 3.11 91Version 3.11
92------------ 92------------
93 93
94- Converted to use 2.3.x page cache [Dave Jones <dave@powertweak.com>] 94- Converted to use 2.3.x page cache [Dave Jones]
95- Corruption in truncate() bugfix [Ken Tyler <kent@werple.net.au>] 95- Corruption in truncate() bugfix [Ken Tyler <kent@werple.net.au>]
96 96
97Version 3.10 97Version 3.10
diff --git a/fs/aio.c b/fs/aio.c
index 08159ed13649..062a5f6a1448 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -244,9 +244,14 @@ static void aio_free_ring(struct kioctx *ctx)
244 int i; 244 int i;
245 245
246 for (i = 0; i < ctx->nr_pages; i++) { 246 for (i = 0; i < ctx->nr_pages; i++) {
247 struct page *page;
247 pr_debug("pid(%d) [%d] page->count=%d\n", current->pid, i, 248 pr_debug("pid(%d) [%d] page->count=%d\n", current->pid, i,
248 page_count(ctx->ring_pages[i])); 249 page_count(ctx->ring_pages[i]));
249 put_page(ctx->ring_pages[i]); 250 page = ctx->ring_pages[i];
251 if (!page)
252 continue;
253 ctx->ring_pages[i] = NULL;
254 put_page(page);
250 } 255 }
251 256
252 put_aio_ring_file(ctx); 257 put_aio_ring_file(ctx);
@@ -280,18 +285,38 @@ static int aio_migratepage(struct address_space *mapping, struct page *new,
280 unsigned long flags; 285 unsigned long flags;
281 int rc; 286 int rc;
282 287
288 rc = 0;
289
290 /* Make sure the old page hasn't already been changed */
291 spin_lock(&mapping->private_lock);
292 ctx = mapping->private_data;
293 if (ctx) {
294 pgoff_t idx;
295 spin_lock_irqsave(&ctx->completion_lock, flags);
296 idx = old->index;
297 if (idx < (pgoff_t)ctx->nr_pages) {
298 if (ctx->ring_pages[idx] != old)
299 rc = -EAGAIN;
300 } else
301 rc = -EINVAL;
302 spin_unlock_irqrestore(&ctx->completion_lock, flags);
303 } else
304 rc = -EINVAL;
305 spin_unlock(&mapping->private_lock);
306
307 if (rc != 0)
308 return rc;
309
283 /* Writeback must be complete */ 310 /* Writeback must be complete */
284 BUG_ON(PageWriteback(old)); 311 BUG_ON(PageWriteback(old));
285 put_page(old); 312 get_page(new);
286 313
287 rc = migrate_page_move_mapping(mapping, new, old, NULL, mode); 314 rc = migrate_page_move_mapping(mapping, new, old, NULL, mode, 1);
288 if (rc != MIGRATEPAGE_SUCCESS) { 315 if (rc != MIGRATEPAGE_SUCCESS) {
289 get_page(old); 316 put_page(new);
290 return rc; 317 return rc;
291 } 318 }
292 319
293 get_page(new);
294
295 /* We can potentially race against kioctx teardown here. Use the 320 /* We can potentially race against kioctx teardown here. Use the
296 * address_space's private data lock to protect the mapping's 321 * address_space's private data lock to protect the mapping's
297 * private_data. 322 * private_data.
@@ -303,13 +328,24 @@ static int aio_migratepage(struct address_space *mapping, struct page *new,
303 spin_lock_irqsave(&ctx->completion_lock, flags); 328 spin_lock_irqsave(&ctx->completion_lock, flags);
304 migrate_page_copy(new, old); 329 migrate_page_copy(new, old);
305 idx = old->index; 330 idx = old->index;
306 if (idx < (pgoff_t)ctx->nr_pages) 331 if (idx < (pgoff_t)ctx->nr_pages) {
307 ctx->ring_pages[idx] = new; 332 /* And only do the move if things haven't changed */
333 if (ctx->ring_pages[idx] == old)
334 ctx->ring_pages[idx] = new;
335 else
336 rc = -EAGAIN;
337 } else
338 rc = -EINVAL;
308 spin_unlock_irqrestore(&ctx->completion_lock, flags); 339 spin_unlock_irqrestore(&ctx->completion_lock, flags);
309 } else 340 } else
310 rc = -EBUSY; 341 rc = -EBUSY;
311 spin_unlock(&mapping->private_lock); 342 spin_unlock(&mapping->private_lock);
312 343
344 if (rc == MIGRATEPAGE_SUCCESS)
345 put_page(old);
346 else
347 put_page(new);
348
313 return rc; 349 return rc;
314} 350}
315#endif 351#endif
@@ -326,7 +362,7 @@ static int aio_setup_ring(struct kioctx *ctx)
326 struct aio_ring *ring; 362 struct aio_ring *ring;
327 unsigned nr_events = ctx->max_reqs; 363 unsigned nr_events = ctx->max_reqs;
328 struct mm_struct *mm = current->mm; 364 struct mm_struct *mm = current->mm;
329 unsigned long size, populate; 365 unsigned long size, unused;
330 int nr_pages; 366 int nr_pages;
331 int i; 367 int i;
332 struct file *file; 368 struct file *file;
@@ -347,6 +383,20 @@ static int aio_setup_ring(struct kioctx *ctx)
347 return -EAGAIN; 383 return -EAGAIN;
348 } 384 }
349 385
386 ctx->aio_ring_file = file;
387 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring))
388 / sizeof(struct io_event);
389
390 ctx->ring_pages = ctx->internal_pages;
391 if (nr_pages > AIO_RING_PAGES) {
392 ctx->ring_pages = kcalloc(nr_pages, sizeof(struct page *),
393 GFP_KERNEL);
394 if (!ctx->ring_pages) {
395 put_aio_ring_file(ctx);
396 return -ENOMEM;
397 }
398 }
399
350 for (i = 0; i < nr_pages; i++) { 400 for (i = 0; i < nr_pages; i++) {
351 struct page *page; 401 struct page *page;
352 page = find_or_create_page(file->f_inode->i_mapping, 402 page = find_or_create_page(file->f_inode->i_mapping,
@@ -358,17 +408,14 @@ static int aio_setup_ring(struct kioctx *ctx)
358 SetPageUptodate(page); 408 SetPageUptodate(page);
359 SetPageDirty(page); 409 SetPageDirty(page);
360 unlock_page(page); 410 unlock_page(page);
411
412 ctx->ring_pages[i] = page;
361 } 413 }
362 ctx->aio_ring_file = file; 414 ctx->nr_pages = i;
363 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring))
364 / sizeof(struct io_event);
365 415
366 ctx->ring_pages = ctx->internal_pages; 416 if (unlikely(i != nr_pages)) {
367 if (nr_pages > AIO_RING_PAGES) { 417 aio_free_ring(ctx);
368 ctx->ring_pages = kcalloc(nr_pages, sizeof(struct page *), 418 return -EAGAIN;
369 GFP_KERNEL);
370 if (!ctx->ring_pages)
371 return -ENOMEM;
372 } 419 }
373 420
374 ctx->mmap_size = nr_pages * PAGE_SIZE; 421 ctx->mmap_size = nr_pages * PAGE_SIZE;
@@ -377,9 +424,9 @@ static int aio_setup_ring(struct kioctx *ctx)
377 down_write(&mm->mmap_sem); 424 down_write(&mm->mmap_sem);
378 ctx->mmap_base = do_mmap_pgoff(ctx->aio_ring_file, 0, ctx->mmap_size, 425 ctx->mmap_base = do_mmap_pgoff(ctx->aio_ring_file, 0, ctx->mmap_size,
379 PROT_READ | PROT_WRITE, 426 PROT_READ | PROT_WRITE,
380 MAP_SHARED | MAP_POPULATE, 0, &populate); 427 MAP_SHARED, 0, &unused);
428 up_write(&mm->mmap_sem);
381 if (IS_ERR((void *)ctx->mmap_base)) { 429 if (IS_ERR((void *)ctx->mmap_base)) {
382 up_write(&mm->mmap_sem);
383 ctx->mmap_size = 0; 430 ctx->mmap_size = 0;
384 aio_free_ring(ctx); 431 aio_free_ring(ctx);
385 return -EAGAIN; 432 return -EAGAIN;
@@ -387,27 +434,6 @@ static int aio_setup_ring(struct kioctx *ctx)
387 434
388 pr_debug("mmap address: 0x%08lx\n", ctx->mmap_base); 435 pr_debug("mmap address: 0x%08lx\n", ctx->mmap_base);
389 436
390 /* We must do this while still holding mmap_sem for write, as we
391 * need to be protected against userspace attempting to mremap()
392 * or munmap() the ring buffer.
393 */
394 ctx->nr_pages = get_user_pages(current, mm, ctx->mmap_base, nr_pages,
395 1, 0, ctx->ring_pages, NULL);
396
397 /* Dropping the reference here is safe as the page cache will hold
398 * onto the pages for us. It is also required so that page migration
399 * can unmap the pages and get the right reference count.
400 */
401 for (i = 0; i < ctx->nr_pages; i++)
402 put_page(ctx->ring_pages[i]);
403
404 up_write(&mm->mmap_sem);
405
406 if (unlikely(ctx->nr_pages != nr_pages)) {
407 aio_free_ring(ctx);
408 return -EAGAIN;
409 }
410
411 ctx->user_id = ctx->mmap_base; 437 ctx->user_id = ctx->mmap_base;
412 ctx->nr_events = nr_events; /* trusted copy */ 438 ctx->nr_events = nr_events; /* trusted copy */
413 439
@@ -645,12 +671,13 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
645 aio_nr + nr_events < aio_nr) { 671 aio_nr + nr_events < aio_nr) {
646 spin_unlock(&aio_nr_lock); 672 spin_unlock(&aio_nr_lock);
647 err = -EAGAIN; 673 err = -EAGAIN;
648 goto err; 674 goto err_ctx;
649 } 675 }
650 aio_nr += ctx->max_reqs; 676 aio_nr += ctx->max_reqs;
651 spin_unlock(&aio_nr_lock); 677 spin_unlock(&aio_nr_lock);
652 678
653 percpu_ref_get(&ctx->users); /* io_setup() will drop this ref */ 679 percpu_ref_get(&ctx->users); /* io_setup() will drop this ref */
680 percpu_ref_get(&ctx->reqs); /* free_ioctx_users() will drop this */
654 681
655 err = ioctx_add_table(ctx, mm); 682 err = ioctx_add_table(ctx, mm);
656 if (err) 683 if (err)
@@ -662,6 +689,8 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
662 689
663err_cleanup: 690err_cleanup:
664 aio_nr_sub(ctx->max_reqs); 691 aio_nr_sub(ctx->max_reqs);
692err_ctx:
693 aio_free_ring(ctx);
665err: 694err:
666 free_percpu(ctx->cpu); 695 free_percpu(ctx->cpu);
667 free_percpu(ctx->reqs.pcpu_count); 696 free_percpu(ctx->reqs.pcpu_count);
diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c
index b50764bef141..131d82800b3a 100644
--- a/fs/btrfs/check-integrity.c
+++ b/fs/btrfs/check-integrity.c
@@ -333,7 +333,6 @@ static void btrfsic_release_block_ctx(struct btrfsic_block_data_ctx *block_ctx);
333static int btrfsic_read_block(struct btrfsic_state *state, 333static int btrfsic_read_block(struct btrfsic_state *state,
334 struct btrfsic_block_data_ctx *block_ctx); 334 struct btrfsic_block_data_ctx *block_ctx);
335static void btrfsic_dump_database(struct btrfsic_state *state); 335static void btrfsic_dump_database(struct btrfsic_state *state);
336static void btrfsic_complete_bio_end_io(struct bio *bio, int err);
337static int btrfsic_test_for_metadata(struct btrfsic_state *state, 336static int btrfsic_test_for_metadata(struct btrfsic_state *state,
338 char **datav, unsigned int num_pages); 337 char **datav, unsigned int num_pages);
339static void btrfsic_process_written_block(struct btrfsic_dev_state *dev_state, 338static void btrfsic_process_written_block(struct btrfsic_dev_state *dev_state,
@@ -1687,7 +1686,6 @@ static int btrfsic_read_block(struct btrfsic_state *state,
1687 for (i = 0; i < num_pages;) { 1686 for (i = 0; i < num_pages;) {
1688 struct bio *bio; 1687 struct bio *bio;
1689 unsigned int j; 1688 unsigned int j;
1690 DECLARE_COMPLETION_ONSTACK(complete);
1691 1689
1692 bio = btrfs_io_bio_alloc(GFP_NOFS, num_pages - i); 1690 bio = btrfs_io_bio_alloc(GFP_NOFS, num_pages - i);
1693 if (!bio) { 1691 if (!bio) {
@@ -1698,8 +1696,6 @@ static int btrfsic_read_block(struct btrfsic_state *state,
1698 } 1696 }
1699 bio->bi_bdev = block_ctx->dev->bdev; 1697 bio->bi_bdev = block_ctx->dev->bdev;
1700 bio->bi_sector = dev_bytenr >> 9; 1698 bio->bi_sector = dev_bytenr >> 9;
1701 bio->bi_end_io = btrfsic_complete_bio_end_io;
1702 bio->bi_private = &complete;
1703 1699
1704 for (j = i; j < num_pages; j++) { 1700 for (j = i; j < num_pages; j++) {
1705 ret = bio_add_page(bio, block_ctx->pagev[j], 1701 ret = bio_add_page(bio, block_ctx->pagev[j],
@@ -1712,12 +1708,7 @@ static int btrfsic_read_block(struct btrfsic_state *state,
1712 "btrfsic: error, failed to add a single page!\n"); 1708 "btrfsic: error, failed to add a single page!\n");
1713 return -1; 1709 return -1;
1714 } 1710 }
1715 submit_bio(READ, bio); 1711 if (submit_bio_wait(READ, bio)) {
1716
1717 /* this will also unplug the queue */
1718 wait_for_completion(&complete);
1719
1720 if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) {
1721 printk(KERN_INFO 1712 printk(KERN_INFO
1722 "btrfsic: read error at logical %llu dev %s!\n", 1713 "btrfsic: read error at logical %llu dev %s!\n",
1723 block_ctx->start, block_ctx->dev->name); 1714 block_ctx->start, block_ctx->dev->name);
@@ -1740,11 +1731,6 @@ static int btrfsic_read_block(struct btrfsic_state *state,
1740 return block_ctx->len; 1731 return block_ctx->len;
1741} 1732}
1742 1733
1743static void btrfsic_complete_bio_end_io(struct bio *bio, int err)
1744{
1745 complete((struct completion *)bio->bi_private);
1746}
1747
1748static void btrfsic_dump_database(struct btrfsic_state *state) 1734static void btrfsic_dump_database(struct btrfsic_state *state)
1749{ 1735{
1750 struct list_head *elem_all; 1736 struct list_head *elem_all;
@@ -3008,14 +2994,12 @@ int btrfsic_submit_bh(int rw, struct buffer_head *bh)
3008 return submit_bh(rw, bh); 2994 return submit_bh(rw, bh);
3009} 2995}
3010 2996
3011void btrfsic_submit_bio(int rw, struct bio *bio) 2997static void __btrfsic_submit_bio(int rw, struct bio *bio)
3012{ 2998{
3013 struct btrfsic_dev_state *dev_state; 2999 struct btrfsic_dev_state *dev_state;
3014 3000
3015 if (!btrfsic_is_initialized) { 3001 if (!btrfsic_is_initialized)
3016 submit_bio(rw, bio);
3017 return; 3002 return;
3018 }
3019 3003
3020 mutex_lock(&btrfsic_mutex); 3004 mutex_lock(&btrfsic_mutex);
3021 /* since btrfsic_submit_bio() is also called before 3005 /* since btrfsic_submit_bio() is also called before
@@ -3106,10 +3090,20 @@ void btrfsic_submit_bio(int rw, struct bio *bio)
3106 } 3090 }
3107leave: 3091leave:
3108 mutex_unlock(&btrfsic_mutex); 3092 mutex_unlock(&btrfsic_mutex);
3093}
3109 3094
3095void btrfsic_submit_bio(int rw, struct bio *bio)
3096{
3097 __btrfsic_submit_bio(rw, bio);
3110 submit_bio(rw, bio); 3098 submit_bio(rw, bio);
3111} 3099}
3112 3100
3101int btrfsic_submit_bio_wait(int rw, struct bio *bio)
3102{
3103 __btrfsic_submit_bio(rw, bio);
3104 return submit_bio_wait(rw, bio);
3105}
3106
3113int btrfsic_mount(struct btrfs_root *root, 3107int btrfsic_mount(struct btrfs_root *root,
3114 struct btrfs_fs_devices *fs_devices, 3108 struct btrfs_fs_devices *fs_devices,
3115 int including_extent_data, u32 print_mask) 3109 int including_extent_data, u32 print_mask)
diff --git a/fs/btrfs/check-integrity.h b/fs/btrfs/check-integrity.h
index 8b59175cc502..13b8566c97ab 100644
--- a/fs/btrfs/check-integrity.h
+++ b/fs/btrfs/check-integrity.h
@@ -22,9 +22,11 @@
22#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY 22#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
23int btrfsic_submit_bh(int rw, struct buffer_head *bh); 23int btrfsic_submit_bh(int rw, struct buffer_head *bh);
24void btrfsic_submit_bio(int rw, struct bio *bio); 24void btrfsic_submit_bio(int rw, struct bio *bio);
25int btrfsic_submit_bio_wait(int rw, struct bio *bio);
25#else 26#else
26#define btrfsic_submit_bh submit_bh 27#define btrfsic_submit_bh submit_bh
27#define btrfsic_submit_bio submit_bio 28#define btrfsic_submit_bio submit_bio
29#define btrfsic_submit_bio_wait submit_bio_wait
28#endif 30#endif
29 31
30int btrfsic_mount(struct btrfs_root *root, 32int btrfsic_mount(struct btrfs_root *root,
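
The btrfsic_submit_bio_wait() wrapper added above (falling back to plain submit_bio_wait when integrity checking is compiled out) lets the later hunks in check-integrity.c, extent_io.c and scrub.c drop their private end_io callbacks and on-stack completions. A rough userspace analogy of what such a submit-and-wait helper hides, written with pthreads; none of these names come from the kernel (build with -lpthread):

#include <pthread.h>
#include <stdio.h>

struct request {
        int status;               /* filled in by the "I/O" thread */
        pthread_mutex_t lock;
        pthread_cond_t done_cv;
        int done;
};

static void *do_io(void *arg)
{
        struct request *rq = arg;

        rq->status = 0;           /* pretend the read succeeded */
        pthread_mutex_lock(&rq->lock);
        rq->done = 1;
        pthread_cond_signal(&rq->done_cv);
        pthread_mutex_unlock(&rq->lock);
        return NULL;
}

/* The point of a _wait() variant: one call that submits and blocks,
 * returning the status, so callers need no callback plumbing of their own. */
static int submit_and_wait(struct request *rq)
{
        pthread_t tid;

        pthread_mutex_init(&rq->lock, NULL);
        pthread_cond_init(&rq->done_cv, NULL);
        rq->done = 0;

        pthread_create(&tid, NULL, do_io, rq);

        pthread_mutex_lock(&rq->lock);
        while (!rq->done)
                pthread_cond_wait(&rq->done_cv, &rq->lock);
        pthread_mutex_unlock(&rq->lock);

        pthread_join(tid, NULL);
        return rq->status;
}

int main(void)
{
        struct request rq;

        printf("I/O status: %d\n", submit_and_wait(&rq));
        return 0;
}
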
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 45d98d01028f..9c01509dd8ab 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -767,20 +767,19 @@ int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
767 if (!path) 767 if (!path)
768 return -ENOMEM; 768 return -ENOMEM;
769 769
770 if (metadata) {
771 key.objectid = bytenr;
772 key.type = BTRFS_METADATA_ITEM_KEY;
773 key.offset = offset;
774 } else {
775 key.objectid = bytenr;
776 key.type = BTRFS_EXTENT_ITEM_KEY;
777 key.offset = offset;
778 }
779
780 if (!trans) { 770 if (!trans) {
781 path->skip_locking = 1; 771 path->skip_locking = 1;
782 path->search_commit_root = 1; 772 path->search_commit_root = 1;
783 } 773 }
774
775search_again:
776 key.objectid = bytenr;
777 key.offset = offset;
778 if (metadata)
779 key.type = BTRFS_METADATA_ITEM_KEY;
780 else
781 key.type = BTRFS_EXTENT_ITEM_KEY;
782
784again: 783again:
785 ret = btrfs_search_slot(trans, root->fs_info->extent_root, 784 ret = btrfs_search_slot(trans, root->fs_info->extent_root,
786 &key, path, 0, 0); 785 &key, path, 0, 0);
@@ -788,7 +787,6 @@ again:
788 goto out_free; 787 goto out_free;
789 788
790 if (ret > 0 && metadata && key.type == BTRFS_METADATA_ITEM_KEY) { 789 if (ret > 0 && metadata && key.type == BTRFS_METADATA_ITEM_KEY) {
791 metadata = 0;
792 if (path->slots[0]) { 790 if (path->slots[0]) {
793 path->slots[0]--; 791 path->slots[0]--;
794 btrfs_item_key_to_cpu(path->nodes[0], &key, 792 btrfs_item_key_to_cpu(path->nodes[0], &key,
@@ -855,7 +853,7 @@ again:
855 mutex_lock(&head->mutex); 853 mutex_lock(&head->mutex);
856 mutex_unlock(&head->mutex); 854 mutex_unlock(&head->mutex);
857 btrfs_put_delayed_ref(&head->node); 855 btrfs_put_delayed_ref(&head->node);
858 goto again; 856 goto search_again;
859 } 857 }
860 if (head->extent_op && head->extent_op->update_flags) 858 if (head->extent_op && head->extent_op->update_flags)
861 extent_flags |= head->extent_op->flags_to_set; 859 extent_flags |= head->extent_op->flags_to_set;
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 8e457fca0a0b..ff43802a7c88 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -1952,11 +1952,6 @@ static int free_io_failure(struct inode *inode, struct io_failure_record *rec,
1952 return err; 1952 return err;
1953} 1953}
1954 1954
1955static void repair_io_failure_callback(struct bio *bio, int err)
1956{
1957 complete(bio->bi_private);
1958}
1959
1960/* 1955/*
1961 * this bypasses the standard btrfs submit functions deliberately, as 1956 * this bypasses the standard btrfs submit functions deliberately, as
1962 * the standard behavior is to write all copies in a raid setup. here we only 1957 * the standard behavior is to write all copies in a raid setup. here we only
@@ -1973,7 +1968,6 @@ int repair_io_failure(struct btrfs_fs_info *fs_info, u64 start,
1973{ 1968{
1974 struct bio *bio; 1969 struct bio *bio;
1975 struct btrfs_device *dev; 1970 struct btrfs_device *dev;
1976 DECLARE_COMPLETION_ONSTACK(compl);
1977 u64 map_length = 0; 1971 u64 map_length = 0;
1978 u64 sector; 1972 u64 sector;
1979 struct btrfs_bio *bbio = NULL; 1973 struct btrfs_bio *bbio = NULL;
@@ -1990,8 +1984,6 @@ int repair_io_failure(struct btrfs_fs_info *fs_info, u64 start,
1990 bio = btrfs_io_bio_alloc(GFP_NOFS, 1); 1984 bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
1991 if (!bio) 1985 if (!bio)
1992 return -EIO; 1986 return -EIO;
1993 bio->bi_private = &compl;
1994 bio->bi_end_io = repair_io_failure_callback;
1995 bio->bi_size = 0; 1987 bio->bi_size = 0;
1996 map_length = length; 1988 map_length = length;
1997 1989
@@ -2012,10 +2004,8 @@ int repair_io_failure(struct btrfs_fs_info *fs_info, u64 start,
2012 } 2004 }
2013 bio->bi_bdev = dev->bdev; 2005 bio->bi_bdev = dev->bdev;
2014 bio_add_page(bio, page, length, start - page_offset(page)); 2006 bio_add_page(bio, page, length, start - page_offset(page));
2015 btrfsic_submit_bio(WRITE_SYNC, bio);
2016 wait_for_completion(&compl);
2017 2007
2018 if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) { 2008 if (btrfsic_submit_bio_wait(WRITE_SYNC, bio)) {
2019 /* try to remap that extent elsewhere? */ 2009 /* try to remap that extent elsewhere? */
2020 bio_put(bio); 2010 bio_put(bio);
2021 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS); 2011 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index a111622598b0..21da5762b0b1 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -2121,7 +2121,7 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
2121 2121
2122 err = mutex_lock_killable_nested(&dir->i_mutex, I_MUTEX_PARENT); 2122 err = mutex_lock_killable_nested(&dir->i_mutex, I_MUTEX_PARENT);
2123 if (err == -EINTR) 2123 if (err == -EINTR)
2124 goto out; 2124 goto out_drop_write;
2125 dentry = lookup_one_len(vol_args->name, parent, namelen); 2125 dentry = lookup_one_len(vol_args->name, parent, namelen);
2126 if (IS_ERR(dentry)) { 2126 if (IS_ERR(dentry)) {
2127 err = PTR_ERR(dentry); 2127 err = PTR_ERR(dentry);
@@ -2284,6 +2284,7 @@ out_dput:
2284 dput(dentry); 2284 dput(dentry);
2285out_unlock_dir: 2285out_unlock_dir:
2286 mutex_unlock(&dir->i_mutex); 2286 mutex_unlock(&dir->i_mutex);
2287out_drop_write:
2287 mnt_drop_write_file(file); 2288 mnt_drop_write_file(file);
2288out: 2289out:
2289 kfree(vol_args); 2290 kfree(vol_args);
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index ce459a7cb16d..429c73c374b8 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -571,7 +571,9 @@ static int is_cowonly_root(u64 root_objectid)
571 root_objectid == BTRFS_CHUNK_TREE_OBJECTID || 571 root_objectid == BTRFS_CHUNK_TREE_OBJECTID ||
572 root_objectid == BTRFS_DEV_TREE_OBJECTID || 572 root_objectid == BTRFS_DEV_TREE_OBJECTID ||
573 root_objectid == BTRFS_TREE_LOG_OBJECTID || 573 root_objectid == BTRFS_TREE_LOG_OBJECTID ||
574 root_objectid == BTRFS_CSUM_TREE_OBJECTID) 574 root_objectid == BTRFS_CSUM_TREE_OBJECTID ||
575 root_objectid == BTRFS_UUID_TREE_OBJECTID ||
576 root_objectid == BTRFS_QUOTA_TREE_OBJECTID)
575 return 1; 577 return 1;
576 return 0; 578 return 0;
577} 579}
@@ -1264,10 +1266,10 @@ static int __must_check __add_reloc_root(struct btrfs_root *root)
1264} 1266}
1265 1267
1266/* 1268/*
1267 * helper to update/delete the 'address of tree root -> reloc tree' 1269 * helper to delete the 'address of tree root -> reloc tree'
1268 * mapping 1270 * mapping
1269 */ 1271 */
1270static int __update_reloc_root(struct btrfs_root *root, int del) 1272static void __del_reloc_root(struct btrfs_root *root)
1271{ 1273{
1272 struct rb_node *rb_node; 1274 struct rb_node *rb_node;
1273 struct mapping_node *node = NULL; 1275 struct mapping_node *node = NULL;
@@ -1275,7 +1277,7 @@ static int __update_reloc_root(struct btrfs_root *root, int del)
1275 1277
1276 spin_lock(&rc->reloc_root_tree.lock); 1278 spin_lock(&rc->reloc_root_tree.lock);
1277 rb_node = tree_search(&rc->reloc_root_tree.rb_root, 1279 rb_node = tree_search(&rc->reloc_root_tree.rb_root,
1278 root->commit_root->start); 1280 root->node->start);
1279 if (rb_node) { 1281 if (rb_node) {
1280 node = rb_entry(rb_node, struct mapping_node, rb_node); 1282 node = rb_entry(rb_node, struct mapping_node, rb_node);
1281 rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root); 1283 rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
@@ -1283,23 +1285,45 @@ static int __update_reloc_root(struct btrfs_root *root, int del)
1283 spin_unlock(&rc->reloc_root_tree.lock); 1285 spin_unlock(&rc->reloc_root_tree.lock);
1284 1286
1285 if (!node) 1287 if (!node)
1286 return 0; 1288 return;
1287 BUG_ON((struct btrfs_root *)node->data != root); 1289 BUG_ON((struct btrfs_root *)node->data != root);
1288 1290
1289 if (!del) { 1291 spin_lock(&root->fs_info->trans_lock);
1290 spin_lock(&rc->reloc_root_tree.lock); 1292 list_del_init(&root->root_list);
1291 node->bytenr = root->node->start; 1293 spin_unlock(&root->fs_info->trans_lock);
1292 rb_node = tree_insert(&rc->reloc_root_tree.rb_root, 1294 kfree(node);
1293 node->bytenr, &node->rb_node); 1295}
1294 spin_unlock(&rc->reloc_root_tree.lock); 1296
1295 if (rb_node) 1297/*
1296 backref_tree_panic(rb_node, -EEXIST, node->bytenr); 1298 * helper to update the 'address of tree root -> reloc tree'
1297 } else { 1299 * mapping
1298 spin_lock(&root->fs_info->trans_lock); 1300 */
1299 list_del_init(&root->root_list); 1301static int __update_reloc_root(struct btrfs_root *root, u64 new_bytenr)
1300 spin_unlock(&root->fs_info->trans_lock); 1302{
1301 kfree(node); 1303 struct rb_node *rb_node;
1304 struct mapping_node *node = NULL;
1305 struct reloc_control *rc = root->fs_info->reloc_ctl;
1306
1307 spin_lock(&rc->reloc_root_tree.lock);
1308 rb_node = tree_search(&rc->reloc_root_tree.rb_root,
1309 root->node->start);
1310 if (rb_node) {
1311 node = rb_entry(rb_node, struct mapping_node, rb_node);
1312 rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
1302 } 1313 }
1314 spin_unlock(&rc->reloc_root_tree.lock);
1315
1316 if (!node)
1317 return 0;
1318 BUG_ON((struct btrfs_root *)node->data != root);
1319
1320 spin_lock(&rc->reloc_root_tree.lock);
1321 node->bytenr = new_bytenr;
1322 rb_node = tree_insert(&rc->reloc_root_tree.rb_root,
1323 node->bytenr, &node->rb_node);
1324 spin_unlock(&rc->reloc_root_tree.lock);
1325 if (rb_node)
1326 backref_tree_panic(rb_node, -EEXIST, node->bytenr);
1303 return 0; 1327 return 0;
1304} 1328}
1305 1329
@@ -1420,7 +1444,6 @@ int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
1420{ 1444{
1421 struct btrfs_root *reloc_root; 1445 struct btrfs_root *reloc_root;
1422 struct btrfs_root_item *root_item; 1446 struct btrfs_root_item *root_item;
1423 int del = 0;
1424 int ret; 1447 int ret;
1425 1448
1426 if (!root->reloc_root) 1449 if (!root->reloc_root)
@@ -1432,11 +1455,9 @@ int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
1432 if (root->fs_info->reloc_ctl->merge_reloc_tree && 1455 if (root->fs_info->reloc_ctl->merge_reloc_tree &&
1433 btrfs_root_refs(root_item) == 0) { 1456 btrfs_root_refs(root_item) == 0) {
1434 root->reloc_root = NULL; 1457 root->reloc_root = NULL;
1435 del = 1; 1458 __del_reloc_root(reloc_root);
1436 } 1459 }
1437 1460
1438 __update_reloc_root(reloc_root, del);
1439
1440 if (reloc_root->commit_root != reloc_root->node) { 1461 if (reloc_root->commit_root != reloc_root->node) {
1441 btrfs_set_root_node(root_item, reloc_root->node); 1462 btrfs_set_root_node(root_item, reloc_root->node);
1442 free_extent_buffer(reloc_root->commit_root); 1463 free_extent_buffer(reloc_root->commit_root);
@@ -2287,7 +2308,7 @@ void free_reloc_roots(struct list_head *list)
2287 while (!list_empty(list)) { 2308 while (!list_empty(list)) {
2288 reloc_root = list_entry(list->next, struct btrfs_root, 2309 reloc_root = list_entry(list->next, struct btrfs_root,
2289 root_list); 2310 root_list);
2290 __update_reloc_root(reloc_root, 1); 2311 __del_reloc_root(reloc_root);
2291 free_extent_buffer(reloc_root->node); 2312 free_extent_buffer(reloc_root->node);
2292 free_extent_buffer(reloc_root->commit_root); 2313 free_extent_buffer(reloc_root->commit_root);
2293 kfree(reloc_root); 2314 kfree(reloc_root);
@@ -2332,7 +2353,7 @@ again:
2332 2353
2333 ret = merge_reloc_root(rc, root); 2354 ret = merge_reloc_root(rc, root);
2334 if (ret) { 2355 if (ret) {
2335 __update_reloc_root(reloc_root, 1); 2356 __del_reloc_root(reloc_root);
2336 free_extent_buffer(reloc_root->node); 2357 free_extent_buffer(reloc_root->node);
2337 free_extent_buffer(reloc_root->commit_root); 2358 free_extent_buffer(reloc_root->commit_root);
2338 kfree(reloc_root); 2359 kfree(reloc_root);
@@ -2388,6 +2409,13 @@ out:
2388 btrfs_std_error(root->fs_info, ret); 2409 btrfs_std_error(root->fs_info, ret);
2389 if (!list_empty(&reloc_roots)) 2410 if (!list_empty(&reloc_roots))
2390 free_reloc_roots(&reloc_roots); 2411 free_reloc_roots(&reloc_roots);
2412
2413 /* new reloc root may be added */
2414 mutex_lock(&root->fs_info->reloc_mutex);
2415 list_splice_init(&rc->reloc_roots, &reloc_roots);
2416 mutex_unlock(&root->fs_info->reloc_mutex);
2417 if (!list_empty(&reloc_roots))
2418 free_reloc_roots(&reloc_roots);
2391 } 2419 }
2392 2420
2393 BUG_ON(!RB_EMPTY_ROOT(&rc->reloc_root_tree.rb_root)); 2421 BUG_ON(!RB_EMPTY_ROOT(&rc->reloc_root_tree.rb_root));
@@ -4522,6 +4550,11 @@ int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,
4522 BUG_ON(rc->stage == UPDATE_DATA_PTRS && 4550 BUG_ON(rc->stage == UPDATE_DATA_PTRS &&
4523 root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID); 4551 root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID);
4524 4552
4553 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
4554 if (buf == root->node)
4555 __update_reloc_root(root, cow->start);
4556 }
4557
4525 level = btrfs_header_level(buf); 4558 level = btrfs_header_level(buf);
4526 if (btrfs_header_generation(buf) <= 4559 if (btrfs_header_generation(buf) <=
4527 btrfs_root_last_snapshot(&root->root_item)) 4560 btrfs_root_last_snapshot(&root->root_item))
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 561e2f16ba3e..1fd3f33c330a 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -208,7 +208,6 @@ static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info,
208 int is_metadata, int have_csum, 208 int is_metadata, int have_csum,
209 const u8 *csum, u64 generation, 209 const u8 *csum, u64 generation,
210 u16 csum_size); 210 u16 csum_size);
211static void scrub_complete_bio_end_io(struct bio *bio, int err);
212static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad, 211static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
213 struct scrub_block *sblock_good, 212 struct scrub_block *sblock_good,
214 int force_write); 213 int force_write);
@@ -1294,7 +1293,6 @@ static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
1294 for (page_num = 0; page_num < sblock->page_count; page_num++) { 1293 for (page_num = 0; page_num < sblock->page_count; page_num++) {
1295 struct bio *bio; 1294 struct bio *bio;
1296 struct scrub_page *page = sblock->pagev[page_num]; 1295 struct scrub_page *page = sblock->pagev[page_num];
1297 DECLARE_COMPLETION_ONSTACK(complete);
1298 1296
1299 if (page->dev->bdev == NULL) { 1297 if (page->dev->bdev == NULL) {
1300 page->io_error = 1; 1298 page->io_error = 1;
@@ -1311,18 +1309,11 @@ static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
1311 } 1309 }
1312 bio->bi_bdev = page->dev->bdev; 1310 bio->bi_bdev = page->dev->bdev;
1313 bio->bi_sector = page->physical >> 9; 1311 bio->bi_sector = page->physical >> 9;
1314 bio->bi_end_io = scrub_complete_bio_end_io;
1315 bio->bi_private = &complete;
1316 1312
1317 bio_add_page(bio, page->page, PAGE_SIZE, 0); 1313 bio_add_page(bio, page->page, PAGE_SIZE, 0);
1318 btrfsic_submit_bio(READ, bio); 1314 if (btrfsic_submit_bio_wait(READ, bio))
1319
1320 /* this will also unplug the queue */
1321 wait_for_completion(&complete);
1322
1323 page->io_error = !test_bit(BIO_UPTODATE, &bio->bi_flags);
1324 if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
1325 sblock->no_io_error_seen = 0; 1315 sblock->no_io_error_seen = 0;
1316
1326 bio_put(bio); 1317 bio_put(bio);
1327 } 1318 }
1328 1319
@@ -1391,11 +1382,6 @@ static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info,
1391 sblock->checksum_error = 1; 1382 sblock->checksum_error = 1;
1392} 1383}
1393 1384
1394static void scrub_complete_bio_end_io(struct bio *bio, int err)
1395{
1396 complete((struct completion *)bio->bi_private);
1397}
1398
1399static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad, 1385static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
1400 struct scrub_block *sblock_good, 1386 struct scrub_block *sblock_good,
1401 int force_write) 1387 int force_write)
@@ -1430,7 +1416,6 @@ static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
1430 sblock_bad->checksum_error || page_bad->io_error) { 1416 sblock_bad->checksum_error || page_bad->io_error) {
1431 struct bio *bio; 1417 struct bio *bio;
1432 int ret; 1418 int ret;
1433 DECLARE_COMPLETION_ONSTACK(complete);
1434 1419
1435 if (!page_bad->dev->bdev) { 1420 if (!page_bad->dev->bdev) {
1436 printk_ratelimited(KERN_WARNING 1421 printk_ratelimited(KERN_WARNING
@@ -1443,19 +1428,14 @@ static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
1443 return -EIO; 1428 return -EIO;
1444 bio->bi_bdev = page_bad->dev->bdev; 1429 bio->bi_bdev = page_bad->dev->bdev;
1445 bio->bi_sector = page_bad->physical >> 9; 1430 bio->bi_sector = page_bad->physical >> 9;
1446 bio->bi_end_io = scrub_complete_bio_end_io;
1447 bio->bi_private = &complete;
1448 1431
1449 ret = bio_add_page(bio, page_good->page, PAGE_SIZE, 0); 1432 ret = bio_add_page(bio, page_good->page, PAGE_SIZE, 0);
1450 if (PAGE_SIZE != ret) { 1433 if (PAGE_SIZE != ret) {
1451 bio_put(bio); 1434 bio_put(bio);
1452 return -EIO; 1435 return -EIO;
1453 } 1436 }
1454 btrfsic_submit_bio(WRITE, bio);
1455 1437
1456 /* this will also unplug the queue */ 1438 if (btrfsic_submit_bio_wait(WRITE, bio)) {
1457 wait_for_completion(&complete);
1458 if (!bio_flagged(bio, BIO_UPTODATE)) {
1459 btrfs_dev_stat_inc_and_print(page_bad->dev, 1439 btrfs_dev_stat_inc_and_print(page_bad->dev,
1460 BTRFS_DEV_STAT_WRITE_ERRS); 1440 BTRFS_DEV_STAT_WRITE_ERRS);
1461 btrfs_dev_replace_stats_inc( 1441 btrfs_dev_replace_stats_inc(
@@ -3375,7 +3355,6 @@ static int write_page_nocow(struct scrub_ctx *sctx,
3375 struct bio *bio; 3355 struct bio *bio;
3376 struct btrfs_device *dev; 3356 struct btrfs_device *dev;
3377 int ret; 3357 int ret;
3378 DECLARE_COMPLETION_ONSTACK(compl);
3379 3358
3380 dev = sctx->wr_ctx.tgtdev; 3359 dev = sctx->wr_ctx.tgtdev;
3381 if (!dev) 3360 if (!dev)
@@ -3392,8 +3371,6 @@ static int write_page_nocow(struct scrub_ctx *sctx,
3392 spin_unlock(&sctx->stat_lock); 3371 spin_unlock(&sctx->stat_lock);
3393 return -ENOMEM; 3372 return -ENOMEM;
3394 } 3373 }
3395 bio->bi_private = &compl;
3396 bio->bi_end_io = scrub_complete_bio_end_io;
3397 bio->bi_size = 0; 3374 bio->bi_size = 0;
3398 bio->bi_sector = physical_for_dev_replace >> 9; 3375 bio->bi_sector = physical_for_dev_replace >> 9;
3399 bio->bi_bdev = dev->bdev; 3376 bio->bi_bdev = dev->bdev;
@@ -3404,10 +3381,8 @@ leave_with_eio:
3404 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS); 3381 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
3405 return -EIO; 3382 return -EIO;
3406 } 3383 }
3407 btrfsic_submit_bio(WRITE_SYNC, bio);
3408 wait_for_completion(&compl);
3409 3384
3410 if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) 3385 if (btrfsic_submit_bio_wait(WRITE_SYNC, bio))
3411 goto leave_with_eio; 3386 goto leave_with_eio;
3412 3387
3413 bio_put(bio); 3388 bio_put(bio);
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index 6837fe87f3a6..945d1db98f26 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -4723,8 +4723,8 @@ long btrfs_ioctl_send(struct file *mnt_file, void __user *arg_)
4723 } 4723 }
4724 4724
4725 if (!access_ok(VERIFY_READ, arg->clone_sources, 4725 if (!access_ok(VERIFY_READ, arg->clone_sources,
4726 sizeof(*arg->clone_sources * 4726 sizeof(*arg->clone_sources) *
4727 arg->clone_sources_count))) { 4727 arg->clone_sources_count)) {
4728 ret = -EFAULT; 4728 ret = -EFAULT;
4729 goto out; 4729 goto out;
4730 } 4730 }
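
The send.c fix above is a misplaced parenthesis: with the multiplication inside sizeof(), the access_ok() length degenerates to the size of a single element instead of the whole clone_sources array. A tiny demonstration of the difference; sizeof does not evaluate its operand, so the NULL pointer below is never dereferenced:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t *clone_sources = NULL;   /* type only matters for sizeof */
        unsigned long count = 16;

        size_t wrong = sizeof(*clone_sources * count);   /* 8: size of one element */
        size_t right = sizeof(*clone_sources) * count;    /* 8 * 16 = 128 bytes */

        printf("wrong=%zu right=%zu\n", wrong, right);
        return 0;
}
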
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 2d8ac1bf0cf9..d71a11d13dfa 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -432,7 +432,6 @@ int btrfs_parse_options(struct btrfs_root *root, char *options)
432 } else { 432 } else {
433 printk(KERN_INFO "btrfs: setting nodatacow\n"); 433 printk(KERN_INFO "btrfs: setting nodatacow\n");
434 } 434 }
435 info->compress_type = BTRFS_COMPRESS_NONE;
436 btrfs_clear_opt(info->mount_opt, COMPRESS); 435 btrfs_clear_opt(info->mount_opt, COMPRESS);
437 btrfs_clear_opt(info->mount_opt, FORCE_COMPRESS); 436 btrfs_clear_opt(info->mount_opt, FORCE_COMPRESS);
438 btrfs_set_opt(info->mount_opt, NODATACOW); 437 btrfs_set_opt(info->mount_opt, NODATACOW);
@@ -461,7 +460,6 @@ int btrfs_parse_options(struct btrfs_root *root, char *options)
461 btrfs_set_fs_incompat(info, COMPRESS_LZO); 460 btrfs_set_fs_incompat(info, COMPRESS_LZO);
462 } else if (strncmp(args[0].from, "no", 2) == 0) { 461 } else if (strncmp(args[0].from, "no", 2) == 0) {
463 compress_type = "no"; 462 compress_type = "no";
464 info->compress_type = BTRFS_COMPRESS_NONE;
465 btrfs_clear_opt(info->mount_opt, COMPRESS); 463 btrfs_clear_opt(info->mount_opt, COMPRESS);
466 btrfs_clear_opt(info->mount_opt, FORCE_COMPRESS); 464 btrfs_clear_opt(info->mount_opt, FORCE_COMPRESS);
467 compress_force = false; 465 compress_force = false;
@@ -474,9 +472,10 @@ int btrfs_parse_options(struct btrfs_root *root, char *options)
474 btrfs_set_opt(info->mount_opt, FORCE_COMPRESS); 472 btrfs_set_opt(info->mount_opt, FORCE_COMPRESS);
475 pr_info("btrfs: force %s compression\n", 473 pr_info("btrfs: force %s compression\n",
476 compress_type); 474 compress_type);
477 } else 475 } else if (btrfs_test_opt(root, COMPRESS)) {
478 pr_info("btrfs: use %s compression\n", 476 pr_info("btrfs: use %s compression\n",
479 compress_type); 477 compress_type);
478 }
480 break; 479 break;
481 case Opt_ssd: 480 case Opt_ssd:
482 printk(KERN_INFO "btrfs: use ssd allocation scheme\n"); 481 printk(KERN_INFO "btrfs: use ssd allocation scheme\n");
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index 6df8bd481425..ec3ba43b9faa 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -210,13 +210,17 @@ static int readpage_nounlock(struct file *filp, struct page *page)
210 if (err < 0) { 210 if (err < 0) {
211 SetPageError(page); 211 SetPageError(page);
212 goto out; 212 goto out;
213 } else if (err < PAGE_CACHE_SIZE) { 213 } else {
214 if (err < PAGE_CACHE_SIZE) {
214 /* zero fill remainder of page */ 215 /* zero fill remainder of page */
215 zero_user_segment(page, err, PAGE_CACHE_SIZE); 216 zero_user_segment(page, err, PAGE_CACHE_SIZE);
217 } else {
218 flush_dcache_page(page);
219 }
216 } 220 }
217 SetPageUptodate(page); 221 SetPageUptodate(page);
218 222
219 if (err == 0) 223 if (err >= 0)
220 ceph_readpage_to_fscache(inode, page); 224 ceph_readpage_to_fscache(inode, page);
221 225
222out: 226out:
diff --git a/fs/ceph/cache.c b/fs/ceph/cache.c
index 7db2e6ca4b8f..8c44fdd4e1c3 100644
--- a/fs/ceph/cache.c
+++ b/fs/ceph/cache.c
@@ -324,6 +324,9 @@ void ceph_invalidate_fscache_page(struct inode* inode, struct page *page)
324{ 324{
325 struct ceph_inode_info *ci = ceph_inode(inode); 325 struct ceph_inode_info *ci = ceph_inode(inode);
326 326
327 if (!PageFsCache(page))
328 return;
329
327 fscache_wait_on_page_write(ci->fscache, page); 330 fscache_wait_on_page_write(ci->fscache, page);
328 fscache_uncache_page(ci->fscache, page); 331 fscache_uncache_page(ci->fscache, page);
329} 332}
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index 13976c33332e..3c0a4bd74996 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -897,7 +897,7 @@ static int __ceph_is_any_caps(struct ceph_inode_info *ci)
897 * caller should hold i_ceph_lock. 897 * caller should hold i_ceph_lock.
898 * caller will not hold session s_mutex if called from destroy_inode. 898 * caller will not hold session s_mutex if called from destroy_inode.
899 */ 899 */
900void __ceph_remove_cap(struct ceph_cap *cap) 900void __ceph_remove_cap(struct ceph_cap *cap, bool queue_release)
901{ 901{
902 struct ceph_mds_session *session = cap->session; 902 struct ceph_mds_session *session = cap->session;
903 struct ceph_inode_info *ci = cap->ci; 903 struct ceph_inode_info *ci = cap->ci;
@@ -909,6 +909,16 @@ void __ceph_remove_cap(struct ceph_cap *cap)
909 909
910 /* remove from session list */ 910 /* remove from session list */
911 spin_lock(&session->s_cap_lock); 911 spin_lock(&session->s_cap_lock);
912 /*
913 * s_cap_reconnect is protected by s_cap_lock. no one changes
914 * s_cap_gen while session is in the reconnect state.
915 */
916 if (queue_release &&
917 (!session->s_cap_reconnect ||
918 cap->cap_gen == session->s_cap_gen))
919 __queue_cap_release(session, ci->i_vino.ino, cap->cap_id,
920 cap->mseq, cap->issue_seq);
921
912 if (session->s_cap_iterator == cap) { 922 if (session->s_cap_iterator == cap) {
913 /* not yet, we are iterating over this very cap */ 923 /* not yet, we are iterating over this very cap */
914 dout("__ceph_remove_cap delaying %p removal from session %p\n", 924 dout("__ceph_remove_cap delaying %p removal from session %p\n",
@@ -1023,7 +1033,6 @@ void __queue_cap_release(struct ceph_mds_session *session,
1023 struct ceph_mds_cap_release *head; 1033 struct ceph_mds_cap_release *head;
1024 struct ceph_mds_cap_item *item; 1034 struct ceph_mds_cap_item *item;
1025 1035
1026 spin_lock(&session->s_cap_lock);
1027 BUG_ON(!session->s_num_cap_releases); 1036 BUG_ON(!session->s_num_cap_releases);
1028 msg = list_first_entry(&session->s_cap_releases, 1037 msg = list_first_entry(&session->s_cap_releases,
1029 struct ceph_msg, list_head); 1038 struct ceph_msg, list_head);
@@ -1052,7 +1061,6 @@ void __queue_cap_release(struct ceph_mds_session *session,
1052 (int)CEPH_CAPS_PER_RELEASE, 1061 (int)CEPH_CAPS_PER_RELEASE,
1053 (int)msg->front.iov_len); 1062 (int)msg->front.iov_len);
1054 } 1063 }
1055 spin_unlock(&session->s_cap_lock);
1056} 1064}
1057 1065
1058/* 1066/*
@@ -1067,12 +1075,8 @@ void ceph_queue_caps_release(struct inode *inode)
1067 p = rb_first(&ci->i_caps); 1075 p = rb_first(&ci->i_caps);
1068 while (p) { 1076 while (p) {
1069 struct ceph_cap *cap = rb_entry(p, struct ceph_cap, ci_node); 1077 struct ceph_cap *cap = rb_entry(p, struct ceph_cap, ci_node);
1070 struct ceph_mds_session *session = cap->session;
1071
1072 __queue_cap_release(session, ceph_ino(inode), cap->cap_id,
1073 cap->mseq, cap->issue_seq);
1074 p = rb_next(p); 1078 p = rb_next(p);
1075 __ceph_remove_cap(cap); 1079 __ceph_remove_cap(cap, true);
1076 } 1080 }
1077} 1081}
1078 1082
@@ -2791,7 +2795,7 @@ static void handle_cap_export(struct inode *inode, struct ceph_mds_caps *ex,
2791 } 2795 }
2792 spin_unlock(&mdsc->cap_dirty_lock); 2796 spin_unlock(&mdsc->cap_dirty_lock);
2793 } 2797 }
2794 __ceph_remove_cap(cap); 2798 __ceph_remove_cap(cap, false);
2795 } 2799 }
2796 /* else, we already released it */ 2800 /* else, we already released it */
2797 2801
@@ -2931,9 +2935,12 @@ void ceph_handle_caps(struct ceph_mds_session *session,
2931 if (!inode) { 2935 if (!inode) {
2932 dout(" i don't have ino %llx\n", vino.ino); 2936 dout(" i don't have ino %llx\n", vino.ino);
2933 2937
2934 if (op == CEPH_CAP_OP_IMPORT) 2938 if (op == CEPH_CAP_OP_IMPORT) {
2939 spin_lock(&session->s_cap_lock);
2935 __queue_cap_release(session, vino.ino, cap_id, 2940 __queue_cap_release(session, vino.ino, cap_id,
2936 mseq, seq); 2941 mseq, seq);
2942 spin_unlock(&session->s_cap_lock);
2943 }
2937 goto flush_cap_releases; 2944 goto flush_cap_releases;
2938 } 2945 }
2939 2946
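
Two conventions change in caps.c: __queue_cap_release() now expects the caller to hold s_cap_lock (hence the explicit lock/unlock added around the CEPH_CAP_OP_IMPORT case), and __ceph_remove_cap() grew a queue_release flag so callers no longer open-code the release queueing themselves. A sketch of the resulting call-site pattern (illustrative, not verbatim kernel code):

	spin_lock(&ci->i_ceph_lock);
	__ceph_remove_cap(cap, true);	/* true: also queue the cap release,
					 * unless a reconnect is in progress
					 * and the cap is from an old gen */
	spin_unlock(&ci->i_ceph_lock);
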
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index 868b61d56cac..2a0bcaeb189a 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -352,8 +352,18 @@ more:
352 } 352 }
353 353
354 /* note next offset and last dentry name */ 354 /* note next offset and last dentry name */
355 rinfo = &req->r_reply_info;
356 if (le32_to_cpu(rinfo->dir_dir->frag) != frag) {
357 frag = le32_to_cpu(rinfo->dir_dir->frag);
358 if (ceph_frag_is_leftmost(frag))
359 fi->next_offset = 2;
360 else
361 fi->next_offset = 0;
362 off = fi->next_offset;
363 }
355 fi->offset = fi->next_offset; 364 fi->offset = fi->next_offset;
356 fi->last_readdir = req; 365 fi->last_readdir = req;
366 fi->frag = frag;
357 367
358 if (req->r_reply_info.dir_end) { 368 if (req->r_reply_info.dir_end) {
359 kfree(fi->last_name); 369 kfree(fi->last_name);
@@ -363,7 +373,6 @@ more:
363 else 373 else
364 fi->next_offset = 0; 374 fi->next_offset = 0;
365 } else { 375 } else {
366 rinfo = &req->r_reply_info;
367 err = note_last_dentry(fi, 376 err = note_last_dentry(fi,
368 rinfo->dir_dname[rinfo->dir_nr-1], 377 rinfo->dir_dname[rinfo->dir_nr-1],
369 rinfo->dir_dname_len[rinfo->dir_nr-1]); 378 rinfo->dir_dname_len[rinfo->dir_nr-1]);
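
The dir.c hunk resets the readdir position whenever the MDS replies from a different directory fragment than the one requested. A sketch of the offset rule it applies; the value 2 presumably skips the "." and ".." entries that only the leftmost fragment carries (that reasoning is an assumption, not stated in the diff):

	if (le32_to_cpu(rinfo->dir_dir->frag) != frag) {
		frag = le32_to_cpu(rinfo->dir_dir->frag);
		fi->next_offset = ceph_frag_is_leftmost(frag) ? 2 : 0;
		off = fi->next_offset;
	}
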
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index 8549a48115f7..278fd2891288 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -577,6 +577,8 @@ static int fill_inode(struct inode *inode,
577 int issued = 0, implemented; 577 int issued = 0, implemented;
578 struct timespec mtime, atime, ctime; 578 struct timespec mtime, atime, ctime;
579 u32 nsplits; 579 u32 nsplits;
580 struct ceph_inode_frag *frag;
581 struct rb_node *rb_node;
580 struct ceph_buffer *xattr_blob = NULL; 582 struct ceph_buffer *xattr_blob = NULL;
581 int err = 0; 583 int err = 0;
582 int queue_trunc = 0; 584 int queue_trunc = 0;
@@ -751,15 +753,38 @@ no_change:
751 /* FIXME: move me up, if/when version reflects fragtree changes */ 753 /* FIXME: move me up, if/when version reflects fragtree changes */
752 nsplits = le32_to_cpu(info->fragtree.nsplits); 754 nsplits = le32_to_cpu(info->fragtree.nsplits);
753 mutex_lock(&ci->i_fragtree_mutex); 755 mutex_lock(&ci->i_fragtree_mutex);
756 rb_node = rb_first(&ci->i_fragtree);
754 for (i = 0; i < nsplits; i++) { 757 for (i = 0; i < nsplits; i++) {
755 u32 id = le32_to_cpu(info->fragtree.splits[i].frag); 758 u32 id = le32_to_cpu(info->fragtree.splits[i].frag);
756 struct ceph_inode_frag *frag = __get_or_create_frag(ci, id); 759 frag = NULL;
757 760 while (rb_node) {
758 if (IS_ERR(frag)) 761 frag = rb_entry(rb_node, struct ceph_inode_frag, node);
759 continue; 762 if (ceph_frag_compare(frag->frag, id) >= 0) {
763 if (frag->frag != id)
764 frag = NULL;
765 else
766 rb_node = rb_next(rb_node);
767 break;
768 }
769 rb_node = rb_next(rb_node);
770 rb_erase(&frag->node, &ci->i_fragtree);
771 kfree(frag);
772 frag = NULL;
773 }
774 if (!frag) {
775 frag = __get_or_create_frag(ci, id);
776 if (IS_ERR(frag))
777 continue;
778 }
760 frag->split_by = le32_to_cpu(info->fragtree.splits[i].by); 779 frag->split_by = le32_to_cpu(info->fragtree.splits[i].by);
761 dout(" frag %x split by %d\n", frag->frag, frag->split_by); 780 dout(" frag %x split by %d\n", frag->frag, frag->split_by);
762 } 781 }
782 while (rb_node) {
783 frag = rb_entry(rb_node, struct ceph_inode_frag, node);
784 rb_node = rb_next(rb_node);
785 rb_erase(&frag->node, &ci->i_fragtree);
786 kfree(frag);
787 }
763 mutex_unlock(&ci->i_fragtree_mutex); 788 mutex_unlock(&ci->i_fragtree_mutex);
764 789
765 /* were we issued a capability? */ 790 /* were we issued a capability? */
@@ -953,7 +978,6 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
953 struct ceph_mds_reply_inode *ininfo; 978 struct ceph_mds_reply_inode *ininfo;
954 struct ceph_vino vino; 979 struct ceph_vino vino;
955 struct ceph_fs_client *fsc = ceph_sb_to_client(sb); 980 struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
956 int i = 0;
957 int err = 0; 981 int err = 0;
958 982
959 dout("fill_trace %p is_dentry %d is_target %d\n", req, 983 dout("fill_trace %p is_dentry %d is_target %d\n", req,
@@ -1014,6 +1038,29 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
1014 } 1038 }
1015 } 1039 }
1016 1040
1041 if (rinfo->head->is_target) {
1042 vino.ino = le64_to_cpu(rinfo->targeti.in->ino);
1043 vino.snap = le64_to_cpu(rinfo->targeti.in->snapid);
1044
1045 in = ceph_get_inode(sb, vino);
1046 if (IS_ERR(in)) {
1047 err = PTR_ERR(in);
1048 goto done;
1049 }
1050 req->r_target_inode = in;
1051
1052 err = fill_inode(in, &rinfo->targeti, NULL,
1053 session, req->r_request_started,
1054 (le32_to_cpu(rinfo->head->result) == 0) ?
1055 req->r_fmode : -1,
1056 &req->r_caps_reservation);
1057 if (err < 0) {
1058 pr_err("fill_inode badness %p %llx.%llx\n",
1059 in, ceph_vinop(in));
1060 goto done;
1061 }
1062 }
1063
1017 /* 1064 /*
1018 * ignore null lease/binding on snapdir ENOENT, or else we 1065 * ignore null lease/binding on snapdir ENOENT, or else we
1019 * will have trouble splicing in the virtual snapdir later 1066 * will have trouble splicing in the virtual snapdir later
@@ -1083,7 +1130,6 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
1083 ceph_dentry(req->r_old_dentry)->offset); 1130 ceph_dentry(req->r_old_dentry)->offset);
1084 1131
1085 dn = req->r_old_dentry; /* use old_dentry */ 1132 dn = req->r_old_dentry; /* use old_dentry */
1086 in = dn->d_inode;
1087 } 1133 }
1088 1134
1089 /* null dentry? */ 1135 /* null dentry? */
@@ -1105,44 +1151,28 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
1105 } 1151 }
1106 1152
1107 /* attach proper inode */ 1153 /* attach proper inode */
1108 ininfo = rinfo->targeti.in; 1154 if (!dn->d_inode) {
1109 vino.ino = le64_to_cpu(ininfo->ino); 1155 ihold(in);
1110 vino.snap = le64_to_cpu(ininfo->snapid);
1111 in = dn->d_inode;
1112 if (!in) {
1113 in = ceph_get_inode(sb, vino);
1114 if (IS_ERR(in)) {
1115 pr_err("fill_trace bad get_inode "
1116 "%llx.%llx\n", vino.ino, vino.snap);
1117 err = PTR_ERR(in);
1118 d_drop(dn);
1119 goto done;
1120 }
1121 dn = splice_dentry(dn, in, &have_lease, true); 1156 dn = splice_dentry(dn, in, &have_lease, true);
1122 if (IS_ERR(dn)) { 1157 if (IS_ERR(dn)) {
1123 err = PTR_ERR(dn); 1158 err = PTR_ERR(dn);
1124 goto done; 1159 goto done;
1125 } 1160 }
1126 req->r_dentry = dn; /* may have spliced */ 1161 req->r_dentry = dn; /* may have spliced */
1127 ihold(in); 1162 } else if (dn->d_inode && dn->d_inode != in) {
1128 } else if (ceph_ino(in) == vino.ino &&
1129 ceph_snap(in) == vino.snap) {
1130 ihold(in);
1131 } else {
1132 dout(" %p links to %p %llx.%llx, not %llx.%llx\n", 1163 dout(" %p links to %p %llx.%llx, not %llx.%llx\n",
1133 dn, in, ceph_ino(in), ceph_snap(in), 1164 dn, dn->d_inode, ceph_vinop(dn->d_inode),
1134 vino.ino, vino.snap); 1165 ceph_vinop(in));
1135 have_lease = false; 1166 have_lease = false;
1136 in = NULL;
1137 } 1167 }
1138 1168
1139 if (have_lease) 1169 if (have_lease)
1140 update_dentry_lease(dn, rinfo->dlease, session, 1170 update_dentry_lease(dn, rinfo->dlease, session,
1141 req->r_request_started); 1171 req->r_request_started);
1142 dout(" final dn %p\n", dn); 1172 dout(" final dn %p\n", dn);
1143 i++; 1173 } else if (!req->r_aborted &&
1144 } else if ((req->r_op == CEPH_MDS_OP_LOOKUPSNAP || 1174 (req->r_op == CEPH_MDS_OP_LOOKUPSNAP ||
1145 req->r_op == CEPH_MDS_OP_MKSNAP) && !req->r_aborted) { 1175 req->r_op == CEPH_MDS_OP_MKSNAP)) {
1146 struct dentry *dn = req->r_dentry; 1176 struct dentry *dn = req->r_dentry;
1147 1177
1148 /* fill out a snapdir LOOKUPSNAP dentry */ 1178 /* fill out a snapdir LOOKUPSNAP dentry */
@@ -1152,52 +1182,15 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
1152 ininfo = rinfo->targeti.in; 1182 ininfo = rinfo->targeti.in;
1153 vino.ino = le64_to_cpu(ininfo->ino); 1183 vino.ino = le64_to_cpu(ininfo->ino);
1154 vino.snap = le64_to_cpu(ininfo->snapid); 1184 vino.snap = le64_to_cpu(ininfo->snapid);
1155 in = ceph_get_inode(sb, vino);
1156 if (IS_ERR(in)) {
1157 pr_err("fill_inode get_inode badness %llx.%llx\n",
1158 vino.ino, vino.snap);
1159 err = PTR_ERR(in);
1160 d_delete(dn);
1161 goto done;
1162 }
1163 dout(" linking snapped dir %p to dn %p\n", in, dn); 1185 dout(" linking snapped dir %p to dn %p\n", in, dn);
1186 ihold(in);
1164 dn = splice_dentry(dn, in, NULL, true); 1187 dn = splice_dentry(dn, in, NULL, true);
1165 if (IS_ERR(dn)) { 1188 if (IS_ERR(dn)) {
1166 err = PTR_ERR(dn); 1189 err = PTR_ERR(dn);
1167 goto done; 1190 goto done;
1168 } 1191 }
1169 req->r_dentry = dn; /* may have spliced */ 1192 req->r_dentry = dn; /* may have spliced */
1170 ihold(in);
1171 rinfo->head->is_dentry = 1; /* fool notrace handlers */
1172 }
1173
1174 if (rinfo->head->is_target) {
1175 vino.ino = le64_to_cpu(rinfo->targeti.in->ino);
1176 vino.snap = le64_to_cpu(rinfo->targeti.in->snapid);
1177
1178 if (in == NULL || ceph_ino(in) != vino.ino ||
1179 ceph_snap(in) != vino.snap) {
1180 in = ceph_get_inode(sb, vino);
1181 if (IS_ERR(in)) {
1182 err = PTR_ERR(in);
1183 goto done;
1184 }
1185 }
1186 req->r_target_inode = in;
1187
1188 err = fill_inode(in,
1189 &rinfo->targeti, NULL,
1190 session, req->r_request_started,
1191 (le32_to_cpu(rinfo->head->result) == 0) ?
1192 req->r_fmode : -1,
1193 &req->r_caps_reservation);
1194 if (err < 0) {
1195 pr_err("fill_inode badness %p %llx.%llx\n",
1196 in, ceph_vinop(in));
1197 goto done;
1198 }
1199 } 1193 }
1200
1201done: 1194done:
1202 dout("fill_trace done err=%d\n", err); 1195 dout("fill_trace done err=%d\n", err);
1203 return err; 1196 return err;
@@ -1247,11 +1240,23 @@ int ceph_readdir_prepopulate(struct ceph_mds_request *req,
1247 struct qstr dname; 1240 struct qstr dname;
1248 struct dentry *dn; 1241 struct dentry *dn;
1249 struct inode *in; 1242 struct inode *in;
1250 int err = 0, i; 1243 int err = 0, ret, i;
1251 struct inode *snapdir = NULL; 1244 struct inode *snapdir = NULL;
1252 struct ceph_mds_request_head *rhead = req->r_request->front.iov_base; 1245 struct ceph_mds_request_head *rhead = req->r_request->front.iov_base;
1253 u64 frag = le32_to_cpu(rhead->args.readdir.frag);
1254 struct ceph_dentry_info *di; 1246 struct ceph_dentry_info *di;
1247 u64 r_readdir_offset = req->r_readdir_offset;
1248 u32 frag = le32_to_cpu(rhead->args.readdir.frag);
1249
1250 if (rinfo->dir_dir &&
1251 le32_to_cpu(rinfo->dir_dir->frag) != frag) {
1252 dout("readdir_prepopulate got new frag %x -> %x\n",
1253 frag, le32_to_cpu(rinfo->dir_dir->frag));
1254 frag = le32_to_cpu(rinfo->dir_dir->frag);
1255 if (ceph_frag_is_leftmost(frag))
1256 r_readdir_offset = 2;
1257 else
1258 r_readdir_offset = 0;
1259 }
1255 1260
1256 if (req->r_aborted) 1261 if (req->r_aborted)
1257 return readdir_prepopulate_inodes_only(req, session); 1262 return readdir_prepopulate_inodes_only(req, session);
@@ -1268,6 +1273,7 @@ int ceph_readdir_prepopulate(struct ceph_mds_request *req,
1268 ceph_fill_dirfrag(parent->d_inode, rinfo->dir_dir); 1273 ceph_fill_dirfrag(parent->d_inode, rinfo->dir_dir);
1269 } 1274 }
1270 1275
1276 /* FIXME: release caps/leases if error occurs */
1271 for (i = 0; i < rinfo->dir_nr; i++) { 1277 for (i = 0; i < rinfo->dir_nr; i++) {
1272 struct ceph_vino vino; 1278 struct ceph_vino vino;
1273 1279
@@ -1292,9 +1298,10 @@ retry_lookup:
1292 err = -ENOMEM; 1298 err = -ENOMEM;
1293 goto out; 1299 goto out;
1294 } 1300 }
1295 err = ceph_init_dentry(dn); 1301 ret = ceph_init_dentry(dn);
1296 if (err < 0) { 1302 if (ret < 0) {
1297 dput(dn); 1303 dput(dn);
1304 err = ret;
1298 goto out; 1305 goto out;
1299 } 1306 }
1300 } else if (dn->d_inode && 1307 } else if (dn->d_inode &&
@@ -1314,9 +1321,6 @@ retry_lookup:
1314 spin_unlock(&parent->d_lock); 1321 spin_unlock(&parent->d_lock);
1315 } 1322 }
1316 1323
1317 di = dn->d_fsdata;
1318 di->offset = ceph_make_fpos(frag, i + req->r_readdir_offset);
1319
1320 /* inode */ 1324 /* inode */
1321 if (dn->d_inode) { 1325 if (dn->d_inode) {
1322 in = dn->d_inode; 1326 in = dn->d_inode;
@@ -1329,26 +1333,39 @@ retry_lookup:
1329 err = PTR_ERR(in); 1333 err = PTR_ERR(in);
1330 goto out; 1334 goto out;
1331 } 1335 }
1332 dn = splice_dentry(dn, in, NULL, false);
1333 if (IS_ERR(dn))
1334 dn = NULL;
1335 } 1336 }
1336 1337
1337 if (fill_inode(in, &rinfo->dir_in[i], NULL, session, 1338 if (fill_inode(in, &rinfo->dir_in[i], NULL, session,
1338 req->r_request_started, -1, 1339 req->r_request_started, -1,
1339 &req->r_caps_reservation) < 0) { 1340 &req->r_caps_reservation) < 0) {
1340 pr_err("fill_inode badness on %p\n", in); 1341 pr_err("fill_inode badness on %p\n", in);
1342 if (!dn->d_inode)
1343 iput(in);
1344 d_drop(dn);
1341 goto next_item; 1345 goto next_item;
1342 } 1346 }
1343 if (dn) 1347
1344 update_dentry_lease(dn, rinfo->dir_dlease[i], 1348 if (!dn->d_inode) {
1345 req->r_session, 1349 dn = splice_dentry(dn, in, NULL, false);
1346 req->r_request_started); 1350 if (IS_ERR(dn)) {
1351 err = PTR_ERR(dn);
1352 dn = NULL;
1353 goto next_item;
1354 }
1355 }
1356
1357 di = dn->d_fsdata;
1358 di->offset = ceph_make_fpos(frag, i + r_readdir_offset);
1359
1360 update_dentry_lease(dn, rinfo->dir_dlease[i],
1361 req->r_session,
1362 req->r_request_started);
1347next_item: 1363next_item:
1348 if (dn) 1364 if (dn)
1349 dput(dn); 1365 dput(dn);
1350 } 1366 }
1351 req->r_did_prepopulate = true; 1367 if (err == 0)
1368 req->r_did_prepopulate = true;
1352 1369
1353out: 1370out:
1354 if (snapdir) { 1371 if (snapdir) {
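
The readdir_prepopulate() rework above reorders the per-entry steps: fill the inode first, splice the dentry only once the fill succeeded, and only then assign the dentry offset and lease. A condensed sketch of the new per-entry flow with the error paths trimmed (see the hunks for the full handling):

	in = dn->d_inode ? dn->d_inode : ceph_get_inode(sb, vino);
	if (fill_inode(in, &rinfo->dir_in[i], NULL, session,
		       req->r_request_started, -1,
		       &req->r_caps_reservation) < 0)
		goto next_item;			/* drop this dentry, keep going */
	if (!dn->d_inode)
		dn = splice_dentry(dn, in, NULL, false);
	di = dn->d_fsdata;
	di->offset = ceph_make_fpos(frag, i + r_readdir_offset);
	update_dentry_lease(dn, rinfo->dir_dlease[i], req->r_session,
			    req->r_request_started);
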
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index b7bda5d9611d..d90861f45210 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -43,6 +43,7 @@
43 */ 43 */
44 44
45struct ceph_reconnect_state { 45struct ceph_reconnect_state {
46 int nr_caps;
46 struct ceph_pagelist *pagelist; 47 struct ceph_pagelist *pagelist;
47 bool flock; 48 bool flock;
48}; 49};
@@ -443,6 +444,7 @@ static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc,
443 INIT_LIST_HEAD(&s->s_waiting); 444 INIT_LIST_HEAD(&s->s_waiting);
444 INIT_LIST_HEAD(&s->s_unsafe); 445 INIT_LIST_HEAD(&s->s_unsafe);
445 s->s_num_cap_releases = 0; 446 s->s_num_cap_releases = 0;
447 s->s_cap_reconnect = 0;
446 s->s_cap_iterator = NULL; 448 s->s_cap_iterator = NULL;
447 INIT_LIST_HEAD(&s->s_cap_releases); 449 INIT_LIST_HEAD(&s->s_cap_releases);
448 INIT_LIST_HEAD(&s->s_cap_releases_done); 450 INIT_LIST_HEAD(&s->s_cap_releases_done);
@@ -642,6 +644,8 @@ static void __unregister_request(struct ceph_mds_client *mdsc,
642 req->r_unsafe_dir = NULL; 644 req->r_unsafe_dir = NULL;
643 } 645 }
644 646
647 complete_all(&req->r_safe_completion);
648
645 ceph_mdsc_put_request(req); 649 ceph_mdsc_put_request(req);
646} 650}
647 651
@@ -986,7 +990,7 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
986 dout("removing cap %p, ci is %p, inode is %p\n", 990 dout("removing cap %p, ci is %p, inode is %p\n",
987 cap, ci, &ci->vfs_inode); 991 cap, ci, &ci->vfs_inode);
988 spin_lock(&ci->i_ceph_lock); 992 spin_lock(&ci->i_ceph_lock);
989 __ceph_remove_cap(cap); 993 __ceph_remove_cap(cap, false);
990 if (!__ceph_is_any_real_caps(ci)) { 994 if (!__ceph_is_any_real_caps(ci)) {
991 struct ceph_mds_client *mdsc = 995 struct ceph_mds_client *mdsc =
992 ceph_sb_to_client(inode->i_sb)->mdsc; 996 ceph_sb_to_client(inode->i_sb)->mdsc;
@@ -1231,9 +1235,7 @@ static int trim_caps_cb(struct inode *inode, struct ceph_cap *cap, void *arg)
1231 session->s_trim_caps--; 1235 session->s_trim_caps--;
1232 if (oissued) { 1236 if (oissued) {
1233 /* we aren't the only cap.. just remove us */ 1237 /* we aren't the only cap.. just remove us */
1234 __queue_cap_release(session, ceph_ino(inode), cap->cap_id, 1238 __ceph_remove_cap(cap, true);
1235 cap->mseq, cap->issue_seq);
1236 __ceph_remove_cap(cap);
1237 } else { 1239 } else {
1238 /* try to drop referring dentries */ 1240 /* try to drop referring dentries */
1239 spin_unlock(&ci->i_ceph_lock); 1241 spin_unlock(&ci->i_ceph_lock);
@@ -1416,7 +1418,6 @@ static void discard_cap_releases(struct ceph_mds_client *mdsc,
1416 unsigned num; 1418 unsigned num;
1417 1419
1418 dout("discard_cap_releases mds%d\n", session->s_mds); 1420 dout("discard_cap_releases mds%d\n", session->s_mds);
1419 spin_lock(&session->s_cap_lock);
1420 1421
1421 /* zero out the in-progress message */ 1422 /* zero out the in-progress message */
1422 msg = list_first_entry(&session->s_cap_releases, 1423 msg = list_first_entry(&session->s_cap_releases,
@@ -1443,8 +1444,6 @@ static void discard_cap_releases(struct ceph_mds_client *mdsc,
1443 msg->front.iov_len = sizeof(*head); 1444 msg->front.iov_len = sizeof(*head);
1444 list_add(&msg->list_head, &session->s_cap_releases); 1445 list_add(&msg->list_head, &session->s_cap_releases);
1445 } 1446 }
1446
1447 spin_unlock(&session->s_cap_lock);
1448} 1447}
1449 1448
1450/* 1449/*
@@ -1875,8 +1874,11 @@ static int __do_request(struct ceph_mds_client *mdsc,
1875 int mds = -1; 1874 int mds = -1;
1876 int err = -EAGAIN; 1875 int err = -EAGAIN;
1877 1876
1878 if (req->r_err || req->r_got_result) 1877 if (req->r_err || req->r_got_result) {
1878 if (req->r_aborted)
1879 __unregister_request(mdsc, req);
1879 goto out; 1880 goto out;
1881 }
1880 1882
1881 if (req->r_timeout && 1883 if (req->r_timeout &&
1882 time_after_eq(jiffies, req->r_started + req->r_timeout)) { 1884 time_after_eq(jiffies, req->r_started + req->r_timeout)) {
@@ -2186,7 +2188,6 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
2186 if (head->safe) { 2188 if (head->safe) {
2187 req->r_got_safe = true; 2189 req->r_got_safe = true;
2188 __unregister_request(mdsc, req); 2190 __unregister_request(mdsc, req);
2189 complete_all(&req->r_safe_completion);
2190 2191
2191 if (req->r_got_unsafe) { 2192 if (req->r_got_unsafe) {
2192 /* 2193 /*
@@ -2238,8 +2239,7 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
2238 err = ceph_fill_trace(mdsc->fsc->sb, req, req->r_session); 2239 err = ceph_fill_trace(mdsc->fsc->sb, req, req->r_session);
2239 if (err == 0) { 2240 if (err == 0) {
2240 if (result == 0 && (req->r_op == CEPH_MDS_OP_READDIR || 2241 if (result == 0 && (req->r_op == CEPH_MDS_OP_READDIR ||
2241 req->r_op == CEPH_MDS_OP_LSSNAP) && 2242 req->r_op == CEPH_MDS_OP_LSSNAP))
2242 rinfo->dir_nr)
2243 ceph_readdir_prepopulate(req, req->r_session); 2243 ceph_readdir_prepopulate(req, req->r_session);
2244 ceph_unreserve_caps(mdsc, &req->r_caps_reservation); 2244 ceph_unreserve_caps(mdsc, &req->r_caps_reservation);
2245 } 2245 }
@@ -2490,6 +2490,7 @@ static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap,
2490 cap->seq = 0; /* reset cap seq */ 2490 cap->seq = 0; /* reset cap seq */
2491 cap->issue_seq = 0; /* and issue_seq */ 2491 cap->issue_seq = 0; /* and issue_seq */
2492 cap->mseq = 0; /* and migrate_seq */ 2492 cap->mseq = 0; /* and migrate_seq */
2493 cap->cap_gen = cap->session->s_cap_gen;
2493 2494
2494 if (recon_state->flock) { 2495 if (recon_state->flock) {
2495 rec.v2.cap_id = cpu_to_le64(cap->cap_id); 2496 rec.v2.cap_id = cpu_to_le64(cap->cap_id);
@@ -2552,6 +2553,8 @@ encode_again:
2552 } else { 2553 } else {
2553 err = ceph_pagelist_append(pagelist, &rec, reclen); 2554 err = ceph_pagelist_append(pagelist, &rec, reclen);
2554 } 2555 }
2556
2557 recon_state->nr_caps++;
2555out_free: 2558out_free:
2556 kfree(path); 2559 kfree(path);
2557out_dput: 2560out_dput:
@@ -2579,6 +2582,7 @@ static void send_mds_reconnect(struct ceph_mds_client *mdsc,
2579 struct rb_node *p; 2582 struct rb_node *p;
2580 int mds = session->s_mds; 2583 int mds = session->s_mds;
2581 int err = -ENOMEM; 2584 int err = -ENOMEM;
2585 int s_nr_caps;
2582 struct ceph_pagelist *pagelist; 2586 struct ceph_pagelist *pagelist;
2583 struct ceph_reconnect_state recon_state; 2587 struct ceph_reconnect_state recon_state;
2584 2588
@@ -2610,20 +2614,38 @@ static void send_mds_reconnect(struct ceph_mds_client *mdsc,
2610 dout("session %p state %s\n", session, 2614 dout("session %p state %s\n", session,
2611 session_state_name(session->s_state)); 2615 session_state_name(session->s_state));
2612 2616
2617 spin_lock(&session->s_gen_ttl_lock);
2618 session->s_cap_gen++;
2619 spin_unlock(&session->s_gen_ttl_lock);
2620
2621 spin_lock(&session->s_cap_lock);
2622 /*
2623 * notify __ceph_remove_cap() that we are composing cap reconnect.
2624 * If a cap get released before being added to the cap reconnect,
2625 * __ceph_remove_cap() should skip queuing cap release.
2626 */
2627 session->s_cap_reconnect = 1;
2613 /* drop old cap expires; we're about to reestablish that state */ 2628 /* drop old cap expires; we're about to reestablish that state */
2614 discard_cap_releases(mdsc, session); 2629 discard_cap_releases(mdsc, session);
2630 spin_unlock(&session->s_cap_lock);
2615 2631
2616 /* traverse this session's caps */ 2632 /* traverse this session's caps */
2617 err = ceph_pagelist_encode_32(pagelist, session->s_nr_caps); 2633 s_nr_caps = session->s_nr_caps;
2634 err = ceph_pagelist_encode_32(pagelist, s_nr_caps);
2618 if (err) 2635 if (err)
2619 goto fail; 2636 goto fail;
2620 2637
2638 recon_state.nr_caps = 0;
2621 recon_state.pagelist = pagelist; 2639 recon_state.pagelist = pagelist;
2622 recon_state.flock = session->s_con.peer_features & CEPH_FEATURE_FLOCK; 2640 recon_state.flock = session->s_con.peer_features & CEPH_FEATURE_FLOCK;
2623 err = iterate_session_caps(session, encode_caps_cb, &recon_state); 2641 err = iterate_session_caps(session, encode_caps_cb, &recon_state);
2624 if (err < 0) 2642 if (err < 0)
2625 goto fail; 2643 goto fail;
2626 2644
2645 spin_lock(&session->s_cap_lock);
2646 session->s_cap_reconnect = 0;
2647 spin_unlock(&session->s_cap_lock);
2648
2627 /* 2649 /*
2628 * snaprealms. we provide mds with the ino, seq (version), and 2650 * snaprealms. we provide mds with the ino, seq (version), and
2629 * parent for all of our realms. If the mds has any newer info, 2651 * parent for all of our realms. If the mds has any newer info,
@@ -2646,11 +2668,18 @@ static void send_mds_reconnect(struct ceph_mds_client *mdsc,
2646 2668
2647 if (recon_state.flock) 2669 if (recon_state.flock)
2648 reply->hdr.version = cpu_to_le16(2); 2670 reply->hdr.version = cpu_to_le16(2);
2649 if (pagelist->length) { 2671
2650 /* set up outbound data if we have any */ 2672 /* raced with cap release? */
2651 reply->hdr.data_len = cpu_to_le32(pagelist->length); 2673 if (s_nr_caps != recon_state.nr_caps) {
2652 ceph_msg_data_add_pagelist(reply, pagelist); 2674 struct page *page = list_first_entry(&pagelist->head,
2675 struct page, lru);
2676 __le32 *addr = kmap_atomic(page);
2677 *addr = cpu_to_le32(recon_state.nr_caps);
2678 kunmap_atomic(addr);
2653 } 2679 }
2680
2681 reply->hdr.data_len = cpu_to_le32(pagelist->length);
2682 ceph_msg_data_add_pagelist(reply, pagelist);
2654 ceph_con_send(&session->s_con, reply); 2683 ceph_con_send(&session->s_con, reply);
2655 2684
2656 mutex_unlock(&session->s_mutex); 2685 mutex_unlock(&session->s_mutex);
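
The reconnect path writes the cap count into the pagelist before iterating the caps, so the count can go stale if caps are released while the message is being built; the fix above patches the 32-bit count in place afterwards. A minimal sketch of that patch-up step as a standalone helper (hypothetical name, the kernel does it inline above; it assumes the count is the very first field written to the pagelist):

	static void fixup_pagelist_lead_u32(struct ceph_pagelist *pl, u32 val)
	{
		struct page *page = list_first_entry(&pl->head, struct page, lru);
		__le32 *addr = kmap_atomic(page);

		*addr = cpu_to_le32(val);	/* overwrite the stale count */
		kunmap_atomic(addr);
	}
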
diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h
index c2a19fbbe517..4c053d099ae4 100644
--- a/fs/ceph/mds_client.h
+++ b/fs/ceph/mds_client.h
@@ -132,6 +132,7 @@ struct ceph_mds_session {
132 struct list_head s_caps; /* all caps issued by this session */ 132 struct list_head s_caps; /* all caps issued by this session */
133 int s_nr_caps, s_trim_caps; 133 int s_nr_caps, s_trim_caps;
134 int s_num_cap_releases; 134 int s_num_cap_releases;
135 int s_cap_reconnect;
135 struct list_head s_cap_releases; /* waiting cap_release messages */ 136 struct list_head s_cap_releases; /* waiting cap_release messages */
136 struct list_head s_cap_releases_done; /* ready to send */ 137 struct list_head s_cap_releases_done; /* ready to send */
137 struct ceph_cap *s_cap_iterator; 138 struct ceph_cap *s_cap_iterator;
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index 6014b0a3c405..ef4ac38bb614 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -741,13 +741,7 @@ extern int ceph_add_cap(struct inode *inode,
741 int fmode, unsigned issued, unsigned wanted, 741 int fmode, unsigned issued, unsigned wanted,
742 unsigned cap, unsigned seq, u64 realmino, int flags, 742 unsigned cap, unsigned seq, u64 realmino, int flags,
743 struct ceph_cap_reservation *caps_reservation); 743 struct ceph_cap_reservation *caps_reservation);
744extern void __ceph_remove_cap(struct ceph_cap *cap); 744extern void __ceph_remove_cap(struct ceph_cap *cap, bool queue_release);
745static inline void ceph_remove_cap(struct ceph_cap *cap)
746{
747 spin_lock(&cap->ci->i_ceph_lock);
748 __ceph_remove_cap(cap);
749 spin_unlock(&cap->ci->i_ceph_lock);
750}
751extern void ceph_put_cap(struct ceph_mds_client *mdsc, 745extern void ceph_put_cap(struct ceph_mds_client *mdsc,
752 struct ceph_cap *cap); 746 struct ceph_cap *cap);
753 747
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index d9ea7ada1378..f918a998a087 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -384,6 +384,7 @@ struct smb_version_operations {
384 int (*clone_range)(const unsigned int, struct cifsFileInfo *src_file, 384 int (*clone_range)(const unsigned int, struct cifsFileInfo *src_file,
385 struct cifsFileInfo *target_file, u64 src_off, u64 len, 385 struct cifsFileInfo *target_file, u64 src_off, u64 len,
386 u64 dest_off); 386 u64 dest_off);
387 int (*validate_negotiate)(const unsigned int, struct cifs_tcon *);
387}; 388};
388 389
389struct smb_version_values { 390struct smb_version_values {
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index aa3397620342..2c29db6a247e 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -477,9 +477,10 @@ extern int CIFSGetExtAttr(const unsigned int xid, struct cifs_tcon *tcon,
477 const int netfid, __u64 *pExtAttrBits, __u64 *pMask); 477 const int netfid, __u64 *pExtAttrBits, __u64 *pMask);
478extern void cifs_autodisable_serverino(struct cifs_sb_info *cifs_sb); 478extern void cifs_autodisable_serverino(struct cifs_sb_info *cifs_sb);
479extern bool CIFSCouldBeMFSymlink(const struct cifs_fattr *fattr); 479extern bool CIFSCouldBeMFSymlink(const struct cifs_fattr *fattr);
480extern int CIFSCheckMFSymlink(struct cifs_fattr *fattr, 480extern int CIFSCheckMFSymlink(unsigned int xid, struct cifs_tcon *tcon,
481 const unsigned char *path, 481 struct cifs_sb_info *cifs_sb,
482 struct cifs_sb_info *cifs_sb, unsigned int xid); 482 struct cifs_fattr *fattr,
483 const unsigned char *path);
483extern int mdfour(unsigned char *, unsigned char *, int); 484extern int mdfour(unsigned char *, unsigned char *, int);
484extern int E_md4hash(const unsigned char *passwd, unsigned char *p16, 485extern int E_md4hash(const unsigned char *passwd, unsigned char *p16,
485 const struct nls_table *codepage); 486 const struct nls_table *codepage);
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 124aa0230c1b..d707edb6b852 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -4010,7 +4010,7 @@ QFileInfoRetry:
4010 rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, 4010 rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
4011 (struct smb_hdr *) pSMBr, &bytes_returned, 0); 4011 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
4012 if (rc) { 4012 if (rc) {
4013 cifs_dbg(FYI, "Send error in QPathInfo = %d\n", rc); 4013 cifs_dbg(FYI, "Send error in QFileInfo = %d", rc);
4014 } else { /* decode response */ 4014 } else { /* decode response */
4015 rc = validate_t2((struct smb_t2_rsp *)pSMBr); 4015 rc = validate_t2((struct smb_t2_rsp *)pSMBr);
4016 4016
@@ -4179,7 +4179,7 @@ UnixQFileInfoRetry:
4179 rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, 4179 rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
4180 (struct smb_hdr *) pSMBr, &bytes_returned, 0); 4180 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
4181 if (rc) { 4181 if (rc) {
4182 cifs_dbg(FYI, "Send error in QPathInfo = %d\n", rc); 4182 cifs_dbg(FYI, "Send error in UnixQFileInfo = %d", rc);
4183 } else { /* decode response */ 4183 } else { /* decode response */
4184 rc = validate_t2((struct smb_t2_rsp *)pSMBr); 4184 rc = validate_t2((struct smb_t2_rsp *)pSMBr);
4185 4185
@@ -4263,7 +4263,7 @@ UnixQPathInfoRetry:
4263 rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, 4263 rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
4264 (struct smb_hdr *) pSMBr, &bytes_returned, 0); 4264 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
4265 if (rc) { 4265 if (rc) {
4266 cifs_dbg(FYI, "Send error in QPathInfo = %d\n", rc); 4266 cifs_dbg(FYI, "Send error in UnixQPathInfo = %d", rc);
4267 } else { /* decode response */ 4267 } else { /* decode response */
4268 rc = validate_t2((struct smb_t2_rsp *)pSMBr); 4268 rc = validate_t2((struct smb_t2_rsp *)pSMBr);
4269 4269
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index 11ff5f116b20..a514e0a65f69 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -193,7 +193,7 @@ check_name(struct dentry *direntry)
193static int 193static int
194cifs_do_create(struct inode *inode, struct dentry *direntry, unsigned int xid, 194cifs_do_create(struct inode *inode, struct dentry *direntry, unsigned int xid,
195 struct tcon_link *tlink, unsigned oflags, umode_t mode, 195 struct tcon_link *tlink, unsigned oflags, umode_t mode,
196 __u32 *oplock, struct cifs_fid *fid, int *created) 196 __u32 *oplock, struct cifs_fid *fid)
197{ 197{
198 int rc = -ENOENT; 198 int rc = -ENOENT;
199 int create_options = CREATE_NOT_DIR; 199 int create_options = CREATE_NOT_DIR;
@@ -349,7 +349,6 @@ cifs_do_create(struct inode *inode, struct dentry *direntry, unsigned int xid,
349 .device = 0, 349 .device = 0,
350 }; 350 };
351 351
352 *created |= FILE_CREATED;
353 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) { 352 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) {
354 args.uid = current_fsuid(); 353 args.uid = current_fsuid();
355 if (inode->i_mode & S_ISGID) 354 if (inode->i_mode & S_ISGID)
@@ -480,13 +479,16 @@ cifs_atomic_open(struct inode *inode, struct dentry *direntry,
480 cifs_add_pending_open(&fid, tlink, &open); 479 cifs_add_pending_open(&fid, tlink, &open);
481 480
482 rc = cifs_do_create(inode, direntry, xid, tlink, oflags, mode, 481 rc = cifs_do_create(inode, direntry, xid, tlink, oflags, mode,
483 &oplock, &fid, opened); 482 &oplock, &fid);
484 483
485 if (rc) { 484 if (rc) {
486 cifs_del_pending_open(&open); 485 cifs_del_pending_open(&open);
487 goto out; 486 goto out;
488 } 487 }
489 488
489 if ((oflags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
490 *opened |= FILE_CREATED;
491
490 rc = finish_open(file, direntry, generic_file_open, opened); 492 rc = finish_open(file, direntry, generic_file_open, opened);
491 if (rc) { 493 if (rc) {
492 if (server->ops->close) 494 if (server->ops->close)
@@ -529,7 +531,6 @@ int cifs_create(struct inode *inode, struct dentry *direntry, umode_t mode,
529 struct TCP_Server_Info *server; 531 struct TCP_Server_Info *server;
530 struct cifs_fid fid; 532 struct cifs_fid fid;
531 __u32 oplock; 533 __u32 oplock;
532 int created = FILE_CREATED;
533 534
534 cifs_dbg(FYI, "cifs_create parent inode = 0x%p name is: %s and dentry = 0x%p\n", 535 cifs_dbg(FYI, "cifs_create parent inode = 0x%p name is: %s and dentry = 0x%p\n",
535 inode, direntry->d_name.name, direntry); 536 inode, direntry->d_name.name, direntry);
@@ -546,7 +547,7 @@ int cifs_create(struct inode *inode, struct dentry *direntry, umode_t mode,
546 server->ops->new_lease_key(&fid); 547 server->ops->new_lease_key(&fid);
547 548
548 rc = cifs_do_create(inode, direntry, xid, tlink, oflags, mode, 549 rc = cifs_do_create(inode, direntry, xid, tlink, oflags, mode,
549 &oplock, &fid, &created); 550 &oplock, &fid);
550 if (!rc && server->ops->close) 551 if (!rc && server->ops->close)
551 server->ops->close(xid, tcon, &fid); 552 server->ops->close(xid, tcon, &fid);
552 553
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 36f9ebb93ceb..49719b8228e5 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -383,7 +383,8 @@ int cifs_get_inode_info_unix(struct inode **pinode,
383 383
384 /* check for Minshall+French symlinks */ 384 /* check for Minshall+French symlinks */
385 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS) { 385 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS) {
386 int tmprc = CIFSCheckMFSymlink(&fattr, full_path, cifs_sb, xid); 386 int tmprc = CIFSCheckMFSymlink(xid, tcon, cifs_sb, &fattr,
387 full_path);
387 if (tmprc) 388 if (tmprc)
388 cifs_dbg(FYI, "CIFSCheckMFSymlink: %d\n", tmprc); 389 cifs_dbg(FYI, "CIFSCheckMFSymlink: %d\n", tmprc);
389 } 390 }
@@ -799,7 +800,8 @@ cifs_get_inode_info(struct inode **inode, const char *full_path,
799 800
800 /* check for Minshall+French symlinks */ 801 /* check for Minshall+French symlinks */
801 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS) { 802 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS) {
802 tmprc = CIFSCheckMFSymlink(&fattr, full_path, cifs_sb, xid); 803 tmprc = CIFSCheckMFSymlink(xid, tcon, cifs_sb, &fattr,
804 full_path);
803 if (tmprc) 805 if (tmprc)
804 cifs_dbg(FYI, "CIFSCheckMFSymlink: %d\n", tmprc); 806 cifs_dbg(FYI, "CIFSCheckMFSymlink: %d\n", tmprc);
805 } 807 }
diff --git a/fs/cifs/ioctl.c b/fs/cifs/ioctl.c
index 409b45eefe70..77492301cc2b 100644
--- a/fs/cifs/ioctl.c
+++ b/fs/cifs/ioctl.c
@@ -26,13 +26,15 @@
26#include <linux/mount.h> 26#include <linux/mount.h>
27#include <linux/mm.h> 27#include <linux/mm.h>
28#include <linux/pagemap.h> 28#include <linux/pagemap.h>
29#include <linux/btrfs.h>
30#include "cifspdu.h" 29#include "cifspdu.h"
31#include "cifsglob.h" 30#include "cifsglob.h"
32#include "cifsproto.h" 31#include "cifsproto.h"
33#include "cifs_debug.h" 32#include "cifs_debug.h"
34#include "cifsfs.h" 33#include "cifsfs.h"
35 34
35#define CIFS_IOCTL_MAGIC 0xCF
36#define CIFS_IOC_COPYCHUNK_FILE _IOW(CIFS_IOCTL_MAGIC, 3, int)
37
36static long cifs_ioctl_clone(unsigned int xid, struct file *dst_file, 38static long cifs_ioctl_clone(unsigned int xid, struct file *dst_file,
37 unsigned long srcfd, u64 off, u64 len, u64 destoff) 39 unsigned long srcfd, u64 off, u64 len, u64 destoff)
38{ 40{
@@ -213,7 +215,7 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
213 cifs_dbg(FYI, "set compress flag rc %d\n", rc); 215 cifs_dbg(FYI, "set compress flag rc %d\n", rc);
214 } 216 }
215 break; 217 break;
216 case BTRFS_IOC_CLONE: 218 case CIFS_IOC_COPYCHUNK_FILE:
217 rc = cifs_ioctl_clone(xid, filep, arg, 0, 0, 0); 219 rc = cifs_ioctl_clone(xid, filep, arg, 0, 0, 0);
218 break; 220 break;
219 default: 221 default:
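
The ioctl.c hunk stops reusing BTRFS_IOC_CLONE and defines a cifs-private ioctl number instead. From userspace the call shape stays the same: the argument is the source file descriptor, which is how cifs_ioctl_clone() interprets arg above. A hypothetical userspace sketch (not part of the patch):

	#include <sys/ioctl.h>

	#define CIFS_IOCTL_MAGIC	0xCF
	#define CIFS_IOC_COPYCHUNK_FILE	_IOW(CIFS_IOCTL_MAGIC, 3, int)

	/* server-side copy of src_fd's contents into dest_fd */
	static int cifs_copychunk(int dest_fd, int src_fd)
	{
		return ioctl(dest_fd, CIFS_IOC_COPYCHUNK_FILE, src_fd);
	}
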
diff --git a/fs/cifs/link.c b/fs/cifs/link.c
index cc0234710ddb..92aee08483a5 100644
--- a/fs/cifs/link.c
+++ b/fs/cifs/link.c
@@ -354,34 +354,30 @@ open_query_close_cifs_symlink(const unsigned char *path, char *pbuf,
354 354
355 355
356int 356int
357CIFSCheckMFSymlink(struct cifs_fattr *fattr, 357CIFSCheckMFSymlink(unsigned int xid, struct cifs_tcon *tcon,
358 const unsigned char *path, 358 struct cifs_sb_info *cifs_sb, struct cifs_fattr *fattr,
359 struct cifs_sb_info *cifs_sb, unsigned int xid) 359 const unsigned char *path)
360{ 360{
361 int rc = 0; 361 int rc;
362 u8 *buf = NULL; 362 u8 *buf = NULL;
363 unsigned int link_len = 0; 363 unsigned int link_len = 0;
364 unsigned int bytes_read = 0; 364 unsigned int bytes_read = 0;
365 struct cifs_tcon *ptcon;
366 365
367 if (!CIFSCouldBeMFSymlink(fattr)) 366 if (!CIFSCouldBeMFSymlink(fattr))
368 /* it's not a symlink */ 367 /* it's not a symlink */
369 return 0; 368 return 0;
370 369
371 buf = kmalloc(CIFS_MF_SYMLINK_FILE_SIZE, GFP_KERNEL); 370 buf = kmalloc(CIFS_MF_SYMLINK_FILE_SIZE, GFP_KERNEL);
372 if (!buf) { 371 if (!buf)
373 rc = -ENOMEM; 372 return -ENOMEM;
374 goto out;
375 }
376 373
377 ptcon = tlink_tcon(cifs_sb_tlink(cifs_sb)); 374 if (tcon->ses->server->ops->query_mf_symlink)
378 if ((ptcon->ses) && (ptcon->ses->server->ops->query_mf_symlink)) 375 rc = tcon->ses->server->ops->query_mf_symlink(path, buf,
379 rc = ptcon->ses->server->ops->query_mf_symlink(path, buf, 376 &bytes_read, cifs_sb, xid);
380 &bytes_read, cifs_sb, xid);
381 else 377 else
382 goto out; 378 rc = -ENOSYS;
383 379
384 if (rc != 0) 380 if (rc)
385 goto out; 381 goto out;
386 382
387 if (bytes_read == 0) /* not a symlink */ 383 if (bytes_read == 0) /* not a symlink */
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index 11dde4b24f8a..757da3e54d3d 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -532,7 +532,10 @@ smb2_clone_range(const unsigned int xid,
532 int rc; 532 int rc;
533 unsigned int ret_data_len; 533 unsigned int ret_data_len;
534 struct copychunk_ioctl *pcchunk; 534 struct copychunk_ioctl *pcchunk;
535 char *retbuf = NULL; 535 struct copychunk_ioctl_rsp *retbuf = NULL;
536 struct cifs_tcon *tcon;
537 int chunks_copied = 0;
538 bool chunk_sizes_updated = false;
536 539
537 pcchunk = kmalloc(sizeof(struct copychunk_ioctl), GFP_KERNEL); 540 pcchunk = kmalloc(sizeof(struct copychunk_ioctl), GFP_KERNEL);
538 541
@@ -547,27 +550,96 @@ smb2_clone_range(const unsigned int xid,
547 550
548 /* Note: request_res_key sets res_key null only if rc !=0 */ 551 /* Note: request_res_key sets res_key null only if rc !=0 */
549 if (rc) 552 if (rc)
550 return rc; 553 goto cchunk_out;
551 554
552 /* For now array only one chunk long, will make more flexible later */ 555 /* For now array only one chunk long, will make more flexible later */
553 pcchunk->ChunkCount = __constant_cpu_to_le32(1); 556 pcchunk->ChunkCount = __constant_cpu_to_le32(1);
554 pcchunk->Reserved = 0; 557 pcchunk->Reserved = 0;
555 pcchunk->SourceOffset = cpu_to_le64(src_off);
556 pcchunk->TargetOffset = cpu_to_le64(dest_off);
557 pcchunk->Length = cpu_to_le32(len);
558 pcchunk->Reserved2 = 0; 558 pcchunk->Reserved2 = 0;
559 559
560 /* Request that server copy to target from src file identified by key */ 560 tcon = tlink_tcon(trgtfile->tlink);
561 rc = SMB2_ioctl(xid, tlink_tcon(trgtfile->tlink),
562 trgtfile->fid.persistent_fid,
563 trgtfile->fid.volatile_fid, FSCTL_SRV_COPYCHUNK_WRITE,
564 true /* is_fsctl */, (char *)pcchunk,
565 sizeof(struct copychunk_ioctl), &retbuf, &ret_data_len);
566 561
567 /* BB need to special case rc = EINVAL to alter chunk size */ 562 while (len > 0) {
563 pcchunk->SourceOffset = cpu_to_le64(src_off);
564 pcchunk->TargetOffset = cpu_to_le64(dest_off);
565 pcchunk->Length =
566 cpu_to_le32(min_t(u32, len, tcon->max_bytes_chunk));
568 567
569 cifs_dbg(FYI, "rc %d data length out %d\n", rc, ret_data_len); 568 /* Request server copy to target from src identified by key */
569 rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid,
570 trgtfile->fid.volatile_fid, FSCTL_SRV_COPYCHUNK_WRITE,
571 true /* is_fsctl */, (char *)pcchunk,
572 sizeof(struct copychunk_ioctl), (char **)&retbuf,
573 &ret_data_len);
574 if (rc == 0) {
575 if (ret_data_len !=
576 sizeof(struct copychunk_ioctl_rsp)) {
577 cifs_dbg(VFS, "invalid cchunk response size\n");
578 rc = -EIO;
579 goto cchunk_out;
580 }
581 if (retbuf->TotalBytesWritten == 0) {
582 cifs_dbg(FYI, "no bytes copied\n");
583 rc = -EIO;
584 goto cchunk_out;
585 }
586 /*
587 * Check if server claimed to write more than we asked
588 */
589 if (le32_to_cpu(retbuf->TotalBytesWritten) >
590 le32_to_cpu(pcchunk->Length)) {
591 cifs_dbg(VFS, "invalid copy chunk response\n");
592 rc = -EIO;
593 goto cchunk_out;
594 }
595 if (le32_to_cpu(retbuf->ChunksWritten) != 1) {
596 cifs_dbg(VFS, "invalid num chunks written\n");
597 rc = -EIO;
598 goto cchunk_out;
599 }
600 chunks_copied++;
601
602 src_off += le32_to_cpu(retbuf->TotalBytesWritten);
603 dest_off += le32_to_cpu(retbuf->TotalBytesWritten);
604 len -= le32_to_cpu(retbuf->TotalBytesWritten);
605
606 cifs_dbg(FYI, "Chunks %d PartialChunk %d Total %d\n",
607 le32_to_cpu(retbuf->ChunksWritten),
608 le32_to_cpu(retbuf->ChunkBytesWritten),
609 le32_to_cpu(retbuf->TotalBytesWritten));
610 } else if (rc == -EINVAL) {
611 if (ret_data_len != sizeof(struct copychunk_ioctl_rsp))
612 goto cchunk_out;
613
614 cifs_dbg(FYI, "MaxChunks %d BytesChunk %d MaxCopy %d\n",
615 le32_to_cpu(retbuf->ChunksWritten),
616 le32_to_cpu(retbuf->ChunkBytesWritten),
617 le32_to_cpu(retbuf->TotalBytesWritten));
618
619 /*
620 * Check if this is the first request using these sizes,
621 * (ie check if copy succeed once with original sizes
622 * and check if the server gave us different sizes after
623 * we already updated max sizes on previous request).
624 * if not then why is the server returning an error now
625 */
626 if ((chunks_copied != 0) || chunk_sizes_updated)
627 goto cchunk_out;
628
629 /* Check that server is not asking us to grow size */
630 if (le32_to_cpu(retbuf->ChunkBytesWritten) <
631 tcon->max_bytes_chunk)
632 tcon->max_bytes_chunk =
633 le32_to_cpu(retbuf->ChunkBytesWritten);
634 else
635 goto cchunk_out; /* server gave us bogus size */
636
637 /* No need to change MaxChunks since already set to 1 */
638 chunk_sizes_updated = true;
639 }
640 }
570 641
642cchunk_out:
571 kfree(pcchunk); 643 kfree(pcchunk);
572 return rc; 644 return rc;
573} 645}
@@ -1247,6 +1319,7 @@ struct smb_version_operations smb30_operations = {
1247 .create_lease_buf = smb3_create_lease_buf, 1319 .create_lease_buf = smb3_create_lease_buf,
1248 .parse_lease_buf = smb3_parse_lease_buf, 1320 .parse_lease_buf = smb3_parse_lease_buf,
1249 .clone_range = smb2_clone_range, 1321 .clone_range = smb2_clone_range,
1322 .validate_negotiate = smb3_validate_negotiate,
1250}; 1323};
1251 1324
1252struct smb_version_values smb20_values = { 1325struct smb_version_values smb20_values = {
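
smb2_clone_range() now copies in a loop of server-sized chunks instead of issuing a single FSCTL_SRV_COPYCHUNK_WRITE for the whole range, and on a first -EINVAL it adopts the smaller chunk size the server advertises before retrying. The shape of the loop, stripped of the response validation (issue_copychunk() is an assumed wrapper, not a real function in the tree):

	while (len > 0) {
		u32 chunk = min_t(u32, len, tcon->max_bytes_chunk);
		u32 written = issue_copychunk(xid, tcon, trgtfile,
					      src_off, dest_off, chunk);

		if (!written)
			break;		/* error or nothing copied */
		src_off  += written;
		dest_off += written;
		len      -= written;
	}
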
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index d65270c290a1..2013234b73ad 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -454,6 +454,81 @@ neg_exit:
454 return rc; 454 return rc;
455} 455}
456 456
457int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon)
458{
459 int rc = 0;
460 struct validate_negotiate_info_req vneg_inbuf;
461 struct validate_negotiate_info_rsp *pneg_rsp;
462 u32 rsplen;
463
464 cifs_dbg(FYI, "validate negotiate\n");
465
466 /*
467 * validation ioctl must be signed, so no point sending this if we
468 * can not sign it. We could eventually change this to selectively
469 * sign just this, the first and only signed request on a connection.
470 * This is good enough for now since a user who wants better security
471 * would also enable signing on the mount. Having validation of
472 * negotiate info for signed connections helps reduce attack vectors
473 */
474 if (tcon->ses->server->sign == false)
475 return 0; /* validation requires signing */
476
477 vneg_inbuf.Capabilities =
478 cpu_to_le32(tcon->ses->server->vals->req_capabilities);
479 memcpy(vneg_inbuf.Guid, cifs_client_guid, SMB2_CLIENT_GUID_SIZE);
480
481 if (tcon->ses->sign)
482 vneg_inbuf.SecurityMode =
483 cpu_to_le16(SMB2_NEGOTIATE_SIGNING_REQUIRED);
484 else if (global_secflags & CIFSSEC_MAY_SIGN)
485 vneg_inbuf.SecurityMode =
486 cpu_to_le16(SMB2_NEGOTIATE_SIGNING_ENABLED);
487 else
488 vneg_inbuf.SecurityMode = 0;
489
490 vneg_inbuf.DialectCount = cpu_to_le16(1);
491 vneg_inbuf.Dialects[0] =
492 cpu_to_le16(tcon->ses->server->vals->protocol_id);
493
494 rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
495 FSCTL_VALIDATE_NEGOTIATE_INFO, true /* is_fsctl */,
496 (char *)&vneg_inbuf, sizeof(struct validate_negotiate_info_req),
497 (char **)&pneg_rsp, &rsplen);
498
499 if (rc != 0) {
500 cifs_dbg(VFS, "validate protocol negotiate failed: %d\n", rc);
501 return -EIO;
502 }
503
504 if (rsplen != sizeof(struct validate_negotiate_info_rsp)) {
505 cifs_dbg(VFS, "invalid size of protocol negotiate response\n");
506 return -EIO;
507 }
508
509 /* check validate negotiate info response matches what we got earlier */
510 if (pneg_rsp->Dialect !=
511 cpu_to_le16(tcon->ses->server->vals->protocol_id))
512 goto vneg_out;
513
514 if (pneg_rsp->SecurityMode != cpu_to_le16(tcon->ses->server->sec_mode))
515 goto vneg_out;
516
517 /* do not validate server guid because not saved at negprot time yet */
518
519 if ((le32_to_cpu(pneg_rsp->Capabilities) | SMB2_NT_FIND |
520 SMB2_LARGE_FILES) != tcon->ses->server->capabilities)
521 goto vneg_out;
522
523 /* validate negotiate successful */
524 cifs_dbg(FYI, "validate negotiate info successful\n");
525 return 0;
526
527vneg_out:
528 cifs_dbg(VFS, "protocol revalidation - security settings mismatch\n");
529 return -EIO;
530}
531
457int 532int
458SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses, 533SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses,
459 const struct nls_table *nls_cp) 534 const struct nls_table *nls_cp)
@@ -829,6 +904,8 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
829 ((tcon->share_flags & SHI1005_FLAGS_DFS) == 0)) 904 ((tcon->share_flags & SHI1005_FLAGS_DFS) == 0))
830 cifs_dbg(VFS, "DFS capability contradicts DFS flag\n"); 905 cifs_dbg(VFS, "DFS capability contradicts DFS flag\n");
831 init_copy_chunk_defaults(tcon); 906 init_copy_chunk_defaults(tcon);
907 if (tcon->ses->server->ops->validate_negotiate)
908 rc = tcon->ses->server->ops->validate_negotiate(xid, tcon);
832tcon_exit: 909tcon_exit:
833 free_rsp_buf(resp_buftype, rsp); 910 free_rsp_buf(resp_buftype, rsp);
834 kfree(unc_path); 911 kfree(unc_path);
@@ -1214,10 +1291,17 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
1214 rc = SendReceive2(xid, ses, iov, num_iovecs, &resp_buftype, 0); 1291 rc = SendReceive2(xid, ses, iov, num_iovecs, &resp_buftype, 0);
1215 rsp = (struct smb2_ioctl_rsp *)iov[0].iov_base; 1292 rsp = (struct smb2_ioctl_rsp *)iov[0].iov_base;
1216 1293
1217 if (rc != 0) { 1294 if ((rc != 0) && (rc != -EINVAL)) {
1218 if (tcon) 1295 if (tcon)
1219 cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE); 1296 cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE);
1220 goto ioctl_exit; 1297 goto ioctl_exit;
1298 } else if (rc == -EINVAL) {
1299 if ((opcode != FSCTL_SRV_COPYCHUNK_WRITE) &&
1300 (opcode != FSCTL_SRV_COPYCHUNK)) {
1301 if (tcon)
1302 cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE);
1303 goto ioctl_exit;
1304 }
1221 } 1305 }
1222 1306
1223 /* check if caller wants to look at return data or just return rc */ 1307 /* check if caller wants to look at return data or just return rc */
@@ -2154,11 +2238,9 @@ send_set_info(const unsigned int xid, struct cifs_tcon *tcon,
2154 rc = SendReceive2(xid, ses, iov, num, &resp_buftype, 0); 2238 rc = SendReceive2(xid, ses, iov, num, &resp_buftype, 0);
2155 rsp = (struct smb2_set_info_rsp *)iov[0].iov_base; 2239 rsp = (struct smb2_set_info_rsp *)iov[0].iov_base;
2156 2240
2157 if (rc != 0) { 2241 if (rc != 0)
2158 cifs_stats_fail_inc(tcon, SMB2_SET_INFO_HE); 2242 cifs_stats_fail_inc(tcon, SMB2_SET_INFO_HE);
2159 goto out; 2243
2160 }
2161out:
2162 free_rsp_buf(resp_buftype, rsp); 2244 free_rsp_buf(resp_buftype, rsp);
2163 kfree(iov); 2245 kfree(iov);
2164 return rc; 2246 return rc;
diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h
index f88320bbb477..2022c542ea3a 100644
--- a/fs/cifs/smb2pdu.h
+++ b/fs/cifs/smb2pdu.h
@@ -577,13 +577,19 @@ struct copychunk_ioctl_rsp {
577 __le32 TotalBytesWritten; 577 __le32 TotalBytesWritten;
578} __packed; 578} __packed;
579 579
580/* Response and Request are the same format */ 580struct validate_negotiate_info_req {
581struct validate_negotiate_info {
582 __le32 Capabilities; 581 __le32 Capabilities;
583 __u8 Guid[SMB2_CLIENT_GUID_SIZE]; 582 __u8 Guid[SMB2_CLIENT_GUID_SIZE];
584 __le16 SecurityMode; 583 __le16 SecurityMode;
585 __le16 DialectCount; 584 __le16 DialectCount;
586 __le16 Dialect[1]; 585 __le16 Dialects[1]; /* dialect (someday maybe list) client asked for */
586} __packed;
587
588struct validate_negotiate_info_rsp {
589 __le32 Capabilities;
590 __u8 Guid[SMB2_CLIENT_GUID_SIZE];
591 __le16 SecurityMode;
592 __le16 Dialect; /* Dialect in use for the connection */
587} __packed; 593} __packed;
588 594
589#define RSS_CAPABLE 0x00000001 595#define RSS_CAPABLE 0x00000001
diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h
index b4eea105b08c..93adc64666f3 100644
--- a/fs/cifs/smb2proto.h
+++ b/fs/cifs/smb2proto.h
@@ -162,5 +162,6 @@ extern int smb2_lockv(const unsigned int xid, struct cifs_tcon *tcon,
162 struct smb2_lock_element *buf); 162 struct smb2_lock_element *buf);
163extern int SMB2_lease_break(const unsigned int xid, struct cifs_tcon *tcon, 163extern int SMB2_lease_break(const unsigned int xid, struct cifs_tcon *tcon,
164 __u8 *lease_key, const __le32 lease_state); 164 __u8 *lease_key, const __le32 lease_state);
165extern int smb3_validate_negotiate(const unsigned int, struct cifs_tcon *);
165 166
166#endif /* _SMB2PROTO_H */ 167#endif /* _SMB2PROTO_H */
diff --git a/fs/cifs/smbfsctl.h b/fs/cifs/smbfsctl.h
index a4b2391fe66e..0e538b5c9622 100644
--- a/fs/cifs/smbfsctl.h
+++ b/fs/cifs/smbfsctl.h
@@ -90,7 +90,7 @@
90#define FSCTL_LMR_REQUEST_RESILIENCY 0x001401D4 /* BB add struct */ 90#define FSCTL_LMR_REQUEST_RESILIENCY 0x001401D4 /* BB add struct */
91#define FSCTL_LMR_GET_LINK_TRACK_INF 0x001400E8 /* BB add struct */ 91#define FSCTL_LMR_GET_LINK_TRACK_INF 0x001400E8 /* BB add struct */
92#define FSCTL_LMR_SET_LINK_TRACK_INF 0x001400EC /* BB add struct */ 92#define FSCTL_LMR_SET_LINK_TRACK_INF 0x001400EC /* BB add struct */
93#define FSCTL_VALIDATE_NEGOTIATE_INFO 0x00140204 /* BB add struct */ 93#define FSCTL_VALIDATE_NEGOTIATE_INFO 0x00140204
94/* Perform server-side data movement */ 94/* Perform server-side data movement */
95#define FSCTL_SRV_COPYCHUNK 0x001440F2 95#define FSCTL_SRV_COPYCHUNK 0x001440F2
96#define FSCTL_SRV_COPYCHUNK_WRITE 0x001480F2 96#define FSCTL_SRV_COPYCHUNK_WRITE 0x001480F2
diff --git a/fs/dcache.c b/fs/dcache.c
index 4bdb300b16e2..6055d61811d3 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -192,7 +192,7 @@ static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char
192 if (!tcount) 192 if (!tcount)
193 return 0; 193 return 0;
194 } 194 }
195 mask = ~(~0ul << tcount*8); 195 mask = bytemask_from_count(tcount);
196 return unlikely(!!((a ^ b) & mask)); 196 return unlikely(!!((a ^ b) & mask));
197} 197}
198 198
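
The dentry_string_cmp() hunk above, and the matching full_name_hash() change in the namei.c hunk further down, swap the open-coded ~(~0ul << tcount*8) expression for bytemask_from_count(). The helper is presumably the same expression hoisted into a shared header; the point is simply to mask off everything above the remaining byte count when comparing the last partial word of a name on little-endian machines. A minimal standalone sketch of the arithmetic (the macro body here is an assumption, not quoted from this diff):

    #include <stdio.h>

    /* Presumed helper definition (assumption, not taken from this diff):
     * keep only the low 'cnt' bytes of an unsigned long. */
    #define bytemask_from_count(cnt)  (~(~0ul << (cnt) * 8))

    int main(void)
    {
        unsigned long a = 0x44aabbccUL;   /* last word of name A */
        unsigned long b = 0x99aabbccUL;   /* last word of name B */
        unsigned long mask = bytemask_from_count(3);

        printf("mask = %#lx\n", mask);                  /* 0xffffff */
        /* The words differ only above the 3 valid bytes, so no mismatch. */
        printf("mismatch = %d\n", !!((a ^ b) & mask));  /* 0 */
        return 0;
    }
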
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 79b65c3b9e87..af903128891c 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -1852,8 +1852,7 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
1852 goto error_tgt_fput; 1852 goto error_tgt_fput;
1853 1853
1854 /* Check if EPOLLWAKEUP is allowed */ 1854 /* Check if EPOLLWAKEUP is allowed */
1855 if ((epds.events & EPOLLWAKEUP) && !capable(CAP_BLOCK_SUSPEND)) 1855 ep_take_care_of_epollwakeup(&epds);
1856 epds.events &= ~EPOLLWAKEUP;
1857 1856
1858 /* 1857 /*
1859 * We have to check that the file structure underneath the file descriptor 1858 * We have to check that the file structure underneath the file descriptor
@@ -1908,10 +1907,6 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
1908 } 1907 }
1909 } 1908 }
1910 } 1909 }
1911 if (op == EPOLL_CTL_DEL && is_file_epoll(tf.file)) {
1912 tep = tf.file->private_data;
1913 mutex_lock_nested(&tep->mtx, 1);
1914 }
1915 1910
1916 /* 1911 /*
1917 * Try to lookup the file inside our RB tree, Since we grabbed "mtx" 1912 * Try to lookup the file inside our RB tree, Since we grabbed "mtx"
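
The epoll_ctl() hunks above fold the EPOLLWAKEUP capability check into ep_take_care_of_epollwakeup() and drop the leftover EPOLL_CTL_DEL nested-lock block. Going only by the lines removed here, the new helper is presumably something along these lines (a sketch, not quoted from this diff), with a CONFIG_PM_SLEEP split being the usual reason to hide the check behind a helper:

    #ifdef CONFIG_PM_SLEEP
    static inline void ep_take_care_of_epollwakeup(struct epoll_event *epev)
    {
        /* Same test the caller used to open-code: strip EPOLLWAKEUP from
         * tasks that are not allowed to block suspend. */
        if ((epev->events & EPOLLWAKEUP) && !capable(CAP_BLOCK_SUSPEND))
            epev->events &= ~EPOLLWAKEUP;
    }
    #else
    static inline void ep_take_care_of_epollwakeup(struct epoll_event *epev)
    {
        epev->events &= ~EPOLLWAKEUP;
    }
    #endif
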
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index 288534920fe5..20d6697bd638 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -1493,6 +1493,7 @@ static ssize_t ext2_quota_write(struct super_block *sb, int type,
1493 sb->s_blocksize - offset : towrite; 1493 sb->s_blocksize - offset : towrite;
1494 1494
1495 tmp_bh.b_state = 0; 1495 tmp_bh.b_state = 0;
1496 tmp_bh.b_size = sb->s_blocksize;
1496 err = ext2_get_block(inode, blk, &tmp_bh, 1); 1497 err = ext2_get_block(inode, blk, &tmp_bh, 1);
1497 if (err < 0) 1498 if (err < 0)
1498 goto out; 1499 goto out;
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index e6185031c1cc..ece55565b9cd 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -268,6 +268,16 @@ struct ext4_io_submit {
268/* Translate # of blks to # of clusters */ 268/* Translate # of blks to # of clusters */
269#define EXT4_NUM_B2C(sbi, blks) (((blks) + (sbi)->s_cluster_ratio - 1) >> \ 269#define EXT4_NUM_B2C(sbi, blks) (((blks) + (sbi)->s_cluster_ratio - 1) >> \
270 (sbi)->s_cluster_bits) 270 (sbi)->s_cluster_bits)
271/* Mask out the low bits to get the starting block of the cluster */
272#define EXT4_PBLK_CMASK(s, pblk) ((pblk) & \
273 ~((ext4_fsblk_t) (s)->s_cluster_ratio - 1))
274#define EXT4_LBLK_CMASK(s, lblk) ((lblk) & \
275 ~((ext4_lblk_t) (s)->s_cluster_ratio - 1))
276/* Get the cluster offset */
277#define EXT4_PBLK_COFF(s, pblk) ((pblk) & \
278 ((ext4_fsblk_t) (s)->s_cluster_ratio - 1))
279#define EXT4_LBLK_COFF(s, lblk) ((lblk) & \
280 ((ext4_lblk_t) (s)->s_cluster_ratio - 1))
271 281
272/* 282/*
273 * Structure of a blocks group descriptor 283 * Structure of a blocks group descriptor
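
The EXT4_PBLK_CMASK/COFF and EXT4_LBLK_CMASK/COFF macros added above replace the open-coded & (s_cluster_ratio - 1) arithmetic that the extents.c and mballoc.c hunks later in this series clean up. A quick standalone illustration of what they compute, assuming a bigalloc cluster ratio of 16 blocks per cluster (the ratio value is made up for the example):

    #include <stdio.h>
    #include <stdint.h>

    typedef uint64_t ext4_fsblk_t;

    /* Same arithmetic as the new macros, with a fixed example ratio. */
    #define CLUSTER_RATIO 16u
    #define PBLK_CMASK(pblk) ((pblk) & ~((ext4_fsblk_t)CLUSTER_RATIO - 1))
    #define PBLK_COFF(pblk)  ((pblk) &  ((ext4_fsblk_t)CLUSTER_RATIO - 1))

    int main(void)
    {
        ext4_fsblk_t pblk = 35;

        /* Block 35 sits in the cluster that starts at block 32, offset 3. */
        printf("cluster start  = %llu\n", (unsigned long long)PBLK_CMASK(pblk));
        printf("cluster offset = %llu\n", (unsigned long long)PBLK_COFF(pblk));
        return 0;
    }
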
diff --git a/fs/ext4/ext4_jbd2.c b/fs/ext4/ext4_jbd2.c
index 17ac112ab101..3fe29de832c8 100644
--- a/fs/ext4/ext4_jbd2.c
+++ b/fs/ext4/ext4_jbd2.c
@@ -259,6 +259,15 @@ int __ext4_handle_dirty_metadata(const char *where, unsigned int line,
259 if (WARN_ON_ONCE(err)) { 259 if (WARN_ON_ONCE(err)) {
260 ext4_journal_abort_handle(where, line, __func__, bh, 260 ext4_journal_abort_handle(where, line, __func__, bh,
261 handle, err); 261 handle, err);
262 ext4_error_inode(inode, where, line,
263 bh->b_blocknr,
264 "journal_dirty_metadata failed: "
265 "handle type %u started at line %u, "
266 "credits %u/%u, errcode %d",
267 handle->h_type,
268 handle->h_line_no,
269 handle->h_requested_credits,
270 handle->h_buffer_credits, err);
262 } 271 }
263 } else { 272 } else {
264 if (inode) 273 if (inode)
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 35f65cf4f318..3384dc4bed40 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -360,8 +360,10 @@ static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext)
360{ 360{
361 ext4_fsblk_t block = ext4_ext_pblock(ext); 361 ext4_fsblk_t block = ext4_ext_pblock(ext);
362 int len = ext4_ext_get_actual_len(ext); 362 int len = ext4_ext_get_actual_len(ext);
363 ext4_lblk_t lblock = le32_to_cpu(ext->ee_block);
364 ext4_lblk_t last = lblock + len - 1;
363 365
364 if (len == 0) 366 if (lblock > last)
365 return 0; 367 return 0;
366 return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len); 368 return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len);
367} 369}
@@ -387,11 +389,26 @@ static int ext4_valid_extent_entries(struct inode *inode,
387 if (depth == 0) { 389 if (depth == 0) {
388 /* leaf entries */ 390 /* leaf entries */
389 struct ext4_extent *ext = EXT_FIRST_EXTENT(eh); 391 struct ext4_extent *ext = EXT_FIRST_EXTENT(eh);
392 struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
393 ext4_fsblk_t pblock = 0;
394 ext4_lblk_t lblock = 0;
395 ext4_lblk_t prev = 0;
396 int len = 0;
390 while (entries) { 397 while (entries) {
391 if (!ext4_valid_extent(inode, ext)) 398 if (!ext4_valid_extent(inode, ext))
392 return 0; 399 return 0;
400
401 /* Check for overlapping extents */
402 lblock = le32_to_cpu(ext->ee_block);
403 len = ext4_ext_get_actual_len(ext);
404 if ((lblock <= prev) && prev) {
405 pblock = ext4_ext_pblock(ext);
406 es->s_last_error_block = cpu_to_le64(pblock);
407 return 0;
408 }
393 ext++; 409 ext++;
394 entries--; 410 entries--;
411 prev = lblock + len - 1;
395 } 412 }
396 } else { 413 } else {
397 struct ext4_extent_idx *ext_idx = EXT_FIRST_INDEX(eh); 414 struct ext4_extent_idx *ext_idx = EXT_FIRST_INDEX(eh);
@@ -1834,8 +1851,7 @@ static unsigned int ext4_ext_check_overlap(struct ext4_sb_info *sbi,
1834 depth = ext_depth(inode); 1851 depth = ext_depth(inode);
1835 if (!path[depth].p_ext) 1852 if (!path[depth].p_ext)
1836 goto out; 1853 goto out;
1837 b2 = le32_to_cpu(path[depth].p_ext->ee_block); 1854 b2 = EXT4_LBLK_CMASK(sbi, le32_to_cpu(path[depth].p_ext->ee_block));
1838 b2 &= ~(sbi->s_cluster_ratio - 1);
1839 1855
1840 /* 1856 /*
1841 * get the next allocated block if the extent in the path 1857 * get the next allocated block if the extent in the path
@@ -1845,7 +1861,7 @@ static unsigned int ext4_ext_check_overlap(struct ext4_sb_info *sbi,
1845 b2 = ext4_ext_next_allocated_block(path); 1861 b2 = ext4_ext_next_allocated_block(path);
1846 if (b2 == EXT_MAX_BLOCKS) 1862 if (b2 == EXT_MAX_BLOCKS)
1847 goto out; 1863 goto out;
1848 b2 &= ~(sbi->s_cluster_ratio - 1); 1864 b2 = EXT4_LBLK_CMASK(sbi, b2);
1849 } 1865 }
1850 1866
1851 /* check for wrap through zero on extent logical start block*/ 1867 /* check for wrap through zero on extent logical start block*/
@@ -2504,7 +2520,7 @@ static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
2504 * extent, we have to mark the cluster as used (store negative 2520 * extent, we have to mark the cluster as used (store negative
2505 * cluster number in partial_cluster). 2521 * cluster number in partial_cluster).
2506 */ 2522 */
2507 unaligned = pblk & (sbi->s_cluster_ratio - 1); 2523 unaligned = EXT4_PBLK_COFF(sbi, pblk);
2508 if (unaligned && (ee_len == num) && 2524 if (unaligned && (ee_len == num) &&
2509 (*partial_cluster != -((long long)EXT4_B2C(sbi, pblk)))) 2525 (*partial_cluster != -((long long)EXT4_B2C(sbi, pblk))))
2510 *partial_cluster = EXT4_B2C(sbi, pblk); 2526 *partial_cluster = EXT4_B2C(sbi, pblk);
@@ -2598,7 +2614,7 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
2598 * accidentally freeing it later on 2614 * accidentally freeing it later on
2599 */ 2615 */
2600 pblk = ext4_ext_pblock(ex); 2616 pblk = ext4_ext_pblock(ex);
2601 if (pblk & (sbi->s_cluster_ratio - 1)) 2617 if (EXT4_PBLK_COFF(sbi, pblk))
2602 *partial_cluster = 2618 *partial_cluster =
2603 -((long long)EXT4_B2C(sbi, pblk)); 2619 -((long long)EXT4_B2C(sbi, pblk));
2604 ex--; 2620 ex--;
@@ -3753,7 +3769,7 @@ int ext4_find_delalloc_cluster(struct inode *inode, ext4_lblk_t lblk)
3753{ 3769{
3754 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 3770 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
3755 ext4_lblk_t lblk_start, lblk_end; 3771 ext4_lblk_t lblk_start, lblk_end;
3756 lblk_start = lblk & (~(sbi->s_cluster_ratio - 1)); 3772 lblk_start = EXT4_LBLK_CMASK(sbi, lblk);
3757 lblk_end = lblk_start + sbi->s_cluster_ratio - 1; 3773 lblk_end = lblk_start + sbi->s_cluster_ratio - 1;
3758 3774
3759 return ext4_find_delalloc_range(inode, lblk_start, lblk_end); 3775 return ext4_find_delalloc_range(inode, lblk_start, lblk_end);
@@ -3812,9 +3828,9 @@ get_reserved_cluster_alloc(struct inode *inode, ext4_lblk_t lblk_start,
3812 trace_ext4_get_reserved_cluster_alloc(inode, lblk_start, num_blks); 3828 trace_ext4_get_reserved_cluster_alloc(inode, lblk_start, num_blks);
3813 3829
3814 /* Check towards left side */ 3830 /* Check towards left side */
3815 c_offset = lblk_start & (sbi->s_cluster_ratio - 1); 3831 c_offset = EXT4_LBLK_COFF(sbi, lblk_start);
3816 if (c_offset) { 3832 if (c_offset) {
3817 lblk_from = lblk_start & (~(sbi->s_cluster_ratio - 1)); 3833 lblk_from = EXT4_LBLK_CMASK(sbi, lblk_start);
3818 lblk_to = lblk_from + c_offset - 1; 3834 lblk_to = lblk_from + c_offset - 1;
3819 3835
3820 if (ext4_find_delalloc_range(inode, lblk_from, lblk_to)) 3836 if (ext4_find_delalloc_range(inode, lblk_from, lblk_to))
@@ -3822,7 +3838,7 @@ get_reserved_cluster_alloc(struct inode *inode, ext4_lblk_t lblk_start,
3822 } 3838 }
3823 3839
3824 /* Now check towards right. */ 3840 /* Now check towards right. */
3825 c_offset = (lblk_start + num_blks) & (sbi->s_cluster_ratio - 1); 3841 c_offset = EXT4_LBLK_COFF(sbi, lblk_start + num_blks);
3826 if (allocated_clusters && c_offset) { 3842 if (allocated_clusters && c_offset) {
3827 lblk_from = lblk_start + num_blks; 3843 lblk_from = lblk_start + num_blks;
3828 lblk_to = lblk_from + (sbi->s_cluster_ratio - c_offset) - 1; 3844 lblk_to = lblk_from + (sbi->s_cluster_ratio - c_offset) - 1;
@@ -4030,7 +4046,7 @@ static int get_implied_cluster_alloc(struct super_block *sb,
4030 struct ext4_ext_path *path) 4046 struct ext4_ext_path *path)
4031{ 4047{
4032 struct ext4_sb_info *sbi = EXT4_SB(sb); 4048 struct ext4_sb_info *sbi = EXT4_SB(sb);
4033 ext4_lblk_t c_offset = map->m_lblk & (sbi->s_cluster_ratio-1); 4049 ext4_lblk_t c_offset = EXT4_LBLK_COFF(sbi, map->m_lblk);
4034 ext4_lblk_t ex_cluster_start, ex_cluster_end; 4050 ext4_lblk_t ex_cluster_start, ex_cluster_end;
4035 ext4_lblk_t rr_cluster_start; 4051 ext4_lblk_t rr_cluster_start;
4036 ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block); 4052 ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block);
@@ -4048,8 +4064,7 @@ static int get_implied_cluster_alloc(struct super_block *sb,
4048 (rr_cluster_start == ex_cluster_start)) { 4064 (rr_cluster_start == ex_cluster_start)) {
4049 if (rr_cluster_start == ex_cluster_end) 4065 if (rr_cluster_start == ex_cluster_end)
4050 ee_start += ee_len - 1; 4066 ee_start += ee_len - 1;
4051 map->m_pblk = (ee_start & ~(sbi->s_cluster_ratio - 1)) + 4067 map->m_pblk = EXT4_PBLK_CMASK(sbi, ee_start) + c_offset;
4052 c_offset;
4053 map->m_len = min(map->m_len, 4068 map->m_len = min(map->m_len,
4054 (unsigned) sbi->s_cluster_ratio - c_offset); 4069 (unsigned) sbi->s_cluster_ratio - c_offset);
4055 /* 4070 /*
@@ -4203,7 +4218,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
4203 */ 4218 */
4204 map->m_flags &= ~EXT4_MAP_FROM_CLUSTER; 4219 map->m_flags &= ~EXT4_MAP_FROM_CLUSTER;
4205 newex.ee_block = cpu_to_le32(map->m_lblk); 4220 newex.ee_block = cpu_to_le32(map->m_lblk);
4206 cluster_offset = map->m_lblk & (sbi->s_cluster_ratio-1); 4221 cluster_offset = EXT4_LBLK_COFF(sbi, map->m_lblk);
4207 4222
4208 /* 4223 /*
4209 * If we are doing bigalloc, check to see if the extent returned 4224 * If we are doing bigalloc, check to see if the extent returned
@@ -4271,7 +4286,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
4271 * needed so that future calls to get_implied_cluster_alloc() 4286 * needed so that future calls to get_implied_cluster_alloc()
4272 * work correctly. 4287 * work correctly.
4273 */ 4288 */
4274 offset = map->m_lblk & (sbi->s_cluster_ratio - 1); 4289 offset = EXT4_LBLK_COFF(sbi, map->m_lblk);
4275 ar.len = EXT4_NUM_B2C(sbi, offset+allocated); 4290 ar.len = EXT4_NUM_B2C(sbi, offset+allocated);
4276 ar.goal -= offset; 4291 ar.goal -= offset;
4277 ar.logical -= offset; 4292 ar.logical -= offset;
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 075763474118..61d49ff22c81 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -1206,7 +1206,6 @@ static int ext4_journalled_write_end(struct file *file,
1206 */ 1206 */
1207static int ext4_da_reserve_metadata(struct inode *inode, ext4_lblk_t lblock) 1207static int ext4_da_reserve_metadata(struct inode *inode, ext4_lblk_t lblock)
1208{ 1208{
1209 int retries = 0;
1210 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 1209 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1211 struct ext4_inode_info *ei = EXT4_I(inode); 1210 struct ext4_inode_info *ei = EXT4_I(inode);
1212 unsigned int md_needed; 1211 unsigned int md_needed;
@@ -1218,7 +1217,6 @@ static int ext4_da_reserve_metadata(struct inode *inode, ext4_lblk_t lblock)
1218 * in order to allocate nrblocks 1217 * in order to allocate nrblocks
1219 * worse case is one extent per block 1218 * worse case is one extent per block
1220 */ 1219 */
1221repeat:
1222 spin_lock(&ei->i_block_reservation_lock); 1220 spin_lock(&ei->i_block_reservation_lock);
1223 /* 1221 /*
1224 * ext4_calc_metadata_amount() has side effects, which we have 1222 * ext4_calc_metadata_amount() has side effects, which we have
@@ -1238,10 +1236,6 @@ repeat:
1238 ei->i_da_metadata_calc_len = save_len; 1236 ei->i_da_metadata_calc_len = save_len;
1239 ei->i_da_metadata_calc_last_lblock = save_last_lblock; 1237 ei->i_da_metadata_calc_last_lblock = save_last_lblock;
1240 spin_unlock(&ei->i_block_reservation_lock); 1238 spin_unlock(&ei->i_block_reservation_lock);
1241 if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
1242 cond_resched();
1243 goto repeat;
1244 }
1245 return -ENOSPC; 1239 return -ENOSPC;
1246 } 1240 }
1247 ei->i_reserved_meta_blocks += md_needed; 1241 ei->i_reserved_meta_blocks += md_needed;
@@ -1255,7 +1249,6 @@ repeat:
1255 */ 1249 */
1256static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock) 1250static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock)
1257{ 1251{
1258 int retries = 0;
1259 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 1252 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1260 struct ext4_inode_info *ei = EXT4_I(inode); 1253 struct ext4_inode_info *ei = EXT4_I(inode);
1261 unsigned int md_needed; 1254 unsigned int md_needed;
@@ -1277,7 +1270,6 @@ static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock)
1277 * in order to allocate nrblocks 1270 * in order to allocate nrblocks
1278 * worse case is one extent per block 1271 * worse case is one extent per block
1279 */ 1272 */
1280repeat:
1281 spin_lock(&ei->i_block_reservation_lock); 1273 spin_lock(&ei->i_block_reservation_lock);
1282 /* 1274 /*
1283 * ext4_calc_metadata_amount() has side effects, which we have 1275 * ext4_calc_metadata_amount() has side effects, which we have
@@ -1297,10 +1289,6 @@ repeat:
1297 ei->i_da_metadata_calc_len = save_len; 1289 ei->i_da_metadata_calc_len = save_len;
1298 ei->i_da_metadata_calc_last_lblock = save_last_lblock; 1290 ei->i_da_metadata_calc_last_lblock = save_last_lblock;
1299 spin_unlock(&ei->i_block_reservation_lock); 1291 spin_unlock(&ei->i_block_reservation_lock);
1300 if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
1301 cond_resched();
1302 goto repeat;
1303 }
1304 dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1)); 1292 dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1));
1305 return -ENOSPC; 1293 return -ENOSPC;
1306 } 1294 }
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 4d113efa024c..04a5c7504be9 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -3442,6 +3442,9 @@ static void ext4_mb_pa_callback(struct rcu_head *head)
3442{ 3442{
3443 struct ext4_prealloc_space *pa; 3443 struct ext4_prealloc_space *pa;
3444 pa = container_of(head, struct ext4_prealloc_space, u.pa_rcu); 3444 pa = container_of(head, struct ext4_prealloc_space, u.pa_rcu);
3445
3446 BUG_ON(atomic_read(&pa->pa_count));
3447 BUG_ON(pa->pa_deleted == 0);
3445 kmem_cache_free(ext4_pspace_cachep, pa); 3448 kmem_cache_free(ext4_pspace_cachep, pa);
3446} 3449}
3447 3450
@@ -3455,11 +3458,13 @@ static void ext4_mb_put_pa(struct ext4_allocation_context *ac,
3455 ext4_group_t grp; 3458 ext4_group_t grp;
3456 ext4_fsblk_t grp_blk; 3459 ext4_fsblk_t grp_blk;
3457 3460
3458 if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0)
3459 return;
3460
3461 /* in this short window concurrent discard can set pa_deleted */ 3461 /* in this short window concurrent discard can set pa_deleted */
3462 spin_lock(&pa->pa_lock); 3462 spin_lock(&pa->pa_lock);
3463 if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0) {
3464 spin_unlock(&pa->pa_lock);
3465 return;
3466 }
3467
3463 if (pa->pa_deleted == 1) { 3468 if (pa->pa_deleted == 1) {
3464 spin_unlock(&pa->pa_lock); 3469 spin_unlock(&pa->pa_lock);
3465 return; 3470 return;
@@ -4121,7 +4126,7 @@ ext4_mb_initialize_context(struct ext4_allocation_context *ac,
4121 ext4_get_group_no_and_offset(sb, goal, &group, &block); 4126 ext4_get_group_no_and_offset(sb, goal, &group, &block);
4122 4127
4123 /* set up allocation goals */ 4128 /* set up allocation goals */
4124 ac->ac_b_ex.fe_logical = ar->logical & ~(sbi->s_cluster_ratio - 1); 4129 ac->ac_b_ex.fe_logical = EXT4_LBLK_CMASK(sbi, ar->logical);
4125 ac->ac_status = AC_STATUS_CONTINUE; 4130 ac->ac_status = AC_STATUS_CONTINUE;
4126 ac->ac_sb = sb; 4131 ac->ac_sb = sb;
4127 ac->ac_inode = ar->inode; 4132 ac->ac_inode = ar->inode;
@@ -4663,7 +4668,7 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode,
4663 * blocks at the beginning or the end unless we are explicitly 4668 * blocks at the beginning or the end unless we are explicitly
4664 * requested to avoid doing so. 4669 * requested to avoid doing so.
4665 */ 4670 */
4666 overflow = block & (sbi->s_cluster_ratio - 1); 4671 overflow = EXT4_PBLK_COFF(sbi, block);
4667 if (overflow) { 4672 if (overflow) {
4668 if (flags & EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER) { 4673 if (flags & EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER) {
4669 overflow = sbi->s_cluster_ratio - overflow; 4674 overflow = sbi->s_cluster_ratio - overflow;
@@ -4677,7 +4682,7 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode,
4677 count += overflow; 4682 count += overflow;
4678 } 4683 }
4679 } 4684 }
4680 overflow = count & (sbi->s_cluster_ratio - 1); 4685 overflow = EXT4_LBLK_COFF(sbi, count);
4681 if (overflow) { 4686 if (overflow) {
4682 if (flags & EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER) { 4687 if (flags & EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER) {
4683 if (count > overflow) 4688 if (count > overflow)
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index c977f4e4e63b..1f7784de05b6 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -792,7 +792,7 @@ static void ext4_put_super(struct super_block *sb)
792 } 792 }
793 793
794 ext4_es_unregister_shrinker(sbi); 794 ext4_es_unregister_shrinker(sbi);
795 del_timer(&sbi->s_err_report); 795 del_timer_sync(&sbi->s_err_report);
796 ext4_release_system_zone(sb); 796 ext4_release_system_zone(sb);
797 ext4_mb_release(sb); 797 ext4_mb_release(sb);
798 ext4_ext_release(sb); 798 ext4_ext_release(sb);
@@ -3316,11 +3316,19 @@ int ext4_calculate_overhead(struct super_block *sb)
3316} 3316}
3317 3317
3318 3318
3319static ext4_fsblk_t ext4_calculate_resv_clusters(struct ext4_sb_info *sbi) 3319static ext4_fsblk_t ext4_calculate_resv_clusters(struct super_block *sb)
3320{ 3320{
3321 ext4_fsblk_t resv_clusters; 3321 ext4_fsblk_t resv_clusters;
3322 3322
3323 /* 3323 /*
3324 * There's no need to reserve anything when we aren't using extents.
3325 * The space estimates are exact, there are no unwritten extents,
3326 * hole punching doesn't need new metadata... This is needed especially
3327 * to keep ext2/3 backward compatibility.
3328 */
3329 if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS))
3330 return 0;
3331 /*
3324 * By default we reserve 2% or 4096 clusters, whichever is smaller. 3332 * By default we reserve 2% or 4096 clusters, whichever is smaller.
3325 * This should cover the situations where we can not afford to run 3333 * This should cover the situations where we can not afford to run
3326 * out of space like for example punch hole, or converting 3334 * out of space like for example punch hole, or converting
@@ -3328,7 +3336,8 @@ static ext4_fsblk_t ext4_calculate_resv_clusters(struct ext4_sb_info *sbi)
3328 * allocation would require 1, or 2 blocks, higher numbers are 3336 * allocation would require 1, or 2 blocks, higher numbers are
3329 * very rare. 3337 * very rare.
3330 */ 3338 */
3331 resv_clusters = ext4_blocks_count(sbi->s_es) >> sbi->s_cluster_bits; 3339 resv_clusters = ext4_blocks_count(EXT4_SB(sb)->s_es) >>
3340 EXT4_SB(sb)->s_cluster_bits;
3332 3341
3333 do_div(resv_clusters, 50); 3342 do_div(resv_clusters, 50);
3334 resv_clusters = min_t(ext4_fsblk_t, resv_clusters, 4096); 3343 resv_clusters = min_t(ext4_fsblk_t, resv_clusters, 4096);
@@ -4071,10 +4080,10 @@ no_journal:
4071 "available"); 4080 "available");
4072 } 4081 }
4073 4082
4074 err = ext4_reserve_clusters(sbi, ext4_calculate_resv_clusters(sbi)); 4083 err = ext4_reserve_clusters(sbi, ext4_calculate_resv_clusters(sb));
4075 if (err) { 4084 if (err) {
4076 ext4_msg(sb, KERN_ERR, "failed to reserve %llu clusters for " 4085 ext4_msg(sb, KERN_ERR, "failed to reserve %llu clusters for "
4077 "reserved pool", ext4_calculate_resv_clusters(sbi)); 4086 "reserved pool", ext4_calculate_resv_clusters(sb));
4078 goto failed_mount4a; 4087 goto failed_mount4a;
4079 } 4088 }
4080 4089
@@ -4184,7 +4193,7 @@ failed_mount_wq:
4184 } 4193 }
4185failed_mount3: 4194failed_mount3:
4186 ext4_es_unregister_shrinker(sbi); 4195 ext4_es_unregister_shrinker(sbi);
4187 del_timer(&sbi->s_err_report); 4196 del_timer_sync(&sbi->s_err_report);
4188 if (sbi->s_flex_groups) 4197 if (sbi->s_flex_groups)
4189 ext4_kvfree(sbi->s_flex_groups); 4198 ext4_kvfree(sbi->s_flex_groups);
4190 percpu_counter_destroy(&sbi->s_freeclusters_counter); 4199 percpu_counter_destroy(&sbi->s_freeclusters_counter);
diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
index b7fc035a6943..73f3e4ee4037 100644
--- a/fs/gfs2/aops.c
+++ b/fs/gfs2/aops.c
@@ -986,6 +986,7 @@ static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb,
986{ 986{
987 struct file *file = iocb->ki_filp; 987 struct file *file = iocb->ki_filp;
988 struct inode *inode = file->f_mapping->host; 988 struct inode *inode = file->f_mapping->host;
989 struct address_space *mapping = inode->i_mapping;
989 struct gfs2_inode *ip = GFS2_I(inode); 990 struct gfs2_inode *ip = GFS2_I(inode);
990 struct gfs2_holder gh; 991 struct gfs2_holder gh;
991 int rv; 992 int rv;
@@ -1006,6 +1007,35 @@ static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb,
1006 if (rv != 1) 1007 if (rv != 1)
1007 goto out; /* dio not valid, fall back to buffered i/o */ 1008 goto out; /* dio not valid, fall back to buffered i/o */
1008 1009
1010 /*
1011 * Now since we are holding a deferred (CW) lock at this point, you
1012 * might be wondering why this is ever needed. There is a case however
1013 * where we've granted a deferred local lock against a cached exclusive
1014 * glock. That is ok provided all granted local locks are deferred, but
1015 * it also means that it is possible to encounter pages which are
1016 * cached and possibly also mapped. So here we check for that and sort
1017 * them out ahead of the dio. The glock state machine will take care of
1018 * everything else.
1019 *
1020 * If in fact the cached glock state (gl->gl_state) is deferred (CW) in
1021 * the first place, mapping->nr_pages will always be zero.
1022 */
1023 if (mapping->nrpages) {
1024 loff_t lstart = offset & (PAGE_CACHE_SIZE - 1);
1025 loff_t len = iov_length(iov, nr_segs);
1026 loff_t end = PAGE_ALIGN(offset + len) - 1;
1027
1028 rv = 0;
1029 if (len == 0)
1030 goto out;
1031 if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
1032 unmap_shared_mapping_range(ip->i_inode.i_mapping, offset, len);
1033 rv = filemap_write_and_wait_range(mapping, lstart, end);
1034 if (rv)
1035 return rv;
1036 truncate_inode_pages_range(mapping, lstart, end);
1037 }
1038
1009 rv = __blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov, 1039 rv = __blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
1010 offset, nr_segs, gfs2_get_block_direct, 1040 offset, nr_segs, gfs2_get_block_direct,
1011 NULL, NULL, 0); 1041 NULL, NULL, 0);
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index c8420f7e4db6..6f7a47c05259 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -1655,6 +1655,7 @@ static int dump_holder(struct seq_file *seq, const struct gfs2_holder *gh)
1655 struct task_struct *gh_owner = NULL; 1655 struct task_struct *gh_owner = NULL;
1656 char flags_buf[32]; 1656 char flags_buf[32];
1657 1657
1658 rcu_read_lock();
1658 if (gh->gh_owner_pid) 1659 if (gh->gh_owner_pid)
1659 gh_owner = pid_task(gh->gh_owner_pid, PIDTYPE_PID); 1660 gh_owner = pid_task(gh->gh_owner_pid, PIDTYPE_PID);
1660 gfs2_print_dbg(seq, " H: s:%s f:%s e:%d p:%ld [%s] %pS\n", 1661 gfs2_print_dbg(seq, " H: s:%s f:%s e:%d p:%ld [%s] %pS\n",
@@ -1664,6 +1665,7 @@ static int dump_holder(struct seq_file *seq, const struct gfs2_holder *gh)
1664 gh->gh_owner_pid ? (long)pid_nr(gh->gh_owner_pid) : -1, 1665 gh->gh_owner_pid ? (long)pid_nr(gh->gh_owner_pid) : -1,
1665 gh_owner ? gh_owner->comm : "(ended)", 1666 gh_owner ? gh_owner->comm : "(ended)",
1666 (void *)gh->gh_ip); 1667 (void *)gh->gh_ip);
1668 rcu_read_unlock();
1667 return 0; 1669 return 0;
1668} 1670}
1669 1671
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index db908f697139..f88dcd925010 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -192,8 +192,11 @@ static void inode_go_sync(struct gfs2_glock *gl)
192 192
193 if (ip && !S_ISREG(ip->i_inode.i_mode)) 193 if (ip && !S_ISREG(ip->i_inode.i_mode))
194 ip = NULL; 194 ip = NULL;
195 if (ip && test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags)) 195 if (ip) {
196 unmap_shared_mapping_range(ip->i_inode.i_mapping, 0, 0); 196 if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
197 unmap_shared_mapping_range(ip->i_inode.i_mapping, 0, 0);
198 inode_dio_wait(&ip->i_inode);
199 }
197 if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags)) 200 if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
198 return; 201 return;
199 202
@@ -410,6 +413,9 @@ static int inode_go_lock(struct gfs2_holder *gh)
410 return error; 413 return error;
411 } 414 }
412 415
416 if (gh->gh_state != LM_ST_DEFERRED)
417 inode_dio_wait(&ip->i_inode);
418
413 if ((ip->i_diskflags & GFS2_DIF_TRUNC_IN_PROG) && 419 if ((ip->i_diskflags & GFS2_DIF_TRUNC_IN_PROG) &&
414 (gl->gl_state == LM_ST_EXCLUSIVE) && 420 (gl->gl_state == LM_ST_EXCLUSIVE) &&
415 (gh->gh_state == LM_ST_EXCLUSIVE)) { 421 (gh->gh_state == LM_ST_EXCLUSIVE)) {
diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
index 610613fb65b5..9dcb9777a5f8 100644
--- a/fs/gfs2/log.c
+++ b/fs/gfs2/log.c
@@ -551,10 +551,10 @@ void gfs2_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd)
551 struct buffer_head *bh = bd->bd_bh; 551 struct buffer_head *bh = bd->bd_bh;
552 struct gfs2_glock *gl = bd->bd_gl; 552 struct gfs2_glock *gl = bd->bd_gl;
553 553
554 gfs2_remove_from_ail(bd);
555 bd->bd_bh = NULL;
556 bh->b_private = NULL; 554 bh->b_private = NULL;
557 bd->bd_blkno = bh->b_blocknr; 555 bd->bd_blkno = bh->b_blocknr;
556 gfs2_remove_from_ail(bd); /* drops ref on bh */
557 bd->bd_bh = NULL;
558 bd->bd_ops = &gfs2_revoke_lops; 558 bd->bd_ops = &gfs2_revoke_lops;
559 sdp->sd_log_num_revoke++; 559 sdp->sd_log_num_revoke++;
560 atomic_inc(&gl->gl_revokes); 560 atomic_inc(&gl->gl_revokes);
diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c
index 932415050540..52f177be3bf8 100644
--- a/fs/gfs2/meta_io.c
+++ b/fs/gfs2/meta_io.c
@@ -258,6 +258,7 @@ void gfs2_remove_from_journal(struct buffer_head *bh, struct gfs2_trans *tr, int
258 struct address_space *mapping = bh->b_page->mapping; 258 struct address_space *mapping = bh->b_page->mapping;
259 struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping); 259 struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
260 struct gfs2_bufdata *bd = bh->b_private; 260 struct gfs2_bufdata *bd = bh->b_private;
261 int was_pinned = 0;
261 262
262 if (test_clear_buffer_pinned(bh)) { 263 if (test_clear_buffer_pinned(bh)) {
263 trace_gfs2_pin(bd, 0); 264 trace_gfs2_pin(bd, 0);
@@ -273,12 +274,16 @@ void gfs2_remove_from_journal(struct buffer_head *bh, struct gfs2_trans *tr, int
273 tr->tr_num_databuf_rm++; 274 tr->tr_num_databuf_rm++;
274 } 275 }
275 tr->tr_touched = 1; 276 tr->tr_touched = 1;
277 was_pinned = 1;
276 brelse(bh); 278 brelse(bh);
277 } 279 }
278 if (bd) { 280 if (bd) {
279 spin_lock(&sdp->sd_ail_lock); 281 spin_lock(&sdp->sd_ail_lock);
280 if (bd->bd_tr) { 282 if (bd->bd_tr) {
281 gfs2_trans_add_revoke(sdp, bd); 283 gfs2_trans_add_revoke(sdp, bd);
284 } else if (was_pinned) {
285 bh->b_private = NULL;
286 kmem_cache_free(gfs2_bufdata_cachep, bd);
282 } 287 }
283 spin_unlock(&sdp->sd_ail_lock); 288 spin_unlock(&sdp->sd_ail_lock);
284 } 289 }
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index 82303b474958..52fa88314f5c 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -1366,8 +1366,18 @@ static struct dentry *gfs2_mount(struct file_system_type *fs_type, int flags,
1366 if (IS_ERR(s)) 1366 if (IS_ERR(s))
1367 goto error_bdev; 1367 goto error_bdev;
1368 1368
1369 if (s->s_root) 1369 if (s->s_root) {
1370 /*
1371 * s_umount nests inside bd_mutex during
1372 * __invalidate_device(). blkdev_put() acquires
1373 * bd_mutex and can't be called under s_umount. Drop
1374 * s_umount temporarily. This is safe as we're
1375 * holding an active reference.
1376 */
1377 up_write(&s->s_umount);
1370 blkdev_put(bdev, mode); 1378 blkdev_put(bdev, mode);
1379 down_write(&s->s_umount);
1380 }
1371 1381
1372 memset(&args, 0, sizeof(args)); 1382 memset(&args, 0, sizeof(args));
1373 args.ar_quota = GFS2_QUOTA_DEFAULT; 1383 args.ar_quota = GFS2_QUOTA_DEFAULT;
diff --git a/fs/hfsplus/wrapper.c b/fs/hfsplus/wrapper.c
index b51a6079108d..e9a97a0d4314 100644
--- a/fs/hfsplus/wrapper.c
+++ b/fs/hfsplus/wrapper.c
@@ -24,13 +24,6 @@ struct hfsplus_wd {
24 u16 embed_count; 24 u16 embed_count;
25}; 25};
26 26
27static void hfsplus_end_io_sync(struct bio *bio, int err)
28{
29 if (err)
30 clear_bit(BIO_UPTODATE, &bio->bi_flags);
31 complete(bio->bi_private);
32}
33
34/* 27/*
35 * hfsplus_submit_bio - Perform block I/O 28 * hfsplus_submit_bio - Perform block I/O
36 * @sb: super block of volume for I/O 29 * @sb: super block of volume for I/O
@@ -53,7 +46,6 @@ static void hfsplus_end_io_sync(struct bio *bio, int err)
53int hfsplus_submit_bio(struct super_block *sb, sector_t sector, 46int hfsplus_submit_bio(struct super_block *sb, sector_t sector,
54 void *buf, void **data, int rw) 47 void *buf, void **data, int rw)
55{ 48{
56 DECLARE_COMPLETION_ONSTACK(wait);
57 struct bio *bio; 49 struct bio *bio;
58 int ret = 0; 50 int ret = 0;
59 u64 io_size; 51 u64 io_size;
@@ -73,8 +65,6 @@ int hfsplus_submit_bio(struct super_block *sb, sector_t sector,
73 bio = bio_alloc(GFP_NOIO, 1); 65 bio = bio_alloc(GFP_NOIO, 1);
74 bio->bi_sector = sector; 66 bio->bi_sector = sector;
75 bio->bi_bdev = sb->s_bdev; 67 bio->bi_bdev = sb->s_bdev;
76 bio->bi_end_io = hfsplus_end_io_sync;
77 bio->bi_private = &wait;
78 68
79 if (!(rw & WRITE) && data) 69 if (!(rw & WRITE) && data)
80 *data = (u8 *)buf + offset; 70 *data = (u8 *)buf + offset;
@@ -93,12 +83,7 @@ int hfsplus_submit_bio(struct super_block *sb, sector_t sector,
93 buf = (u8 *)buf + len; 83 buf = (u8 *)buf + len;
94 } 84 }
95 85
96 submit_bio(rw, bio); 86 ret = submit_bio_wait(rw, bio);
97 wait_for_completion(&wait);
98
99 if (!bio_flagged(bio, BIO_UPTODATE))
100 ret = -EIO;
101
102out: 87out:
103 bio_put(bio); 88 bio_put(bio);
104 return ret < 0 ? ret : 0; 89 return ret < 0 ? ret : 0;
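
The hfsplus change above (and the logfs dev_bdev.c hunk further down) retires the open-coded completion/end_io pair in favour of submit_bio_wait(), which submits the bio, sleeps until it completes, and returns the I/O error directly instead of leaving the caller to test BIO_UPTODATE. A rough before/after sketch in the 3.13-era bio API (illustrative only, not taken verbatim from either driver):

    /* Before: open-coded synchronous submission, as removed above. */
    static void sync_end_io(struct bio *bio, int err)
    {
        if (err)
            clear_bit(BIO_UPTODATE, &bio->bi_flags);
        complete(bio->bi_private);
    }

    static int submit_sync_old(int rw, struct bio *bio)
    {
        DECLARE_COMPLETION_ONSTACK(done);

        bio->bi_private = &done;
        bio->bi_end_io = sync_end_io;
        submit_bio(rw, bio);
        wait_for_completion(&done);
        return bio_flagged(bio, BIO_UPTODATE) ? 0 : -EIO;
    }

    /* After: the block layer helper does the waiting and error handling. */
    static int submit_sync_new(int rw, struct bio *bio)
    {
        return submit_bio_wait(rw, bio);
    }
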
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index 52032647dd4a..5fa344afb49a 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -702,7 +702,7 @@ int jbd2_log_wait_commit(journal_t *journal, tid_t tid)
702 read_lock(&journal->j_state_lock); 702 read_lock(&journal->j_state_lock);
703#ifdef CONFIG_JBD2_DEBUG 703#ifdef CONFIG_JBD2_DEBUG
704 if (!tid_geq(journal->j_commit_request, tid)) { 704 if (!tid_geq(journal->j_commit_request, tid)) {
705 printk(KERN_EMERG 705 printk(KERN_ERR
706 "%s: error: j_commit_request=%d, tid=%d\n", 706 "%s: error: j_commit_request=%d, tid=%d\n",
707 __func__, journal->j_commit_request, tid); 707 __func__, journal->j_commit_request, tid);
708 } 708 }
@@ -718,10 +718,8 @@ int jbd2_log_wait_commit(journal_t *journal, tid_t tid)
718 } 718 }
719 read_unlock(&journal->j_state_lock); 719 read_unlock(&journal->j_state_lock);
720 720
721 if (unlikely(is_journal_aborted(journal))) { 721 if (unlikely(is_journal_aborted(journal)))
722 printk(KERN_EMERG "journal commit I/O error\n");
723 err = -EIO; 722 err = -EIO;
724 }
725 return err; 723 return err;
726} 724}
727 725
@@ -1527,13 +1525,13 @@ static int journal_get_superblock(journal_t *journal)
1527 if (JBD2_HAS_COMPAT_FEATURE(journal, JBD2_FEATURE_COMPAT_CHECKSUM) && 1525 if (JBD2_HAS_COMPAT_FEATURE(journal, JBD2_FEATURE_COMPAT_CHECKSUM) &&
1528 JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2)) { 1526 JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2)) {
1529 /* Can't have checksum v1 and v2 on at the same time! */ 1527 /* Can't have checksum v1 and v2 on at the same time! */
1530 printk(KERN_ERR "JBD: Can't enable checksumming v1 and v2 " 1528 printk(KERN_ERR "JBD2: Can't enable checksumming v1 and v2 "
1531 "at the same time!\n"); 1529 "at the same time!\n");
1532 goto out; 1530 goto out;
1533 } 1531 }
1534 1532
1535 if (!jbd2_verify_csum_type(journal, sb)) { 1533 if (!jbd2_verify_csum_type(journal, sb)) {
1536 printk(KERN_ERR "JBD: Unknown checksum type\n"); 1534 printk(KERN_ERR "JBD2: Unknown checksum type\n");
1537 goto out; 1535 goto out;
1538 } 1536 }
1539 1537
@@ -1541,7 +1539,7 @@ static int journal_get_superblock(journal_t *journal)
1541 if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2)) { 1539 if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2)) {
1542 journal->j_chksum_driver = crypto_alloc_shash("crc32c", 0, 0); 1540 journal->j_chksum_driver = crypto_alloc_shash("crc32c", 0, 0);
1543 if (IS_ERR(journal->j_chksum_driver)) { 1541 if (IS_ERR(journal->j_chksum_driver)) {
1544 printk(KERN_ERR "JBD: Cannot load crc32c driver.\n"); 1542 printk(KERN_ERR "JBD2: Cannot load crc32c driver.\n");
1545 err = PTR_ERR(journal->j_chksum_driver); 1543 err = PTR_ERR(journal->j_chksum_driver);
1546 journal->j_chksum_driver = NULL; 1544 journal->j_chksum_driver = NULL;
1547 goto out; 1545 goto out;
@@ -1550,7 +1548,7 @@ static int journal_get_superblock(journal_t *journal)
1550 1548
1551 /* Check superblock checksum */ 1549 /* Check superblock checksum */
1552 if (!jbd2_superblock_csum_verify(journal, sb)) { 1550 if (!jbd2_superblock_csum_verify(journal, sb)) {
1553 printk(KERN_ERR "JBD: journal checksum error\n"); 1551 printk(KERN_ERR "JBD2: journal checksum error\n");
1554 goto out; 1552 goto out;
1555 } 1553 }
1556 1554
@@ -1836,7 +1834,7 @@ int jbd2_journal_set_features (journal_t *journal, unsigned long compat,
1836 journal->j_chksum_driver = crypto_alloc_shash("crc32c", 1834 journal->j_chksum_driver = crypto_alloc_shash("crc32c",
1837 0, 0); 1835 0, 0);
1838 if (IS_ERR(journal->j_chksum_driver)) { 1836 if (IS_ERR(journal->j_chksum_driver)) {
1839 printk(KERN_ERR "JBD: Cannot load crc32c " 1837 printk(KERN_ERR "JBD2: Cannot load crc32c "
1840 "driver.\n"); 1838 "driver.\n");
1841 journal->j_chksum_driver = NULL; 1839 journal->j_chksum_driver = NULL;
1842 return 0; 1840 return 0;
@@ -2645,7 +2643,7 @@ static void __exit journal_exit(void)
2645#ifdef CONFIG_JBD2_DEBUG 2643#ifdef CONFIG_JBD2_DEBUG
2646 int n = atomic_read(&nr_journal_heads); 2644 int n = atomic_read(&nr_journal_heads);
2647 if (n) 2645 if (n)
2648 printk(KERN_EMERG "JBD2: leaked %d journal_heads!\n", n); 2646 printk(KERN_ERR "JBD2: leaked %d journal_heads!\n", n);
2649#endif 2647#endif
2650 jbd2_remove_jbd_stats_proc_entry(); 2648 jbd2_remove_jbd_stats_proc_entry();
2651 jbd2_journal_destroy_caches(); 2649 jbd2_journal_destroy_caches();
diff --git a/fs/jbd2/recovery.c b/fs/jbd2/recovery.c
index 3929c50428b1..3b6bb19d60b1 100644
--- a/fs/jbd2/recovery.c
+++ b/fs/jbd2/recovery.c
@@ -594,7 +594,7 @@ static int do_one_pass(journal_t *journal,
594 be32_to_cpu(tmp->h_sequence))) { 594 be32_to_cpu(tmp->h_sequence))) {
595 brelse(obh); 595 brelse(obh);
596 success = -EIO; 596 success = -EIO;
597 printk(KERN_ERR "JBD: Invalid " 597 printk(KERN_ERR "JBD2: Invalid "
598 "checksum recovering " 598 "checksum recovering "
599 "block %llu in log\n", 599 "block %llu in log\n",
600 blocknr); 600 blocknr);
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index 7aa9a32573bb..8360674c85bc 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -932,7 +932,7 @@ repeat:
932 jbd2_alloc(jh2bh(jh)->b_size, 932 jbd2_alloc(jh2bh(jh)->b_size,
933 GFP_NOFS); 933 GFP_NOFS);
934 if (!frozen_buffer) { 934 if (!frozen_buffer) {
935 printk(KERN_EMERG 935 printk(KERN_ERR
936 "%s: OOM for frozen_buffer\n", 936 "%s: OOM for frozen_buffer\n",
937 __func__); 937 __func__);
938 JBUFFER_TRACE(jh, "oom!"); 938 JBUFFER_TRACE(jh, "oom!");
@@ -1166,7 +1166,7 @@ repeat:
1166 if (!jh->b_committed_data) { 1166 if (!jh->b_committed_data) {
1167 committed_data = jbd2_alloc(jh2bh(jh)->b_size, GFP_NOFS); 1167 committed_data = jbd2_alloc(jh2bh(jh)->b_size, GFP_NOFS);
1168 if (!committed_data) { 1168 if (!committed_data) {
1169 printk(KERN_EMERG "%s: No memory for committed data\n", 1169 printk(KERN_ERR "%s: No memory for committed data\n",
1170 __func__); 1170 __func__);
1171 err = -ENOMEM; 1171 err = -ENOMEM;
1172 goto out; 1172 goto out;
@@ -1290,7 +1290,10 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
1290 * once a transaction -bzzz 1290 * once a transaction -bzzz
1291 */ 1291 */
1292 jh->b_modified = 1; 1292 jh->b_modified = 1;
1293 J_ASSERT_JH(jh, handle->h_buffer_credits > 0); 1293 if (handle->h_buffer_credits <= 0) {
1294 ret = -ENOSPC;
1295 goto out_unlock_bh;
1296 }
1294 handle->h_buffer_credits--; 1297 handle->h_buffer_credits--;
1295 } 1298 }
1296 1299
@@ -1305,7 +1308,7 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
1305 JBUFFER_TRACE(jh, "fastpath"); 1308 JBUFFER_TRACE(jh, "fastpath");
1306 if (unlikely(jh->b_transaction != 1309 if (unlikely(jh->b_transaction !=
1307 journal->j_running_transaction)) { 1310 journal->j_running_transaction)) {
1308 printk(KERN_EMERG "JBD: %s: " 1311 printk(KERN_ERR "JBD2: %s: "
1309 "jh->b_transaction (%llu, %p, %u) != " 1312 "jh->b_transaction (%llu, %p, %u) != "
1310 "journal->j_running_transaction (%p, %u)", 1313 "journal->j_running_transaction (%p, %u)",
1311 journal->j_devname, 1314 journal->j_devname,
@@ -1332,7 +1335,7 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
1332 JBUFFER_TRACE(jh, "already on other transaction"); 1335 JBUFFER_TRACE(jh, "already on other transaction");
1333 if (unlikely(jh->b_transaction != 1336 if (unlikely(jh->b_transaction !=
1334 journal->j_committing_transaction)) { 1337 journal->j_committing_transaction)) {
1335 printk(KERN_EMERG "JBD: %s: " 1338 printk(KERN_ERR "JBD2: %s: "
1336 "jh->b_transaction (%llu, %p, %u) != " 1339 "jh->b_transaction (%llu, %p, %u) != "
1337 "journal->j_committing_transaction (%p, %u)", 1340 "journal->j_committing_transaction (%p, %u)",
1338 journal->j_devname, 1341 journal->j_devname,
@@ -1345,7 +1348,7 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
1345 ret = -EINVAL; 1348 ret = -EINVAL;
1346 } 1349 }
1347 if (unlikely(jh->b_next_transaction != transaction)) { 1350 if (unlikely(jh->b_next_transaction != transaction)) {
1348 printk(KERN_EMERG "JBD: %s: " 1351 printk(KERN_ERR "JBD2: %s: "
1349 "jh->b_next_transaction (%llu, %p, %u) != " 1352 "jh->b_next_transaction (%llu, %p, %u) != "
1350 "transaction (%p, %u)", 1353 "transaction (%p, %u)",
1351 journal->j_devname, 1354 journal->j_devname,
@@ -1373,7 +1376,6 @@ out_unlock_bh:
1373 jbd2_journal_put_journal_head(jh); 1376 jbd2_journal_put_journal_head(jh);
1374out: 1377out:
1375 JBUFFER_TRACE(jh, "exit"); 1378 JBUFFER_TRACE(jh, "exit");
1376 WARN_ON(ret); /* All errors are bugs, so dump the stack */
1377 return ret; 1379 return ret;
1378} 1380}
1379 1381
diff --git a/fs/logfs/dev_bdev.c b/fs/logfs/dev_bdev.c
index 550475ca6a0e..0f95f0d0b313 100644
--- a/fs/logfs/dev_bdev.c
+++ b/fs/logfs/dev_bdev.c
@@ -14,16 +14,10 @@
14 14
15#define PAGE_OFS(ofs) ((ofs) & (PAGE_SIZE-1)) 15#define PAGE_OFS(ofs) ((ofs) & (PAGE_SIZE-1))
16 16
17static void request_complete(struct bio *bio, int err)
18{
19 complete((struct completion *)bio->bi_private);
20}
21
22static int sync_request(struct page *page, struct block_device *bdev, int rw) 17static int sync_request(struct page *page, struct block_device *bdev, int rw)
23{ 18{
24 struct bio bio; 19 struct bio bio;
25 struct bio_vec bio_vec; 20 struct bio_vec bio_vec;
26 struct completion complete;
27 21
28 bio_init(&bio); 22 bio_init(&bio);
29 bio.bi_max_vecs = 1; 23 bio.bi_max_vecs = 1;
@@ -35,13 +29,8 @@ static int sync_request(struct page *page, struct block_device *bdev, int rw)
35 bio.bi_size = PAGE_SIZE; 29 bio.bi_size = PAGE_SIZE;
36 bio.bi_bdev = bdev; 30 bio.bi_bdev = bdev;
37 bio.bi_sector = page->index * (PAGE_SIZE >> 9); 31 bio.bi_sector = page->index * (PAGE_SIZE >> 9);
38 init_completion(&complete);
39 bio.bi_private = &complete;
40 bio.bi_end_io = request_complete;
41 32
42 submit_bio(rw, &bio); 33 return submit_bio_wait(rw, &bio);
43 wait_for_completion(&complete);
44 return test_bit(BIO_UPTODATE, &bio.bi_flags) ? 0 : -EIO;
45} 34}
46 35
47static int bdev_readpage(void *_sb, struct page *page) 36static int bdev_readpage(void *_sb, struct page *page)
diff --git a/fs/namei.c b/fs/namei.c
index 8f77a8cea289..3531deebad30 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -513,8 +513,7 @@ static int unlazy_walk(struct nameidata *nd, struct dentry *dentry)
513 513
514 if (!lockref_get_not_dead(&parent->d_lockref)) { 514 if (!lockref_get_not_dead(&parent->d_lockref)) {
515 nd->path.dentry = NULL; 515 nd->path.dentry = NULL;
516 rcu_read_unlock(); 516 goto out;
517 return -ECHILD;
518 } 517 }
519 518
520 /* 519 /*
@@ -1599,11 +1598,6 @@ static inline int nested_symlink(struct path *path, struct nameidata *nd)
1599 * do a "get_unaligned()" if this helps and is sufficiently 1598 * do a "get_unaligned()" if this helps and is sufficiently
1600 * fast. 1599 * fast.
1601 * 1600 *
1602 * - Little-endian machines (so that we can generate the mask
1603 * of low bytes efficiently). Again, we *could* do a byte
1604 * swapping load on big-endian architectures if that is not
1605 * expensive enough to make the optimization worthless.
1606 *
1607 * - non-CONFIG_DEBUG_PAGEALLOC configurations (so that we 1601 * - non-CONFIG_DEBUG_PAGEALLOC configurations (so that we
1608 * do not trap on the (extremely unlikely) case of a page 1602 * do not trap on the (extremely unlikely) case of a page
1609 * crossing operation. 1603 * crossing operation.
@@ -1647,7 +1641,7 @@ unsigned int full_name_hash(const unsigned char *name, unsigned int len)
1647 if (!len) 1641 if (!len)
1648 goto done; 1642 goto done;
1649 } 1643 }
1650 mask = ~(~0ul << len*8); 1644 mask = bytemask_from_count(len);
1651 hash += mask & a; 1645 hash += mask & a;
1652done: 1646done:
1653 return fold_hash(hash); 1647 return fold_hash(hash);
diff --git a/fs/nfs/blocklayout/blocklayout.h b/fs/nfs/blocklayout/blocklayout.h
index 8485978993e8..9838fb020473 100644
--- a/fs/nfs/blocklayout/blocklayout.h
+++ b/fs/nfs/blocklayout/blocklayout.h
@@ -36,6 +36,7 @@
36#include <linux/nfs_fs.h> 36#include <linux/nfs_fs.h>
37#include <linux/sunrpc/rpc_pipe_fs.h> 37#include <linux/sunrpc/rpc_pipe_fs.h>
38 38
39#include "../nfs4_fs.h"
39#include "../pnfs.h" 40#include "../pnfs.h"
40#include "../netns.h" 41#include "../netns.h"
41 42
diff --git a/fs/nfs/blocklayout/extents.c b/fs/nfs/blocklayout/extents.c
index 9c3e117c3ed1..4d0161442565 100644
--- a/fs/nfs/blocklayout/extents.c
+++ b/fs/nfs/blocklayout/extents.c
@@ -44,7 +44,7 @@
44static inline sector_t normalize(sector_t s, int base) 44static inline sector_t normalize(sector_t s, int base)
45{ 45{
46 sector_t tmp = s; /* Since do_div modifies its argument */ 46 sector_t tmp = s; /* Since do_div modifies its argument */
47 return s - do_div(tmp, base); 47 return s - sector_div(tmp, base);
48} 48}
49 49
50static inline sector_t normalize_up(sector_t s, int base) 50static inline sector_t normalize_up(sector_t s, int base)
diff --git a/fs/nfs/dns_resolve.c b/fs/nfs/dns_resolve.c
index fc0f95ec7358..d25f10fb4926 100644
--- a/fs/nfs/dns_resolve.c
+++ b/fs/nfs/dns_resolve.c
@@ -46,7 +46,9 @@ ssize_t nfs_dns_resolve_name(struct net *net, char *name, size_t namelen,
46#include <linux/sunrpc/cache.h> 46#include <linux/sunrpc/cache.h>
47#include <linux/sunrpc/svcauth.h> 47#include <linux/sunrpc/svcauth.h>
48#include <linux/sunrpc/rpc_pipe_fs.h> 48#include <linux/sunrpc/rpc_pipe_fs.h>
49#include <linux/nfs_fs.h>
49 50
51#include "nfs4_fs.h"
50#include "dns_resolve.h" 52#include "dns_resolve.h"
51#include "cache_lib.h" 53#include "cache_lib.h"
52#include "netns.h" 54#include "netns.h"
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 18ab2da4eeb6..00ad1c2b217d 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -312,7 +312,7 @@ struct nfs4_label *nfs4_label_alloc(struct nfs_server *server, gfp_t flags)
312} 312}
313EXPORT_SYMBOL_GPL(nfs4_label_alloc); 313EXPORT_SYMBOL_GPL(nfs4_label_alloc);
314#else 314#else
315void inline nfs_setsecurity(struct inode *inode, struct nfs_fattr *fattr, 315void nfs_setsecurity(struct inode *inode, struct nfs_fattr *fattr,
316 struct nfs4_label *label) 316 struct nfs4_label *label)
317{ 317{
318} 318}
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index bca6a3e3c49c..8b5cc04a8611 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -269,6 +269,21 @@ extern const u32 nfs41_maxgetdevinfo_overhead;
269extern struct rpc_procinfo nfs4_procedures[]; 269extern struct rpc_procinfo nfs4_procedures[];
270#endif 270#endif
271 271
272#ifdef CONFIG_NFS_V4_SECURITY_LABEL
273extern struct nfs4_label *nfs4_label_alloc(struct nfs_server *server, gfp_t flags);
274static inline void nfs4_label_free(struct nfs4_label *label)
275{
276 if (label) {
277 kfree(label->label);
278 kfree(label);
279 }
280 return;
281}
282#else
283static inline struct nfs4_label *nfs4_label_alloc(struct nfs_server *server, gfp_t flags) { return NULL; }
284static inline void nfs4_label_free(void *label) {}
285#endif /* CONFIG_NFS_V4_SECURITY_LABEL */
286
272/* proc.c */ 287/* proc.c */
273void nfs_close_context(struct nfs_open_context *ctx, int is_sync); 288void nfs_close_context(struct nfs_open_context *ctx, int is_sync);
274extern struct nfs_client *nfs_init_client(struct nfs_client *clp, 289extern struct nfs_client *nfs_init_client(struct nfs_client *clp,
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index 3ce79b04522e..5609edc742a0 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -9,6 +9,14 @@
9#ifndef __LINUX_FS_NFS_NFS4_FS_H 9#ifndef __LINUX_FS_NFS_NFS4_FS_H
10#define __LINUX_FS_NFS_NFS4_FS_H 10#define __LINUX_FS_NFS_NFS4_FS_H
11 11
12#if defined(CONFIG_NFS_V4_2)
13#define NFS4_MAX_MINOR_VERSION 2
14#elif defined(CONFIG_NFS_V4_1)
15#define NFS4_MAX_MINOR_VERSION 1
16#else
17#define NFS4_MAX_MINOR_VERSION 0
18#endif
19
12#if IS_ENABLED(CONFIG_NFS_V4) 20#if IS_ENABLED(CONFIG_NFS_V4)
13 21
14#define NFS4_MAX_LOOP_ON_RECOVER (10) 22#define NFS4_MAX_LOOP_ON_RECOVER (10)
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 659990c0109e..15052b81df42 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -2518,9 +2518,8 @@ static void nfs4_close_done(struct rpc_task *task, void *data)
2518 calldata->roc_barrier); 2518 calldata->roc_barrier);
2519 nfs_set_open_stateid(state, &calldata->res.stateid, 0); 2519 nfs_set_open_stateid(state, &calldata->res.stateid, 0);
2520 renew_lease(server, calldata->timestamp); 2520 renew_lease(server, calldata->timestamp);
2521 nfs4_close_clear_stateid_flags(state,
2522 calldata->arg.fmode);
2523 break; 2521 break;
2522 case -NFS4ERR_ADMIN_REVOKED:
2524 case -NFS4ERR_STALE_STATEID: 2523 case -NFS4ERR_STALE_STATEID:
2525 case -NFS4ERR_OLD_STATEID: 2524 case -NFS4ERR_OLD_STATEID:
2526 case -NFS4ERR_BAD_STATEID: 2525 case -NFS4ERR_BAD_STATEID:
@@ -2528,9 +2527,13 @@ static void nfs4_close_done(struct rpc_task *task, void *data)
2528 if (calldata->arg.fmode == 0) 2527 if (calldata->arg.fmode == 0)
2529 break; 2528 break;
2530 default: 2529 default:
2531 if (nfs4_async_handle_error(task, server, state) == -EAGAIN) 2530 if (nfs4_async_handle_error(task, server, state) == -EAGAIN) {
2532 rpc_restart_call_prepare(task); 2531 rpc_restart_call_prepare(task);
2532 goto out_release;
2533 }
2533 } 2534 }
2535 nfs4_close_clear_stateid_flags(state, calldata->arg.fmode);
2536out_release:
2534 nfs_release_seqid(calldata->arg.seqid); 2537 nfs_release_seqid(calldata->arg.seqid);
2535 nfs_refresh_inode(calldata->inode, calldata->res.fattr); 2538 nfs_refresh_inode(calldata->inode, calldata->res.fattr);
2536 dprintk("%s: done, ret = %d!\n", __func__, task->tk_status); 2539 dprintk("%s: done, ret = %d!\n", __func__, task->tk_status);
@@ -4802,7 +4805,7 @@ nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server,
4802 dprintk("%s ERROR %d, Reset session\n", __func__, 4805 dprintk("%s ERROR %d, Reset session\n", __func__,
4803 task->tk_status); 4806 task->tk_status);
4804 nfs4_schedule_session_recovery(clp->cl_session, task->tk_status); 4807 nfs4_schedule_session_recovery(clp->cl_session, task->tk_status);
4805 goto restart_call; 4808 goto wait_on_recovery;
4806#endif /* CONFIG_NFS_V4_1 */ 4809#endif /* CONFIG_NFS_V4_1 */
4807 case -NFS4ERR_DELAY: 4810 case -NFS4ERR_DELAY:
4808 nfs_inc_server_stats(server, NFSIOS_DELAY); 4811 nfs_inc_server_stats(server, NFSIOS_DELAY);
@@ -4987,11 +4990,17 @@ static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
4987 4990
4988 trace_nfs4_delegreturn_exit(&data->args, &data->res, task->tk_status); 4991 trace_nfs4_delegreturn_exit(&data->args, &data->res, task->tk_status);
4989 switch (task->tk_status) { 4992 switch (task->tk_status) {
4990 case -NFS4ERR_STALE_STATEID:
4991 case -NFS4ERR_EXPIRED:
4992 case 0: 4993 case 0:
4993 renew_lease(data->res.server, data->timestamp); 4994 renew_lease(data->res.server, data->timestamp);
4994 break; 4995 break;
4996 case -NFS4ERR_ADMIN_REVOKED:
4997 case -NFS4ERR_DELEG_REVOKED:
4998 case -NFS4ERR_BAD_STATEID:
4999 case -NFS4ERR_OLD_STATEID:
5000 case -NFS4ERR_STALE_STATEID:
5001 case -NFS4ERR_EXPIRED:
5002 task->tk_status = 0;
5003 break;
4995 default: 5004 default:
4996 if (nfs4_async_handle_error(task, data->res.server, NULL) == 5005 if (nfs4_async_handle_error(task, data->res.server, NULL) ==
4997 -EAGAIN) { 5006 -EAGAIN) {
@@ -7589,7 +7598,14 @@ static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata)
7589 return; 7598 return;
7590 7599
7591 server = NFS_SERVER(lrp->args.inode); 7600 server = NFS_SERVER(lrp->args.inode);
7592 if (nfs4_async_handle_error(task, server, NULL) == -EAGAIN) { 7601 switch (task->tk_status) {
7602 default:
7603 task->tk_status = 0;
7604 case 0:
7605 break;
7606 case -NFS4ERR_DELAY:
7607 if (nfs4_async_handle_error(task, server, NULL) != -EAGAIN)
7608 break;
7593 rpc_restart_call_prepare(task); 7609 rpc_restart_call_prepare(task);
7594 return; 7610 return;
7595 } 7611 }
diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
index 9186c7ce0b14..b6af150c96b8 100644
--- a/fs/nfsd/nfscache.c
+++ b/fs/nfsd/nfscache.c
@@ -132,6 +132,13 @@ nfsd_reply_cache_alloc(void)
132} 132}
133 133
134static void 134static void
135nfsd_reply_cache_unhash(struct svc_cacherep *rp)
136{
137 hlist_del_init(&rp->c_hash);
138 list_del_init(&rp->c_lru);
139}
140
141static void
135nfsd_reply_cache_free_locked(struct svc_cacherep *rp) 142nfsd_reply_cache_free_locked(struct svc_cacherep *rp)
136{ 143{
137 if (rp->c_type == RC_REPLBUFF && rp->c_replvec.iov_base) { 144 if (rp->c_type == RC_REPLBUFF && rp->c_replvec.iov_base) {
@@ -417,7 +424,7 @@ nfsd_cache_lookup(struct svc_rqst *rqstp)
417 rp = list_first_entry(&lru_head, struct svc_cacherep, c_lru); 424 rp = list_first_entry(&lru_head, struct svc_cacherep, c_lru);
418 if (nfsd_cache_entry_expired(rp) || 425 if (nfsd_cache_entry_expired(rp) ||
419 num_drc_entries >= max_drc_entries) { 426 num_drc_entries >= max_drc_entries) {
420 lru_put_end(rp); 427 nfsd_reply_cache_unhash(rp);
421 prune_cache_entries(); 428 prune_cache_entries();
422 goto search_cache; 429 goto search_cache;
423 } 430 }
diff --git a/fs/pipe.c b/fs/pipe.c
index d2c45e14e6d8..0e0752ef2715 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -726,11 +726,25 @@ pipe_poll(struct file *filp, poll_table *wait)
726 return mask; 726 return mask;
727} 727}
728 728
729static void put_pipe_info(struct inode *inode, struct pipe_inode_info *pipe)
730{
731 int kill = 0;
732
733 spin_lock(&inode->i_lock);
734 if (!--pipe->files) {
735 inode->i_pipe = NULL;
736 kill = 1;
737 }
738 spin_unlock(&inode->i_lock);
739
740 if (kill)
741 free_pipe_info(pipe);
742}
743
729static int 744static int
730pipe_release(struct inode *inode, struct file *file) 745pipe_release(struct inode *inode, struct file *file)
731{ 746{
732 struct pipe_inode_info *pipe = inode->i_pipe; 747 struct pipe_inode_info *pipe = file->private_data;
733 int kill = 0;
734 748
735 __pipe_lock(pipe); 749 __pipe_lock(pipe);
736 if (file->f_mode & FMODE_READ) 750 if (file->f_mode & FMODE_READ)
@@ -743,17 +757,9 @@ pipe_release(struct inode *inode, struct file *file)
743 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN); 757 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
744 kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT); 758 kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
745 } 759 }
746 spin_lock(&inode->i_lock);
747 if (!--pipe->files) {
748 inode->i_pipe = NULL;
749 kill = 1;
750 }
751 spin_unlock(&inode->i_lock);
752 __pipe_unlock(pipe); 760 __pipe_unlock(pipe);
753 761
754 if (kill) 762 put_pipe_info(inode, pipe);
755 free_pipe_info(pipe);
756
757 return 0; 763 return 0;
758} 764}
759 765
@@ -1014,7 +1020,6 @@ static int fifo_open(struct inode *inode, struct file *filp)
1014{ 1020{
1015 struct pipe_inode_info *pipe; 1021 struct pipe_inode_info *pipe;
1016 bool is_pipe = inode->i_sb->s_magic == PIPEFS_MAGIC; 1022 bool is_pipe = inode->i_sb->s_magic == PIPEFS_MAGIC;
1017 int kill = 0;
1018 int ret; 1023 int ret;
1019 1024
1020 filp->f_version = 0; 1025 filp->f_version = 0;
@@ -1130,15 +1135,9 @@ err_wr:
1130 goto err; 1135 goto err;
1131 1136
1132err: 1137err:
1133 spin_lock(&inode->i_lock);
1134 if (!--pipe->files) {
1135 inode->i_pipe = NULL;
1136 kill = 1;
1137 }
1138 spin_unlock(&inode->i_lock);
1139 __pipe_unlock(pipe); 1138 __pipe_unlock(pipe);
1140 if (kill) 1139
1141 free_pipe_info(pipe); 1140 put_pipe_info(inode, pipe);
1142 return ret; 1141 return ret;
1143} 1142}
1144 1143
diff --git a/fs/proc/inode.c b/fs/proc/inode.c
index 28955d4b7218..124fc43c7090 100644
--- a/fs/proc/inode.c
+++ b/fs/proc/inode.c
@@ -292,16 +292,20 @@ proc_reg_get_unmapped_area(struct file *file, unsigned long orig_addr,
292{ 292{
293 struct proc_dir_entry *pde = PDE(file_inode(file)); 293 struct proc_dir_entry *pde = PDE(file_inode(file));
294 unsigned long rv = -EIO; 294 unsigned long rv = -EIO;
295 unsigned long (*get_area)(struct file *, unsigned long, unsigned long, 295
296 unsigned long, unsigned long) = NULL;
297 if (use_pde(pde)) { 296 if (use_pde(pde)) {
297 typeof(proc_reg_get_unmapped_area) *get_area;
298
299 get_area = pde->proc_fops->get_unmapped_area;
298#ifdef CONFIG_MMU 300#ifdef CONFIG_MMU
299 get_area = current->mm->get_unmapped_area; 301 if (!get_area)
302 get_area = current->mm->get_unmapped_area;
300#endif 303#endif
301 if (pde->proc_fops->get_unmapped_area) 304
302 get_area = pde->proc_fops->get_unmapped_area;
303 if (get_area) 305 if (get_area)
304 rv = get_area(file, orig_addr, len, pgoff, flags); 306 rv = get_area(file, orig_addr, len, pgoff, flags);
307 else
308 rv = orig_addr;
305 unuse_pde(pde); 309 unuse_pde(pde);
306 } 310 }
307 return rv; 311 return rv;
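
Besides preferring the file's own get_unmapped_area and falling back to the mm default only when it is absent, the proc_reg_get_unmapped_area() rewrite above declares the local pointer as typeof(proc_reg_get_unmapped_area) *, so its prototype stays tied to the function it shadows instead of repeating the five-argument spelling. The idiom is ordinary GNU C; a small standalone illustration (the function here is invented for the example):

    #include <stdio.h>

    static unsigned long pick_area(void *filp, unsigned long addr,
                                   unsigned long len, unsigned long pgoff,
                                   unsigned long flags)
    {
        return addr + pgoff;    /* stand-in body, just for the demo */
    }

    int main(void)
    {
        /* Pointer type derived from the function, so the two cannot drift. */
        typeof(pick_area) *get_area = pick_area;

        printf("%#lx\n", get_area(NULL, 0x1000, 0, 0x10, 0));
        return 0;
    }
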
diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c
index b8e93a40a5d3..78c3c2097787 100644
--- a/fs/pstore/platform.c
+++ b/fs/pstore/platform.c
@@ -443,8 +443,11 @@ int pstore_register(struct pstore_info *psi)
443 pstore_get_records(0); 443 pstore_get_records(0);
444 444
445 kmsg_dump_register(&pstore_dumper); 445 kmsg_dump_register(&pstore_dumper);
446 pstore_register_console(); 446
447 pstore_register_ftrace(); 447 if ((psi->flags & PSTORE_FLAGS_FRAGILE) == 0) {
448 pstore_register_console();
449 pstore_register_ftrace();
450 }
448 451
449 if (pstore_update_ms >= 0) { 452 if (pstore_update_ms >= 0) {
450 pstore_timer.expires = jiffies + 453 pstore_timer.expires = jiffies +
diff --git a/fs/squashfs/file_direct.c b/fs/squashfs/file_direct.c
index 2943b2bfae48..62a0de6632e1 100644
--- a/fs/squashfs/file_direct.c
+++ b/fs/squashfs/file_direct.c
@@ -84,6 +84,9 @@ int squashfs_readpage_block(struct page *target_page, u64 block, int bsize)
84 */ 84 */
85 res = squashfs_read_cache(target_page, block, bsize, pages, 85 res = squashfs_read_cache(target_page, block, bsize, pages,
86 page); 86 page);
87 if (res < 0)
88 goto mark_errored;
89
87 goto out; 90 goto out;
88 } 91 }
89 92
@@ -119,7 +122,7 @@ mark_errored:
119 * dealt with by the caller 122 * dealt with by the caller
120 */ 123 */
121 for (i = 0; i < pages; i++) { 124 for (i = 0; i < pages; i++) {
122 if (page[i] == target_page) 125 if (page[i] == NULL || page[i] == target_page)
123 continue; 126 continue;
124 flush_dcache_page(page[i]); 127 flush_dcache_page(page[i]);
125 SetPageError(page[i]); 128 SetPageError(page[i]);
diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
index 79b5da2acbe1..35e7d08fe629 100644
--- a/fs/sysfs/file.c
+++ b/fs/sysfs/file.c
@@ -649,7 +649,23 @@ static int sysfs_open_file(struct inode *inode, struct file *file)
649 if (!of) 649 if (!of)
650 goto err_out; 650 goto err_out;
651 651
652 mutex_init(&of->mutex); 652 /*
653 * The following is done to give a different lockdep key to
654 * @of->mutex for files which implement mmap. This is a rather
655 * crude way to avoid false positive lockdep warning around
656 * mm->mmap_sem - mmap nests @of->mutex under mm->mmap_sem and
657 * reading /sys/block/sda/trace/act_mask grabs sr_mutex, under
658 * which mm->mmap_sem nests, while holding @of->mutex. As each
659 * open file has a separate mutex, it's okay as long as those don't
660 * happen on the same file. At this point, we can't easily give
661 * each file a separate locking class. Let's differentiate on
662 * whether the file is bin or not for now.
663 */
664 if (sysfs_is_bin(attr_sd))
665 mutex_init(&of->mutex);
666 else
667 mutex_init(&of->mutex);
668
653 of->sd = attr_sd; 669 of->sd = attr_sd;
654 of->file = file; 670 of->file = file;
655 671
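
The sysfs comment above works because mutex_init() is a macro that declares a static lock_class_key at each textual expansion, so calling it from two branches gives bin-file mutexes their own lockdep class. A rough userspace illustration of callsite-keyed initialization, with invented names and without modelling lockdep itself:

#include <stdio.h>

/* Hypothetical stand-in for struct lock_class_key: one static object is
 * declared per textual expansion of my_mutex_init(), so two different
 * callsites yield two different keys even for the same mutex type. */
struct lock_class_key { int dummy; };

struct my_mutex { const struct lock_class_key *key; };

#define my_mutex_init(m) do {				\
	static struct lock_class_key __key;		\
	(m)->key = &__key;				\
} while (0)

int main(void)
{
	struct my_mutex a, b, c;

	my_mutex_init(&a);	/* callsite 1 */
	my_mutex_init(&b);	/* callsite 2: its own static __key */
	my_mutex_init(&c);	/* callsite 3 */

	/* All three keys differ; two mutexes initialized through the same
	 * textual callsite would share one key. */
	printf("a.key=%p b.key=%p c.key=%p\n",
	       (void *)a.key, (void *)b.key, (void *)c.key);
	return 0;
}
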
diff --git a/fs/xfs/xfs_attr_remote.c b/fs/xfs/xfs_attr_remote.c
index 739e0a52deda..5549d69ddb45 100644
--- a/fs/xfs/xfs_attr_remote.c
+++ b/fs/xfs/xfs_attr_remote.c
@@ -110,7 +110,7 @@ xfs_attr3_rmt_verify(
110 if (be32_to_cpu(rmt->rm_bytes) > fsbsize - sizeof(*rmt)) 110 if (be32_to_cpu(rmt->rm_bytes) > fsbsize - sizeof(*rmt))
111 return false; 111 return false;
112 if (be32_to_cpu(rmt->rm_offset) + 112 if (be32_to_cpu(rmt->rm_offset) +
113 be32_to_cpu(rmt->rm_bytes) >= XATTR_SIZE_MAX) 113 be32_to_cpu(rmt->rm_bytes) > XATTR_SIZE_MAX)
114 return false; 114 return false;
115 if (rmt->rm_owner == 0) 115 if (rmt->rm_owner == 0)
116 return false; 116 return false;
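
The xfs_attr3_rmt_verify() change above relaxes the bound from >= to > so a remote attribute that ends exactly at XATTR_SIZE_MAX is accepted. A tiny worked check of the inclusive limit (standalone sketch, reusing the kernel's 65536 limit):

#include <assert.h>

#define XATTR_SIZE_MAX 65536	/* same value as the kernel's limit */

/* Reject only when offset + bytes actually exceeds the maximum;
 * ending exactly at the limit is a legal remote attribute. */
static int rmt_len_ok(unsigned int offset, unsigned int bytes)
{
	return !(offset + bytes > XATTR_SIZE_MAX);
}

int main(void)
{
	assert(rmt_len_ok(XATTR_SIZE_MAX - 100, 100));	/* ends at the limit: OK */
	assert(!rmt_len_ok(XATTR_SIZE_MAX - 100, 101));	/* one byte over: rejected */
	return 0;
}
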
diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
index 3ef11b22e750..3b2c14b6f0fb 100644
--- a/fs/xfs/xfs_bmap.c
+++ b/fs/xfs/xfs_bmap.c
@@ -1635,7 +1635,7 @@ xfs_bmap_last_extent(
1635 * blocks at the end of the file which do not start at the previous data block, 1635 * blocks at the end of the file which do not start at the previous data block,
1636 * we will try to align the new blocks at stripe unit boundaries. 1636 * we will try to align the new blocks at stripe unit boundaries.
1637 * 1637 *
1638 * Returns 0 in bma->aeof if the file (fork) is empty as any new write will be 1638 * Returns 1 in bma->aeof if the file (fork) is empty as any new write will be
1639 * at, or past the EOF. 1639 * at, or past the EOF.
1640 */ 1640 */
1641STATIC int 1641STATIC int
@@ -1650,9 +1650,14 @@ xfs_bmap_isaeof(
1650 bma->aeof = 0; 1650 bma->aeof = 0;
1651 error = xfs_bmap_last_extent(NULL, bma->ip, whichfork, &rec, 1651 error = xfs_bmap_last_extent(NULL, bma->ip, whichfork, &rec,
1652 &is_empty); 1652 &is_empty);
1653 if (error || is_empty) 1653 if (error)
1654 return error; 1654 return error;
1655 1655
1656 if (is_empty) {
1657 bma->aeof = 1;
1658 return 0;
1659 }
1660
1656 /* 1661 /*
1657 * Check if we are allocating or past the last extent, or at least into 1662 * Check if we are allocating or past the last extent, or at least into
1658 * the last delayed allocated extent. 1663 * the last delayed allocated extent.
@@ -3643,10 +3648,19 @@ xfs_bmap_btalloc(
3643 int isaligned; 3648 int isaligned;
3644 int tryagain; 3649 int tryagain;
3645 int error; 3650 int error;
3651 int stripe_align;
3646 3652
3647 ASSERT(ap->length); 3653 ASSERT(ap->length);
3648 3654
3649 mp = ap->ip->i_mount; 3655 mp = ap->ip->i_mount;
3656
3657 /* stripe alignment for allocation is determined by mount parameters */
3658 stripe_align = 0;
3659 if (mp->m_swidth && (mp->m_flags & XFS_MOUNT_SWALLOC))
3660 stripe_align = mp->m_swidth;
3661 else if (mp->m_dalign)
3662 stripe_align = mp->m_dalign;
3663
3650 align = ap->userdata ? xfs_get_extsz_hint(ap->ip) : 0; 3664 align = ap->userdata ? xfs_get_extsz_hint(ap->ip) : 0;
3651 if (unlikely(align)) { 3665 if (unlikely(align)) {
3652 error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev, 3666 error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
@@ -3655,6 +3669,8 @@ xfs_bmap_btalloc(
3655 ASSERT(!error); 3669 ASSERT(!error);
3656 ASSERT(ap->length); 3670 ASSERT(ap->length);
3657 } 3671 }
3672
3673
3658 nullfb = *ap->firstblock == NULLFSBLOCK; 3674 nullfb = *ap->firstblock == NULLFSBLOCK;
3659 fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, *ap->firstblock); 3675 fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, *ap->firstblock);
3660 if (nullfb) { 3676 if (nullfb) {
@@ -3730,7 +3746,7 @@ xfs_bmap_btalloc(
3730 */ 3746 */
3731 if (!ap->flist->xbf_low && ap->aeof) { 3747 if (!ap->flist->xbf_low && ap->aeof) {
3732 if (!ap->offset) { 3748 if (!ap->offset) {
3733 args.alignment = mp->m_dalign; 3749 args.alignment = stripe_align;
3734 atype = args.type; 3750 atype = args.type;
3735 isaligned = 1; 3751 isaligned = 1;
3736 /* 3752 /*
@@ -3755,13 +3771,13 @@ xfs_bmap_btalloc(
3755 * of minlen+alignment+slop doesn't go up 3771 * of minlen+alignment+slop doesn't go up
3756 * between the calls. 3772 * between the calls.
3757 */ 3773 */
3758 if (blen > mp->m_dalign && blen <= args.maxlen) 3774 if (blen > stripe_align && blen <= args.maxlen)
3759 nextminlen = blen - mp->m_dalign; 3775 nextminlen = blen - stripe_align;
3760 else 3776 else
3761 nextminlen = args.minlen; 3777 nextminlen = args.minlen;
3762 if (nextminlen + mp->m_dalign > args.minlen + 1) 3778 if (nextminlen + stripe_align > args.minlen + 1)
3763 args.minalignslop = 3779 args.minalignslop =
3764 nextminlen + mp->m_dalign - 3780 nextminlen + stripe_align -
3765 args.minlen - 1; 3781 args.minlen - 1;
3766 else 3782 else
3767 args.minalignslop = 0; 3783 args.minalignslop = 0;
@@ -3783,7 +3799,7 @@ xfs_bmap_btalloc(
3783 */ 3799 */
3784 args.type = atype; 3800 args.type = atype;
3785 args.fsbno = ap->blkno; 3801 args.fsbno = ap->blkno;
3786 args.alignment = mp->m_dalign; 3802 args.alignment = stripe_align;
3787 args.minlen = nextminlen; 3803 args.minlen = nextminlen;
3788 args.minalignslop = 0; 3804 args.minalignslop = 0;
3789 isaligned = 1; 3805 isaligned = 1;
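
The xfs_bmap_btalloc() hunks above compute one stripe_align value up front, the stripe width when the swalloc mount option is active, otherwise the stripe unit, and use it wherever m_dalign used to be hard-coded. A sketch of that selection with made-up geometry numbers:

#include <stdio.h>
#include <stdbool.h>

/* Pick the alignment the allocator should aim for: full stripe width when
 * the filesystem was mounted with swalloc, otherwise the stripe unit. */
static unsigned int stripe_align(unsigned int swidth, unsigned int dalign,
				 bool swalloc)
{
	if (swidth && swalloc)
		return swidth;
	if (dalign)
		return dalign;
	return 0;
}

int main(void)
{
	unsigned int dalign = 16;	/* stripe unit, filesystem blocks */
	unsigned int swidth = 64;	/* stripe width = 4 data disks * unit */

	printf("default mount: align to %u blocks\n",
	       stripe_align(swidth, dalign, false));
	printf("swalloc mount: align to %u blocks\n",
	       stripe_align(swidth, dalign, true));
	return 0;
}
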
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index 5887e41c0323..82e0dab46ee5 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -287,6 +287,7 @@ xfs_bmapi_allocate(
287 INIT_WORK_ONSTACK(&args->work, xfs_bmapi_allocate_worker); 287 INIT_WORK_ONSTACK(&args->work, xfs_bmapi_allocate_worker);
288 queue_work(xfs_alloc_wq, &args->work); 288 queue_work(xfs_alloc_wq, &args->work);
289 wait_for_completion(&done); 289 wait_for_completion(&done);
290 destroy_work_on_stack(&args->work);
290 return args->result; 291 return args->result;
291} 292}
292 293
@@ -1187,7 +1188,12 @@ xfs_zero_remaining_bytes(
1187 XFS_BUF_UNWRITE(bp); 1188 XFS_BUF_UNWRITE(bp);
1188 XFS_BUF_READ(bp); 1189 XFS_BUF_READ(bp);
1189 XFS_BUF_SET_ADDR(bp, xfs_fsb_to_db(ip, imap.br_startblock)); 1190 XFS_BUF_SET_ADDR(bp, xfs_fsb_to_db(ip, imap.br_startblock));
1190 xfsbdstrat(mp, bp); 1191
1192 if (XFS_FORCED_SHUTDOWN(mp)) {
1193 error = XFS_ERROR(EIO);
1194 break;
1195 }
1196 xfs_buf_iorequest(bp);
1191 error = xfs_buf_iowait(bp); 1197 error = xfs_buf_iowait(bp);
1192 if (error) { 1198 if (error) {
1193 xfs_buf_ioerror_alert(bp, 1199 xfs_buf_ioerror_alert(bp,
@@ -1200,7 +1206,12 @@ xfs_zero_remaining_bytes(
1200 XFS_BUF_UNDONE(bp); 1206 XFS_BUF_UNDONE(bp);
1201 XFS_BUF_UNREAD(bp); 1207 XFS_BUF_UNREAD(bp);
1202 XFS_BUF_WRITE(bp); 1208 XFS_BUF_WRITE(bp);
1203 xfsbdstrat(mp, bp); 1209
1210 if (XFS_FORCED_SHUTDOWN(mp)) {
1211 error = XFS_ERROR(EIO);
1212 break;
1213 }
1214 xfs_buf_iorequest(bp);
1204 error = xfs_buf_iowait(bp); 1215 error = xfs_buf_iowait(bp);
1205 if (error) { 1216 if (error) {
1206 xfs_buf_ioerror_alert(bp, 1217 xfs_buf_ioerror_alert(bp,
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index c7f0b77dcb00..afe7645e4b2b 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -698,7 +698,11 @@ xfs_buf_read_uncached(
698 bp->b_flags |= XBF_READ; 698 bp->b_flags |= XBF_READ;
699 bp->b_ops = ops; 699 bp->b_ops = ops;
700 700
701 xfsbdstrat(target->bt_mount, bp); 701 if (XFS_FORCED_SHUTDOWN(target->bt_mount)) {
702 xfs_buf_relse(bp);
703 return NULL;
704 }
705 xfs_buf_iorequest(bp);
702 xfs_buf_iowait(bp); 706 xfs_buf_iowait(bp);
703 return bp; 707 return bp;
704} 708}
@@ -1089,7 +1093,7 @@ xfs_bioerror(
1089 * This is meant for userdata errors; metadata bufs come with 1093 * This is meant for userdata errors; metadata bufs come with
1090 * iodone functions attached, so that we can track down errors. 1094 * iodone functions attached, so that we can track down errors.
1091 */ 1095 */
1092STATIC int 1096int
1093xfs_bioerror_relse( 1097xfs_bioerror_relse(
1094 struct xfs_buf *bp) 1098 struct xfs_buf *bp)
1095{ 1099{
@@ -1152,7 +1156,7 @@ xfs_bwrite(
1152 ASSERT(xfs_buf_islocked(bp)); 1156 ASSERT(xfs_buf_islocked(bp));
1153 1157
1154 bp->b_flags |= XBF_WRITE; 1158 bp->b_flags |= XBF_WRITE;
1155 bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q); 1159 bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q | XBF_WRITE_FAIL);
1156 1160
1157 xfs_bdstrat_cb(bp); 1161 xfs_bdstrat_cb(bp);
1158 1162
@@ -1164,25 +1168,6 @@ xfs_bwrite(
1164 return error; 1168 return error;
1165} 1169}
1166 1170
1167/*
1168 * Wrapper around bdstrat so that we can stop data from going to disk in case
1169 * we are shutting down the filesystem. Typically user data goes thru this
1170 * path; one of the exceptions is the superblock.
1171 */
1172void
1173xfsbdstrat(
1174 struct xfs_mount *mp,
1175 struct xfs_buf *bp)
1176{
1177 if (XFS_FORCED_SHUTDOWN(mp)) {
1178 trace_xfs_bdstrat_shut(bp, _RET_IP_);
1179 xfs_bioerror_relse(bp);
1180 return;
1181 }
1182
1183 xfs_buf_iorequest(bp);
1184}
1185
1186STATIC void 1171STATIC void
1187_xfs_buf_ioend( 1172_xfs_buf_ioend(
1188 xfs_buf_t *bp, 1173 xfs_buf_t *bp,
@@ -1516,6 +1501,12 @@ xfs_wait_buftarg(
1516 struct xfs_buf *bp; 1501 struct xfs_buf *bp;
1517 bp = list_first_entry(&dispose, struct xfs_buf, b_lru); 1502 bp = list_first_entry(&dispose, struct xfs_buf, b_lru);
1518 list_del_init(&bp->b_lru); 1503 list_del_init(&bp->b_lru);
1504 if (bp->b_flags & XBF_WRITE_FAIL) {
1505 xfs_alert(btp->bt_mount,
1506"Corruption Alert: Buffer at block 0x%llx had permanent write failures!\n"
1507"Please run xfs_repair to determine the extent of the problem.",
1508 (long long)bp->b_bn);
1509 }
1519 xfs_buf_rele(bp); 1510 xfs_buf_rele(bp);
1520 } 1511 }
1521 if (loop++ != 0) 1512 if (loop++ != 0)
@@ -1799,7 +1790,7 @@ __xfs_buf_delwri_submit(
1799 1790
1800 blk_start_plug(&plug); 1791 blk_start_plug(&plug);
1801 list_for_each_entry_safe(bp, n, io_list, b_list) { 1792 list_for_each_entry_safe(bp, n, io_list, b_list) {
1802 bp->b_flags &= ~(_XBF_DELWRI_Q | XBF_ASYNC); 1793 bp->b_flags &= ~(_XBF_DELWRI_Q | XBF_ASYNC | XBF_WRITE_FAIL);
1803 bp->b_flags |= XBF_WRITE; 1794 bp->b_flags |= XBF_WRITE;
1804 1795
1805 if (!wait) { 1796 if (!wait) {
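
With xfsbdstrat() removed, each submission site above open-codes the same guard: if the filesystem is already shut down, error the buffer out instead of issuing I/O. A condensed sketch of the guard, assuming invented types and only the EIO semantics:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct buf { int error; };

static bool forced_shutdown;	/* stands in for XFS_FORCED_SHUTDOWN(mp) */

static void buf_iorequest(struct buf *bp)
{
	printf("submitting I/O\n");
	bp->error = 0;
}

/* Submit a buffer unless the filesystem is shutting down; in that case
 * fail it immediately with EIO so waiters are not left hanging. */
static int submit_or_fail(struct buf *bp)
{
	if (forced_shutdown) {
		bp->error = EIO;
		printf("shutdown: failing buffer without I/O\n");
		return bp->error;
	}
	buf_iorequest(bp);
	return bp->error;
}

int main(void)
{
	struct buf bp = { 0 };

	submit_or_fail(&bp);
	forced_shutdown = true;
	submit_or_fail(&bp);
	return 0;
}
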
diff --git a/fs/xfs/xfs_buf.h b/fs/xfs/xfs_buf.h
index e65683361017..1cf21a4a9f22 100644
--- a/fs/xfs/xfs_buf.h
+++ b/fs/xfs/xfs_buf.h
@@ -45,6 +45,7 @@ typedef enum {
45#define XBF_ASYNC (1 << 4) /* initiator will not wait for completion */ 45#define XBF_ASYNC (1 << 4) /* initiator will not wait for completion */
46#define XBF_DONE (1 << 5) /* all pages in the buffer uptodate */ 46#define XBF_DONE (1 << 5) /* all pages in the buffer uptodate */
47#define XBF_STALE (1 << 6) /* buffer has been staled, do not find it */ 47#define XBF_STALE (1 << 6) /* buffer has been staled, do not find it */
48#define XBF_WRITE_FAIL (1 << 24)/* async writes have failed on this buffer */
48 49
49/* I/O hints for the BIO layer */ 50/* I/O hints for the BIO layer */
50#define XBF_SYNCIO (1 << 10)/* treat this buffer as synchronous I/O */ 51#define XBF_SYNCIO (1 << 10)/* treat this buffer as synchronous I/O */
@@ -70,6 +71,7 @@ typedef unsigned int xfs_buf_flags_t;
70 { XBF_ASYNC, "ASYNC" }, \ 71 { XBF_ASYNC, "ASYNC" }, \
71 { XBF_DONE, "DONE" }, \ 72 { XBF_DONE, "DONE" }, \
72 { XBF_STALE, "STALE" }, \ 73 { XBF_STALE, "STALE" }, \
74 { XBF_WRITE_FAIL, "WRITE_FAIL" }, \
73 { XBF_SYNCIO, "SYNCIO" }, \ 75 { XBF_SYNCIO, "SYNCIO" }, \
74 { XBF_FUA, "FUA" }, \ 76 { XBF_FUA, "FUA" }, \
75 { XBF_FLUSH, "FLUSH" }, \ 77 { XBF_FLUSH, "FLUSH" }, \
@@ -80,6 +82,7 @@ typedef unsigned int xfs_buf_flags_t;
80 { _XBF_DELWRI_Q, "DELWRI_Q" }, \ 82 { _XBF_DELWRI_Q, "DELWRI_Q" }, \
81 { _XBF_COMPOUND, "COMPOUND" } 83 { _XBF_COMPOUND, "COMPOUND" }
82 84
85
83/* 86/*
84 * Internal state flags. 87 * Internal state flags.
85 */ 88 */
@@ -269,9 +272,6 @@ extern void xfs_buf_unlock(xfs_buf_t *);
269 272
270/* Buffer Read and Write Routines */ 273/* Buffer Read and Write Routines */
271extern int xfs_bwrite(struct xfs_buf *bp); 274extern int xfs_bwrite(struct xfs_buf *bp);
272
273extern void xfsbdstrat(struct xfs_mount *, struct xfs_buf *);
274
275extern void xfs_buf_ioend(xfs_buf_t *, int); 275extern void xfs_buf_ioend(xfs_buf_t *, int);
276extern void xfs_buf_ioerror(xfs_buf_t *, int); 276extern void xfs_buf_ioerror(xfs_buf_t *, int);
277extern void xfs_buf_ioerror_alert(struct xfs_buf *, const char *func); 277extern void xfs_buf_ioerror_alert(struct xfs_buf *, const char *func);
@@ -282,6 +282,8 @@ extern void xfs_buf_iomove(xfs_buf_t *, size_t, size_t, void *,
282#define xfs_buf_zero(bp, off, len) \ 282#define xfs_buf_zero(bp, off, len) \
283 xfs_buf_iomove((bp), (off), (len), NULL, XBRW_ZERO) 283 xfs_buf_iomove((bp), (off), (len), NULL, XBRW_ZERO)
284 284
285extern int xfs_bioerror_relse(struct xfs_buf *);
286
285static inline int xfs_buf_geterror(xfs_buf_t *bp) 287static inline int xfs_buf_geterror(xfs_buf_t *bp)
286{ 288{
287 return bp ? bp->b_error : ENOMEM; 289 return bp ? bp->b_error : ENOMEM;
@@ -301,7 +303,8 @@ extern void xfs_buf_terminate(void);
301 303
302#define XFS_BUF_ZEROFLAGS(bp) \ 304#define XFS_BUF_ZEROFLAGS(bp) \
303 ((bp)->b_flags &= ~(XBF_READ|XBF_WRITE|XBF_ASYNC| \ 305 ((bp)->b_flags &= ~(XBF_READ|XBF_WRITE|XBF_ASYNC| \
304 XBF_SYNCIO|XBF_FUA|XBF_FLUSH)) 306 XBF_SYNCIO|XBF_FUA|XBF_FLUSH| \
307 XBF_WRITE_FAIL))
305 308
306void xfs_buf_stale(struct xfs_buf *bp); 309void xfs_buf_stale(struct xfs_buf *bp);
307#define XFS_BUF_UNSTALE(bp) ((bp)->b_flags &= ~XBF_STALE) 310#define XFS_BUF_UNSTALE(bp) ((bp)->b_flags &= ~XBF_STALE)
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index a64f67ba25d3..2227b9b050bb 100644
--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -496,6 +496,14 @@ xfs_buf_item_unpin(
496 } 496 }
497} 497}
498 498
499/*
500 * Buffer IO error rate limiting. Limit it to no more than 10 messages per 30
501 * seconds so as not to spam logs too much on repeated detection of the same
502 * buffer being bad.
503 */
504
505DEFINE_RATELIMIT_STATE(xfs_buf_write_fail_rl_state, 30 * HZ, 10);
506
499STATIC uint 507STATIC uint
500xfs_buf_item_push( 508xfs_buf_item_push(
501 struct xfs_log_item *lip, 509 struct xfs_log_item *lip,
@@ -524,6 +532,14 @@ xfs_buf_item_push(
524 532
525 trace_xfs_buf_item_push(bip); 533 trace_xfs_buf_item_push(bip);
526 534
535 /* has a previous flush failed due to IO errors? */
536 if ((bp->b_flags & XBF_WRITE_FAIL) &&
537 ___ratelimit(&xfs_buf_write_fail_rl_state, "XFS:")) {
538 xfs_warn(bp->b_target->bt_mount,
539"Detected failing async write on buffer block 0x%llx. Retrying async write.\n",
540 (long long)bp->b_bn);
541 }
542
527 if (!xfs_buf_delwri_queue(bp, buffer_list)) 543 if (!xfs_buf_delwri_queue(bp, buffer_list))
528 rval = XFS_ITEM_FLUSHING; 544 rval = XFS_ITEM_FLUSHING;
529 xfs_buf_unlock(bp); 545 xfs_buf_unlock(bp);
@@ -1096,8 +1112,9 @@ xfs_buf_iodone_callbacks(
1096 1112
1097 xfs_buf_ioerror(bp, 0); /* errno of 0 unsets the flag */ 1113 xfs_buf_ioerror(bp, 0); /* errno of 0 unsets the flag */
1098 1114
1099 if (!XFS_BUF_ISSTALE(bp)) { 1115 if (!(bp->b_flags & (XBF_STALE|XBF_WRITE_FAIL))) {
1100 bp->b_flags |= XBF_WRITE | XBF_ASYNC | XBF_DONE; 1116 bp->b_flags |= XBF_WRITE | XBF_ASYNC |
1117 XBF_DONE | XBF_WRITE_FAIL;
1101 xfs_buf_iorequest(bp); 1118 xfs_buf_iorequest(bp);
1102 } else { 1119 } else {
1103 xfs_buf_relse(bp); 1120 xfs_buf_relse(bp);
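
The xfs_buf_item_push() warning above is throttled by DEFINE_RATELIMIT_STATE(30 * HZ, 10): at most ten messages per 30-second window. A userspace approximation of an interval/burst ratelimit, offered only as a sketch (second-granularity time() instead of jiffies, and without the kernel's "callbacks suppressed" accounting):

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

struct ratelimit {
	int interval;	/* window length, seconds */
	int burst;	/* messages allowed per window */
	time_t begin;
	int printed;
};

/* Return true if the caller may emit one more message in this window. */
static bool ratelimit_ok(struct ratelimit *rs)
{
	time_t now = time(NULL);

	if (rs->begin == 0 || now - rs->begin >= rs->interval) {
		rs->begin = now;	/* start a new window */
		rs->printed = 0;
	}
	if (rs->printed < rs->burst) {
		rs->printed++;
		return true;
	}
	return false;
}

int main(void)
{
	struct ratelimit rs = { .interval = 30, .burst = 10 };
	int i, emitted = 0;

	for (i = 0; i < 1000; i++)
		if (ratelimit_ok(&rs))
			emitted++;

	printf("emitted %d of 1000 warnings\n", emitted);	/* 10 in one window */
	return 0;
}
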
diff --git a/fs/xfs/xfs_dir2_node.c b/fs/xfs/xfs_dir2_node.c
index 56369d4509d5..48c7d18f68c3 100644
--- a/fs/xfs/xfs_dir2_node.c
+++ b/fs/xfs/xfs_dir2_node.c
@@ -2067,12 +2067,12 @@ xfs_dir2_node_lookup(
2067 */ 2067 */
2068int /* error */ 2068int /* error */
2069xfs_dir2_node_removename( 2069xfs_dir2_node_removename(
2070 xfs_da_args_t *args) /* operation arguments */ 2070 struct xfs_da_args *args) /* operation arguments */
2071{ 2071{
2072 xfs_da_state_blk_t *blk; /* leaf block */ 2072 struct xfs_da_state_blk *blk; /* leaf block */
2073 int error; /* error return value */ 2073 int error; /* error return value */
2074 int rval; /* operation return value */ 2074 int rval; /* operation return value */
2075 xfs_da_state_t *state; /* btree cursor */ 2075 struct xfs_da_state *state; /* btree cursor */
2076 2076
2077 trace_xfs_dir2_node_removename(args); 2077 trace_xfs_dir2_node_removename(args);
2078 2078
@@ -2084,19 +2084,18 @@ xfs_dir2_node_removename(
2084 state->mp = args->dp->i_mount; 2084 state->mp = args->dp->i_mount;
2085 state->blocksize = state->mp->m_dirblksize; 2085 state->blocksize = state->mp->m_dirblksize;
2086 state->node_ents = state->mp->m_dir_node_ents; 2086 state->node_ents = state->mp->m_dir_node_ents;
2087 /* 2087
2088 * Look up the entry we're deleting, set up the cursor. 2088 /* Look up the entry we're deleting, set up the cursor. */
2089 */
2090 error = xfs_da3_node_lookup_int(state, &rval); 2089 error = xfs_da3_node_lookup_int(state, &rval);
2091 if (error) 2090 if (error)
2092 rval = error; 2091 goto out_free;
2093 /* 2092
2094 * Didn't find it, upper layer screwed up. 2093 /* Didn't find it, upper layer screwed up. */
2095 */
2096 if (rval != EEXIST) { 2094 if (rval != EEXIST) {
2097 xfs_da_state_free(state); 2095 error = rval;
2098 return rval; 2096 goto out_free;
2099 } 2097 }
2098
2100 blk = &state->path.blk[state->path.active - 1]; 2099 blk = &state->path.blk[state->path.active - 1];
2101 ASSERT(blk->magic == XFS_DIR2_LEAFN_MAGIC); 2100 ASSERT(blk->magic == XFS_DIR2_LEAFN_MAGIC);
2102 ASSERT(state->extravalid); 2101 ASSERT(state->extravalid);
@@ -2107,7 +2106,7 @@ xfs_dir2_node_removename(
2107 error = xfs_dir2_leafn_remove(args, blk->bp, blk->index, 2106 error = xfs_dir2_leafn_remove(args, blk->bp, blk->index,
2108 &state->extrablk, &rval); 2107 &state->extrablk, &rval);
2109 if (error) 2108 if (error)
2110 return error; 2109 goto out_free;
2111 /* 2110 /*
2112 * Fix the hash values up the btree. 2111 * Fix the hash values up the btree.
2113 */ 2112 */
@@ -2122,6 +2121,7 @@ xfs_dir2_node_removename(
2122 */ 2121 */
2123 if (!error) 2122 if (!error)
2124 error = xfs_dir2_node_to_leaf(state); 2123 error = xfs_dir2_node_to_leaf(state);
2124out_free:
2125 xfs_da_state_free(state); 2125 xfs_da_state_free(state);
2126 return error; 2126 return error;
2127} 2127}
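
The xfs_dir2_node_removename() rework above replaces early returns that leaked the da_state with a single out_free exit. The same single-exit cleanup shape, in a standalone sketch with hypothetical helpers:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct cursor { int dummy; };

static int lookup(struct cursor *c, int key) { return key == 42 ? 0 : -ENOENT; }
static int remove_entry(struct cursor *c)    { return 0; }

/* Every path funnels through out_free, so the cursor is always released. */
static int removename(int key)
{
	struct cursor *c = malloc(sizeof(*c));
	int error;

	if (!c)
		return -ENOMEM;

	error = lookup(c, key);
	if (error)
		goto out_free;

	error = remove_entry(c);
	if (error)
		goto out_free;

	printf("removed key %d\n", key);
out_free:
	free(c);
	return error;
}

int main(void)
{
	printf("removename(42) = %d\n", removename(42));
	printf("removename(7)  = %d\n", removename(7));
	return 0;
}
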
diff --git a/fs/xfs/xfs_discard.c b/fs/xfs/xfs_discard.c
index 8367d6dc18c9..4f11ef011139 100644
--- a/fs/xfs/xfs_discard.c
+++ b/fs/xfs/xfs_discard.c
@@ -157,7 +157,7 @@ xfs_ioc_trim(
157 struct xfs_mount *mp, 157 struct xfs_mount *mp,
158 struct fstrim_range __user *urange) 158 struct fstrim_range __user *urange)
159{ 159{
160 struct request_queue *q = mp->m_ddev_targp->bt_bdev->bd_disk->queue; 160 struct request_queue *q = bdev_get_queue(mp->m_ddev_targp->bt_bdev);
161 unsigned int granularity = q->limits.discard_granularity; 161 unsigned int granularity = q->limits.discard_granularity;
162 struct fstrim_range range; 162 struct fstrim_range range;
163 xfs_daddr_t start, end, minlen; 163 xfs_daddr_t start, end, minlen;
@@ -180,7 +180,8 @@ xfs_ioc_trim(
180 * matter as trimming blocks is an advisory interface. 180 * matter as trimming blocks is an advisory interface.
181 */ 181 */
182 if (range.start >= XFS_FSB_TO_B(mp, mp->m_sb.sb_dblocks) || 182 if (range.start >= XFS_FSB_TO_B(mp, mp->m_sb.sb_dblocks) ||
183 range.minlen > XFS_FSB_TO_B(mp, XFS_ALLOC_AG_MAX_USABLE(mp))) 183 range.minlen > XFS_FSB_TO_B(mp, XFS_ALLOC_AG_MAX_USABLE(mp)) ||
184 range.len < mp->m_sb.sb_blocksize)
184 return -XFS_ERROR(EINVAL); 185 return -XFS_ERROR(EINVAL);
185 186
186 start = BTOBB(range.start); 187 start = BTOBB(range.start);
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
index a6e54b3319bd..02fb943cbf22 100644
--- a/fs/xfs/xfs_fsops.c
+++ b/fs/xfs/xfs_fsops.c
@@ -220,6 +220,8 @@ xfs_growfs_data_private(
220 */ 220 */
221 nfree = 0; 221 nfree = 0;
222 for (agno = nagcount - 1; agno >= oagcount; agno--, new -= agsize) { 222 for (agno = nagcount - 1; agno >= oagcount; agno--, new -= agsize) {
223 __be32 *agfl_bno;
224
223 /* 225 /*
224 * AG freespace header block 226 * AG freespace header block
225 */ 227 */
@@ -279,8 +281,10 @@ xfs_growfs_data_private(
279 agfl->agfl_seqno = cpu_to_be32(agno); 281 agfl->agfl_seqno = cpu_to_be32(agno);
280 uuid_copy(&agfl->agfl_uuid, &mp->m_sb.sb_uuid); 282 uuid_copy(&agfl->agfl_uuid, &mp->m_sb.sb_uuid);
281 } 283 }
284
285 agfl_bno = XFS_BUF_TO_AGFL_BNO(mp, bp);
282 for (bucket = 0; bucket < XFS_AGFL_SIZE(mp); bucket++) 286 for (bucket = 0; bucket < XFS_AGFL_SIZE(mp); bucket++)
283 agfl->agfl_bno[bucket] = cpu_to_be32(NULLAGBLOCK); 287 agfl_bno[bucket] = cpu_to_be32(NULLAGBLOCK);
284 288
285 error = xfs_bwrite(bp); 289 error = xfs_bwrite(bp);
286 xfs_buf_relse(bp); 290 xfs_buf_relse(bp);
diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
index 4d613401a5e0..33ad9a77791f 100644
--- a/fs/xfs/xfs_ioctl.c
+++ b/fs/xfs/xfs_ioctl.c
@@ -442,7 +442,8 @@ xfs_attrlist_by_handle(
442 return -XFS_ERROR(EPERM); 442 return -XFS_ERROR(EPERM);
443 if (copy_from_user(&al_hreq, arg, sizeof(xfs_fsop_attrlist_handlereq_t))) 443 if (copy_from_user(&al_hreq, arg, sizeof(xfs_fsop_attrlist_handlereq_t)))
444 return -XFS_ERROR(EFAULT); 444 return -XFS_ERROR(EFAULT);
445 if (al_hreq.buflen > XATTR_LIST_MAX) 445 if (al_hreq.buflen < sizeof(struct attrlist) ||
446 al_hreq.buflen > XATTR_LIST_MAX)
446 return -XFS_ERROR(EINVAL); 447 return -XFS_ERROR(EINVAL);
447 448
448 /* 449 /*
diff --git a/fs/xfs/xfs_ioctl32.c b/fs/xfs/xfs_ioctl32.c
index e8fb1231db81..a7992f8de9d3 100644
--- a/fs/xfs/xfs_ioctl32.c
+++ b/fs/xfs/xfs_ioctl32.c
@@ -356,7 +356,8 @@ xfs_compat_attrlist_by_handle(
356 if (copy_from_user(&al_hreq, arg, 356 if (copy_from_user(&al_hreq, arg,
357 sizeof(compat_xfs_fsop_attrlist_handlereq_t))) 357 sizeof(compat_xfs_fsop_attrlist_handlereq_t)))
358 return -XFS_ERROR(EFAULT); 358 return -XFS_ERROR(EFAULT);
359 if (al_hreq.buflen > XATTR_LIST_MAX) 359 if (al_hreq.buflen < sizeof(struct attrlist) ||
360 al_hreq.buflen > XATTR_LIST_MAX)
360 return -XFS_ERROR(EINVAL); 361 return -XFS_ERROR(EINVAL);
361 362
362 /* 363 /*
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
index 27e0e544e963..104455b8046c 100644
--- a/fs/xfs/xfs_iops.c
+++ b/fs/xfs/xfs_iops.c
@@ -618,7 +618,8 @@ xfs_setattr_nonsize(
618 } 618 }
619 if (!gid_eq(igid, gid)) { 619 if (!gid_eq(igid, gid)) {
620 if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_GQUOTA_ON(mp)) { 620 if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_GQUOTA_ON(mp)) {
621 ASSERT(!XFS_IS_PQUOTA_ON(mp)); 621 ASSERT(xfs_sb_version_has_pquotino(&mp->m_sb) ||
622 !XFS_IS_PQUOTA_ON(mp));
622 ASSERT(mask & ATTR_GID); 623 ASSERT(mask & ATTR_GID);
623 ASSERT(gdqp); 624 ASSERT(gdqp);
624 olddquot2 = xfs_qm_vop_chown(tp, ip, 625 olddquot2 = xfs_qm_vop_chown(tp, ip,
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index b6b669df40f3..eae16920655b 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -193,7 +193,10 @@ xlog_bread_noalign(
193 bp->b_io_length = nbblks; 193 bp->b_io_length = nbblks;
194 bp->b_error = 0; 194 bp->b_error = 0;
195 195
196 xfsbdstrat(log->l_mp, bp); 196 if (XFS_FORCED_SHUTDOWN(log->l_mp))
197 return XFS_ERROR(EIO);
198
199 xfs_buf_iorequest(bp);
197 error = xfs_buf_iowait(bp); 200 error = xfs_buf_iowait(bp);
198 if (error) 201 if (error)
199 xfs_buf_ioerror_alert(bp, __func__); 202 xfs_buf_ioerror_alert(bp, __func__);
@@ -4397,7 +4400,13 @@ xlog_do_recover(
4397 XFS_BUF_READ(bp); 4400 XFS_BUF_READ(bp);
4398 XFS_BUF_UNASYNC(bp); 4401 XFS_BUF_UNASYNC(bp);
4399 bp->b_ops = &xfs_sb_buf_ops; 4402 bp->b_ops = &xfs_sb_buf_ops;
4400 xfsbdstrat(log->l_mp, bp); 4403
4404 if (XFS_FORCED_SHUTDOWN(log->l_mp)) {
4405 xfs_buf_relse(bp);
4406 return XFS_ERROR(EIO);
4407 }
4408
4409 xfs_buf_iorequest(bp);
4401 error = xfs_buf_iowait(bp); 4410 error = xfs_buf_iowait(bp);
4402 if (error) { 4411 if (error) {
4403 xfs_buf_ioerror_alert(bp, __func__); 4412 xfs_buf_ioerror_alert(bp, __func__);
diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
index 14a4996cfec6..dd88f0e27bd8 100644
--- a/fs/xfs/xfs_qm.c
+++ b/fs/xfs/xfs_qm.c
@@ -134,8 +134,6 @@ xfs_qm_dqpurge(
134{ 134{
135 struct xfs_mount *mp = dqp->q_mount; 135 struct xfs_mount *mp = dqp->q_mount;
136 struct xfs_quotainfo *qi = mp->m_quotainfo; 136 struct xfs_quotainfo *qi = mp->m_quotainfo;
137 struct xfs_dquot *gdqp = NULL;
138 struct xfs_dquot *pdqp = NULL;
139 137
140 xfs_dqlock(dqp); 138 xfs_dqlock(dqp);
141 if ((dqp->dq_flags & XFS_DQ_FREEING) || dqp->q_nrefs != 0) { 139 if ((dqp->dq_flags & XFS_DQ_FREEING) || dqp->q_nrefs != 0) {
@@ -143,21 +141,6 @@ xfs_qm_dqpurge(
143 return EAGAIN; 141 return EAGAIN;
144 } 142 }
145 143
146 /*
147 * If this quota has a hint attached, prepare for releasing it now.
148 */
149 gdqp = dqp->q_gdquot;
150 if (gdqp) {
151 xfs_dqlock(gdqp);
152 dqp->q_gdquot = NULL;
153 }
154
155 pdqp = dqp->q_pdquot;
156 if (pdqp) {
157 xfs_dqlock(pdqp);
158 dqp->q_pdquot = NULL;
159 }
160
161 dqp->dq_flags |= XFS_DQ_FREEING; 144 dqp->dq_flags |= XFS_DQ_FREEING;
162 145
163 xfs_dqflock(dqp); 146 xfs_dqflock(dqp);
@@ -206,11 +189,47 @@ xfs_qm_dqpurge(
206 XFS_STATS_DEC(xs_qm_dquot_unused); 189 XFS_STATS_DEC(xs_qm_dquot_unused);
207 190
208 xfs_qm_dqdestroy(dqp); 191 xfs_qm_dqdestroy(dqp);
192 return 0;
193}
194
195/*
196 * Release the group or project dquot pointers the user dquots may be carrying
197 * around as hints, and proceed to purge the user dquot cache if requested.
198 */
199STATIC int
200xfs_qm_dqpurge_hints(
201 struct xfs_dquot *dqp,
202 void *data)
203{
204 struct xfs_dquot *gdqp = NULL;
205 struct xfs_dquot *pdqp = NULL;
206 uint flags = *((uint *)data);
207
208 xfs_dqlock(dqp);
209 if (dqp->dq_flags & XFS_DQ_FREEING) {
210 xfs_dqunlock(dqp);
211 return EAGAIN;
212 }
213
214 /* If this quota has a hint attached, prepare for releasing it now */
215 gdqp = dqp->q_gdquot;
216 if (gdqp)
217 dqp->q_gdquot = NULL;
218
219 pdqp = dqp->q_pdquot;
220 if (pdqp)
221 dqp->q_pdquot = NULL;
222
223 xfs_dqunlock(dqp);
209 224
210 if (gdqp) 225 if (gdqp)
211 xfs_qm_dqput(gdqp); 226 xfs_qm_dqrele(gdqp);
212 if (pdqp) 227 if (pdqp)
213 xfs_qm_dqput(pdqp); 228 xfs_qm_dqrele(pdqp);
229
230 if (flags & XFS_QMOPT_UQUOTA)
231 return xfs_qm_dqpurge(dqp, NULL);
232
214 return 0; 233 return 0;
215} 234}
216 235
@@ -222,8 +241,18 @@ xfs_qm_dqpurge_all(
222 struct xfs_mount *mp, 241 struct xfs_mount *mp,
223 uint flags) 242 uint flags)
224{ 243{
225 if (flags & XFS_QMOPT_UQUOTA) 244 /*
226 xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_dqpurge, NULL); 245 * We have to release the group/project dquot hints from the user dquots
246 * first, if they are there; otherwise we would run into an infinite
247 * loop while walking the radix tree to purge the other dquot types,
248 * since their refcount is not zero while a user dquot still refers to
249 * them as a hint.
250 *
251 * Calling the special xfs_qm_dqpurge_hints() ends up going through the
252 * general xfs_qm_dqpurge() against the user dquot cache if requested.
253 */
254 xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_dqpurge_hints, &flags);
255
227 if (flags & XFS_QMOPT_GQUOTA) 256 if (flags & XFS_QMOPT_GQUOTA)
228 xfs_qm_dquot_walk(mp, XFS_DQ_GROUP, xfs_qm_dqpurge, NULL); 257 xfs_qm_dquot_walk(mp, XFS_DQ_GROUP, xfs_qm_dqpurge, NULL);
229 if (flags & XFS_QMOPT_PQUOTA) 258 if (flags & XFS_QMOPT_PQUOTA)
@@ -2082,24 +2111,21 @@ xfs_qm_vop_create_dqattach(
2082 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); 2111 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
2083 ASSERT(XFS_IS_QUOTA_RUNNING(mp)); 2112 ASSERT(XFS_IS_QUOTA_RUNNING(mp));
2084 2113
2085 if (udqp) { 2114 if (udqp && XFS_IS_UQUOTA_ON(mp)) {
2086 ASSERT(ip->i_udquot == NULL); 2115 ASSERT(ip->i_udquot == NULL);
2087 ASSERT(XFS_IS_UQUOTA_ON(mp));
2088 ASSERT(ip->i_d.di_uid == be32_to_cpu(udqp->q_core.d_id)); 2116 ASSERT(ip->i_d.di_uid == be32_to_cpu(udqp->q_core.d_id));
2089 2117
2090 ip->i_udquot = xfs_qm_dqhold(udqp); 2118 ip->i_udquot = xfs_qm_dqhold(udqp);
2091 xfs_trans_mod_dquot(tp, udqp, XFS_TRANS_DQ_ICOUNT, 1); 2119 xfs_trans_mod_dquot(tp, udqp, XFS_TRANS_DQ_ICOUNT, 1);
2092 } 2120 }
2093 if (gdqp) { 2121 if (gdqp && XFS_IS_GQUOTA_ON(mp)) {
2094 ASSERT(ip->i_gdquot == NULL); 2122 ASSERT(ip->i_gdquot == NULL);
2095 ASSERT(XFS_IS_GQUOTA_ON(mp));
2096 ASSERT(ip->i_d.di_gid == be32_to_cpu(gdqp->q_core.d_id)); 2123 ASSERT(ip->i_d.di_gid == be32_to_cpu(gdqp->q_core.d_id));
2097 ip->i_gdquot = xfs_qm_dqhold(gdqp); 2124 ip->i_gdquot = xfs_qm_dqhold(gdqp);
2098 xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1); 2125 xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1);
2099 } 2126 }
2100 if (pdqp) { 2127 if (pdqp && XFS_IS_PQUOTA_ON(mp)) {
2101 ASSERT(ip->i_pdquot == NULL); 2128 ASSERT(ip->i_pdquot == NULL);
2102 ASSERT(XFS_IS_PQUOTA_ON(mp));
2103 ASSERT(xfs_get_projid(ip) == be32_to_cpu(pdqp->q_core.d_id)); 2129 ASSERT(xfs_get_projid(ip) == be32_to_cpu(pdqp->q_core.d_id));
2104 2130
2105 ip->i_pdquot = xfs_qm_dqhold(pdqp); 2131 ip->i_pdquot = xfs_qm_dqhold(pdqp);
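
The quota rework above drops the group/project hint references held by user dquots in a first pass (xfs_qm_dqpurge_hints) so the later purge walks can actually see refcounts reach zero. A stripped-down sketch of "release cross-references, then purge", with invented structures:

#include <stdio.h>

struct dquot {
	const char *name;
	int refs;		/* references held by others */
	struct dquot *hint;	/* e.g. a user dquot's cached group dquot */
};

static void dqrele(struct dquot *dq)
{
	if (dq)
		dq->refs--;
}

/* Pass 1: a user dquot drops the hint reference it holds. */
static void purge_hints(struct dquot *user)
{
	dqrele(user->hint);
	user->hint = NULL;
}

/* Pass 2: a dquot can only be purged once nothing references it. */
static int purge(struct dquot *dq)
{
	if (dq->refs != 0) {
		printf("%s still referenced (%d), skipping\n", dq->name, dq->refs);
		return -1;
	}
	printf("%s purged\n", dq->name);
	return 0;
}

int main(void)
{
	struct dquot group = { "group", 1, NULL };	/* pinned by the hint */
	struct dquot user  = { "user",  0, &group };

	purge(&group);		/* refs != 0: retrying this forever gets nowhere */
	purge_hints(&user);	/* drop the hint first ... */
	purge(&user);
	purge(&group);		/* ... now the group dquot can go */
	return 0;
}
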
diff --git a/fs/xfs/xfs_trans_buf.c b/fs/xfs/xfs_trans_buf.c
index c035d11b7734..647b6f1d8923 100644
--- a/fs/xfs/xfs_trans_buf.c
+++ b/fs/xfs/xfs_trans_buf.c
@@ -314,7 +314,18 @@ xfs_trans_read_buf_map(
314 ASSERT(bp->b_iodone == NULL); 314 ASSERT(bp->b_iodone == NULL);
315 XFS_BUF_READ(bp); 315 XFS_BUF_READ(bp);
316 bp->b_ops = ops; 316 bp->b_ops = ops;
317 xfsbdstrat(tp->t_mountp, bp); 317
318 /*
319 * XXX(hch): clean up the error handling here to be less
320 * of a mess..
321 */
322 if (XFS_FORCED_SHUTDOWN(mp)) {
323 trace_xfs_bdstrat_shut(bp, _RET_IP_);
324 xfs_bioerror_relse(bp);
325 } else {
326 xfs_buf_iorequest(bp);
327 }
328
318 error = xfs_buf_iowait(bp); 329 error = xfs_buf_iowait(bp);
319 if (error) { 330 if (error) {
320 xfs_buf_ioerror_alert(bp, __func__); 331 xfs_buf_ioerror_alert(bp, __func__);
diff --git a/include/acpi/acconfig.h b/include/acpi/acconfig.h
index d98c67001840..3ea214cff349 100644
--- a/include/acpi/acconfig.h
+++ b/include/acpi/acconfig.h
@@ -83,7 +83,9 @@
83 * Should the subsystem abort the loading of an ACPI table if the 83 * Should the subsystem abort the loading of an ACPI table if the
84 * table checksum is incorrect? 84 * table checksum is incorrect?
85 */ 85 */
86#ifndef ACPI_CHECKSUM_ABORT
86#define ACPI_CHECKSUM_ABORT FALSE 87#define ACPI_CHECKSUM_ABORT FALSE
88#endif
87 89
88/* 90/*
89 * Generate a version of ACPICA that only supports "reduced hardware" 91 * Generate a version of ACPICA that only supports "reduced hardware"
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index 7b2de026a4f3..ddabed1f51c2 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -100,6 +100,7 @@ enum acpi_hotplug_mode {
100struct acpi_hotplug_profile { 100struct acpi_hotplug_profile {
101 struct kobject kobj; 101 struct kobject kobj;
102 bool enabled:1; 102 bool enabled:1;
103 bool ignore:1;
103 enum acpi_hotplug_mode mode; 104 enum acpi_hotplug_mode mode;
104}; 105};
105 106
@@ -168,7 +169,8 @@ struct acpi_device_flags {
168 u32 ejectable:1; 169 u32 ejectable:1;
169 u32 power_manageable:1; 170 u32 power_manageable:1;
170 u32 match_driver:1; 171 u32 match_driver:1;
171 u32 reserved:27; 172 u32 no_hotplug:1;
173 u32 reserved:26;
172}; 174};
173 175
174/* File System */ 176/* File System */
@@ -343,6 +345,7 @@ extern struct kobject *acpi_kobj;
343extern int acpi_bus_generate_netlink_event(const char*, const char*, u8, int); 345extern int acpi_bus_generate_netlink_event(const char*, const char*, u8, int);
344void acpi_bus_private_data_handler(acpi_handle, void *); 346void acpi_bus_private_data_handler(acpi_handle, void *);
345int acpi_bus_get_private_data(acpi_handle, void **); 347int acpi_bus_get_private_data(acpi_handle, void **);
348void acpi_bus_no_hotplug(acpi_handle handle);
346extern int acpi_notifier_call_chain(struct acpi_device *, u32, u32); 349extern int acpi_notifier_call_chain(struct acpi_device *, u32, u32);
347extern int register_acpi_notifier(struct notifier_block *); 350extern int register_acpi_notifier(struct notifier_block *);
348extern int unregister_acpi_notifier(struct notifier_block *); 351extern int unregister_acpi_notifier(struct notifier_block *);
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index d8f9457755b4..4278aba96503 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -46,7 +46,7 @@
46 46
47/* Current ACPICA subsystem version in YYYYMMDD format */ 47/* Current ACPICA subsystem version in YYYYMMDD format */
48 48
49#define ACPI_CA_VERSION 0x20130927 49#define ACPI_CA_VERSION 0x20131115
50 50
51#include <acpi/acconfig.h> 51#include <acpi/acconfig.h>
52#include <acpi/actypes.h> 52#include <acpi/actypes.h>
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index f330d28e4d0e..db0923458940 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -217,7 +217,7 @@ static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
217#endif 217#endif
218 218
219#ifndef pte_accessible 219#ifndef pte_accessible
220# define pte_accessible(pte) ((void)(pte),1) 220# define pte_accessible(mm, pte) ((void)(pte), 1)
221#endif 221#endif
222 222
223#ifndef flush_tlb_fix_spurious_fault 223#ifndef flush_tlb_fix_spurious_fault
@@ -599,11 +599,10 @@ static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd)
599#ifdef CONFIG_TRANSPARENT_HUGEPAGE 599#ifdef CONFIG_TRANSPARENT_HUGEPAGE
600 barrier(); 600 barrier();
601#endif 601#endif
602 if (pmd_none(pmdval)) 602 if (pmd_none(pmdval) || pmd_trans_huge(pmdval))
603 return 1; 603 return 1;
604 if (unlikely(pmd_bad(pmdval))) { 604 if (unlikely(pmd_bad(pmdval))) {
605 if (!pmd_trans_huge(pmdval)) 605 pmd_clear_bad(pmd);
606 pmd_clear_bad(pmd);
607 return 1; 606 return 1;
608 } 607 }
609 return 0; 608 return 0;
diff --git a/include/asm-generic/preempt.h b/include/asm-generic/preempt.h
index ddf2b420ac8f..1cd3f5d767a8 100644
--- a/include/asm-generic/preempt.h
+++ b/include/asm-generic/preempt.h
@@ -3,13 +3,11 @@
3 3
4#include <linux/thread_info.h> 4#include <linux/thread_info.h>
5 5
6/* 6#define PREEMPT_ENABLED (0)
7 * We mask the PREEMPT_NEED_RESCHED bit so as not to confuse all current users 7
8 * that think a non-zero value indicates we cannot preempt.
9 */
10static __always_inline int preempt_count(void) 8static __always_inline int preempt_count(void)
11{ 9{
12 return current_thread_info()->preempt_count & ~PREEMPT_NEED_RESCHED; 10 return current_thread_info()->preempt_count;
13} 11}
14 12
15static __always_inline int *preempt_count_ptr(void) 13static __always_inline int *preempt_count_ptr(void)
@@ -17,11 +15,6 @@ static __always_inline int *preempt_count_ptr(void)
17 return &current_thread_info()->preempt_count; 15 return &current_thread_info()->preempt_count;
18} 16}
19 17
20/*
21 * We now loose PREEMPT_NEED_RESCHED and cause an extra reschedule; however the
22 * alternative is loosing a reschedule. Better schedule too often -- also this
23 * should be a very rare operation.
24 */
25static __always_inline void preempt_count_set(int pc) 18static __always_inline void preempt_count_set(int pc)
26{ 19{
27 *preempt_count_ptr() = pc; 20 *preempt_count_ptr() = pc;
@@ -41,28 +34,17 @@ static __always_inline void preempt_count_set(int pc)
41 task_thread_info(p)->preempt_count = PREEMPT_ENABLED; \ 34 task_thread_info(p)->preempt_count = PREEMPT_ENABLED; \
42} while (0) 35} while (0)
43 36
44/*
45 * We fold the NEED_RESCHED bit into the preempt count such that
46 * preempt_enable() can decrement and test for needing to reschedule with a
47 * single instruction.
48 *
49 * We invert the actual bit, so that when the decrement hits 0 we know we both
50 * need to resched (the bit is cleared) and can resched (no preempt count).
51 */
52
53static __always_inline void set_preempt_need_resched(void) 37static __always_inline void set_preempt_need_resched(void)
54{ 38{
55 *preempt_count_ptr() &= ~PREEMPT_NEED_RESCHED;
56} 39}
57 40
58static __always_inline void clear_preempt_need_resched(void) 41static __always_inline void clear_preempt_need_resched(void)
59{ 42{
60 *preempt_count_ptr() |= PREEMPT_NEED_RESCHED;
61} 43}
62 44
63static __always_inline bool test_preempt_need_resched(void) 45static __always_inline bool test_preempt_need_resched(void)
64{ 46{
65 return !(*preempt_count_ptr() & PREEMPT_NEED_RESCHED); 47 return false;
66} 48}
67 49
68/* 50/*
@@ -81,7 +63,12 @@ static __always_inline void __preempt_count_sub(int val)
81 63
82static __always_inline bool __preempt_count_dec_and_test(void) 64static __always_inline bool __preempt_count_dec_and_test(void)
83{ 65{
84 return !--*preempt_count_ptr(); 66 /*
67 * Because load-store architectures cannot do per-cpu atomic
68 * operations, we cannot use PREEMPT_NEED_RESCHED because it might get
69 * lost.
70 */
71 return !--*preempt_count_ptr() && tif_need_resched();
85} 72}
86 73
87/* 74/*
@@ -89,7 +76,7 @@ static __always_inline bool __preempt_count_dec_and_test(void)
89 */ 76 */
90static __always_inline bool should_resched(void) 77static __always_inline bool should_resched(void)
91{ 78{
92 return unlikely(!*preempt_count_ptr()); 79 return unlikely(!preempt_count() && tif_need_resched());
93} 80}
94 81
95#ifdef CONFIG_PREEMPT 82#ifdef CONFIG_PREEMPT
diff --git a/include/asm-generic/simd.h b/include/asm-generic/simd.h
new file mode 100644
index 000000000000..f57eb7b5c23b
--- /dev/null
+++ b/include/asm-generic/simd.h
@@ -0,0 +1,14 @@
1
2#include <linux/hardirq.h>
3
4/*
5 * may_use_simd - whether it is allowable at this time to issue SIMD
6 * instructions or access the SIMD register file
7 *
8 * As architectures typically don't preserve the SIMD register file when
9 * taking an interrupt, !in_interrupt() should be a reasonable default.
10 */
11static __must_check inline bool may_use_simd(void)
12{
13 return !in_interrupt();
14}
diff --git a/include/asm-generic/word-at-a-time.h b/include/asm-generic/word-at-a-time.h
index 3f21f1b72e45..d3909effd725 100644
--- a/include/asm-generic/word-at-a-time.h
+++ b/include/asm-generic/word-at-a-time.h
@@ -49,4 +49,12 @@ static inline bool has_zero(unsigned long val, unsigned long *data, const struct
49 return (val + c->high_bits) & ~rhs; 49 return (val + c->high_bits) & ~rhs;
50} 50}
51 51
52#ifndef zero_bytemask
53#ifdef CONFIG_64BIT
54#define zero_bytemask(mask) (~0ul << fls64(mask))
55#else
56#define zero_bytemask(mask) (~0ul << fls(mask))
57#endif /* CONFIG_64BIT */
58#endif /* zero_bytemask */
59
52#endif /* _ASM_WORD_AT_A_TIME_H */ 60#endif /* _ASM_WORD_AT_A_TIME_H */
diff --git a/arch/x86/include/asm/crypto/ablk_helper.h b/include/crypto/ablk_helper.h
index 4f93df50c23e..4f93df50c23e 100644
--- a/arch/x86/include/asm/crypto/ablk_helper.h
+++ b/include/crypto/ablk_helper.h
diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
index 418d270e1806..e73c19e90e38 100644
--- a/include/crypto/algapi.h
+++ b/include/crypto/algapi.h
@@ -386,5 +386,21 @@ static inline int crypto_requires_sync(u32 type, u32 mask)
386 return (type ^ CRYPTO_ALG_ASYNC) & mask & CRYPTO_ALG_ASYNC; 386 return (type ^ CRYPTO_ALG_ASYNC) & mask & CRYPTO_ALG_ASYNC;
387} 387}
388 388
389#endif /* _CRYPTO_ALGAPI_H */ 389noinline unsigned long __crypto_memneq(const void *a, const void *b, size_t size);
390
391/**
392 * crypto_memneq - Compare two areas of memory without leaking
393 * timing information.
394 *
395 * @a: One area of memory
396 * @b: Another area of memory
397 * @size: The size of the area.
398 *
399 * Returns 0 when data is equal, 1 otherwise.
400 */
401static inline int crypto_memneq(const void *a, const void *b, size_t size)
402{
403 return __crypto_memneq(a, b, size) != 0UL ? 1 : 0;
404}
390 405
406#endif /* _CRYPTO_ALGAPI_H */
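
crypto_memneq() is documented above as comparing two buffers without leaking timing, i.e. without memcmp()'s early exit. A minimal userspace sketch of a fixed-time comparison; this is only an illustration, not the kernel's __crypto_memneq(), which also takes care to defeat compiler optimisations:

#include <stddef.h>
#include <stdio.h>

/* Accumulate differences over the whole buffer; the loop runs the same
 * number of iterations whether the inputs diverge early or not. */
static unsigned long memneq(const void *a, const void *b, size_t size)
{
	const unsigned char *pa = a, *pb = b;
	unsigned long neq = 0;
	size_t i;

	for (i = 0; i < size; i++)
		neq |= pa[i] ^ pb[i];

	return neq;	/* 0 when equal, non-zero otherwise */
}

int main(void)
{
	const char a[] = "secret-mac-value";
	const char b[] = "secret-mac-value";
	const char c[] = "Secret-mac-value";

	printf("a vs b differ: %d\n", memneq(a, b, sizeof(a)) != 0);	/* 0 */
	printf("a vs c differ: %d\n", memneq(a, c, sizeof(a)) != 0);	/* 1 */
	return 0;
}
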
diff --git a/include/crypto/authenc.h b/include/crypto/authenc.h
index e47b044929a8..6775059539b5 100644
--- a/include/crypto/authenc.h
+++ b/include/crypto/authenc.h
@@ -23,5 +23,15 @@ struct crypto_authenc_key_param {
23 __be32 enckeylen; 23 __be32 enckeylen;
24}; 24};
25 25
26#endif /* _CRYPTO_AUTHENC_H */ 26struct crypto_authenc_keys {
27 const u8 *authkey;
28 const u8 *enckey;
29
30 unsigned int authkeylen;
31 unsigned int enckeylen;
32};
27 33
34int crypto_authenc_extractkeys(struct crypto_authenc_keys *keys, const u8 *key,
35 unsigned int keylen);
36
37#endif /* _CRYPTO_AUTHENC_H */
diff --git a/include/crypto/scatterwalk.h b/include/crypto/scatterwalk.h
index 13621cc8cf4c..6a626a507b8c 100644
--- a/include/crypto/scatterwalk.h
+++ b/include/crypto/scatterwalk.h
@@ -36,6 +36,7 @@ static inline void scatterwalk_sg_chain(struct scatterlist *sg1, int num,
36{ 36{
37 sg_set_page(&sg1[num - 1], (void *)sg2, 0, 0); 37 sg_set_page(&sg1[num - 1], (void *)sg2, 0, 0);
38 sg1[num - 1].page_link &= ~0x02; 38 sg1[num - 1].page_link &= ~0x02;
39 sg1[num - 1].page_link |= 0x01;
39} 40}
40 41
41static inline struct scatterlist *scatterwalk_sg_next(struct scatterlist *sg) 42static inline struct scatterlist *scatterwalk_sg_next(struct scatterlist *sg)
@@ -43,7 +44,7 @@ static inline struct scatterlist *scatterwalk_sg_next(struct scatterlist *sg)
43 if (sg_is_last(sg)) 44 if (sg_is_last(sg))
44 return NULL; 45 return NULL;
45 46
46 return (++sg)->length ? sg : (void *)sg_page(sg); 47 return (++sg)->length ? sg : sg_chain_ptr(sg);
47} 48}
48 49
49static inline void scatterwalk_crypto_chain(struct scatterlist *head, 50static inline void scatterwalk_crypto_chain(struct scatterlist *head,
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
index 87578c109e48..49376aec2fbb 100644
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -600,7 +600,7 @@
600 {0x1002, 0x9645, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO2|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 600 {0x1002, 0x9645, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO2|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
601 {0x1002, 0x9647, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\ 601 {0x1002, 0x9647, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
602 {0x1002, 0x9648, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\ 602 {0x1002, 0x9648, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
603 {0x1002, 0x9649, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\ 603 {0x1002, 0x9649, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO2|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
604 {0x1002, 0x964a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 604 {0x1002, 0x964a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
605 {0x1002, 0x964b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 605 {0x1002, 0x964b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
606 {0x1002, 0x964c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 606 {0x1002, 0x964c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
diff --git a/include/linux/assoc_array.h b/include/linux/assoc_array.h
index 9a193b84238a..a89df3be1686 100644
--- a/include/linux/assoc_array.h
+++ b/include/linux/assoc_array.h
@@ -41,10 +41,10 @@ struct assoc_array_ops {
41 /* Is this the object we're looking for? */ 41 /* Is this the object we're looking for? */
42 bool (*compare_object)(const void *object, const void *index_key); 42 bool (*compare_object)(const void *object, const void *index_key);
43 43
44 /* How different are two objects, to a bit position in their keys? (or 44 /* How different is an object from an index key, to a bit position in
45 * -1 if they're the same) 45 * their keys? (or -1 if they're the same)
46 */ 46 */
47 int (*diff_objects)(const void *a, const void *b); 47 int (*diff_objects)(const void *object, const void *index_key);
48 48
49 /* Method to free an object. */ 49 /* Method to free an object. */
50 void (*free_object)(void *object); 50 void (*free_object)(void *object);
diff --git a/include/linux/auxvec.h b/include/linux/auxvec.h
index 669fef5c745a..3e0fbe441763 100644
--- a/include/linux/auxvec.h
+++ b/include/linux/auxvec.h
@@ -3,6 +3,6 @@
3 3
4#include <uapi/linux/auxvec.h> 4#include <uapi/linux/auxvec.h>
5 5
6#define AT_VECTOR_SIZE_BASE 19 /* NEW_AUX_ENT entries in auxiliary table */ 6#define AT_VECTOR_SIZE_BASE 20 /* NEW_AUX_ENT entries in auxiliary table */
7 /* number of "#define AT_.*" above, minus {AT_NULL, AT_IGNORE, AT_NOTELF} */ 7 /* number of "#define AT_.*" above, minus {AT_NULL, AT_IGNORE, AT_NOTELF} */
8#endif /* _LINUX_AUXVEC_H */ 8#endif /* _LINUX_AUXVEC_H */
diff --git a/include/linux/compiler-intel.h b/include/linux/compiler-intel.h
index 973ce10c40b6..dc1bd3dcf11f 100644
--- a/include/linux/compiler-intel.h
+++ b/include/linux/compiler-intel.h
@@ -28,8 +28,6 @@
28 28
29#endif 29#endif
30 30
31#define uninitialized_var(x) x
32
33#ifndef __HAVE_BUILTIN_BSWAP16__ 31#ifndef __HAVE_BUILTIN_BSWAP16__
34/* icc has this, but it's called _bswap16 */ 32/* icc has this, but it's called _bswap16 */
35#define __HAVE_BUILTIN_BSWAP16__ 33#define __HAVE_BUILTIN_BSWAP16__
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 57e87e749a48..bf72e9ac6de0 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -29,8 +29,10 @@ struct vfsmount;
29/* The hash is always the low bits of hash_len */ 29/* The hash is always the low bits of hash_len */
30#ifdef __LITTLE_ENDIAN 30#ifdef __LITTLE_ENDIAN
31 #define HASH_LEN_DECLARE u32 hash; u32 len; 31 #define HASH_LEN_DECLARE u32 hash; u32 len;
32 #define bytemask_from_count(cnt) (~(~0ul << (cnt)*8))
32#else 33#else
33 #define HASH_LEN_DECLARE u32 len; u32 hash; 34 #define HASH_LEN_DECLARE u32 len; u32 hash;
35 #define bytemask_from_count(cnt) (~(~0ul >> (cnt)*8))
34#endif 36#endif
35 37
36/* 38/*
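
bytemask_from_count(cnt), added to dcache.h above, builds a mask covering the first cnt bytes of a word so word-at-a-time name hashing can ignore the bytes past the end of a short component; the little-endian form is ~(~0ul << cnt*8). A worked example (valid for cnt smaller than the word size, since shifting by the full width is undefined):

#include <stdio.h>

/* Little-endian variant: set the low cnt bytes of an unsigned long. */
#define bytemask_from_count(cnt)	(~(~0ul << (cnt) * 8))

int main(void)
{
	unsigned long word = 0x21746163;	/* "cat!" packed little-endian */
	unsigned int  len  = 3;			/* only "cat" belongs to the name */

	printf("mask = %#lx\n", bytemask_from_count(len));		/* 0xffffff */
	printf("name = %#lx\n", word & bytemask_from_count(len));	/* 0x746163 = "cat" */
	return 0;
}
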
diff --git a/include/linux/efi.h b/include/linux/efi.h
index bc5687d0f315..11ce6784a196 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -801,6 +801,8 @@ struct efivar_entry {
801 struct efi_variable var; 801 struct efi_variable var;
802 struct list_head list; 802 struct list_head list;
803 struct kobject kobj; 803 struct kobject kobj;
804 bool scanning;
805 bool deleting;
804}; 806};
805 807
806 808
@@ -866,6 +868,8 @@ void efivar_run_worker(void);
866#if defined(CONFIG_EFI_VARS) || defined(CONFIG_EFI_VARS_MODULE) 868#if defined(CONFIG_EFI_VARS) || defined(CONFIG_EFI_VARS_MODULE)
867int efivars_sysfs_init(void); 869int efivars_sysfs_init(void);
868 870
871#define EFIVARS_DATA_SIZE_MAX 1024
872
869#endif /* CONFIG_EFI_VARS */ 873#endif /* CONFIG_EFI_VARS */
870 874
871#endif /* _LINUX_EFI_H */ 875#endif /* _LINUX_EFI_H */
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index 9abbe630c456..8c9b7a1c4138 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -248,6 +248,9 @@ struct ftrace_event_call {
248#ifdef CONFIG_PERF_EVENTS 248#ifdef CONFIG_PERF_EVENTS
249 int perf_refcount; 249 int perf_refcount;
250 struct hlist_head __percpu *perf_events; 250 struct hlist_head __percpu *perf_events;
251
252 int (*perf_perm)(struct ftrace_event_call *,
253 struct perf_event *);
251#endif 254#endif
252}; 255};
253 256
@@ -317,6 +320,19 @@ struct ftrace_event_file {
317 } \ 320 } \
318 early_initcall(trace_init_flags_##name); 321 early_initcall(trace_init_flags_##name);
319 322
323#define __TRACE_EVENT_PERF_PERM(name, expr...) \
324 static int perf_perm_##name(struct ftrace_event_call *tp_event, \
325 struct perf_event *p_event) \
326 { \
327 return ({ expr; }); \
328 } \
329 static int __init trace_init_perf_perm_##name(void) \
330 { \
331 event_##name.perf_perm = &perf_perm_##name; \
332 return 0; \
333 } \
334 early_initcall(trace_init_perf_perm_##name);
335
320#define PERF_MAX_TRACE_SIZE 2048 336#define PERF_MAX_TRACE_SIZE 2048
321 337
322#define MAX_FILTER_STR_VAL 256 /* Should handle KSYM_SYMBOL_LEN */ 338#define MAX_FILTER_STR_VAL 256 /* Should handle KSYM_SYMBOL_LEN */
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
index 656a27efb2c8..3ea2cf6b0e6c 100644
--- a/include/linux/gpio/driver.h
+++ b/include/linux/gpio/driver.h
@@ -2,9 +2,12 @@
2#define __LINUX_GPIO_DRIVER_H 2#define __LINUX_GPIO_DRIVER_H
3 3
4#include <linux/types.h> 4#include <linux/types.h>
5#include <linux/module.h>
5 6
6struct device; 7struct device;
7struct gpio_desc; 8struct gpio_desc;
9struct of_phandle_args;
10struct device_node;
8struct seq_file; 11struct seq_file;
9 12
10/** 13/**
@@ -125,6 +128,13 @@ extern struct gpio_chip *gpiochip_find(void *data,
125int gpiod_lock_as_irq(struct gpio_desc *desc); 128int gpiod_lock_as_irq(struct gpio_desc *desc);
126void gpiod_unlock_as_irq(struct gpio_desc *desc); 129void gpiod_unlock_as_irq(struct gpio_desc *desc);
127 130
131enum gpio_lookup_flags {
132 GPIO_ACTIVE_HIGH = (0 << 0),
133 GPIO_ACTIVE_LOW = (1 << 0),
134 GPIO_OPEN_DRAIN = (1 << 1),
135 GPIO_OPEN_SOURCE = (1 << 2),
136};
137
128/** 138/**
129 * Lookup table for associating GPIOs to specific devices and functions using 139 * Lookup table for associating GPIOs to specific devices and functions using
130 * platform data. 140 * platform data.
@@ -152,9 +162,9 @@ struct gpiod_lookup {
152 */ 162 */
153 unsigned int idx; 163 unsigned int idx;
154 /* 164 /*
155 * mask of GPIOF_* values 165 * mask of GPIO_* values
156 */ 166 */
157 unsigned long flags; 167 enum gpio_lookup_flags flags;
158}; 168};
159 169
160/* 170/*
diff --git a/include/linux/hid-sensor-hub.h b/include/linux/hid-sensor-hub.h
index a265af294ea4..b914ca3f57ba 100644
--- a/include/linux/hid-sensor-hub.h
+++ b/include/linux/hid-sensor-hub.h
@@ -21,6 +21,8 @@
21 21
22#include <linux/hid.h> 22#include <linux/hid.h>
23#include <linux/hid-sensor-ids.h> 23#include <linux/hid-sensor-ids.h>
24#include <linux/iio/iio.h>
25#include <linux/iio/trigger.h>
24 26
25/** 27/**
26 * struct hid_sensor_hub_attribute_info - Attribute info 28 * struct hid_sensor_hub_attribute_info - Attribute info
@@ -40,6 +42,8 @@ struct hid_sensor_hub_attribute_info {
40 s32 units; 42 s32 units;
41 s32 unit_expo; 43 s32 unit_expo;
42 s32 size; 44 s32 size;
45 s32 logical_minimum;
46 s32 logical_maximum;
43}; 47};
44 48
45/** 49/**
@@ -184,6 +188,7 @@ struct hid_sensor_common {
184 struct platform_device *pdev; 188 struct platform_device *pdev;
185 unsigned usage_id; 189 unsigned usage_id;
186 bool data_ready; 190 bool data_ready;
191 struct iio_trigger *trigger;
187 struct hid_sensor_hub_attribute_info poll; 192 struct hid_sensor_hub_attribute_info poll;
188 struct hid_sensor_hub_attribute_info report_state; 193 struct hid_sensor_hub_attribute_info report_state;
189 struct hid_sensor_hub_attribute_info power_state; 194 struct hid_sensor_hub_attribute_info power_state;
diff --git a/include/linux/hid-sensor-ids.h b/include/linux/hid-sensor-ids.h
index 4f945d3ed49f..8323775ac21d 100644
--- a/include/linux/hid-sensor-ids.h
+++ b/include/linux/hid-sensor-ids.h
@@ -117,4 +117,16 @@
117#define HID_USAGE_SENSOR_PROP_REPORT_STATE 0x200316 117#define HID_USAGE_SENSOR_PROP_REPORT_STATE 0x200316
118#define HID_USAGE_SENSOR_PROY_POWER_STATE 0x200319 118#define HID_USAGE_SENSOR_PROY_POWER_STATE 0x200319
119 119
120/* Power state enumerations */
121#define HID_USAGE_SENSOR_PROP_POWER_STATE_UNDEFINED_ENUM 0x00
122#define HID_USAGE_SENSOR_PROP_POWER_STATE_D0_FULL_POWER_ENUM 0x01
123#define HID_USAGE_SENSOR_PROP_POWER_STATE_D1_LOW_POWER_ENUM 0x02
124#define HID_USAGE_SENSOR_PROP_POWER_STATE_D2_STANDBY_WITH_WAKE_ENUM 0x03
125#define HID_USAGE_SENSOR_PROP_POWER_STATE_D3_SLEEP_WITH_WAKE_ENUM 0x04
126#define HID_USAGE_SENSOR_PROP_POWER_STATE_D4_POWER_OFF_ENUM 0x05
127
128/* Report State enumerations */
129#define HID_USAGE_SENSOR_PROP_REPORTING_STATE_NO_EVENTS_ENUM 0x00
130#define HID_USAGE_SENSOR_PROP_REPORTING_STATE_ALL_EVENTS_ENUM 0x01
131
120#endif 132#endif
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 9649ff0c63f8..bd7e98752222 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -142,7 +142,10 @@ static inline int dequeue_hwpoisoned_huge_page(struct page *page)
142 return 0; 142 return 0;
143} 143}
144 144
145#define isolate_huge_page(p, l) false 145static inline bool isolate_huge_page(struct page *page, struct list_head *list)
146{
147 return false;
148}
146#define putback_active_hugepage(p) do {} while (0) 149#define putback_active_hugepage(p) do {} while (0)
147#define is_hugepage_active(x) false 150#define is_hugepage_active(x) false
148 151
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 5d89d1b808a6..c56c350324e4 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -4,6 +4,7 @@
4#include <uapi/linux/ipv6.h> 4#include <uapi/linux/ipv6.h>
5 5
6#define ipv6_optlen(p) (((p)->hdrlen+1) << 3) 6#define ipv6_optlen(p) (((p)->hdrlen+1) << 3)
7#define ipv6_authlen(p) (((p)->hdrlen+2) << 2)
7/* 8/*
8 * This structure contains configuration options per IPv6 link. 9 * This structure contains configuration options per IPv6 link.
9 */ 10 */
diff --git a/include/linux/irqreturn.h b/include/linux/irqreturn.h
index 714ba08dc092..e374e369fb2f 100644
--- a/include/linux/irqreturn.h
+++ b/include/linux/irqreturn.h
@@ -14,6 +14,6 @@ enum irqreturn {
14}; 14};
15 15
16typedef enum irqreturn irqreturn_t; 16typedef enum irqreturn irqreturn_t;
17#define IRQ_RETVAL(x) ((x) != IRQ_NONE) 17#define IRQ_RETVAL(x) ((x) ? IRQ_HANDLED : IRQ_NONE)
18 18
19#endif 19#endif
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index d4e98d13eff4..ecb87544cc5d 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -193,7 +193,8 @@ extern int _cond_resched(void);
193 (__x < 0) ? -__x : __x; \ 193 (__x < 0) ? -__x : __x; \
194 }) 194 })
195 195
196#if defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP) 196#if defined(CONFIG_MMU) && \
197 (defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP))
197void might_fault(void); 198void might_fault(void);
198#else 199#else
199static inline void might_fault(void) { } 200static inline void might_fault(void) { }
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index d78d28a733b1..5fd33dc1fe3a 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -198,6 +198,9 @@ extern u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4];
198extern size_t vmcoreinfo_size; 198extern size_t vmcoreinfo_size;
199extern size_t vmcoreinfo_max_size; 199extern size_t vmcoreinfo_max_size;
200 200
201/* flag to track if kexec reboot is in progress */
202extern bool kexec_in_progress;
203
201int __init parse_crashkernel(char *cmdline, unsigned long long system_ram, 204int __init parse_crashkernel(char *cmdline, unsigned long long system_ram,
202 unsigned long long *crash_size, unsigned long long *crash_base); 205 unsigned long long *crash_size, unsigned long long *crash_base);
203int parse_crashkernel_high(char *cmdline, unsigned long long system_ram, 206int parse_crashkernel_high(char *cmdline, unsigned long long system_ram,
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 0e23c26485f4..9b503376738f 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -418,6 +418,7 @@ enum {
418 ATA_HORKAGE_DUMP_ID = (1 << 16), /* dump IDENTIFY data */ 418 ATA_HORKAGE_DUMP_ID = (1 << 16), /* dump IDENTIFY data */
419 ATA_HORKAGE_MAX_SEC_LBA48 = (1 << 17), /* Set max sects to 65535 */ 419 ATA_HORKAGE_MAX_SEC_LBA48 = (1 << 17), /* Set max sects to 65535 */
420 ATA_HORKAGE_ATAPI_DMADIR = (1 << 18), /* device requires dmadir */ 420 ATA_HORKAGE_ATAPI_DMADIR = (1 << 18), /* device requires dmadir */
421 ATA_HORKAGE_NO_NCQ_TRIM = (1 << 19), /* don't use queued TRIM */
421 422
422 /* DMA mask for user DMA control: User visible values; DO NOT 423 /* DMA mask for user DMA control: User visible values; DO NOT
423 renumber */ 424 renumber */
diff --git a/include/linux/lockref.h b/include/linux/lockref.h
index c8929c3832db..4bfde0e99ed5 100644
--- a/include/linux/lockref.h
+++ b/include/linux/lockref.h
@@ -19,7 +19,7 @@
19 19
20#define USE_CMPXCHG_LOCKREF \ 20#define USE_CMPXCHG_LOCKREF \
21 (IS_ENABLED(CONFIG_ARCH_USE_CMPXCHG_LOCKREF) && \ 21 (IS_ENABLED(CONFIG_ARCH_USE_CMPXCHG_LOCKREF) && \
22 IS_ENABLED(CONFIG_SMP) && !BLOATED_SPINLOCKS) 22 IS_ENABLED(CONFIG_SMP) && SPINLOCK_SIZE <= 4)
23 23
24struct lockref { 24struct lockref {
25 union { 25 union {
diff --git a/include/linux/math64.h b/include/linux/math64.h
index 69ed5f5e9f6e..c45c089bfdac 100644
--- a/include/linux/math64.h
+++ b/include/linux/math64.h
@@ -133,4 +133,34 @@ __iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
133 return ret; 133 return ret;
134} 134}
135 135
136#if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__)
137
138#ifndef mul_u64_u32_shr
139static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
140{
141 return (u64)(((unsigned __int128)a * mul) >> shift);
142}
143#endif /* mul_u64_u32_shr */
144
145#else
146
147#ifndef mul_u64_u32_shr
148static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
149{
150 u32 ah, al;
151 u64 ret;
152
153 al = a;
154 ah = a >> 32;
155
156 ret = ((u64)al * mul) >> shift;
157 if (ah)
158 ret += ((u64)ah * mul) << (32 - shift);
159
160 return ret;
161}
162#endif /* mul_u64_u32_shr */
163
164#endif
165
136#endif /* _LINUX_MATH64_H */ 166#endif /* _LINUX_MATH64_H */
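mul_u64_u32_shr() computes (a * mul) >> shift without losing the high bits of the intermediate product: with a 128-bit type it widens directly, otherwise it splits a into 32-bit halves and recombines the two partial products. A small usage sketch (illustrative, function name hypothetical) of the fixed-point scaling this helper is typically used for:

/* Scale a raw cycle count by a (mult, shift) pair, e.g. cycles -> ns. */
static u64 example_cycles_to_ns(u64 cycles, u32 mult, u32 shift)
{
	/* Same as (cycles * mult) >> shift, but the 64x32 product is not
	 * truncated to 64 bits before the shift is applied. */
	return mul_u64_u32_shr(cycles, mult, shift);
}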
diff --git a/include/linux/mfd/samsung/core.h b/include/linux/mfd/samsung/core.h
index 2d0c9071bcfb..cab2dd279076 100644
--- a/include/linux/mfd/samsung/core.h
+++ b/include/linux/mfd/samsung/core.h
@@ -39,7 +39,8 @@ enum sec_device_type {
39struct sec_pmic_dev { 39struct sec_pmic_dev {
40 struct device *dev; 40 struct device *dev;
41 struct sec_platform_data *pdata; 41 struct sec_platform_data *pdata;
42 struct regmap *regmap; 42 struct regmap *regmap_pmic;
43 struct regmap *regmap_rtc;
43 struct i2c_client *i2c; 44 struct i2c_client *i2c;
44 struct i2c_client *rtc; 45 struct i2c_client *rtc;
45 46
diff --git a/include/linux/micrel_phy.h b/include/linux/micrel_phy.h
index ad05ce60c1c9..2e5b194b9b19 100644
--- a/include/linux/micrel_phy.h
+++ b/include/linux/micrel_phy.h
@@ -22,6 +22,8 @@
22#define PHY_ID_KSZ8021 0x00221555 22#define PHY_ID_KSZ8021 0x00221555
23#define PHY_ID_KSZ8031 0x00221556 23#define PHY_ID_KSZ8031 0x00221556
24#define PHY_ID_KSZ8041 0x00221510 24#define PHY_ID_KSZ8041 0x00221510
25/* undocumented */
26#define PHY_ID_KSZ8041RNLI 0x00221537
25#define PHY_ID_KSZ8051 0x00221550 27#define PHY_ID_KSZ8051 0x00221550
26/* same id: ks8001 Rev. A/B, and ks8721 Rev 3. */ 28/* same id: ks8001 Rev. A/B, and ks8721 Rev 3. */
27#define PHY_ID_KSZ8001 0x0022161A 29#define PHY_ID_KSZ8001 0x0022161A
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index f5096b58b20d..f015c059e159 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -55,7 +55,8 @@ extern int migrate_huge_page_move_mapping(struct address_space *mapping,
55 struct page *newpage, struct page *page); 55 struct page *newpage, struct page *page);
56extern int migrate_page_move_mapping(struct address_space *mapping, 56extern int migrate_page_move_mapping(struct address_space *mapping,
57 struct page *newpage, struct page *page, 57 struct page *newpage, struct page *page,
58 struct buffer_head *head, enum migrate_mode mode); 58 struct buffer_head *head, enum migrate_mode mode,
59 int extra_count);
59#else 60#else
60 61
61static inline void putback_lru_pages(struct list_head *l) {} 62static inline void putback_lru_pages(struct list_head *l) {}
@@ -90,10 +91,19 @@ static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
90#endif /* CONFIG_MIGRATION */ 91#endif /* CONFIG_MIGRATION */
91 92
92#ifdef CONFIG_NUMA_BALANCING 93#ifdef CONFIG_NUMA_BALANCING
94extern bool pmd_trans_migrating(pmd_t pmd);
95extern void wait_migrate_huge_page(struct anon_vma *anon_vma, pmd_t *pmd);
93extern int migrate_misplaced_page(struct page *page, 96extern int migrate_misplaced_page(struct page *page,
94 struct vm_area_struct *vma, int node); 97 struct vm_area_struct *vma, int node);
95extern bool migrate_ratelimited(int node); 98extern bool migrate_ratelimited(int node);
96#else 99#else
100static inline bool pmd_trans_migrating(pmd_t pmd)
101{
102 return false;
103}
104static inline void wait_migrate_huge_page(struct anon_vma *anon_vma, pmd_t *pmd)
105{
106}
97static inline int migrate_misplaced_page(struct page *page, 107static inline int migrate_misplaced_page(struct page *page,
98 struct vm_area_struct *vma, int node) 108 struct vm_area_struct *vma, int node)
99{ 109{
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 1cedd000cf29..35527173cf50 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1317,7 +1317,7 @@ static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long a
1317#endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */ 1317#endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
1318 1318
1319#if USE_SPLIT_PTE_PTLOCKS 1319#if USE_SPLIT_PTE_PTLOCKS
1320#if BLOATED_SPINLOCKS 1320#if ALLOC_SPLIT_PTLOCKS
1321extern bool ptlock_alloc(struct page *page); 1321extern bool ptlock_alloc(struct page *page);
1322extern void ptlock_free(struct page *page); 1322extern void ptlock_free(struct page *page);
1323 1323
@@ -1325,7 +1325,7 @@ static inline spinlock_t *ptlock_ptr(struct page *page)
1325{ 1325{
1326 return page->ptl; 1326 return page->ptl;
1327} 1327}
1328#else /* BLOATED_SPINLOCKS */ 1328#else /* ALLOC_SPLIT_PTLOCKS */
1329static inline bool ptlock_alloc(struct page *page) 1329static inline bool ptlock_alloc(struct page *page)
1330{ 1330{
1331 return true; 1331 return true;
@@ -1339,7 +1339,7 @@ static inline spinlock_t *ptlock_ptr(struct page *page)
1339{ 1339{
1340 return &page->ptl; 1340 return &page->ptl;
1341} 1341}
1342#endif /* BLOATED_SPINLOCKS */ 1342#endif /* ALLOC_SPLIT_PTLOCKS */
1343 1343
1344static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd) 1344static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
1345{ 1345{
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index bd299418a934..290901a8c1de 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -26,6 +26,7 @@ struct address_space;
26#define USE_SPLIT_PTE_PTLOCKS (NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS) 26#define USE_SPLIT_PTE_PTLOCKS (NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS)
27#define USE_SPLIT_PMD_PTLOCKS (USE_SPLIT_PTE_PTLOCKS && \ 27#define USE_SPLIT_PMD_PTLOCKS (USE_SPLIT_PTE_PTLOCKS && \
28 IS_ENABLED(CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK)) 28 IS_ENABLED(CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK))
29#define ALLOC_SPLIT_PTLOCKS (SPINLOCK_SIZE > BITS_PER_LONG/8)
29 30
30/* 31/*
31 * Each physical page in the system has a struct page associated with 32 * Each physical page in the system has a struct page associated with
@@ -155,7 +156,7 @@ struct page {
155 * system if PG_buddy is set. 156 * system if PG_buddy is set.
156 */ 157 */
157#if USE_SPLIT_PTE_PTLOCKS 158#if USE_SPLIT_PTE_PTLOCKS
158#if BLOATED_SPINLOCKS 159#if ALLOC_SPLIT_PTLOCKS
159 spinlock_t *ptl; 160 spinlock_t *ptl;
160#else 161#else
161 spinlock_t ptl; 162 spinlock_t ptl;
@@ -443,6 +444,14 @@ struct mm_struct {
443 /* numa_scan_seq prevents two threads setting pte_numa */ 444 /* numa_scan_seq prevents two threads setting pte_numa */
444 int numa_scan_seq; 445 int numa_scan_seq;
445#endif 446#endif
447#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
448 /*
449 * An operation with batched TLB flushing is going on. Anything that
450 * can move process memory needs to flush the TLB when moving a
451 * PROT_NONE or PROT_NUMA mapped page.
452 */
453 bool tlb_flush_pending;
454#endif
446 struct uprobes_state uprobes_state; 455 struct uprobes_state uprobes_state;
447}; 456};
448 457
@@ -459,4 +468,45 @@ static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
459 return mm->cpu_vm_mask_var; 468 return mm->cpu_vm_mask_var;
460} 469}
461 470
471#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
472/*
473 * Memory barriers to keep this state in sync are graciously provided by
474 * the page table locks, outside of which no page table modifications happen.
475 * The barriers below prevent the compiler from re-ordering the instructions
476 * around the memory barriers that are already present in the code.
477 */
478static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
479{
480 barrier();
481 return mm->tlb_flush_pending;
482}
483static inline void set_tlb_flush_pending(struct mm_struct *mm)
484{
485 mm->tlb_flush_pending = true;
486
487 /*
488 * Guarantee that the tlb_flush_pending store does not leak into the
489 * critical section updating the page tables
490 */
491 smp_mb__before_spinlock();
492}
493/* Clearing is done after a TLB flush, which also provides a barrier. */
494static inline void clear_tlb_flush_pending(struct mm_struct *mm)
495{
496 barrier();
497 mm->tlb_flush_pending = false;
498}
499#else
500static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
501{
502 return false;
503}
504static inline void set_tlb_flush_pending(struct mm_struct *mm)
505{
506}
507static inline void clear_tlb_flush_pending(struct mm_struct *mm)
508{
509}
510#endif
511
462#endif /* _LINUX_MM_TYPES_H */ 512#endif /* _LINUX_MM_TYPES_H */
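mm->tlb_flush_pending lets bulk protection changes signal concurrent walkers (NUMA balancing, compaction) that PTEs were modified but the TLB flush has not happened yet; the intended ordering is set, modify, flush, clear. A condensed sketch of that ordering (illustrative only; the real users are the change_protection() paths):

static void example_change_prot_range(struct mm_struct *mm,
				      struct vm_area_struct *vma,
				      unsigned long start, unsigned long end)
{
	set_tlb_flush_pending(mm);          /* visible before PTEs change    */
	/* ... rewrite the page table entries under the page table lock ... */
	flush_tlb_range(vma, start, end);   /* make the change globally seen */
	clear_tlb_flush_pending(mm);        /* flush done, drop the flag     */
}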
diff --git a/include/linux/net.h b/include/linux/net.h
index 4bcee94cef93..69be3e6079c8 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -181,7 +181,7 @@ struct proto_ops {
181 int offset, size_t size, int flags); 181 int offset, size_t size, int flags);
182 ssize_t (*splice_read)(struct socket *sock, loff_t *ppos, 182 ssize_t (*splice_read)(struct socket *sock, loff_t *ppos,
183 struct pipe_inode_info *pipe, size_t len, unsigned int flags); 183 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
184 void (*set_peek_off)(struct sock *sk, int val); 184 int (*set_peek_off)(struct sock *sk, int val);
185}; 185};
186 186
187#define DECLARE_SOCKADDR(type, dst, src) \ 187#define DECLARE_SOCKADDR(type, dst, src) \
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 7f0ed423a360..ce2a1f5f9a1e 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -769,7 +769,8 @@ struct netdev_phys_port_id {
769 * (can also return NETDEV_TX_LOCKED iff NETIF_F_LLTX) 769 * (can also return NETDEV_TX_LOCKED iff NETIF_F_LLTX)
770 * Required can not be NULL. 770 * Required can not be NULL.
771 * 771 *
772 * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb); 772 * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
773 * void *accel_priv);
773 * Called to decide which queue to use when device supports multiple 774 * Called to decide which queue to use when device supports multiple
774 * transmit queues. 775 * transmit queues.
775 * 776 *
@@ -990,7 +991,8 @@ struct net_device_ops {
990 netdev_tx_t (*ndo_start_xmit) (struct sk_buff *skb, 991 netdev_tx_t (*ndo_start_xmit) (struct sk_buff *skb,
991 struct net_device *dev); 992 struct net_device *dev);
992 u16 (*ndo_select_queue)(struct net_device *dev, 993 u16 (*ndo_select_queue)(struct net_device *dev,
993 struct sk_buff *skb); 994 struct sk_buff *skb,
995 void *accel_priv);
994 void (*ndo_change_rx_flags)(struct net_device *dev, 996 void (*ndo_change_rx_flags)(struct net_device *dev,
995 int flags); 997 int flags);
996 void (*ndo_set_rx_mode)(struct net_device *dev); 998 void (*ndo_set_rx_mode)(struct net_device *dev);
@@ -1255,7 +1257,7 @@ struct net_device {
1255 unsigned char perm_addr[MAX_ADDR_LEN]; /* permanent hw address */ 1257 unsigned char perm_addr[MAX_ADDR_LEN]; /* permanent hw address */
1256 unsigned char addr_assign_type; /* hw address assignment type */ 1258 unsigned char addr_assign_type; /* hw address assignment type */
1257 unsigned char addr_len; /* hardware address length */ 1259 unsigned char addr_len; /* hardware address length */
1258 unsigned char neigh_priv_len; 1260 unsigned short neigh_priv_len;
1259 unsigned short dev_id; /* Used to differentiate devices 1261 unsigned short dev_id; /* Used to differentiate devices
1260 * that share the same link 1262 * that share the same link
1261 * layer address 1263 * layer address
@@ -1529,7 +1531,8 @@ static inline void netdev_for_each_tx_queue(struct net_device *dev,
1529} 1531}
1530 1532
1531struct netdev_queue *netdev_pick_tx(struct net_device *dev, 1533struct netdev_queue *netdev_pick_tx(struct net_device *dev,
1532 struct sk_buff *skb); 1534 struct sk_buff *skb,
1535 void *accel_priv);
1533u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb); 1536u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb);
1534 1537
1535/* 1538/*
@@ -1819,6 +1822,7 @@ int dev_close(struct net_device *dev);
1819void dev_disable_lro(struct net_device *dev); 1822void dev_disable_lro(struct net_device *dev);
1820int dev_loopback_xmit(struct sk_buff *newskb); 1823int dev_loopback_xmit(struct sk_buff *newskb);
1821int dev_queue_xmit(struct sk_buff *skb); 1824int dev_queue_xmit(struct sk_buff *skb);
1825int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv);
1822int register_netdevice(struct net_device *dev); 1826int register_netdevice(struct net_device *dev);
1823void unregister_netdevice_queue(struct net_device *dev, struct list_head *head); 1827void unregister_netdevice_queue(struct net_device *dev, struct list_head *head);
1824void unregister_netdevice_many(struct list_head *head); 1828void unregister_netdevice_many(struct list_head *head);
@@ -1912,6 +1916,15 @@ static inline int dev_parse_header(const struct sk_buff *skb,
1912 return dev->header_ops->parse(skb, haddr); 1916 return dev->header_ops->parse(skb, haddr);
1913} 1917}
1914 1918
1919static inline int dev_rebuild_header(struct sk_buff *skb)
1920{
1921 const struct net_device *dev = skb->dev;
1922
1923 if (!dev->header_ops || !dev->header_ops->rebuild)
1924 return 0;
1925 return dev->header_ops->rebuild(skb);
1926}
1927
1915typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len); 1928typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len);
1916int register_gifconf(unsigned int family, gifconf_func_t *gifconf); 1929int register_gifconf(unsigned int family, gifconf_func_t *gifconf);
1917static inline int unregister_gifconf(unsigned int family) 1930static inline int unregister_gifconf(unsigned int family)
@@ -2417,7 +2430,7 @@ int dev_change_carrier(struct net_device *, bool new_carrier);
2417int dev_get_phys_port_id(struct net_device *dev, 2430int dev_get_phys_port_id(struct net_device *dev,
2418 struct netdev_phys_port_id *ppid); 2431 struct netdev_phys_port_id *ppid);
2419int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, 2432int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
2420 struct netdev_queue *txq, void *accel_priv); 2433 struct netdev_queue *txq);
2421int dev_forward_skb(struct net_device *dev, struct sk_buff *skb); 2434int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
2422 2435
2423extern int netdev_budget; 2436extern int netdev_budget;
@@ -3008,6 +3021,19 @@ static inline void netif_set_gso_max_size(struct net_device *dev,
3008 dev->gso_max_size = size; 3021 dev->gso_max_size = size;
3009} 3022}
3010 3023
3024static inline void skb_gso_error_unwind(struct sk_buff *skb, __be16 protocol,
3025 int pulled_hlen, u16 mac_offset,
3026 int mac_len)
3027{
3028 skb->protocol = protocol;
3029 skb->encapsulation = 1;
3030 skb_push(skb, pulled_hlen);
3031 skb_reset_transport_header(skb);
3032 skb->mac_header = mac_offset;
3033 skb->network_header = skb->mac_header + mac_len;
3034 skb->mac_len = mac_len;
3035}
3036
3011static inline bool netif_is_macvlan(struct net_device *dev) 3037static inline bool netif_is_macvlan(struct net_device *dev)
3012{ 3038{
3013 return dev->priv_flags & IFF_MACVLAN; 3039 return dev->priv_flags & IFF_MACVLAN;
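The ndo_select_queue()/netdev_pick_tx() signature change threads an opaque accel_priv pointer through transmit queue selection (dev_queue_xmit_accel() supplies it for the L2-forwarding/macvlan offload path); drivers implementing the hook need the extra parameter even if they ignore it. An updated callback sketch (illustrative):

static u16 example_select_queue(struct net_device *dev, struct sk_buff *skb,
				void *accel_priv)
{
	/* accel_priv is only non-NULL on the accelerated forwarding path;
	 * most drivers can ignore it and keep their existing policy. */
	return skb_tx_hash(dev, skb);
}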
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index c1637062c1ce..12c2cb947df5 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -413,16 +413,6 @@ enum lock_type4 {
413#define NFS4_VERSION 4 413#define NFS4_VERSION 4
414#define NFS4_MINOR_VERSION 0 414#define NFS4_MINOR_VERSION 0
415 415
416#if defined(CONFIG_NFS_V4_2)
417#define NFS4_MAX_MINOR_VERSION 2
418#else
419#if defined(CONFIG_NFS_V4_1)
420#define NFS4_MAX_MINOR_VERSION 1
421#else
422#define NFS4_MAX_MINOR_VERSION 0
423#endif /* CONFIG_NFS_V4_1 */
424#endif /* CONFIG_NFS_V4_2 */
425
426#define NFS4_DEBUG 1 416#define NFS4_DEBUG 1
427 417
428/* Index of predefined Linux client operations */ 418/* Index of predefined Linux client operations */
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index 14a48207a304..48997374eaf0 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -507,24 +507,6 @@ extern int nfs_mountpoint_expiry_timeout;
507extern void nfs_release_automount_timer(void); 507extern void nfs_release_automount_timer(void);
508 508
509/* 509/*
510 * linux/fs/nfs/nfs4proc.c
511 */
512#ifdef CONFIG_NFS_V4_SECURITY_LABEL
513extern struct nfs4_label *nfs4_label_alloc(struct nfs_server *server, gfp_t flags);
514static inline void nfs4_label_free(struct nfs4_label *label)
515{
516 if (label) {
517 kfree(label->label);
518 kfree(label);
519 }
520 return;
521}
522#else
523static inline struct nfs4_label *nfs4_label_alloc(struct nfs_server *server, gfp_t flags) { return NULL; }
524static inline void nfs4_label_free(void *label) {}
525#endif
526
527/*
528 * linux/fs/nfs/unlink.c 510 * linux/fs/nfs/unlink.c
529 */ 511 */
530extern void nfs_complete_unlink(struct dentry *dentry, struct inode *); 512extern void nfs_complete_unlink(struct dentry *dentry, struct inode *);
diff --git a/include/linux/padata.h b/include/linux/padata.h
index 86292beebfe2..438694650471 100644
--- a/include/linux/padata.h
+++ b/include/linux/padata.h
@@ -129,10 +129,9 @@ struct parallel_data {
129 struct padata_serial_queue __percpu *squeue; 129 struct padata_serial_queue __percpu *squeue;
130 atomic_t reorder_objects; 130 atomic_t reorder_objects;
131 atomic_t refcnt; 131 atomic_t refcnt;
132 atomic_t seq_nr;
132 struct padata_cpumask cpumask; 133 struct padata_cpumask cpumask;
133 spinlock_t lock ____cacheline_aligned; 134 spinlock_t lock ____cacheline_aligned;
134 spinlock_t seq_lock;
135 unsigned int seq_nr;
136 unsigned int processed; 135 unsigned int processed;
137 struct timer_list timer; 136 struct timer_list timer;
138}; 137};
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 1084a15175e0..a13d6825e586 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -960,6 +960,7 @@ void pci_update_resource(struct pci_dev *dev, int resno);
960int __must_check pci_assign_resource(struct pci_dev *dev, int i); 960int __must_check pci_assign_resource(struct pci_dev *dev, int i);
961int __must_check pci_reassign_resource(struct pci_dev *dev, int i, resource_size_t add_size, resource_size_t align); 961int __must_check pci_reassign_resource(struct pci_dev *dev, int i, resource_size_t add_size, resource_size_t align);
962int pci_select_bars(struct pci_dev *dev, unsigned long flags); 962int pci_select_bars(struct pci_dev *dev, unsigned long flags);
963bool pci_device_is_present(struct pci_dev *pdev);
963 964
964/* ROM control related routines */ 965/* ROM control related routines */
965int pci_enable_rom(struct pci_dev *pdev); 966int pci_enable_rom(struct pci_dev *pdev);
@@ -1567,65 +1568,65 @@ enum pci_fixup_pass {
1567/* Anonymous variables would be nice... */ 1568/* Anonymous variables would be nice... */
1568#define DECLARE_PCI_FIXUP_SECTION(section, name, vendor, device, class, \ 1569#define DECLARE_PCI_FIXUP_SECTION(section, name, vendor, device, class, \
1569 class_shift, hook) \ 1570 class_shift, hook) \
1570 static const struct pci_fixup __pci_fixup_##name __used \ 1571 static const struct pci_fixup __PASTE(__pci_fixup_##name,__LINE__) __used \
1571 __attribute__((__section__(#section), aligned((sizeof(void *))))) \ 1572 __attribute__((__section__(#section), aligned((sizeof(void *))))) \
1572 = { vendor, device, class, class_shift, hook }; 1573 = { vendor, device, class, class_shift, hook };
1573 1574
1574#define DECLARE_PCI_FIXUP_CLASS_EARLY(vendor, device, class, \ 1575#define DECLARE_PCI_FIXUP_CLASS_EARLY(vendor, device, class, \
1575 class_shift, hook) \ 1576 class_shift, hook) \
1576 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early, \ 1577 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early, \
1577 vendor##device##hook, vendor, device, class, class_shift, hook) 1578 hook, vendor, device, class, class_shift, hook)
1578#define DECLARE_PCI_FIXUP_CLASS_HEADER(vendor, device, class, \ 1579#define DECLARE_PCI_FIXUP_CLASS_HEADER(vendor, device, class, \
1579 class_shift, hook) \ 1580 class_shift, hook) \
1580 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_header, \ 1581 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_header, \
1581 vendor##device##hook, vendor, device, class, class_shift, hook) 1582 hook, vendor, device, class, class_shift, hook)
1582#define DECLARE_PCI_FIXUP_CLASS_FINAL(vendor, device, class, \ 1583#define DECLARE_PCI_FIXUP_CLASS_FINAL(vendor, device, class, \
1583 class_shift, hook) \ 1584 class_shift, hook) \
1584 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_final, \ 1585 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_final, \
1585 vendor##device##hook, vendor, device, class, class_shift, hook) 1586 hook, vendor, device, class, class_shift, hook)
1586#define DECLARE_PCI_FIXUP_CLASS_ENABLE(vendor, device, class, \ 1587#define DECLARE_PCI_FIXUP_CLASS_ENABLE(vendor, device, class, \
1587 class_shift, hook) \ 1588 class_shift, hook) \
1588 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_enable, \ 1589 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_enable, \
1589 vendor##device##hook, vendor, device, class, class_shift, hook) 1590 hook, vendor, device, class, class_shift, hook)
1590#define DECLARE_PCI_FIXUP_CLASS_RESUME(vendor, device, class, \ 1591#define DECLARE_PCI_FIXUP_CLASS_RESUME(vendor, device, class, \
1591 class_shift, hook) \ 1592 class_shift, hook) \
1592 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume, \ 1593 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume, \
1593 resume##vendor##device##hook, vendor, device, class, \ 1594 resume##hook, vendor, device, class, \
1594 class_shift, hook) 1595 class_shift, hook)
1595#define DECLARE_PCI_FIXUP_CLASS_RESUME_EARLY(vendor, device, class, \ 1596#define DECLARE_PCI_FIXUP_CLASS_RESUME_EARLY(vendor, device, class, \
1596 class_shift, hook) \ 1597 class_shift, hook) \
1597 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early, \ 1598 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early, \
1598 resume_early##vendor##device##hook, vendor, device, \ 1599 resume_early##hook, vendor, device, \
1599 class, class_shift, hook) 1600 class, class_shift, hook)
1600#define DECLARE_PCI_FIXUP_CLASS_SUSPEND(vendor, device, class, \ 1601#define DECLARE_PCI_FIXUP_CLASS_SUSPEND(vendor, device, class, \
1601 class_shift, hook) \ 1602 class_shift, hook) \
1602 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend, \ 1603 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend, \
1603 suspend##vendor##device##hook, vendor, device, class, \ 1604 suspend##hook, vendor, device, class, \
1604 class_shift, hook) 1605 class_shift, hook)
1605 1606
1606#define DECLARE_PCI_FIXUP_EARLY(vendor, device, hook) \ 1607#define DECLARE_PCI_FIXUP_EARLY(vendor, device, hook) \
1607 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early, \ 1608 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early, \
1608 vendor##device##hook, vendor, device, PCI_ANY_ID, 0, hook) 1609 hook, vendor, device, PCI_ANY_ID, 0, hook)
1609#define DECLARE_PCI_FIXUP_HEADER(vendor, device, hook) \ 1610#define DECLARE_PCI_FIXUP_HEADER(vendor, device, hook) \
1610 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_header, \ 1611 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_header, \
1611 vendor##device##hook, vendor, device, PCI_ANY_ID, 0, hook) 1612 hook, vendor, device, PCI_ANY_ID, 0, hook)
1612#define DECLARE_PCI_FIXUP_FINAL(vendor, device, hook) \ 1613#define DECLARE_PCI_FIXUP_FINAL(vendor, device, hook) \
1613 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_final, \ 1614 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_final, \
1614 vendor##device##hook, vendor, device, PCI_ANY_ID, 0, hook) 1615 hook, vendor, device, PCI_ANY_ID, 0, hook)
1615#define DECLARE_PCI_FIXUP_ENABLE(vendor, device, hook) \ 1616#define DECLARE_PCI_FIXUP_ENABLE(vendor, device, hook) \
1616 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_enable, \ 1617 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_enable, \
1617 vendor##device##hook, vendor, device, PCI_ANY_ID, 0, hook) 1618 hook, vendor, device, PCI_ANY_ID, 0, hook)
1618#define DECLARE_PCI_FIXUP_RESUME(vendor, device, hook) \ 1619#define DECLARE_PCI_FIXUP_RESUME(vendor, device, hook) \
1619 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume, \ 1620 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume, \
1620 resume##vendor##device##hook, vendor, device, \ 1621 resume##hook, vendor, device, \
1621 PCI_ANY_ID, 0, hook) 1622 PCI_ANY_ID, 0, hook)
1622#define DECLARE_PCI_FIXUP_RESUME_EARLY(vendor, device, hook) \ 1623#define DECLARE_PCI_FIXUP_RESUME_EARLY(vendor, device, hook) \
1623 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early, \ 1624 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early, \
1624 resume_early##vendor##device##hook, vendor, device, \ 1625 resume_early##hook, vendor, device, \
1625 PCI_ANY_ID, 0, hook) 1626 PCI_ANY_ID, 0, hook)
1626#define DECLARE_PCI_FIXUP_SUSPEND(vendor, device, hook) \ 1627#define DECLARE_PCI_FIXUP_SUSPEND(vendor, device, hook) \
1627 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend, \ 1628 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend, \
1628 suspend##vendor##device##hook, vendor, device, \ 1629 suspend##hook, vendor, device, \
1629 PCI_ANY_ID, 0, hook) 1630 PCI_ANY_ID, 0, hook)
1630 1631
1631#ifdef CONFIG_PCI_QUIRKS 1632#ifdef CONFIG_PCI_QUIRKS
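DECLARE_PCI_FIXUP_SECTION() now names the generated struct pci_fixup after the hook plus __LINE__ instead of pasting the vendor and device tokens into the symbol, presumably so that declarations whose ID arguments are macros, or that reuse the same hook, cannot produce colliding identifiers. Usage at call sites is unchanged (illustrative example; the quirk function and device ID are hypothetical):

static void quirk_example_no_msi(struct pci_dev *dev)
{
	dev->no_msi = 1;   /* this device's MSI implementation is broken */
}
/* Expands to a fixup symbol derived from the hook name and source line,
 * even though PCI_VENDOR_ID_INTEL is itself a macro. */
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1234, quirk_example_no_msi);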
diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h
index 57e890abe1f0..a5fc7d01aad6 100644
--- a/include/linux/percpu-defs.h
+++ b/include/linux/percpu-defs.h
@@ -69,6 +69,7 @@
69 __PCPU_DUMMY_ATTRS char __pcpu_scope_##name; \ 69 __PCPU_DUMMY_ATTRS char __pcpu_scope_##name; \
70 extern __PCPU_DUMMY_ATTRS char __pcpu_unique_##name; \ 70 extern __PCPU_DUMMY_ATTRS char __pcpu_unique_##name; \
71 __PCPU_DUMMY_ATTRS char __pcpu_unique_##name; \ 71 __PCPU_DUMMY_ATTRS char __pcpu_unique_##name; \
72 extern __PCPU_ATTRS(sec) __typeof__(type) name; \
72 __PCPU_ATTRS(sec) PER_CPU_DEF_ATTRIBUTES __weak \ 73 __PCPU_ATTRS(sec) PER_CPU_DEF_ATTRIBUTES __weak \
73 __typeof__(type) name 74 __typeof__(type) name
74#else 75#else
diff --git a/include/linux/pstore.h b/include/linux/pstore.h
index abd437d0a8a7..ece0c6bbfcc5 100644
--- a/include/linux/pstore.h
+++ b/include/linux/pstore.h
@@ -51,6 +51,7 @@ struct pstore_info {
51 char *buf; 51 char *buf;
52 size_t bufsize; 52 size_t bufsize;
53 struct mutex read_mutex; /* serialize open/read/close */ 53 struct mutex read_mutex; /* serialize open/read/close */
54 int flags;
54 int (*open)(struct pstore_info *psi); 55 int (*open)(struct pstore_info *psi);
55 int (*close)(struct pstore_info *psi); 56 int (*close)(struct pstore_info *psi);
56 ssize_t (*read)(u64 *id, enum pstore_type_id *type, 57 ssize_t (*read)(u64 *id, enum pstore_type_id *type,
@@ -70,6 +71,8 @@ struct pstore_info {
70 void *data; 71 void *data;
71}; 72};
72 73
74#define PSTORE_FLAGS_FRAGILE 1
75
73#ifdef CONFIG_PSTORE 76#ifdef CONFIG_PSTORE
74extern int pstore_register(struct pstore_info *); 77extern int pstore_register(struct pstore_info *);
75extern bool pstore_cannot_block_path(enum kmsg_dump_reason reason); 78extern bool pstore_cannot_block_path(enum kmsg_dump_reason reason);
diff --git a/include/linux/reboot.h b/include/linux/reboot.h
index 8e00f9f6f963..9e7db9e73cc1 100644
--- a/include/linux/reboot.h
+++ b/include/linux/reboot.h
@@ -43,6 +43,7 @@ extern int unregister_reboot_notifier(struct notifier_block *);
43 * Architecture-specific implementations of sys_reboot commands. 43 * Architecture-specific implementations of sys_reboot commands.
44 */ 44 */
45 45
46extern void migrate_to_reboot_cpu(void);
46extern void machine_restart(char *cmd); 47extern void machine_restart(char *cmd);
47extern void machine_halt(void); 48extern void machine_halt(void);
48extern void machine_power_off(void); 49extern void machine_power_off(void);
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index 939428ad25ac..8e3e66ac0a52 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -24,6 +24,11 @@ extern int rtnl_trylock(void);
24extern int rtnl_is_locked(void); 24extern int rtnl_is_locked(void);
25#ifdef CONFIG_PROVE_LOCKING 25#ifdef CONFIG_PROVE_LOCKING
26extern int lockdep_rtnl_is_held(void); 26extern int lockdep_rtnl_is_held(void);
27#else
28static inline int lockdep_rtnl_is_held(void)
29{
30 return 1;
31}
27#endif /* #ifdef CONFIG_PROVE_LOCKING */ 32#endif /* #ifdef CONFIG_PROVE_LOCKING */
28 33
29/** 34/**
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 7e35d4b9e14a..53f97eb8dbc7 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -440,8 +440,6 @@ struct task_cputime {
440 .sum_exec_runtime = 0, \ 440 .sum_exec_runtime = 0, \
441 } 441 }
442 442
443#define PREEMPT_ENABLED (PREEMPT_NEED_RESCHED)
444
445#ifdef CONFIG_PREEMPT_COUNT 443#ifdef CONFIG_PREEMPT_COUNT
446#define PREEMPT_DISABLED (1 + PREEMPT_ENABLED) 444#define PREEMPT_DISABLED (1 + PREEMPT_ENABLED)
447#else 445#else
@@ -831,8 +829,6 @@ struct sched_domain {
831 unsigned int balance_interval; /* initialise to 1. units in ms. */ 829 unsigned int balance_interval; /* initialise to 1. units in ms. */
832 unsigned int nr_balance_failed; /* initialise to 0 */ 830 unsigned int nr_balance_failed; /* initialise to 0 */
833 831
834 u64 last_update;
835
836 /* idle_balance() stats */ 832 /* idle_balance() stats */
837 u64 max_newidle_lb_cost; 833 u64 max_newidle_lb_cost;
838 unsigned long next_decay_max_lb_cost; 834 unsigned long next_decay_max_lb_cost;
@@ -934,7 +930,8 @@ struct pipe_inode_info;
934struct uts_namespace; 930struct uts_namespace;
935 931
936struct load_weight { 932struct load_weight {
937 unsigned long weight, inv_weight; 933 unsigned long weight;
934 u32 inv_weight;
938}; 935};
939 936
940struct sched_avg { 937struct sched_avg {
diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index 30aa0dc60d75..9d55438bc4ad 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -47,6 +47,8 @@ extern int shmem_init(void);
47extern int shmem_fill_super(struct super_block *sb, void *data, int silent); 47extern int shmem_fill_super(struct super_block *sb, void *data, int silent);
48extern struct file *shmem_file_setup(const char *name, 48extern struct file *shmem_file_setup(const char *name,
49 loff_t size, unsigned long flags); 49 loff_t size, unsigned long flags);
50extern struct file *shmem_kernel_file_setup(const char *name, loff_t size,
51 unsigned long flags);
50extern int shmem_zero_setup(struct vm_area_struct *); 52extern int shmem_zero_setup(struct vm_area_struct *);
51extern int shmem_lock(struct file *file, int lock, struct user_struct *user); 53extern int shmem_lock(struct file *file, int lock, struct user_struct *user);
52extern void shmem_unlock_mapping(struct address_space *mapping); 54extern void shmem_unlock_mapping(struct address_space *mapping);
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index bec1cc7d5e3c..6f69b3f914fb 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1638,6 +1638,11 @@ static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
1638 skb->mac_header += offset; 1638 skb->mac_header += offset;
1639} 1639}
1640 1640
1641static inline void skb_pop_mac_header(struct sk_buff *skb)
1642{
1643 skb->mac_header = skb->network_header;
1644}
1645
1641static inline void skb_probe_transport_header(struct sk_buff *skb, 1646static inline void skb_probe_transport_header(struct sk_buff *skb,
1642 const int offset_hint) 1647 const int offset_hint)
1643{ 1648{
@@ -2263,6 +2268,24 @@ static inline void skb_postpull_rcsum(struct sk_buff *skb,
2263 2268
2264unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len); 2269unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);
2265 2270
2271/**
2272 * pskb_trim_rcsum - trim received skb and update checksum
2273 * @skb: buffer to trim
2274 * @len: new length
2275 *
2276 * This is exactly the same as pskb_trim except that it ensures the
2277 * checksum of received packets are still valid after the operation.
2278 */
2279
2280static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
2281{
2282 if (likely(len >= skb->len))
2283 return 0;
2284 if (skb->ip_summed == CHECKSUM_COMPLETE)
2285 skb->ip_summed = CHECKSUM_NONE;
2286 return __pskb_trim(skb, len);
2287}
2288
2266#define skb_queue_walk(queue, skb) \ 2289#define skb_queue_walk(queue, skb) \
2267 for (skb = (queue)->next; \ 2290 for (skb = (queue)->next; \
2268 skb != (struct sk_buff *)(queue); \ 2291 skb != (struct sk_buff *)(queue); \
@@ -2360,27 +2383,6 @@ __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
2360__wsum skb_checksum(const struct sk_buff *skb, int offset, int len, 2383__wsum skb_checksum(const struct sk_buff *skb, int offset, int len,
2361 __wsum csum); 2384 __wsum csum);
2362 2385
2363/**
2364 * pskb_trim_rcsum - trim received skb and update checksum
2365 * @skb: buffer to trim
2366 * @len: new length
2367 *
2368 * This is exactly the same as pskb_trim except that it ensures the
2369 * checksum of received packets are still valid after the operation.
2370 */
2371
2372static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
2373{
2374 if (likely(len >= skb->len))
2375 return 0;
2376 if (skb->ip_summed == CHECKSUM_COMPLETE) {
2377 __wsum adj = skb_checksum(skb, len, skb->len - len, 0);
2378
2379 skb->csum = csum_sub(skb->csum, adj);
2380 }
2381 return __pskb_trim(skb, len);
2382}
2383
2384static inline void *skb_header_pointer(const struct sk_buff *skb, int offset, 2386static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
2385 int len, void *buffer) 2387 int len, void *buffer)
2386{ 2388{
@@ -2529,6 +2531,10 @@ static inline void sw_tx_timestamp(struct sk_buff *skb)
2529 * Ethernet MAC Drivers should call this function in their hard_xmit() 2531 * Ethernet MAC Drivers should call this function in their hard_xmit()
2530 * function immediately before giving the sk_buff to the MAC hardware. 2532 * function immediately before giving the sk_buff to the MAC hardware.
2531 * 2533 *
2534 * Specifically, one should make absolutely sure that this function is
2535 * called before TX completion of this packet can trigger. Otherwise
2536 * the packet could potentially already be freed.
2537 *
2532 * @skb: A socket buffer. 2538 * @skb: A socket buffer.
2533 */ 2539 */
2534static inline void skb_tx_timestamp(struct sk_buff *skb) 2540static inline void skb_tx_timestamp(struct sk_buff *skb)
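pskb_trim_rcsum() is moved ahead of its first user and simplified: trimming an skb that carried CHECKSUM_COMPLETE now just downgrades it to CHECKSUM_NONE instead of subtracting the checksum of the removed tail. A typical receive-path use (illustrative sketch):

static int example_rx_trim(struct sk_buff *skb, unsigned int datalen)
{
	int err = pskb_trim_rcsum(skb, datalen);   /* drop trailing padding */

	if (err) {
		dev_kfree_skb_any(skb);            /* could not shrink; drop */
		return err;
	}
	/* If the skb was CHECKSUM_COMPLETE it is now CHECKSUM_NONE, so
	 * later consumers recompute the checksum themselves. */
	return 0;
}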
diff --git a/include/linux/slab.h b/include/linux/slab.h
index c2bba248fa63..1e2f4fe12773 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -388,10 +388,55 @@ static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
388/** 388/**
389 * kmalloc - allocate memory 389 * kmalloc - allocate memory
390 * @size: how many bytes of memory are required. 390 * @size: how many bytes of memory are required.
391 * @flags: the type of memory to allocate (see kcalloc). 391 * @flags: the type of memory to allocate.
392 * 392 *
393 * kmalloc is the normal method of allocating memory 393 * kmalloc is the normal method of allocating memory
394 * for objects smaller than page size in the kernel. 394 * for objects smaller than page size in the kernel.
395 *
396 * The @flags argument may be one of:
397 *
398 * %GFP_USER - Allocate memory on behalf of user. May sleep.
399 *
400 * %GFP_KERNEL - Allocate normal kernel ram. May sleep.
401 *
402 * %GFP_ATOMIC - Allocation will not sleep. May use emergency pools.
403 * For example, use this inside interrupt handlers.
404 *
405 * %GFP_HIGHUSER - Allocate pages from high memory.
406 *
407 * %GFP_NOIO - Do not do any I/O at all while trying to get memory.
408 *
409 * %GFP_NOFS - Do not make any fs calls while trying to get memory.
410 *
411 * %GFP_NOWAIT - Allocation will not sleep.
412 *
413 * %GFP_THISNODE - Allocate node-local memory only.
414 *
415 * %GFP_DMA - Allocation suitable for DMA.
416 * Should only be used for kmalloc() caches. Otherwise, use a
417 * slab created with SLAB_DMA.
418 *
419 * Also it is possible to set different flags by OR'ing
420 * in one or more of the following additional @flags:
421 *
422 * %__GFP_COLD - Request cache-cold pages instead of
423 * trying to return cache-warm pages.
424 *
425 * %__GFP_HIGH - This allocation has high priority and may use emergency pools.
426 *
427 * %__GFP_NOFAIL - Indicate that this allocation is in no way allowed to fail
428 * (think twice before using).
429 *
430 * %__GFP_NORETRY - If memory is not immediately available,
431 * then give up at once.
432 *
433 * %__GFP_NOWARN - If allocation fails, don't issue any warnings.
434 *
435 * %__GFP_REPEAT - If allocation fails initially, try once more before failing.
436 *
437 * There are other flags available as well, but these are not intended
438 * for general use, and so are not documented here. For a full list of
439 * potential flags, always refer to linux/gfp.h.
395 */ 440 */
396static __always_inline void *kmalloc(size_t size, gfp_t flags) 441static __always_inline void *kmalloc(size_t size, gfp_t flags)
397{ 442{
@@ -502,61 +547,6 @@ int cache_show(struct kmem_cache *s, struct seq_file *m);
502void print_slabinfo_header(struct seq_file *m); 547void print_slabinfo_header(struct seq_file *m);
503 548
504/** 549/**
505 * kmalloc - allocate memory
506 * @size: how many bytes of memory are required.
507 * @flags: the type of memory to allocate.
508 *
509 * The @flags argument may be one of:
510 *
511 * %GFP_USER - Allocate memory on behalf of user. May sleep.
512 *
513 * %GFP_KERNEL - Allocate normal kernel ram. May sleep.
514 *
515 * %GFP_ATOMIC - Allocation will not sleep. May use emergency pools.
516 * For example, use this inside interrupt handlers.
517 *
518 * %GFP_HIGHUSER - Allocate pages from high memory.
519 *
520 * %GFP_NOIO - Do not do any I/O at all while trying to get memory.
521 *
522 * %GFP_NOFS - Do not make any fs calls while trying to get memory.
523 *
524 * %GFP_NOWAIT - Allocation will not sleep.
525 *
526 * %GFP_THISNODE - Allocate node-local memory only.
527 *
528 * %GFP_DMA - Allocation suitable for DMA.
529 * Should only be used for kmalloc() caches. Otherwise, use a
530 * slab created with SLAB_DMA.
531 *
532 * Also it is possible to set different flags by OR'ing
533 * in one or more of the following additional @flags:
534 *
535 * %__GFP_COLD - Request cache-cold pages instead of
536 * trying to return cache-warm pages.
537 *
538 * %__GFP_HIGH - This allocation has high priority and may use emergency pools.
539 *
540 * %__GFP_NOFAIL - Indicate that this allocation is in no way allowed to fail
541 * (think twice before using).
542 *
543 * %__GFP_NORETRY - If memory is not immediately available,
544 * then give up at once.
545 *
546 * %__GFP_NOWARN - If allocation fails, don't issue any warnings.
547 *
548 * %__GFP_REPEAT - If allocation fails initially, try once more before failing.
549 *
550 * There are other flags available as well, but these are not intended
551 * for general use, and so are not documented here. For a full list of
552 * potential flags, always refer to linux/gfp.h.
553 *
554 * kmalloc is the normal method of allocating memory
555 * in the kernel.
556 */
557static __always_inline void *kmalloc(size_t size, gfp_t flags);
558
559/**
560 * kmalloc_array - allocate memory for an array. 550 * kmalloc_array - allocate memory for an array.
561 * @n: number of elements. 551 * @n: number of elements.
562 * @size: element size. 552 * @size: element size.
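The duplicated kmalloc() kernel-doc further down is removed and its GFP flag list folded into the comment on the actual definition; the usual rule of thumb is GFP_KERNEL from process context that may sleep and GFP_ATOMIC from interrupt or other atomic context. A short illustrative use (struct name hypothetical):

struct foo_record { int len; char data[]; };

static struct foo_record *foo_record_alloc(int len, bool atomic_ctx)
{
	/* GFP_ATOMIC never sleeps and may tap emergency pools;
	 * GFP_KERNEL may sleep to reclaim memory. */
	gfp_t flags = atomic_ctx ? GFP_ATOMIC : GFP_KERNEL;
	struct foo_record *rec = kmalloc(sizeof(*rec) + len, flags);

	if (rec)
		rec->len = len;
	return rec;
}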
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index 21a7251d85ee..a1d4ca290862 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -277,15 +277,17 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
277 * @unprepare_transfer_hardware: there are currently no more messages on the 277 * @unprepare_transfer_hardware: there are currently no more messages on the
278 * queue so the subsystem notifies the driver that it may relax the 278 * queue so the subsystem notifies the driver that it may relax the
279 * hardware by issuing this call 279 * hardware by issuing this call
280 * @set_cs: assert or deassert chip select, true to assert. May be called 280 * @set_cs: set the logic level of the chip select line. May be called
281 * from interrupt context. 281 * from interrupt context.
282 * @prepare_message: set up the controller to transfer a single message, 282 * @prepare_message: set up the controller to transfer a single message,
283 * for example doing DMA mapping. Called from threaded 283 * for example doing DMA mapping. Called from threaded
284 * context. 284 * context.
285 * @transfer_one: transfer a single spi_transfer. When the 285 * @transfer_one: transfer a single spi_transfer.
286 * driver is finished with this transfer it must call 286 * - return 0 if the transfer is finished,
287 * spi_finalize_current_transfer() so the subsystem can issue 287 * - return 1 if the transfer is still in progress. When
288 * the next transfer 288 * the driver is finished with this transfer it must
289 * call spi_finalize_current_transfer() so the subsystem
290 * can issue the next transfer
289 * @unprepare_message: undo any work done by prepare_message(). 291 * @unprepare_message: undo any work done by prepare_message().
290 * @cs_gpios: Array of GPIOs to use as chip select lines; one per CS 292 * @cs_gpios: Array of GPIOs to use as chip select lines; one per CS
291 * number. Any individual value may be -ENOENT for CS lines that 293 * number. Any individual value may be -ENOENT for CS lines that
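The reworded @transfer_one documentation makes the contract explicit: return 0 when the transfer finished inside the callback, return 1 when it was started asynchronously, in which case spi_finalize_current_transfer() must be called once it completes. A skeletal driver callback (illustrative; the example_* helpers are hypothetical):

static int example_transfer_one(struct spi_master *master,
				struct spi_device *spi,
				struct spi_transfer *xfer)
{
	struct example_spi *hw = spi_master_get_devdata(master);

	example_fifo_fill(hw, xfer->tx_buf, xfer->len);
	example_start_dma(hw, xfer);   /* completion raises an interrupt */

	/* Still in flight: the IRQ handler calls
	 * spi_finalize_current_transfer(master) when the DMA is done. */
	return 1;
}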
diff --git a/include/linux/tegra-powergate.h b/include/linux/tegra-powergate.h
index c98cfa406952..fd4498329c7c 100644
--- a/include/linux/tegra-powergate.h
+++ b/include/linux/tegra-powergate.h
@@ -45,6 +45,7 @@ struct clk;
45 45
46#define TEGRA_POWERGATE_3D0 TEGRA_POWERGATE_3D 46#define TEGRA_POWERGATE_3D0 TEGRA_POWERGATE_3D
47 47
48#ifdef CONFIG_ARCH_TEGRA
48int tegra_powergate_is_powered(int id); 49int tegra_powergate_is_powered(int id);
49int tegra_powergate_power_on(int id); 50int tegra_powergate_power_on(int id);
50int tegra_powergate_power_off(int id); 51int tegra_powergate_power_off(int id);
@@ -52,5 +53,31 @@ int tegra_powergate_remove_clamping(int id);
52 53
53/* Must be called with clk disabled, and returns with clk enabled */ 54/* Must be called with clk disabled, and returns with clk enabled */
54int tegra_powergate_sequence_power_up(int id, struct clk *clk); 55int tegra_powergate_sequence_power_up(int id, struct clk *clk);
56#else
57static inline int tegra_powergate_is_powered(int id)
58{
59 return -ENOSYS;
60}
61
62static inline int tegra_powergate_power_on(int id)
63{
64 return -ENOSYS;
65}
66
67static inline int tegra_powergate_power_off(int id)
68{
69 return -ENOSYS;
70}
71
72static inline int tegra_powergate_remove_clamping(int id)
73{
74 return -ENOSYS;
75}
76
77static inline int tegra_powergate_sequence_power_up(int id, struct clk *clk)
78{
79 return -ENOSYS;
80}
81#endif
55 82
56#endif /* _MACH_TEGRA_POWERGATE_H_ */ 83#endif /* _MACH_TEGRA_POWERGATE_H_ */
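With the new !CONFIG_ARCH_TEGRA stubs returning -ENOSYS, callers can be built unconditionally and treat that error as "power gating not available". Illustrative caller (function name hypothetical):

static int example_power_up_3d(struct clk *clk_3d)
{
	int err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_3D, clk_3d);

	if (err == -ENOSYS)
		return 0;   /* not a Tegra kernel: nothing to ungate */
	return err;
}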
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index ebeab360d851..f16dc0a40049 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -267,6 +267,8 @@ static inline void tracepoint_synchronize_unregister(void)
267 267
268#define TRACE_EVENT_FLAGS(event, flag) 268#define TRACE_EVENT_FLAGS(event, flag)
269 269
270#define TRACE_EVENT_PERF_PERM(event, expr...)
271
270#endif /* DECLARE_TRACE */ 272#endif /* DECLARE_TRACE */
271 273
272#ifndef TRACE_EVENT 274#ifndef TRACE_EVENT
@@ -399,4 +401,6 @@ static inline void tracepoint_synchronize_unregister(void)
399 401
400#define TRACE_EVENT_FLAGS(event, flag) 402#define TRACE_EVENT_FLAGS(event, flag)
401 403
404#define TRACE_EVENT_PERF_PERM(event, expr...)
405
402#endif /* ifdef TRACE_EVENT (see note above) */ 406#endif /* ifdef TRACE_EVENT (see note above) */
diff --git a/include/linux/usb.h b/include/linux/usb.h
index 7454865ad148..512ab162832c 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -1264,6 +1264,8 @@ typedef void (*usb_complete_t)(struct urb *);
1264 * @sg: scatter gather buffer list, the buffer size of each element in 1264 * @sg: scatter gather buffer list, the buffer size of each element in
1265 * the list (except the last) must be divisible by the endpoint's 1265 * the list (except the last) must be divisible by the endpoint's
1266 * max packet size if no_sg_constraint isn't set in 'struct usb_bus' 1266 * max packet size if no_sg_constraint isn't set in 'struct usb_bus'
1267 * (FIXME: scatter-gather under xHCI is broken for periodic transfers.
1268 * Do not use urb->sg for interrupt endpoints for now, only bulk.)
1267 * @num_mapped_sgs: (internal) number of mapped sg entries 1269 * @num_mapped_sgs: (internal) number of mapped sg entries
1268 * @num_sgs: number of entries in the sg list 1270 * @num_sgs: number of entries in the sg list
1269 * @transfer_buffer_length: How big is transfer_buffer. The transfer may 1271 * @transfer_buffer_length: How big is transfer_buffer. The transfer may
diff --git a/include/linux/usb/wusb.h b/include/linux/usb/wusb.h
index 0c4d4ca370ec..eeb28329fa3c 100644
--- a/include/linux/usb/wusb.h
+++ b/include/linux/usb/wusb.h
@@ -271,6 +271,8 @@ static inline u8 wusb_key_index(int index, int type, int originator)
271#define WUSB_KEY_INDEX_TYPE_GTK 2 271#define WUSB_KEY_INDEX_TYPE_GTK 2
272#define WUSB_KEY_INDEX_ORIGINATOR_HOST 0 272#define WUSB_KEY_INDEX_ORIGINATOR_HOST 0
273#define WUSB_KEY_INDEX_ORIGINATOR_DEVICE 1 273#define WUSB_KEY_INDEX_ORIGINATOR_DEVICE 1
274/* bits 0-3 used for the key index. */
275#define WUSB_KEY_INDEX_MAX 15
274 276
275/* A CCM Nonce, defined in WUSB1.0[6.4.1] */ 277/* A CCM Nonce, defined in WUSB1.0[6.4.1] */
276struct aes_ccm_nonce { 278struct aes_ccm_nonce {
diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h
index bd8218b15009..941055e9d125 100644
--- a/include/media/videobuf2-core.h
+++ b/include/media/videobuf2-core.h
@@ -83,7 +83,7 @@ struct vb2_fileio_data;
83struct vb2_mem_ops { 83struct vb2_mem_ops {
84 void *(*alloc)(void *alloc_ctx, unsigned long size, gfp_t gfp_flags); 84 void *(*alloc)(void *alloc_ctx, unsigned long size, gfp_t gfp_flags);
85 void (*put)(void *buf_priv); 85 void (*put)(void *buf_priv);
86 struct dma_buf *(*get_dmabuf)(void *buf_priv); 86 struct dma_buf *(*get_dmabuf)(void *buf_priv, unsigned long flags);
87 87
88 void *(*get_userptr)(void *alloc_ctx, unsigned long vaddr, 88 void *(*get_userptr)(void *alloc_ctx, unsigned long vaddr,
89 unsigned long size, int write); 89 unsigned long size, int write);
diff --git a/include/net/ip.h b/include/net/ip.h
index 217bc5bfc6c6..5a25f36fe3a7 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -473,7 +473,7 @@ int compat_ip_getsockopt(struct sock *sk, int level, int optname,
473int ip_ra_control(struct sock *sk, unsigned char on, 473int ip_ra_control(struct sock *sk, unsigned char on,
474 void (*destructor)(struct sock *)); 474 void (*destructor)(struct sock *));
475 475
476int ip_recv_error(struct sock *sk, struct msghdr *msg, int len); 476int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len);
477void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port, 477void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port,
478 u32 info, u8 *payload); 478 u32 info, u8 *payload);
479void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 dport, 479void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 dport,
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 2a5f668cd683..488316e339a1 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -110,7 +110,8 @@ struct frag_hdr {
110 __be32 identification; 110 __be32 identification;
111}; 111};
112 112
113#define IP6_MF 0x0001 113#define IP6_MF 0x0001
114#define IP6_OFFSET 0xFFF8
114 115
115#include <net/sock.h> 116#include <net/sock.h>
116 117
@@ -776,8 +777,10 @@ int compat_ipv6_getsockopt(struct sock *sk, int level, int optname,
776 777
777int ip6_datagram_connect(struct sock *sk, struct sockaddr *addr, int addr_len); 778int ip6_datagram_connect(struct sock *sk, struct sockaddr *addr, int addr_len);
778 779
779int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len); 780int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len,
780int ipv6_recv_rxpmtu(struct sock *sk, struct msghdr *msg, int len); 781 int *addr_len);
782int ipv6_recv_rxpmtu(struct sock *sk, struct msghdr *msg, int len,
783 int *addr_len);
781void ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port, 784void ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port,
782 u32 info, u8 *payload); 785 u32 info, u8 *payload);
783void ipv6_local_error(struct sock *sk, int err, struct flowi6 *fl6, u32 info); 786void ipv6_local_error(struct sock *sk, int err, struct flowi6 *fl6, u32 info);
diff --git a/include/net/llc_pdu.h b/include/net/llc_pdu.h
index 31e2de7d57c5..c0f0a13ed818 100644
--- a/include/net/llc_pdu.h
+++ b/include/net/llc_pdu.h
@@ -142,7 +142,7 @@
142#define LLC_S_PF_IS_1(pdu) ((pdu->ctrl_2 & LLC_S_PF_BIT_MASK) ? 1 : 0) 142#define LLC_S_PF_IS_1(pdu) ((pdu->ctrl_2 & LLC_S_PF_BIT_MASK) ? 1 : 0)
143 143
144#define PDU_SUPV_GET_Nr(pdu) ((pdu->ctrl_2 & 0xFE) >> 1) 144#define PDU_SUPV_GET_Nr(pdu) ((pdu->ctrl_2 & 0xFE) >> 1)
145#define PDU_GET_NEXT_Vr(sn) (++sn & ~LLC_2_SEQ_NBR_MODULO) 145#define PDU_GET_NEXT_Vr(sn) (((sn) + 1) & ~LLC_2_SEQ_NBR_MODULO)
146 146
147/* FRMR information field macros */ 147/* FRMR information field macros */
148 148
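Rewriting PDU_GET_NEXT_Vr() as ((sn) + 1) removes the hidden side effect of the old ++sn form: the macro now only computes the successor modulo 128 and leaves its argument untouched. Worked illustration:

u8 vr = 127;                    /* current receive state variable N(R)   */
u8 next = PDU_GET_NEXT_Vr(vr);  /* (127 + 1) & ~128 == 0; vr remains 127 */
/* The old form incremented the caller's variable and could not safely be
 * given an expression with side effects as its argument. */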
diff --git a/include/net/ping.h b/include/net/ping.h
index 3f67704f3747..90f48417b03d 100644
--- a/include/net/ping.h
+++ b/include/net/ping.h
@@ -31,7 +31,8 @@
31 31
32/* Compatibility glue so we can support IPv6 when it's compiled as a module */ 32/* Compatibility glue so we can support IPv6 when it's compiled as a module */
33struct pingv6_ops { 33struct pingv6_ops {
34 int (*ipv6_recv_error)(struct sock *sk, struct msghdr *msg, int len); 34 int (*ipv6_recv_error)(struct sock *sk, struct msghdr *msg, int len,
35 int *addr_len);
35 int (*ip6_datagram_recv_ctl)(struct sock *sk, struct msghdr *msg, 36 int (*ip6_datagram_recv_ctl)(struct sock *sk, struct msghdr *msg,
36 struct sk_buff *skb); 37 struct sk_buff *skb);
37 int (*icmpv6_err_convert)(u8 type, u8 code, int *err); 38 int (*icmpv6_err_convert)(u8 type, u8 code, int *err);
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 2174d8da0770..0a248b323d87 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -629,6 +629,7 @@ struct sctp_chunk {
629#define SCTP_NEED_FRTX 0x1 629#define SCTP_NEED_FRTX 0x1
630#define SCTP_DONT_FRTX 0x2 630#define SCTP_DONT_FRTX 0x2
631 __u16 rtt_in_progress:1, /* This chunk used for RTT calc? */ 631 __u16 rtt_in_progress:1, /* This chunk used for RTT calc? */
632 resent:1, /* Has this chunk ever been resent. */
632 has_tsn:1, /* Does this chunk have a TSN yet? */ 633 has_tsn:1, /* Does this chunk have a TSN yet? */
633 has_ssn:1, /* Does this chunk have a SSN yet? */ 634 has_ssn:1, /* Does this chunk have a SSN yet? */
634 singleton:1, /* Only chunk in the packet? */ 635 singleton:1, /* Only chunk in the packet? */
@@ -1045,9 +1046,6 @@ struct sctp_outq {
1045 1046
1046 /* Corked? */ 1047 /* Corked? */
1047 char cork; 1048 char cork;
1048
1049 /* Is this structure empty? */
1050 char empty;
1051}; 1049};
1052 1050
1053void sctp_outq_init(struct sctp_association *, struct sctp_outq *); 1051void sctp_outq_init(struct sctp_association *, struct sctp_outq *);
@@ -1725,12 +1723,6 @@ struct sctp_association {
1725 /* How many duplicated TSNs have we seen? */ 1723 /* How many duplicated TSNs have we seen? */
1726 int numduptsns; 1724 int numduptsns;
1727 1725
1728 /* Number of seconds of idle time before an association is closed.
1729 * In the association context, this is really used as a boolean
1730 * since the real timeout is stored in the timeouts array
1731 */
1732 __u32 autoclose;
1733
1734 /* These are to support 1726 /* These are to support
1735 * "SCTP Extensions for Dynamic Reconfiguration of IP Addresses 1727 * "SCTP Extensions for Dynamic Reconfiguration of IP Addresses
1736 * and Enforcement of Flow and Message Limits" 1728 * and Enforcement of Flow and Message Limits"
diff --git a/include/net/sock.h b/include/net/sock.h
index e3a18ff0c38b..2ef3c3eca47a 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1035,7 +1035,6 @@ enum cg_proto_flags {
1035}; 1035};
1036 1036
1037struct cg_proto { 1037struct cg_proto {
1038 void (*enter_memory_pressure)(struct sock *sk);
1039 struct res_counter memory_allocated; /* Current allocated memory. */ 1038 struct res_counter memory_allocated; /* Current allocated memory. */
1040 struct percpu_counter sockets_allocated; /* Current number of sockets. */ 1039 struct percpu_counter sockets_allocated; /* Current number of sockets. */
1041 int memory_pressure; 1040 int memory_pressure;
@@ -1155,8 +1154,7 @@ static inline void sk_leave_memory_pressure(struct sock *sk)
1155 struct proto *prot = sk->sk_prot; 1154 struct proto *prot = sk->sk_prot;
1156 1155
1157 for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto)) 1156 for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto))
1158 if (cg_proto->memory_pressure) 1157 cg_proto->memory_pressure = 0;
1159 cg_proto->memory_pressure = 0;
1160 } 1158 }
1161 1159
1162} 1160}
@@ -1171,7 +1169,7 @@ static inline void sk_enter_memory_pressure(struct sock *sk)
1171 struct proto *prot = sk->sk_prot; 1169 struct proto *prot = sk->sk_prot;
1172 1170
1173 for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto)) 1171 for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto))
1174 cg_proto->enter_memory_pressure(sk); 1172 cg_proto->memory_pressure = 1;
1175 } 1173 }
1176 1174
1177 sk->sk_prot->enter_memory_pressure(sk); 1175 sk->sk_prot->enter_memory_pressure(sk);
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 979874c627ee..61e1935c91b1 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -978,7 +978,7 @@ struct ib_uobject {
978}; 978};
979 979
980struct ib_udata { 980struct ib_udata {
981 void __user *inbuf; 981 const void __user *inbuf;
982 void __user *outbuf; 982 void __user *outbuf;
983 size_t inlen; 983 size_t inlen;
984 size_t outlen; 984 size_t outlen;
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index 546084964d55..fe3b58e836c8 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -475,6 +475,9 @@ struct scsi_host_template {
475 */ 475 */
476 unsigned ordered_tag:1; 476 unsigned ordered_tag:1;
477 477
478 /* True if the controller does not support WRITE SAME */
479 unsigned no_write_same:1;
480
478 /* 481 /*
479 * Countdown for host blocking with no commands outstanding. 482 * Countdown for host blocking with no commands outstanding.
480 */ 483 */
@@ -677,6 +680,9 @@ struct Scsi_Host {
677 /* Don't resume host in EH */ 680 /* Don't resume host in EH */
678 unsigned eh_noresume:1; 681 unsigned eh_noresume:1;
679 682
683 /* The controller does not support WRITE SAME */
684 unsigned no_write_same:1;
685
680 /* 686 /*
681 * Optional work queue to be utilized by the transport 687 * Optional work queue to be utilized by the transport
682 */ 688 */
diff --git a/include/sound/memalloc.h b/include/sound/memalloc.h
index af9983970417..5f73785f5977 100644
--- a/include/sound/memalloc.h
+++ b/include/sound/memalloc.h
@@ -108,7 +108,7 @@ static inline dma_addr_t snd_sgbuf_get_addr(struct snd_dma_buffer *dmab,
108{ 108{
109 struct snd_sg_buf *sgbuf = dmab->private_data; 109 struct snd_sg_buf *sgbuf = dmab->private_data;
110 dma_addr_t addr = sgbuf->table[offset >> PAGE_SHIFT].addr; 110 dma_addr_t addr = sgbuf->table[offset >> PAGE_SHIFT].addr;
111 addr &= PAGE_MASK; 111 addr &= ~((dma_addr_t)PAGE_SIZE - 1);
112 return addr + offset % PAGE_SIZE; 112 return addr + offset % PAGE_SIZE;
113} 113}
114 114
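The snd_sgbuf_get_addr() hunk above replaces PAGE_MASK with a mask built in the dma_addr_t type because PAGE_MASK has the width of unsigned long: on a 32-bit kernel with 64-bit DMA addresses the AND would also clear address bits 32..63. A hedged, standalone sketch (not kernel code; the 32-bit unsigned long is simulated with a cast):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))            /* width of unsigned long */

typedef uint64_t dma_addr_t;                    /* 64-bit DMA addresses */

int main(void)
{
        dma_addr_t addr = 0x1abcd6789ULL;       /* sits above 4 GiB */

        /* A 32-bit unsigned long mask (simulated by the cast) clears bits 32..63. */
        dma_addr_t truncated = addr & (uint32_t)PAGE_MASK;

        /* Building the mask in dma_addr_t keeps the high bits intact. */
        dma_addr_t fixed = addr & ~((dma_addr_t)PAGE_SIZE - 1);

        printf("truncated=%llx fixed=%llx\n",
               (unsigned long long)truncated, (unsigned long long)fixed);
        return 0;
}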
diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h
index 2037c45adfe6..56ebdfca6273 100644
--- a/include/sound/soc-dapm.h
+++ b/include/sound/soc-dapm.h
@@ -104,7 +104,8 @@ struct device;
104 SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \ 104 SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
105 .kcontrol_news = wcontrols, .num_kcontrols = 1} 105 .kcontrol_news = wcontrols, .num_kcontrols = 1}
106#define SND_SOC_DAPM_MUX(wname, wreg, wshift, winvert, wcontrols) \ 106#define SND_SOC_DAPM_MUX(wname, wreg, wshift, winvert, wcontrols) \
107{ .id = snd_soc_dapm_mux, .name = wname, .reg = wreg, \ 107{ .id = snd_soc_dapm_mux, .name = wname, \
108 SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
108 .kcontrol_news = wcontrols, .num_kcontrols = 1} 109 .kcontrol_news = wcontrols, .num_kcontrols = 1}
109#define SND_SOC_DAPM_VIRT_MUX(wname, wreg, wshift, winvert, wcontrols) \ 110#define SND_SOC_DAPM_VIRT_MUX(wname, wreg, wshift, winvert, wcontrols) \
110{ .id = snd_soc_dapm_virt_mux, .name = wname, \ 111{ .id = snd_soc_dapm_virt_mux, .name = wname, \
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 45412a6afa69..321301c0a643 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -517,10 +517,6 @@ struct se_node_acl {
517 u32 acl_index; 517 u32 acl_index;
518#define MAX_ACL_TAG_SIZE 64 518#define MAX_ACL_TAG_SIZE 64
519 char acl_tag[MAX_ACL_TAG_SIZE]; 519 char acl_tag[MAX_ACL_TAG_SIZE];
520 u64 num_cmds;
521 u64 read_bytes;
522 u64 write_bytes;
523 spinlock_t stats_lock;
524 /* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */ 520 /* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */
525 atomic_t acl_pr_ref_count; 521 atomic_t acl_pr_ref_count;
526 struct se_dev_entry **device_list; 522 struct se_dev_entry **device_list;
@@ -624,6 +620,7 @@ struct se_dev_attrib {
624 u32 unmap_granularity; 620 u32 unmap_granularity;
625 u32 unmap_granularity_alignment; 621 u32 unmap_granularity_alignment;
626 u32 max_write_same_len; 622 u32 max_write_same_len;
623 u32 max_bytes_per_io;
627 struct se_device *da_dev; 624 struct se_device *da_dev;
628 struct config_group da_group; 625 struct config_group da_group;
629}; 626};
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index 52594b20179e..5c38606613d8 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -90,6 +90,10 @@
90#define TRACE_EVENT_FLAGS(name, value) \ 90#define TRACE_EVENT_FLAGS(name, value) \
91 __TRACE_EVENT_FLAGS(name, value) 91 __TRACE_EVENT_FLAGS(name, value)
92 92
93#undef TRACE_EVENT_PERF_PERM
94#define TRACE_EVENT_PERF_PERM(name, expr...) \
95 __TRACE_EVENT_PERF_PERM(name, expr)
96
93#include TRACE_INCLUDE(TRACE_INCLUDE_FILE) 97#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
94 98
95 99
@@ -140,6 +144,9 @@
140#undef TRACE_EVENT_FLAGS 144#undef TRACE_EVENT_FLAGS
141#define TRACE_EVENT_FLAGS(event, flag) 145#define TRACE_EVENT_FLAGS(event, flag)
142 146
147#undef TRACE_EVENT_PERF_PERM
148#define TRACE_EVENT_PERF_PERM(event, expr...)
149
143#include TRACE_INCLUDE(TRACE_INCLUDE_FILE) 150#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
144 151
145/* 152/*
@@ -372,7 +379,8 @@ ftrace_define_fields_##call(struct ftrace_event_call *event_call) \
372 __data_size += (len) * sizeof(type); 379 __data_size += (len) * sizeof(type);
373 380
374#undef __string 381#undef __string
375#define __string(item, src) __dynamic_array(char, item, strlen(src) + 1) 382#define __string(item, src) __dynamic_array(char, item, \
383 strlen((src) ? (const char *)(src) : "(null)") + 1)
376 384
377#undef DECLARE_EVENT_CLASS 385#undef DECLARE_EVENT_CLASS
378#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ 386#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
@@ -501,7 +509,7 @@ static inline notrace int ftrace_get_offsets_##call( \
501 509
502#undef __assign_str 510#undef __assign_str
503#define __assign_str(dst, src) \ 511#define __assign_str(dst, src) \
504 strcpy(__get_str(dst), src); 512 strcpy(__get_str(dst), (src) ? (const char *)(src) : "(null)");
505 513
506#undef TP_fast_assign 514#undef TP_fast_assign
507#define TP_fast_assign(args...) args 515#define TP_fast_assign(args...) args
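The __string()/__assign_str() hunks above make the tracing macros substitute the literal "(null)" whenever the source pointer is NULL, so the event neither dereferences NULL while sizing the dynamic array nor while copying into it. A minimal userspace sketch of the same guard; the helper names here (len_or_null, copy_or_null) are illustrative only:

#include <stdio.h>
#include <string.h>

static size_t len_or_null(const char *src)
{
        return strlen(src ? src : "(null)") + 1;        /* room for the NUL */
}

static void copy_or_null(char *dst, const char *src)
{
        strcpy(dst, src ? src : "(null)");
}

int main(void)
{
        char buf[32];

        copy_or_null(buf, NULL);        /* would crash without the guard */
        printf("%zu \"%s\"\n", len_or_null(NULL), buf);
        return 0;
}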
diff --git a/include/uapi/drm/radeon_drm.h b/include/uapi/drm/radeon_drm.h
index 2f3f7ea8c77b..fe421e8a431b 100644
--- a/include/uapi/drm/radeon_drm.h
+++ b/include/uapi/drm/radeon_drm.h
@@ -983,6 +983,8 @@ struct drm_radeon_cs {
983#define RADEON_INFO_SI_CP_DMA_COMPUTE 0x17 983#define RADEON_INFO_SI_CP_DMA_COMPUTE 0x17
984/* CIK macrotile mode array */ 984/* CIK macrotile mode array */
985#define RADEON_INFO_CIK_MACROTILE_MODE_ARRAY 0x18 985#define RADEON_INFO_CIK_MACROTILE_MODE_ARRAY 0x18
986/* query the number of render backends */
987#define RADEON_INFO_SI_BACKEND_ENABLED_MASK 0x19
986 988
987 989
988struct drm_radeon_info { 990struct drm_radeon_info {
diff --git a/include/uapi/drm/vmwgfx_drm.h b/include/uapi/drm/vmwgfx_drm.h
index bcb0912afe7a..f854ca4a1372 100644
--- a/include/uapi/drm/vmwgfx_drm.h
+++ b/include/uapi/drm/vmwgfx_drm.h
@@ -75,6 +75,7 @@
75#define DRM_VMW_PARAM_FIFO_CAPS 4 75#define DRM_VMW_PARAM_FIFO_CAPS 4
76#define DRM_VMW_PARAM_MAX_FB_SIZE 5 76#define DRM_VMW_PARAM_MAX_FB_SIZE 5
77#define DRM_VMW_PARAM_FIFO_HW_VERSION 6 77#define DRM_VMW_PARAM_FIFO_HW_VERSION 6
78#define DRM_VMW_PARAM_MAX_SURF_MEMORY 7
78 79
79/** 80/**
80 * struct drm_vmw_getparam_arg 81 * struct drm_vmw_getparam_arg
diff --git a/include/uapi/linux/eventpoll.h b/include/uapi/linux/eventpoll.h
index 2c267bcbb85c..bc81fb2e1f0e 100644
--- a/include/uapi/linux/eventpoll.h
+++ b/include/uapi/linux/eventpoll.h
@@ -61,5 +61,16 @@ struct epoll_event {
61 __u64 data; 61 __u64 data;
62} EPOLL_PACKED; 62} EPOLL_PACKED;
63 63
64 64#ifdef CONFIG_PM_SLEEP
65static inline void ep_take_care_of_epollwakeup(struct epoll_event *epev)
66{
67 if ((epev->events & EPOLLWAKEUP) && !capable(CAP_BLOCK_SUSPEND))
68 epev->events &= ~EPOLLWAKEUP;
69}
70#else
71static inline void ep_take_care_of_epollwakeup(struct epoll_event *epev)
72{
73 epev->events &= ~EPOLLWAKEUP;
74}
75#endif
65#endif /* _UAPI_LINUX_EVENTPOLL_H */ 76#endif /* _UAPI_LINUX_EVENTPOLL_H */
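The new ep_take_care_of_epollwakeup() helper above silently strips EPOLLWAKEUP from a request when the kernel was built without CONFIG_PM_SLEEP or the caller lacks CAP_BLOCK_SUSPEND, instead of failing the epoll_ctl() call. A userspace sketch of the same "drop the privileged flag, keep the rest" pattern; can_block_suspend() stands in for the capability check and is purely illustrative:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EPOLLIN     0x001u
#define EPOLLWAKEUP (1u << 29)          /* matches the uapi value */

static bool can_block_suspend(void)
{
        return false;                   /* pretend the caller is unprivileged */
}

static uint32_t sanitize_events(uint32_t events)
{
        if ((events & EPOLLWAKEUP) && !can_block_suspend())
                events &= ~EPOLLWAKEUP; /* drop the flag, keep the rest */
        return events;
}

int main(void)
{
        uint32_t ev = EPOLLIN | EPOLLWAKEUP;

        printf("%#x -> %#x\n", ev, sanitize_events(ev));
        return 0;
}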
diff --git a/include/uapi/linux/genetlink.h b/include/uapi/linux/genetlink.h
index 1af72d8228e0..c3363ba1ae05 100644
--- a/include/uapi/linux/genetlink.h
+++ b/include/uapi/linux/genetlink.h
@@ -28,6 +28,7 @@ struct genlmsghdr {
28#define GENL_ID_GENERATE 0 28#define GENL_ID_GENERATE 0
29#define GENL_ID_CTRL NLMSG_MIN_TYPE 29#define GENL_ID_CTRL NLMSG_MIN_TYPE
30#define GENL_ID_VFS_DQUOT (NLMSG_MIN_TYPE + 1) 30#define GENL_ID_VFS_DQUOT (NLMSG_MIN_TYPE + 1)
31#define GENL_ID_PMCRAID (NLMSG_MIN_TYPE + 2)
31 32
32/************************************************************************** 33/**************************************************************************
33 * Controller 34 * Controller
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h
index b78566f59aba..6db460121f84 100644
--- a/include/uapi/linux/if_link.h
+++ b/include/uapi/linux/if_link.h
@@ -488,7 +488,9 @@ enum {
488 IFLA_HSR_UNSPEC, 488 IFLA_HSR_UNSPEC,
489 IFLA_HSR_SLAVE1, 489 IFLA_HSR_SLAVE1,
490 IFLA_HSR_SLAVE2, 490 IFLA_HSR_SLAVE2,
491 IFLA_HSR_MULTICAST_SPEC, 491 IFLA_HSR_MULTICAST_SPEC, /* Last byte of supervision addr */
492 IFLA_HSR_SUPERVISION_ADDR, /* Supervision frame multicast addr */
493 IFLA_HSR_SEQ_NR,
492 __IFLA_HSR_MAX, 494 __IFLA_HSR_MAX,
493}; 495};
494 496
diff --git a/include/uapi/linux/input.h b/include/uapi/linux/input.h
index a3726275876d..bd24470d24a2 100644
--- a/include/uapi/linux/input.h
+++ b/include/uapi/linux/input.h
@@ -464,7 +464,8 @@ struct input_keymap_entry {
464#define KEY_BRIGHTNESS_ZERO 244 /* brightness off, use ambient */ 464#define KEY_BRIGHTNESS_ZERO 244 /* brightness off, use ambient */
465#define KEY_DISPLAY_OFF 245 /* display device to off state */ 465#define KEY_DISPLAY_OFF 245 /* display device to off state */
466 466
467#define KEY_WIMAX 246 467#define KEY_WWAN 246 /* Wireless WAN (LTE, UMTS, GSM, etc.) */
468#define KEY_WIMAX KEY_WWAN
468#define KEY_RFKILL 247 /* Key that controls all radios */ 469#define KEY_RFKILL 247 /* Key that controls all radios */
469 470
470#define KEY_MICMUTE 248 /* Mute / unmute the microphone */ 471#define KEY_MICMUTE 248 /* Mute / unmute the microphone */
@@ -719,6 +720,8 @@ struct input_keymap_entry {
719#define BTN_DPAD_LEFT 0x222 720#define BTN_DPAD_LEFT 0x222
720#define BTN_DPAD_RIGHT 0x223 721#define BTN_DPAD_RIGHT 0x223
721 722
723#define KEY_ALS_TOGGLE 0x230 /* Ambient light sensor */
724
722#define BTN_TRIGGER_HAPPY 0x2c0 725#define BTN_TRIGGER_HAPPY 0x2c0
723#define BTN_TRIGGER_HAPPY1 0x2c0 726#define BTN_TRIGGER_HAPPY1 0x2c0
724#define BTN_TRIGGER_HAPPY2 0x2c1 727#define BTN_TRIGGER_HAPPY2 0x2c1
@@ -856,6 +859,7 @@ struct input_keymap_entry {
856#define SW_FRONT_PROXIMITY 0x0b /* set = front proximity sensor active */ 859#define SW_FRONT_PROXIMITY 0x0b /* set = front proximity sensor active */
857#define SW_ROTATE_LOCK 0x0c /* set = rotate locked/disabled */ 860#define SW_ROTATE_LOCK 0x0c /* set = rotate locked/disabled */
858#define SW_LINEIN_INSERT 0x0d /* set = inserted */ 861#define SW_LINEIN_INSERT 0x0d /* set = inserted */
862#define SW_MUTE_DEVICE 0x0e /* set = device disabled */
859#define SW_MAX 0x0f 863#define SW_MAX 0x0f
860#define SW_CNT (SW_MAX+1) 864#define SW_CNT (SW_MAX+1)
861 865
diff --git a/include/uapi/linux/mic_common.h b/include/uapi/linux/mic_common.h
index 17e7d95e4f53..6eb40244e019 100644
--- a/include/uapi/linux/mic_common.h
+++ b/include/uapi/linux/mic_common.h
@@ -23,12 +23,7 @@
23 23
24#include <linux/virtio_ring.h> 24#include <linux/virtio_ring.h>
25 25
26#ifndef __KERNEL__ 26#define __mic_align(a, x) (((a) + (x) - 1) & ~((x) - 1))
27#define ALIGN(a, x) (((a) + (x) - 1) & ~((x) - 1))
28#define __aligned(x) __attribute__ ((aligned(x)))
29#endif
30
31#define mic_aligned_size(x) ALIGN(sizeof(x), 8)
32 27
33/** 28/**
34 * struct mic_device_desc: Virtio device information shared between the 29 * struct mic_device_desc: Virtio device information shared between the
@@ -48,8 +43,8 @@ struct mic_device_desc {
48 __u8 feature_len; 43 __u8 feature_len;
49 __u8 config_len; 44 __u8 config_len;
50 __u8 status; 45 __u8 status;
51 __u64 config[0]; 46 __le64 config[0];
52} __aligned(8); 47} __attribute__ ((aligned(8)));
53 48
54/** 49/**
55 * struct mic_device_ctrl: Per virtio device information in the device page 50 * struct mic_device_ctrl: Per virtio device information in the device page
@@ -66,7 +61,7 @@ struct mic_device_desc {
66 * @h2c_vdev_db: The doorbell number to be used by host. Set by guest. 61 * @h2c_vdev_db: The doorbell number to be used by host. Set by guest.
67 */ 62 */
68struct mic_device_ctrl { 63struct mic_device_ctrl {
69 __u64 vdev; 64 __le64 vdev;
70 __u8 config_change; 65 __u8 config_change;
71 __u8 vdev_reset; 66 __u8 vdev_reset;
72 __u8 guest_ack; 67 __u8 guest_ack;
@@ -74,7 +69,7 @@ struct mic_device_ctrl {
74 __u8 used_address_updated; 69 __u8 used_address_updated;
75 __s8 c2h_vdev_db; 70 __s8 c2h_vdev_db;
76 __s8 h2c_vdev_db; 71 __s8 h2c_vdev_db;
77} __aligned(8); 72} __attribute__ ((aligned(8)));
78 73
79/** 74/**
80 * struct mic_bootparam: Virtio device independent information in device page 75 * struct mic_bootparam: Virtio device independent information in device page
@@ -87,13 +82,13 @@ struct mic_device_ctrl {
87 * @shutdown_card: Set to 1 by the host when a card shutdown is initiated 82 * @shutdown_card: Set to 1 by the host when a card shutdown is initiated
88 */ 83 */
89struct mic_bootparam { 84struct mic_bootparam {
90 __u32 magic; 85 __le32 magic;
91 __s8 c2h_shutdown_db; 86 __s8 c2h_shutdown_db;
92 __s8 h2c_shutdown_db; 87 __s8 h2c_shutdown_db;
93 __s8 h2c_config_db; 88 __s8 h2c_config_db;
94 __u8 shutdown_status; 89 __u8 shutdown_status;
95 __u8 shutdown_card; 90 __u8 shutdown_card;
96} __aligned(8); 91} __attribute__ ((aligned(8)));
97 92
98/** 93/**
99 * struct mic_device_page: High level representation of the device page 94 * struct mic_device_page: High level representation of the device page
@@ -116,10 +111,10 @@ struct mic_device_page {
116 * @num: The number of entries in the virtio_ring 111 * @num: The number of entries in the virtio_ring
117 */ 112 */
118struct mic_vqconfig { 113struct mic_vqconfig {
119 __u64 address; 114 __le64 address;
120 __u64 used_address; 115 __le64 used_address;
121 __u16 num; 116 __le16 num;
122} __aligned(8); 117} __attribute__ ((aligned(8)));
123 118
124/* 119/*
125 * The alignment to use between consumer and producer parts of vring. 120 * The alignment to use between consumer and producer parts of vring.
@@ -154,7 +149,7 @@ struct mic_vqconfig {
154 */ 149 */
155struct _mic_vring_info { 150struct _mic_vring_info {
156 __u16 avail_idx; 151 __u16 avail_idx;
157 int magic; 152 __le32 magic;
158}; 153};
159 154
160/** 155/**
@@ -173,15 +168,13 @@ struct mic_vring {
173 int len; 168 int len;
174}; 169};
175 170
176#define mic_aligned_desc_size(d) ALIGN(mic_desc_size(d), 8) 171#define mic_aligned_desc_size(d) __mic_align(mic_desc_size(d), 8)
177 172
178#ifndef INTEL_MIC_CARD 173#ifndef INTEL_MIC_CARD
179static inline unsigned mic_desc_size(const struct mic_device_desc *desc) 174static inline unsigned mic_desc_size(const struct mic_device_desc *desc)
180{ 175{
181 return mic_aligned_size(*desc) 176 return sizeof(*desc) + desc->num_vq * sizeof(struct mic_vqconfig)
182 + desc->num_vq * mic_aligned_size(struct mic_vqconfig) 177 + desc->feature_len * 2 + desc->config_len;
183 + desc->feature_len * 2
184 + desc->config_len;
185} 178}
186 179
187static inline struct mic_vqconfig * 180static inline struct mic_vqconfig *
@@ -201,8 +194,7 @@ static inline __u8 *mic_vq_configspace(const struct mic_device_desc *desc)
201} 194}
202static inline unsigned mic_total_desc_size(struct mic_device_desc *desc) 195static inline unsigned mic_total_desc_size(struct mic_device_desc *desc)
203{ 196{
204 return mic_aligned_desc_size(desc) + 197 return mic_aligned_desc_size(desc) + sizeof(struct mic_device_ctrl);
205 mic_aligned_size(struct mic_device_ctrl);
206} 198}
207#endif 199#endif
208 200
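The new __mic_align() macro above is the usual round-up-to-a-power-of-two-boundary trick, (a + x - 1) & ~(x - 1), which the header now carries itself instead of relying on the kernel-only ALIGN()/__aligned() definitions. A tiny standalone sketch (only valid when x is a power of two):

#include <stdio.h>

#define ALIGN_UP(a, x) (((a) + (x) - 1) & ~((x) - 1))

int main(void)
{
        printf("%d\n", ALIGN_UP(13, 8));        /* 16 */
        printf("%d\n", ALIGN_UP(16, 8));        /* 16: already aligned stays put */
        printf("%d\n", ALIGN_UP(1, 8));         /* 8 */
        return 0;
}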
diff --git a/include/uapi/linux/netlink_diag.h b/include/uapi/linux/netlink_diag.h
index 4e31db4eea41..f2159d30d1f5 100644
--- a/include/uapi/linux/netlink_diag.h
+++ b/include/uapi/linux/netlink_diag.h
@@ -33,6 +33,7 @@ struct netlink_diag_ring {
33}; 33};
34 34
35enum { 35enum {
36 /* NETLINK_DIAG_NONE, standard nl API requires this attribute! */
36 NETLINK_DIAG_MEMINFO, 37 NETLINK_DIAG_MEMINFO,
37 NETLINK_DIAG_GROUPS, 38 NETLINK_DIAG_GROUPS,
38 NETLINK_DIAG_RX_RING, 39 NETLINK_DIAG_RX_RING,
diff --git a/include/uapi/linux/packet_diag.h b/include/uapi/linux/packet_diag.h
index b2cc0cd9c4d9..d08c63f3dd6f 100644
--- a/include/uapi/linux/packet_diag.h
+++ b/include/uapi/linux/packet_diag.h
@@ -29,6 +29,7 @@ struct packet_diag_msg {
29}; 29};
30 30
31enum { 31enum {
32 /* PACKET_DIAG_NONE, standard nl API requires this attribute! */
32 PACKET_DIAG_INFO, 33 PACKET_DIAG_INFO,
33 PACKET_DIAG_MCLIST, 34 PACKET_DIAG_MCLIST,
34 PACKET_DIAG_RX_RING, 35 PACKET_DIAG_RX_RING,
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index e1802d6153ae..959d454f76a1 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -679,6 +679,7 @@ enum perf_event_type {
679 * 679 *
680 * { u64 weight; } && PERF_SAMPLE_WEIGHT 680 * { u64 weight; } && PERF_SAMPLE_WEIGHT
681 * { u64 data_src; } && PERF_SAMPLE_DATA_SRC 681 * { u64 data_src; } && PERF_SAMPLE_DATA_SRC
682 * { u64 transaction; } && PERF_SAMPLE_TRANSACTION
682 * }; 683 * };
683 */ 684 */
684 PERF_RECORD_SAMPLE = 9, 685 PERF_RECORD_SAMPLE = 9,
diff --git a/include/uapi/linux/unix_diag.h b/include/uapi/linux/unix_diag.h
index b9e2a6a7446f..1eb0b8dd1830 100644
--- a/include/uapi/linux/unix_diag.h
+++ b/include/uapi/linux/unix_diag.h
@@ -31,6 +31,7 @@ struct unix_diag_msg {
31}; 31};
32 32
33enum { 33enum {
34 /* UNIX_DIAG_NONE, standard nl API requires this attribute! */
34 UNIX_DIAG_NAME, 35 UNIX_DIAG_NAME,
35 UNIX_DIAG_VFS, 36 UNIX_DIAG_VFS,
36 UNIX_DIAG_PEER, 37 UNIX_DIAG_PEER,
diff --git a/include/uapi/sound/compress_offload.h b/include/uapi/sound/compress_offload.h
index d630163b9a2e..5759810e1c1b 100644
--- a/include/uapi/sound/compress_offload.h
+++ b/include/uapi/sound/compress_offload.h
@@ -30,7 +30,7 @@
30#include <sound/compress_params.h> 30#include <sound/compress_params.h>
31 31
32 32
33#define SNDRV_COMPRESS_VERSION SNDRV_PROTOCOL_VERSION(0, 1, 1) 33#define SNDRV_COMPRESS_VERSION SNDRV_PROTOCOL_VERSION(0, 1, 2)
34/** 34/**
35 * struct snd_compressed_buffer: compressed buffer 35 * struct snd_compressed_buffer: compressed buffer
36 * @fragment_size: size of buffer fragment in bytes 36 * @fragment_size: size of buffer fragment in bytes
@@ -67,8 +67,8 @@ struct snd_compr_params {
67struct snd_compr_tstamp { 67struct snd_compr_tstamp {
68 __u32 byte_offset; 68 __u32 byte_offset;
69 __u32 copied_total; 69 __u32 copied_total;
70 snd_pcm_uframes_t pcm_frames; 70 __u32 pcm_frames;
71 snd_pcm_uframes_t pcm_io_frames; 71 __u32 pcm_io_frames;
72 __u32 sampling_rate; 72 __u32 sampling_rate;
73}; 73};
74 74
diff --git a/include/xen/interface/io/blkif.h b/include/xen/interface/io/blkif.h
index 65e12099ef89..ae665ac59c36 100644
--- a/include/xen/interface/io/blkif.h
+++ b/include/xen/interface/io/blkif.h
@@ -146,7 +146,7 @@ struct blkif_request_segment_aligned {
146struct blkif_request_rw { 146struct blkif_request_rw {
147 uint8_t nr_segments; /* number of segments */ 147 uint8_t nr_segments; /* number of segments */
148 blkif_vdev_t handle; /* only for read/write requests */ 148 blkif_vdev_t handle; /* only for read/write requests */
149#ifdef CONFIG_X86_64 149#ifndef CONFIG_X86_32
150 uint32_t _pad1; /* offsetof(blkif_request,u.rw.id) == 8 */ 150 uint32_t _pad1; /* offsetof(blkif_request,u.rw.id) == 8 */
151#endif 151#endif
152 uint64_t id; /* private guest value, echoed in resp */ 152 uint64_t id; /* private guest value, echoed in resp */
@@ -163,7 +163,7 @@ struct blkif_request_discard {
163 uint8_t flag; /* BLKIF_DISCARD_SECURE or zero. */ 163 uint8_t flag; /* BLKIF_DISCARD_SECURE or zero. */
164#define BLKIF_DISCARD_SECURE (1<<0) /* ignored if discard-secure=0 */ 164#define BLKIF_DISCARD_SECURE (1<<0) /* ignored if discard-secure=0 */
165 blkif_vdev_t _pad1; /* only for read/write requests */ 165 blkif_vdev_t _pad1; /* only for read/write requests */
166#ifdef CONFIG_X86_64 166#ifndef CONFIG_X86_32
167 uint32_t _pad2; /* offsetof(blkif_req..,u.discard.id)==8*/ 167 uint32_t _pad2; /* offsetof(blkif_req..,u.discard.id)==8*/
168#endif 168#endif
169 uint64_t id; /* private guest value, echoed in resp */ 169 uint64_t id; /* private guest value, echoed in resp */
@@ -175,7 +175,7 @@ struct blkif_request_discard {
175struct blkif_request_other { 175struct blkif_request_other {
176 uint8_t _pad1; 176 uint8_t _pad1;
177 blkif_vdev_t _pad2; /* only for read/write requests */ 177 blkif_vdev_t _pad2; /* only for read/write requests */
178#ifdef CONFIG_X86_64 178#ifndef CONFIG_X86_32
179 uint32_t _pad3; /* offsetof(blkif_req..,u.other.id)==8*/ 179 uint32_t _pad3; /* offsetof(blkif_req..,u.other.id)==8*/
180#endif 180#endif
181 uint64_t id; /* private guest value, echoed in resp */ 181 uint64_t id; /* private guest value, echoed in resp */
@@ -184,7 +184,7 @@ struct blkif_request_other {
184struct blkif_request_indirect { 184struct blkif_request_indirect {
185 uint8_t indirect_op; 185 uint8_t indirect_op;
186 uint16_t nr_segments; 186 uint16_t nr_segments;
187#ifdef CONFIG_X86_64 187#ifndef CONFIG_X86_32
188 uint32_t _pad1; /* offsetof(blkif_...,u.indirect.id) == 8 */ 188 uint32_t _pad1; /* offsetof(blkif_...,u.indirect.id) == 8 */
189#endif 189#endif
190 uint64_t id; 190 uint64_t id;
@@ -192,7 +192,7 @@ struct blkif_request_indirect {
192 blkif_vdev_t handle; 192 blkif_vdev_t handle;
193 uint16_t _pad2; 193 uint16_t _pad2;
194 grant_ref_t indirect_grefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST]; 194 grant_ref_t indirect_grefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST];
195#ifdef CONFIG_X86_64 195#ifndef CONFIG_X86_32
196 uint32_t _pad3; /* make it 64 byte aligned */ 196 uint32_t _pad3; /* make it 64 byte aligned */
197#else 197#else
198 uint64_t _pad3; /* make it 64 byte aligned */ 198 uint64_t _pad3; /* make it 64 byte aligned */
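The blkif hunks above switch the padding guards from CONFIG_X86_64 to !CONFIG_X86_32 so that every build except 32-bit x86 lays these shared-ring structures out identically, with the 64-bit id field at byte offset 8. As a hedged, standalone illustration (field names simplified, not the real structures), offsetof() is the usual way to verify that explicit padding yields the ABI offset the other side of the ring expects:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct req_padded {
        uint8_t  nr_segments;
        uint16_t handle;
        uint32_t _pad1;         /* explicit pad so id lands at offset 8 */
        uint64_t id;            /* private guest value, echoed in the response */
};

int main(void)
{
        printf("id offset: %zu\n", offsetof(struct req_padded, id));    /* 8 */
        return 0;
}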
diff --git a/init/Kconfig b/init/Kconfig
index 79383d3aa5dc..4e5d96ab2034 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -809,6 +809,12 @@ config GENERIC_SCHED_CLOCK
809config ARCH_SUPPORTS_NUMA_BALANCING 809config ARCH_SUPPORTS_NUMA_BALANCING
810 bool 810 bool
811 811
812#
813# For architectures that know their GCC __int128 support is sound
814#
815config ARCH_SUPPORTS_INT128
816 bool
817
812# For architectures that (ab)use NUMA to represent different memory regions 818# For architectures that (ab)use NUMA to represent different memory regions
813# all cpu-local but of different latencies, such as SuperH. 819# all cpu-local but of different latencies, such as SuperH.
814# 820#
diff --git a/kernel/.gitignore b/kernel/.gitignore
index b3097bde4e9c..790d83c7d160 100644
--- a/kernel/.gitignore
+++ b/kernel/.gitignore
@@ -5,3 +5,4 @@ config_data.h
5config_data.gz 5config_data.gz
6timeconst.h 6timeconst.h
7hz.bc 7hz.bc
8x509_certificate_list
diff --git a/kernel/Makefile b/kernel/Makefile
index bbaf7d59c1bb..bc010ee272b6 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -137,9 +137,10 @@ $(obj)/timeconst.h: $(obj)/hz.bc $(src)/timeconst.bc FORCE
137############################################################################### 137###############################################################################
138ifeq ($(CONFIG_SYSTEM_TRUSTED_KEYRING),y) 138ifeq ($(CONFIG_SYSTEM_TRUSTED_KEYRING),y)
139X509_CERTIFICATES-y := $(wildcard *.x509) $(wildcard $(srctree)/*.x509) 139X509_CERTIFICATES-y := $(wildcard *.x509) $(wildcard $(srctree)/*.x509)
140X509_CERTIFICATES-$(CONFIG_MODULE_SIG) += signing_key.x509 140X509_CERTIFICATES-$(CONFIG_MODULE_SIG) += $(objtree)/signing_key.x509
141X509_CERTIFICATES := $(sort $(foreach CERT,$(X509_CERTIFICATES-y), \ 141X509_CERTIFICATES-raw := $(sort $(foreach CERT,$(X509_CERTIFICATES-y), \
142 $(or $(realpath $(CERT)),$(CERT)))) 142 $(or $(realpath $(CERT)),$(CERT))))
143X509_CERTIFICATES := $(subst $(realpath $(objtree))/,,$(X509_CERTIFICATES-raw))
143 144
144ifeq ($(X509_CERTIFICATES),) 145ifeq ($(X509_CERTIFICATES),)
145$(warning *** No X.509 certificates found ***) 146$(warning *** No X.509 certificates found ***)
@@ -164,9 +165,9 @@ $(obj)/x509_certificate_list: $(X509_CERTIFICATES) $(obj)/.x509.list
164targets += $(obj)/.x509.list 165targets += $(obj)/.x509.list
165$(obj)/.x509.list: 166$(obj)/.x509.list:
166 @echo $(X509_CERTIFICATES) >$@ 167 @echo $(X509_CERTIFICATES) >$@
168endif
167 169
168clean-files := x509_certificate_list .x509.list 170clean-files := x509_certificate_list .x509.list
169endif
170 171
171ifeq ($(CONFIG_MODULE_SIG),y) 172ifeq ($(CONFIG_MODULE_SIG),y)
172############################################################################### 173###############################################################################
diff --git a/kernel/bounds.c b/kernel/bounds.c
index 5253204afdca..9fd4246b04b8 100644
--- a/kernel/bounds.c
+++ b/kernel/bounds.c
@@ -22,6 +22,6 @@ void foo(void)
22#ifdef CONFIG_SMP 22#ifdef CONFIG_SMP
23 DEFINE(NR_CPUS_BITS, ilog2(CONFIG_NR_CPUS)); 23 DEFINE(NR_CPUS_BITS, ilog2(CONFIG_NR_CPUS));
24#endif 24#endif
25 DEFINE(BLOATED_SPINLOCKS, sizeof(spinlock_t) > sizeof(int)); 25 DEFINE(SPINLOCK_SIZE, sizeof(spinlock_t));
26 /* End of constants */ 26 /* End of constants */
27} 27}
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 4c62513fe19f..bc1dcabe9217 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -90,6 +90,14 @@ static DEFINE_MUTEX(cgroup_mutex);
90static DEFINE_MUTEX(cgroup_root_mutex); 90static DEFINE_MUTEX(cgroup_root_mutex);
91 91
92/* 92/*
93 * cgroup destruction makes heavy use of work items and there can be a lot
94 * of concurrent destructions. Use a separate workqueue so that cgroup
95 * destruction work items don't end up filling up max_active of system_wq
96 * which may lead to deadlock.
97 */
98static struct workqueue_struct *cgroup_destroy_wq;
99
100/*
93 * Generate an array of cgroup subsystem pointers. At boot time, this is 101 * Generate an array of cgroup subsystem pointers. At boot time, this is
94 * populated with the built in subsystems, and modular subsystems are 102 * populated with the built in subsystems, and modular subsystems are
95 * registered after that. The mutable section of this array is protected by 103 * registered after that. The mutable section of this array is protected by
@@ -191,6 +199,7 @@ static void cgroup_destroy_css_killed(struct cgroup *cgrp);
191static int cgroup_destroy_locked(struct cgroup *cgrp); 199static int cgroup_destroy_locked(struct cgroup *cgrp);
192static int cgroup_addrm_files(struct cgroup *cgrp, struct cftype cfts[], 200static int cgroup_addrm_files(struct cgroup *cgrp, struct cftype cfts[],
193 bool is_add); 201 bool is_add);
202static int cgroup_file_release(struct inode *inode, struct file *file);
194 203
195/** 204/**
196 * cgroup_css - obtain a cgroup's css for the specified subsystem 205 * cgroup_css - obtain a cgroup's css for the specified subsystem
@@ -871,7 +880,7 @@ static void cgroup_free_rcu(struct rcu_head *head)
871 struct cgroup *cgrp = container_of(head, struct cgroup, rcu_head); 880 struct cgroup *cgrp = container_of(head, struct cgroup, rcu_head);
872 881
873 INIT_WORK(&cgrp->destroy_work, cgroup_free_fn); 882 INIT_WORK(&cgrp->destroy_work, cgroup_free_fn);
874 schedule_work(&cgrp->destroy_work); 883 queue_work(cgroup_destroy_wq, &cgrp->destroy_work);
875} 884}
876 885
877static void cgroup_diput(struct dentry *dentry, struct inode *inode) 886static void cgroup_diput(struct dentry *dentry, struct inode *inode)
@@ -881,6 +890,16 @@ static void cgroup_diput(struct dentry *dentry, struct inode *inode)
881 struct cgroup *cgrp = dentry->d_fsdata; 890 struct cgroup *cgrp = dentry->d_fsdata;
882 891
883 BUG_ON(!(cgroup_is_dead(cgrp))); 892 BUG_ON(!(cgroup_is_dead(cgrp)));
893
894 /*
895 * XXX: cgrp->id is only used to look up css's. As cgroup
896 * and css's lifetimes will be decoupled, it should be made
897 * per-subsystem and moved to css->id so that lookups are
898 * successful until the target css is released.
899 */
900 idr_remove(&cgrp->root->cgroup_idr, cgrp->id);
901 cgrp->id = -1;
902
884 call_rcu(&cgrp->rcu_head, cgroup_free_rcu); 903 call_rcu(&cgrp->rcu_head, cgroup_free_rcu);
885 } else { 904 } else {
886 struct cfent *cfe = __d_cfe(dentry); 905 struct cfent *cfe = __d_cfe(dentry);
@@ -2421,7 +2440,7 @@ static const struct file_operations cgroup_seqfile_operations = {
2421 .read = seq_read, 2440 .read = seq_read,
2422 .write = cgroup_file_write, 2441 .write = cgroup_file_write,
2423 .llseek = seq_lseek, 2442 .llseek = seq_lseek,
2424 .release = single_release, 2443 .release = cgroup_file_release,
2425}; 2444};
2426 2445
2427static int cgroup_file_open(struct inode *inode, struct file *file) 2446static int cgroup_file_open(struct inode *inode, struct file *file)
@@ -2482,6 +2501,8 @@ static int cgroup_file_release(struct inode *inode, struct file *file)
2482 ret = cft->release(inode, file); 2501 ret = cft->release(inode, file);
2483 if (css->ss) 2502 if (css->ss)
2484 css_put(css); 2503 css_put(css);
2504 if (file->f_op == &cgroup_seqfile_operations)
2505 single_release(inode, file);
2485 return ret; 2506 return ret;
2486} 2507}
2487 2508
@@ -4249,7 +4270,7 @@ static void css_free_rcu_fn(struct rcu_head *rcu_head)
4249 * css_put(). dput() requires process context which we don't have. 4270 * css_put(). dput() requires process context which we don't have.
4250 */ 4271 */
4251 INIT_WORK(&css->destroy_work, css_free_work_fn); 4272 INIT_WORK(&css->destroy_work, css_free_work_fn);
4252 schedule_work(&css->destroy_work); 4273 queue_work(cgroup_destroy_wq, &css->destroy_work);
4253} 4274}
4254 4275
4255static void css_release(struct percpu_ref *ref) 4276static void css_release(struct percpu_ref *ref)
@@ -4257,6 +4278,7 @@ static void css_release(struct percpu_ref *ref)
4257 struct cgroup_subsys_state *css = 4278 struct cgroup_subsys_state *css =
4258 container_of(ref, struct cgroup_subsys_state, refcnt); 4279 container_of(ref, struct cgroup_subsys_state, refcnt);
4259 4280
4281 rcu_assign_pointer(css->cgroup->subsys[css->ss->subsys_id], NULL);
4260 call_rcu(&css->rcu_head, css_free_rcu_fn); 4282 call_rcu(&css->rcu_head, css_free_rcu_fn);
4261} 4283}
4262 4284
@@ -4415,14 +4437,6 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
4415 list_add_tail_rcu(&cgrp->sibling, &cgrp->parent->children); 4437 list_add_tail_rcu(&cgrp->sibling, &cgrp->parent->children);
4416 root->number_of_cgroups++; 4438 root->number_of_cgroups++;
4417 4439
4418 /* each css holds a ref to the cgroup's dentry and the parent css */
4419 for_each_root_subsys(root, ss) {
4420 struct cgroup_subsys_state *css = css_ar[ss->subsys_id];
4421
4422 dget(dentry);
4423 css_get(css->parent);
4424 }
4425
4426 /* hold a ref to the parent's dentry */ 4440 /* hold a ref to the parent's dentry */
4427 dget(parent->dentry); 4441 dget(parent->dentry);
4428 4442
@@ -4434,6 +4448,13 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
4434 if (err) 4448 if (err)
4435 goto err_destroy; 4449 goto err_destroy;
4436 4450
4451 /* each css holds a ref to the cgroup's dentry and parent css */
4452 dget(dentry);
4453 css_get(css->parent);
4454
4455 /* mark it consumed for error path */
4456 css_ar[ss->subsys_id] = NULL;
4457
4437 if (ss->broken_hierarchy && !ss->warned_broken_hierarchy && 4458 if (ss->broken_hierarchy && !ss->warned_broken_hierarchy &&
4438 parent->parent) { 4459 parent->parent) {
4439 pr_warning("cgroup: %s (%d) created nested cgroup for controller \"%s\" which has incomplete hierarchy support. Nested cgroups may change behavior in the future.\n", 4460 pr_warning("cgroup: %s (%d) created nested cgroup for controller \"%s\" which has incomplete hierarchy support. Nested cgroups may change behavior in the future.\n",
@@ -4480,6 +4501,14 @@ err_free_cgrp:
4480 return err; 4501 return err;
4481 4502
4482err_destroy: 4503err_destroy:
4504 for_each_root_subsys(root, ss) {
4505 struct cgroup_subsys_state *css = css_ar[ss->subsys_id];
4506
4507 if (css) {
4508 percpu_ref_cancel_init(&css->refcnt);
4509 ss->css_free(css);
4510 }
4511 }
4483 cgroup_destroy_locked(cgrp); 4512 cgroup_destroy_locked(cgrp);
4484 mutex_unlock(&cgroup_mutex); 4513 mutex_unlock(&cgroup_mutex);
4485 mutex_unlock(&dentry->d_inode->i_mutex); 4514 mutex_unlock(&dentry->d_inode->i_mutex);
@@ -4539,7 +4568,7 @@ static void css_killed_ref_fn(struct percpu_ref *ref)
4539 container_of(ref, struct cgroup_subsys_state, refcnt); 4568 container_of(ref, struct cgroup_subsys_state, refcnt);
4540 4569
4541 INIT_WORK(&css->destroy_work, css_killed_work_fn); 4570 INIT_WORK(&css->destroy_work, css_killed_work_fn);
4542 schedule_work(&css->destroy_work); 4571 queue_work(cgroup_destroy_wq, &css->destroy_work);
4543} 4572}
4544 4573
4545/** 4574/**
@@ -4641,8 +4670,12 @@ static int cgroup_destroy_locked(struct cgroup *cgrp)
4641 * will be invoked to perform the rest of destruction once the 4670 * will be invoked to perform the rest of destruction once the
4642 * percpu refs of all css's are confirmed to be killed. 4671 * percpu refs of all css's are confirmed to be killed.
4643 */ 4672 */
4644 for_each_root_subsys(cgrp->root, ss) 4673 for_each_root_subsys(cgrp->root, ss) {
4645 kill_css(cgroup_css(cgrp, ss)); 4674 struct cgroup_subsys_state *css = cgroup_css(cgrp, ss);
4675
4676 if (css)
4677 kill_css(css);
4678 }
4646 4679
4647 /* 4680 /*
4648 * Mark @cgrp dead. This prevents further task migration and child 4681 * Mark @cgrp dead. This prevents further task migration and child
@@ -4711,14 +4744,6 @@ static void cgroup_destroy_css_killed(struct cgroup *cgrp)
4711 /* delete this cgroup from parent->children */ 4744 /* delete this cgroup from parent->children */
4712 list_del_rcu(&cgrp->sibling); 4745 list_del_rcu(&cgrp->sibling);
4713 4746
4714 /*
4715 * We should remove the cgroup object from idr before its grace
4716 * period starts, so we won't be looking up a cgroup while the
4717 * cgroup is being freed.
4718 */
4719 idr_remove(&cgrp->root->cgroup_idr, cgrp->id);
4720 cgrp->id = -1;
4721
4722 dput(d); 4747 dput(d);
4723 4748
4724 set_bit(CGRP_RELEASABLE, &parent->flags); 4749 set_bit(CGRP_RELEASABLE, &parent->flags);
@@ -5063,6 +5088,22 @@ out:
5063 return err; 5088 return err;
5064} 5089}
5065 5090
5091static int __init cgroup_wq_init(void)
5092{
5093 /*
5094 * There isn't much point in executing destruction path in
5095 * parallel. Good chunk is serialized with cgroup_mutex anyway.
5096 * Use 1 for @max_active.
5097 *
5098 * We would prefer to do this in cgroup_init() above, but that
5099 * is called before init_workqueues(): so leave this until after.
5100 */
5101 cgroup_destroy_wq = alloc_workqueue("cgroup_destroy", 0, 1);
5102 BUG_ON(!cgroup_destroy_wq);
5103 return 0;
5104}
5105core_initcall(cgroup_wq_init);
5106
5066/* 5107/*
5067 * proc_cgroup_show() 5108 * proc_cgroup_show()
5068 * - Print task's cgroup paths into seq_file, one line for each hierarchy 5109 * - Print task's cgroup paths into seq_file, one line for each hierarchy
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 6bf981e13c43..4772034b4b17 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -1033,8 +1033,10 @@ static void cpuset_change_task_nodemask(struct task_struct *tsk,
1033 need_loop = task_has_mempolicy(tsk) || 1033 need_loop = task_has_mempolicy(tsk) ||
1034 !nodes_intersects(*newmems, tsk->mems_allowed); 1034 !nodes_intersects(*newmems, tsk->mems_allowed);
1035 1035
1036 if (need_loop) 1036 if (need_loop) {
1037 local_irq_disable();
1037 write_seqcount_begin(&tsk->mems_allowed_seq); 1038 write_seqcount_begin(&tsk->mems_allowed_seq);
1039 }
1038 1040
1039 nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems); 1041 nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems);
1040 mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP1); 1042 mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP1);
@@ -1042,8 +1044,10 @@ static void cpuset_change_task_nodemask(struct task_struct *tsk,
1042 mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP2); 1044 mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP2);
1043 tsk->mems_allowed = *newmems; 1045 tsk->mems_allowed = *newmems;
1044 1046
1045 if (need_loop) 1047 if (need_loop) {
1046 write_seqcount_end(&tsk->mems_allowed_seq); 1048 write_seqcount_end(&tsk->mems_allowed_seq);
1049 local_irq_enable();
1050 }
1047 1051
1048 task_unlock(tsk); 1052 task_unlock(tsk);
1049} 1053}
diff --git a/kernel/events/core.c b/kernel/events/core.c
index d724e7757cd1..f5744010a8d2 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1396,6 +1396,8 @@ event_sched_out(struct perf_event *event,
1396 if (event->state != PERF_EVENT_STATE_ACTIVE) 1396 if (event->state != PERF_EVENT_STATE_ACTIVE)
1397 return; 1397 return;
1398 1398
1399 perf_pmu_disable(event->pmu);
1400
1399 event->state = PERF_EVENT_STATE_INACTIVE; 1401 event->state = PERF_EVENT_STATE_INACTIVE;
1400 if (event->pending_disable) { 1402 if (event->pending_disable) {
1401 event->pending_disable = 0; 1403 event->pending_disable = 0;
@@ -1412,6 +1414,8 @@ event_sched_out(struct perf_event *event,
1412 ctx->nr_freq--; 1414 ctx->nr_freq--;
1413 if (event->attr.exclusive || !cpuctx->active_oncpu) 1415 if (event->attr.exclusive || !cpuctx->active_oncpu)
1414 cpuctx->exclusive = 0; 1416 cpuctx->exclusive = 0;
1417
1418 perf_pmu_enable(event->pmu);
1415} 1419}
1416 1420
1417static void 1421static void
@@ -1652,6 +1656,7 @@ event_sched_in(struct perf_event *event,
1652 struct perf_event_context *ctx) 1656 struct perf_event_context *ctx)
1653{ 1657{
1654 u64 tstamp = perf_event_time(event); 1658 u64 tstamp = perf_event_time(event);
1659 int ret = 0;
1655 1660
1656 if (event->state <= PERF_EVENT_STATE_OFF) 1661 if (event->state <= PERF_EVENT_STATE_OFF)
1657 return 0; 1662 return 0;
@@ -1674,10 +1679,13 @@ event_sched_in(struct perf_event *event,
1674 */ 1679 */
1675 smp_wmb(); 1680 smp_wmb();
1676 1681
1682 perf_pmu_disable(event->pmu);
1683
1677 if (event->pmu->add(event, PERF_EF_START)) { 1684 if (event->pmu->add(event, PERF_EF_START)) {
1678 event->state = PERF_EVENT_STATE_INACTIVE; 1685 event->state = PERF_EVENT_STATE_INACTIVE;
1679 event->oncpu = -1; 1686 event->oncpu = -1;
1680 return -EAGAIN; 1687 ret = -EAGAIN;
1688 goto out;
1681 } 1689 }
1682 1690
1683 event->tstamp_running += tstamp - event->tstamp_stopped; 1691 event->tstamp_running += tstamp - event->tstamp_stopped;
@@ -1693,7 +1701,10 @@ event_sched_in(struct perf_event *event,
1693 if (event->attr.exclusive) 1701 if (event->attr.exclusive)
1694 cpuctx->exclusive = 1; 1702 cpuctx->exclusive = 1;
1695 1703
1696 return 0; 1704out:
1705 perf_pmu_enable(event->pmu);
1706
1707 return ret;
1697} 1708}
1698 1709
1699static int 1710static int
@@ -2743,6 +2754,8 @@ static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx,
2743 if (!event_filter_match(event)) 2754 if (!event_filter_match(event))
2744 continue; 2755 continue;
2745 2756
2757 perf_pmu_disable(event->pmu);
2758
2746 hwc = &event->hw; 2759 hwc = &event->hw;
2747 2760
2748 if (hwc->interrupts == MAX_INTERRUPTS) { 2761 if (hwc->interrupts == MAX_INTERRUPTS) {
@@ -2752,7 +2765,7 @@ static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx,
2752 } 2765 }
2753 2766
2754 if (!event->attr.freq || !event->attr.sample_freq) 2767 if (!event->attr.freq || !event->attr.sample_freq)
2755 continue; 2768 goto next;
2756 2769
2757 /* 2770 /*
2758 * stop the event and update event->count 2771 * stop the event and update event->count
@@ -2774,6 +2787,8 @@ static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx,
2774 perf_adjust_period(event, period, delta, false); 2787 perf_adjust_period(event, period, delta, false);
2775 2788
2776 event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0); 2789 event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0);
2790 next:
2791 perf_pmu_enable(event->pmu);
2777 } 2792 }
2778 2793
2779 perf_pmu_enable(ctx->pmu); 2794 perf_pmu_enable(ctx->pmu);
@@ -5680,11 +5695,6 @@ static void swevent_hlist_put(struct perf_event *event)
5680{ 5695{
5681 int cpu; 5696 int cpu;
5682 5697
5683 if (event->cpu != -1) {
5684 swevent_hlist_put_cpu(event, event->cpu);
5685 return;
5686 }
5687
5688 for_each_possible_cpu(cpu) 5698 for_each_possible_cpu(cpu)
5689 swevent_hlist_put_cpu(event, cpu); 5699 swevent_hlist_put_cpu(event, cpu);
5690} 5700}
@@ -5718,9 +5728,6 @@ static int swevent_hlist_get(struct perf_event *event)
5718 int err; 5728 int err;
5719 int cpu, failed_cpu; 5729 int cpu, failed_cpu;
5720 5730
5721 if (event->cpu != -1)
5722 return swevent_hlist_get_cpu(event, event->cpu);
5723
5724 get_online_cpus(); 5731 get_online_cpus();
5725 for_each_possible_cpu(cpu) { 5732 for_each_possible_cpu(cpu) {
5726 err = swevent_hlist_get_cpu(event, cpu); 5733 err = swevent_hlist_get_cpu(event, cpu);
diff --git a/kernel/extable.c b/kernel/extable.c
index 832cb28105bb..763faf037ec1 100644
--- a/kernel/extable.c
+++ b/kernel/extable.c
@@ -61,7 +61,7 @@ const struct exception_table_entry *search_exception_tables(unsigned long addr)
61static inline int init_kernel_text(unsigned long addr) 61static inline int init_kernel_text(unsigned long addr)
62{ 62{
63 if (addr >= (unsigned long)_sinittext && 63 if (addr >= (unsigned long)_sinittext &&
64 addr <= (unsigned long)_einittext) 64 addr < (unsigned long)_einittext)
65 return 1; 65 return 1;
66 return 0; 66 return 0;
67} 67}
@@ -69,7 +69,7 @@ static inline int init_kernel_text(unsigned long addr)
69int core_kernel_text(unsigned long addr) 69int core_kernel_text(unsigned long addr)
70{ 70{
71 if (addr >= (unsigned long)_stext && 71 if (addr >= (unsigned long)_stext &&
72 addr <= (unsigned long)_etext) 72 addr < (unsigned long)_etext)
73 return 1; 73 return 1;
74 74
75 if (system_state == SYSTEM_BOOTING && 75 if (system_state == SYSTEM_BOOTING &&
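The extable.c hunks above turn both text-range checks into half-open comparisons: _einittext and _etext are linker symbols that mark the first byte after their sections, so membership has to be tested with "addr < end" rather than "addr <= end". A tiny sketch of the same convention with made-up addresses:

#include <stdio.h>

static int in_text(unsigned long addr, unsigned long start, unsigned long end)
{
        return addr >= start && addr < end;     /* end is one past the section */
}

int main(void)
{
        unsigned long stext = 0x1000, etext = 0x2000;   /* section is [0x1000, 0x2000) */

        printf("%d\n", in_text(0x1fff, stext, etext));  /* 1: last byte of text */
        printf("%d\n", in_text(0x2000, stext, etext));  /* 0: already past the end */
        return 0;
}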
diff --git a/kernel/fork.c b/kernel/fork.c
index 728d5be9548c..5721f0e3f2da 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -537,6 +537,7 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p)
537 spin_lock_init(&mm->page_table_lock); 537 spin_lock_init(&mm->page_table_lock);
538 mm_init_aio(mm); 538 mm_init_aio(mm);
539 mm_init_owner(mm, p); 539 mm_init_owner(mm, p);
540 clear_tlb_flush_pending(mm);
540 541
541 if (likely(!mm_alloc_pgd(mm))) { 542 if (likely(!mm_alloc_pgd(mm))) {
542 mm->def_flags = 0; 543 mm->def_flags = 0;
diff --git a/kernel/freezer.c b/kernel/freezer.c
index b462fa197517..aa6a8aadb911 100644
--- a/kernel/freezer.c
+++ b/kernel/freezer.c
@@ -19,6 +19,12 @@ EXPORT_SYMBOL(system_freezing_cnt);
19bool pm_freezing; 19bool pm_freezing;
20bool pm_nosig_freezing; 20bool pm_nosig_freezing;
21 21
22/*
23 * Temporary export for the deadlock workaround in ata_scsi_hotplug().
24 * Remove once the hack becomes unnecessary.
25 */
26EXPORT_SYMBOL_GPL(pm_freezing);
27
22/* protects freezing and frozen transitions */ 28/* protects freezing and frozen transitions */
23static DEFINE_SPINLOCK(freezer_lock); 29static DEFINE_SPINLOCK(freezer_lock);
24 30
diff --git a/kernel/futex.c b/kernel/futex.c
index 80ba086f021d..f6ff0191ecf7 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -251,6 +251,9 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
251 return -EINVAL; 251 return -EINVAL;
252 address -= key->both.offset; 252 address -= key->both.offset;
253 253
254 if (unlikely(!access_ok(rw, uaddr, sizeof(u32))))
255 return -EFAULT;
256
254 /* 257 /*
255 * PROCESS_PRIVATE futexes are fast. 258 * PROCESS_PRIVATE futexes are fast.
256 * As the mm cannot disappear under us and the 'key' only needs 259 * As the mm cannot disappear under us and the 'key' only needs
@@ -259,8 +262,6 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
259 * but access_ok() should be faster than find_vma() 262 * but access_ok() should be faster than find_vma()
260 */ 263 */
261 if (!fshared) { 264 if (!fshared) {
262 if (unlikely(!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))))
263 return -EFAULT;
264 key->private.mm = mm; 265 key->private.mm = mm;
265 key->private.address = address; 266 key->private.address = address;
266 get_futex_key_refs(key); 267 get_futex_key_refs(key);
@@ -288,7 +289,7 @@ again:
288 put_page(page); 289 put_page(page);
289 /* serialize against __split_huge_page_splitting() */ 290 /* serialize against __split_huge_page_splitting() */
290 local_irq_disable(); 291 local_irq_disable();
291 if (likely(__get_user_pages_fast(address, 1, 1, &page) == 1)) { 292 if (likely(__get_user_pages_fast(address, 1, !ro, &page) == 1)) {
292 page_head = compound_head(page); 293 page_head = compound_head(page);
293 /* 294 /*
294 * page_head is valid pointer but we must pin 295 * page_head is valid pointer but we must pin
diff --git a/kernel/irq/pm.c b/kernel/irq/pm.c
index cb228bf21760..abcd6ca86cb7 100644
--- a/kernel/irq/pm.c
+++ b/kernel/irq/pm.c
@@ -50,7 +50,7 @@ static void resume_irqs(bool want_early)
50 bool is_early = desc->action && 50 bool is_early = desc->action &&
51 desc->action->flags & IRQF_EARLY_RESUME; 51 desc->action->flags & IRQF_EARLY_RESUME;
52 52
53 if (is_early != want_early) 53 if (!is_early && want_early)
54 continue; 54 continue;
55 55
56 raw_spin_lock_irqsave(&desc->lock, flags); 56 raw_spin_lock_irqsave(&desc->lock, flags);
diff --git a/kernel/kexec.c b/kernel/kexec.c
index 490afc03627e..9c970167e402 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -47,6 +47,9 @@ u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4];
47size_t vmcoreinfo_size; 47size_t vmcoreinfo_size;
48size_t vmcoreinfo_max_size = sizeof(vmcoreinfo_data); 48size_t vmcoreinfo_max_size = sizeof(vmcoreinfo_data);
49 49
50/* Flag to indicate we are going to kexec a new kernel */
51bool kexec_in_progress = false;
52
50/* Location of the reserved area for the crash kernel */ 53/* Location of the reserved area for the crash kernel */
51struct resource crashk_res = { 54struct resource crashk_res = {
52 .name = "Crash kernel", 55 .name = "Crash kernel",
@@ -1675,7 +1678,9 @@ int kernel_kexec(void)
1675 } else 1678 } else
1676#endif 1679#endif
1677 { 1680 {
1681 kexec_in_progress = true;
1678 kernel_restart_prepare(NULL); 1682 kernel_restart_prepare(NULL);
1683 migrate_to_reboot_cpu();
1679 printk(KERN_EMERG "Starting new kernel\n"); 1684 printk(KERN_EMERG "Starting new kernel\n");
1680 machine_shutdown(); 1685 machine_shutdown();
1681 } 1686 }
diff --git a/kernel/padata.c b/kernel/padata.c
index 07af2c95dcfe..2abd25d79cc8 100644
--- a/kernel/padata.c
+++ b/kernel/padata.c
@@ -46,6 +46,7 @@ static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index)
46 46
47static int padata_cpu_hash(struct parallel_data *pd) 47static int padata_cpu_hash(struct parallel_data *pd)
48{ 48{
49 unsigned int seq_nr;
49 int cpu_index; 50 int cpu_index;
50 51
51 /* 52 /*
@@ -53,10 +54,8 @@ static int padata_cpu_hash(struct parallel_data *pd)
53 * seq_nr mod. number of cpus in use. 54 * seq_nr mod. number of cpus in use.
54 */ 55 */
55 56
56 spin_lock(&pd->seq_lock); 57 seq_nr = atomic_inc_return(&pd->seq_nr);
57 cpu_index = pd->seq_nr % cpumask_weight(pd->cpumask.pcpu); 58 cpu_index = seq_nr % cpumask_weight(pd->cpumask.pcpu);
58 pd->seq_nr++;
59 spin_unlock(&pd->seq_lock);
60 59
61 return padata_index_to_cpu(pd, cpu_index); 60 return padata_index_to_cpu(pd, cpu_index);
62} 61}
@@ -429,7 +428,7 @@ static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
429 padata_init_pqueues(pd); 428 padata_init_pqueues(pd);
430 padata_init_squeues(pd); 429 padata_init_squeues(pd);
431 setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd); 430 setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
432 pd->seq_nr = 0; 431 atomic_set(&pd->seq_nr, -1);
433 atomic_set(&pd->reorder_objects, 0); 432 atomic_set(&pd->reorder_objects, 0);
434 atomic_set(&pd->refcnt, 0); 433 atomic_set(&pd->refcnt, 0);
435 pd->pinst = pinst; 434 pd->pinst = pinst;
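The padata hunks above drop the seq_lock entirely: one atomic_inc_return() hands every submitter a unique sequence number, and the modulo over the active CPU mask turns that into a round-robin CPU choice. A userspace sketch of the same idea using C11 atomics; the UINT_MAX start mirrors the new atomic_set(&pd->seq_nr, -1) so the first job lands on index 0:

#include <limits.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_uint seq_nr = UINT_MAX;   /* -1: the first increment wraps to 0 */

static unsigned int pick_cpu_index(unsigned int nr_cpus)
{
        /* fetch_add + 1 is the userspace spelling of atomic_inc_return() */
        unsigned int seq = atomic_fetch_add(&seq_nr, 1) + 1;

        return seq % nr_cpus;
}

int main(void)
{
        for (int i = 0; i < 8; i++)
                printf("%u ", pick_cpu_index(3));       /* 0 1 2 0 1 2 0 1 */
        printf("\n");
        return 0;
}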
diff --git a/kernel/power/console.c b/kernel/power/console.c
index 463aa6736751..eacb8bd8cab4 100644
--- a/kernel/power/console.c
+++ b/kernel/power/console.c
@@ -81,6 +81,7 @@ void pm_vt_switch_unregister(struct device *dev)
81 list_for_each_entry(tmp, &pm_vt_switch_list, head) { 81 list_for_each_entry(tmp, &pm_vt_switch_list, head) {
82 if (tmp->dev == dev) { 82 if (tmp->dev == dev) {
83 list_del(&tmp->head); 83 list_del(&tmp->head);
84 kfree(tmp);
84 break; 85 break;
85 } 86 }
86 } 87 }
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 6abb03dff5c0..08a765232432 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -1632,7 +1632,7 @@ module_param(rcu_idle_gp_delay, int, 0644);
1632static int rcu_idle_lazy_gp_delay = RCU_IDLE_LAZY_GP_DELAY; 1632static int rcu_idle_lazy_gp_delay = RCU_IDLE_LAZY_GP_DELAY;
1633module_param(rcu_idle_lazy_gp_delay, int, 0644); 1633module_param(rcu_idle_lazy_gp_delay, int, 0644);
1634 1634
1635extern int tick_nohz_enabled; 1635extern int tick_nohz_active;
1636 1636
1637/* 1637/*
1638 * Try to advance callbacks for all flavors of RCU on the current CPU, but 1638 * Try to advance callbacks for all flavors of RCU on the current CPU, but
@@ -1729,7 +1729,7 @@ static void rcu_prepare_for_idle(int cpu)
1729 int tne; 1729 int tne;
1730 1730
1731 /* Handle nohz enablement switches conservatively. */ 1731 /* Handle nohz enablement switches conservatively. */
1732 tne = ACCESS_ONCE(tick_nohz_enabled); 1732 tne = ACCESS_ONCE(tick_nohz_active);
1733 if (tne != rdtp->tick_nohz_enabled_snap) { 1733 if (tne != rdtp->tick_nohz_enabled_snap) {
1734 if (rcu_cpu_has_callbacks(cpu, NULL)) 1734 if (rcu_cpu_has_callbacks(cpu, NULL))
1735 invoke_rcu_core(); /* force nohz to see update. */ 1735 invoke_rcu_core(); /* force nohz to see update. */
diff --git a/kernel/reboot.c b/kernel/reboot.c
index f813b3474646..662c83fc16b7 100644
--- a/kernel/reboot.c
+++ b/kernel/reboot.c
@@ -104,7 +104,7 @@ int unregister_reboot_notifier(struct notifier_block *nb)
104} 104}
105EXPORT_SYMBOL(unregister_reboot_notifier); 105EXPORT_SYMBOL(unregister_reboot_notifier);
106 106
107static void migrate_to_reboot_cpu(void) 107void migrate_to_reboot_cpu(void)
108{ 108{
109 /* The boot cpu is always logical cpu 0 */ 109 /* The boot cpu is always logical cpu 0 */
110 int cpu = reboot_cpu; 110 int cpu = reboot_cpu;
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index c1808606ee5f..a88f4a485c5e 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2660,6 +2660,7 @@ asmlinkage void __sched notrace preempt_schedule(void)
2660 } while (need_resched()); 2660 } while (need_resched());
2661} 2661}
2662EXPORT_SYMBOL(preempt_schedule); 2662EXPORT_SYMBOL(preempt_schedule);
2663#endif /* CONFIG_PREEMPT */
2663 2664
2664/* 2665/*
2665 * this is the entry point to schedule() from kernel preemption 2666 * this is the entry point to schedule() from kernel preemption
@@ -2693,8 +2694,6 @@ asmlinkage void __sched preempt_schedule_irq(void)
2693 exception_exit(prev_state); 2694 exception_exit(prev_state);
2694} 2695}
2695 2696
2696#endif /* CONFIG_PREEMPT */
2697
2698int default_wake_function(wait_queue_t *curr, unsigned mode, int wake_flags, 2697int default_wake_function(wait_queue_t *curr, unsigned mode, int wake_flags,
2699 void *key) 2698 void *key)
2700{ 2699{
@@ -4762,7 +4761,7 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd)
4762 cpumask_clear_cpu(rq->cpu, old_rd->span); 4761 cpumask_clear_cpu(rq->cpu, old_rd->span);
4763 4762
4764 /* 4763 /*
4765 * If we dont want to free the old_rt yet then 4764 * If we dont want to free the old_rd yet then
4766 * set old_rd to NULL to skip the freeing later 4765 * set old_rd to NULL to skip the freeing later
4767 * in this function: 4766 * in this function:
4768 */ 4767 */
@@ -4903,6 +4902,7 @@ DEFINE_PER_CPU(struct sched_domain *, sd_asym);
4903static void update_top_cache_domain(int cpu) 4902static void update_top_cache_domain(int cpu)
4904{ 4903{
4905 struct sched_domain *sd; 4904 struct sched_domain *sd;
4905 struct sched_domain *busy_sd = NULL;
4906 int id = cpu; 4906 int id = cpu;
4907 int size = 1; 4907 int size = 1;
4908 4908
@@ -4910,8 +4910,9 @@ static void update_top_cache_domain(int cpu)
4910 if (sd) { 4910 if (sd) {
4911 id = cpumask_first(sched_domain_span(sd)); 4911 id = cpumask_first(sched_domain_span(sd));
4912 size = cpumask_weight(sched_domain_span(sd)); 4912 size = cpumask_weight(sched_domain_span(sd));
4913 rcu_assign_pointer(per_cpu(sd_busy, cpu), sd->parent); 4913 busy_sd = sd->parent; /* sd_busy */
4914 } 4914 }
4915 rcu_assign_pointer(per_cpu(sd_busy, cpu), busy_sd);
4915 4916
4916 rcu_assign_pointer(per_cpu(sd_llc, cpu), sd); 4917 rcu_assign_pointer(per_cpu(sd_llc, cpu), sd);
4917 per_cpu(sd_llc_size, cpu) = size; 4918 per_cpu(sd_llc_size, cpu) = size;
@@ -5112,6 +5113,7 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu)
5112 * die on a /0 trap. 5113 * die on a /0 trap.
5113 */ 5114 */
5114 sg->sgp->power = SCHED_POWER_SCALE * cpumask_weight(sg_span); 5115 sg->sgp->power = SCHED_POWER_SCALE * cpumask_weight(sg_span);
5116 sg->sgp->power_orig = sg->sgp->power;
5115 5117
5116 /* 5118 /*
5117 * Make sure the first group of this domain contains the 5119 * Make sure the first group of this domain contains the
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index e8b652ebe027..c7395d97e4cb 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -178,59 +178,61 @@ void sched_init_granularity(void)
178 update_sysctl(); 178 update_sysctl();
179} 179}
180 180
181#if BITS_PER_LONG == 32 181#define WMULT_CONST (~0U)
182# define WMULT_CONST (~0UL)
183#else
184# define WMULT_CONST (1UL << 32)
185#endif
186
187#define WMULT_SHIFT 32 182#define WMULT_SHIFT 32
188 183
189/* 184static void __update_inv_weight(struct load_weight *lw)
190 * Shift right and round: 185{
191 */ 186 unsigned long w;
192#define SRR(x, y) (((x) + (1UL << ((y) - 1))) >> (y)) 187
188 if (likely(lw->inv_weight))
189 return;
190
191 w = scale_load_down(lw->weight);
192
193 if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST))
194 lw->inv_weight = 1;
195 else if (unlikely(!w))
196 lw->inv_weight = WMULT_CONST;
197 else
198 lw->inv_weight = WMULT_CONST / w;
199}
193 200
194/* 201/*
195 * delta *= weight / lw 202 * delta_exec * weight / lw.weight
203 * OR
204 * (delta_exec * (weight * lw->inv_weight)) >> WMULT_SHIFT
205 *
206 * Either weight := NICE_0_LOAD and lw \e prio_to_wmult[], in which case
207 * we're guaranteed shift stays positive because inv_weight is guaranteed to
208 * fit 32 bits, and NICE_0_LOAD gives another 10 bits; therefore shift >= 22.
209 *
210 * Or, weight =< lw.weight (because lw.weight is the runqueue weight), thus
211 * weight/lw.weight <= 1, and therefore our shift will also be positive.
196 */ 212 */
197static unsigned long 213static u64 __calc_delta(u64 delta_exec, unsigned long weight, struct load_weight *lw)
198calc_delta_mine(unsigned long delta_exec, unsigned long weight,
199 struct load_weight *lw)
200{ 214{
201 u64 tmp; 215 u64 fact = scale_load_down(weight);
216 int shift = WMULT_SHIFT;
202 217
203 /* 218 __update_inv_weight(lw);
204 * weight can be less than 2^SCHED_LOAD_RESOLUTION for task group sched
205 * entities since MIN_SHARES = 2. Treat weight as 1 if less than
206 * 2^SCHED_LOAD_RESOLUTION.
207 */
208 if (likely(weight > (1UL << SCHED_LOAD_RESOLUTION)))
209 tmp = (u64)delta_exec * scale_load_down(weight);
210 else
211 tmp = (u64)delta_exec;
212 219
213 if (!lw->inv_weight) { 220 if (unlikely(fact >> 32)) {
214 unsigned long w = scale_load_down(lw->weight); 221 while (fact >> 32) {
215 222 fact >>= 1;
216 if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST)) 223 shift--;
217 lw->inv_weight = 1; 224 }
218 else if (unlikely(!w))
219 lw->inv_weight = WMULT_CONST;
220 else
221 lw->inv_weight = WMULT_CONST / w;
222 } 225 }
223 226
224 /* 227 /* hint to use a 32x32->64 mul */
225 * Check whether we'd overflow the 64-bit multiplication: 228 fact = (u64)(u32)fact * lw->inv_weight;
226 */
227 if (unlikely(tmp > WMULT_CONST))
228 tmp = SRR(SRR(tmp, WMULT_SHIFT/2) * lw->inv_weight,
229 WMULT_SHIFT/2);
230 else
231 tmp = SRR(tmp * lw->inv_weight, WMULT_SHIFT);
232 229
233 return (unsigned long)min(tmp, (u64)(unsigned long)LONG_MAX); 230 while (fact >> 32) {
231 fact >>= 1;
232 shift--;
233 }
234
235 return mul_u64_u32_shr(delta_exec, fact, shift);
234} 236}
235 237
236 238
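The __calc_delta() hunk above replaces calc_delta_mine() with a cleaner fixed-point division: inv_weight caches roughly 2^32 / lw->weight, so each call becomes a multiply by (weight * inv_weight) followed by a right shift, with the shift reduced whenever the multiplier would no longer fit in 32 bits. A hedged userspace sketch of the arithmetic (values made up; the real code goes through mul_u64_u32_shr() so the final multiply cannot overflow 64 bits, which this simplified version does not bother with):

#include <stdint.h>
#include <stdio.h>

#define WMULT_CONST (~0U)               /* as in the patched code */
#define WMULT_SHIFT 32

static uint32_t inv_weight(uint32_t w)
{
        return w ? WMULT_CONST / w : WMULT_CONST;
}

/* Approximates delta * weight / lw_weight as (delta * weight * inv) >> shift. */
static uint64_t calc_delta(uint64_t delta, uint32_t weight, uint32_t lw_weight)
{
        uint64_t fact = (uint64_t)weight * inv_weight(lw_weight);
        int shift = WMULT_SHIFT;

        while (fact >> 32) {            /* keep the multiplier within 32 bits */
                fact >>= 1;
                shift--;
        }
        return (delta * fact) >> shift; /* assumes delta is small enough not to overflow */
}

int main(void)
{
        /* 3 ms of runtime for a weight-1024 entity on a weight-3072 runqueue */
        printf("%llu\n", (unsigned long long)calc_delta(3000000, 1024, 3072));
        /* exact answer is 1000000; the fixed-point result is off by at most 1 */
        return 0;
}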
@@ -443,7 +445,7 @@ find_matching_se(struct sched_entity **se, struct sched_entity **pse)
443#endif /* CONFIG_FAIR_GROUP_SCHED */ 445#endif /* CONFIG_FAIR_GROUP_SCHED */
444 446
445static __always_inline 447static __always_inline
446void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, unsigned long delta_exec); 448void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec);
447 449
448/************************************************************** 450/**************************************************************
449 * Scheduling class tree data structure manipulation methods: 451 * Scheduling class tree data structure manipulation methods:
@@ -612,11 +614,10 @@ int sched_proc_update_handler(struct ctl_table *table, int write,
612/* 614/*
613 * delta /= w 615 * delta /= w
614 */ 616 */
615static inline unsigned long 617static inline u64 calc_delta_fair(u64 delta, struct sched_entity *se)
616calc_delta_fair(unsigned long delta, struct sched_entity *se)
617{ 618{
618 if (unlikely(se->load.weight != NICE_0_LOAD)) 619 if (unlikely(se->load.weight != NICE_0_LOAD))
619 delta = calc_delta_mine(delta, NICE_0_LOAD, &se->load); 620 delta = __calc_delta(delta, NICE_0_LOAD, &se->load);
620 621
621 return delta; 622 return delta;
622} 623}
@@ -665,7 +666,7 @@ static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
665 update_load_add(&lw, se->load.weight); 666 update_load_add(&lw, se->load.weight);
666 load = &lw; 667 load = &lw;
667 } 668 }
668 slice = calc_delta_mine(slice, se->load.weight, load); 669 slice = __calc_delta(slice, se->load.weight, load);
669 } 670 }
670 return slice; 671 return slice;
671} 672}
@@ -703,47 +704,32 @@ void init_task_runnable_average(struct task_struct *p)
703#endif 704#endif
704 705
705/* 706/*
706 * Update the current task's runtime statistics. Skip current tasks that 707 * Update the current task's runtime statistics.
707 * are not in our scheduling class.
708 */ 708 */
709static inline void
710__update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
711 unsigned long delta_exec)
712{
713 unsigned long delta_exec_weighted;
714
715 schedstat_set(curr->statistics.exec_max,
716 max((u64)delta_exec, curr->statistics.exec_max));
717
718 curr->sum_exec_runtime += delta_exec;
719 schedstat_add(cfs_rq, exec_clock, delta_exec);
720 delta_exec_weighted = calc_delta_fair(delta_exec, curr);
721
722 curr->vruntime += delta_exec_weighted;
723 update_min_vruntime(cfs_rq);
724}
725
726static void update_curr(struct cfs_rq *cfs_rq) 709static void update_curr(struct cfs_rq *cfs_rq)
727{ 710{
728 struct sched_entity *curr = cfs_rq->curr; 711 struct sched_entity *curr = cfs_rq->curr;
729 u64 now = rq_clock_task(rq_of(cfs_rq)); 712 u64 now = rq_clock_task(rq_of(cfs_rq));
730 unsigned long delta_exec; 713 u64 delta_exec;
731 714
732 if (unlikely(!curr)) 715 if (unlikely(!curr))
733 return; 716 return;
734 717
735 /* 718 delta_exec = now - curr->exec_start;
736 * Get the amount of time the current task was running 719 if (unlikely((s64)delta_exec <= 0))
737 * since the last time we changed load (this cannot
738 * overflow on 32 bits):
739 */
740 delta_exec = (unsigned long)(now - curr->exec_start);
741 if (!delta_exec)
742 return; 720 return;
743 721
744 __update_curr(cfs_rq, curr, delta_exec);
745 curr->exec_start = now; 722 curr->exec_start = now;
746 723
724 schedstat_set(curr->statistics.exec_max,
725 max(delta_exec, curr->statistics.exec_max));
726
727 curr->sum_exec_runtime += delta_exec;
728 schedstat_add(cfs_rq, exec_clock, delta_exec);
729
730 curr->vruntime += calc_delta_fair(delta_exec, curr);
731 update_min_vruntime(cfs_rq);
732
747 if (entity_is_task(curr)) { 733 if (entity_is_task(curr)) {
748 struct task_struct *curtask = task_of(curr); 734 struct task_struct *curtask = task_of(curr);
749 735
@@ -1752,6 +1738,13 @@ void task_numa_work(struct callback_head *work)
1752 (vma->vm_file && (vma->vm_flags & (VM_READ|VM_WRITE)) == (VM_READ))) 1738 (vma->vm_file && (vma->vm_flags & (VM_READ|VM_WRITE)) == (VM_READ)))
1753 continue; 1739 continue;
1754 1740
1741 /*
1742 * Skip inaccessible VMAs to avoid any confusion between
1743 * PROT_NONE and NUMA hinting ptes
1744 */
1745 if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
1746 continue;
1747
1755 do { 1748 do {
1756 start = max(start, vma->vm_start); 1749 start = max(start, vma->vm_start);
1757 end = ALIGN(start + (pages << PAGE_SHIFT), HPAGE_SIZE); 1750 end = ALIGN(start + (pages << PAGE_SHIFT), HPAGE_SIZE);
@@ -3015,8 +3008,7 @@ static void expire_cfs_rq_runtime(struct cfs_rq *cfs_rq)
3015 } 3008 }
3016} 3009}
3017 3010
3018static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, 3011static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
3019 unsigned long delta_exec)
3020{ 3012{
3021 /* dock delta_exec before expiring quota (as it could span periods) */ 3013 /* dock delta_exec before expiring quota (as it could span periods) */
3022 cfs_rq->runtime_remaining -= delta_exec; 3014 cfs_rq->runtime_remaining -= delta_exec;
@@ -3034,7 +3026,7 @@ static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
3034} 3026}
3035 3027
3036static __always_inline 3028static __always_inline
3037void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, unsigned long delta_exec) 3029void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
3038{ 3030{
3039 if (!cfs_bandwidth_used() || !cfs_rq->runtime_enabled) 3031 if (!cfs_bandwidth_used() || !cfs_rq->runtime_enabled)
3040 return; 3032 return;
@@ -3574,8 +3566,7 @@ static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
3574 return rq_clock_task(rq_of(cfs_rq)); 3566 return rq_clock_task(rq_of(cfs_rq));
3575} 3567}
3576 3568
3577static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, 3569static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) {}
3578 unsigned long delta_exec) {}
3579static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq) {} 3570static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
3580static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {} 3571static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {}
3581static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {} 3572static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
@@ -5379,10 +5370,31 @@ void update_group_power(struct sched_domain *sd, int cpu)
5379 */ 5370 */
5380 5371
5381 for_each_cpu(cpu, sched_group_cpus(sdg)) { 5372 for_each_cpu(cpu, sched_group_cpus(sdg)) {
5382 struct sched_group *sg = cpu_rq(cpu)->sd->groups; 5373 struct sched_group_power *sgp;
5374 struct rq *rq = cpu_rq(cpu);
5375
5376 /*
5377 * build_sched_domains() -> init_sched_groups_power()
5378 * gets here before we've attached the domains to the
5379 * runqueues.
5380 *
5381 * Use power_of(), which is set irrespective of domains
5382 * in update_cpu_power().
5383 *
5384 * This avoids power/power_orig from being 0 and
5385 * causing divide-by-zero issues on boot.
5386 *
5387 * Runtime updates will correct power_orig.
5388 */
5389 if (unlikely(!rq->sd)) {
5390 power_orig += power_of(cpu);
5391 power += power_of(cpu);
5392 continue;
5393 }
5383 5394
5384 power_orig += sg->sgp->power_orig; 5395 sgp = rq->sd->groups->sgp;
5385 power += sg->sgp->power; 5396 power_orig += sgp->power_orig;
5397 power += sgp->power;
5386 } 5398 }
5387 } else { 5399 } else {
5388 /* 5400 /*
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 7d57275fc396..1c4065575fa2 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -901,6 +901,13 @@ inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
901{ 901{
902 struct rq *rq = rq_of_rt_rq(rt_rq); 902 struct rq *rq = rq_of_rt_rq(rt_rq);
903 903
904#ifdef CONFIG_RT_GROUP_SCHED
905 /*
906 * Change rq's cpupri only if rt_rq is the top queue.
907 */
908 if (&rq->rt != rt_rq)
909 return;
910#endif
904 if (rq->online && prio < prev_prio) 911 if (rq->online && prio < prev_prio)
905 cpupri_set(&rq->rd->cpupri, rq->cpu, prio); 912 cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
906} 913}
@@ -910,6 +917,13 @@ dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
910{ 917{
911 struct rq *rq = rq_of_rt_rq(rt_rq); 918 struct rq *rq = rq_of_rt_rq(rt_rq);
912 919
920#ifdef CONFIG_RT_GROUP_SCHED
921 /*
922 * Change rq's cpupri only if rt_rq is the top queue.
923 */
924 if (&rq->rt != rt_rq)
925 return;
926#endif
913 if (rq->online && rt_rq->highest_prio.curr != prev_prio) 927 if (rq->online && rt_rq->highest_prio.curr != prev_prio)
914 cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr); 928 cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
915} 929}
diff --git a/kernel/system_certificates.S b/kernel/system_certificates.S
index 4aef390671cb..3e9868d47535 100644
--- a/kernel/system_certificates.S
+++ b/kernel/system_certificates.S
@@ -3,8 +3,18 @@
3 3
4 __INITRODATA 4 __INITRODATA
5 5
6 .align 8
6 .globl VMLINUX_SYMBOL(system_certificate_list) 7 .globl VMLINUX_SYMBOL(system_certificate_list)
7VMLINUX_SYMBOL(system_certificate_list): 8VMLINUX_SYMBOL(system_certificate_list):
9__cert_list_start:
8 .incbin "kernel/x509_certificate_list" 10 .incbin "kernel/x509_certificate_list"
9 .globl VMLINUX_SYMBOL(system_certificate_list_end) 11__cert_list_end:
10VMLINUX_SYMBOL(system_certificate_list_end): 12
13 .align 8
14 .globl VMLINUX_SYMBOL(system_certificate_list_size)
15VMLINUX_SYMBOL(system_certificate_list_size):
16#ifdef CONFIG_64BIT
17 .quad __cert_list_end - __cert_list_start
18#else
19 .long __cert_list_end - __cert_list_start
20#endif
diff --git a/kernel/system_keyring.c b/kernel/system_keyring.c
index 564dd93430a2..52ebc70263f4 100644
--- a/kernel/system_keyring.c
+++ b/kernel/system_keyring.c
@@ -22,7 +22,7 @@ struct key *system_trusted_keyring;
22EXPORT_SYMBOL_GPL(system_trusted_keyring); 22EXPORT_SYMBOL_GPL(system_trusted_keyring);
23 23
24extern __initconst const u8 system_certificate_list[]; 24extern __initconst const u8 system_certificate_list[];
25extern __initconst const u8 system_certificate_list_end[]; 25extern __initconst const unsigned long system_certificate_list_size;
26 26
27/* 27/*
28 * Load the compiled-in keys 28 * Load the compiled-in keys
@@ -60,8 +60,8 @@ static __init int load_system_certificate_list(void)
60 60
61 pr_notice("Loading compiled-in X.509 certificates\n"); 61 pr_notice("Loading compiled-in X.509 certificates\n");
62 62
63 end = system_certificate_list_end;
64 p = system_certificate_list; 63 p = system_certificate_list;
64 end = p + system_certificate_list_size;
65 while (p < end) { 65 while (p < end) {
66 /* Each cert begins with an ASN.1 SEQUENCE tag and must be more 66 /* Each cert begins with an ASN.1 SEQUENCE tag and must be more
67 * than 256 bytes in size. 67 * than 256 bytes in size.
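The loop being modified here walks a flat blob of concatenated DER certificates, relying on each one starting with an ASN.1 SEQUENCE whose long-form length is encoded as 0x82 followed by two big-endian bytes. A hedged userspace sketch of that walk, with invented helper names and no relation to the actual x509 parser, might look like:

#include <stddef.h>
#include <stdint.h>

/* Length of the next certificate in the blob, or 0 if the header is not a
 * plausible SEQUENCE with a two-byte long-form length. */
static size_t next_cert_len(const uint8_t *p, const uint8_t *end)
{
	if (end - p < 4 || p[0] != 0x30 || p[1] != 0x82)
		return 0;
	return 4 + (((size_t)p[2] << 8) | p[3]);	/* header + payload */
}

/* Count the certificates in [blob, blob + size); -1 on a corrupt list. */
static int count_certs(const uint8_t *blob, size_t size)
{
	const uint8_t *p = blob, *end = blob + size;
	int n = 0;

	while (p < end) {
		size_t len = next_cert_len(p, end);

		if (!len || len > (size_t)(end - p))
			return -1;		/* truncated or corrupt entry */
		p += len;
		n++;
	}
	return n;
}

int main(void)
{
	static const uint8_t empty[1];

	/* An empty list trivially contains zero certificates. */
	return count_certs(empty, 0) == 0 ? 0 : 1;
}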
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index 64522ecdfe0e..162b03ab0ad2 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -33,6 +33,21 @@ DEFINE_PER_CPU(struct tick_device, tick_cpu_device);
33 */ 33 */
34ktime_t tick_next_period; 34ktime_t tick_next_period;
35ktime_t tick_period; 35ktime_t tick_period;
36
37/*
38 * tick_do_timer_cpu is a timer core internal variable which holds the CPU NR
39 * which is responsible for calling do_timer(), i.e. the timekeeping stuff. This
40 * variable has two functions:
41 *
42 * 1) Prevent a thundering herd issue of a gazillion of CPUs trying to grab the
43 * timekeeping lock all at once. Only the CPU which is assigned to do the
44 * update is handling it.
45 *
46 * 2) Hand off the duty in the NOHZ idle case by setting the value to
 47 * TICK_DO_TIMER_NONE, i.e. a non-existing CPU. So the next CPU which looks
 48 * at it will take over and keep the timekeeping alive. The handover
49 * procedure also covers cpu hotplug.
50 */
36int tick_do_timer_cpu __read_mostly = TICK_DO_TIMER_BOOT; 51int tick_do_timer_cpu __read_mostly = TICK_DO_TIMER_BOOT;

37 52
38/* 53/*
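As a rough illustration of the duty hand-off this new comment describes, the sketch below models one owner CPU doing the do_timer() work and giving the duty up when it goes NOHZ-idle. Everything here, the function names and the lock-free simplified flow, is invented for illustration; only the tick_do_timer_cpu / TICK_DO_TIMER_NONE idea comes from the comment above.

/* Invented stand-ins; the real code lives in kernel/time/tick-*.c. */
#define TICK_DO_TIMER_NONE_SKETCH	(-1)

static int tick_do_timer_cpu_sketch;		/* boot CPU (0) owns the duty */

static void do_timer_sketch(unsigned long ticks)
{
	/* advance jiffies / timekeeping; elided in this sketch */
	(void)ticks;
}

/* Called from every CPU's periodic tick. */
static void sketch_tick(int this_cpu)
{
	/* Adopt the duty if the previous owner dropped it before going idle. */
	if (tick_do_timer_cpu_sketch == TICK_DO_TIMER_NONE_SKETCH)
		tick_do_timer_cpu_sketch = this_cpu;

	/* Only the owner updates the time; the herd stays away from the lock. */
	if (tick_do_timer_cpu_sketch == this_cpu)
		do_timer_sketch(1);
}

/* Called when the owning CPU is about to stop its tick (NOHZ idle). */
static void sketch_enter_nohz_idle(int this_cpu)
{
	if (tick_do_timer_cpu_sketch == this_cpu)
		tick_do_timer_cpu_sketch = TICK_DO_TIMER_NONE_SKETCH;
}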
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 3612fc77f834..ea20f7d1ac2c 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -361,8 +361,8 @@ void __init tick_nohz_init(void)
361/* 361/*
362 * NO HZ enabled ? 362 * NO HZ enabled ?
363 */ 363 */
364int tick_nohz_enabled __read_mostly = 1; 364static int tick_nohz_enabled __read_mostly = 1;
365 365int tick_nohz_active __read_mostly;
366/* 366/*
367 * Enable / Disable tickless mode 367 * Enable / Disable tickless mode
368 */ 368 */
@@ -465,7 +465,7 @@ u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time)
465 struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); 465 struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
466 ktime_t now, idle; 466 ktime_t now, idle;
467 467
468 if (!tick_nohz_enabled) 468 if (!tick_nohz_active)
469 return -1; 469 return -1;
470 470
471 now = ktime_get(); 471 now = ktime_get();
@@ -506,7 +506,7 @@ u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time)
506 struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); 506 struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
507 ktime_t now, iowait; 507 ktime_t now, iowait;
508 508
509 if (!tick_nohz_enabled) 509 if (!tick_nohz_active)
510 return -1; 510 return -1;
511 511
512 now = ktime_get(); 512 now = ktime_get();
@@ -711,8 +711,10 @@ static bool can_stop_idle_tick(int cpu, struct tick_sched *ts)
711 return false; 711 return false;
712 } 712 }
713 713
714 if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE)) 714 if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE)) {
715 ts->sleep_length = (ktime_t) { .tv64 = NSEC_PER_SEC/HZ };
715 return false; 716 return false;
717 }
716 718
717 if (need_resched()) 719 if (need_resched())
718 return false; 720 return false;
@@ -799,11 +801,6 @@ void tick_nohz_idle_enter(void)
799 local_irq_disable(); 801 local_irq_disable();
800 802
801 ts = &__get_cpu_var(tick_cpu_sched); 803 ts = &__get_cpu_var(tick_cpu_sched);
802 /*
803 * set ts->inidle unconditionally. even if the system did not
804 * switch to nohz mode the cpu frequency governers rely on the
805 * update of the idle time accounting in tick_nohz_start_idle().
806 */
807 ts->inidle = 1; 804 ts->inidle = 1;
808 __tick_nohz_idle_enter(ts); 805 __tick_nohz_idle_enter(ts);
809 806
@@ -973,7 +970,7 @@ static void tick_nohz_switch_to_nohz(void)
973 struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched); 970 struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
974 ktime_t next; 971 ktime_t next;
975 972
976 if (!tick_nohz_enabled) 973 if (!tick_nohz_active)
977 return; 974 return;
978 975
979 local_irq_disable(); 976 local_irq_disable();
@@ -981,7 +978,7 @@ static void tick_nohz_switch_to_nohz(void)
981 local_irq_enable(); 978 local_irq_enable();
982 return; 979 return;
983 } 980 }
984 981 tick_nohz_active = 1;
985 ts->nohz_mode = NOHZ_MODE_LOWRES; 982 ts->nohz_mode = NOHZ_MODE_LOWRES;
986 983
987 /* 984 /*
@@ -1139,8 +1136,10 @@ void tick_setup_sched_timer(void)
1139 } 1136 }
1140 1137
1141#ifdef CONFIG_NO_HZ_COMMON 1138#ifdef CONFIG_NO_HZ_COMMON
1142 if (tick_nohz_enabled) 1139 if (tick_nohz_enabled) {
1143 ts->nohz_mode = NOHZ_MODE_HIGHRES; 1140 ts->nohz_mode = NOHZ_MODE_HIGHRES;
1141 tick_nohz_active = 1;
1142 }
1144#endif 1143#endif
1145} 1144}
1146#endif /* HIGH_RES_TIMERS */ 1145#endif /* HIGH_RES_TIMERS */
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 3abf53418b67..87b4f00284c9 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -1347,7 +1347,7 @@ static inline void old_vsyscall_fixup(struct timekeeper *tk)
1347 tk->xtime_nsec -= remainder; 1347 tk->xtime_nsec -= remainder;
1348 tk->xtime_nsec += 1ULL << tk->shift; 1348 tk->xtime_nsec += 1ULL << tk->shift;
1349 tk->ntp_error += remainder << tk->ntp_error_shift; 1349 tk->ntp_error += remainder << tk->ntp_error_shift;
1350 1350 tk->ntp_error -= (1ULL << tk->shift) << tk->ntp_error_shift;
1351} 1351}
1352#else 1352#else
1353#define old_vsyscall_fixup(tk) 1353#define old_vsyscall_fixup(tk)
diff --git a/kernel/timer.c b/kernel/timer.c
index 6582b82fa966..accfd241b9e5 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -1518,9 +1518,8 @@ static int init_timers_cpu(int cpu)
1518 /* 1518 /*
1519 * The APs use this path later in boot 1519 * The APs use this path later in boot
1520 */ 1520 */
1521 base = kmalloc_node(sizeof(*base), 1521 base = kzalloc_node(sizeof(*base), GFP_KERNEL,
1522 GFP_KERNEL | __GFP_ZERO, 1522 cpu_to_node(cpu));
1523 cpu_to_node(cpu));
1524 if (!base) 1523 if (!base)
1525 return -ENOMEM; 1524 return -ENOMEM;
1526 1525
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 22fa55696760..72a0f81dc5a8 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -367,9 +367,6 @@ static int remove_ftrace_list_ops(struct ftrace_ops **list,
367 367
368static int __register_ftrace_function(struct ftrace_ops *ops) 368static int __register_ftrace_function(struct ftrace_ops *ops)
369{ 369{
370 if (unlikely(ftrace_disabled))
371 return -ENODEV;
372
373 if (FTRACE_WARN_ON(ops == &global_ops)) 370 if (FTRACE_WARN_ON(ops == &global_ops))
374 return -EINVAL; 371 return -EINVAL;
375 372
@@ -428,9 +425,6 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
428{ 425{
429 int ret; 426 int ret;
430 427
431 if (ftrace_disabled)
432 return -ENODEV;
433
434 if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED))) 428 if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
435 return -EBUSY; 429 return -EBUSY;
436 430
@@ -781,7 +775,7 @@ static int ftrace_profile_init(void)
781 int cpu; 775 int cpu;
782 int ret = 0; 776 int ret = 0;
783 777
784 for_each_online_cpu(cpu) { 778 for_each_possible_cpu(cpu) {
785 ret = ftrace_profile_init_cpu(cpu); 779 ret = ftrace_profile_init_cpu(cpu);
786 if (ret) 780 if (ret)
787 break; 781 break;
@@ -2088,10 +2082,15 @@ static void ftrace_startup_enable(int command)
2088static int ftrace_startup(struct ftrace_ops *ops, int command) 2082static int ftrace_startup(struct ftrace_ops *ops, int command)
2089{ 2083{
2090 bool hash_enable = true; 2084 bool hash_enable = true;
2085 int ret;
2091 2086
2092 if (unlikely(ftrace_disabled)) 2087 if (unlikely(ftrace_disabled))
2093 return -ENODEV; 2088 return -ENODEV;
2094 2089
2090 ret = __register_ftrace_function(ops);
2091 if (ret)
2092 return ret;
2093
2095 ftrace_start_up++; 2094 ftrace_start_up++;
2096 command |= FTRACE_UPDATE_CALLS; 2095 command |= FTRACE_UPDATE_CALLS;
2097 2096
@@ -2113,12 +2112,17 @@ static int ftrace_startup(struct ftrace_ops *ops, int command)
2113 return 0; 2112 return 0;
2114} 2113}
2115 2114
2116static void ftrace_shutdown(struct ftrace_ops *ops, int command) 2115static int ftrace_shutdown(struct ftrace_ops *ops, int command)
2117{ 2116{
2118 bool hash_disable = true; 2117 bool hash_disable = true;
2118 int ret;
2119 2119
2120 if (unlikely(ftrace_disabled)) 2120 if (unlikely(ftrace_disabled))
2121 return; 2121 return -ENODEV;
2122
2123 ret = __unregister_ftrace_function(ops);
2124 if (ret)
2125 return ret;
2122 2126
2123 ftrace_start_up--; 2127 ftrace_start_up--;
2124 /* 2128 /*
@@ -2153,9 +2157,10 @@ static void ftrace_shutdown(struct ftrace_ops *ops, int command)
2153 } 2157 }
2154 2158
2155 if (!command || !ftrace_enabled) 2159 if (!command || !ftrace_enabled)
2156 return; 2160 return 0;
2157 2161
2158 ftrace_run_update_code(command); 2162 ftrace_run_update_code(command);
2163 return 0;
2159} 2164}
2160 2165
2161static void ftrace_startup_sysctl(void) 2166static void ftrace_startup_sysctl(void)
@@ -3060,16 +3065,13 @@ static void __enable_ftrace_function_probe(void)
3060 if (i == FTRACE_FUNC_HASHSIZE) 3065 if (i == FTRACE_FUNC_HASHSIZE)
3061 return; 3066 return;
3062 3067
3063 ret = __register_ftrace_function(&trace_probe_ops); 3068 ret = ftrace_startup(&trace_probe_ops, 0);
3064 if (!ret)
3065 ret = ftrace_startup(&trace_probe_ops, 0);
3066 3069
3067 ftrace_probe_registered = 1; 3070 ftrace_probe_registered = 1;
3068} 3071}
3069 3072
3070static void __disable_ftrace_function_probe(void) 3073static void __disable_ftrace_function_probe(void)
3071{ 3074{
3072 int ret;
3073 int i; 3075 int i;
3074 3076
3075 if (!ftrace_probe_registered) 3077 if (!ftrace_probe_registered)
@@ -3082,9 +3084,7 @@ static void __disable_ftrace_function_probe(void)
3082 } 3084 }
3083 3085
3084 /* no more funcs left */ 3086 /* no more funcs left */
3085 ret = __unregister_ftrace_function(&trace_probe_ops); 3087 ftrace_shutdown(&trace_probe_ops, 0);
3086 if (!ret)
3087 ftrace_shutdown(&trace_probe_ops, 0);
3088 3088
3089 ftrace_probe_registered = 0; 3089 ftrace_probe_registered = 0;
3090} 3090}
@@ -4366,12 +4366,15 @@ core_initcall(ftrace_nodyn_init);
4366static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; } 4366static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
4367static inline void ftrace_startup_enable(int command) { } 4367static inline void ftrace_startup_enable(int command) { }
4368/* Keep as macros so we do not need to define the commands */ 4368/* Keep as macros so we do not need to define the commands */
4369# define ftrace_startup(ops, command) \ 4369# define ftrace_startup(ops, command) \
4370 ({ \ 4370 ({ \
4371 (ops)->flags |= FTRACE_OPS_FL_ENABLED; \ 4371 int ___ret = __register_ftrace_function(ops); \
4372 0; \ 4372 if (!___ret) \
4373 (ops)->flags |= FTRACE_OPS_FL_ENABLED; \
4374 ___ret; \
4373 }) 4375 })
4374# define ftrace_shutdown(ops, command) do { } while (0) 4376# define ftrace_shutdown(ops, command) __unregister_ftrace_function(ops)
4377
4375# define ftrace_startup_sysctl() do { } while (0) 4378# define ftrace_startup_sysctl() do { } while (0)
4376# define ftrace_shutdown_sysctl() do { } while (0) 4379# define ftrace_shutdown_sysctl() do { } while (0)
4377 4380
@@ -4780,9 +4783,7 @@ int register_ftrace_function(struct ftrace_ops *ops)
4780 4783
4781 mutex_lock(&ftrace_lock); 4784 mutex_lock(&ftrace_lock);
4782 4785
4783 ret = __register_ftrace_function(ops); 4786 ret = ftrace_startup(ops, 0);
4784 if (!ret)
4785 ret = ftrace_startup(ops, 0);
4786 4787
4787 mutex_unlock(&ftrace_lock); 4788 mutex_unlock(&ftrace_lock);
4788 4789
@@ -4801,9 +4802,7 @@ int unregister_ftrace_function(struct ftrace_ops *ops)
4801 int ret; 4802 int ret;
4802 4803
4803 mutex_lock(&ftrace_lock); 4804 mutex_lock(&ftrace_lock);
4804 ret = __unregister_ftrace_function(ops); 4805 ret = ftrace_shutdown(ops, 0);
4805 if (!ret)
4806 ftrace_shutdown(ops, 0);
4807 mutex_unlock(&ftrace_lock); 4806 mutex_unlock(&ftrace_lock);
4808 4807
4809 return ret; 4808 return ret;
@@ -4997,6 +4996,13 @@ ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
4997 return NOTIFY_DONE; 4996 return NOTIFY_DONE;
4998} 4997}
4999 4998
4999/* Just a place holder for function graph */
5000static struct ftrace_ops fgraph_ops __read_mostly = {
5001 .func = ftrace_stub,
5002 .flags = FTRACE_OPS_FL_STUB | FTRACE_OPS_FL_GLOBAL |
5003 FTRACE_OPS_FL_RECURSION_SAFE,
5004};
5005
5000int register_ftrace_graph(trace_func_graph_ret_t retfunc, 5006int register_ftrace_graph(trace_func_graph_ret_t retfunc,
5001 trace_func_graph_ent_t entryfunc) 5007 trace_func_graph_ent_t entryfunc)
5002{ 5008{
@@ -5023,7 +5029,7 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
5023 ftrace_graph_return = retfunc; 5029 ftrace_graph_return = retfunc;
5024 ftrace_graph_entry = entryfunc; 5030 ftrace_graph_entry = entryfunc;
5025 5031
5026 ret = ftrace_startup(&global_ops, FTRACE_START_FUNC_RET); 5032 ret = ftrace_startup(&fgraph_ops, FTRACE_START_FUNC_RET);
5027 5033
5028out: 5034out:
5029 mutex_unlock(&ftrace_lock); 5035 mutex_unlock(&ftrace_lock);
@@ -5040,7 +5046,7 @@ void unregister_ftrace_graph(void)
5040 ftrace_graph_active--; 5046 ftrace_graph_active--;
5041 ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub; 5047 ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
5042 ftrace_graph_entry = ftrace_graph_entry_stub; 5048 ftrace_graph_entry = ftrace_graph_entry_stub;
5043 ftrace_shutdown(&global_ops, FTRACE_STOP_FUNC_RET); 5049 ftrace_shutdown(&fgraph_ops, FTRACE_STOP_FUNC_RET);
5044 unregister_pm_notifier(&ftrace_suspend_notifier); 5050 unregister_pm_notifier(&ftrace_suspend_notifier);
5045 unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL); 5051 unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
5046 5052
diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c
index 78e27e3b52ac..e854f420e033 100644
--- a/kernel/trace/trace_event_perf.c
+++ b/kernel/trace/trace_event_perf.c
@@ -24,6 +24,12 @@ static int total_ref_count;
24static int perf_trace_event_perm(struct ftrace_event_call *tp_event, 24static int perf_trace_event_perm(struct ftrace_event_call *tp_event,
25 struct perf_event *p_event) 25 struct perf_event *p_event)
26{ 26{
27 if (tp_event->perf_perm) {
28 int ret = tp_event->perf_perm(tp_event, p_event);
29 if (ret)
30 return ret;
31 }
32
27 /* The ftrace function trace is allowed only for root. */ 33 /* The ftrace function trace is allowed only for root. */
28 if (ftrace_event_is_function(tp_event) && 34 if (ftrace_event_is_function(tp_event) &&
29 perf_paranoid_tracepoint_raw() && !capable(CAP_SYS_ADMIN)) 35 perf_paranoid_tracepoint_raw() && !capable(CAP_SYS_ADMIN))
@@ -173,7 +179,7 @@ static int perf_trace_event_init(struct ftrace_event_call *tp_event,
173int perf_trace_init(struct perf_event *p_event) 179int perf_trace_init(struct perf_event *p_event)
174{ 180{
175 struct ftrace_event_call *tp_event; 181 struct ftrace_event_call *tp_event;
176 int event_id = p_event->attr.config; 182 u64 event_id = p_event->attr.config;
177 int ret = -EINVAL; 183 int ret = -EINVAL;
178 184
179 mutex_lock(&event_mutex); 185 mutex_lock(&event_mutex);
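The new perf_perm hook added above is invoked as tp_event->perf_perm(tp_event, p_event), and a non-zero return blocks the perf binding. As a hedged example of what such a callback could look like (the policy and the name are made up; only the signature follows the call site in this hunk):

#include <linux/ftrace_event.h>
#include <linux/perf_event.h>
#include <linux/capability.h>

/* Hypothetical permission callback: only CAP_SYS_ADMIN may attach perf to
 * this event. Wiring it up (tp_event->perf_perm = example_perf_perm) is left
 * out; this only demonstrates the expected return convention. */
static int example_perf_perm(struct ftrace_event_call *tp_event,
			     struct perf_event *p_event)
{
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;		/* non-zero: reject the binding */

	return 0;			/* allow */
}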
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index f919a2e21bf3..a11800ae96de 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -2314,6 +2314,9 @@ int event_trace_del_tracer(struct trace_array *tr)
2314 /* Disable any running events */ 2314 /* Disable any running events */
2315 __ftrace_set_clr_event_nolock(tr, NULL, NULL, NULL, 0); 2315 __ftrace_set_clr_event_nolock(tr, NULL, NULL, NULL, 0);
2316 2316
2317 /* Access to events are within rcu_read_lock_sched() */
2318 synchronize_sched();
2319
2317 down_write(&trace_event_sem); 2320 down_write(&trace_event_sem);
2318 __trace_remove_event_dirs(tr); 2321 __trace_remove_event_dirs(tr);
2319 debugfs_remove_recursive(tr->event_dir); 2322 debugfs_remove_recursive(tr->event_dir);
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index e4b6d11bdf78..ea90eb5f6f17 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -431,11 +431,6 @@ static void unreg_event_syscall_enter(struct ftrace_event_file *file,
431 if (!tr->sys_refcount_enter) 431 if (!tr->sys_refcount_enter)
432 unregister_trace_sys_enter(ftrace_syscall_enter, tr); 432 unregister_trace_sys_enter(ftrace_syscall_enter, tr);
433 mutex_unlock(&syscall_trace_lock); 433 mutex_unlock(&syscall_trace_lock);
434 /*
435 * Callers expect the event to be completely disabled on
436 * return, so wait for current handlers to finish.
437 */
438 synchronize_sched();
439} 434}
440 435
441static int reg_event_syscall_exit(struct ftrace_event_file *file, 436static int reg_event_syscall_exit(struct ftrace_event_file *file,
@@ -474,11 +469,6 @@ static void unreg_event_syscall_exit(struct ftrace_event_file *file,
474 if (!tr->sys_refcount_exit) 469 if (!tr->sys_refcount_exit)
475 unregister_trace_sys_exit(ftrace_syscall_exit, tr); 470 unregister_trace_sys_exit(ftrace_syscall_exit, tr);
476 mutex_unlock(&syscall_trace_lock); 471 mutex_unlock(&syscall_trace_lock);
477 /*
478 * Callers expect the event to be completely disabled on
479 * return, so wait for current handlers to finish.
480 */
481 synchronize_sched();
482} 472}
483 473
484static int __init init_syscall_trace(struct ftrace_event_call *call) 474static int __init init_syscall_trace(struct ftrace_event_call *call)
diff --git a/kernel/user.c b/kernel/user.c
index a3a0dbfda329..c006131beb77 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -51,9 +51,9 @@ struct user_namespace init_user_ns = {
51 .owner = GLOBAL_ROOT_UID, 51 .owner = GLOBAL_ROOT_UID,
52 .group = GLOBAL_ROOT_GID, 52 .group = GLOBAL_ROOT_GID,
53 .proc_inum = PROC_USER_INIT_INO, 53 .proc_inum = PROC_USER_INIT_INO,
54#ifdef CONFIG_KEYS_KERBEROS_CACHE 54#ifdef CONFIG_PERSISTENT_KEYRINGS
55 .krb_cache_register_sem = 55 .persistent_keyring_register_sem =
56 __RWSEM_INITIALIZER(init_user_ns.krb_cache_register_sem), 56 __RWSEM_INITIALIZER(init_user_ns.persistent_keyring_register_sem),
57#endif 57#endif
58}; 58};
59EXPORT_SYMBOL_GPL(init_user_ns); 59EXPORT_SYMBOL_GPL(init_user_ns);
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 987293d03ebc..b010eac595d2 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -305,6 +305,9 @@ static DEFINE_HASHTABLE(unbound_pool_hash, UNBOUND_POOL_HASH_ORDER);
305/* I: attributes used when instantiating standard unbound pools on demand */ 305/* I: attributes used when instantiating standard unbound pools on demand */
306static struct workqueue_attrs *unbound_std_wq_attrs[NR_STD_WORKER_POOLS]; 306static struct workqueue_attrs *unbound_std_wq_attrs[NR_STD_WORKER_POOLS];
307 307
308/* I: attributes used when instantiating ordered pools on demand */
309static struct workqueue_attrs *ordered_wq_attrs[NR_STD_WORKER_POOLS];
310
308struct workqueue_struct *system_wq __read_mostly; 311struct workqueue_struct *system_wq __read_mostly;
309EXPORT_SYMBOL(system_wq); 312EXPORT_SYMBOL(system_wq);
310struct workqueue_struct *system_highpri_wq __read_mostly; 313struct workqueue_struct *system_highpri_wq __read_mostly;
@@ -518,14 +521,21 @@ static inline void debug_work_activate(struct work_struct *work) { }
518static inline void debug_work_deactivate(struct work_struct *work) { } 521static inline void debug_work_deactivate(struct work_struct *work) { }
519#endif 522#endif
520 523
521/* allocate ID and assign it to @pool */ 524/**
 525 * worker_pool_assign_id - allocate ID and assign it to @pool
526 * @pool: the pool pointer of interest
527 *
528 * Returns 0 if ID in [0, WORK_OFFQ_POOL_NONE) is allocated and assigned
529 * successfully, -errno on failure.
530 */
522static int worker_pool_assign_id(struct worker_pool *pool) 531static int worker_pool_assign_id(struct worker_pool *pool)
523{ 532{
524 int ret; 533 int ret;
525 534
526 lockdep_assert_held(&wq_pool_mutex); 535 lockdep_assert_held(&wq_pool_mutex);
527 536
528 ret = idr_alloc(&worker_pool_idr, pool, 0, 0, GFP_KERNEL); 537 ret = idr_alloc(&worker_pool_idr, pool, 0, WORK_OFFQ_POOL_NONE,
538 GFP_KERNEL);
529 if (ret >= 0) { 539 if (ret >= 0) {
530 pool->id = ret; 540 pool->id = ret;
531 return 0; 541 return 0;
@@ -1320,7 +1330,7 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
1320 1330
1321 debug_work_activate(work); 1331 debug_work_activate(work);
1322 1332
1323 /* if dying, only works from the same workqueue are allowed */ 1333 /* if draining, only works from the same workqueue are allowed */
1324 if (unlikely(wq->flags & __WQ_DRAINING) && 1334 if (unlikely(wq->flags & __WQ_DRAINING) &&
1325 WARN_ON_ONCE(!is_chained_work(wq))) 1335 WARN_ON_ONCE(!is_chained_work(wq)))
1326 return; 1336 return;
@@ -1736,16 +1746,17 @@ static struct worker *create_worker(struct worker_pool *pool)
1736 if (IS_ERR(worker->task)) 1746 if (IS_ERR(worker->task))
1737 goto fail; 1747 goto fail;
1738 1748
1749 set_user_nice(worker->task, pool->attrs->nice);
1750
1751 /* prevent userland from meddling with cpumask of workqueue workers */
1752 worker->task->flags |= PF_NO_SETAFFINITY;
1753
1739 /* 1754 /*
1740 * set_cpus_allowed_ptr() will fail if the cpumask doesn't have any 1755 * set_cpus_allowed_ptr() will fail if the cpumask doesn't have any
1741 * online CPUs. It'll be re-applied when any of the CPUs come up. 1756 * online CPUs. It'll be re-applied when any of the CPUs come up.
1742 */ 1757 */
1743 set_user_nice(worker->task, pool->attrs->nice);
1744 set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask); 1758 set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask);
1745 1759
1746 /* prevent userland from meddling with cpumask of workqueue workers */
1747 worker->task->flags |= PF_NO_SETAFFINITY;
1748
1749 /* 1760 /*
1750 * The caller is responsible for ensuring %POOL_DISASSOCIATED 1761 * The caller is responsible for ensuring %POOL_DISASSOCIATED
1751 * remains stable across this function. See the comments above the 1762 * remains stable across this function. See the comments above the
@@ -2840,19 +2851,6 @@ already_gone:
2840 return false; 2851 return false;
2841} 2852}
2842 2853
2843static bool __flush_work(struct work_struct *work)
2844{
2845 struct wq_barrier barr;
2846
2847 if (start_flush_work(work, &barr)) {
2848 wait_for_completion(&barr.done);
2849 destroy_work_on_stack(&barr.work);
2850 return true;
2851 } else {
2852 return false;
2853 }
2854}
2855
2856/** 2854/**
2857 * flush_work - wait for a work to finish executing the last queueing instance 2855 * flush_work - wait for a work to finish executing the last queueing instance
2858 * @work: the work to flush 2856 * @work: the work to flush
@@ -2866,10 +2864,18 @@ static bool __flush_work(struct work_struct *work)
2866 */ 2864 */
2867bool flush_work(struct work_struct *work) 2865bool flush_work(struct work_struct *work)
2868{ 2866{
2867 struct wq_barrier barr;
2868
2869 lock_map_acquire(&work->lockdep_map); 2869 lock_map_acquire(&work->lockdep_map);
2870 lock_map_release(&work->lockdep_map); 2870 lock_map_release(&work->lockdep_map);
2871 2871
2872 return __flush_work(work); 2872 if (start_flush_work(work, &barr)) {
2873 wait_for_completion(&barr.done);
2874 destroy_work_on_stack(&barr.work);
2875 return true;
2876 } else {
2877 return false;
2878 }
2873} 2879}
2874EXPORT_SYMBOL_GPL(flush_work); 2880EXPORT_SYMBOL_GPL(flush_work);
2875 2881
@@ -4106,7 +4112,7 @@ out_unlock:
4106static int alloc_and_link_pwqs(struct workqueue_struct *wq) 4112static int alloc_and_link_pwqs(struct workqueue_struct *wq)
4107{ 4113{
4108 bool highpri = wq->flags & WQ_HIGHPRI; 4114 bool highpri = wq->flags & WQ_HIGHPRI;
4109 int cpu; 4115 int cpu, ret;
4110 4116
4111 if (!(wq->flags & WQ_UNBOUND)) { 4117 if (!(wq->flags & WQ_UNBOUND)) {
4112 wq->cpu_pwqs = alloc_percpu(struct pool_workqueue); 4118 wq->cpu_pwqs = alloc_percpu(struct pool_workqueue);
@@ -4126,6 +4132,13 @@ static int alloc_and_link_pwqs(struct workqueue_struct *wq)
4126 mutex_unlock(&wq->mutex); 4132 mutex_unlock(&wq->mutex);
4127 } 4133 }
4128 return 0; 4134 return 0;
4135 } else if (wq->flags & __WQ_ORDERED) {
4136 ret = apply_workqueue_attrs(wq, ordered_wq_attrs[highpri]);
4137 /* there should only be single pwq for ordering guarantee */
4138 WARN(!ret && (wq->pwqs.next != &wq->dfl_pwq->pwqs_node ||
4139 wq->pwqs.prev != &wq->dfl_pwq->pwqs_node),
4140 "ordering guarantee broken for workqueue %s\n", wq->name);
4141 return ret;
4129 } else { 4142 } else {
4130 return apply_workqueue_attrs(wq, unbound_std_wq_attrs[highpri]); 4143 return apply_workqueue_attrs(wq, unbound_std_wq_attrs[highpri]);
4131 } 4144 }
@@ -4814,14 +4827,7 @@ long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
4814 4827
4815 INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn); 4828 INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn);
4816 schedule_work_on(cpu, &wfc.work); 4829 schedule_work_on(cpu, &wfc.work);
4817 4830 flush_work(&wfc.work);
4818 /*
4819 * The work item is on-stack and can't lead to deadlock through
4820 * flushing. Use __flush_work() to avoid spurious lockdep warnings
4821 * when work_on_cpu()s are nested.
4822 */
4823 __flush_work(&wfc.work);
4824
4825 return wfc.ret; 4831 return wfc.ret;
4826} 4832}
4827EXPORT_SYMBOL_GPL(work_on_cpu); 4833EXPORT_SYMBOL_GPL(work_on_cpu);
@@ -5009,10 +5015,6 @@ static int __init init_workqueues(void)
5009 int std_nice[NR_STD_WORKER_POOLS] = { 0, HIGHPRI_NICE_LEVEL }; 5015 int std_nice[NR_STD_WORKER_POOLS] = { 0, HIGHPRI_NICE_LEVEL };
5010 int i, cpu; 5016 int i, cpu;
5011 5017
5012 /* make sure we have enough bits for OFFQ pool ID */
5013 BUILD_BUG_ON((1LU << (BITS_PER_LONG - WORK_OFFQ_POOL_SHIFT)) <
5014 WORK_CPU_END * NR_STD_WORKER_POOLS);
5015
5016 WARN_ON(__alignof__(struct pool_workqueue) < __alignof__(long long)); 5018 WARN_ON(__alignof__(struct pool_workqueue) < __alignof__(long long));
5017 5019
5018 pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC); 5020 pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC);
@@ -5051,13 +5053,23 @@ static int __init init_workqueues(void)
5051 } 5053 }
5052 } 5054 }
5053 5055
5054 /* create default unbound wq attrs */ 5056 /* create default unbound and ordered wq attrs */
5055 for (i = 0; i < NR_STD_WORKER_POOLS; i++) { 5057 for (i = 0; i < NR_STD_WORKER_POOLS; i++) {
5056 struct workqueue_attrs *attrs; 5058 struct workqueue_attrs *attrs;
5057 5059
5058 BUG_ON(!(attrs = alloc_workqueue_attrs(GFP_KERNEL))); 5060 BUG_ON(!(attrs = alloc_workqueue_attrs(GFP_KERNEL)));
5059 attrs->nice = std_nice[i]; 5061 attrs->nice = std_nice[i];
5060 unbound_std_wq_attrs[i] = attrs; 5062 unbound_std_wq_attrs[i] = attrs;
5063
5064 /*
5065 * An ordered wq should have only one pwq as ordering is
5066 * guaranteed by max_active which is enforced by pwqs.
5067 * Turn off NUMA so that dfl_pwq is used for all nodes.
5068 */
5069 BUG_ON(!(attrs = alloc_workqueue_attrs(GFP_KERNEL)));
5070 attrs->nice = std_nice[i];
5071 attrs->no_numa = true;
5072 ordered_wq_attrs[i] = attrs;
5061 } 5073 }
5062 5074
5063 system_wq = alloc_workqueue("events", 0, 0); 5075 system_wq = alloc_workqueue("events", 0, 0);
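The ordered_wq_attrs added above back the single-pwq guarantee that ordered workqueues advertise. From a driver's point of view, requesting that guarantee looks roughly like the hedged sketch below (module name and work handler are illustrative, not taken from the tree):

#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;

static void example_fn(struct work_struct *work)
{
	/* items on example_wq run strictly one at a time, in queueing order */
}
static DECLARE_WORK(example_work, example_fn);

static int __init example_init(void)
{
	/* __WQ_ORDERED + max_active == 1: a single pwq serialises everything. */
	example_wq = alloc_ordered_workqueue("example_ordered", 0);
	if (!example_wq)
		return -ENOMEM;

	queue_work(example_wq, &example_work);
	return 0;
}

static void __exit example_exit(void)
{
	destroy_workqueue(example_wq);	/* drains pending work first */
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");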
diff --git a/lib/assoc_array.c b/lib/assoc_array.c
index 17edeaf19180..1b6a44f1ec3e 100644
--- a/lib/assoc_array.c
+++ b/lib/assoc_array.c
@@ -759,8 +759,8 @@ all_leaves_cluster_together:
759 pr_devel("all leaves cluster together\n"); 759 pr_devel("all leaves cluster together\n");
760 diff = INT_MAX; 760 diff = INT_MAX;
761 for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) { 761 for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) {
762 int x = ops->diff_objects(assoc_array_ptr_to_leaf(edit->leaf), 762 int x = ops->diff_objects(assoc_array_ptr_to_leaf(node->slots[i]),
763 assoc_array_ptr_to_leaf(node->slots[i])); 763 index_key);
764 if (x < diff) { 764 if (x < diff) {
765 BUG_ON(x < 0); 765 BUG_ON(x < 0);
766 diff = x; 766 diff = x;
diff --git a/lib/lockref.c b/lib/lockref.c
index d2b123f8456b..f07a40d33871 100644
--- a/lib/lockref.c
+++ b/lib/lockref.c
@@ -1,5 +1,6 @@
1#include <linux/export.h> 1#include <linux/export.h>
2#include <linux/lockref.h> 2#include <linux/lockref.h>
3#include <linux/mutex.h>
3 4
4#if USE_CMPXCHG_LOCKREF 5#if USE_CMPXCHG_LOCKREF
5 6
@@ -12,14 +13,6 @@
12#endif 13#endif
13 14
14/* 15/*
15 * Allow architectures to override the default cpu_relax() within CMPXCHG_LOOP.
16 * This is useful for architectures with an expensive cpu_relax().
17 */
18#ifndef arch_mutex_cpu_relax
19# define arch_mutex_cpu_relax() cpu_relax()
20#endif
21
22/*
23 * Note that the "cmpxchg()" reloads the "old" value for the 16 * Note that the "cmpxchg()" reloads the "old" value for the
24 * failure case. 17 * failure case.
25 */ 18 */
diff --git a/mm/Kconfig b/mm/Kconfig
index eb69f352401d..723bbe04a0b0 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -543,7 +543,7 @@ config ZSWAP
543 543
544config MEM_SOFT_DIRTY 544config MEM_SOFT_DIRTY
545 bool "Track memory changes" 545 bool "Track memory changes"
546 depends on CHECKPOINT_RESTORE && HAVE_ARCH_SOFT_DIRTY 546 depends on CHECKPOINT_RESTORE && HAVE_ARCH_SOFT_DIRTY && PROC_FS
547 select PROC_PAGE_MONITOR 547 select PROC_PAGE_MONITOR
548 help 548 help
549 This option enables memory changes tracking by introducing a 549 This option enables memory changes tracking by introducing a
diff --git a/mm/compaction.c b/mm/compaction.c
index 805165bcd3dd..f58bcd016f43 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -134,6 +134,10 @@ static void update_pageblock_skip(struct compact_control *cc,
134 bool migrate_scanner) 134 bool migrate_scanner)
135{ 135{
136 struct zone *zone = cc->zone; 136 struct zone *zone = cc->zone;
137
138 if (cc->ignore_skip_hint)
139 return;
140
137 if (!page) 141 if (!page)
138 return; 142 return;
139 143
diff --git a/mm/fremap.c b/mm/fremap.c
index 5bff08147768..bbc4d660221a 100644
--- a/mm/fremap.c
+++ b/mm/fremap.c
@@ -208,9 +208,10 @@ get_write_lock:
208 if (mapping_cap_account_dirty(mapping)) { 208 if (mapping_cap_account_dirty(mapping)) {
209 unsigned long addr; 209 unsigned long addr;
210 struct file *file = get_file(vma->vm_file); 210 struct file *file = get_file(vma->vm_file);
211 /* mmap_region may free vma; grab the info now */
212 vm_flags = vma->vm_flags;
211 213
212 addr = mmap_region(file, start, size, 214 addr = mmap_region(file, start, size, vm_flags, pgoff);
213 vma->vm_flags, pgoff);
214 fput(file); 215 fput(file);
215 if (IS_ERR_VALUE(addr)) { 216 if (IS_ERR_VALUE(addr)) {
216 err = addr; 217 err = addr;
@@ -218,7 +219,7 @@ get_write_lock:
218 BUG_ON(addr != start); 219 BUG_ON(addr != start);
219 err = 0; 220 err = 0;
220 } 221 }
221 goto out; 222 goto out_freed;
222 } 223 }
223 mutex_lock(&mapping->i_mmap_mutex); 224 mutex_lock(&mapping->i_mmap_mutex);
224 flush_dcache_mmap_lock(mapping); 225 flush_dcache_mmap_lock(mapping);
@@ -253,6 +254,7 @@ get_write_lock:
253out: 254out:
254 if (vma) 255 if (vma)
255 vm_flags = vma->vm_flags; 256 vm_flags = vma->vm_flags;
257out_freed:
256 if (likely(!has_write_lock)) 258 if (likely(!has_write_lock))
257 up_read(&mm->mmap_sem); 259 up_read(&mm->mmap_sem);
258 else 260 else
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index bccd5a628ea6..95d1acb0f3d2 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -882,6 +882,7 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
882 ret = 0; 882 ret = 0;
883 goto out_unlock; 883 goto out_unlock;
884 } 884 }
885
885 if (unlikely(pmd_trans_splitting(pmd))) { 886 if (unlikely(pmd_trans_splitting(pmd))) {
886 /* split huge page running from under us */ 887 /* split huge page running from under us */
887 spin_unlock(src_ptl); 888 spin_unlock(src_ptl);
@@ -1153,7 +1154,7 @@ alloc:
1153 new_page = NULL; 1154 new_page = NULL;
1154 1155
1155 if (unlikely(!new_page)) { 1156 if (unlikely(!new_page)) {
1156 if (is_huge_zero_pmd(orig_pmd)) { 1157 if (!page) {
1157 ret = do_huge_pmd_wp_zero_page_fallback(mm, vma, 1158 ret = do_huge_pmd_wp_zero_page_fallback(mm, vma,
1158 address, pmd, orig_pmd, haddr); 1159 address, pmd, orig_pmd, haddr);
1159 } else { 1160 } else {
@@ -1180,7 +1181,7 @@ alloc:
1180 1181
1181 count_vm_event(THP_FAULT_ALLOC); 1182 count_vm_event(THP_FAULT_ALLOC);
1182 1183
1183 if (is_huge_zero_pmd(orig_pmd)) 1184 if (!page)
1184 clear_huge_page(new_page, haddr, HPAGE_PMD_NR); 1185 clear_huge_page(new_page, haddr, HPAGE_PMD_NR);
1185 else 1186 else
1186 copy_user_huge_page(new_page, page, haddr, vma, HPAGE_PMD_NR); 1187 copy_user_huge_page(new_page, page, haddr, vma, HPAGE_PMD_NR);
@@ -1206,7 +1207,7 @@ alloc:
1206 page_add_new_anon_rmap(new_page, vma, haddr); 1207 page_add_new_anon_rmap(new_page, vma, haddr);
1207 set_pmd_at(mm, haddr, pmd, entry); 1208 set_pmd_at(mm, haddr, pmd, entry);
1208 update_mmu_cache_pmd(vma, address, pmd); 1209 update_mmu_cache_pmd(vma, address, pmd);
1209 if (is_huge_zero_pmd(orig_pmd)) { 1210 if (!page) {
1210 add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR); 1211 add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);
1211 put_huge_zero_page(); 1212 put_huge_zero_page();
1212 } else { 1213 } else {
@@ -1243,6 +1244,10 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
1243 if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd)) 1244 if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd))
1244 return ERR_PTR(-EFAULT); 1245 return ERR_PTR(-EFAULT);
1245 1246
1247 /* Full NUMA hinting faults to serialise migration in fault paths */
1248 if ((flags & FOLL_NUMA) && pmd_numa(*pmd))
1249 goto out;
1250
1246 page = pmd_page(*pmd); 1251 page = pmd_page(*pmd);
1247 VM_BUG_ON(!PageHead(page)); 1252 VM_BUG_ON(!PageHead(page));
1248 if (flags & FOLL_TOUCH) { 1253 if (flags & FOLL_TOUCH) {
@@ -1295,6 +1300,17 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
1295 if (unlikely(!pmd_same(pmd, *pmdp))) 1300 if (unlikely(!pmd_same(pmd, *pmdp)))
1296 goto out_unlock; 1301 goto out_unlock;
1297 1302
1303 /*
1304 * If there are potential migrations, wait for completion and retry
1305 * without disrupting NUMA hinting information. Do not relock and
1306 * check_same as the page may no longer be mapped.
1307 */
1308 if (unlikely(pmd_trans_migrating(*pmdp))) {
1309 spin_unlock(ptl);
1310 wait_migrate_huge_page(vma->anon_vma, pmdp);
1311 goto out;
1312 }
1313
1298 page = pmd_page(pmd); 1314 page = pmd_page(pmd);
1299 BUG_ON(is_huge_zero_page(page)); 1315 BUG_ON(is_huge_zero_page(page));
1300 page_nid = page_to_nid(page); 1316 page_nid = page_to_nid(page);
@@ -1323,23 +1339,22 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
1323 /* If the page was locked, there are no parallel migrations */ 1339 /* If the page was locked, there are no parallel migrations */
1324 if (page_locked) 1340 if (page_locked)
1325 goto clear_pmdnuma; 1341 goto clear_pmdnuma;
1342 }
1326 1343
1327 /* 1344 /* Migration could have started since the pmd_trans_migrating check */
1328 * Otherwise wait for potential migrations and retry. We do 1345 if (!page_locked) {
1329 * relock and check_same as the page may no longer be mapped.
1330 * As the fault is being retried, do not account for it.
1331 */
1332 spin_unlock(ptl); 1346 spin_unlock(ptl);
1333 wait_on_page_locked(page); 1347 wait_on_page_locked(page);
1334 page_nid = -1; 1348 page_nid = -1;
1335 goto out; 1349 goto out;
1336 } 1350 }
1337 1351
1338 /* Page is misplaced, serialise migrations and parallel THP splits */ 1352 /*
1353 * Page is misplaced. Page lock serialises migrations. Acquire anon_vma
 1354 * to serialise splits
1355 */
1339 get_page(page); 1356 get_page(page);
1340 spin_unlock(ptl); 1357 spin_unlock(ptl);
1341 if (!page_locked)
1342 lock_page(page);
1343 anon_vma = page_lock_anon_vma_read(page); 1358 anon_vma = page_lock_anon_vma_read(page);
1344 1359
1345 /* Confirm the PMD did not change while page_table_lock was released */ 1360 /* Confirm the PMD did not change while page_table_lock was released */
@@ -1351,6 +1366,13 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
1351 goto out_unlock; 1366 goto out_unlock;
1352 } 1367 }
1353 1368
1369 /* Bail if we fail to protect against THP splits for any reason */
1370 if (unlikely(!anon_vma)) {
1371 put_page(page);
1372 page_nid = -1;
1373 goto clear_pmdnuma;
1374 }
1375
1354 /* 1376 /*
1355 * Migrate the THP to the requested node, returns with page unlocked 1377 * Migrate the THP to the requested node, returns with page unlocked
1356 * and pmd_numa cleared. 1378 * and pmd_numa cleared.
@@ -1481,8 +1503,18 @@ int move_huge_pmd(struct vm_area_struct *vma, struct vm_area_struct *new_vma,
1481 pmd = pmdp_get_and_clear(mm, old_addr, old_pmd); 1503 pmd = pmdp_get_and_clear(mm, old_addr, old_pmd);
1482 VM_BUG_ON(!pmd_none(*new_pmd)); 1504 VM_BUG_ON(!pmd_none(*new_pmd));
1483 set_pmd_at(mm, new_addr, new_pmd, pmd_mksoft_dirty(pmd)); 1505 set_pmd_at(mm, new_addr, new_pmd, pmd_mksoft_dirty(pmd));
1484 if (new_ptl != old_ptl) 1506 if (new_ptl != old_ptl) {
1507 pgtable_t pgtable;
1508
1509 /*
1510 * Move preallocated PTE page table if new_pmd is on
1511 * different PMD page table.
1512 */
1513 pgtable = pgtable_trans_huge_withdraw(mm, old_pmd);
1514 pgtable_trans_huge_deposit(mm, new_pmd, pgtable);
1515
1485 spin_unlock(new_ptl); 1516 spin_unlock(new_ptl);
1517 }
1486 spin_unlock(old_ptl); 1518 spin_unlock(old_ptl);
1487 } 1519 }
1488out: 1520out:
@@ -1507,6 +1539,8 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
1507 ret = 1; 1539 ret = 1;
1508 if (!prot_numa) { 1540 if (!prot_numa) {
1509 entry = pmdp_get_and_clear(mm, addr, pmd); 1541 entry = pmdp_get_and_clear(mm, addr, pmd);
1542 if (pmd_numa(entry))
1543 entry = pmd_mknonnuma(entry);
1510 entry = pmd_modify(entry, newprot); 1544 entry = pmd_modify(entry, newprot);
1511 ret = HPAGE_PMD_NR; 1545 ret = HPAGE_PMD_NR;
1512 BUG_ON(pmd_write(entry)); 1546 BUG_ON(pmd_write(entry));
@@ -1521,7 +1555,7 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
1521 */ 1555 */
1522 if (!is_huge_zero_page(page) && 1556 if (!is_huge_zero_page(page) &&
1523 !pmd_numa(*pmd)) { 1557 !pmd_numa(*pmd)) {
1524 entry = pmdp_get_and_clear(mm, addr, pmd); 1558 entry = *pmd;
1525 entry = pmd_mknuma(entry); 1559 entry = pmd_mknuma(entry);
1526 ret = HPAGE_PMD_NR; 1560 ret = HPAGE_PMD_NR;
1527 } 1561 }
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index f1a0ae6e11b8..7f1a356153c0 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -338,7 +338,7 @@ struct mem_cgroup {
338static size_t memcg_size(void) 338static size_t memcg_size(void)
339{ 339{
340 return sizeof(struct mem_cgroup) + 340 return sizeof(struct mem_cgroup) +
341 nr_node_ids * sizeof(struct mem_cgroup_per_node); 341 nr_node_ids * sizeof(struct mem_cgroup_per_node *);
342} 342}
343 343
344/* internal only representation about the status of kmem accounting. */ 344/* internal only representation about the status of kmem accounting. */
@@ -2694,7 +2694,10 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
2694 goto bypass; 2694 goto bypass;
2695 2695
2696 if (unlikely(task_in_memcg_oom(current))) 2696 if (unlikely(task_in_memcg_oom(current)))
2697 goto bypass; 2697 goto nomem;
2698
2699 if (gfp_mask & __GFP_NOFAIL)
2700 oom = false;
2698 2701
2699 /* 2702 /*
2700 * We always charge the cgroup the mm_struct belongs to. 2703 * We always charge the cgroup the mm_struct belongs to.
@@ -6352,6 +6355,42 @@ static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
6352static void mem_cgroup_css_free(struct cgroup_subsys_state *css) 6355static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
6353{ 6356{
6354 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 6357 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
6358 /*
6359 * XXX: css_offline() would be where we should reparent all
6360 * memory to prepare the cgroup for destruction. However,
6361 * memcg does not do css_tryget() and res_counter charging
6362 * under the same RCU lock region, which means that charging
6363 * could race with offlining. Offlining only happens to
6364 * cgroups with no tasks in them but charges can show up
6365 * without any tasks from the swapin path when the target
6366 * memcg is looked up from the swapout record and not from the
6367 * current task as it usually is. A race like this can leak
6368 * charges and put pages with stale cgroup pointers into
6369 * circulation:
6370 *
6371 * #0 #1
6372 * lookup_swap_cgroup_id()
6373 * rcu_read_lock()
6374 * mem_cgroup_lookup()
6375 * css_tryget()
6376 * rcu_read_unlock()
6377 * disable css_tryget()
6378 * call_rcu()
6379 * offline_css()
6380 * reparent_charges()
6381 * res_counter_charge()
6382 * css_put()
6383 * css_free()
6384 * pc->mem_cgroup = dead memcg
6385 * add page to lru
6386 *
6387 * The bulk of the charges are still moved in offline_css() to
6388 * avoid pinning a lot of pages in case a long-term reference
6389 * like a swapout record is deferring the css_free() to long
6390 * after offlining. But this makes sure we catch any charges
6391 * made after offlining:
6392 */
6393 mem_cgroup_reparent_charges(memcg);
6355 6394
6356 memcg_destroy_kmem(memcg); 6395 memcg_destroy_kmem(memcg);
6357 __mem_cgroup_free(memcg); 6396 __mem_cgroup_free(memcg);
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index b7c171602ba1..fabe55046c1d 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -938,6 +938,16 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
938 BUG_ON(!PageHWPoison(p)); 938 BUG_ON(!PageHWPoison(p));
939 return SWAP_FAIL; 939 return SWAP_FAIL;
940 } 940 }
941 /*
942 * We pinned the head page for hwpoison handling,
943 * now we split the thp and we are interested in
944 * the hwpoisoned raw page, so move the refcount
945 * to it.
946 */
947 if (hpage != p) {
948 put_page(hpage);
949 get_page(p);
950 }
941 /* THP is split, so ppage should be the real poisoned page. */ 951 /* THP is split, so ppage should be the real poisoned page. */
942 ppage = p; 952 ppage = p;
943 } 953 }
@@ -1505,10 +1515,16 @@ static int soft_offline_huge_page(struct page *page, int flags)
1505 if (ret > 0) 1515 if (ret > 0)
1506 ret = -EIO; 1516 ret = -EIO;
1507 } else { 1517 } else {
1508 set_page_hwpoison_huge_page(hpage); 1518 /* overcommit hugetlb page will be freed to buddy */
1509 dequeue_hwpoisoned_huge_page(hpage); 1519 if (PageHuge(page)) {
1510 atomic_long_add(1 << compound_order(hpage), 1520 set_page_hwpoison_huge_page(hpage);
1511 &num_poisoned_pages); 1521 dequeue_hwpoisoned_huge_page(hpage);
1522 atomic_long_add(1 << compound_order(hpage),
1523 &num_poisoned_pages);
1524 } else {
1525 SetPageHWPoison(page);
1526 atomic_long_inc(&num_poisoned_pages);
1527 }
1512 } 1528 }
1513 return ret; 1529 return ret;
1514} 1530}
diff --git a/mm/memory.c b/mm/memory.c
index 5d9025f3b3e1..6768ce9e57d2 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4271,7 +4271,7 @@ void copy_user_huge_page(struct page *dst, struct page *src,
4271} 4271}
4272#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */ 4272#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
4273 4273
4274#if USE_SPLIT_PTE_PTLOCKS && BLOATED_SPINLOCKS 4274#if USE_SPLIT_PTE_PTLOCKS && ALLOC_SPLIT_PTLOCKS
4275bool ptlock_alloc(struct page *page) 4275bool ptlock_alloc(struct page *page)
4276{ 4276{
4277 spinlock_t *ptl; 4277 spinlock_t *ptl;
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index eca4a3129129..0cd2c4d4e270 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1197,14 +1197,16 @@ static struct page *new_vma_page(struct page *page, unsigned long private, int *
1197 break; 1197 break;
1198 vma = vma->vm_next; 1198 vma = vma->vm_next;
1199 } 1199 }
1200
1201 if (PageHuge(page)) {
1202 if (vma)
1203 return alloc_huge_page_noerr(vma, address, 1);
1204 else
1205 return NULL;
1206 }
1200 /* 1207 /*
1201 * queue_pages_range() confirms that @page belongs to some vma, 1208 * if !vma, alloc_page_vma() will use task or system default policy
1202 * so vma shouldn't be NULL.
1203 */ 1209 */
1204 BUG_ON(!vma);
1205
1206 if (PageHuge(page))
1207 return alloc_huge_page_noerr(vma, address, 1);
1208 return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address); 1210 return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
1209} 1211}
1210#else 1212#else
@@ -1318,7 +1320,7 @@ static long do_mbind(unsigned long start, unsigned long len,
1318 if (nr_failed && (flags & MPOL_MF_STRICT)) 1320 if (nr_failed && (flags & MPOL_MF_STRICT))
1319 err = -EIO; 1321 err = -EIO;
1320 } else 1322 } else
1321 putback_lru_pages(&pagelist); 1323 putback_movable_pages(&pagelist);
1322 1324
1323 up_write(&mm->mmap_sem); 1325 up_write(&mm->mmap_sem);
1324 mpol_out: 1326 mpol_out:
diff --git a/mm/migrate.c b/mm/migrate.c
index bb940045fe85..9194375b2307 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -36,6 +36,7 @@
36#include <linux/hugetlb_cgroup.h> 36#include <linux/hugetlb_cgroup.h>
37#include <linux/gfp.h> 37#include <linux/gfp.h>
38#include <linux/balloon_compaction.h> 38#include <linux/balloon_compaction.h>
39#include <linux/mmu_notifier.h>
39 40
40#include <asm/tlbflush.h> 41#include <asm/tlbflush.h>
41 42
@@ -316,14 +317,15 @@ static inline bool buffer_migrate_lock_buffers(struct buffer_head *head,
316 */ 317 */
317int migrate_page_move_mapping(struct address_space *mapping, 318int migrate_page_move_mapping(struct address_space *mapping,
318 struct page *newpage, struct page *page, 319 struct page *newpage, struct page *page,
319 struct buffer_head *head, enum migrate_mode mode) 320 struct buffer_head *head, enum migrate_mode mode,
321 int extra_count)
320{ 322{
321 int expected_count = 0; 323 int expected_count = 1 + extra_count;
322 void **pslot; 324 void **pslot;
323 325
324 if (!mapping) { 326 if (!mapping) {
325 /* Anonymous page without mapping */ 327 /* Anonymous page without mapping */
326 if (page_count(page) != 1) 328 if (page_count(page) != expected_count)
327 return -EAGAIN; 329 return -EAGAIN;
328 return MIGRATEPAGE_SUCCESS; 330 return MIGRATEPAGE_SUCCESS;
329 } 331 }
@@ -333,7 +335,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
333 pslot = radix_tree_lookup_slot(&mapping->page_tree, 335 pslot = radix_tree_lookup_slot(&mapping->page_tree,
334 page_index(page)); 336 page_index(page));
335 337
336 expected_count = 2 + page_has_private(page); 338 expected_count += 1 + page_has_private(page);
337 if (page_count(page) != expected_count || 339 if (page_count(page) != expected_count ||
338 radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) { 340 radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) {
339 spin_unlock_irq(&mapping->tree_lock); 341 spin_unlock_irq(&mapping->tree_lock);
@@ -583,7 +585,7 @@ int migrate_page(struct address_space *mapping,
583 585
584 BUG_ON(PageWriteback(page)); /* Writeback must be complete */ 586 BUG_ON(PageWriteback(page)); /* Writeback must be complete */
585 587
586 rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode); 588 rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0);
587 589
588 if (rc != MIGRATEPAGE_SUCCESS) 590 if (rc != MIGRATEPAGE_SUCCESS)
589 return rc; 591 return rc;
@@ -610,7 +612,7 @@ int buffer_migrate_page(struct address_space *mapping,
610 612
611 head = page_buffers(page); 613 head = page_buffers(page);
612 614
613 rc = migrate_page_move_mapping(mapping, newpage, page, head, mode); 615 rc = migrate_page_move_mapping(mapping, newpage, page, head, mode, 0);
614 616
615 if (rc != MIGRATEPAGE_SUCCESS) 617 if (rc != MIGRATEPAGE_SUCCESS)
616 return rc; 618 return rc;
@@ -1654,6 +1656,18 @@ int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
1654 return 1; 1656 return 1;
1655} 1657}
1656 1658
1659bool pmd_trans_migrating(pmd_t pmd)
1660{
1661 struct page *page = pmd_page(pmd);
1662 return PageLocked(page);
1663}
1664
1665void wait_migrate_huge_page(struct anon_vma *anon_vma, pmd_t *pmd)
1666{
1667 struct page *page = pmd_page(*pmd);
1668 wait_on_page_locked(page);
1669}
1670
1657/* 1671/*
1658 * Attempt to migrate a misplaced page to the specified destination 1672 * Attempt to migrate a misplaced page to the specified destination
1659 * node. Caller is expected to have an elevated reference count on 1673 * node. Caller is expected to have an elevated reference count on
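
Editor's note: the two helpers added above reuse the lock bit of the huge page as the "migration in progress" marker: pmd_trans_migrating() is a non-blocking peek, wait_migrate_huge_page() blocks until the migrating task unlocks the page. A rough user-space analogy of that try/wait split, using a pthread mutex in place of the page lock (build with -pthread; this is an analogy, not the kernel mechanism):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

/* Stand-in for the huge page's lock, held for the whole migration. */
static pthread_mutex_t page_lock = PTHREAD_MUTEX_INITIALIZER;

/* Analogue of pmd_trans_migrating(): peek, never block. */
static bool trans_migrating(void)
{
    if (pthread_mutex_trylock(&page_lock) != 0)
        return true;                    /* held: migration pending */
    pthread_mutex_unlock(&page_lock);
    return false;
}

/* Analogue of wait_migrate_huge_page(): block until migration finishes. */
static void wait_migrate(void)
{
    pthread_mutex_lock(&page_lock);
    pthread_mutex_unlock(&page_lock);
}

static void *migrator(void *arg)
{
    pthread_mutex_lock(&page_lock);     /* __set_page_locked()            */
    usleep(100000);                     /* copy and remap the huge page   */
    pthread_mutex_unlock(&page_lock);   /* unlock_page()                  */
    return NULL;
}

int main(void)
{
    pthread_t t;
    pthread_create(&t, NULL, migrator, NULL);
    usleep(10000);
    printf("migrating? %d\n", trans_migrating());
    wait_migrate();
    printf("migrating? %d\n", trans_migrating());
    pthread_join(t, NULL);
    return 0;
}
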
@@ -1716,12 +1730,14 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
1716 struct page *page, int node) 1730 struct page *page, int node)
1717{ 1731{
1718 spinlock_t *ptl; 1732 spinlock_t *ptl;
1719 unsigned long haddr = address & HPAGE_PMD_MASK;
1720 pg_data_t *pgdat = NODE_DATA(node); 1733 pg_data_t *pgdat = NODE_DATA(node);
1721 int isolated = 0; 1734 int isolated = 0;
1722 struct page *new_page = NULL; 1735 struct page *new_page = NULL;
1723 struct mem_cgroup *memcg = NULL; 1736 struct mem_cgroup *memcg = NULL;
1724 int page_lru = page_is_file_cache(page); 1737 int page_lru = page_is_file_cache(page);
1738 unsigned long mmun_start = address & HPAGE_PMD_MASK;
1739 unsigned long mmun_end = mmun_start + HPAGE_PMD_SIZE;
1740 pmd_t orig_entry;
1725 1741
1726 /* 1742 /*
1727 * Rate-limit the amount of data that is being migrated to a node. 1743 * Rate-limit the amount of data that is being migrated to a node.
@@ -1744,6 +1760,9 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
1744 goto out_fail; 1760 goto out_fail;
1745 } 1761 }
1746 1762
1763 if (mm_tlb_flush_pending(mm))
1764 flush_tlb_range(vma, mmun_start, mmun_end);
1765
1747 /* Prepare a page as a migration target */ 1766 /* Prepare a page as a migration target */
1748 __set_page_locked(new_page); 1767 __set_page_locked(new_page);
1749 SetPageSwapBacked(new_page); 1768 SetPageSwapBacked(new_page);
@@ -1755,9 +1774,12 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
1755 WARN_ON(PageLRU(new_page)); 1774 WARN_ON(PageLRU(new_page));
1756 1775
1757 /* Recheck the target PMD */ 1776 /* Recheck the target PMD */
1777 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
1758 ptl = pmd_lock(mm, pmd); 1778 ptl = pmd_lock(mm, pmd);
1759 if (unlikely(!pmd_same(*pmd, entry))) { 1779 if (unlikely(!pmd_same(*pmd, entry) || page_count(page) != 2)) {
1780fail_putback:
1760 spin_unlock(ptl); 1781 spin_unlock(ptl);
1782 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
1761 1783
1762 /* Reverse changes made by migrate_page_copy() */ 1784 /* Reverse changes made by migrate_page_copy() */
1763 if (TestClearPageActive(new_page)) 1785 if (TestClearPageActive(new_page))
@@ -1774,7 +1796,8 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
1774 putback_lru_page(page); 1796 putback_lru_page(page);
1775 mod_zone_page_state(page_zone(page), 1797 mod_zone_page_state(page_zone(page),
1776 NR_ISOLATED_ANON + page_lru, -HPAGE_PMD_NR); 1798 NR_ISOLATED_ANON + page_lru, -HPAGE_PMD_NR);
1777 goto out_fail; 1799
1800 goto out_unlock;
1778 } 1801 }
1779 1802
1780 /* 1803 /*
@@ -1786,16 +1809,35 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
1786 */ 1809 */
1787 mem_cgroup_prepare_migration(page, new_page, &memcg); 1810 mem_cgroup_prepare_migration(page, new_page, &memcg);
1788 1811
1812 orig_entry = *pmd;
1789 entry = mk_pmd(new_page, vma->vm_page_prot); 1813 entry = mk_pmd(new_page, vma->vm_page_prot);
1790 entry = pmd_mknonnuma(entry);
1791 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
1792 entry = pmd_mkhuge(entry); 1814 entry = pmd_mkhuge(entry);
1815 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
1793 1816
1794 pmdp_clear_flush(vma, haddr, pmd); 1817 /*
1795 set_pmd_at(mm, haddr, pmd, entry); 1818 * Clear the old entry under pagetable lock and establish the new PTE.
1796 page_add_new_anon_rmap(new_page, vma, haddr); 1819 * Any parallel GUP will either observe the old page blocking on the
1820 * page lock, block on the page table lock or observe the new page.
1821 * The SetPageUptodate on the new page and page_add_new_anon_rmap
1822 * guarantee the copy is visible before the pagetable update.
1823 */
1824 flush_cache_range(vma, mmun_start, mmun_end);
1825 page_add_new_anon_rmap(new_page, vma, mmun_start);
1826 pmdp_clear_flush(vma, mmun_start, pmd);
1827 set_pmd_at(mm, mmun_start, pmd, entry);
1828 flush_tlb_range(vma, mmun_start, mmun_end);
1797 update_mmu_cache_pmd(vma, address, &entry); 1829 update_mmu_cache_pmd(vma, address, &entry);
1830
1831 if (page_count(page) != 2) {
1832 set_pmd_at(mm, mmun_start, pmd, orig_entry);
1833 flush_tlb_range(vma, mmun_start, mmun_end);
1834 update_mmu_cache_pmd(vma, address, &entry);
1835 page_remove_rmap(new_page);
1836 goto fail_putback;
1837 }
1838
1798 page_remove_rmap(page); 1839 page_remove_rmap(page);
1840
1799 /* 1841 /*
1800 * Finish the charge transaction under the page table lock to 1842 * Finish the charge transaction under the page table lock to
1801 * prevent split_huge_page() from dividing up the charge 1843 * prevent split_huge_page() from dividing up the charge
@@ -1803,6 +1845,7 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
1803 */ 1845 */
1804 mem_cgroup_end_migration(memcg, page, new_page, true); 1846 mem_cgroup_end_migration(memcg, page, new_page, true);
1805 spin_unlock(ptl); 1847 spin_unlock(ptl);
1848 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
1806 1849
1807 unlock_page(new_page); 1850 unlock_page(new_page);
1808 unlock_page(page); 1851 unlock_page(page);
@@ -1820,10 +1863,15 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
1820out_fail: 1863out_fail:
1821 count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR); 1864 count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR);
1822out_dropref: 1865out_dropref:
1823 entry = pmd_mknonnuma(entry); 1866 ptl = pmd_lock(mm, pmd);
1824 set_pmd_at(mm, haddr, pmd, entry); 1867 if (pmd_same(*pmd, entry)) {
1825 update_mmu_cache_pmd(vma, address, &entry); 1868 entry = pmd_mknonnuma(entry);
1869 set_pmd_at(mm, mmun_start, pmd, entry);
1870 update_mmu_cache_pmd(vma, address, &entry);
1871 }
1872 spin_unlock(ptl);
1826 1873
1874out_unlock:
1827 unlock_page(page); 1875 unlock_page(page);
1828 put_page(page); 1876 put_page(page);
1829 return 0; 1877 return 0;
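
Editor's note: the rewritten THP migration path above publishes the new PMD first and only then re-checks the old page's reference count; if a parallel get_user_pages() raced and took an extra reference, the original entry is restored and the attempt is unwound via fail_putback. A small stand-alone sketch of that publish/re-check/roll-back pattern using C11 atomics (the refcount value of 2 mirrors the kernel's "mapping + isolation" expectation; everything else here is invented for the sketch):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct page_stub {
    atomic_int refcount;    /* 2 == only the mapping and our isolation ref */
};

static _Atomic(struct page_stub *) pmd_slot;    /* stand-in for the PMD */

/* Publish the replacement first, then re-check the old page's refcount;
 * a racing reference forces a rollback to the original entry. */
static bool try_replace(struct page_stub *old_page, struct page_stub *new_page)
{
    struct page_stub *orig = atomic_load(&pmd_slot);

    atomic_store(&pmd_slot, new_page);          /* set_pmd_at(..., entry)      */

    if (atomic_load(&old_page->refcount) != 2) {
        atomic_store(&pmd_slot, orig);          /* set_pmd_at(..., orig_entry) */
        return false;                           /* -> fail_putback in the patch */
    }
    return true;
}

int main(void)
{
    struct page_stub oldp, newp;

    atomic_init(&oldp.refcount, 2);
    atomic_init(&newp.refcount, 1);

    atomic_store(&pmd_slot, &oldp);
    printf("replaced: %d\n", try_replace(&oldp, &newp));   /* 1 */

    atomic_fetch_add(&oldp.refcount, 1);        /* simulate a racing pin */
    atomic_store(&pmd_slot, &oldp);
    printf("replaced: %d\n", try_replace(&oldp, &newp));   /* 0: rolled back */
    return 0;
}
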
diff --git a/mm/mlock.c b/mm/mlock.c
index d480cd6fc475..192e6eebe4f2 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -133,7 +133,10 @@ static void __munlock_isolation_failed(struct page *page)
133 133
134/** 134/**
135 * munlock_vma_page - munlock a vma page 135 * munlock_vma_page - munlock a vma page
136 * @page - page to be unlocked 136 * @page - page to be unlocked, either a normal page or THP page head
137 *
138 * returns the size of the page as a page mask (0 for normal page,
139 * HPAGE_PMD_NR - 1 for THP head page)
137 * 140 *
138 * called from munlock()/munmap() path with page supposedly on the LRU. 141 * called from munlock()/munmap() path with page supposedly on the LRU.
139 * When we munlock a page, because the vma where we found the page is being 142 * When we munlock a page, because the vma where we found the page is being
@@ -148,21 +151,30 @@ static void __munlock_isolation_failed(struct page *page)
148 */ 151 */
149unsigned int munlock_vma_page(struct page *page) 152unsigned int munlock_vma_page(struct page *page)
150{ 153{
151 unsigned int page_mask = 0; 154 unsigned int nr_pages;
152 155
153 BUG_ON(!PageLocked(page)); 156 BUG_ON(!PageLocked(page));
154 157
155 if (TestClearPageMlocked(page)) { 158 if (TestClearPageMlocked(page)) {
156 unsigned int nr_pages = hpage_nr_pages(page); 159 nr_pages = hpage_nr_pages(page);
157 mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages); 160 mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
158 page_mask = nr_pages - 1;
159 if (!isolate_lru_page(page)) 161 if (!isolate_lru_page(page))
160 __munlock_isolated_page(page); 162 __munlock_isolated_page(page);
161 else 163 else
162 __munlock_isolation_failed(page); 164 __munlock_isolation_failed(page);
165 } else {
166 nr_pages = hpage_nr_pages(page);
163 } 167 }
164 168
165 return page_mask; 169 /*
170 * Regardless of the original PageMlocked flag, we determine nr_pages
171 * after touching the flag. This leaves a possible race with a THP page
172 * split, such that a whole THP page was munlocked, but nr_pages == 1.
173 * Returning a smaller mask due to that is OK, the worst that can
174 * happen is subsequent useless scanning of the former tail pages.
175 * The NR_MLOCK accounting can however become broken.
176 */
177 return nr_pages - 1;
166} 178}
167 179
168/** 180/**
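
Editor's note: munlock_vma_page() now always reports the page size as a mask (0 for a normal page, HPAGE_PMD_NR - 1 for a THP head), and munlock_vma_pages_range() turns that into the number of pages to skip with page_increm = 1 + page_mask. A short sketch of how the caller consumes that return value; the fake lookup and constants are invented for the sketch (a 2 MB THP on x86-64 gives HPAGE_PMD_NR = 512):

#include <stdio.h>

#define PAGE_SIZE     4096UL
#define HPAGE_PMD_NR  512           /* 2 MB THP / 4 KB base pages */

/* Stand-in for munlock_vma_page(): 0 for a normal page,
 * HPAGE_PMD_NR - 1 when the address hits a THP head page. */
static unsigned int munlock_one(unsigned long addr)
{
    return (addr == 0x200000UL) ? HPAGE_PMD_NR - 1 : 0;
}

int main(void)
{
    unsigned long start = 0x1ff000UL, end = 0x400000UL;

    while (start < end) {
        unsigned int page_mask = munlock_one(start);
        unsigned long page_increm = 1 + page_mask;     /* pages to skip */

        printf("munlocked %lu page(s) at %#lx\n", page_increm, start);
        start += page_increm * PAGE_SIZE;
    }
    return 0;
}
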
@@ -286,10 +298,12 @@ static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone)
286{ 298{
287 int i; 299 int i;
288 int nr = pagevec_count(pvec); 300 int nr = pagevec_count(pvec);
289 int delta_munlocked = -nr; 301 int delta_munlocked;
290 struct pagevec pvec_putback; 302 struct pagevec pvec_putback;
291 int pgrescued = 0; 303 int pgrescued = 0;
292 304
305 pagevec_init(&pvec_putback, 0);
306
293 /* Phase 1: page isolation */ 307 /* Phase 1: page isolation */
294 spin_lock_irq(&zone->lru_lock); 308 spin_lock_irq(&zone->lru_lock);
295 for (i = 0; i < nr; i++) { 309 for (i = 0; i < nr; i++) {
@@ -318,18 +332,21 @@ skip_munlock:
318 /* 332 /*
319 * We won't be munlocking this page in the next phase 333 * We won't be munlocking this page in the next phase
320 * but we still need to release the follow_page_mask() 334 * but we still need to release the follow_page_mask()
321 * pin. 335 * pin. We cannot do it under lru_lock however. If it's
336 * the last pin, __page_cache_release would deadlock.
322 */ 337 */
338 pagevec_add(&pvec_putback, pvec->pages[i]);
323 pvec->pages[i] = NULL; 339 pvec->pages[i] = NULL;
324 put_page(page);
325 delta_munlocked++;
326 } 340 }
327 } 341 }
342 delta_munlocked = -nr + pagevec_count(&pvec_putback);
328 __mod_zone_page_state(zone, NR_MLOCK, delta_munlocked); 343 __mod_zone_page_state(zone, NR_MLOCK, delta_munlocked);
329 spin_unlock_irq(&zone->lru_lock); 344 spin_unlock_irq(&zone->lru_lock);
330 345
346 /* Now we can release pins of pages that we are not munlocking */
347 pagevec_release(&pvec_putback);
348
331 /* Phase 2: page munlock */ 349 /* Phase 2: page munlock */
332 pagevec_init(&pvec_putback, 0);
333 for (i = 0; i < nr; i++) { 350 for (i = 0; i < nr; i++) {
334 struct page *page = pvec->pages[i]; 351 struct page *page = pvec->pages[i];
335 352
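
Editor's note: the change above stops dropping the follow_page_mask() pin while zone->lru_lock is held; if that put_page() were the last reference, __page_cache_release() would try to take the same lru_lock and deadlock, so the pages are collected in pvec_putback and released only after the lock is dropped. The general shape of that "defer the release until after the lock" pattern, sketched stand-alone (the lock, the arrays and release_one() are invented for illustration):

#include <pthread.h>
#include <stdio.h>

#define NR 8

static pthread_mutex_t lru_lock = PTHREAD_MUTEX_INITIALIZER;

static void release_one(int page)
{
    /* In the kernel this may need lru_lock itself (__page_cache_release),
     * which is exactly why it must not run under the lock. */
    printf("released page %d\n", page);
}

int main(void)
{
    int pages[NR] = { 0, 1, 2, 3, 4, 5, 6, 7 };
    int putback[NR];
    int nr_putback = 0;

    pthread_mutex_lock(&lru_lock);
    for (int i = 0; i < NR; i++) {
        if (pages[i] % 3 == 0) {
            /* Can't be handled here; remember it for release later. */
            putback[nr_putback++] = pages[i];
            pages[i] = -1;
        }
    }
    pthread_mutex_unlock(&lru_lock);

    /* Now it is safe to drop the pins we skipped above. */
    for (int i = 0; i < nr_putback; i++)
        release_one(putback[i]);

    return 0;
}
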
@@ -440,7 +457,8 @@ void munlock_vma_pages_range(struct vm_area_struct *vma,
440 457
441 while (start < end) { 458 while (start < end) {
442 struct page *page = NULL; 459 struct page *page = NULL;
443 unsigned int page_mask, page_increm; 460 unsigned int page_mask;
461 unsigned long page_increm;
444 struct pagevec pvec; 462 struct pagevec pvec;
445 struct zone *zone; 463 struct zone *zone;
446 int zoneid; 464 int zoneid;
@@ -490,7 +508,9 @@ void munlock_vma_pages_range(struct vm_area_struct *vma,
490 goto next; 508 goto next;
491 } 509 }
492 } 510 }
493 page_increm = 1 + (~(start >> PAGE_SHIFT) & page_mask); 511 /* It's a bug to munlock in the middle of a THP page */
512 VM_BUG_ON((start >> PAGE_SHIFT) & page_mask);
513 page_increm = 1 + page_mask;
494 start += page_increm * PAGE_SIZE; 514 start += page_increm * PAGE_SIZE;
495next: 515next:
496 cond_resched(); 516 cond_resched();
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 26667971c824..bb53a6591aea 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -52,17 +52,21 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
52 pte_t ptent; 52 pte_t ptent;
53 bool updated = false; 53 bool updated = false;
54 54
55 ptent = ptep_modify_prot_start(mm, addr, pte);
56 if (!prot_numa) { 55 if (!prot_numa) {
56 ptent = ptep_modify_prot_start(mm, addr, pte);
57 if (pte_numa(ptent))
58 ptent = pte_mknonnuma(ptent);
57 ptent = pte_modify(ptent, newprot); 59 ptent = pte_modify(ptent, newprot);
58 updated = true; 60 updated = true;
59 } else { 61 } else {
60 struct page *page; 62 struct page *page;
61 63
64 ptent = *pte;
62 page = vm_normal_page(vma, addr, oldpte); 65 page = vm_normal_page(vma, addr, oldpte);
63 if (page) { 66 if (page) {
64 if (!pte_numa(oldpte)) { 67 if (!pte_numa(oldpte)) {
65 ptent = pte_mknuma(ptent); 68 ptent = pte_mknuma(ptent);
69 set_pte_at(mm, addr, pte, ptent);
66 updated = true; 70 updated = true;
67 } 71 }
68 } 72 }
@@ -79,7 +83,10 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
79 83
80 if (updated) 84 if (updated)
81 pages++; 85 pages++;
82 ptep_modify_prot_commit(mm, addr, pte, ptent); 86
87 /* Only !prot_numa always clears the pte */
88 if (!prot_numa)
89 ptep_modify_prot_commit(mm, addr, pte, ptent);
83 } else if (IS_ENABLED(CONFIG_MIGRATION) && !pte_file(oldpte)) { 90 } else if (IS_ENABLED(CONFIG_MIGRATION) && !pte_file(oldpte)) {
84 swp_entry_t entry = pte_to_swp_entry(oldpte); 91 swp_entry_t entry = pte_to_swp_entry(oldpte);
85 92
@@ -181,6 +188,7 @@ static unsigned long change_protection_range(struct vm_area_struct *vma,
181 BUG_ON(addr >= end); 188 BUG_ON(addr >= end);
182 pgd = pgd_offset(mm, addr); 189 pgd = pgd_offset(mm, addr);
183 flush_cache_range(vma, addr, end); 190 flush_cache_range(vma, addr, end);
191 set_tlb_flush_pending(mm);
184 do { 192 do {
185 next = pgd_addr_end(addr, end); 193 next = pgd_addr_end(addr, end);
186 if (pgd_none_or_clear_bad(pgd)) 194 if (pgd_none_or_clear_bad(pgd))
@@ -192,6 +200,7 @@ static unsigned long change_protection_range(struct vm_area_struct *vma,
192 /* Only flush the TLB if we actually modified any entries: */ 200 /* Only flush the TLB if we actually modified any entries: */
193 if (pages) 201 if (pages)
194 flush_tlb_range(vma, start, end); 202 flush_tlb_range(vma, start, end);
203 clear_tlb_flush_pending(mm);
195 204
196 return pages; 205 return pages;
197} 206}
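
Editor's note: the mprotect() path now brackets the whole range update with set_tlb_flush_pending()/clear_tlb_flush_pending(), and the THP migration code checks mm_tlb_flush_pending() and flushes the range itself before trusting the page table contents (the mm/migrate.c hunk near old line 1763 above). A compact sketch of that flag protocol with a C11 atomic as the pending marker; the kernel keeps the flag in mm_struct, and the "flushes" here are just printfs:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool tlb_flush_pending;   /* stand-in for mm->tlb_flush_pending */

static void set_tlb_flush_pending(void)   { atomic_store(&tlb_flush_pending, true); }
static void clear_tlb_flush_pending(void) { atomic_store(&tlb_flush_pending, false); }
static bool mm_tlb_flush_pending(void)    { return atomic_load(&tlb_flush_pending); }

/* Writer side: batch the PTE changes, flush once, then clear the flag. */
static void change_protection_range(void)
{
    set_tlb_flush_pending();
    /* ... walk the range and rewrite PTEs ... */
    printf("flush_tlb_range() after the batch\n");
    clear_tlb_flush_pending();
}

/* Reader side (e.g. THP migration): if a batched change may still have
 * stale TLB entries, flush conservatively before relying on the mapping. */
static void migrate_side(void)
{
    if (mm_tlb_flush_pending())
        printf("flush_tlb_range() before the migration copy\n");
}

int main(void)
{
    set_tlb_flush_pending();
    migrate_side();                 /* sees the pending flag, flushes */
    clear_tlb_flush_pending();
    migrate_side();                 /* nothing pending, no extra flush */
    change_protection_range();
    return 0;
}
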
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 580a5f075ed0..5248fe070aa4 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1816,7 +1816,7 @@ static void zlc_clear_zones_full(struct zonelist *zonelist)
1816 1816
1817static bool zone_local(struct zone *local_zone, struct zone *zone) 1817static bool zone_local(struct zone *local_zone, struct zone *zone)
1818{ 1818{
1819 return node_distance(local_zone->node, zone->node) == LOCAL_DISTANCE; 1819 return local_zone->node == zone->node;
1820} 1820}
1821 1821
1822static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone) 1822static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
@@ -1913,18 +1913,17 @@ zonelist_scan:
1913 * page was allocated in should have no effect on the 1913 * page was allocated in should have no effect on the
1914 * time the page has in memory before being reclaimed. 1914 * time the page has in memory before being reclaimed.
1915 * 1915 *
1916 * When zone_reclaim_mode is enabled, try to stay in 1916 * Try to stay in local zones in the fastpath. If
1917 * local zones in the fastpath. If that fails, the 1917 * that fails, the slowpath is entered, which will do
1918 * slowpath is entered, which will do another pass 1918 * another pass starting with the local zones, but
1919 * starting with the local zones, but ultimately fall 1919 * ultimately fall back to remote zones that do not
1920 * back to remote zones that do not partake in the 1920 * partake in the fairness round-robin cycle of this
1921 * fairness round-robin cycle of this zonelist. 1921 * zonelist.
1922 */ 1922 */
1923 if (alloc_flags & ALLOC_WMARK_LOW) { 1923 if (alloc_flags & ALLOC_WMARK_LOW) {
1924 if (zone_page_state(zone, NR_ALLOC_BATCH) <= 0) 1924 if (zone_page_state(zone, NR_ALLOC_BATCH) <= 0)
1925 continue; 1925 continue;
1926 if (zone_reclaim_mode && 1926 if (!zone_local(preferred_zone, zone))
1927 !zone_local(preferred_zone, zone))
1928 continue; 1927 continue;
1929 } 1928 }
1930 /* 1929 /*
@@ -2390,7 +2389,7 @@ static void prepare_slowpath(gfp_t gfp_mask, unsigned int order,
2390 * thrash fairness information for zones that are not 2389 * thrash fairness information for zones that are not
2391 * actually part of this zonelist's round-robin cycle. 2390 * actually part of this zonelist's round-robin cycle.
2392 */ 2391 */
2393 if (zone_reclaim_mode && !zone_local(preferred_zone, zone)) 2392 if (!zone_local(preferred_zone, zone))
2394 continue; 2393 continue;
2395 mod_zone_page_state(zone, NR_ALLOC_BATCH, 2394 mod_zone_page_state(zone, NR_ALLOC_BATCH,
2396 high_wmark_pages(zone) - 2395 high_wmark_pages(zone) -
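
Editor's note: both page_alloc.c hunks drop the zone_reclaim_mode condition, so the fast path now always confines the fairness round-robin to zones on the local node, and zone_local() becomes a plain node-ID comparison. The batching scheme gives each participating zone an allocation credit (NR_ALLOC_BATCH) that the fast path consumes and the slow path replenishes from the watermarks. A toy version of that cycle, with made-up zone names and batch sizes:

#include <stdbool.h>
#include <stdio.h>

struct zone { const char *name; int node; long batch; };

static struct zone zones[] = {
    { "node0/Normal", 0, 4 },
    { "node0/DMA32",  0, 2 },
    { "node1/Normal", 1, 4 },
};
#define NR_ZONES (sizeof(zones) / sizeof(zones[0]))

static bool zone_local(int preferred_node, const struct zone *z)
{
    return z->node == preferred_node;   /* the simplified check from the patch */
}

/* Fast path: only local zones with batch credit left participate. */
static struct zone *alloc_fastpath(int preferred_node)
{
    for (unsigned i = 0; i < NR_ZONES; i++) {
        struct zone *z = &zones[i];
        if (!zone_local(preferred_node, z) || z->batch <= 0)
            continue;
        z->batch--;
        return z;
    }
    return NULL;        /* all local credit spent: enter the slow path */
}

/* Slow path: replenish the credit of the local zones and retry. */
static void prepare_slowpath(int preferred_node)
{
    for (unsigned i = 0; i < NR_ZONES; i++)
        if (zone_local(preferred_node, &zones[i]))
            zones[i].batch = 4;         /* derived from the watermarks in the kernel */
}

int main(void)
{
    for (int i = 0; i < 8; i++) {
        struct zone *z = alloc_fastpath(0);
        if (!z) {
            prepare_slowpath(0);
            z = alloc_fastpath(0);
        }
        printf("page %d from %s\n", i, z ? z->name : "(none)");
    }
    return 0;
}
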
diff --git a/mm/pgtable-generic.c b/mm/pgtable-generic.c
index cbb38545d9d6..a8b919925934 100644
--- a/mm/pgtable-generic.c
+++ b/mm/pgtable-generic.c
@@ -110,9 +110,10 @@ int pmdp_clear_flush_young(struct vm_area_struct *vma,
110pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address, 110pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address,
111 pte_t *ptep) 111 pte_t *ptep)
112{ 112{
113 struct mm_struct *mm = (vma)->vm_mm;
113 pte_t pte; 114 pte_t pte;
114 pte = ptep_get_and_clear((vma)->vm_mm, address, ptep); 115 pte = ptep_get_and_clear(mm, address, ptep);
115 if (pte_accessible(pte)) 116 if (pte_accessible(mm, pte))
116 flush_tlb_page(vma, address); 117 flush_tlb_page(vma, address);
117 return pte; 118 return pte;
118} 119}
@@ -191,6 +192,9 @@ pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
191void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address, 192void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
192 pmd_t *pmdp) 193 pmd_t *pmdp)
193{ 194{
195 pmd_t entry = *pmdp;
196 if (pmd_numa(entry))
197 entry = pmd_mknonnuma(entry);
194 set_pmd_at(vma->vm_mm, address, pmdp, pmd_mknotpresent(*pmdp)); 198 set_pmd_at(vma->vm_mm, address, pmdp, pmd_mknotpresent(*pmdp));
195 flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE); 199 flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
196} 200}
diff --git a/mm/rmap.c b/mm/rmap.c
index 55c8b8dc9ffb..068522d8502a 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -600,7 +600,11 @@ pte_t *__page_check_address(struct page *page, struct mm_struct *mm,
600 spinlock_t *ptl; 600 spinlock_t *ptl;
601 601
602 if (unlikely(PageHuge(page))) { 602 if (unlikely(PageHuge(page))) {
603 /* when pud is not present, pte will be NULL */
603 pte = huge_pte_offset(mm, address); 604 pte = huge_pte_offset(mm, address);
605 if (!pte)
606 return NULL;
607
604 ptl = huge_pte_lockptr(page_hstate(page), mm, pte); 608 ptl = huge_pte_lockptr(page_hstate(page), mm, pte);
605 goto check; 609 goto check;
606 } 610 }
diff --git a/mm/shmem.c b/mm/shmem.c
index 8297623fcaed..902a14842b74 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2918,13 +2918,8 @@ static struct dentry_operations anon_ops = {
2918 .d_dname = simple_dname 2918 .d_dname = simple_dname
2919}; 2919};
2920 2920
2921/** 2921static struct file *__shmem_file_setup(const char *name, loff_t size,
2922 * shmem_file_setup - get an unlinked file living in tmpfs 2922 unsigned long flags, unsigned int i_flags)
2923 * @name: name for dentry (to be seen in /proc/<pid>/maps
2924 * @size: size to be set for the file
2925 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
2926 */
2927struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
2928{ 2923{
2929 struct file *res; 2924 struct file *res;
2930 struct inode *inode; 2925 struct inode *inode;
@@ -2957,6 +2952,7 @@ struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags
2957 if (!inode) 2952 if (!inode)
2958 goto put_dentry; 2953 goto put_dentry;
2959 2954
2955 inode->i_flags |= i_flags;
2960 d_instantiate(path.dentry, inode); 2956 d_instantiate(path.dentry, inode);
2961 inode->i_size = size; 2957 inode->i_size = size;
2962 clear_nlink(inode); /* It is unlinked */ 2958 clear_nlink(inode); /* It is unlinked */
@@ -2977,6 +2973,32 @@ put_memory:
2977 shmem_unacct_size(flags, size); 2973 shmem_unacct_size(flags, size);
2978 return res; 2974 return res;
2979} 2975}
2976
2977/**
2978 * shmem_kernel_file_setup - get an unlinked file living in tmpfs which must be
2979 * kernel internal. There will be NO LSM permission checks against the
2980 * underlying inode. So users of this interface must do LSM checks at a
2981 * higher layer. The one user is the big_key implementation. LSM checks
2982 * are provided at the key level rather than the inode level.
2983 * @name: name for dentry (to be seen in /proc/<pid>/maps
2984 * @size: size to be set for the file
2985 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
2986 */
2987struct file *shmem_kernel_file_setup(const char *name, loff_t size, unsigned long flags)
2988{
2989 return __shmem_file_setup(name, size, flags, S_PRIVATE);
2990}
2991
2992/**
2993 * shmem_file_setup - get an unlinked file living in tmpfs
2994 * @name: name for dentry (to be seen in /proc/<pid>/maps
2995 * @size: size to be set for the file
2996 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
2997 */
2998struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
2999{
3000 return __shmem_file_setup(name, size, flags, 0);
3001}
2980EXPORT_SYMBOL_GPL(shmem_file_setup); 3002EXPORT_SYMBOL_GPL(shmem_file_setup);
2981 3003
2982/** 3004/**
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 762896ebfcf5..47c908f1f626 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -530,6 +530,23 @@ static const struct header_ops vlan_header_ops = {
530 .parse = eth_header_parse, 530 .parse = eth_header_parse,
531}; 531};
532 532
533static int vlan_passthru_hard_header(struct sk_buff *skb, struct net_device *dev,
534 unsigned short type,
535 const void *daddr, const void *saddr,
536 unsigned int len)
537{
538 struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
539 struct net_device *real_dev = vlan->real_dev;
540
541 return dev_hard_header(skb, real_dev, type, daddr, saddr, len);
542}
543
544static const struct header_ops vlan_passthru_header_ops = {
545 .create = vlan_passthru_hard_header,
546 .rebuild = dev_rebuild_header,
547 .parse = eth_header_parse,
548};
549
533static struct device_type vlan_type = { 550static struct device_type vlan_type = {
534 .name = "vlan", 551 .name = "vlan",
535}; 552};
@@ -573,7 +590,7 @@ static int vlan_dev_init(struct net_device *dev)
573 590
574 dev->needed_headroom = real_dev->needed_headroom; 591 dev->needed_headroom = real_dev->needed_headroom;
575 if (real_dev->features & NETIF_F_HW_VLAN_CTAG_TX) { 592 if (real_dev->features & NETIF_F_HW_VLAN_CTAG_TX) {
576 dev->header_ops = real_dev->header_ops; 593 dev->header_ops = &vlan_passthru_header_ops;
577 dev->hard_header_len = real_dev->hard_header_len; 594 dev->hard_header_len = real_dev->hard_header_len;
578 } else { 595 } else {
579 dev->header_ops = &vlan_header_ops; 596 dev->header_ops = &vlan_header_ops;
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
index a2b480a90872..b9c8a6eedf45 100644
--- a/net/batman-adv/bat_iv_ogm.c
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -307,9 +307,9 @@ static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface)
307 hard_iface->bat_iv.ogm_buff = ogm_buff; 307 hard_iface->bat_iv.ogm_buff = ogm_buff;
308 308
309 batadv_ogm_packet = (struct batadv_ogm_packet *)ogm_buff; 309 batadv_ogm_packet = (struct batadv_ogm_packet *)ogm_buff;
310 batadv_ogm_packet->header.packet_type = BATADV_IV_OGM; 310 batadv_ogm_packet->packet_type = BATADV_IV_OGM;
311 batadv_ogm_packet->header.version = BATADV_COMPAT_VERSION; 311 batadv_ogm_packet->version = BATADV_COMPAT_VERSION;
312 batadv_ogm_packet->header.ttl = 2; 312 batadv_ogm_packet->ttl = 2;
313 batadv_ogm_packet->flags = BATADV_NO_FLAGS; 313 batadv_ogm_packet->flags = BATADV_NO_FLAGS;
314 batadv_ogm_packet->reserved = 0; 314 batadv_ogm_packet->reserved = 0;
315 batadv_ogm_packet->tq = BATADV_TQ_MAX_VALUE; 315 batadv_ogm_packet->tq = BATADV_TQ_MAX_VALUE;
@@ -346,7 +346,7 @@ batadv_iv_ogm_primary_iface_set(struct batadv_hard_iface *hard_iface)
346 346
347 batadv_ogm_packet = (struct batadv_ogm_packet *)ogm_buff; 347 batadv_ogm_packet = (struct batadv_ogm_packet *)ogm_buff;
348 batadv_ogm_packet->flags = BATADV_PRIMARIES_FIRST_HOP; 348 batadv_ogm_packet->flags = BATADV_PRIMARIES_FIRST_HOP;
349 batadv_ogm_packet->header.ttl = BATADV_TTL; 349 batadv_ogm_packet->ttl = BATADV_TTL;
350} 350}
351 351
352/* when do we schedule our own ogm to be sent */ 352/* when do we schedule our own ogm to be sent */
@@ -435,7 +435,7 @@ static void batadv_iv_ogm_send_to_if(struct batadv_forw_packet *forw_packet,
435 fwd_str, (packet_num > 0 ? "aggregated " : ""), 435 fwd_str, (packet_num > 0 ? "aggregated " : ""),
436 batadv_ogm_packet->orig, 436 batadv_ogm_packet->orig,
437 ntohl(batadv_ogm_packet->seqno), 437 ntohl(batadv_ogm_packet->seqno),
438 batadv_ogm_packet->tq, batadv_ogm_packet->header.ttl, 438 batadv_ogm_packet->tq, batadv_ogm_packet->ttl,
439 (batadv_ogm_packet->flags & BATADV_DIRECTLINK ? 439 (batadv_ogm_packet->flags & BATADV_DIRECTLINK ?
440 "on" : "off"), 440 "on" : "off"),
441 hard_iface->net_dev->name, 441 hard_iface->net_dev->name,
@@ -491,7 +491,7 @@ static void batadv_iv_ogm_emit(struct batadv_forw_packet *forw_packet)
491 /* multihomed peer assumed 491 /* multihomed peer assumed
492 * non-primary OGMs are only broadcasted on their interface 492 * non-primary OGMs are only broadcasted on their interface
493 */ 493 */
494 if ((directlink && (batadv_ogm_packet->header.ttl == 1)) || 494 if ((directlink && (batadv_ogm_packet->ttl == 1)) ||
495 (forw_packet->own && (forw_packet->if_incoming != primary_if))) { 495 (forw_packet->own && (forw_packet->if_incoming != primary_if))) {
496 /* FIXME: what about aggregated packets ? */ 496 /* FIXME: what about aggregated packets ? */
497 batadv_dbg(BATADV_DBG_BATMAN, bat_priv, 497 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
@@ -499,7 +499,7 @@ static void batadv_iv_ogm_emit(struct batadv_forw_packet *forw_packet)
499 (forw_packet->own ? "Sending own" : "Forwarding"), 499 (forw_packet->own ? "Sending own" : "Forwarding"),
500 batadv_ogm_packet->orig, 500 batadv_ogm_packet->orig,
501 ntohl(batadv_ogm_packet->seqno), 501 ntohl(batadv_ogm_packet->seqno),
502 batadv_ogm_packet->header.ttl, 502 batadv_ogm_packet->ttl,
503 forw_packet->if_incoming->net_dev->name, 503 forw_packet->if_incoming->net_dev->name,
504 forw_packet->if_incoming->net_dev->dev_addr); 504 forw_packet->if_incoming->net_dev->dev_addr);
505 505
@@ -572,7 +572,7 @@ batadv_iv_ogm_can_aggregate(const struct batadv_ogm_packet *new_bat_ogm_packet,
572 */ 572 */
573 if ((!directlink) && 573 if ((!directlink) &&
574 (!(batadv_ogm_packet->flags & BATADV_DIRECTLINK)) && 574 (!(batadv_ogm_packet->flags & BATADV_DIRECTLINK)) &&
575 (batadv_ogm_packet->header.ttl != 1) && 575 (batadv_ogm_packet->ttl != 1) &&
576 576
577 /* own packets originating non-primary 577 /* own packets originating non-primary
578 * interfaces leave only that interface 578 * interfaces leave only that interface
@@ -587,7 +587,7 @@ batadv_iv_ogm_can_aggregate(const struct batadv_ogm_packet *new_bat_ogm_packet,
587 * interface only - we still can aggregate 587 * interface only - we still can aggregate
588 */ 588 */
589 if ((directlink) && 589 if ((directlink) &&
590 (new_bat_ogm_packet->header.ttl == 1) && 590 (new_bat_ogm_packet->ttl == 1) &&
591 (forw_packet->if_incoming == if_incoming) && 591 (forw_packet->if_incoming == if_incoming) &&
592 592
593 /* packets from direct neighbors or 593 /* packets from direct neighbors or
@@ -778,7 +778,7 @@ static void batadv_iv_ogm_forward(struct batadv_orig_node *orig_node,
778 struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface); 778 struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
779 uint16_t tvlv_len; 779 uint16_t tvlv_len;
780 780
781 if (batadv_ogm_packet->header.ttl <= 1) { 781 if (batadv_ogm_packet->ttl <= 1) {
782 batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "ttl exceeded\n"); 782 batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "ttl exceeded\n");
783 return; 783 return;
784 } 784 }
@@ -798,7 +798,7 @@ static void batadv_iv_ogm_forward(struct batadv_orig_node *orig_node,
798 798
799 tvlv_len = ntohs(batadv_ogm_packet->tvlv_len); 799 tvlv_len = ntohs(batadv_ogm_packet->tvlv_len);
800 800
801 batadv_ogm_packet->header.ttl--; 801 batadv_ogm_packet->ttl--;
802 memcpy(batadv_ogm_packet->prev_sender, ethhdr->h_source, ETH_ALEN); 802 memcpy(batadv_ogm_packet->prev_sender, ethhdr->h_source, ETH_ALEN);
803 803
804 /* apply hop penalty */ 804 /* apply hop penalty */
@@ -807,7 +807,7 @@ static void batadv_iv_ogm_forward(struct batadv_orig_node *orig_node,
807 807
808 batadv_dbg(BATADV_DBG_BATMAN, bat_priv, 808 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
809 "Forwarding packet: tq: %i, ttl: %i\n", 809 "Forwarding packet: tq: %i, ttl: %i\n",
810 batadv_ogm_packet->tq, batadv_ogm_packet->header.ttl); 810 batadv_ogm_packet->tq, batadv_ogm_packet->ttl);
811 811
812 /* switch of primaries first hop flag when forwarding */ 812 /* switch of primaries first hop flag when forwarding */
813 batadv_ogm_packet->flags &= ~BATADV_PRIMARIES_FIRST_HOP; 813 batadv_ogm_packet->flags &= ~BATADV_PRIMARIES_FIRST_HOP;
@@ -972,8 +972,8 @@ batadv_iv_ogm_orig_update(struct batadv_priv *bat_priv,
972 spin_unlock_bh(&neigh_node->bat_iv.lq_update_lock); 972 spin_unlock_bh(&neigh_node->bat_iv.lq_update_lock);
973 973
974 if (dup_status == BATADV_NO_DUP) { 974 if (dup_status == BATADV_NO_DUP) {
975 orig_node->last_ttl = batadv_ogm_packet->header.ttl; 975 orig_node->last_ttl = batadv_ogm_packet->ttl;
976 neigh_node->last_ttl = batadv_ogm_packet->header.ttl; 976 neigh_node->last_ttl = batadv_ogm_packet->ttl;
977 } 977 }
978 978
979 batadv_bonding_candidate_add(bat_priv, orig_node, neigh_node); 979 batadv_bonding_candidate_add(bat_priv, orig_node, neigh_node);
@@ -1247,7 +1247,7 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr,
1247 * packet in an aggregation. Here we expect that the padding 1247 * packet in an aggregation. Here we expect that the padding
1248 * is always zero (or not 0x01) 1248 * is always zero (or not 0x01)
1249 */ 1249 */
1250 if (batadv_ogm_packet->header.packet_type != BATADV_IV_OGM) 1250 if (batadv_ogm_packet->packet_type != BATADV_IV_OGM)
1251 return; 1251 return;
1252 1252
1253 /* could be changed by schedule_own_packet() */ 1253 /* could be changed by schedule_own_packet() */
@@ -1267,8 +1267,8 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr,
1267 if_incoming->net_dev->dev_addr, batadv_ogm_packet->orig, 1267 if_incoming->net_dev->dev_addr, batadv_ogm_packet->orig,
1268 batadv_ogm_packet->prev_sender, 1268 batadv_ogm_packet->prev_sender,
1269 ntohl(batadv_ogm_packet->seqno), batadv_ogm_packet->tq, 1269 ntohl(batadv_ogm_packet->seqno), batadv_ogm_packet->tq,
1270 batadv_ogm_packet->header.ttl, 1270 batadv_ogm_packet->ttl,
1271 batadv_ogm_packet->header.version, has_directlink_flag); 1271 batadv_ogm_packet->version, has_directlink_flag);
1272 1272
1273 rcu_read_lock(); 1273 rcu_read_lock();
1274 list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) { 1274 list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
@@ -1433,7 +1433,7 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr,
1433 * seqno and similar ttl as the non-duplicate 1433 * seqno and similar ttl as the non-duplicate
1434 */ 1434 */
1435 sameseq = orig_node->last_real_seqno == ntohl(batadv_ogm_packet->seqno); 1435 sameseq = orig_node->last_real_seqno == ntohl(batadv_ogm_packet->seqno);
1436 similar_ttl = orig_node->last_ttl - 3 <= batadv_ogm_packet->header.ttl; 1436 similar_ttl = orig_node->last_ttl - 3 <= batadv_ogm_packet->ttl;
1437 if (is_bidirect && ((dup_status == BATADV_NO_DUP) || 1437 if (is_bidirect && ((dup_status == BATADV_NO_DUP) ||
1438 (sameseq && similar_ttl))) 1438 (sameseq && similar_ttl)))
1439 batadv_iv_ogm_orig_update(bat_priv, orig_node, ethhdr, 1439 batadv_iv_ogm_orig_update(bat_priv, orig_node, ethhdr,
diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c
index 6c8c3934bd7b..b316a4cb6f14 100644
--- a/net/batman-adv/distributed-arp-table.c
+++ b/net/batman-adv/distributed-arp-table.c
@@ -349,7 +349,7 @@ static void batadv_dbg_arp(struct batadv_priv *bat_priv, struct sk_buff *skb,
349 349
350 unicast_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data; 350 unicast_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data;
351 351
352 switch (unicast_4addr_packet->u.header.packet_type) { 352 switch (unicast_4addr_packet->u.packet_type) {
353 case BATADV_UNICAST: 353 case BATADV_UNICAST:
354 batadv_dbg(BATADV_DBG_DAT, bat_priv, 354 batadv_dbg(BATADV_DBG_DAT, bat_priv,
355 "* encapsulated within a UNICAST packet\n"); 355 "* encapsulated within a UNICAST packet\n");
@@ -374,7 +374,7 @@ static void batadv_dbg_arp(struct batadv_priv *bat_priv, struct sk_buff *skb,
374 break; 374 break;
375 default: 375 default:
376 batadv_dbg(BATADV_DBG_DAT, bat_priv, "* type: Unknown (%u)!\n", 376 batadv_dbg(BATADV_DBG_DAT, bat_priv, "* type: Unknown (%u)!\n",
377 unicast_4addr_packet->u.header.packet_type); 377 unicast_4addr_packet->u.packet_type);
378 } 378 }
379 break; 379 break;
380 case BATADV_BCAST: 380 case BATADV_BCAST:
@@ -387,7 +387,7 @@ static void batadv_dbg_arp(struct batadv_priv *bat_priv, struct sk_buff *skb,
387 default: 387 default:
388 batadv_dbg(BATADV_DBG_DAT, bat_priv, 388 batadv_dbg(BATADV_DBG_DAT, bat_priv,
389 "* encapsulated within an unknown packet type (0x%x)\n", 389 "* encapsulated within an unknown packet type (0x%x)\n",
390 unicast_4addr_packet->u.header.packet_type); 390 unicast_4addr_packet->u.packet_type);
391 } 391 }
392} 392}
393 393
diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c
index 271d321b3a04..6ddb6145ffb5 100644
--- a/net/batman-adv/fragmentation.c
+++ b/net/batman-adv/fragmentation.c
@@ -355,7 +355,7 @@ bool batadv_frag_skb_fwd(struct sk_buff *skb,
355 batadv_add_counter(bat_priv, BATADV_CNT_FRAG_FWD_BYTES, 355 batadv_add_counter(bat_priv, BATADV_CNT_FRAG_FWD_BYTES,
356 skb->len + ETH_HLEN); 356 skb->len + ETH_HLEN);
357 357
358 packet->header.ttl--; 358 packet->ttl--;
359 batadv_send_skb_packet(skb, neigh_node->if_incoming, 359 batadv_send_skb_packet(skb, neigh_node->if_incoming,
360 neigh_node->addr); 360 neigh_node->addr);
361 ret = true; 361 ret = true;
@@ -444,9 +444,9 @@ bool batadv_frag_send_packet(struct sk_buff *skb,
444 goto out_err; 444 goto out_err;
445 445
446 /* Create one header to be copied to all fragments */ 446 /* Create one header to be copied to all fragments */
447 frag_header.header.packet_type = BATADV_UNICAST_FRAG; 447 frag_header.packet_type = BATADV_UNICAST_FRAG;
448 frag_header.header.version = BATADV_COMPAT_VERSION; 448 frag_header.version = BATADV_COMPAT_VERSION;
449 frag_header.header.ttl = BATADV_TTL; 449 frag_header.ttl = BATADV_TTL;
450 frag_header.seqno = htons(atomic_inc_return(&bat_priv->frag_seqno)); 450 frag_header.seqno = htons(atomic_inc_return(&bat_priv->frag_seqno));
451 frag_header.reserved = 0; 451 frag_header.reserved = 0;
452 frag_header.no = 0; 452 frag_header.no = 0;
diff --git a/net/batman-adv/icmp_socket.c b/net/batman-adv/icmp_socket.c
index 29ae4efe3543..130cc3217e2b 100644
--- a/net/batman-adv/icmp_socket.c
+++ b/net/batman-adv/icmp_socket.c
@@ -194,7 +194,7 @@ static ssize_t batadv_socket_write(struct file *file, const char __user *buff,
194 goto free_skb; 194 goto free_skb;
195 } 195 }
196 196
197 if (icmp_header->header.packet_type != BATADV_ICMP) { 197 if (icmp_header->packet_type != BATADV_ICMP) {
198 batadv_dbg(BATADV_DBG_BATMAN, bat_priv, 198 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
199 "Error - can't send packet from char device: got bogus packet type (expected: BAT_ICMP)\n"); 199 "Error - can't send packet from char device: got bogus packet type (expected: BAT_ICMP)\n");
200 len = -EINVAL; 200 len = -EINVAL;
@@ -243,9 +243,9 @@ static ssize_t batadv_socket_write(struct file *file, const char __user *buff,
243 243
244 icmp_header->uid = socket_client->index; 244 icmp_header->uid = socket_client->index;
245 245
246 if (icmp_header->header.version != BATADV_COMPAT_VERSION) { 246 if (icmp_header->version != BATADV_COMPAT_VERSION) {
247 icmp_header->msg_type = BATADV_PARAMETER_PROBLEM; 247 icmp_header->msg_type = BATADV_PARAMETER_PROBLEM;
248 icmp_header->header.version = BATADV_COMPAT_VERSION; 248 icmp_header->version = BATADV_COMPAT_VERSION;
249 batadv_socket_add_packet(socket_client, icmp_header, 249 batadv_socket_add_packet(socket_client, icmp_header,
250 packet_len); 250 packet_len);
251 goto free_skb; 251 goto free_skb;
diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c
index c51a5e568f0a..1511f64a6cea 100644
--- a/net/batman-adv/main.c
+++ b/net/batman-adv/main.c
@@ -383,17 +383,17 @@ int batadv_batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
383 383
384 batadv_ogm_packet = (struct batadv_ogm_packet *)skb->data; 384 batadv_ogm_packet = (struct batadv_ogm_packet *)skb->data;
385 385
386 if (batadv_ogm_packet->header.version != BATADV_COMPAT_VERSION) { 386 if (batadv_ogm_packet->version != BATADV_COMPAT_VERSION) {
387 batadv_dbg(BATADV_DBG_BATMAN, bat_priv, 387 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
388 "Drop packet: incompatible batman version (%i)\n", 388 "Drop packet: incompatible batman version (%i)\n",
389 batadv_ogm_packet->header.version); 389 batadv_ogm_packet->version);
390 goto err_free; 390 goto err_free;
391 } 391 }
392 392
393 /* all receive handlers return whether they received or reused 393 /* all receive handlers return whether they received or reused
394 * the supplied skb. if not, we have to free the skb. 394 * the supplied skb. if not, we have to free the skb.
395 */ 395 */
396 idx = batadv_ogm_packet->header.packet_type; 396 idx = batadv_ogm_packet->packet_type;
397 ret = (*batadv_rx_handler[idx])(skb, hard_iface); 397 ret = (*batadv_rx_handler[idx])(skb, hard_iface);
398 398
399 if (ret == NET_RX_DROP) 399 if (ret == NET_RX_DROP)
@@ -426,8 +426,8 @@ static void batadv_recv_handler_init(void)
426 BUILD_BUG_ON(offsetof(struct batadv_unicast_packet, dest) != 4); 426 BUILD_BUG_ON(offsetof(struct batadv_unicast_packet, dest) != 4);
427 BUILD_BUG_ON(offsetof(struct batadv_unicast_tvlv_packet, dst) != 4); 427 BUILD_BUG_ON(offsetof(struct batadv_unicast_tvlv_packet, dst) != 4);
428 BUILD_BUG_ON(offsetof(struct batadv_frag_packet, dest) != 4); 428 BUILD_BUG_ON(offsetof(struct batadv_frag_packet, dest) != 4);
429 BUILD_BUG_ON(offsetof(struct batadv_icmp_packet, icmph.dst) != 4); 429 BUILD_BUG_ON(offsetof(struct batadv_icmp_packet, dst) != 4);
430 BUILD_BUG_ON(offsetof(struct batadv_icmp_packet_rr, icmph.dst) != 4); 430 BUILD_BUG_ON(offsetof(struct batadv_icmp_packet_rr, dst) != 4);
431 431
432 /* broadcast packet */ 432 /* broadcast packet */
433 batadv_rx_handler[BATADV_BCAST] = batadv_recv_bcast_packet; 433 batadv_rx_handler[BATADV_BCAST] = batadv_recv_bcast_packet;
@@ -1119,9 +1119,9 @@ void batadv_tvlv_unicast_send(struct batadv_priv *bat_priv, uint8_t *src,
1119 skb_reserve(skb, ETH_HLEN); 1119 skb_reserve(skb, ETH_HLEN);
1120 tvlv_buff = skb_put(skb, sizeof(*unicast_tvlv_packet) + tvlv_len); 1120 tvlv_buff = skb_put(skb, sizeof(*unicast_tvlv_packet) + tvlv_len);
1121 unicast_tvlv_packet = (struct batadv_unicast_tvlv_packet *)tvlv_buff; 1121 unicast_tvlv_packet = (struct batadv_unicast_tvlv_packet *)tvlv_buff;
1122 unicast_tvlv_packet->header.packet_type = BATADV_UNICAST_TVLV; 1122 unicast_tvlv_packet->packet_type = BATADV_UNICAST_TVLV;
1123 unicast_tvlv_packet->header.version = BATADV_COMPAT_VERSION; 1123 unicast_tvlv_packet->version = BATADV_COMPAT_VERSION;
1124 unicast_tvlv_packet->header.ttl = BATADV_TTL; 1124 unicast_tvlv_packet->ttl = BATADV_TTL;
1125 unicast_tvlv_packet->reserved = 0; 1125 unicast_tvlv_packet->reserved = 0;
1126 unicast_tvlv_packet->tvlv_len = htons(tvlv_len); 1126 unicast_tvlv_packet->tvlv_len = htons(tvlv_len);
1127 unicast_tvlv_packet->align = 0; 1127 unicast_tvlv_packet->align = 0;
diff --git a/net/batman-adv/network-coding.c b/net/batman-adv/network-coding.c
index 351e199bc0af..511d7e1eea38 100644
--- a/net/batman-adv/network-coding.c
+++ b/net/batman-adv/network-coding.c
@@ -722,7 +722,7 @@ static bool batadv_can_nc_with_orig(struct batadv_priv *bat_priv,
722{ 722{
723 if (orig_node->last_real_seqno != ntohl(ogm_packet->seqno)) 723 if (orig_node->last_real_seqno != ntohl(ogm_packet->seqno))
724 return false; 724 return false;
725 if (orig_node->last_ttl != ogm_packet->header.ttl + 1) 725 if (orig_node->last_ttl != ogm_packet->ttl + 1)
726 return false; 726 return false;
727 if (!batadv_compare_eth(ogm_packet->orig, ogm_packet->prev_sender)) 727 if (!batadv_compare_eth(ogm_packet->orig, ogm_packet->prev_sender))
728 return false; 728 return false;
@@ -1082,9 +1082,9 @@ static bool batadv_nc_code_packets(struct batadv_priv *bat_priv,
1082 coded_packet = (struct batadv_coded_packet *)skb_dest->data; 1082 coded_packet = (struct batadv_coded_packet *)skb_dest->data;
1083 skb_reset_mac_header(skb_dest); 1083 skb_reset_mac_header(skb_dest);
1084 1084
1085 coded_packet->header.packet_type = BATADV_CODED; 1085 coded_packet->packet_type = BATADV_CODED;
1086 coded_packet->header.version = BATADV_COMPAT_VERSION; 1086 coded_packet->version = BATADV_COMPAT_VERSION;
1087 coded_packet->header.ttl = packet1->header.ttl; 1087 coded_packet->ttl = packet1->ttl;
1088 1088
1089 /* Info about first unicast packet */ 1089 /* Info about first unicast packet */
1090 memcpy(coded_packet->first_source, first_source, ETH_ALEN); 1090 memcpy(coded_packet->first_source, first_source, ETH_ALEN);
@@ -1097,7 +1097,7 @@ static bool batadv_nc_code_packets(struct batadv_priv *bat_priv,
1097 memcpy(coded_packet->second_source, second_source, ETH_ALEN); 1097 memcpy(coded_packet->second_source, second_source, ETH_ALEN);
1098 memcpy(coded_packet->second_orig_dest, packet2->dest, ETH_ALEN); 1098 memcpy(coded_packet->second_orig_dest, packet2->dest, ETH_ALEN);
1099 coded_packet->second_crc = packet_id2; 1099 coded_packet->second_crc = packet_id2;
1100 coded_packet->second_ttl = packet2->header.ttl; 1100 coded_packet->second_ttl = packet2->ttl;
1101 coded_packet->second_ttvn = packet2->ttvn; 1101 coded_packet->second_ttvn = packet2->ttvn;
1102 coded_packet->coded_len = htons(coding_len); 1102 coded_packet->coded_len = htons(coding_len);
1103 1103
@@ -1452,7 +1452,7 @@ bool batadv_nc_skb_forward(struct sk_buff *skb,
1452 /* We only handle unicast packets */ 1452 /* We only handle unicast packets */
1453 payload = skb_network_header(skb); 1453 payload = skb_network_header(skb);
1454 packet = (struct batadv_unicast_packet *)payload; 1454 packet = (struct batadv_unicast_packet *)payload;
1455 if (packet->header.packet_type != BATADV_UNICAST) 1455 if (packet->packet_type != BATADV_UNICAST)
1456 goto out; 1456 goto out;
1457 1457
1458 /* Try to find a coding opportunity and send the skb if one is found */ 1458 /* Try to find a coding opportunity and send the skb if one is found */
@@ -1505,7 +1505,7 @@ void batadv_nc_skb_store_for_decoding(struct batadv_priv *bat_priv,
1505 /* Check for supported packet type */ 1505 /* Check for supported packet type */
1506 payload = skb_network_header(skb); 1506 payload = skb_network_header(skb);
1507 packet = (struct batadv_unicast_packet *)payload; 1507 packet = (struct batadv_unicast_packet *)payload;
1508 if (packet->header.packet_type != BATADV_UNICAST) 1508 if (packet->packet_type != BATADV_UNICAST)
1509 goto out; 1509 goto out;
1510 1510
1511 /* Find existing nc_path or create a new */ 1511 /* Find existing nc_path or create a new */
@@ -1623,7 +1623,7 @@ batadv_nc_skb_decode_packet(struct batadv_priv *bat_priv, struct sk_buff *skb,
1623 ttvn = coded_packet_tmp.second_ttvn; 1623 ttvn = coded_packet_tmp.second_ttvn;
1624 } else { 1624 } else {
1625 orig_dest = coded_packet_tmp.first_orig_dest; 1625 orig_dest = coded_packet_tmp.first_orig_dest;
1626 ttl = coded_packet_tmp.header.ttl; 1626 ttl = coded_packet_tmp.ttl;
1627 ttvn = coded_packet_tmp.first_ttvn; 1627 ttvn = coded_packet_tmp.first_ttvn;
1628 } 1628 }
1629 1629
@@ -1648,9 +1648,9 @@ batadv_nc_skb_decode_packet(struct batadv_priv *bat_priv, struct sk_buff *skb,
1648 1648
1649 /* Create decoded unicast packet */ 1649 /* Create decoded unicast packet */
1650 unicast_packet = (struct batadv_unicast_packet *)skb->data; 1650 unicast_packet = (struct batadv_unicast_packet *)skb->data;
1651 unicast_packet->header.packet_type = BATADV_UNICAST; 1651 unicast_packet->packet_type = BATADV_UNICAST;
1652 unicast_packet->header.version = BATADV_COMPAT_VERSION; 1652 unicast_packet->version = BATADV_COMPAT_VERSION;
1653 unicast_packet->header.ttl = ttl; 1653 unicast_packet->ttl = ttl;
1654 memcpy(unicast_packet->dest, orig_dest, ETH_ALEN); 1654 memcpy(unicast_packet->dest, orig_dest, ETH_ALEN);
1655 unicast_packet->ttvn = ttvn; 1655 unicast_packet->ttvn = ttvn;
1656 1656
diff --git a/net/batman-adv/packet.h b/net/batman-adv/packet.h
index 207459b62966..2dd8f2422550 100644
--- a/net/batman-adv/packet.h
+++ b/net/batman-adv/packet.h
@@ -155,6 +155,7 @@ enum batadv_tvlv_type {
155 BATADV_TVLV_ROAM = 0x05, 155 BATADV_TVLV_ROAM = 0x05,
156}; 156};
157 157
158#pragma pack(2)
158/* the destination hardware field in the ARP frame is used to 159/* the destination hardware field in the ARP frame is used to
159 * transport the claim type and the group id 160 * transport the claim type and the group id
160 */ 161 */
@@ -163,24 +164,20 @@ struct batadv_bla_claim_dst {
163 uint8_t type; /* bla_claimframe */ 164 uint8_t type; /* bla_claimframe */
164 __be16 group; /* group id */ 165 __be16 group; /* group id */
165}; 166};
166 167#pragma pack()
167struct batadv_header {
168 uint8_t packet_type;
169 uint8_t version; /* batman version field */
170 uint8_t ttl;
171 /* the parent struct has to add a byte after the header to make
172 * everything 4 bytes aligned again
173 */
174};
175 168
176/** 169/**
177 * struct batadv_ogm_packet - ogm (routing protocol) packet 170 * struct batadv_ogm_packet - ogm (routing protocol) packet
178 * @header: common batman packet header 171 * @packet_type: batman-adv packet type, part of the general header
172 * @version: batman-adv protocol version, part of the genereal header
173 * @ttl: time to live for this packet, part of the genereal header
179 * @flags: contains routing relevant flags - see enum batadv_iv_flags 174 * @flags: contains routing relevant flags - see enum batadv_iv_flags
180 * @tvlv_len: length of tvlv data following the ogm header 175 * @tvlv_len: length of tvlv data following the ogm header
181 */ 176 */
182struct batadv_ogm_packet { 177struct batadv_ogm_packet {
183 struct batadv_header header; 178 uint8_t packet_type;
179 uint8_t version;
180 uint8_t ttl;
184 uint8_t flags; 181 uint8_t flags;
185 __be32 seqno; 182 __be32 seqno;
186 uint8_t orig[ETH_ALEN]; 183 uint8_t orig[ETH_ALEN];
@@ -196,29 +193,51 @@ struct batadv_ogm_packet {
196#define BATADV_OGM_HLEN sizeof(struct batadv_ogm_packet) 193#define BATADV_OGM_HLEN sizeof(struct batadv_ogm_packet)
197 194
198/** 195/**
199 * batadv_icmp_header - common ICMP header 196 * batadv_icmp_header - common members among all the ICMP packets
200 * @header: common batman header 197 * @packet_type: batman-adv packet type, part of the general header
198 * @version: batman-adv protocol version, part of the genereal header
199 * @ttl: time to live for this packet, part of the genereal header
201 * @msg_type: ICMP packet type 200 * @msg_type: ICMP packet type
202 * @dst: address of the destination node 201 * @dst: address of the destination node
203 * @orig: address of the source node 202 * @orig: address of the source node
204 * @uid: local ICMP socket identifier 203 * @uid: local ICMP socket identifier
204 * @align: not used - useful for alignment purposes only
205 *
206 * This structure is used for ICMP packets parsing only and it is never sent
207 * over the wire. The alignment field at the end is there to ensure that
208 * members are padded the same way as they are in real packets.
205 */ 209 */
206struct batadv_icmp_header { 210struct batadv_icmp_header {
207 struct batadv_header header; 211 uint8_t packet_type;
212 uint8_t version;
213 uint8_t ttl;
208 uint8_t msg_type; /* see ICMP message types above */ 214 uint8_t msg_type; /* see ICMP message types above */
209 uint8_t dst[ETH_ALEN]; 215 uint8_t dst[ETH_ALEN];
210 uint8_t orig[ETH_ALEN]; 216 uint8_t orig[ETH_ALEN];
211 uint8_t uid; 217 uint8_t uid;
218 uint8_t align[3];
212}; 219};
213 220
214/** 221/**
215 * batadv_icmp_packet - ICMP packet 222 * batadv_icmp_packet - ICMP packet
216 * @icmph: common ICMP header 223 * @packet_type: batman-adv packet type, part of the general header
224 * @version: batman-adv protocol version, part of the genereal header
225 * @ttl: time to live for this packet, part of the genereal header
226 * @msg_type: ICMP packet type
227 * @dst: address of the destination node
228 * @orig: address of the source node
229 * @uid: local ICMP socket identifier
217 * @reserved: not used - useful for alignment 230 * @reserved: not used - useful for alignment
218 * @seqno: ICMP sequence number 231 * @seqno: ICMP sequence number
219 */ 232 */
220struct batadv_icmp_packet { 233struct batadv_icmp_packet {
221 struct batadv_icmp_header icmph; 234 uint8_t packet_type;
235 uint8_t version;
236 uint8_t ttl;
237 uint8_t msg_type; /* see ICMP message types above */
238 uint8_t dst[ETH_ALEN];
239 uint8_t orig[ETH_ALEN];
240 uint8_t uid;
222 uint8_t reserved; 241 uint8_t reserved;
223 __be16 seqno; 242 __be16 seqno;
224}; 243};
@@ -227,13 +246,25 @@ struct batadv_icmp_packet {
227 246
228/** 247/**
229 * batadv_icmp_packet_rr - ICMP RouteRecord packet 248 * batadv_icmp_packet_rr - ICMP RouteRecord packet
230 * @icmph: common ICMP header 249 * @packet_type: batman-adv packet type, part of the general header
250 * @version: batman-adv protocol version, part of the genereal header
251 * @ttl: time to live for this packet, part of the genereal header
252 * @msg_type: ICMP packet type
253 * @dst: address of the destination node
254 * @orig: address of the source node
255 * @uid: local ICMP socket identifier
231 * @rr_cur: number of entries the rr array 256 * @rr_cur: number of entries the rr array
232 * @seqno: ICMP sequence number 257 * @seqno: ICMP sequence number
233 * @rr: route record array 258 * @rr: route record array
234 */ 259 */
235struct batadv_icmp_packet_rr { 260struct batadv_icmp_packet_rr {
236 struct batadv_icmp_header icmph; 261 uint8_t packet_type;
262 uint8_t version;
263 uint8_t ttl;
264 uint8_t msg_type; /* see ICMP message types above */
265 uint8_t dst[ETH_ALEN];
266 uint8_t orig[ETH_ALEN];
267 uint8_t uid;
237 uint8_t rr_cur; 268 uint8_t rr_cur;
238 __be16 seqno; 269 __be16 seqno;
239 uint8_t rr[BATADV_RR_LEN][ETH_ALEN]; 270 uint8_t rr[BATADV_RR_LEN][ETH_ALEN];
@@ -253,8 +284,18 @@ struct batadv_icmp_packet_rr {
253 */ 284 */
254#pragma pack(2) 285#pragma pack(2)
255 286
287/**
288 * struct batadv_unicast_packet - unicast packet for network payload
289 * @packet_type: batman-adv packet type, part of the general header
290 * @version: batman-adv protocol version, part of the genereal header
291 * @ttl: time to live for this packet, part of the genereal header
292 * @ttvn: translation table version number
293 * @dest: originator destination of the unicast packet
294 */
256struct batadv_unicast_packet { 295struct batadv_unicast_packet {
257 struct batadv_header header; 296 uint8_t packet_type;
297 uint8_t version;
298 uint8_t ttl;
258 uint8_t ttvn; /* destination translation table version number */ 299 uint8_t ttvn; /* destination translation table version number */
259 uint8_t dest[ETH_ALEN]; 300 uint8_t dest[ETH_ALEN];
260 /* "4 bytes boundary + 2 bytes" long to make the payload after the 301 /* "4 bytes boundary + 2 bytes" long to make the payload after the
@@ -280,7 +321,9 @@ struct batadv_unicast_4addr_packet {
280 321
281/** 322/**
282 * struct batadv_frag_packet - fragmented packet 323 * struct batadv_frag_packet - fragmented packet
283 * @header: common batman packet header with type, compatversion, and ttl 324 * @packet_type: batman-adv packet type, part of the general header
325 * @version: batman-adv protocol version, part of the genereal header
326 * @ttl: time to live for this packet, part of the genereal header
284 * @dest: final destination used when routing fragments 327 * @dest: final destination used when routing fragments
285 * @orig: originator of the fragment used when merging the packet 328 * @orig: originator of the fragment used when merging the packet
286 * @no: fragment number within this sequence 329 * @no: fragment number within this sequence
@@ -289,7 +332,9 @@ struct batadv_unicast_4addr_packet {
289 * @total_size: size of the merged packet 332 * @total_size: size of the merged packet
290 */ 333 */
291struct batadv_frag_packet { 334struct batadv_frag_packet {
292 struct batadv_header header; 335 uint8_t packet_type;
336 uint8_t version; /* batman version field */
337 uint8_t ttl;
293#if defined(__BIG_ENDIAN_BITFIELD) 338#if defined(__BIG_ENDIAN_BITFIELD)
294 uint8_t no:4; 339 uint8_t no:4;
295 uint8_t reserved:4; 340 uint8_t reserved:4;
@@ -305,8 +350,19 @@ struct batadv_frag_packet {
305 __be16 total_size; 350 __be16 total_size;
306}; 351};
307 352
353/**
354 * struct batadv_bcast_packet - broadcast packet for network payload
355 * @packet_type: batman-adv packet type, part of the general header
356 * @version: batman-adv protocol version, part of the genereal header
357 * @ttl: time to live for this packet, part of the genereal header
358 * @reserved: reserved byte for alignment
359 * @seqno: sequence identification
360 * @orig: originator of the broadcast packet
361 */
308struct batadv_bcast_packet { 362struct batadv_bcast_packet {
309 struct batadv_header header; 363 uint8_t packet_type;
364 uint8_t version; /* batman version field */
365 uint8_t ttl;
310 uint8_t reserved; 366 uint8_t reserved;
311 __be32 seqno; 367 __be32 seqno;
312 uint8_t orig[ETH_ALEN]; 368 uint8_t orig[ETH_ALEN];
@@ -315,11 +371,11 @@ struct batadv_bcast_packet {
315 */ 371 */
316}; 372};
317 373
318#pragma pack()
319
320/** 374/**
321 * struct batadv_coded_packet - network coded packet 375 * struct batadv_coded_packet - network coded packet
322 * @header: common batman packet header and ttl of first included packet 376 * @packet_type: batman-adv packet type, part of the general header
377 * @version: batman-adv protocol version, part of the genereal header
378 * @ttl: time to live for this packet, part of the genereal header
323 * @reserved: Align following fields to 2-byte boundaries 379 * @reserved: Align following fields to 2-byte boundaries
324 * @first_source: original source of first included packet 380 * @first_source: original source of first included packet
325 * @first_orig_dest: original destinal of first included packet 381 * @first_orig_dest: original destinal of first included packet
@@ -334,7 +390,9 @@ struct batadv_bcast_packet {
334 * @coded_len: length of network coded part of the payload 390 * @coded_len: length of network coded part of the payload
335 */ 391 */
336struct batadv_coded_packet { 392struct batadv_coded_packet {
337 struct batadv_header header; 393 uint8_t packet_type;
394 uint8_t version; /* batman version field */
395 uint8_t ttl;
338 uint8_t first_ttvn; 396 uint8_t first_ttvn;
339 /* uint8_t first_dest[ETH_ALEN]; - saved in mac header destination */ 397 /* uint8_t first_dest[ETH_ALEN]; - saved in mac header destination */
340 uint8_t first_source[ETH_ALEN]; 398 uint8_t first_source[ETH_ALEN];
@@ -349,9 +407,13 @@ struct batadv_coded_packet {
349 __be16 coded_len; 407 __be16 coded_len;
350}; 408};
351 409
410#pragma pack()
411
352/** 412/**
353 * struct batadv_unicast_tvlv - generic unicast packet with tvlv payload 413 * struct batadv_unicast_tvlv - generic unicast packet with tvlv payload
354 * @header: common batman packet header 414 * @packet_type: batman-adv packet type, part of the general header
415 * @version: batman-adv protocol version, part of the genereal header
416 * @ttl: time to live for this packet, part of the genereal header
355 * @reserved: reserved field (for packet alignment) 417 * @reserved: reserved field (for packet alignment)
356 * @src: address of the source 418 * @src: address of the source
357 * @dst: address of the destination 419 * @dst: address of the destination
@@ -359,7 +421,9 @@ struct batadv_coded_packet {
359 * @align: 2 bytes to align the header to a 4 byte boundry 421 * @align: 2 bytes to align the header to a 4 byte boundry
360 */ 422 */
361struct batadv_unicast_tvlv_packet { 423struct batadv_unicast_tvlv_packet {
362 struct batadv_header header; 424 uint8_t packet_type;
425 uint8_t version; /* batman version field */
426 uint8_t ttl;
363 uint8_t reserved; 427 uint8_t reserved;
364 uint8_t dst[ETH_ALEN]; 428 uint8_t dst[ETH_ALEN];
365 uint8_t src[ETH_ALEN]; 429 uint8_t src[ETH_ALEN];
@@ -420,13 +484,13 @@ struct batadv_tvlv_tt_vlan_data {
420 * struct batadv_tvlv_tt_change - translation table diff data 484 * struct batadv_tvlv_tt_change - translation table diff data
421 * @flags: status indicators concerning the non-mesh client (see 485 * @flags: status indicators concerning the non-mesh client (see
422 * batadv_tt_client_flags) 486 * batadv_tt_client_flags)
423 * @reserved: reserved field 487 * @reserved: reserved field - useful for alignment purposes only
424 * @addr: mac address of non-mesh client that triggered this tt change 488 * @addr: mac address of non-mesh client that triggered this tt change
425 * @vid: VLAN identifier 489 * @vid: VLAN identifier
426 */ 490 */
427struct batadv_tvlv_tt_change { 491struct batadv_tvlv_tt_change {
428 uint8_t flags; 492 uint8_t flags;
429 uint8_t reserved; 493 uint8_t reserved[3];
430 uint8_t addr[ETH_ALEN]; 494 uint8_t addr[ETH_ALEN];
431 __be16 vid; 495 __be16 vid;
432}; 496};
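The structs above now carry the three common bytes (packet_type, version, ttl) directly instead of an embedded struct batadv_header, and they sit inside the #pragma pack region so the on-wire layout stays byte-exact. A minimal userspace sketch that mirrors this layout and checks the expected offsets; the pack(2) value and the ETH_ALEN of 6 are assumptions for illustration, not copied from packet.h:

#include <stdint.h>
#include <stddef.h>
#include <assert.h>

#define ETH_ALEN 6                      /* assumed Ethernet address length */

#pragma pack(2)                         /* assumed packing, mirroring the region above */
struct bcast_packet_sketch {
	uint8_t  packet_type;           /* general header byte 0 */
	uint8_t  version;               /* general header byte 1 */
	uint8_t  ttl;                   /* general header byte 2 */
	uint8_t  reserved;              /* keeps seqno 4-byte aligned */
	uint32_t seqno;                 /* big-endian on the wire */
	uint8_t  orig[ETH_ALEN];
};
#pragma pack()

int main(void)
{
	/* with the header flattened, ttl sits at byte 2 of every packet type */
	assert(offsetof(struct bcast_packet_sketch, ttl) == 2);
	assert(offsetof(struct bcast_packet_sketch, seqno) == 4);
	assert(sizeof(struct bcast_packet_sketch) == 14);
	return 0;
}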
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index d4114d775ad6..46278bfb8fdb 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -308,7 +308,7 @@ static int batadv_recv_my_icmp_packet(struct batadv_priv *bat_priv,
308 memcpy(icmph->dst, icmph->orig, ETH_ALEN); 308 memcpy(icmph->dst, icmph->orig, ETH_ALEN);
309 memcpy(icmph->orig, primary_if->net_dev->dev_addr, ETH_ALEN); 309 memcpy(icmph->orig, primary_if->net_dev->dev_addr, ETH_ALEN);
310 icmph->msg_type = BATADV_ECHO_REPLY; 310 icmph->msg_type = BATADV_ECHO_REPLY;
311 icmph->header.ttl = BATADV_TTL; 311 icmph->ttl = BATADV_TTL;
312 312
313 res = batadv_send_skb_to_orig(skb, orig_node, NULL); 313 res = batadv_send_skb_to_orig(skb, orig_node, NULL);
314 if (res != NET_XMIT_DROP) 314 if (res != NET_XMIT_DROP)
@@ -338,9 +338,9 @@ static int batadv_recv_icmp_ttl_exceeded(struct batadv_priv *bat_priv,
338 icmp_packet = (struct batadv_icmp_packet *)skb->data; 338 icmp_packet = (struct batadv_icmp_packet *)skb->data;
339 339
340 /* send TTL exceeded if packet is an echo request (traceroute) */ 340 /* send TTL exceeded if packet is an echo request (traceroute) */
341 if (icmp_packet->icmph.msg_type != BATADV_ECHO_REQUEST) { 341 if (icmp_packet->msg_type != BATADV_ECHO_REQUEST) {
342 pr_debug("Warning - can't forward icmp packet from %pM to %pM: ttl exceeded\n", 342 pr_debug("Warning - can't forward icmp packet from %pM to %pM: ttl exceeded\n",
343 icmp_packet->icmph.orig, icmp_packet->icmph.dst); 343 icmp_packet->orig, icmp_packet->dst);
344 goto out; 344 goto out;
345 } 345 }
346 346
@@ -349,7 +349,7 @@ static int batadv_recv_icmp_ttl_exceeded(struct batadv_priv *bat_priv,
349 goto out; 349 goto out;
350 350
351 /* get routing information */ 351 /* get routing information */
352 orig_node = batadv_orig_hash_find(bat_priv, icmp_packet->icmph.orig); 352 orig_node = batadv_orig_hash_find(bat_priv, icmp_packet->orig);
353 if (!orig_node) 353 if (!orig_node)
354 goto out; 354 goto out;
355 355
@@ -359,11 +359,11 @@ static int batadv_recv_icmp_ttl_exceeded(struct batadv_priv *bat_priv,
359 359
360 icmp_packet = (struct batadv_icmp_packet *)skb->data; 360 icmp_packet = (struct batadv_icmp_packet *)skb->data;
361 361
362 memcpy(icmp_packet->icmph.dst, icmp_packet->icmph.orig, ETH_ALEN); 362 memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
363 memcpy(icmp_packet->icmph.orig, primary_if->net_dev->dev_addr, 363 memcpy(icmp_packet->orig, primary_if->net_dev->dev_addr,
364 ETH_ALEN); 364 ETH_ALEN);
365 icmp_packet->icmph.msg_type = BATADV_TTL_EXCEEDED; 365 icmp_packet->msg_type = BATADV_TTL_EXCEEDED;
366 icmp_packet->icmph.header.ttl = BATADV_TTL; 366 icmp_packet->ttl = BATADV_TTL;
367 367
368 if (batadv_send_skb_to_orig(skb, orig_node, NULL) != NET_XMIT_DROP) 368 if (batadv_send_skb_to_orig(skb, orig_node, NULL) != NET_XMIT_DROP)
369 ret = NET_RX_SUCCESS; 369 ret = NET_RX_SUCCESS;
@@ -434,7 +434,7 @@ int batadv_recv_icmp_packet(struct sk_buff *skb,
434 return batadv_recv_my_icmp_packet(bat_priv, skb); 434 return batadv_recv_my_icmp_packet(bat_priv, skb);
435 435
436 /* TTL exceeded */ 436 /* TTL exceeded */
437 if (icmph->header.ttl < 2) 437 if (icmph->ttl < 2)
438 return batadv_recv_icmp_ttl_exceeded(bat_priv, skb); 438 return batadv_recv_icmp_ttl_exceeded(bat_priv, skb);
439 439
440 /* get routing information */ 440 /* get routing information */
@@ -449,7 +449,7 @@ int batadv_recv_icmp_packet(struct sk_buff *skb,
449 icmph = (struct batadv_icmp_header *)skb->data; 449 icmph = (struct batadv_icmp_header *)skb->data;
450 450
451 /* decrement ttl */ 451 /* decrement ttl */
452 icmph->header.ttl--; 452 icmph->ttl--;
453 453
454 /* route it */ 454 /* route it */
455 if (batadv_send_skb_to_orig(skb, orig_node, recv_if) != NET_XMIT_DROP) 455 if (batadv_send_skb_to_orig(skb, orig_node, recv_if) != NET_XMIT_DROP)
@@ -709,7 +709,7 @@ static int batadv_route_unicast_packet(struct sk_buff *skb,
709 unicast_packet = (struct batadv_unicast_packet *)skb->data; 709 unicast_packet = (struct batadv_unicast_packet *)skb->data;
710 710
711 /* TTL exceeded */ 711 /* TTL exceeded */
712 if (unicast_packet->header.ttl < 2) { 712 if (unicast_packet->ttl < 2) {
713 pr_debug("Warning - can't forward unicast packet from %pM to %pM: ttl exceeded\n", 713 pr_debug("Warning - can't forward unicast packet from %pM to %pM: ttl exceeded\n",
714 ethhdr->h_source, unicast_packet->dest); 714 ethhdr->h_source, unicast_packet->dest);
715 goto out; 715 goto out;
@@ -727,9 +727,9 @@ static int batadv_route_unicast_packet(struct sk_buff *skb,
727 727
728 /* decrement ttl */ 728 /* decrement ttl */
729 unicast_packet = (struct batadv_unicast_packet *)skb->data; 729 unicast_packet = (struct batadv_unicast_packet *)skb->data;
730 unicast_packet->header.ttl--; 730 unicast_packet->ttl--;
731 731
732 switch (unicast_packet->header.packet_type) { 732 switch (unicast_packet->packet_type) {
733 case BATADV_UNICAST_4ADDR: 733 case BATADV_UNICAST_4ADDR:
734 hdr_len = sizeof(struct batadv_unicast_4addr_packet); 734 hdr_len = sizeof(struct batadv_unicast_4addr_packet);
735 break; 735 break;
@@ -970,7 +970,7 @@ int batadv_recv_unicast_packet(struct sk_buff *skb,
970 unicast_packet = (struct batadv_unicast_packet *)skb->data; 970 unicast_packet = (struct batadv_unicast_packet *)skb->data;
971 unicast_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data; 971 unicast_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data;
972 972
973 is4addr = unicast_packet->header.packet_type == BATADV_UNICAST_4ADDR; 973 is4addr = unicast_packet->packet_type == BATADV_UNICAST_4ADDR;
974 /* the caller function should have already pulled 2 bytes */ 974 /* the caller function should have already pulled 2 bytes */
975 if (is4addr) 975 if (is4addr)
976 hdr_size = sizeof(*unicast_4addr_packet); 976 hdr_size = sizeof(*unicast_4addr_packet);
@@ -1160,7 +1160,7 @@ int batadv_recv_bcast_packet(struct sk_buff *skb,
1160 if (batadv_is_my_mac(bat_priv, bcast_packet->orig)) 1160 if (batadv_is_my_mac(bat_priv, bcast_packet->orig))
1161 goto out; 1161 goto out;
1162 1162
1163 if (bcast_packet->header.ttl < 2) 1163 if (bcast_packet->ttl < 2)
1164 goto out; 1164 goto out;
1165 1165
1166 orig_node = batadv_orig_hash_find(bat_priv, bcast_packet->orig); 1166 orig_node = batadv_orig_hash_find(bat_priv, bcast_packet->orig);
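All of these call sites follow the same pattern once the ttl byte is a direct struct member: refuse to forward when fewer than two hops remain, otherwise decrement in place before re-sending. A small hedged helper sketch of that rule (the function name is invented for illustration):

#include <stdbool.h>
#include <stdint.h>

/* hypothetical helper capturing the forward-path TTL rule used above */
static bool ttl_allows_forwarding(uint8_t *ttl)
{
	if (*ttl < 2)           /* would expire on this hop: drop instead */
		return false;

	(*ttl)--;               /* consume one hop before re-sending */
	return true;
}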
diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c
index c83be5ebaa28..fba4dcfcfac2 100644
--- a/net/batman-adv/send.c
+++ b/net/batman-adv/send.c
@@ -161,11 +161,11 @@ batadv_send_skb_push_fill_unicast(struct sk_buff *skb, int hdr_size,
161 return false; 161 return false;
162 162
163 unicast_packet = (struct batadv_unicast_packet *)skb->data; 163 unicast_packet = (struct batadv_unicast_packet *)skb->data;
164 unicast_packet->header.version = BATADV_COMPAT_VERSION; 164 unicast_packet->version = BATADV_COMPAT_VERSION;
165 /* batman packet type: unicast */ 165 /* batman packet type: unicast */
166 unicast_packet->header.packet_type = BATADV_UNICAST; 166 unicast_packet->packet_type = BATADV_UNICAST;
167 /* set unicast ttl */ 167 /* set unicast ttl */
168 unicast_packet->header.ttl = BATADV_TTL; 168 unicast_packet->ttl = BATADV_TTL;
169 /* copy the destination for faster routing */ 169 /* copy the destination for faster routing */
170 memcpy(unicast_packet->dest, orig_node->orig, ETH_ALEN); 170 memcpy(unicast_packet->dest, orig_node->orig, ETH_ALEN);
171 /* set the destination tt version number */ 171 /* set the destination tt version number */
@@ -221,7 +221,7 @@ bool batadv_send_skb_prepare_unicast_4addr(struct batadv_priv *bat_priv,
221 goto out; 221 goto out;
222 222
223 uc_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data; 223 uc_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data;
224 uc_4addr_packet->u.header.packet_type = BATADV_UNICAST_4ADDR; 224 uc_4addr_packet->u.packet_type = BATADV_UNICAST_4ADDR;
225 memcpy(uc_4addr_packet->src, primary_if->net_dev->dev_addr, ETH_ALEN); 225 memcpy(uc_4addr_packet->src, primary_if->net_dev->dev_addr, ETH_ALEN);
226 uc_4addr_packet->subtype = packet_subtype; 226 uc_4addr_packet->subtype = packet_subtype;
227 uc_4addr_packet->reserved = 0; 227 uc_4addr_packet->reserved = 0;
@@ -436,7 +436,7 @@ int batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
436 436
437 /* as we have a copy now, it is safe to decrease the TTL */ 437 /* as we have a copy now, it is safe to decrease the TTL */
438 bcast_packet = (struct batadv_bcast_packet *)newskb->data; 438 bcast_packet = (struct batadv_bcast_packet *)newskb->data;
439 bcast_packet->header.ttl--; 439 bcast_packet->ttl--;
440 440
441 skb_reset_mac_header(newskb); 441 skb_reset_mac_header(newskb);
442 442
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index 36f050876f82..a8f99d1486c0 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -264,11 +264,11 @@ static int batadv_interface_tx(struct sk_buff *skb,
264 goto dropped; 264 goto dropped;
265 265
266 bcast_packet = (struct batadv_bcast_packet *)skb->data; 266 bcast_packet = (struct batadv_bcast_packet *)skb->data;
267 bcast_packet->header.version = BATADV_COMPAT_VERSION; 267 bcast_packet->version = BATADV_COMPAT_VERSION;
268 bcast_packet->header.ttl = BATADV_TTL; 268 bcast_packet->ttl = BATADV_TTL;
269 269
270 /* batman packet type: broadcast */ 270 /* batman packet type: broadcast */
271 bcast_packet->header.packet_type = BATADV_BCAST; 271 bcast_packet->packet_type = BATADV_BCAST;
272 bcast_packet->reserved = 0; 272 bcast_packet->reserved = 0;
273 273
274 /* hw address of first interface is the orig mac because only 274 /* hw address of first interface is the orig mac because only
@@ -328,7 +328,7 @@ void batadv_interface_rx(struct net_device *soft_iface,
328 struct sk_buff *skb, struct batadv_hard_iface *recv_if, 328 struct sk_buff *skb, struct batadv_hard_iface *recv_if,
329 int hdr_size, struct batadv_orig_node *orig_node) 329 int hdr_size, struct batadv_orig_node *orig_node)
330{ 330{
331 struct batadv_header *batadv_header = (struct batadv_header *)skb->data; 331 struct batadv_bcast_packet *batadv_bcast_packet;
332 struct batadv_priv *bat_priv = netdev_priv(soft_iface); 332 struct batadv_priv *bat_priv = netdev_priv(soft_iface);
333 __be16 ethertype = htons(ETH_P_BATMAN); 333 __be16 ethertype = htons(ETH_P_BATMAN);
334 struct vlan_ethhdr *vhdr; 334 struct vlan_ethhdr *vhdr;
@@ -336,7 +336,8 @@ void batadv_interface_rx(struct net_device *soft_iface,
336 unsigned short vid; 336 unsigned short vid;
337 bool is_bcast; 337 bool is_bcast;
338 338
339 is_bcast = (batadv_header->packet_type == BATADV_BCAST); 339 batadv_bcast_packet = (struct batadv_bcast_packet *)skb->data;
340 is_bcast = (batadv_bcast_packet->packet_type == BATADV_BCAST);
340 341
341 /* check if enough space is available for pulling, and pull */ 342 /* check if enough space is available for pulling, and pull */
342 if (!pskb_may_pull(skb, hdr_size)) 343 if (!pskb_may_pull(skb, hdr_size))
@@ -345,7 +346,12 @@ void batadv_interface_rx(struct net_device *soft_iface,
345 skb_pull_rcsum(skb, hdr_size); 346 skb_pull_rcsum(skb, hdr_size);
346 skb_reset_mac_header(skb); 347 skb_reset_mac_header(skb);
347 348
348 vid = batadv_get_vid(skb, hdr_size); 349 /* clean the netfilter state now that the batman-adv header has been
350 * removed
351 */
352 nf_reset(skb);
353
354 vid = batadv_get_vid(skb, 0);
349 ethhdr = eth_hdr(skb); 355 ethhdr = eth_hdr(skb);
350 356
351 switch (ntohs(ethhdr->h_proto)) { 357 switch (ntohs(ethhdr->h_proto)) {
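The receive path above reads the packet type straight from skb->data before anything is pulled, then strips the batman-adv header, clears netfilter state, and looks up the VLAN id at offset 0 of what is now a plain Ethernet frame. A condensed kernel-style sketch of that ordering, assuming the usual skb helpers, with error handling elided:

	struct batadv_bcast_packet *bcast = (struct batadv_bcast_packet *)skb->data;
	bool is_bcast = bcast->packet_type == BATADV_BCAST;

	if (!pskb_may_pull(skb, hdr_size))      /* header must be in linear data */
		goto dropped;

	skb_pull_rcsum(skb, hdr_size);          /* strip the batman-adv header */
	skb_reset_mac_header(skb);
	nf_reset(skb);                          /* conntrack state belonged to the tunnel frame */

	vid = batadv_get_vid(skb, 0);           /* inner frame now starts at offset 0 */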
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index 4add57d4857f..ff625fedbc5e 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -333,7 +333,8 @@ static void batadv_tt_local_event(struct batadv_priv *bat_priv,
333 return; 333 return;
334 334
335 tt_change_node->change.flags = flags; 335 tt_change_node->change.flags = flags;
336 tt_change_node->change.reserved = 0; 336 memset(tt_change_node->change.reserved, 0,
337 sizeof(tt_change_node->change.reserved));
337 memcpy(tt_change_node->change.addr, common->addr, ETH_ALEN); 338 memcpy(tt_change_node->change.addr, common->addr, ETH_ALEN);
338 tt_change_node->change.vid = htons(common->vid); 339 tt_change_node->change.vid = htons(common->vid);
339 340
@@ -2221,7 +2222,8 @@ static void batadv_tt_tvlv_generate(struct batadv_priv *bat_priv,
2221 ETH_ALEN); 2222 ETH_ALEN);
2222 tt_change->flags = tt_common_entry->flags; 2223 tt_change->flags = tt_common_entry->flags;
2223 tt_change->vid = htons(tt_common_entry->vid); 2224 tt_change->vid = htons(tt_common_entry->vid);
2224 tt_change->reserved = 0; 2225 memset(tt_change->reserved, 0,
2226 sizeof(tt_change->reserved));
2225 2227
2226 tt_num_entries++; 2228 tt_num_entries++;
2227 tt_change++; 2229 tt_change++;
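Because @reserved is now a three-byte array rather than a single byte, the zeroing has to cover sizeof() of the field instead of a plain assignment; those bytes are transmitted, so leaving them uninitialized would leak kernel memory. A minimal userspace sketch of the fill pattern (field widths assumed for illustration):

#include <string.h>
#include <stdint.h>

#define ETH_ALEN 6                      /* assumed */

struct tt_change_sketch {
	uint8_t  flags;
	uint8_t  reserved[3];           /* padding bytes that still travel on the wire */
	uint8_t  addr[ETH_ALEN];
	uint16_t vid;                   /* big-endian on the wire */
};

static void fill_change(struct tt_change_sketch *c, uint8_t flags,
			const uint8_t *addr, uint16_t vid_be)
{
	c->flags = flags;
	/* zero every reserved byte before the struct is queued for sending */
	memset(c->reserved, 0, sizeof(c->reserved));
	memcpy(c->addr, addr, ETH_ALEN);
	c->vid = vid_be;
}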
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index 6a6c8bb4fd72..7552f9e3089c 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -940,8 +940,22 @@ static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
940 bt_cb(skb)->pkt_type = *((unsigned char *) skb->data); 940 bt_cb(skb)->pkt_type = *((unsigned char *) skb->data);
941 skb_pull(skb, 1); 941 skb_pull(skb, 1);
942 942
943 if (hci_pi(sk)->channel == HCI_CHANNEL_RAW && 943 if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
944 bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) { 944 /* No permission check is needed for user channel
945 * since that gets enforced when binding the socket.
946 *
947 * However, check that the packet type is valid.
948 */
949 if (bt_cb(skb)->pkt_type != HCI_COMMAND_PKT &&
950 bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
951 bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
952 err = -EINVAL;
953 goto drop;
954 }
955
956 skb_queue_tail(&hdev->raw_q, skb);
957 queue_work(hdev->workqueue, &hdev->tx_work);
958 } else if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
945 u16 opcode = get_unaligned_le16(skb->data); 959 u16 opcode = get_unaligned_le16(skb->data);
946 u16 ogf = hci_opcode_ogf(opcode); 960 u16 ogf = hci_opcode_ogf(opcode);
947 u16 ocf = hci_opcode_ocf(opcode); 961 u16 ocf = hci_opcode_ocf(opcode);
@@ -972,14 +986,6 @@ static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
972 goto drop; 986 goto drop;
973 } 987 }
974 988
975 if (hci_pi(sk)->channel == HCI_CHANNEL_USER &&
976 bt_cb(skb)->pkt_type != HCI_COMMAND_PKT &&
977 bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
978 bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
979 err = -EINVAL;
980 goto drop;
981 }
982
983 skb_queue_tail(&hdev->raw_q, skb); 989 skb_queue_tail(&hdev->raw_q, skb);
984 queue_work(hdev->workqueue, &hdev->tx_work); 990 queue_work(hdev->workqueue, &hdev->tx_work);
985 } 991 }
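The user-channel branch now accepts exactly three packet types and rejects everything else with -EINVAL, while skipping the per-opcode permission checks that the raw channel still applies to commands. A small hedged sketch of that validation as a standalone predicate; the H:4 indicator values are assumptions for illustration:

#include <stdbool.h>
#include <stdint.h>

#define PKT_COMMAND 0x01        /* assumed H:4 packet indicators */
#define PKT_ACLDATA 0x02
#define PKT_SCODATA 0x03

/* user channel: no per-opcode permission checks, but the type must be sane */
static bool user_channel_pkt_type_ok(uint8_t pkt_type)
{
	switch (pkt_type) {
	case PKT_COMMAND:
	case PKT_ACLDATA:
	case PKT_SCODATA:
		return true;
	default:
		return false;   /* caller maps this to -EINVAL and drops */
	}
}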
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 4c214b2b88ef..ef66365b7354 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -1998,7 +1998,7 @@ int br_multicast_set_hash_max(struct net_bridge *br, unsigned long val)
1998 u32 old; 1998 u32 old;
1999 struct net_bridge_mdb_htable *mdb; 1999 struct net_bridge_mdb_htable *mdb;
2000 2000
2001 spin_lock(&br->multicast_lock); 2001 spin_lock_bh(&br->multicast_lock);
2002 if (!netif_running(br->dev)) 2002 if (!netif_running(br->dev))
2003 goto unlock; 2003 goto unlock;
2004 2004
@@ -2030,7 +2030,7 @@ rollback:
2030 } 2030 }
2031 2031
2032unlock: 2032unlock:
2033 spin_unlock(&br->multicast_lock); 2033 spin_unlock_bh(&br->multicast_lock);
2034 2034
2035 return err; 2035 return err;
2036} 2036}
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 229d820bdf0b..045d56eaeca2 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -426,6 +426,16 @@ netdev_features_t br_features_recompute(struct net_bridge *br,
426int br_handle_frame_finish(struct sk_buff *skb); 426int br_handle_frame_finish(struct sk_buff *skb);
427rx_handler_result_t br_handle_frame(struct sk_buff **pskb); 427rx_handler_result_t br_handle_frame(struct sk_buff **pskb);
428 428
429static inline bool br_rx_handler_check_rcu(const struct net_device *dev)
430{
431 return rcu_dereference(dev->rx_handler) == br_handle_frame;
432}
433
434static inline struct net_bridge_port *br_port_get_check_rcu(const struct net_device *dev)
435{
436 return br_rx_handler_check_rcu(dev) ? br_port_get_rcu(dev) : NULL;
437}
438
429/* br_ioctl.c */ 439/* br_ioctl.c */
430int br_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); 440int br_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
431int br_ioctl_deviceless_stub(struct net *net, unsigned int cmd, 441int br_ioctl_deviceless_stub(struct net *net, unsigned int cmd,
diff --git a/net/bridge/br_stp_bpdu.c b/net/bridge/br_stp_bpdu.c
index 8660ea3be705..bdb459d21ad8 100644
--- a/net/bridge/br_stp_bpdu.c
+++ b/net/bridge/br_stp_bpdu.c
@@ -153,7 +153,7 @@ void br_stp_rcv(const struct stp_proto *proto, struct sk_buff *skb,
153 if (buf[0] != 0 || buf[1] != 0 || buf[2] != 0) 153 if (buf[0] != 0 || buf[1] != 0 || buf[2] != 0)
154 goto err; 154 goto err;
155 155
156 p = br_port_get_rcu(dev); 156 p = br_port_get_check_rcu(dev);
157 if (!p) 157 if (!p)
158 goto err; 158 goto err;
159 159
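br_port_get_check_rcu() only trusts dev->rx_handler_data as a bridge port after confirming that the device's rx_handler really is the bridge's, which protects the STP receive path when the device has been claimed by something else. A hedged kernel-style sketch of that general pattern for any rx_handler owner (my_port and my_handle_frame are illustrative names):

	/* sketch: treat rx_handler_data as ours only if the handler is ours */
	static inline struct my_port *my_port_get_check_rcu(const struct net_device *dev)
	{
		if (rcu_dereference(dev->rx_handler) != my_handle_frame)
			return NULL;    /* another subsystem owns this device */

		return rcu_dereference(dev->rx_handler_data);
	}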
diff --git a/net/compat.c b/net/compat.c
index 618c6a8a911b..dd32e34c1e2c 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -72,7 +72,7 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
72 __get_user(kmsg->msg_flags, &umsg->msg_flags)) 72 __get_user(kmsg->msg_flags, &umsg->msg_flags))
73 return -EFAULT; 73 return -EFAULT;
74 if (kmsg->msg_namelen > sizeof(struct sockaddr_storage)) 74 if (kmsg->msg_namelen > sizeof(struct sockaddr_storage))
75 return -EINVAL; 75 kmsg->msg_namelen = sizeof(struct sockaddr_storage);
76 kmsg->msg_name = compat_ptr(tmp1); 76 kmsg->msg_name = compat_ptr(tmp1);
77 kmsg->msg_iov = compat_ptr(tmp2); 77 kmsg->msg_iov = compat_ptr(tmp2);
78 kmsg->msg_control = compat_ptr(tmp3); 78 kmsg->msg_control = compat_ptr(tmp3);
diff --git a/net/core/dev.c b/net/core/dev.c
index ba3b7ea5ebb3..0ce469e5ec80 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2539,7 +2539,7 @@ static inline int skb_needs_linearize(struct sk_buff *skb,
2539} 2539}
2540 2540
2541int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, 2541int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
2542 struct netdev_queue *txq, void *accel_priv) 2542 struct netdev_queue *txq)
2543{ 2543{
2544 const struct net_device_ops *ops = dev->netdev_ops; 2544 const struct net_device_ops *ops = dev->netdev_ops;
2545 int rc = NETDEV_TX_OK; 2545 int rc = NETDEV_TX_OK;
@@ -2605,13 +2605,10 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
2605 dev_queue_xmit_nit(skb, dev); 2605 dev_queue_xmit_nit(skb, dev);
2606 2606
2607 skb_len = skb->len; 2607 skb_len = skb->len;
2608 if (accel_priv)
2609 rc = ops->ndo_dfwd_start_xmit(skb, dev, accel_priv);
2610 else
2611 rc = ops->ndo_start_xmit(skb, dev); 2608 rc = ops->ndo_start_xmit(skb, dev);
2612 2609
2613 trace_net_dev_xmit(skb, rc, dev, skb_len); 2610 trace_net_dev_xmit(skb, rc, dev, skb_len);
2614 if (rc == NETDEV_TX_OK && txq) 2611 if (rc == NETDEV_TX_OK)
2615 txq_trans_update(txq); 2612 txq_trans_update(txq);
2616 return rc; 2613 return rc;
2617 } 2614 }
@@ -2627,10 +2624,7 @@ gso:
2627 dev_queue_xmit_nit(nskb, dev); 2624 dev_queue_xmit_nit(nskb, dev);
2628 2625
2629 skb_len = nskb->len; 2626 skb_len = nskb->len;
2630 if (accel_priv) 2627 rc = ops->ndo_start_xmit(nskb, dev);
2631 rc = ops->ndo_dfwd_start_xmit(nskb, dev, accel_priv);
2632 else
2633 rc = ops->ndo_start_xmit(nskb, dev);
2634 trace_net_dev_xmit(nskb, rc, dev, skb_len); 2628 trace_net_dev_xmit(nskb, rc, dev, skb_len);
2635 if (unlikely(rc != NETDEV_TX_OK)) { 2629 if (unlikely(rc != NETDEV_TX_OK)) {
2636 if (rc & ~NETDEV_TX_MASK) 2630 if (rc & ~NETDEV_TX_MASK)
@@ -2811,7 +2805,7 @@ EXPORT_SYMBOL(dev_loopback_xmit);
2811 * the BH enable code must have IRQs enabled so that it will not deadlock. 2805 * the BH enable code must have IRQs enabled so that it will not deadlock.
2812 * --BLG 2806 * --BLG
2813 */ 2807 */
2814int dev_queue_xmit(struct sk_buff *skb) 2808int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
2815{ 2809{
2816 struct net_device *dev = skb->dev; 2810 struct net_device *dev = skb->dev;
2817 struct netdev_queue *txq; 2811 struct netdev_queue *txq;
@@ -2827,7 +2821,7 @@ int dev_queue_xmit(struct sk_buff *skb)
2827 2821
2828 skb_update_prio(skb); 2822 skb_update_prio(skb);
2829 2823
2830 txq = netdev_pick_tx(dev, skb); 2824 txq = netdev_pick_tx(dev, skb, accel_priv);
2831 q = rcu_dereference_bh(txq->qdisc); 2825 q = rcu_dereference_bh(txq->qdisc);
2832 2826
2833#ifdef CONFIG_NET_CLS_ACT 2827#ifdef CONFIG_NET_CLS_ACT
@@ -2863,7 +2857,7 @@ int dev_queue_xmit(struct sk_buff *skb)
2863 2857
2864 if (!netif_xmit_stopped(txq)) { 2858 if (!netif_xmit_stopped(txq)) {
2865 __this_cpu_inc(xmit_recursion); 2859 __this_cpu_inc(xmit_recursion);
2866 rc = dev_hard_start_xmit(skb, dev, txq, NULL); 2860 rc = dev_hard_start_xmit(skb, dev, txq);
2867 __this_cpu_dec(xmit_recursion); 2861 __this_cpu_dec(xmit_recursion);
2868 if (dev_xmit_complete(rc)) { 2862 if (dev_xmit_complete(rc)) {
2869 HARD_TX_UNLOCK(dev, txq); 2863 HARD_TX_UNLOCK(dev, txq);
@@ -2892,8 +2886,19 @@ out:
2892 rcu_read_unlock_bh(); 2886 rcu_read_unlock_bh();
2893 return rc; 2887 return rc;
2894} 2888}
2889
2890int dev_queue_xmit(struct sk_buff *skb)
2891{
2892 return __dev_queue_xmit(skb, NULL);
2893}
2895EXPORT_SYMBOL(dev_queue_xmit); 2894EXPORT_SYMBOL(dev_queue_xmit);
2896 2895
2896int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv)
2897{
2898 return __dev_queue_xmit(skb, accel_priv);
2899}
2900EXPORT_SYMBOL(dev_queue_xmit_accel);
2901
2897 2902
2898/*======================================================================= 2903/*=======================================================================
2899 Receiver routines 2904 Receiver routines
@@ -4500,7 +4505,7 @@ struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
4500{ 4505{
4501 struct netdev_adjacent *upper; 4506 struct netdev_adjacent *upper;
4502 4507
4503 WARN_ON_ONCE(!rcu_read_lock_held()); 4508 WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
4504 4509
4505 upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list); 4510 upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
4506 4511
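dev_hard_start_xmit() loses its accel_priv argument; the private pointer instead travels through __dev_queue_xmit() into queue selection, and the new dev_queue_xmit_accel() export lets an upper device (for example a macvlan doing L2 forwarding offload, which is an assumption here rather than something stated in this hunk) hand its context to the lower device's ndo_select_queue(). A hedged sketch of the caller side, with invented field names:

	/* sketch: an upper device transmitting through its lower device while
	 * passing its own context down to the lower driver's queue selection
	 */
	skb->dev = lowerdev;
	if (upper_priv->offload_enabled)
		err = dev_queue_xmit_accel(skb, upper_priv->accel_handle);
	else
		err = dev_queue_xmit(skb);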
diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
index 95897183226e..e70301eb7a4a 100644
--- a/net/core/drop_monitor.c
+++ b/net/core/drop_monitor.c
@@ -64,7 +64,6 @@ static struct genl_family net_drop_monitor_family = {
64 .hdrsize = 0, 64 .hdrsize = 0,
65 .name = "NET_DM", 65 .name = "NET_DM",
66 .version = 2, 66 .version = 2,
67 .maxattr = NET_DM_CMD_MAX,
68}; 67};
69 68
70static DEFINE_PER_CPU(struct per_cpu_dm_data, dm_cpu_data); 69static DEFINE_PER_CPU(struct per_cpu_dm_data, dm_cpu_data);
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index d6ef17322500..2fc5beaf5783 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -395,17 +395,21 @@ u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
395EXPORT_SYMBOL(__netdev_pick_tx); 395EXPORT_SYMBOL(__netdev_pick_tx);
396 396
397struct netdev_queue *netdev_pick_tx(struct net_device *dev, 397struct netdev_queue *netdev_pick_tx(struct net_device *dev,
398 struct sk_buff *skb) 398 struct sk_buff *skb,
399 void *accel_priv)
399{ 400{
400 int queue_index = 0; 401 int queue_index = 0;
401 402
402 if (dev->real_num_tx_queues != 1) { 403 if (dev->real_num_tx_queues != 1) {
403 const struct net_device_ops *ops = dev->netdev_ops; 404 const struct net_device_ops *ops = dev->netdev_ops;
404 if (ops->ndo_select_queue) 405 if (ops->ndo_select_queue)
405 queue_index = ops->ndo_select_queue(dev, skb); 406 queue_index = ops->ndo_select_queue(dev, skb,
407 accel_priv);
406 else 408 else
407 queue_index = __netdev_pick_tx(dev, skb); 409 queue_index = __netdev_pick_tx(dev, skb);
408 queue_index = dev_cap_txqueue(dev, queue_index); 410
411 if (!accel_priv)
412 queue_index = dev_cap_txqueue(dev, queue_index);
409 } 413 }
410 414
411 skb_set_queue_mapping(skb, queue_index); 415 skb_set_queue_mapping(skb, queue_index);
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index ca15f32821fb..932c6d7cf666 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -1161,6 +1161,7 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
1161 neigh->parms->reachable_time : 1161 neigh->parms->reachable_time :
1162 0))); 1162 0)));
1163 neigh->nud_state = new; 1163 neigh->nud_state = new;
1164 notify = 1;
1164 } 1165 }
1165 1166
1166 if (lladdr != neigh->ha) { 1167 if (lladdr != neigh->ha) {
@@ -1274,7 +1275,7 @@ int neigh_compat_output(struct neighbour *neigh, struct sk_buff *skb)
1274 1275
1275 if (dev_hard_header(skb, dev, ntohs(skb->protocol), NULL, NULL, 1276 if (dev_hard_header(skb, dev, ntohs(skb->protocol), NULL, NULL,
1276 skb->len) < 0 && 1277 skb->len) < 0 &&
1277 dev->header_ops->rebuild(skb)) 1278 dev_rebuild_header(skb))
1278 return 0; 1279 return 0;
1279 1280
1280 return dev_queue_xmit(skb); 1281 return dev_queue_xmit(skb);
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 8f971990677c..19fe9c717ced 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -375,7 +375,7 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
375 if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) { 375 if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) {
376 struct netdev_queue *txq; 376 struct netdev_queue *txq;
377 377
378 txq = netdev_pick_tx(dev, skb); 378 txq = netdev_pick_tx(dev, skb, NULL);
379 379
380 /* try until next clock tick */ 380 /* try until next clock tick */
381 for (tries = jiffies_to_usecs(1)/USEC_PER_POLL; 381 for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
@@ -386,8 +386,14 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
386 !vlan_hw_offload_capable(netif_skb_features(skb), 386 !vlan_hw_offload_capable(netif_skb_features(skb),
387 skb->vlan_proto)) { 387 skb->vlan_proto)) {
388 skb = __vlan_put_tag(skb, skb->vlan_proto, vlan_tx_tag_get(skb)); 388 skb = __vlan_put_tag(skb, skb->vlan_proto, vlan_tx_tag_get(skb));
389 if (unlikely(!skb)) 389 if (unlikely(!skb)) {
390 break; 390 /* This is actually a packet drop, but we
391 * don't want the code at the end of this
392 * function to try and re-queue a NULL skb.
393 */
394 status = NETDEV_TX_OK;
395 goto unlock_txq;
396 }
391 skb->vlan_tci = 0; 397 skb->vlan_tci = 0;
392 } 398 }
393 399
@@ -395,6 +401,7 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
395 if (status == NETDEV_TX_OK) 401 if (status == NETDEV_TX_OK)
396 txq_trans_update(txq); 402 txq_trans_update(txq);
397 } 403 }
404 unlock_txq:
398 __netif_tx_unlock(txq); 405 __netif_tx_unlock(txq);
399 406
400 if (status == NETDEV_TX_OK) 407 if (status == NETDEV_TX_OK)
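__vlan_put_tag() frees the skb on failure, so the netpoll path must not let the generic "requeue on busy" code at the bottom of the function see a NULL pointer; reporting NETDEV_TX_OK there turns the failure into an explicit drop. The same idiom applies to any helper that consumes its skb on error; a hedged sketch (the helper name is invented):

	/* sketch: the helper consumes the skb on failure, so treat that as a
	 * drop rather than letting later code requeue a now-NULL pointer
	 */
	skb = some_helper_that_may_consume(skb);
	if (!skb) {
		status = NETDEV_TX_OK;  /* nothing left to retry or requeue */
		goto unlock_txq;
	}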
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 261357a66300..a797fff7f222 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -2527,6 +2527,8 @@ static int process_ipsec(struct pktgen_dev *pkt_dev,
2527 if (x) { 2527 if (x) {
2528 int ret; 2528 int ret;
2529 __u8 *eth; 2529 __u8 *eth;
2530 struct iphdr *iph;
2531
2530 nhead = x->props.header_len - skb_headroom(skb); 2532 nhead = x->props.header_len - skb_headroom(skb);
2531 if (nhead > 0) { 2533 if (nhead > 0) {
2532 ret = pskb_expand_head(skb, nhead, 0, GFP_ATOMIC); 2534 ret = pskb_expand_head(skb, nhead, 0, GFP_ATOMIC);
@@ -2548,6 +2550,11 @@ static int process_ipsec(struct pktgen_dev *pkt_dev,
2548 eth = (__u8 *) skb_push(skb, ETH_HLEN); 2550 eth = (__u8 *) skb_push(skb, ETH_HLEN);
2549 memcpy(eth, pkt_dev->hh, 12); 2551 memcpy(eth, pkt_dev->hh, 12);
2550 *(u16 *) &eth[12] = protocol; 2552 *(u16 *) &eth[12] = protocol;
2553
2554 /* Update IPv4 header len as well as checksum value */
2555 iph = ip_hdr(skb);
2556 iph->tot_len = htons(skb->len - ETH_HLEN);
2557 ip_send_check(iph);
2551 } 2558 }
2552 } 2559 }
2553 return 1; 2560 return 1;
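After the IPsec transform grows the frame, the IPv4 total length and header checksum no longer match, so pktgen rewrites tot_len and calls ip_send_check(). The header checksum itself is a 16-bit one's-complement sum over the header with the checksum field zeroed first; a standalone sketch of that arithmetic:

#include <stdint.h>
#include <stddef.h>

/* one's-complement checksum over an IPv4 header of hdr_len bytes (always a
 * multiple of 4); bytes 10 and 11 (the checksum field) must already be zero
 */
static uint16_t ipv4_header_checksum(const uint8_t *hdr, size_t hdr_len)
{
	uint32_t sum = 0;
	size_t i;

	for (i = 0; i < hdr_len; i += 2)
		sum += (uint32_t)(hdr[i] << 8 | hdr[i + 1]);

	while (sum >> 16)                       /* fold carries back in */
		sum = (sum & 0xffff) + (sum >> 16);

	return (uint16_t)~sum;                  /* caller stores this big-endian */
}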
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 2718fed53d8c..06e72d3cdf60 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -3584,6 +3584,7 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet)
3584 skb->tstamp.tv64 = 0; 3584 skb->tstamp.tv64 = 0;
3585 skb->pkt_type = PACKET_HOST; 3585 skb->pkt_type = PACKET_HOST;
3586 skb->skb_iif = 0; 3586 skb->skb_iif = 0;
3587 skb->local_df = 0;
3587 skb_dst_drop(skb); 3588 skb_dst_drop(skb);
3588 skb->mark = 0; 3589 skb->mark = 0;
3589 secpath_reset(skb); 3590 secpath_reset(skb);
diff --git a/net/core/sock.c b/net/core/sock.c
index ab20ed9b0f31..5393b4b719d7 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -882,7 +882,7 @@ set_rcvbuf:
882 882
883 case SO_PEEK_OFF: 883 case SO_PEEK_OFF:
884 if (sock->ops->set_peek_off) 884 if (sock->ops->set_peek_off)
885 sock->ops->set_peek_off(sk, val); 885 ret = sock->ops->set_peek_off(sk, val);
886 else 886 else
887 ret = -EOPNOTSUPP; 887 ret = -EOPNOTSUPP;
888 break; 888 break;
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 4ac71ff7c2e4..2b90a786e475 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -851,7 +851,6 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
851 flowlabel = fl6_sock_lookup(sk, fl6.flowlabel); 851 flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
852 if (flowlabel == NULL) 852 if (flowlabel == NULL)
853 return -EINVAL; 853 return -EINVAL;
854 usin->sin6_addr = flowlabel->dst;
855 fl6_sock_release(flowlabel); 854 fl6_sock_release(flowlabel);
856 } 855 }
857 } 856 }
diff --git a/net/dccp/probe.c b/net/dccp/probe.c
index 4c6bdf97a657..595ddf0459db 100644
--- a/net/dccp/probe.c
+++ b/net/dccp/probe.c
@@ -152,17 +152,6 @@ static const struct file_operations dccpprobe_fops = {
152 .llseek = noop_llseek, 152 .llseek = noop_llseek,
153}; 153};
154 154
155static __init int setup_jprobe(void)
156{
157 int ret = register_jprobe(&dccp_send_probe);
158
159 if (ret) {
160 request_module("dccp");
161 ret = register_jprobe(&dccp_send_probe);
162 }
163 return ret;
164}
165
166static __init int dccpprobe_init(void) 155static __init int dccpprobe_init(void)
167{ 156{
168 int ret = -ENOMEM; 157 int ret = -ENOMEM;
@@ -174,7 +163,13 @@ static __init int dccpprobe_init(void)
174 if (!proc_create(procname, S_IRUSR, init_net.proc_net, &dccpprobe_fops)) 163 if (!proc_create(procname, S_IRUSR, init_net.proc_net, &dccpprobe_fops))
175 goto err0; 164 goto err0;
176 165
177 ret = setup_jprobe(); 166 ret = register_jprobe(&dccp_send_probe);
167 if (ret) {
168 ret = request_module("dccp");
169 if (!ret)
170 ret = register_jprobe(&dccp_send_probe);
171 }
172
178 if (ret) 173 if (ret)
179 goto err1; 174 goto err1;
180 175
diff --git a/net/hsr/hsr_framereg.c b/net/hsr/hsr_framereg.c
index 003f5bb3acd2..4bdab1521878 100644
--- a/net/hsr/hsr_framereg.c
+++ b/net/hsr/hsr_framereg.c
@@ -288,7 +288,8 @@ void hsr_addr_subst_dest(struct hsr_priv *hsr_priv, struct ethhdr *ethhdr,
288static bool seq_nr_after(u16 a, u16 b) 288static bool seq_nr_after(u16 a, u16 b)
289{ 289{
290 /* Remove inconsistency where 290 /* Remove inconsistency where
291 * seq_nr_after(a, b) == seq_nr_before(a, b) */ 291 * seq_nr_after(a, b) == seq_nr_before(a, b)
292 */
292 if ((int) b - a == 32768) 293 if ((int) b - a == 32768)
293 return false; 294 return false;
294 295
diff --git a/net/hsr/hsr_netlink.c b/net/hsr/hsr_netlink.c
index 5325af85eea6..01a5261ac7a5 100644
--- a/net/hsr/hsr_netlink.c
+++ b/net/hsr/hsr_netlink.c
@@ -23,6 +23,8 @@ static const struct nla_policy hsr_policy[IFLA_HSR_MAX + 1] = {
23 [IFLA_HSR_SLAVE1] = { .type = NLA_U32 }, 23 [IFLA_HSR_SLAVE1] = { .type = NLA_U32 },
24 [IFLA_HSR_SLAVE2] = { .type = NLA_U32 }, 24 [IFLA_HSR_SLAVE2] = { .type = NLA_U32 },
25 [IFLA_HSR_MULTICAST_SPEC] = { .type = NLA_U8 }, 25 [IFLA_HSR_MULTICAST_SPEC] = { .type = NLA_U8 },
26 [IFLA_HSR_SUPERVISION_ADDR] = { .type = NLA_BINARY, .len = ETH_ALEN },
27 [IFLA_HSR_SEQ_NR] = { .type = NLA_U16 },
26}; 28};
27 29
28 30
@@ -59,6 +61,31 @@ static int hsr_newlink(struct net *src_net, struct net_device *dev,
59 return hsr_dev_finalize(dev, link, multicast_spec); 61 return hsr_dev_finalize(dev, link, multicast_spec);
60} 62}
61 63
64static int hsr_fill_info(struct sk_buff *skb, const struct net_device *dev)
65{
66 struct hsr_priv *hsr_priv;
67
68 hsr_priv = netdev_priv(dev);
69
70 if (hsr_priv->slave[0])
71 if (nla_put_u32(skb, IFLA_HSR_SLAVE1, hsr_priv->slave[0]->ifindex))
72 goto nla_put_failure;
73
74 if (hsr_priv->slave[1])
75 if (nla_put_u32(skb, IFLA_HSR_SLAVE2, hsr_priv->slave[1]->ifindex))
76 goto nla_put_failure;
77
78 if (nla_put(skb, IFLA_HSR_SUPERVISION_ADDR, ETH_ALEN,
79 hsr_priv->sup_multicast_addr) ||
80 nla_put_u16(skb, IFLA_HSR_SEQ_NR, hsr_priv->sequence_nr))
81 goto nla_put_failure;
82
83 return 0;
84
85nla_put_failure:
86 return -EMSGSIZE;
87}
88
62static struct rtnl_link_ops hsr_link_ops __read_mostly = { 89static struct rtnl_link_ops hsr_link_ops __read_mostly = {
63 .kind = "hsr", 90 .kind = "hsr",
64 .maxtype = IFLA_HSR_MAX, 91 .maxtype = IFLA_HSR_MAX,
@@ -66,6 +93,7 @@ static struct rtnl_link_ops hsr_link_ops __read_mostly = {
66 .priv_size = sizeof(struct hsr_priv), 93 .priv_size = sizeof(struct hsr_priv),
67 .setup = hsr_dev_setup, 94 .setup = hsr_dev_setup,
68 .newlink = hsr_newlink, 95 .newlink = hsr_newlink,
96 .fill_info = hsr_fill_info,
69}; 97};
70 98
71 99
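hsr_fill_info() follows the standard rtnl_link_ops pattern: emit each IFLA_HSR_* attribute with the matching nla_put_*() helper and bail out with -EMSGSIZE when the skb runs out of tailroom. Link drivers usually pair this with a .get_size callback so the netlink core can reserve enough room up front; a hedged sketch of what such a companion could look like (not part of this patch):

	/* sketch of a matching .get_size for the attributes filled above */
	static size_t hsr_get_size(const struct net_device *dev)
	{
		return nla_total_size(sizeof(u32)) +    /* IFLA_HSR_SLAVE1 */
		       nla_total_size(sizeof(u32)) +    /* IFLA_HSR_SLAVE2 */
		       nla_total_size(ETH_ALEN) +       /* IFLA_HSR_SUPERVISION_ADDR */
		       nla_total_size(sizeof(u16));     /* IFLA_HSR_SEQ_NR */
	}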
diff --git a/net/ieee802154/6lowpan.c b/net/ieee802154/6lowpan.c
index 459e200c08a4..a2d2456a557a 100644
--- a/net/ieee802154/6lowpan.c
+++ b/net/ieee802154/6lowpan.c
@@ -547,7 +547,7 @@ static int lowpan_header_create(struct sk_buff *skb,
547 hc06_ptr += 3; 547 hc06_ptr += 3;
548 } else { 548 } else {
549 /* compress nothing */ 549 /* compress nothing */
550 memcpy(hc06_ptr, &hdr, 4); 550 memcpy(hc06_ptr, hdr, 4);
551 /* replace the top byte with new ECN | DSCP format */ 551 /* replace the top byte with new ECN | DSCP format */
552 *hc06_ptr = tmp; 552 *hc06_ptr = tmp;
553 hc06_ptr += 4; 553 hc06_ptr += 4;
diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c
index 523be38e37de..f2e15738534d 100644
--- a/net/ipv4/fib_rules.c
+++ b/net/ipv4/fib_rules.c
@@ -104,7 +104,10 @@ errout:
104static bool fib4_rule_suppress(struct fib_rule *rule, struct fib_lookup_arg *arg) 104static bool fib4_rule_suppress(struct fib_rule *rule, struct fib_lookup_arg *arg)
105{ 105{
106 struct fib_result *result = (struct fib_result *) arg->result; 106 struct fib_result *result = (struct fib_result *) arg->result;
107 struct net_device *dev = result->fi->fib_dev; 107 struct net_device *dev = NULL;
108
109 if (result->fi)
110 dev = result->fi->fib_dev;
108 111
109 /* do not accept result if the route does 112 /* do not accept result if the route does
110 * not meet the required prefix length 113 * not meet the required prefix length
diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c
index e5d436188464..2cd02f32f99f 100644
--- a/net/ipv4/gre_offload.c
+++ b/net/ipv4/gre_offload.c
@@ -28,6 +28,7 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
28 netdev_features_t enc_features; 28 netdev_features_t enc_features;
29 int ghl = GRE_HEADER_SECTION; 29 int ghl = GRE_HEADER_SECTION;
30 struct gre_base_hdr *greh; 30 struct gre_base_hdr *greh;
31 u16 mac_offset = skb->mac_header;
31 int mac_len = skb->mac_len; 32 int mac_len = skb->mac_len;
32 __be16 protocol = skb->protocol; 33 __be16 protocol = skb->protocol;
33 int tnl_hlen; 34 int tnl_hlen;
@@ -58,13 +59,13 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
58 } else 59 } else
59 csum = false; 60 csum = false;
60 61
62 if (unlikely(!pskb_may_pull(skb, ghl)))
63 goto out;
64
61 /* setup inner skb. */ 65 /* setup inner skb. */
62 skb->protocol = greh->protocol; 66 skb->protocol = greh->protocol;
63 skb->encapsulation = 0; 67 skb->encapsulation = 0;
64 68
65 if (unlikely(!pskb_may_pull(skb, ghl)))
66 goto out;
67
68 __skb_pull(skb, ghl); 69 __skb_pull(skb, ghl);
69 skb_reset_mac_header(skb); 70 skb_reset_mac_header(skb);
70 skb_set_network_header(skb, skb_inner_network_offset(skb)); 71 skb_set_network_header(skb, skb_inner_network_offset(skb));
@@ -73,8 +74,10 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
73 /* segment inner packet. */ 74 /* segment inner packet. */
74 enc_features = skb->dev->hw_enc_features & netif_skb_features(skb); 75 enc_features = skb->dev->hw_enc_features & netif_skb_features(skb);
75 segs = skb_mac_gso_segment(skb, enc_features); 76 segs = skb_mac_gso_segment(skb, enc_features);
76 if (!segs || IS_ERR(segs)) 77 if (!segs || IS_ERR(segs)) {
78 skb_gso_error_unwind(skb, protocol, ghl, mac_offset, mac_len);
77 goto out; 79 goto out;
80 }
78 81
79 skb = segs; 82 skb = segs;
80 tnl_hlen = skb_tnl_header_len(skb); 83 tnl_hlen = skb_tnl_header_len(skb);
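Two ordering rules show up in this hunk: the GRE header must be confirmed present in linear data (pskb_may_pull) before any of its fields are consumed for the inner-skb setup, and if inner segmentation fails the outer skb has to be restored (protocol, mac header offset, mac_len) before returning, since the caller still owns it. A compressed kernel-style sketch of that shape, with surrounding details elided:

	u16 mac_offset = skb->mac_header;
	int mac_len = skb->mac_len;

	if (unlikely(!pskb_may_pull(skb, ghl)))
		goto out;                       /* never read a header that is not pulled in */

	skb->protocol = greh->protocol;         /* safe only after the pull check */
	skb->encapsulation = 0;
	__skb_pull(skb, ghl);

	segs = skb_mac_gso_segment(skb, enc_features);
	if (!segs || IS_ERR(segs)) {
		/* hand the caller back an skb that still looks like the original */
		skb_gso_error_unwind(skb, protocol, ghl, mac_offset, mac_len);
		goto out;
	}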
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 56a964a553d2..a0f52dac8940 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -106,6 +106,10 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
106 106
107 r->id.idiag_sport = inet->inet_sport; 107 r->id.idiag_sport = inet->inet_sport;
108 r->id.idiag_dport = inet->inet_dport; 108 r->id.idiag_dport = inet->inet_dport;
109
110 memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src));
111 memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst));
112
109 r->id.idiag_src[0] = inet->inet_rcv_saddr; 113 r->id.idiag_src[0] = inet->inet_rcv_saddr;
110 r->id.idiag_dst[0] = inet->inet_daddr; 114 r->id.idiag_dst[0] = inet->inet_daddr;
111 115
@@ -240,12 +244,19 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
240 244
241 r->idiag_family = tw->tw_family; 245 r->idiag_family = tw->tw_family;
242 r->idiag_retrans = 0; 246 r->idiag_retrans = 0;
247
243 r->id.idiag_if = tw->tw_bound_dev_if; 248 r->id.idiag_if = tw->tw_bound_dev_if;
244 sock_diag_save_cookie(tw, r->id.idiag_cookie); 249 sock_diag_save_cookie(tw, r->id.idiag_cookie);
250
245 r->id.idiag_sport = tw->tw_sport; 251 r->id.idiag_sport = tw->tw_sport;
246 r->id.idiag_dport = tw->tw_dport; 252 r->id.idiag_dport = tw->tw_dport;
253
254 memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src));
255 memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst));
256
247 r->id.idiag_src[0] = tw->tw_rcv_saddr; 257 r->id.idiag_src[0] = tw->tw_rcv_saddr;
248 r->id.idiag_dst[0] = tw->tw_daddr; 258 r->id.idiag_dst[0] = tw->tw_daddr;
259
249 r->idiag_state = tw->tw_substate; 260 r->idiag_state = tw->tw_substate;
250 r->idiag_timer = 3; 261 r->idiag_timer = 3;
251 r->idiag_expires = jiffies_to_msecs(tmo); 262 r->idiag_expires = jiffies_to_msecs(tmo);
@@ -726,8 +737,13 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
726 737
727 r->id.idiag_sport = inet->inet_sport; 738 r->id.idiag_sport = inet->inet_sport;
728 r->id.idiag_dport = ireq->ir_rmt_port; 739 r->id.idiag_dport = ireq->ir_rmt_port;
740
741 memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src));
742 memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst));
743
729 r->id.idiag_src[0] = ireq->ir_loc_addr; 744 r->id.idiag_src[0] = ireq->ir_loc_addr;
730 r->id.idiag_dst[0] = ireq->ir_rmt_addr; 745 r->id.idiag_dst[0] = ireq->ir_rmt_addr;
746
731 r->idiag_expires = jiffies_to_msecs(tmo); 747 r->idiag_expires = jiffies_to_msecs(tmo);
732 r->idiag_rqueue = 0; 748 r->idiag_rqueue = 0;
733 r->idiag_wqueue = 0; 749 r->idiag_wqueue = 0;
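idiag_src and idiag_dst are sized for IPv6 (four 32-bit words each), but the IPv4 paths only write word 0, so the remaining twelve bytes of each field would otherwise carry whatever was left on the kernel stack out to userspace. Zeroing the whole array before filling word 0 closes that leak; a minimal sketch of the pattern:

#include <string.h>
#include <stdint.h>

struct diag_id_sketch {
	uint32_t src[4];        /* big enough for IPv6; IPv4 uses only src[0] */
	uint32_t dst[4];
};

static void fill_v4(struct diag_id_sketch *id, uint32_t saddr, uint32_t daddr)
{
	/* clear every byte that reaches userspace before filling word 0 */
	memset(id->src, 0, sizeof(id->src));
	memset(id->dst, 0, sizeof(id->dst));
	id->src[0] = saddr;
	id->dst[0] = daddr;
}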
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index d7aea4c5b940..e560ef34cf4b 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -217,6 +217,7 @@ static int ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi)
217 iph->saddr, iph->daddr, tpi->key); 217 iph->saddr, iph->daddr, tpi->key);
218 218
219 if (tunnel) { 219 if (tunnel) {
220 skb_pop_mac_header(skb);
220 ip_tunnel_rcv(tunnel, skb, tpi, log_ecn_error); 221 ip_tunnel_rcv(tunnel, skb, tpi, log_ecn_error);
221 return PACKET_RCVD; 222 return PACKET_RCVD;
222 } 223 }
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 912402752f2f..df184616493f 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -828,7 +828,7 @@ static int __ip_append_data(struct sock *sk,
828 828
829 if (cork->length + length > maxnonfragsize - fragheaderlen) { 829 if (cork->length + length > maxnonfragsize - fragheaderlen) {
830 ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport, 830 ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport,
831 mtu-exthdrlen); 831 mtu - (opt ? opt->optlen : 0));
832 return -EMSGSIZE; 832 return -EMSGSIZE;
833 } 833 }
834 834
@@ -1151,7 +1151,8 @@ ssize_t ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
1151 mtu : 0xFFFF; 1151 mtu : 0xFFFF;
1152 1152
1153 if (cork->length + size > maxnonfragsize - fragheaderlen) { 1153 if (cork->length + size > maxnonfragsize - fragheaderlen) {
1154 ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport, mtu); 1154 ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport,
1155 mtu - (opt ? opt->optlen : 0));
1155 return -EMSGSIZE; 1156 return -EMSGSIZE;
1156 } 1157 }
1157 1158
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 3f858266fa7e..ddf32a6bc415 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -386,7 +386,7 @@ void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 port, u32 inf
386/* 386/*
387 * Handle MSG_ERRQUEUE 387 * Handle MSG_ERRQUEUE
388 */ 388 */
389int ip_recv_error(struct sock *sk, struct msghdr *msg, int len) 389int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
390{ 390{
391 struct sock_exterr_skb *serr; 391 struct sock_exterr_skb *serr;
392 struct sk_buff *skb, *skb2; 392 struct sk_buff *skb, *skb2;
@@ -423,6 +423,7 @@ int ip_recv_error(struct sock *sk, struct msghdr *msg, int len)
423 serr->addr_offset); 423 serr->addr_offset);
424 sin->sin_port = serr->port; 424 sin->sin_port = serr->port;
425 memset(&sin->sin_zero, 0, sizeof(sin->sin_zero)); 425 memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
426 *addr_len = sizeof(*sin);
426 } 427 }
427 428
428 memcpy(&errhdr.ee, &serr->ee, sizeof(struct sock_extended_err)); 429 memcpy(&errhdr.ee, &serr->ee, sizeof(struct sock_extended_err));
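ip_recv_error() now reports the address length back through *addr_len, so the recvmsg() callers (raw, ping, UDP below) return a correct msg_namelen for MSG_ERRQUEUE reads instead of whatever the handler left there. From userspace the effect is a properly sized sender address on the error queue; a hedged example of draining it, assuming IP_RECVERR is already enabled on the socket:

#include <stdio.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <netinet/in.h>

/* read one entry from the IPv4 error queue of a connected datagram socket */
static void drain_errqueue(int fd)
{
	struct sockaddr_in offender;
	char data[512], ctrl[512];
	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
	struct msghdr msg = {
		.msg_name = &offender, .msg_namelen = sizeof(offender),
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = ctrl, .msg_controllen = sizeof(ctrl),
	};

	if (recvmsg(fd, &msg, MSG_ERRQUEUE) < 0)
		return;

	/* with the kernel fix, msg_namelen comes back as sizeof(struct sockaddr_in) */
	printf("error queue entry, namelen=%u\n", (unsigned int)msg.msg_namelen);
}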
diff --git a/net/ipv4/netfilter/ipt_SYNPROXY.c b/net/ipv4/netfilter/ipt_SYNPROXY.c
index f13bd91d9a56..a313c3fbeb46 100644
--- a/net/ipv4/netfilter/ipt_SYNPROXY.c
+++ b/net/ipv4/netfilter/ipt_SYNPROXY.c
@@ -423,6 +423,7 @@ static void synproxy_tg4_destroy(const struct xt_tgdtor_param *par)
423static struct xt_target synproxy_tg4_reg __read_mostly = { 423static struct xt_target synproxy_tg4_reg __read_mostly = {
424 .name = "SYNPROXY", 424 .name = "SYNPROXY",
425 .family = NFPROTO_IPV4, 425 .family = NFPROTO_IPV4,
426 .hooks = (1 << NF_INET_LOCAL_IN) | (1 << NF_INET_FORWARD),
426 .target = synproxy_tg4, 427 .target = synproxy_tg4,
427 .targetsize = sizeof(struct xt_synproxy_info), 428 .targetsize = sizeof(struct xt_synproxy_info),
428 .checkentry = synproxy_tg4_check, 429 .checkentry = synproxy_tg4_check,
diff --git a/net/ipv4/netfilter/nft_reject_ipv4.c b/net/ipv4/netfilter/nft_reject_ipv4.c
index fff5ba1a33b7..4a5e94ac314a 100644
--- a/net/ipv4/netfilter/nft_reject_ipv4.c
+++ b/net/ipv4/netfilter/nft_reject_ipv4.c
@@ -72,7 +72,7 @@ static int nft_reject_dump(struct sk_buff *skb, const struct nft_expr *expr)
72{ 72{
73 const struct nft_reject *priv = nft_expr_priv(expr); 73 const struct nft_reject *priv = nft_expr_priv(expr);
74 74
75 if (nla_put_be32(skb, NFTA_REJECT_TYPE, priv->type)) 75 if (nla_put_be32(skb, NFTA_REJECT_TYPE, htonl(priv->type)))
76 goto nla_put_failure; 76 goto nla_put_failure;
77 77
78 switch (priv->type) { 78 switch (priv->type) {
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 876c6ca2d8f9..242e7f4ed6f4 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -772,7 +772,7 @@ int ping_v4_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
772 err = PTR_ERR(rt); 772 err = PTR_ERR(rt);
773 rt = NULL; 773 rt = NULL;
774 if (err == -ENETUNREACH) 774 if (err == -ENETUNREACH)
775 IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES); 775 IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
776 goto out; 776 goto out;
777 } 777 }
778 778
@@ -841,10 +841,11 @@ int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
841 841
842 if (flags & MSG_ERRQUEUE) { 842 if (flags & MSG_ERRQUEUE) {
843 if (family == AF_INET) { 843 if (family == AF_INET) {
844 return ip_recv_error(sk, msg, len); 844 return ip_recv_error(sk, msg, len, addr_len);
845#if IS_ENABLED(CONFIG_IPV6) 845#if IS_ENABLED(CONFIG_IPV6)
846 } else if (family == AF_INET6) { 846 } else if (family == AF_INET6) {
847 return pingv6_ops.ipv6_recv_error(sk, msg, len); 847 return pingv6_ops.ipv6_recv_error(sk, msg, len,
848 addr_len);
848#endif 849#endif
849 } 850 }
850 } 851 }
diff --git a/net/ipv4/protocol.c b/net/ipv4/protocol.c
index ce848461acbb..46d6a1c923a8 100644
--- a/net/ipv4/protocol.c
+++ b/net/ipv4/protocol.c
@@ -31,10 +31,6 @@
31const struct net_protocol __rcu *inet_protos[MAX_INET_PROTOS] __read_mostly; 31const struct net_protocol __rcu *inet_protos[MAX_INET_PROTOS] __read_mostly;
32const struct net_offload __rcu *inet_offloads[MAX_INET_PROTOS] __read_mostly; 32const struct net_offload __rcu *inet_offloads[MAX_INET_PROTOS] __read_mostly;
33 33
34/*
35 * Add a protocol handler to the hash tables
36 */
37
38int inet_add_protocol(const struct net_protocol *prot, unsigned char protocol) 34int inet_add_protocol(const struct net_protocol *prot, unsigned char protocol)
39{ 35{
40 if (!prot->netns_ok) { 36 if (!prot->netns_ok) {
@@ -55,10 +51,6 @@ int inet_add_offload(const struct net_offload *prot, unsigned char protocol)
55} 51}
56EXPORT_SYMBOL(inet_add_offload); 52EXPORT_SYMBOL(inet_add_offload);
57 53
58/*
59 * Remove a protocol from the hash tables.
60 */
61
62int inet_del_protocol(const struct net_protocol *prot, unsigned char protocol) 54int inet_del_protocol(const struct net_protocol *prot, unsigned char protocol)
63{ 55{
64 int ret; 56 int ret;
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 5cb8ddb505ee..23c3e5b5bb53 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -697,7 +697,7 @@ static int raw_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
697 goto out; 697 goto out;
698 698
699 if (flags & MSG_ERRQUEUE) { 699 if (flags & MSG_ERRQUEUE) {
700 err = ip_recv_error(sk, msg, len); 700 err = ip_recv_error(sk, msg, len, addr_len);
701 goto out; 701 goto out;
702 } 702 }
703 703
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 59a6f8b90cd9..067213924751 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -177,7 +177,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
177 if (IS_ERR(rt)) { 177 if (IS_ERR(rt)) {
178 err = PTR_ERR(rt); 178 err = PTR_ERR(rt);
179 if (err == -ENETUNREACH) 179 if (err == -ENETUNREACH)
180 IP_INC_STATS_BH(sock_net(sk), IPSTATS_MIB_OUTNOROUTES); 180 IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
181 return err; 181 return err;
182 } 182 }
183 183
diff --git a/net/ipv4/tcp_memcontrol.c b/net/ipv4/tcp_memcontrol.c
index 03e9154f7e68..f7e522c558ba 100644
--- a/net/ipv4/tcp_memcontrol.c
+++ b/net/ipv4/tcp_memcontrol.c
@@ -6,13 +6,6 @@
6#include <linux/memcontrol.h> 6#include <linux/memcontrol.h>
7#include <linux/module.h> 7#include <linux/module.h>
8 8
9static void memcg_tcp_enter_memory_pressure(struct sock *sk)
10{
11 if (sk->sk_cgrp->memory_pressure)
12 sk->sk_cgrp->memory_pressure = 1;
13}
14EXPORT_SYMBOL(memcg_tcp_enter_memory_pressure);
15
16int tcp_init_cgroup(struct mem_cgroup *memcg, struct cgroup_subsys *ss) 9int tcp_init_cgroup(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
17{ 10{
18 /* 11 /*
@@ -60,7 +53,6 @@ EXPORT_SYMBOL(tcp_destroy_cgroup);
60static int tcp_update_limit(struct mem_cgroup *memcg, u64 val) 53static int tcp_update_limit(struct mem_cgroup *memcg, u64 val)
61{ 54{
62 struct cg_proto *cg_proto; 55 struct cg_proto *cg_proto;
63 u64 old_lim;
64 int i; 56 int i;
65 int ret; 57 int ret;
66 58
@@ -71,7 +63,6 @@ static int tcp_update_limit(struct mem_cgroup *memcg, u64 val)
71 if (val > RES_COUNTER_MAX) 63 if (val > RES_COUNTER_MAX)
72 val = RES_COUNTER_MAX; 64 val = RES_COUNTER_MAX;
73 65
74 old_lim = res_counter_read_u64(&cg_proto->memory_allocated, RES_LIMIT);
75 ret = res_counter_set_limit(&cg_proto->memory_allocated, val); 66 ret = res_counter_set_limit(&cg_proto->memory_allocated, val);
76 if (ret) 67 if (ret)
77 return ret; 68 return ret;
diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c
index a2b68a108eae..05606353c7e7 100644
--- a/net/ipv4/tcp_offload.c
+++ b/net/ipv4/tcp_offload.c
@@ -274,33 +274,32 @@ static struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *
274{ 274{
275 const struct iphdr *iph = skb_gro_network_header(skb); 275 const struct iphdr *iph = skb_gro_network_header(skb);
276 __wsum wsum; 276 __wsum wsum;
277 __sum16 sum; 277
278 /* Don't bother verifying checksum if we're going to flush anyway. */
279 if (NAPI_GRO_CB(skb)->flush)
280 goto skip_csum;
281
282 wsum = skb->csum;
278 283
279 switch (skb->ip_summed) { 284 switch (skb->ip_summed) {
285 case CHECKSUM_NONE:
286 wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb),
287 0);
288
289 /* fall through */
290
280 case CHECKSUM_COMPLETE: 291 case CHECKSUM_COMPLETE:
281 if (!tcp_v4_check(skb_gro_len(skb), iph->saddr, iph->daddr, 292 if (!tcp_v4_check(skb_gro_len(skb), iph->saddr, iph->daddr,
282 skb->csum)) { 293 wsum)) {
283 skb->ip_summed = CHECKSUM_UNNECESSARY; 294 skb->ip_summed = CHECKSUM_UNNECESSARY;
284 break; 295 break;
285 } 296 }
286flush: 297
287 NAPI_GRO_CB(skb)->flush = 1; 298 NAPI_GRO_CB(skb)->flush = 1;
288 return NULL; 299 return NULL;
289
290 case CHECKSUM_NONE:
291 wsum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
292 skb_gro_len(skb), IPPROTO_TCP, 0);
293 sum = csum_fold(skb_checksum(skb,
294 skb_gro_offset(skb),
295 skb_gro_len(skb),
296 wsum));
297 if (sum)
298 goto flush;
299
300 skb->ip_summed = CHECKSUM_UNNECESSARY;
301 break;
302 } 300 }
303 301
302skip_csum:
304 return tcp_gro_receive(head, skb); 303 return tcp_gro_receive(head, skb);
305} 304}
306 305
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 5944d7d668dd..a7e4729e974b 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -560,15 +560,11 @@ static inline struct sock *__udp4_lib_lookup_skb(struct sk_buff *skb,
560 __be16 sport, __be16 dport, 560 __be16 sport, __be16 dport,
561 struct udp_table *udptable) 561 struct udp_table *udptable)
562{ 562{
563 struct sock *sk;
564 const struct iphdr *iph = ip_hdr(skb); 563 const struct iphdr *iph = ip_hdr(skb);
565 564
566 if (unlikely(sk = skb_steal_sock(skb))) 565 return __udp4_lib_lookup(dev_net(skb_dst(skb)->dev), iph->saddr, sport,
567 return sk; 566 iph->daddr, dport, inet_iif(skb),
568 else 567 udptable);
569 return __udp4_lib_lookup(dev_net(skb_dst(skb)->dev), iph->saddr, sport,
570 iph->daddr, dport, inet_iif(skb),
571 udptable);
572} 568}
573 569
574struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport, 570struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
@@ -999,7 +995,7 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
999 err = PTR_ERR(rt); 995 err = PTR_ERR(rt);
1000 rt = NULL; 996 rt = NULL;
1001 if (err == -ENETUNREACH) 997 if (err == -ENETUNREACH)
1002 IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES); 998 IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
1003 goto out; 999 goto out;
1004 } 1000 }
1005 1001
@@ -1098,6 +1094,9 @@ int udp_sendpage(struct sock *sk, struct page *page, int offset,
1098 struct udp_sock *up = udp_sk(sk); 1094 struct udp_sock *up = udp_sk(sk);
1099 int ret; 1095 int ret;
1100 1096
1097 if (flags & MSG_SENDPAGE_NOTLAST)
1098 flags |= MSG_MORE;
1099
1101 if (!up->pending) { 1100 if (!up->pending) {
1102 struct msghdr msg = { .msg_flags = flags|MSG_MORE }; 1101 struct msghdr msg = { .msg_flags = flags|MSG_MORE };
1103 1102
@@ -1236,7 +1235,7 @@ int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
1236 bool slow; 1235 bool slow;
1237 1236
1238 if (flags & MSG_ERRQUEUE) 1237 if (flags & MSG_ERRQUEUE)
1239 return ip_recv_error(sk, msg, len); 1238 return ip_recv_error(sk, msg, len, addr_len);
1240 1239
1241try_again: 1240try_again:
1242 skb = __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0), 1241 skb = __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0),
@@ -1600,12 +1599,16 @@ static void flush_stack(struct sock **stack, unsigned int count,
1600 kfree_skb(skb1); 1599 kfree_skb(skb1);
1601} 1600}
1602 1601
1603static void udp_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb) 1602/* For TCP sockets, sk_rx_dst is protected by socket lock
1603 * For UDP, we use xchg() to guard against concurrent changes.
1604 */
1605static void udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
1604{ 1606{
1605 struct dst_entry *dst = skb_dst(skb); 1607 struct dst_entry *old;
1606 1608
1607 dst_hold(dst); 1609 dst_hold(dst);
1608 sk->sk_rx_dst = dst; 1610 old = xchg(&sk->sk_rx_dst, dst);
1611 dst_release(old);
1609} 1612}
1610 1613
1611/* 1614/*
@@ -1736,15 +1739,16 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
1736 if (udp4_csum_init(skb, uh, proto)) 1739 if (udp4_csum_init(skb, uh, proto))
1737 goto csum_error; 1740 goto csum_error;
1738 1741
1739 if (skb->sk) { 1742 sk = skb_steal_sock(skb);
1743 if (sk) {
1744 struct dst_entry *dst = skb_dst(skb);
1740 int ret; 1745 int ret;
1741 sk = skb->sk;
1742 1746
1743 if (unlikely(sk->sk_rx_dst == NULL)) 1747 if (unlikely(sk->sk_rx_dst != dst))
1744 udp_sk_rx_dst_set(sk, skb); 1748 udp_sk_rx_dst_set(sk, dst);
1745 1749
1746 ret = udp_queue_rcv_skb(sk, skb); 1750 ret = udp_queue_rcv_skb(sk, skb);
1747 1751 sock_put(sk);
1748 /* a return value > 0 means to resubmit the input, but 1752 /* a return value > 0 means to resubmit the input, but
1749 * it wants the return to be -protocol, or 0 1753 * it wants the return to be -protocol, or 0
1750 */ 1754 */
@@ -1910,17 +1914,20 @@ static struct sock *__udp4_lib_demux_lookup(struct net *net,
1910 1914
1911void udp_v4_early_demux(struct sk_buff *skb) 1915void udp_v4_early_demux(struct sk_buff *skb)
1912{ 1916{
1913 const struct iphdr *iph = ip_hdr(skb); 1917 struct net *net = dev_net(skb->dev);
1914 const struct udphdr *uh = udp_hdr(skb); 1918 const struct iphdr *iph;
1919 const struct udphdr *uh;
1915 struct sock *sk; 1920 struct sock *sk;
1916 struct dst_entry *dst; 1921 struct dst_entry *dst;
1917 struct net *net = dev_net(skb->dev);
1918 int dif = skb->dev->ifindex; 1922 int dif = skb->dev->ifindex;
1919 1923
1920 /* validate the packet */ 1924 /* validate the packet */
1921 if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct udphdr))) 1925 if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct udphdr)))
1922 return; 1926 return;
1923 1927
1928 iph = ip_hdr(skb);
1929 uh = udp_hdr(skb);
1930
1924 if (skb->pkt_type == PACKET_BROADCAST || 1931 if (skb->pkt_type == PACKET_BROADCAST ||
1925 skb->pkt_type == PACKET_MULTICAST) 1932 skb->pkt_type == PACKET_MULTICAST)
1926 sk = __udp4_lib_mcast_demux_lookup(net, uh->dest, iph->daddr, 1933 sk = __udp4_lib_mcast_demux_lookup(net, uh->dest, iph->daddr,
@@ -2471,6 +2478,7 @@ struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
2471 netdev_features_t features) 2478 netdev_features_t features)
2472{ 2479{
2473 struct sk_buff *segs = ERR_PTR(-EINVAL); 2480 struct sk_buff *segs = ERR_PTR(-EINVAL);
2481 u16 mac_offset = skb->mac_header;
2474 int mac_len = skb->mac_len; 2482 int mac_len = skb->mac_len;
2475 int tnl_hlen = skb_inner_mac_header(skb) - skb_transport_header(skb); 2483 int tnl_hlen = skb_inner_mac_header(skb) - skb_transport_header(skb);
2476 __be16 protocol = skb->protocol; 2484 __be16 protocol = skb->protocol;
@@ -2490,8 +2498,11 @@ struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
2490 /* segment inner packet. */ 2498 /* segment inner packet. */
2491 enc_features = skb->dev->hw_enc_features & netif_skb_features(skb); 2499 enc_features = skb->dev->hw_enc_features & netif_skb_features(skb);
2492 segs = skb_mac_gso_segment(skb, enc_features); 2500 segs = skb_mac_gso_segment(skb, enc_features);
2493 if (!segs || IS_ERR(segs)) 2501 if (!segs || IS_ERR(segs)) {
2502 skb_gso_error_unwind(skb, protocol, tnl_hlen, mac_offset,
2503 mac_len);
2494 goto out; 2504 goto out;
2505 }
2495 2506
2496 outer_hlen = skb_tnl_header_len(skb); 2507 outer_hlen = skb_tnl_header_len(skb);
2497 skb = segs; 2508 skb = segs;
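
The udp_sk_rx_dst_set() hunk above states the locking rule it relies on: TCP can update sk_rx_dst under the socket lock, but UDP has no such serialization, so the new code publishes the cached route with xchg() and releases whatever was installed before. A minimal userspace sketch of the same publish-and-release pattern, with C11 atomics and a plain refcount standing in for xchg()/dst_hold()/dst_release() (names and structure here are illustrative, not taken from the patch):

    /* Illustrative userspace sketch, not kernel code: publish a new cached
     * object with an atomic exchange and drop the reference to the old one,
     * mirroring the xchg()/dst_release() pattern in udp_sk_rx_dst_set(). */
    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct cache_obj {
        atomic_int refcnt;
        int value;
    };

    static struct cache_obj *obj_get(struct cache_obj *o)
    {
        if (o)
            atomic_fetch_add(&o->refcnt, 1);
        return o;
    }

    static void obj_put(struct cache_obj *o)
    {
        if (o && atomic_fetch_sub(&o->refcnt, 1) == 1)
            free(o);
    }

    static _Atomic(struct cache_obj *) cached;

    /* Take a reference on the new object, swap it in atomically, and
     * release whatever was cached before. */
    static void cache_set(struct cache_obj *o)
    {
        struct cache_obj *old;

        obj_get(o);                         /* like dst_hold(dst) */
        old = atomic_exchange(&cached, o);  /* like xchg(&sk->sk_rx_dst, dst) */
        obj_put(old);                       /* like dst_release(old) */
    }

    int main(void)
    {
        struct cache_obj *a = calloc(1, sizeof(*a));
        struct cache_obj *b = calloc(1, sizeof(*b));

        atomic_init(&a->refcnt, 1);
        atomic_init(&b->refcnt, 1);
        a->value = 1;
        b->value = 2;

        cache_set(a);   /* cache now holds a */
        cache_set(b);   /* the old reference to a is dropped safely */
        printf("cached value: %d\n", atomic_load(&cached)->value);

        cache_set(NULL);
        obj_put(a);
        obj_put(b);
        return 0;
    }

Because the swap is a single atomic exchange, two CPUs racing to install a route can never leak the old entry or release it twice.
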
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index 83206de2bc76..79c62bdcd3c5 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -41,6 +41,14 @@ static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
41{ 41{
42 struct sk_buff *segs = ERR_PTR(-EINVAL); 42 struct sk_buff *segs = ERR_PTR(-EINVAL);
43 unsigned int mss; 43 unsigned int mss;
44 int offset;
45 __wsum csum;
46
47 if (skb->encapsulation &&
48 skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL) {
49 segs = skb_udp_tunnel_segment(skb, features);
50 goto out;
51 }
44 52
45 mss = skb_shinfo(skb)->gso_size; 53 mss = skb_shinfo(skb)->gso_size;
46 if (unlikely(skb->len <= mss)) 54 if (unlikely(skb->len <= mss))
@@ -63,27 +71,20 @@ static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
63 goto out; 71 goto out;
64 } 72 }
65 73
74 /* Do software UFO. Complete and fill in the UDP checksum as
75 * HW cannot do checksum of UDP packets sent as multiple
76 * IP fragments.
77 */
78 offset = skb_checksum_start_offset(skb);
79 csum = skb_checksum(skb, offset, skb->len - offset, 0);
80 offset += skb->csum_offset;
81 *(__sum16 *)(skb->data + offset) = csum_fold(csum);
82 skb->ip_summed = CHECKSUM_NONE;
83
66 /* Fragment the skb. IP headers of the fragments are updated in 84 /* Fragment the skb. IP headers of the fragments are updated in
67 * inet_gso_segment() 85 * inet_gso_segment()
68 */ 86 */
69 if (skb->encapsulation && skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL) 87 segs = skb_segment(skb, features);
70 segs = skb_udp_tunnel_segment(skb, features);
71 else {
72 int offset;
73 __wsum csum;
74
75 /* Do software UFO. Complete and fill in the UDP checksum as
76 * HW cannot do checksum of UDP packets sent as multiple
77 * IP fragments.
78 */
79 offset = skb_checksum_start_offset(skb);
80 csum = skb_checksum(skb, offset, skb->len - offset, 0);
81 offset += skb->csum_offset;
82 *(__sum16 *)(skb->data + offset) = csum_fold(csum);
83 skb->ip_summed = CHECKSUM_NONE;
84
85 segs = skb_segment(skb, features);
86 }
87out: 88out:
88 return segs; 89 return segs;
89} 90}
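
The block moved above ("Do software UFO ...") completes the UDP checksum in software before fragmentation: it sums the payload with skb_checksum() and folds the 32-bit accumulator into the final 16-bit field with csum_fold(). The fold itself is the standard RFC 1071 Internet checksum; a self-contained userspace version of the same arithmetic (illustrative only, operating on a plain buffer rather than an skb) looks like this:

    /* Illustrative sketch: RFC 1071 one's-complement checksum with the
     * carry-fold step that csum_fold() performs in the kernel. */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static uint16_t csum_fold32(uint32_t sum)
    {
        /* Fold carries back in until the value fits in 16 bits, then invert. */
        while (sum >> 16)
            sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)~sum;
    }

    static uint16_t inet_checksum(const void *data, size_t len)
    {
        const uint8_t *p = data;
        uint32_t sum = 0;

        while (len > 1) {
            sum += (uint32_t)p[0] << 8 | p[1];   /* 16-bit big-endian words */
            p += 2;
            len -= 2;
        }
        if (len)                                 /* odd trailing byte */
            sum += (uint32_t)p[0] << 8;

        return csum_fold32(sum);
    }

    int main(void)
    {
        const char payload[] = "example UDP payload";

        printf("checksum: 0x%04x\n", inet_checksum(payload, strlen(payload)));
        return 0;
    }
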
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 12c97d8aa6bb..abe46a4228ce 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -1671,7 +1671,7 @@ void addrconf_leave_solict(struct inet6_dev *idev, const struct in6_addr *addr)
1671static void addrconf_join_anycast(struct inet6_ifaddr *ifp) 1671static void addrconf_join_anycast(struct inet6_ifaddr *ifp)
1672{ 1672{
1673 struct in6_addr addr; 1673 struct in6_addr addr;
1674 if (ifp->prefix_len == 127) /* RFC 6164 */ 1674 if (ifp->prefix_len >= 127) /* RFC 6164 */
1675 return; 1675 return;
1676 ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len); 1676 ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len);
1677 if (ipv6_addr_any(&addr)) 1677 if (ipv6_addr_any(&addr))
@@ -1682,7 +1682,7 @@ static void addrconf_join_anycast(struct inet6_ifaddr *ifp)
1682static void addrconf_leave_anycast(struct inet6_ifaddr *ifp) 1682static void addrconf_leave_anycast(struct inet6_ifaddr *ifp)
1683{ 1683{
1684 struct in6_addr addr; 1684 struct in6_addr addr;
1685 if (ifp->prefix_len == 127) /* RFC 6164 */ 1685 if (ifp->prefix_len >= 127) /* RFC 6164 */
1686 return; 1686 return;
1687 ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len); 1687 ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len);
1688 if (ipv6_addr_any(&addr)) 1688 if (ipv6_addr_any(&addr))
@@ -2509,7 +2509,8 @@ static void add_addr(struct inet6_dev *idev, const struct in6_addr *addr,
2509 struct inet6_ifaddr *ifp; 2509 struct inet6_ifaddr *ifp;
2510 2510
2511 ifp = ipv6_add_addr(idev, addr, NULL, plen, 2511 ifp = ipv6_add_addr(idev, addr, NULL, plen,
2512 scope, IFA_F_PERMANENT, 0, 0); 2512 scope, IFA_F_PERMANENT,
2513 INFINITY_LIFE_TIME, INFINITY_LIFE_TIME);
2513 if (!IS_ERR(ifp)) { 2514 if (!IS_ERR(ifp)) {
2514 spin_lock_bh(&ifp->lock); 2515 spin_lock_bh(&ifp->lock);
2515 ifp->flags &= ~IFA_F_TENTATIVE; 2516 ifp->flags &= ~IFA_F_TENTATIVE;
@@ -2613,7 +2614,7 @@ static void init_loopback(struct net_device *dev)
2613 if (sp_ifa->rt) 2614 if (sp_ifa->rt)
2614 continue; 2615 continue;
2615 2616
2616 sp_rt = addrconf_dst_alloc(idev, &sp_ifa->addr, 0); 2617 sp_rt = addrconf_dst_alloc(idev, &sp_ifa->addr, false);
2617 2618
2618 /* Failure cases are ignored */ 2619 /* Failure cases are ignored */
2619 if (!IS_ERR(sp_rt)) { 2620 if (!IS_ERR(sp_rt)) {
@@ -2637,7 +2638,8 @@ static void addrconf_add_linklocal(struct inet6_dev *idev, const struct in6_addr
2637#endif 2638#endif
2638 2639
2639 2640
2640 ifp = ipv6_add_addr(idev, addr, NULL, 64, IFA_LINK, addr_flags, 0, 0); 2641 ifp = ipv6_add_addr(idev, addr, NULL, 64, IFA_LINK, addr_flags,
2642 INFINITY_LIFE_TIME, INFINITY_LIFE_TIME);
2641 if (!IS_ERR(ifp)) { 2643 if (!IS_ERR(ifp)) {
2642 addrconf_prefix_route(&ifp->addr, ifp->prefix_len, idev->dev, 0, 0); 2644 addrconf_prefix_route(&ifp->addr, ifp->prefix_len, idev->dev, 0, 0);
2643 addrconf_dad_start(ifp); 2645 addrconf_dad_start(ifp);
@@ -3456,7 +3458,12 @@ restart:
3456 &inet6_addr_lst[i], addr_lst) { 3458 &inet6_addr_lst[i], addr_lst) {
3457 unsigned long age; 3459 unsigned long age;
3458 3460
3459 if (ifp->flags & IFA_F_PERMANENT) 3461 /* When setting preferred_lft to a value not zero or
3462 * infinity, while valid_lft is infinity
3463 * IFA_F_PERMANENT has a non-infinity life time.
3464 */
3465 if ((ifp->flags & IFA_F_PERMANENT) &&
3466 (ifp->prefered_lft == INFINITY_LIFE_TIME))
3460 continue; 3467 continue;
3461 3468
3462 spin_lock(&ifp->lock); 3469 spin_lock(&ifp->lock);
@@ -3481,7 +3488,8 @@ restart:
3481 ifp->flags |= IFA_F_DEPRECATED; 3488 ifp->flags |= IFA_F_DEPRECATED;
3482 } 3489 }
3483 3490
3484 if (time_before(ifp->tstamp + ifp->valid_lft * HZ, next)) 3491 if ((ifp->valid_lft != INFINITY_LIFE_TIME) &&
3492 (time_before(ifp->tstamp + ifp->valid_lft * HZ, next)))
3485 next = ifp->tstamp + ifp->valid_lft * HZ; 3493 next = ifp->tstamp + ifp->valid_lft * HZ;
3486 3494
3487 spin_unlock(&ifp->lock); 3495 spin_unlock(&ifp->lock);
@@ -3761,7 +3769,8 @@ static int inet6_fill_ifaddr(struct sk_buff *skb, struct inet6_ifaddr *ifa,
3761 put_ifaddrmsg(nlh, ifa->prefix_len, ifa->flags, rt_scope(ifa->scope), 3769 put_ifaddrmsg(nlh, ifa->prefix_len, ifa->flags, rt_scope(ifa->scope),
3762 ifa->idev->dev->ifindex); 3770 ifa->idev->dev->ifindex);
3763 3771
3764 if (!(ifa->flags&IFA_F_PERMANENT)) { 3772 if (!((ifa->flags&IFA_F_PERMANENT) &&
3773 (ifa->prefered_lft == INFINITY_LIFE_TIME))) {
3765 preferred = ifa->prefered_lft; 3774 preferred = ifa->prefered_lft;
3766 valid = ifa->valid_lft; 3775 valid = ifa->valid_lft;
3767 if (preferred != INFINITY_LIFE_TIME) { 3776 if (preferred != INFINITY_LIFE_TIME) {
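
The addrconf changes above tighten what "permanent" means for lifetime handling: an address is exempt from the periodic lifetime checks (and reported without lifetimes over netlink) only when it both carries IFA_F_PERMANENT and has an infinite preferred lifetime, so a permanent address configured with a finite preferred_lft is still deprecated on schedule. A small standalone sketch of that predicate (constants mirrored from the uapi values, structure heavily simplified):

    /* Illustrative sketch: when may the lifetime housekeeping skip an address? */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define IFA_F_PERMANENT     0x80u
    #define INFINITY_LIFE_TIME  0xffffffffu

    struct ifaddr {
        unsigned int flags;
        uint32_t prefered_lft;   /* kernel spelling */
        uint32_t valid_lft;
    };

    /* True only for permanent addresses with an infinite preferred lifetime. */
    static bool addr_skip_lifetime_checks(const struct ifaddr *ifa)
    {
        return (ifa->flags & IFA_F_PERMANENT) &&
               ifa->prefered_lft == INFINITY_LIFE_TIME;
    }

    int main(void)
    {
        struct ifaddr forever = { IFA_F_PERMANENT, INFINITY_LIFE_TIME, INFINITY_LIFE_TIME };
        struct ifaddr limited = { IFA_F_PERMANENT, 3600, INFINITY_LIFE_TIME };

        printf("forever skipped: %d\n", addr_skip_lifetime_checks(&forever));
        printf("limited skipped: %d\n", addr_skip_lifetime_checks(&limited));
        return 0;
    }
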
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index a454b0ff57c7..93b1aa34c432 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -73,7 +73,6 @@ int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
73 flowlabel = fl6_sock_lookup(sk, fl6.flowlabel); 73 flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
74 if (flowlabel == NULL) 74 if (flowlabel == NULL)
75 return -EINVAL; 75 return -EINVAL;
76 usin->sin6_addr = flowlabel->dst;
77 } 76 }
78 } 77 }
79 78
@@ -318,7 +317,7 @@ void ipv6_local_rxpmtu(struct sock *sk, struct flowi6 *fl6, u32 mtu)
318/* 317/*
319 * Handle MSG_ERRQUEUE 318 * Handle MSG_ERRQUEUE
320 */ 319 */
321int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len) 320int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
322{ 321{
323 struct ipv6_pinfo *np = inet6_sk(sk); 322 struct ipv6_pinfo *np = inet6_sk(sk);
324 struct sock_exterr_skb *serr; 323 struct sock_exterr_skb *serr;
@@ -369,6 +368,7 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len)
369 &sin->sin6_addr); 368 &sin->sin6_addr);
370 sin->sin6_scope_id = 0; 369 sin->sin6_scope_id = 0;
371 } 370 }
371 *addr_len = sizeof(*sin);
372 } 372 }
373 373
374 memcpy(&errhdr.ee, &serr->ee, sizeof(struct sock_extended_err)); 374 memcpy(&errhdr.ee, &serr->ee, sizeof(struct sock_extended_err));
@@ -377,6 +377,7 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len)
377 if (serr->ee.ee_origin != SO_EE_ORIGIN_LOCAL) { 377 if (serr->ee.ee_origin != SO_EE_ORIGIN_LOCAL) {
378 sin->sin6_family = AF_INET6; 378 sin->sin6_family = AF_INET6;
379 sin->sin6_flowinfo = 0; 379 sin->sin6_flowinfo = 0;
380 sin->sin6_port = 0;
380 if (skb->protocol == htons(ETH_P_IPV6)) { 381 if (skb->protocol == htons(ETH_P_IPV6)) {
381 sin->sin6_addr = ipv6_hdr(skb)->saddr; 382 sin->sin6_addr = ipv6_hdr(skb)->saddr;
382 if (np->rxopt.all) 383 if (np->rxopt.all)
@@ -423,7 +424,8 @@ EXPORT_SYMBOL_GPL(ipv6_recv_error);
423/* 424/*
424 * Handle IPV6_RECVPATHMTU 425 * Handle IPV6_RECVPATHMTU
425 */ 426 */
426int ipv6_recv_rxpmtu(struct sock *sk, struct msghdr *msg, int len) 427int ipv6_recv_rxpmtu(struct sock *sk, struct msghdr *msg, int len,
428 int *addr_len)
427{ 429{
428 struct ipv6_pinfo *np = inet6_sk(sk); 430 struct ipv6_pinfo *np = inet6_sk(sk);
429 struct sk_buff *skb; 431 struct sk_buff *skb;
@@ -457,6 +459,7 @@ int ipv6_recv_rxpmtu(struct sock *sk, struct msghdr *msg, int len)
457 sin->sin6_port = 0; 459 sin->sin6_port = 0;
458 sin->sin6_scope_id = mtu_info.ip6m_addr.sin6_scope_id; 460 sin->sin6_scope_id = mtu_info.ip6m_addr.sin6_scope_id;
459 sin->sin6_addr = mtu_info.ip6m_addr.sin6_addr; 461 sin->sin6_addr = mtu_info.ip6m_addr.sin6_addr;
462 *addr_len = sizeof(*sin);
460 } 463 }
461 464
462 put_cmsg(msg, SOL_IPV6, IPV6_PATHMTU, sizeof(mtu_info), &mtu_info); 465 put_cmsg(msg, SOL_IPV6, IPV6_PATHMTU, sizeof(mtu_info), &mtu_info);
diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c
index e27591635f92..3fd0a578329e 100644
--- a/net/ipv6/fib6_rules.c
+++ b/net/ipv6/fib6_rules.c
@@ -122,7 +122,11 @@ out:
122static bool fib6_rule_suppress(struct fib_rule *rule, struct fib_lookup_arg *arg) 122static bool fib6_rule_suppress(struct fib_rule *rule, struct fib_lookup_arg *arg)
123{ 123{
124 struct rt6_info *rt = (struct rt6_info *) arg->result; 124 struct rt6_info *rt = (struct rt6_info *) arg->result;
125 struct net_device *dev = rt->rt6i_idev->dev; 125 struct net_device *dev = NULL;
126
127 if (rt->rt6i_idev)
128 dev = rt->rt6i_idev->dev;
129
126 /* do not accept result if the route does 130 /* do not accept result if the route does
127 * not meet the required prefix length 131 * not meet the required prefix length
128 */ 132 */
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 59df872e2f4d..e6f931997996 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -116,8 +116,8 @@ static int ip6_finish_output2(struct sk_buff *skb)
116 } 116 }
117 rcu_read_unlock_bh(); 117 rcu_read_unlock_bh();
118 118
119 IP6_INC_STATS_BH(dev_net(dst->dev), 119 IP6_INC_STATS(dev_net(dst->dev),
120 ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES); 120 ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
121 kfree_skb(skb); 121 kfree_skb(skb);
122 return -EINVAL; 122 return -EINVAL;
123} 123}
@@ -1193,11 +1193,35 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
1193 1193
1194 fragheaderlen = sizeof(struct ipv6hdr) + rt->rt6i_nfheader_len + 1194 fragheaderlen = sizeof(struct ipv6hdr) + rt->rt6i_nfheader_len +
1195 (opt ? opt->opt_nflen : 0); 1195 (opt ? opt->opt_nflen : 0);
1196 maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen - sizeof(struct frag_hdr); 1196 maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen -
1197 sizeof(struct frag_hdr);
1197 1198
1198 if (mtu <= sizeof(struct ipv6hdr) + IPV6_MAXPLEN) { 1199 if (mtu <= sizeof(struct ipv6hdr) + IPV6_MAXPLEN) {
1199 if (cork->length + length > sizeof(struct ipv6hdr) + IPV6_MAXPLEN - fragheaderlen) { 1200 unsigned int maxnonfragsize, headersize;
1200 ipv6_local_error(sk, EMSGSIZE, fl6, mtu-exthdrlen); 1201
1202 headersize = sizeof(struct ipv6hdr) +
1203 (opt ? opt->tot_len : 0) +
1204 (dst_allfrag(&rt->dst) ?
1205 sizeof(struct frag_hdr) : 0) +
1206 rt->rt6i_nfheader_len;
1207
1208 maxnonfragsize = (np->pmtudisc >= IPV6_PMTUDISC_DO) ?
1209 mtu : sizeof(struct ipv6hdr) + IPV6_MAXPLEN;
1210
1211 /* dontfrag active */
1212 if ((cork->length + length > mtu - headersize) && dontfrag &&
1213 (sk->sk_protocol == IPPROTO_UDP ||
1214 sk->sk_protocol == IPPROTO_RAW)) {
1215 ipv6_local_rxpmtu(sk, fl6, mtu - headersize +
1216 sizeof(struct ipv6hdr));
1217 goto emsgsize;
1218 }
1219
1220 if (cork->length + length > maxnonfragsize - headersize) {
1221emsgsize:
1222 ipv6_local_error(sk, EMSGSIZE, fl6,
1223 mtu - headersize +
1224 sizeof(struct ipv6hdr));
1201 return -EMSGSIZE; 1225 return -EMSGSIZE;
1202 } 1226 }
1203 } 1227 }
@@ -1222,12 +1246,6 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
1222 * --yoshfuji 1246 * --yoshfuji
1223 */ 1247 */
1224 1248
1225 if ((length > mtu) && dontfrag && (sk->sk_protocol == IPPROTO_UDP ||
1226 sk->sk_protocol == IPPROTO_RAW)) {
1227 ipv6_local_rxpmtu(sk, fl6, mtu-exthdrlen);
1228 return -EMSGSIZE;
1229 }
1230
1231 skb = skb_peek_tail(&sk->sk_write_queue); 1249 skb = skb_peek_tail(&sk->sk_write_queue);
1232 cork->length += length; 1250 cork->length += length;
1233 if (((length > mtu) || 1251 if (((length > mtu) ||
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index d6062325db08..7881965a8248 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -103,16 +103,25 @@ struct ip6_tnl_net {
103 103
104static struct net_device_stats *ip6_get_stats(struct net_device *dev) 104static struct net_device_stats *ip6_get_stats(struct net_device *dev)
105{ 105{
106 struct pcpu_tstats sum = { 0 }; 106 struct pcpu_tstats tmp, sum = { 0 };
107 int i; 107 int i;
108 108
109 for_each_possible_cpu(i) { 109 for_each_possible_cpu(i) {
110 unsigned int start;
110 const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i); 111 const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i);
111 112
112 sum.rx_packets += tstats->rx_packets; 113 do {
113 sum.rx_bytes += tstats->rx_bytes; 114 start = u64_stats_fetch_begin_bh(&tstats->syncp);
114 sum.tx_packets += tstats->tx_packets; 115 tmp.rx_packets = tstats->rx_packets;
115 sum.tx_bytes += tstats->tx_bytes; 116 tmp.rx_bytes = tstats->rx_bytes;
117 tmp.tx_packets = tstats->tx_packets;
118 tmp.tx_bytes = tstats->tx_bytes;
119 } while (u64_stats_fetch_retry_bh(&tstats->syncp, start));
120
121 sum.rx_packets += tmp.rx_packets;
122 sum.rx_bytes += tmp.rx_bytes;
123 sum.tx_packets += tmp.tx_packets;
124 sum.tx_bytes += tmp.tx_bytes;
116 } 125 }
117 dev->stats.rx_packets = sum.rx_packets; 126 dev->stats.rx_packets = sum.rx_packets;
118 dev->stats.rx_bytes = sum.rx_bytes; 127 dev->stats.rx_bytes = sum.rx_bytes;
@@ -824,8 +833,10 @@ static int ip6_tnl_rcv(struct sk_buff *skb, __u16 protocol,
824 } 833 }
825 834
826 tstats = this_cpu_ptr(t->dev->tstats); 835 tstats = this_cpu_ptr(t->dev->tstats);
836 u64_stats_update_begin(&tstats->syncp);
827 tstats->rx_packets++; 837 tstats->rx_packets++;
828 tstats->rx_bytes += skb->len; 838 tstats->rx_bytes += skb->len;
839 u64_stats_update_end(&tstats->syncp);
829 840
830 netif_rx(skb); 841 netif_rx(skb);
831 842
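
ip6_get_stats() above switches the per-CPU counter reads to the u64_stats sequence-counter helpers: the writer brackets each update between u64_stats_update_begin()/end(), and the reader retries with u64_stats_fetch_begin_bh()/_retry_bh() until it observes a stable, even sequence, so the pair of 64-bit counters is read consistently even on 32-bit machines. A single-writer userspace sketch of the same retry protocol (C11 atomics, deliberately simplified; the kernel helpers also hide the exact memory barriers and the 32-bit/64-bit special cases):

    /* Illustrative single-writer sketch of a seqcount-protected counter pair. */
    #include <stdatomic.h>
    #include <stdio.h>

    struct stats {
        atomic_uint   seq;         /* odd while an update is in progress */
        atomic_ullong rx_packets;
        atomic_ullong rx_bytes;
    };

    static void stats_add(struct stats *s, unsigned long long bytes)
    {
        atomic_fetch_add(&s->seq, 1);          /* begin: seq becomes odd */
        atomic_fetch_add(&s->rx_packets, 1);
        atomic_fetch_add(&s->rx_bytes, bytes);
        atomic_fetch_add(&s->seq, 1);          /* end: seq even again */
    }

    static void stats_read(struct stats *s,
                           unsigned long long *pkts, unsigned long long *bytes)
    {
        unsigned int start;

        do {                                   /* retry until a stable snapshot */
            start  = atomic_load(&s->seq);
            *pkts  = atomic_load(&s->rx_packets);
            *bytes = atomic_load(&s->rx_bytes);
        } while ((start & 1) || start != atomic_load(&s->seq));
    }

    int main(void)
    {
        struct stats s;
        unsigned long long pkts, bytes;

        atomic_init(&s.seq, 0);
        atomic_init(&s.rx_packets, 0);
        atomic_init(&s.rx_bytes, 0);

        stats_add(&s, 1500);
        stats_add(&s, 64);
        stats_read(&s, &pkts, &bytes);
        printf("%llu packets, %llu bytes\n", pkts, bytes);
        return 0;
    }
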
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index ed94ba61dda0..7b42d5ef868d 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -75,26 +75,6 @@ struct vti6_net {
75 struct ip6_tnl __rcu **tnls[2]; 75 struct ip6_tnl __rcu **tnls[2];
76}; 76};
77 77
78static struct net_device_stats *vti6_get_stats(struct net_device *dev)
79{
80 struct pcpu_tstats sum = { 0 };
81 int i;
82
83 for_each_possible_cpu(i) {
84 const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i);
85
86 sum.rx_packets += tstats->rx_packets;
87 sum.rx_bytes += tstats->rx_bytes;
88 sum.tx_packets += tstats->tx_packets;
89 sum.tx_bytes += tstats->tx_bytes;
90 }
91 dev->stats.rx_packets = sum.rx_packets;
92 dev->stats.rx_bytes = sum.rx_bytes;
93 dev->stats.tx_packets = sum.tx_packets;
94 dev->stats.tx_bytes = sum.tx_bytes;
95 return &dev->stats;
96}
97
98#define for_each_vti6_tunnel_rcu(start) \ 78#define for_each_vti6_tunnel_rcu(start) \
99 for (t = rcu_dereference(start); t; t = rcu_dereference(t->next)) 79 for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
100 80
@@ -331,8 +311,10 @@ static int vti6_rcv(struct sk_buff *skb)
331 } 311 }
332 312
333 tstats = this_cpu_ptr(t->dev->tstats); 313 tstats = this_cpu_ptr(t->dev->tstats);
314 u64_stats_update_begin(&tstats->syncp);
334 tstats->rx_packets++; 315 tstats->rx_packets++;
335 tstats->rx_bytes += skb->len; 316 tstats->rx_bytes += skb->len;
317 u64_stats_update_end(&tstats->syncp);
336 318
337 skb->mark = 0; 319 skb->mark = 0;
338 secpath_reset(skb); 320 secpath_reset(skb);
@@ -716,7 +698,7 @@ static const struct net_device_ops vti6_netdev_ops = {
716 .ndo_start_xmit = vti6_tnl_xmit, 698 .ndo_start_xmit = vti6_tnl_xmit,
717 .ndo_do_ioctl = vti6_ioctl, 699 .ndo_do_ioctl = vti6_ioctl,
718 .ndo_change_mtu = vti6_change_mtu, 700 .ndo_change_mtu = vti6_change_mtu,
719 .ndo_get_stats = vti6_get_stats, 701 .ndo_get_stats64 = ip_tunnel_get_stats64,
720}; 702};
721 703
722/** 704/**
@@ -750,12 +732,18 @@ static void vti6_dev_setup(struct net_device *dev)
750static inline int vti6_dev_init_gen(struct net_device *dev) 732static inline int vti6_dev_init_gen(struct net_device *dev)
751{ 733{
752 struct ip6_tnl *t = netdev_priv(dev); 734 struct ip6_tnl *t = netdev_priv(dev);
735 int i;
753 736
754 t->dev = dev; 737 t->dev = dev;
755 t->net = dev_net(dev); 738 t->net = dev_net(dev);
756 dev->tstats = alloc_percpu(struct pcpu_tstats); 739 dev->tstats = alloc_percpu(struct pcpu_tstats);
757 if (!dev->tstats) 740 if (!dev->tstats)
758 return -ENOMEM; 741 return -ENOMEM;
742 for_each_possible_cpu(i) {
743 struct pcpu_tstats *stats;
744 stats = per_cpu_ptr(dev->tstats, i);
745 u64_stats_init(&stats->syncp);
746 }
759 return 0; 747 return 0;
760} 748}
761 749
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 3512177deb4d..300865171394 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -1277,6 +1277,9 @@ skip_linkparms:
1277 ri->prefix_len == 0) 1277 ri->prefix_len == 0)
1278 continue; 1278 continue;
1279#endif 1279#endif
1280 if (ri->prefix_len == 0 &&
1281 !in6_dev->cnf.accept_ra_defrtr)
1282 continue;
1280 if (ri->prefix_len > in6_dev->cnf.accept_ra_rt_info_max_plen) 1283 if (ri->prefix_len > in6_dev->cnf.accept_ra_rt_info_max_plen)
1281 continue; 1284 continue;
1282 rt6_route_rcv(skb->dev, (u8*)p, (p->nd_opt_len) << 3, 1285 rt6_route_rcv(skb->dev, (u8*)p, (p->nd_opt_len) << 3,
diff --git a/net/ipv6/netfilter/ip6t_SYNPROXY.c b/net/ipv6/netfilter/ip6t_SYNPROXY.c
index f78f41aca8e9..a0d17270117c 100644
--- a/net/ipv6/netfilter/ip6t_SYNPROXY.c
+++ b/net/ipv6/netfilter/ip6t_SYNPROXY.c
@@ -446,6 +446,7 @@ static void synproxy_tg6_destroy(const struct xt_tgdtor_param *par)
446static struct xt_target synproxy_tg6_reg __read_mostly = { 446static struct xt_target synproxy_tg6_reg __read_mostly = {
447 .name = "SYNPROXY", 447 .name = "SYNPROXY",
448 .family = NFPROTO_IPV6, 448 .family = NFPROTO_IPV6,
449 .hooks = (1 << NF_INET_LOCAL_IN) | (1 << NF_INET_FORWARD),
449 .target = synproxy_tg6, 450 .target = synproxy_tg6,
450 .targetsize = sizeof(struct xt_synproxy_info), 451 .targetsize = sizeof(struct xt_synproxy_info),
451 .checkentry = synproxy_tg6_check, 452 .checkentry = synproxy_tg6_check,
diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
index 8815e31a87fe..a83243c3d656 100644
--- a/net/ipv6/ping.c
+++ b/net/ipv6/ping.c
@@ -57,7 +57,8 @@ static struct inet_protosw pingv6_protosw = {
57 57
58 58
59/* Compatibility glue so we can support IPv6 when it's compiled as a module */ 59/* Compatibility glue so we can support IPv6 when it's compiled as a module */
60static int dummy_ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len) 60static int dummy_ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len,
61 int *addr_len)
61{ 62{
62 return -EAFNOSUPPORT; 63 return -EAFNOSUPPORT;
63} 64}
diff --git a/net/ipv6/protocol.c b/net/ipv6/protocol.c
index 22d1bd4670da..e048cf1bb6a2 100644
--- a/net/ipv6/protocol.c
+++ b/net/ipv6/protocol.c
@@ -36,10 +36,6 @@ int inet6_add_protocol(const struct inet6_protocol *prot, unsigned char protocol
36} 36}
37EXPORT_SYMBOL(inet6_add_protocol); 37EXPORT_SYMBOL(inet6_add_protocol);
38 38
39/*
40 * Remove a protocol from the hash tables.
41 */
42
43int inet6_del_protocol(const struct inet6_protocol *prot, unsigned char protocol) 39int inet6_del_protocol(const struct inet6_protocol *prot, unsigned char protocol)
44{ 40{
45 int ret; 41 int ret;
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index e24ff1df0401..b6bb87e55805 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -466,10 +466,10 @@ static int rawv6_recvmsg(struct kiocb *iocb, struct sock *sk,
466 return -EOPNOTSUPP; 466 return -EOPNOTSUPP;
467 467
468 if (flags & MSG_ERRQUEUE) 468 if (flags & MSG_ERRQUEUE)
469 return ipv6_recv_error(sk, msg, len); 469 return ipv6_recv_error(sk, msg, len, addr_len);
470 470
471 if (np->rxpmtu && np->rxopt.bits.rxpmtu) 471 if (np->rxpmtu && np->rxopt.bits.rxpmtu)
472 return ipv6_recv_rxpmtu(sk, msg, len); 472 return ipv6_recv_rxpmtu(sk, msg, len, addr_len);
473 473
474 skb = skb_recv_datagram(sk, flags, noblock, &err); 474 skb = skb_recv_datagram(sk, flags, noblock, &err);
475 if (!skb) 475 if (!skb)
@@ -792,7 +792,6 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
792 flowlabel = fl6_sock_lookup(sk, fl6.flowlabel); 792 flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
793 if (flowlabel == NULL) 793 if (flowlabel == NULL)
794 return -EINVAL; 794 return -EINVAL;
795 daddr = &flowlabel->dst;
796 } 795 }
797 } 796 }
798 797
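
The ipv6_recv_error()/ipv6_recv_rxpmtu() changes above add an addr_len out-parameter: the helper now reports how many bytes of the caller's sockaddr it actually filled, and the recvmsg() callers use that value rather than a fixed assumption. A toy illustration of the out-parameter pattern (hypothetical function and userspace types, not the kernel API):

    /* Illustrative sketch: fill an address and report exactly how much was filled. */
    #include <stdio.h>
    #include <string.h>
    #include <netinet/in.h>

    static int recv_error_addr(struct sockaddr_in6 *addr, int *addr_len,
                               const struct in6_addr *src, unsigned short port)
    {
        memset(addr, 0, sizeof(*addr));
        addr->sin6_family = AF_INET6;
        addr->sin6_port = port;
        addr->sin6_addr = *src;
        *addr_len = sizeof(*addr);   /* caller trusts only this many bytes */
        return 0;
    }

    int main(void)
    {
        struct sockaddr_in6 sin6;
        int len = 0;
        struct in6_addr src = IN6ADDR_LOOPBACK_INIT;

        recv_error_addr(&sin6, &len, &src, 0);
        printf("filled %d of %zu bytes\n", len, sizeof(sin6));
        return 0;
    }
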
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 7faa9d5e1503..4b4944c3e4c4 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -84,6 +84,8 @@ static int ip6_dst_gc(struct dst_ops *ops);
84 84
85static int ip6_pkt_discard(struct sk_buff *skb); 85static int ip6_pkt_discard(struct sk_buff *skb);
86static int ip6_pkt_discard_out(struct sk_buff *skb); 86static int ip6_pkt_discard_out(struct sk_buff *skb);
87static int ip6_pkt_prohibit(struct sk_buff *skb);
88static int ip6_pkt_prohibit_out(struct sk_buff *skb);
87static void ip6_link_failure(struct sk_buff *skb); 89static void ip6_link_failure(struct sk_buff *skb);
88static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk, 90static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
89 struct sk_buff *skb, u32 mtu); 91 struct sk_buff *skb, u32 mtu);
@@ -234,9 +236,6 @@ static const struct rt6_info ip6_null_entry_template = {
234 236
235#ifdef CONFIG_IPV6_MULTIPLE_TABLES 237#ifdef CONFIG_IPV6_MULTIPLE_TABLES
236 238
237static int ip6_pkt_prohibit(struct sk_buff *skb);
238static int ip6_pkt_prohibit_out(struct sk_buff *skb);
239
240static const struct rt6_info ip6_prohibit_entry_template = { 239static const struct rt6_info ip6_prohibit_entry_template = {
241 .dst = { 240 .dst = {
242 .__refcnt = ATOMIC_INIT(1), 241 .__refcnt = ATOMIC_INIT(1),
@@ -1565,21 +1564,24 @@ int ip6_route_add(struct fib6_config *cfg)
1565 goto out; 1564 goto out;
1566 } 1565 }
1567 } 1566 }
1568 rt->dst.output = ip6_pkt_discard_out;
1569 rt->dst.input = ip6_pkt_discard;
1570 rt->rt6i_flags = RTF_REJECT|RTF_NONEXTHOP; 1567 rt->rt6i_flags = RTF_REJECT|RTF_NONEXTHOP;
1571 switch (cfg->fc_type) { 1568 switch (cfg->fc_type) {
1572 case RTN_BLACKHOLE: 1569 case RTN_BLACKHOLE:
1573 rt->dst.error = -EINVAL; 1570 rt->dst.error = -EINVAL;
1571 rt->dst.output = dst_discard;
1572 rt->dst.input = dst_discard;
1574 break; 1573 break;
1575 case RTN_PROHIBIT: 1574 case RTN_PROHIBIT:
1576 rt->dst.error = -EACCES; 1575 rt->dst.error = -EACCES;
1576 rt->dst.output = ip6_pkt_prohibit_out;
1577 rt->dst.input = ip6_pkt_prohibit;
1577 break; 1578 break;
1578 case RTN_THROW: 1579 case RTN_THROW:
1579 rt->dst.error = -EAGAIN;
1580 break;
1581 default: 1580 default:
1582 rt->dst.error = -ENETUNREACH; 1581 rt->dst.error = (cfg->fc_type == RTN_THROW) ? -EAGAIN
1582 : -ENETUNREACH;
1583 rt->dst.output = ip6_pkt_discard_out;
1584 rt->dst.input = ip6_pkt_discard;
1583 break; 1585 break;
1584 } 1586 }
1585 goto install_route; 1587 goto install_route;
@@ -1903,9 +1905,7 @@ static struct rt6_info *ip6_rt_copy(struct rt6_info *ort,
1903 else 1905 else
1904 rt->rt6i_gateway = *dest; 1906 rt->rt6i_gateway = *dest;
1905 rt->rt6i_flags = ort->rt6i_flags; 1907 rt->rt6i_flags = ort->rt6i_flags;
1906 if ((ort->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF)) == 1908 rt6_set_from(rt, ort);
1907 (RTF_DEFAULT | RTF_ADDRCONF))
1908 rt6_set_from(rt, ort);
1909 rt->rt6i_metric = 0; 1909 rt->rt6i_metric = 0;
1910 1910
1911#ifdef CONFIG_IPV6_SUBTREES 1911#ifdef CONFIG_IPV6_SUBTREES
@@ -2144,8 +2144,6 @@ static int ip6_pkt_discard_out(struct sk_buff *skb)
2144 return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_OUTNOROUTES); 2144 return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_OUTNOROUTES);
2145} 2145}
2146 2146
2147#ifdef CONFIG_IPV6_MULTIPLE_TABLES
2148
2149static int ip6_pkt_prohibit(struct sk_buff *skb) 2147static int ip6_pkt_prohibit(struct sk_buff *skb)
2150{ 2148{
2151 return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_INNOROUTES); 2149 return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_INNOROUTES);
@@ -2157,8 +2155,6 @@ static int ip6_pkt_prohibit_out(struct sk_buff *skb)
2157 return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_OUTNOROUTES); 2155 return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_OUTNOROUTES);
2158} 2156}
2159 2157
2160#endif
2161
2162/* 2158/*
2163 * Allocate a dst for local (unicast / anycast) address. 2159 * Allocate a dst for local (unicast / anycast) address.
2164 */ 2160 */
@@ -2168,12 +2164,10 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
2168 bool anycast) 2164 bool anycast)
2169{ 2165{
2170 struct net *net = dev_net(idev->dev); 2166 struct net *net = dev_net(idev->dev);
2171 struct rt6_info *rt = ip6_dst_alloc(net, net->loopback_dev, 0, NULL); 2167 struct rt6_info *rt = ip6_dst_alloc(net, net->loopback_dev,
2172 2168 DST_NOCOUNT, NULL);
2173 if (!rt) { 2169 if (!rt)
2174 net_warn_ratelimited("Maximum number of routes reached, consider increasing route/max_size\n");
2175 return ERR_PTR(-ENOMEM); 2170 return ERR_PTR(-ENOMEM);
2176 }
2177 2171
2178 in6_dev_hold(idev); 2172 in6_dev_hold(idev);
2179 2173
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 1b4a4a953675..d3005b34476a 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -478,14 +478,44 @@ static void ipip6_tunnel_uninit(struct net_device *dev)
478 dev_put(dev); 478 dev_put(dev);
479} 479}
480 480
481/* Generate icmpv6 with type/code ICMPV6_DEST_UNREACH/ICMPV6_ADDR_UNREACH
482 * if sufficient data bytes are available
483 */
484static int ipip6_err_gen_icmpv6_unreach(struct sk_buff *skb)
485{
486 const struct iphdr *iph = (const struct iphdr *) skb->data;
487 struct rt6_info *rt;
488 struct sk_buff *skb2;
489
490 if (!pskb_may_pull(skb, iph->ihl * 4 + sizeof(struct ipv6hdr) + 8))
491 return 1;
492
493 skb2 = skb_clone(skb, GFP_ATOMIC);
494
495 if (!skb2)
496 return 1;
497
498 skb_dst_drop(skb2);
499 skb_pull(skb2, iph->ihl * 4);
500 skb_reset_network_header(skb2);
501
502 rt = rt6_lookup(dev_net(skb->dev), &ipv6_hdr(skb2)->saddr, NULL, 0, 0);
503
504 if (rt && rt->dst.dev)
505 skb2->dev = rt->dst.dev;
506
507 icmpv6_send(skb2, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0);
508
509 if (rt)
510 ip6_rt_put(rt);
511
512 kfree_skb(skb2);
513
514 return 0;
515}
481 516
482static int ipip6_err(struct sk_buff *skb, u32 info) 517static int ipip6_err(struct sk_buff *skb, u32 info)
483{ 518{
484
485/* All the routers (except for Linux) return only
486 8 bytes of packet payload. It means, that precise relaying of
487 ICMP in the real Internet is absolutely infeasible.
488 */
489 const struct iphdr *iph = (const struct iphdr *)skb->data; 519 const struct iphdr *iph = (const struct iphdr *)skb->data;
490 const int type = icmp_hdr(skb)->type; 520 const int type = icmp_hdr(skb)->type;
491 const int code = icmp_hdr(skb)->code; 521 const int code = icmp_hdr(skb)->code;
@@ -500,7 +530,6 @@ static int ipip6_err(struct sk_buff *skb, u32 info)
500 case ICMP_DEST_UNREACH: 530 case ICMP_DEST_UNREACH:
501 switch (code) { 531 switch (code) {
502 case ICMP_SR_FAILED: 532 case ICMP_SR_FAILED:
503 case ICMP_PORT_UNREACH:
504 /* Impossible event. */ 533 /* Impossible event. */
505 return 0; 534 return 0;
506 default: 535 default:
@@ -545,6 +574,9 @@ static int ipip6_err(struct sk_buff *skb, u32 info)
545 goto out; 574 goto out;
546 575
547 err = 0; 576 err = 0;
577 if (!ipip6_err_gen_icmpv6_unreach(skb))
578 goto out;
579
548 if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED) 580 if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
549 goto out; 581 goto out;
550 582
@@ -670,8 +702,10 @@ static int ipip6_rcv(struct sk_buff *skb)
670 } 702 }
671 703
672 tstats = this_cpu_ptr(tunnel->dev->tstats); 704 tstats = this_cpu_ptr(tunnel->dev->tstats);
705 u64_stats_update_begin(&tstats->syncp);
673 tstats->rx_packets++; 706 tstats->rx_packets++;
674 tstats->rx_bytes += skb->len; 707 tstats->rx_bytes += skb->len;
708 u64_stats_update_end(&tstats->syncp);
675 709
676 netif_rx(skb); 710 netif_rx(skb);
677 711
@@ -892,7 +926,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
892 if (tunnel->parms.iph.daddr && skb_dst(skb)) 926 if (tunnel->parms.iph.daddr && skb_dst(skb))
893 skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu); 927 skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
894 928
895 if (skb->len > mtu) { 929 if (skb->len > mtu && !skb_is_gso(skb)) {
896 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); 930 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
897 ip_rt_put(rt); 931 ip_rt_put(rt);
898 goto tx_error; 932 goto tx_error;
@@ -919,7 +953,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
919 if (!new_skb) { 953 if (!new_skb) {
920 ip_rt_put(rt); 954 ip_rt_put(rt);
921 dev->stats.tx_dropped++; 955 dev->stats.tx_dropped++;
922 dev_kfree_skb(skb); 956 kfree_skb(skb);
923 return NETDEV_TX_OK; 957 return NETDEV_TX_OK;
924 } 958 }
925 if (skb->sk) 959 if (skb->sk)
@@ -934,8 +968,10 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
934 tos = INET_ECN_encapsulate(tos, ipv6_get_dsfield(iph6)); 968 tos = INET_ECN_encapsulate(tos, ipv6_get_dsfield(iph6));
935 969
936 skb = iptunnel_handle_offloads(skb, false, SKB_GSO_SIT); 970 skb = iptunnel_handle_offloads(skb, false, SKB_GSO_SIT);
937 if (IS_ERR(skb)) 971 if (IS_ERR(skb)) {
972 ip_rt_put(rt);
938 goto out; 973 goto out;
974 }
939 975
940 err = iptunnel_xmit(rt, skb, fl4.saddr, fl4.daddr, IPPROTO_IPV6, tos, 976 err = iptunnel_xmit(rt, skb, fl4.saddr, fl4.daddr, IPPROTO_IPV6, tos,
941 ttl, df, !net_eq(tunnel->net, dev_net(dev))); 977 ttl, df, !net_eq(tunnel->net, dev_net(dev)));
@@ -945,7 +981,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
945tx_error_icmp: 981tx_error_icmp:
946 dst_link_failure(skb); 982 dst_link_failure(skb);
947tx_error: 983tx_error:
948 dev_kfree_skb(skb); 984 kfree_skb(skb);
949out: 985out:
950 dev->stats.tx_errors++; 986 dev->stats.tx_errors++;
951 return NETDEV_TX_OK; 987 return NETDEV_TX_OK;
@@ -985,7 +1021,7 @@ static netdev_tx_t sit_tunnel_xmit(struct sk_buff *skb,
985 1021
986tx_err: 1022tx_err:
987 dev->stats.tx_errors++; 1023 dev->stats.tx_errors++;
988 dev_kfree_skb(skb); 1024 kfree_skb(skb);
989 return NETDEV_TX_OK; 1025 return NETDEV_TX_OK;
990 1026
991} 1027}
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 0740f93a114a..f67033b4bb66 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -156,7 +156,6 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
156 flowlabel = fl6_sock_lookup(sk, fl6.flowlabel); 156 flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
157 if (flowlabel == NULL) 157 if (flowlabel == NULL)
158 return -EINVAL; 158 return -EINVAL;
159 usin->sin6_addr = flowlabel->dst;
160 fl6_sock_release(flowlabel); 159 fl6_sock_release(flowlabel);
161 } 160 }
162 } 161 }
diff --git a/net/ipv6/tcpv6_offload.c b/net/ipv6/tcpv6_offload.c
index c1097c798900..6d18157dc32c 100644
--- a/net/ipv6/tcpv6_offload.c
+++ b/net/ipv6/tcpv6_offload.c
@@ -37,34 +37,32 @@ static struct sk_buff **tcp6_gro_receive(struct sk_buff **head,
37{ 37{
38 const struct ipv6hdr *iph = skb_gro_network_header(skb); 38 const struct ipv6hdr *iph = skb_gro_network_header(skb);
39 __wsum wsum; 39 __wsum wsum;
40 __sum16 sum; 40
41 /* Don't bother verifying checksum if we're going to flush anyway. */
42 if (NAPI_GRO_CB(skb)->flush)
43 goto skip_csum;
44
45 wsum = skb->csum;
41 46
42 switch (skb->ip_summed) { 47 switch (skb->ip_summed) {
48 case CHECKSUM_NONE:
49 wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb),
50 wsum);
51
52 /* fall through */
53
43 case CHECKSUM_COMPLETE: 54 case CHECKSUM_COMPLETE:
44 if (!tcp_v6_check(skb_gro_len(skb), &iph->saddr, &iph->daddr, 55 if (!tcp_v6_check(skb_gro_len(skb), &iph->saddr, &iph->daddr,
45 skb->csum)) { 56 wsum)) {
46 skb->ip_summed = CHECKSUM_UNNECESSARY; 57 skb->ip_summed = CHECKSUM_UNNECESSARY;
47 break; 58 break;
48 } 59 }
49flush: 60
50 NAPI_GRO_CB(skb)->flush = 1; 61 NAPI_GRO_CB(skb)->flush = 1;
51 return NULL; 62 return NULL;
52
53 case CHECKSUM_NONE:
54 wsum = ~csum_unfold(csum_ipv6_magic(&iph->saddr, &iph->daddr,
55 skb_gro_len(skb),
56 IPPROTO_TCP, 0));
57 sum = csum_fold(skb_checksum(skb,
58 skb_gro_offset(skb),
59 skb_gro_len(skb),
60 wsum));
61 if (sum)
62 goto flush;
63
64 skb->ip_summed = CHECKSUM_UNNECESSARY;
65 break;
66 } 63 }
67 64
65skip_csum:
68 return tcp_gro_receive(head, skb); 66 return tcp_gro_receive(head, skb);
69} 67}
70 68
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 81eb8cf8389b..089c741a3992 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -393,10 +393,10 @@ int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk,
393 bool slow; 393 bool slow;
394 394
395 if (flags & MSG_ERRQUEUE) 395 if (flags & MSG_ERRQUEUE)
396 return ipv6_recv_error(sk, msg, len); 396 return ipv6_recv_error(sk, msg, len, addr_len);
397 397
398 if (np->rxpmtu && np->rxopt.bits.rxpmtu) 398 if (np->rxpmtu && np->rxopt.bits.rxpmtu)
399 return ipv6_recv_rxpmtu(sk, msg, len); 399 return ipv6_recv_rxpmtu(sk, msg, len, addr_len);
400 400
401try_again: 401try_again:
402 skb = __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0), 402 skb = __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0),
@@ -1140,7 +1140,6 @@ do_udp_sendmsg:
1140 flowlabel = fl6_sock_lookup(sk, fl6.flowlabel); 1140 flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
1141 if (flowlabel == NULL) 1141 if (flowlabel == NULL)
1142 return -EINVAL; 1142 return -EINVAL;
1143 daddr = &flowlabel->dst;
1144 } 1143 }
1145 } 1144 }
1146 1145
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index cfd65304be60..bb6e206ea70b 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -528,7 +528,6 @@ static int l2tp_ip6_sendmsg(struct kiocb *iocb, struct sock *sk,
528 flowlabel = fl6_sock_lookup(sk, fl6.flowlabel); 528 flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
529 if (flowlabel == NULL) 529 if (flowlabel == NULL)
530 return -EINVAL; 530 return -EINVAL;
531 daddr = &flowlabel->dst;
532 } 531 }
533 } 532 }
534 533
@@ -665,7 +664,7 @@ static int l2tp_ip6_recvmsg(struct kiocb *iocb, struct sock *sk,
665 *addr_len = sizeof(*lsa); 664 *addr_len = sizeof(*lsa);
666 665
667 if (flags & MSG_ERRQUEUE) 666 if (flags & MSG_ERRQUEUE)
668 return ipv6_recv_error(sk, msg, len); 667 return ipv6_recv_error(sk, msg, len, addr_len);
669 668
670 skb = skb_recv_datagram(sk, flags, noblock, &err); 669 skb = skb_recv_datagram(sk, flags, noblock, &err);
671 if (!skb) 670 if (!skb)
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index 7b01b9f5846c..c71b699eb555 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -715,7 +715,7 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
715 unsigned long cpu_flags; 715 unsigned long cpu_flags;
716 size_t copied = 0; 716 size_t copied = 0;
717 u32 peek_seq = 0; 717 u32 peek_seq = 0;
718 u32 *seq; 718 u32 *seq, skb_len;
719 unsigned long used; 719 unsigned long used;
720 int target; /* Read at least this many bytes */ 720 int target; /* Read at least this many bytes */
721 long timeo; 721 long timeo;
@@ -812,6 +812,7 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
812 } 812 }
813 continue; 813 continue;
814 found_ok_skb: 814 found_ok_skb:
815 skb_len = skb->len;
815 /* Ok so how much can we use? */ 816 /* Ok so how much can we use? */
816 used = skb->len - offset; 817 used = skb->len - offset;
817 if (len < used) 818 if (len < used)
@@ -844,7 +845,7 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
844 } 845 }
845 846
846 /* Partial read */ 847 /* Partial read */
847 if (used + offset < skb->len) 848 if (used + offset < skb_len)
848 continue; 849 continue;
849 } while (len > 0); 850 } while (len > 0);
850 851
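
The af_llc fix above caches skb->len in skb_len before the skb can be consumed and freed inside the receive loop, so the later partial-read comparison no longer dereferences freed memory. A small hypothetical sketch of the general pattern (copy out whatever you still need before handing the buffer to its consumer):

    /* Illustrative sketch: save the length before releasing the buffer. */
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct buf {
        size_t len;
        unsigned char *data;
    };

    static void buf_consume(struct buf *b)
    {
        /* Ownership passes here; the buffer may be freed immediately. */
        free(b->data);
        free(b);
    }

    static size_t copy_and_release(struct buf *b, unsigned char *out,
                                   size_t out_len, size_t offset)
    {
        size_t buf_len = b->len;                 /* saved before releasing */
        size_t avail = buf_len > offset ? buf_len - offset : 0;
        size_t used = avail < out_len ? avail : out_len;

        memcpy(out, b->data + offset, used);
        buf_consume(b);                          /* b must not be touched below */

        /* Safe: the comparison uses the saved length, not b->len. */
        return (used + offset < buf_len) ? buf_len - offset - used : 0;
    }

    int main(void)
    {
        struct buf *b = malloc(sizeof(*b));
        unsigned char out[4];

        b->len = 10;
        b->data = calloc(1, b->len);
        printf("remaining after partial read: %zu\n",
               copy_and_release(b, out, sizeof(out), 0));
        return 0;
    }
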
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 95667b088c5b..364ce0c5962f 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -1368,7 +1368,7 @@ static int sta_apply_parameters(struct ieee80211_local *local,
1368 changed |= 1368 changed |=
1369 ieee80211_mps_set_sta_local_pm(sta, 1369 ieee80211_mps_set_sta_local_pm(sta,
1370 params->local_pm); 1370 params->local_pm);
1371 ieee80211_bss_info_change_notify(sdata, changed); 1371 ieee80211_mbss_info_change_notify(sdata, changed);
1372#endif 1372#endif
1373 } 1373 }
1374 1374
@@ -2488,8 +2488,7 @@ static int ieee80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
2488 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 2488 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
2489 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 2489 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
2490 2490
2491 if (sdata->vif.type != NL80211_IFTYPE_STATION && 2491 if (sdata->vif.type != NL80211_IFTYPE_STATION)
2492 sdata->vif.type != NL80211_IFTYPE_MESH_POINT)
2493 return -EOPNOTSUPP; 2492 return -EOPNOTSUPP;
2494 2493
2495 if (!(local->hw.flags & IEEE80211_HW_SUPPORTS_PS)) 2494 if (!(local->hw.flags & IEEE80211_HW_SUPPORTS_PS))
@@ -3120,9 +3119,17 @@ static int ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
3120 params->chandef.chan->band) 3119 params->chandef.chan->band)
3121 return -EINVAL; 3120 return -EINVAL;
3122 3121
3122 ifmsh->chsw_init = true;
3123 if (!ifmsh->pre_value)
3124 ifmsh->pre_value = 1;
3125 else
3126 ifmsh->pre_value++;
3127
3123 err = ieee80211_mesh_csa_beacon(sdata, params, true); 3128 err = ieee80211_mesh_csa_beacon(sdata, params, true);
3124 if (err < 0) 3129 if (err < 0) {
3130 ifmsh->chsw_init = false;
3125 return err; 3131 return err;
3132 }
3126 break; 3133 break;
3127#endif 3134#endif
3128 default: 3135 default:
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index 531be040b9ae..27a39de89679 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -823,6 +823,10 @@ ieee80211_ibss_process_chanswitch(struct ieee80211_sub_if_data *sdata,
823 if (err) 823 if (err)
824 return false; 824 return false;
825 825
826 /* channel switch is not supported, disconnect */
827 if (!(sdata->local->hw.wiphy->flags & WIPHY_FLAG_HAS_CHANNEL_SWITCH))
828 goto disconnect;
829
826 params.count = csa_ie.count; 830 params.count = csa_ie.count;
827 params.chandef = csa_ie.chandef; 831 params.chandef = csa_ie.chandef;
828 832
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 29dc505be125..4aea4e791113 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -1228,6 +1228,7 @@ struct ieee80211_csa_ie {
1228 u8 mode; 1228 u8 mode;
1229 u8 count; 1229 u8 count;
1230 u8 ttl; 1230 u8 ttl;
1231 u16 pre_value;
1231}; 1232};
1232 1233
1233/* Parsed Information Elements */ 1234/* Parsed Information Elements */
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index ff101ea1d9ae..a0757913046e 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -1061,7 +1061,8 @@ static void ieee80211_uninit(struct net_device *dev)
1061} 1061}
1062 1062
1063static u16 ieee80211_netdev_select_queue(struct net_device *dev, 1063static u16 ieee80211_netdev_select_queue(struct net_device *dev,
1064 struct sk_buff *skb) 1064 struct sk_buff *skb,
1065 void *accel_priv)
1065{ 1066{
1066 return ieee80211_select_queue(IEEE80211_DEV_TO_SUB_IF(dev), skb); 1067 return ieee80211_select_queue(IEEE80211_DEV_TO_SUB_IF(dev), skb);
1067} 1068}
@@ -1078,7 +1079,8 @@ static const struct net_device_ops ieee80211_dataif_ops = {
1078}; 1079};
1079 1080
1080static u16 ieee80211_monitor_select_queue(struct net_device *dev, 1081static u16 ieee80211_monitor_select_queue(struct net_device *dev,
1081 struct sk_buff *skb) 1082 struct sk_buff *skb,
1083 void *accel_priv)
1082{ 1084{
1083 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 1085 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1084 struct ieee80211_local *local = sdata->local; 1086 struct ieee80211_local *local = sdata->local;
@@ -1325,7 +1327,6 @@ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata,
1325 sdata->vif.bss_conf.bssid = NULL; 1327 sdata->vif.bss_conf.bssid = NULL;
1326 break; 1328 break;
1327 case NL80211_IFTYPE_AP_VLAN: 1329 case NL80211_IFTYPE_AP_VLAN:
1328 break;
1329 case NL80211_IFTYPE_P2P_DEVICE: 1330 case NL80211_IFTYPE_P2P_DEVICE:
1330 sdata->vif.bss_conf.bssid = sdata->vif.addr; 1331 sdata->vif.bss_conf.bssid = sdata->vif.addr;
1331 break; 1332 break;
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 21d5d44444d0..7d1c3ac48ed9 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -940,6 +940,8 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
940 wiphy_debug(local->hw.wiphy, "Failed to initialize wep: %d\n", 940 wiphy_debug(local->hw.wiphy, "Failed to initialize wep: %d\n",
941 result); 941 result);
942 942
943 local->hw.conf.flags = IEEE80211_CONF_IDLE;
944
943 ieee80211_led_init(local); 945 ieee80211_led_init(local);
944 946
945 rtnl_lock(); 947 rtnl_lock();
@@ -1047,6 +1049,7 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw)
1047 1049
1048 cancel_work_sync(&local->restart_work); 1050 cancel_work_sync(&local->restart_work);
1049 cancel_work_sync(&local->reconfig_filter); 1051 cancel_work_sync(&local->reconfig_filter);
1052 flush_work(&local->sched_scan_stopped_work);
1050 1053
1051 ieee80211_clear_tx_pending(local); 1054 ieee80211_clear_tx_pending(local);
1052 rate_control_deinitialize(local); 1055 rate_control_deinitialize(local);
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index 896fe3bd599e..ba105257d03f 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -943,14 +943,19 @@ ieee80211_mesh_process_chnswitch(struct ieee80211_sub_if_data *sdata,
943 params.chandef.chan->center_freq); 943 params.chandef.chan->center_freq);
944 944
945 params.block_tx = csa_ie.mode & WLAN_EID_CHAN_SWITCH_PARAM_TX_RESTRICT; 945 params.block_tx = csa_ie.mode & WLAN_EID_CHAN_SWITCH_PARAM_TX_RESTRICT;
946 if (beacon) 946 if (beacon) {
947 ifmsh->chsw_ttl = csa_ie.ttl - 1; 947 ifmsh->chsw_ttl = csa_ie.ttl - 1;
948 else 948 if (ifmsh->pre_value >= csa_ie.pre_value)
949 ifmsh->chsw_ttl = 0; 949 return false;
950 ifmsh->pre_value = csa_ie.pre_value;
951 }
950 952
951 if (ifmsh->chsw_ttl > 0) 953 if (ifmsh->chsw_ttl < ifmsh->mshcfg.dot11MeshTTL) {
952 if (ieee80211_mesh_csa_beacon(sdata, &params, false) < 0) 954 if (ieee80211_mesh_csa_beacon(sdata, &params, false) < 0)
953 return false; 955 return false;
956 } else {
957 return false;
958 }
954 959
955 sdata->csa_radar_required = params.radar_required; 960 sdata->csa_radar_required = params.radar_required;
956 961
@@ -1163,7 +1168,6 @@ static int mesh_fwd_csa_frame(struct ieee80211_sub_if_data *sdata,
1163 offset_ttl = (len < 42) ? 7 : 10; 1168 offset_ttl = (len < 42) ? 7 : 10;
1164 *(pos + offset_ttl) -= 1; 1169 *(pos + offset_ttl) -= 1;
1165 *(pos + offset_ttl + 1) &= ~WLAN_EID_CHAN_SWITCH_PARAM_INITIATOR; 1170 *(pos + offset_ttl + 1) &= ~WLAN_EID_CHAN_SWITCH_PARAM_INITIATOR;
1166 sdata->u.mesh.chsw_ttl = *(pos + offset_ttl);
1167 1171
1168 memcpy(mgmt_fwd, mgmt, len); 1172 memcpy(mgmt_fwd, mgmt, len);
1169 eth_broadcast_addr(mgmt_fwd->da); 1173 eth_broadcast_addr(mgmt_fwd->da);
@@ -1182,7 +1186,7 @@ static void mesh_rx_csa_frame(struct ieee80211_sub_if_data *sdata,
1182 u16 pre_value; 1186 u16 pre_value;
1183 bool fwd_csa = true; 1187 bool fwd_csa = true;
1184 size_t baselen; 1188 size_t baselen;
1185 u8 *pos, ttl; 1189 u8 *pos;
1186 1190
1187 if (mgmt->u.action.u.measurement.action_code != 1191 if (mgmt->u.action.u.measurement.action_code !=
1188 WLAN_ACTION_SPCT_CHL_SWITCH) 1192 WLAN_ACTION_SPCT_CHL_SWITCH)
@@ -1193,8 +1197,8 @@ static void mesh_rx_csa_frame(struct ieee80211_sub_if_data *sdata,
1193 u.action.u.chan_switch.variable); 1197 u.action.u.chan_switch.variable);
1194 ieee802_11_parse_elems(pos, len - baselen, false, &elems); 1198 ieee802_11_parse_elems(pos, len - baselen, false, &elems);
1195 1199
1196 ttl = elems.mesh_chansw_params_ie->mesh_ttl; 1200 ifmsh->chsw_ttl = elems.mesh_chansw_params_ie->mesh_ttl;
1197 if (!--ttl) 1201 if (!--ifmsh->chsw_ttl)
1198 fwd_csa = false; 1202 fwd_csa = false;
1199 1203
1200 pre_value = le16_to_cpu(elems.mesh_chansw_params_ie->mesh_pre_value); 1204 pre_value = le16_to_cpu(elems.mesh_chansw_params_ie->mesh_pre_value);
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index d7504ab61a34..b3a3ce316656 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -1910,6 +1910,8 @@ static void ieee80211_mgd_probe_ap(struct ieee80211_sub_if_data *sdata,
1910 if (ifmgd->flags & IEEE80211_STA_CONNECTION_POLL) 1910 if (ifmgd->flags & IEEE80211_STA_CONNECTION_POLL)
1911 already = true; 1911 already = true;
1912 1912
1913 ifmgd->flags |= IEEE80211_STA_CONNECTION_POLL;
1914
1913 mutex_unlock(&sdata->local->mtx); 1915 mutex_unlock(&sdata->local->mtx);
1914 1916
1915 if (already) 1917 if (already)
diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c
index 5d60779a0c1b..4096ff6cc24f 100644
--- a/net/mac80211/rc80211_minstrel_ht.c
+++ b/net/mac80211/rc80211_minstrel_ht.c
@@ -226,7 +226,7 @@ minstrel_ht_calc_tp(struct minstrel_ht_sta *mi, int group, int rate)
226 nsecs = 1000 * mi->overhead / MINSTREL_TRUNC(mi->avg_ampdu_len); 226 nsecs = 1000 * mi->overhead / MINSTREL_TRUNC(mi->avg_ampdu_len);
227 227
228 nsecs += minstrel_mcs_groups[group].duration[rate]; 228 nsecs += minstrel_mcs_groups[group].duration[rate];
229 tp = 1000000 * ((mr->probability * 1000) / nsecs); 229 tp = 1000000 * ((prob * 1000) / nsecs);
230 230
231 mr->cur_tp = MINSTREL_TRUNC(tp); 231 mr->cur_tp = MINSTREL_TRUNC(tp);
232} 232}
@@ -277,13 +277,15 @@ minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
277 if (!(mg->supported & BIT(i))) 277 if (!(mg->supported & BIT(i)))
278 continue; 278 continue;
279 279
280 index = MCS_GROUP_RATES * group + i;
281
280 /* initialize rates selections starting indexes */ 282 /* initialize rates selections starting indexes */
281 if (!mg_rates_valid) { 283 if (!mg_rates_valid) {
282 mg->max_tp_rate = mg->max_tp_rate2 = 284 mg->max_tp_rate = mg->max_tp_rate2 =
283 mg->max_prob_rate = i; 285 mg->max_prob_rate = i;
284 if (!mi_rates_valid) { 286 if (!mi_rates_valid) {
285 mi->max_tp_rate = mi->max_tp_rate2 = 287 mi->max_tp_rate = mi->max_tp_rate2 =
286 mi->max_prob_rate = i; 288 mi->max_prob_rate = index;
287 mi_rates_valid = true; 289 mi_rates_valid = true;
288 } 290 }
289 mg_rates_valid = true; 291 mg_rates_valid = true;
@@ -291,7 +293,6 @@ minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
291 293
292 mr = &mg->rates[i]; 294 mr = &mg->rates[i];
293 mr->retry_updated = false; 295 mr->retry_updated = false;
294 index = MCS_GROUP_RATES * group + i;
295 minstrel_calc_rate_ewma(mr); 296 minstrel_calc_rate_ewma(mr);
296 minstrel_ht_calc_tp(mi, group, i); 297 minstrel_ht_calc_tp(mi, group, i);
297 298
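
minstrel_ht_calc_tp() above estimates a rate's throughput with fixed-point arithmetic: a scaled delivery probability multiplied by how many frames fit into a second of airtime. A standalone version of the same style of calculation with made-up units (probability in parts per thousand, airtime in nanoseconds; the scaling factors here are illustrative, not minstrel's internal ones):

    /* Illustrative sketch: fixed-point expected-throughput estimate. */
    #include <stdint.h>
    #include <stdio.h>

    /* Expected throughput in kbit/s given per-frame airtime in ns, payload
     * size in bits, and delivery probability scaled by 1000 (900 == 90%). */
    static uint64_t expected_tp_kbps(uint64_t airtime_ns, uint64_t payload_bits,
                                     uint64_t prob_permille)
    {
        if (airtime_ns == 0)
            return 0;
        /* bits/s  = payload_bits * (prob/1000) * (1e9 / airtime_ns)
         * kbit/s  = payload_bits * prob * 1000 / airtime_ns */
        return payload_bits * prob_permille * 1000ULL / airtime_ns;
    }

    int main(void)
    {
        /* ~1500-byte frame, 200 us of airtime, 90% delivery probability. */
        printf("%llu kbit/s\n",
               (unsigned long long)expected_tp_kbps(200000, 12000, 900));
        return 0;
    }
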
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index caecef870c0e..2b0debb0422b 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -911,7 +911,8 @@ static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx,
911 u16 sc; 911 u16 sc;
912 u8 tid, ack_policy; 912 u8 tid, ack_policy;
913 913
914 if (!ieee80211_is_data_qos(hdr->frame_control)) 914 if (!ieee80211_is_data_qos(hdr->frame_control) ||
915 is_multicast_ether_addr(hdr->addr1))
915 goto dont_reorder; 916 goto dont_reorder;
916 917
917 /* 918 /*
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index 5ad66a83ef7f..bcc4833d7542 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -1088,6 +1088,6 @@ void ieee80211_sched_scan_stopped(struct ieee80211_hw *hw)
1088 1088
1089 trace_api_sched_scan_stopped(local); 1089 trace_api_sched_scan_stopped(local);
1090 1090
1091 ieee80211_queue_work(&local->hw, &local->sched_scan_stopped_work); 1091 schedule_work(&local->sched_scan_stopped_work);
1092} 1092}
1093EXPORT_SYMBOL(ieee80211_sched_scan_stopped); 1093EXPORT_SYMBOL(ieee80211_sched_scan_stopped);
diff --git a/net/mac80211/spectmgmt.c b/net/mac80211/spectmgmt.c
index a40da20b32e0..6ab009070084 100644
--- a/net/mac80211/spectmgmt.c
+++ b/net/mac80211/spectmgmt.c
@@ -78,6 +78,8 @@ int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata,
78 if (elems->mesh_chansw_params_ie) { 78 if (elems->mesh_chansw_params_ie) {
79 csa_ie->ttl = elems->mesh_chansw_params_ie->mesh_ttl; 79 csa_ie->ttl = elems->mesh_chansw_params_ie->mesh_ttl;
80 csa_ie->mode = elems->mesh_chansw_params_ie->mesh_flags; 80 csa_ie->mode = elems->mesh_chansw_params_ie->mesh_flags;
81 csa_ie->pre_value = le16_to_cpu(
82 elems->mesh_chansw_params_ie->mesh_pre_value);
81 } 83 }
82 84
83 new_freq = ieee80211_channel_to_frequency(new_chan_no, new_band); 85 new_freq = ieee80211_channel_to_frequency(new_chan_no, new_band);
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index c558b246ef00..ca7fa7f0613d 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -463,7 +463,6 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
463{ 463{
464 struct sta_info *sta = tx->sta; 464 struct sta_info *sta = tx->sta;
465 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); 465 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
466 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
467 struct ieee80211_local *local = tx->local; 466 struct ieee80211_local *local = tx->local;
468 467
469 if (unlikely(!sta)) 468 if (unlikely(!sta))
@@ -474,15 +473,6 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
474 !(info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER))) { 473 !(info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER))) {
475 int ac = skb_get_queue_mapping(tx->skb); 474 int ac = skb_get_queue_mapping(tx->skb);
476 475
477 /* only deauth, disassoc and action are bufferable MMPDUs */
478 if (ieee80211_is_mgmt(hdr->frame_control) &&
479 !ieee80211_is_deauth(hdr->frame_control) &&
480 !ieee80211_is_disassoc(hdr->frame_control) &&
481 !ieee80211_is_action(hdr->frame_control)) {
482 info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER;
483 return TX_CONTINUE;
484 }
485
486 ps_dbg(sta->sdata, "STA %pM aid %d: PS buffer for AC %d\n", 476 ps_dbg(sta->sdata, "STA %pM aid %d: PS buffer for AC %d\n",
487 sta->sta.addr, sta->sta.aid, ac); 477 sta->sta.addr, sta->sta.aid, ac);
488 if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER) 478 if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER)
@@ -525,9 +515,22 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
525static ieee80211_tx_result debug_noinline 515static ieee80211_tx_result debug_noinline
526ieee80211_tx_h_ps_buf(struct ieee80211_tx_data *tx) 516ieee80211_tx_h_ps_buf(struct ieee80211_tx_data *tx)
527{ 517{
518 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
519 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
520
528 if (unlikely(tx->flags & IEEE80211_TX_PS_BUFFERED)) 521 if (unlikely(tx->flags & IEEE80211_TX_PS_BUFFERED))
529 return TX_CONTINUE; 522 return TX_CONTINUE;
530 523
524 /* only deauth, disassoc and action are bufferable MMPDUs */
525 if (ieee80211_is_mgmt(hdr->frame_control) &&
526 !ieee80211_is_deauth(hdr->frame_control) &&
527 !ieee80211_is_disassoc(hdr->frame_control) &&
528 !ieee80211_is_action(hdr->frame_control)) {
529 if (tx->flags & IEEE80211_TX_UNICAST)
530 info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER;
531 return TX_CONTINUE;
532 }
533
531 if (tx->flags & IEEE80211_TX_UNICAST) 534 if (tx->flags & IEEE80211_TX_UNICAST)
532 return ieee80211_tx_h_unicast_ps_buf(tx); 535 return ieee80211_tx_h_unicast_ps_buf(tx);
533 else 536 else
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 592a18171f95..9f9b9bd3fd44 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -2278,17 +2278,15 @@ void ieee80211_dfs_radar_detected_work(struct work_struct *work)
2278{ 2278{
2279 struct ieee80211_local *local = 2279 struct ieee80211_local *local =
2280 container_of(work, struct ieee80211_local, radar_detected_work); 2280 container_of(work, struct ieee80211_local, radar_detected_work);
2281 struct cfg80211_chan_def chandef; 2281 struct cfg80211_chan_def chandef = local->hw.conf.chandef;
2282 2282
2283 ieee80211_dfs_cac_cancel(local); 2283 ieee80211_dfs_cac_cancel(local);
2284 2284
2285 if (local->use_chanctx) 2285 if (local->use_chanctx)
2286 /* currently not handled */ 2286 /* currently not handled */
2287 WARN_ON(1); 2287 WARN_ON(1);
2288 else { 2288 else
2289 chandef = local->hw.conf.chandef;
2290 cfg80211_radar_event(local->hw.wiphy, &chandef, GFP_KERNEL); 2289 cfg80211_radar_event(local->hw.wiphy, &chandef, GFP_KERNEL);
2291 }
2292} 2290}
2293 2291
2294void ieee80211_radar_detected(struct ieee80211_hw *hw) 2292void ieee80211_radar_detected(struct ieee80211_hw *hw)
@@ -2459,14 +2457,9 @@ int ieee80211_send_action_csa(struct ieee80211_sub_if_data *sdata,
2459 WLAN_EID_CHAN_SWITCH_PARAM_TX_RESTRICT : 0x00; 2457 WLAN_EID_CHAN_SWITCH_PARAM_TX_RESTRICT : 0x00;
2460 put_unaligned_le16(WLAN_REASON_MESH_CHAN, pos); /* Reason Cd */ 2458 put_unaligned_le16(WLAN_REASON_MESH_CHAN, pos); /* Reason Cd */
2461 pos += 2; 2459 pos += 2;
2462 if (!ifmsh->pre_value)
2463 ifmsh->pre_value = 1;
2464 else
2465 ifmsh->pre_value++;
2466 pre_value = cpu_to_le16(ifmsh->pre_value); 2460 pre_value = cpu_to_le16(ifmsh->pre_value);
2467 memcpy(pos, &pre_value, 2); /* Precedence Value */ 2461 memcpy(pos, &pre_value, 2); /* Precedence Value */
2468 pos += 2; 2462 pos += 2;
2469 ifmsh->chsw_init = true;
2470 } 2463 }
2471 2464
2472 ieee80211_tx_skb(sdata, skb); 2465 ieee80211_tx_skb(sdata, skb);
diff --git a/net/netfilter/ipset/ip_set_hash_netnet.c b/net/netfilter/ipset/ip_set_hash_netnet.c
index 2bc2dec20b00..6226803fc490 100644
--- a/net/netfilter/ipset/ip_set_hash_netnet.c
+++ b/net/netfilter/ipset/ip_set_hash_netnet.c
@@ -59,7 +59,7 @@ hash_netnet4_data_equal(const struct hash_netnet4_elem *ip1,
59 u32 *multi) 59 u32 *multi)
60{ 60{
61 return ip1->ipcmp == ip2->ipcmp && 61 return ip1->ipcmp == ip2->ipcmp &&
62 ip2->ccmp == ip2->ccmp; 62 ip1->ccmp == ip2->ccmp;
63} 63}
64 64
65static inline int 65static inline int
diff --git a/net/netfilter/ipvs/ip_vs_nfct.c b/net/netfilter/ipvs/ip_vs_nfct.c
index c8beafd401aa..5a355a46d1dc 100644
--- a/net/netfilter/ipvs/ip_vs_nfct.c
+++ b/net/netfilter/ipvs/ip_vs_nfct.c
@@ -63,6 +63,7 @@
63#include <net/ip_vs.h> 63#include <net/ip_vs.h>
64#include <net/netfilter/nf_conntrack_core.h> 64#include <net/netfilter/nf_conntrack_core.h>
65#include <net/netfilter/nf_conntrack_expect.h> 65#include <net/netfilter/nf_conntrack_expect.h>
66#include <net/netfilter/nf_conntrack_seqadj.h>
66#include <net/netfilter/nf_conntrack_helper.h> 67#include <net/netfilter/nf_conntrack_helper.h>
67#include <net/netfilter/nf_conntrack_zones.h> 68#include <net/netfilter/nf_conntrack_zones.h>
68 69
@@ -97,6 +98,11 @@ ip_vs_update_conntrack(struct sk_buff *skb, struct ip_vs_conn *cp, int outin)
97 if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL) 98 if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
98 return; 99 return;
99 100
101 /* Applications may adjust TCP seqs */
102 if (cp->app && nf_ct_protonum(ct) == IPPROTO_TCP &&
103 !nfct_seqadj(ct) && !nfct_seqadj_ext_add(ct))
104 return;
105
100 /* 106 /*
101 * The connection is not yet in the hashtable, so we update it. 107 * The connection is not yet in the hashtable, so we update it.
102 * CIP->VIP will remain the same, so leave the tuple in 108 * CIP->VIP will remain the same, so leave the tuple in
diff --git a/net/netfilter/nf_conntrack_seqadj.c b/net/netfilter/nf_conntrack_seqadj.c
index 17c1bcb182c6..f6e2ae91a80b 100644
--- a/net/netfilter/nf_conntrack_seqadj.c
+++ b/net/netfilter/nf_conntrack_seqadj.c
@@ -36,6 +36,11 @@ int nf_ct_seqadj_set(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
36 if (off == 0) 36 if (off == 0)
37 return 0; 37 return 0;
38 38
39 if (unlikely(!seqadj)) {
40 WARN_ONCE(1, "Missing nfct_seqadj_ext_add() setup call\n");
41 return 0;
42 }
43
39 set_bit(IPS_SEQ_ADJUST_BIT, &ct->status); 44 set_bit(IPS_SEQ_ADJUST_BIT, &ct->status);
40 45
41 spin_lock_bh(&ct->lock); 46 spin_lock_bh(&ct->lock);
diff --git a/net/netfilter/nf_conntrack_timestamp.c b/net/netfilter/nf_conntrack_timestamp.c
index 902fb0a6b38a..7a394df0deb7 100644
--- a/net/netfilter/nf_conntrack_timestamp.c
+++ b/net/netfilter/nf_conntrack_timestamp.c
@@ -97,7 +97,6 @@ int nf_conntrack_tstamp_pernet_init(struct net *net)
97void nf_conntrack_tstamp_pernet_fini(struct net *net) 97void nf_conntrack_tstamp_pernet_fini(struct net *net)
98{ 98{
99 nf_conntrack_tstamp_fini_sysctl(net); 99 nf_conntrack_tstamp_fini_sysctl(net);
100 nf_ct_extend_unregister(&tstamp_extend);
101} 100}
102 101
103int nf_conntrack_tstamp_init(void) 102int nf_conntrack_tstamp_init(void)
diff --git a/net/netfilter/nf_nat_irc.c b/net/netfilter/nf_nat_irc.c
index f02b3605823e..1fb2258c3535 100644
--- a/net/netfilter/nf_nat_irc.c
+++ b/net/netfilter/nf_nat_irc.c
@@ -34,10 +34,14 @@ static unsigned int help(struct sk_buff *skb,
34 struct nf_conntrack_expect *exp) 34 struct nf_conntrack_expect *exp)
35{ 35{
36 char buffer[sizeof("4294967296 65635")]; 36 char buffer[sizeof("4294967296 65635")];
37 struct nf_conn *ct = exp->master;
38 union nf_inet_addr newaddr;
37 u_int16_t port; 39 u_int16_t port;
38 unsigned int ret; 40 unsigned int ret;
39 41
40 /* Reply comes from server. */ 42 /* Reply comes from server. */
43 newaddr = ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3;
44
41 exp->saved_proto.tcp.port = exp->tuple.dst.u.tcp.port; 45 exp->saved_proto.tcp.port = exp->tuple.dst.u.tcp.port;
42 exp->dir = IP_CT_DIR_REPLY; 46 exp->dir = IP_CT_DIR_REPLY;
43 exp->expectfn = nf_nat_follow_master; 47 exp->expectfn = nf_nat_follow_master;
@@ -57,17 +61,35 @@ static unsigned int help(struct sk_buff *skb,
57 } 61 }
58 62
59 if (port == 0) { 63 if (port == 0) {
60 nf_ct_helper_log(skb, exp->master, "all ports in use"); 64 nf_ct_helper_log(skb, ct, "all ports in use");
61 return NF_DROP; 65 return NF_DROP;
62 } 66 }
63 67
64 ret = nf_nat_mangle_tcp_packet(skb, exp->master, ctinfo, 68 /* strlen("\1DCC CHAT chat AAAAAAAA P\1\n")=27
65 protoff, matchoff, matchlen, buffer, 69 * strlen("\1DCC SCHAT chat AAAAAAAA P\1\n")=28
66 strlen(buffer)); 70 * strlen("\1DCC SEND F AAAAAAAA P S\1\n")=26
71 * strlen("\1DCC MOVE F AAAAAAAA P S\1\n")=26
72 * strlen("\1DCC TSEND F AAAAAAAA P S\1\n")=27
73 *
74 * AAAAAAAAA: bound addr (1.0.0.0==16777216, min 8 digits,
75 * 255.255.255.255==4294967296, 10 digits)
76 * P: bound port (min 1 d, max 5d (65635))
77 * F: filename (min 1 d )
78 * S: size (min 1 d )
79 * 0x01, \n: terminators
80 */
81 /* AAA = "us", ie. where server normally talks to. */
82 snprintf(buffer, sizeof(buffer), "%u %u", ntohl(newaddr.ip), port);
83 pr_debug("nf_nat_irc: inserting '%s' == %pI4, port %u\n",
84 buffer, &newaddr.ip, port);
85
86 ret = nf_nat_mangle_tcp_packet(skb, ct, ctinfo, protoff, matchoff,
87 matchlen, buffer, strlen(buffer));
67 if (ret != NF_ACCEPT) { 88 if (ret != NF_ACCEPT) {
68 nf_ct_helper_log(skb, exp->master, "cannot mangle packet"); 89 nf_ct_helper_log(skb, ct, "cannot mangle packet");
69 nf_ct_unexpect_related(exp); 90 nf_ct_unexpect_related(exp);
70 } 91 }
92
71 return ret; 93 return ret;
72} 94}
73 95
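
A userspace sketch of the formatting step the comment block above sizes the buffer for: the reply-direction address and the port reserved by the NAT helper are printed as two decimal numbers, and that string then overwrites the client's original DCC argument. The address and port values here are made up for illustration.

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Worst case: 10-digit address, a space, 5-digit port, terminating NUL. */
	char buffer[sizeof("4294967295 65535")];
	uint32_t newaddr = ntohl(inet_addr("192.0.2.1")); /* host order, as in the helper */
	uint16_t port = 40000;                            /* port chosen by the helper's retry loop */

	snprintf(buffer, sizeof(buffer), "%u %u", (unsigned)newaddr, (unsigned)port);
	printf("mangled DCC argument: '%s'\n", buffer);
	return 0;
}
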
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index dcddc49c0e08..71a9f49a768b 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -312,6 +312,9 @@ static int nf_tables_table_enable(struct nft_table *table)
312 int err, i = 0; 312 int err, i = 0;
313 313
314 list_for_each_entry(chain, &table->chains, list) { 314 list_for_each_entry(chain, &table->chains, list) {
315 if (!(chain->flags & NFT_BASE_CHAIN))
316 continue;
317
315 err = nf_register_hook(&nft_base_chain(chain)->ops); 318 err = nf_register_hook(&nft_base_chain(chain)->ops);
316 if (err < 0) 319 if (err < 0)
317 goto err; 320 goto err;
@@ -321,6 +324,9 @@ static int nf_tables_table_enable(struct nft_table *table)
321 return 0; 324 return 0;
322err: 325err:
323 list_for_each_entry(chain, &table->chains, list) { 326 list_for_each_entry(chain, &table->chains, list) {
327 if (!(chain->flags & NFT_BASE_CHAIN))
328 continue;
329
324 if (i-- <= 0) 330 if (i-- <= 0)
325 break; 331 break;
326 332
@@ -333,8 +339,10 @@ static int nf_tables_table_disable(struct nft_table *table)
333{ 339{
334 struct nft_chain *chain; 340 struct nft_chain *chain;
335 341
336 list_for_each_entry(chain, &table->chains, list) 342 list_for_each_entry(chain, &table->chains, list) {
337 nf_unregister_hook(&nft_base_chain(chain)->ops); 343 if (chain->flags & NFT_BASE_CHAIN)
344 nf_unregister_hook(&nft_base_chain(chain)->ops);
345 }
338 346
339 return 0; 347 return 0;
340} 348}
@@ -1717,6 +1725,19 @@ nf_tables_delrule_one(struct nft_ctx *ctx, struct nft_rule *rule)
1717 return -ENOENT; 1725 return -ENOENT;
1718} 1726}
1719 1727
1728static int nf_table_delrule_by_chain(struct nft_ctx *ctx)
1729{
1730 struct nft_rule *rule;
1731 int err;
1732
1733 list_for_each_entry(rule, &ctx->chain->rules, list) {
1734 err = nf_tables_delrule_one(ctx, rule);
1735 if (err < 0)
1736 return err;
1737 }
1738 return 0;
1739}
1740
1720static int nf_tables_delrule(struct sock *nlsk, struct sk_buff *skb, 1741static int nf_tables_delrule(struct sock *nlsk, struct sk_buff *skb,
1721 const struct nlmsghdr *nlh, 1742 const struct nlmsghdr *nlh,
1722 const struct nlattr * const nla[]) 1743 const struct nlattr * const nla[])
@@ -1725,8 +1746,8 @@ static int nf_tables_delrule(struct sock *nlsk, struct sk_buff *skb,
1725 const struct nft_af_info *afi; 1746 const struct nft_af_info *afi;
1726 struct net *net = sock_net(skb->sk); 1747 struct net *net = sock_net(skb->sk);
1727 const struct nft_table *table; 1748 const struct nft_table *table;
1728 struct nft_chain *chain; 1749 struct nft_chain *chain = NULL;
1729 struct nft_rule *rule, *tmp; 1750 struct nft_rule *rule;
1730 int family = nfmsg->nfgen_family, err = 0; 1751 int family = nfmsg->nfgen_family, err = 0;
1731 struct nft_ctx ctx; 1752 struct nft_ctx ctx;
1732 1753
@@ -1738,22 +1759,29 @@ static int nf_tables_delrule(struct sock *nlsk, struct sk_buff *skb,
1738 if (IS_ERR(table)) 1759 if (IS_ERR(table))
1739 return PTR_ERR(table); 1760 return PTR_ERR(table);
1740 1761
1741 chain = nf_tables_chain_lookup(table, nla[NFTA_RULE_CHAIN]); 1762 if (nla[NFTA_RULE_CHAIN]) {
1742 if (IS_ERR(chain)) 1763 chain = nf_tables_chain_lookup(table, nla[NFTA_RULE_CHAIN]);
1743 return PTR_ERR(chain); 1764 if (IS_ERR(chain))
1765 return PTR_ERR(chain);
1766 }
1744 1767
1745 nft_ctx_init(&ctx, skb, nlh, afi, table, chain, nla); 1768 nft_ctx_init(&ctx, skb, nlh, afi, table, chain, nla);
1746 1769
1747 if (nla[NFTA_RULE_HANDLE]) { 1770 if (chain) {
1748 rule = nf_tables_rule_lookup(chain, nla[NFTA_RULE_HANDLE]); 1771 if (nla[NFTA_RULE_HANDLE]) {
1749 if (IS_ERR(rule)) 1772 rule = nf_tables_rule_lookup(chain,
1750 return PTR_ERR(rule); 1773 nla[NFTA_RULE_HANDLE]);
1774 if (IS_ERR(rule))
1775 return PTR_ERR(rule);
1751 1776
1752 err = nf_tables_delrule_one(&ctx, rule);
1753 } else {
1754 /* Remove all rules in this chain */
1755 list_for_each_entry_safe(rule, tmp, &chain->rules, list) {
1756 err = nf_tables_delrule_one(&ctx, rule); 1777 err = nf_tables_delrule_one(&ctx, rule);
1778 } else {
1779 err = nf_table_delrule_by_chain(&ctx);
1780 }
1781 } else {
1782 list_for_each_entry(chain, &table->chains, list) {
1783 ctx.chain = chain;
1784 err = nf_table_delrule_by_chain(&ctx);
1757 if (err < 0) 1785 if (err < 0)
1758 break; 1786 break;
1759 } 1787 }
@@ -2078,17 +2106,21 @@ static int nf_tables_dump_sets_all(struct nft_ctx *ctx, struct sk_buff *skb,
2078 struct netlink_callback *cb) 2106 struct netlink_callback *cb)
2079{ 2107{
2080 const struct nft_set *set; 2108 const struct nft_set *set;
2081 unsigned int idx = 0, s_idx = cb->args[0]; 2109 unsigned int idx, s_idx = cb->args[0];
2082 struct nft_table *table, *cur_table = (struct nft_table *)cb->args[2]; 2110 struct nft_table *table, *cur_table = (struct nft_table *)cb->args[2];
2083 2111
2084 if (cb->args[1]) 2112 if (cb->args[1])
2085 return skb->len; 2113 return skb->len;
2086 2114
2087 list_for_each_entry(table, &ctx->afi->tables, list) { 2115 list_for_each_entry(table, &ctx->afi->tables, list) {
2088 if (cur_table && cur_table != table) 2116 if (cur_table) {
2089 continue; 2117 if (cur_table != table)
2118 continue;
2090 2119
2120 cur_table = NULL;
2121 }
2091 ctx->table = table; 2122 ctx->table = table;
2123 idx = 0;
2092 list_for_each_entry(set, &ctx->table->sets, list) { 2124 list_for_each_entry(set, &ctx->table->sets, list) {
2093 if (idx < s_idx) 2125 if (idx < s_idx)
2094 goto cont; 2126 goto cont;
@@ -2350,7 +2382,9 @@ static int nf_tables_bind_check_setelem(const struct nft_ctx *ctx,
2350 enum nft_registers dreg; 2382 enum nft_registers dreg;
2351 2383
2352 dreg = nft_type_to_reg(set->dtype); 2384 dreg = nft_type_to_reg(set->dtype);
2353 return nft_validate_data_load(ctx, dreg, &elem->data, set->dtype); 2385 return nft_validate_data_load(ctx, dreg, &elem->data,
2386 set->dtype == NFT_DATA_VERDICT ?
2387 NFT_DATA_VERDICT : NFT_DATA_VALUE);
2354} 2388}
2355 2389
2356int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set, 2390int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index 3c4b69e5fe17..a155d19a225e 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -1053,6 +1053,7 @@ static void __net_exit nfnl_log_net_exit(struct net *net)
1053#ifdef CONFIG_PROC_FS 1053#ifdef CONFIG_PROC_FS
1054 remove_proc_entry("nfnetlink_log", net->nf.proc_netfilter); 1054 remove_proc_entry("nfnetlink_log", net->nf.proc_netfilter);
1055#endif 1055#endif
1056 nf_log_unset(net, &nfulnl_logger);
1056} 1057}
1057 1058
1058static struct pernet_operations nfnl_log_net_ops = { 1059static struct pernet_operations nfnl_log_net_ops = {
diff --git a/net/netfilter/nft_exthdr.c b/net/netfilter/nft_exthdr.c
index 8e0bb75e7c51..55c939f5371f 100644
--- a/net/netfilter/nft_exthdr.c
+++ b/net/netfilter/nft_exthdr.c
@@ -31,7 +31,7 @@ static void nft_exthdr_eval(const struct nft_expr *expr,
31{ 31{
32 struct nft_exthdr *priv = nft_expr_priv(expr); 32 struct nft_exthdr *priv = nft_expr_priv(expr);
33 struct nft_data *dest = &data[priv->dreg]; 33 struct nft_data *dest = &data[priv->dreg];
34 unsigned int offset; 34 unsigned int offset = 0;
35 int err; 35 int err;
36 36
37 err = ipv6_find_hdr(pkt->skb, &offset, priv->type, NULL, NULL); 37 err = ipv6_find_hdr(pkt->skb, &offset, priv->type, NULL, NULL);
diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
index 9ff035c71403..a3910fc2122b 100644
--- a/net/netfilter/xt_hashlimit.c
+++ b/net/netfilter/xt_hashlimit.c
@@ -325,21 +325,24 @@ static void htable_gc(unsigned long htlong)
325 add_timer(&ht->timer); 325 add_timer(&ht->timer);
326} 326}
327 327
328static void htable_destroy(struct xt_hashlimit_htable *hinfo) 328static void htable_remove_proc_entry(struct xt_hashlimit_htable *hinfo)
329{ 329{
330 struct hashlimit_net *hashlimit_net = hashlimit_pernet(hinfo->net); 330 struct hashlimit_net *hashlimit_net = hashlimit_pernet(hinfo->net);
331 struct proc_dir_entry *parent; 331 struct proc_dir_entry *parent;
332 332
333 del_timer_sync(&hinfo->timer);
334
335 if (hinfo->family == NFPROTO_IPV4) 333 if (hinfo->family == NFPROTO_IPV4)
336 parent = hashlimit_net->ipt_hashlimit; 334 parent = hashlimit_net->ipt_hashlimit;
337 else 335 else
338 parent = hashlimit_net->ip6t_hashlimit; 336 parent = hashlimit_net->ip6t_hashlimit;
339 337
340 if(parent != NULL) 338 if (parent != NULL)
341 remove_proc_entry(hinfo->name, parent); 339 remove_proc_entry(hinfo->name, parent);
340}
342 341
342static void htable_destroy(struct xt_hashlimit_htable *hinfo)
343{
344 del_timer_sync(&hinfo->timer);
345 htable_remove_proc_entry(hinfo);
343 htable_selective_cleanup(hinfo, select_all); 346 htable_selective_cleanup(hinfo, select_all);
344 kfree(hinfo->name); 347 kfree(hinfo->name);
345 vfree(hinfo); 348 vfree(hinfo);
@@ -883,21 +886,15 @@ static int __net_init hashlimit_proc_net_init(struct net *net)
883static void __net_exit hashlimit_proc_net_exit(struct net *net) 886static void __net_exit hashlimit_proc_net_exit(struct net *net)
884{ 887{
885 struct xt_hashlimit_htable *hinfo; 888 struct xt_hashlimit_htable *hinfo;
886 struct proc_dir_entry *pde;
887 struct hashlimit_net *hashlimit_net = hashlimit_pernet(net); 889 struct hashlimit_net *hashlimit_net = hashlimit_pernet(net);
888 890
889 /* recent_net_exit() is called before recent_mt_destroy(). Make sure 891 /* hashlimit_net_exit() is called before hashlimit_mt_destroy().
890 * that the parent xt_recent proc entry is is empty before trying to 892 * Make sure that the parent ipt_hashlimit and ip6t_hashlimit proc
891 * remove it. 893 * entries is empty before trying to remove it.
892 */ 894 */
893 mutex_lock(&hashlimit_mutex); 895 mutex_lock(&hashlimit_mutex);
894 pde = hashlimit_net->ipt_hashlimit;
895 if (pde == NULL)
896 pde = hashlimit_net->ip6t_hashlimit;
897
898 hlist_for_each_entry(hinfo, &hashlimit_net->htables, node) 896 hlist_for_each_entry(hinfo, &hashlimit_net->htables, node)
899 remove_proc_entry(hinfo->name, pde); 897 htable_remove_proc_entry(hinfo);
900
901 hashlimit_net->ipt_hashlimit = NULL; 898 hashlimit_net->ipt_hashlimit = NULL;
902 hashlimit_net->ip6t_hashlimit = NULL; 899 hashlimit_net->ip6t_hashlimit = NULL;
903 mutex_unlock(&hashlimit_mutex); 900 mutex_unlock(&hashlimit_mutex);
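
A minimal model of htable_remove_proc_entry() as factored out above: each table's proc entry is removed from the parent directory that matches its own family, instead of from whichever per-net directory happened to be looked up first. Paths and types are stand-ins, not the kernel's proc API.

#include <stdio.h>

enum family { NFPROTO_IPV4, NFPROTO_IPV6 };

struct htable { const char *name; enum family family; };

static const char *parent_dir(enum family f)
{
	return f == NFPROTO_IPV4 ? "/proc/net/ipt_hashlimit"
				 : "/proc/net/ip6t_hashlimit";
}

static void remove_proc_entry_for(const struct htable *h)
{
	/* stands in for remove_proc_entry(hinfo->name, parent) */
	printf("remove %s/%s\n", parent_dir(h->family), h->name);
}

int main(void)
{
	struct htable v4 = { "limit-v4", NFPROTO_IPV4 };
	struct htable v6 = { "limit-v6", NFPROTO_IPV6 };

	remove_proc_entry_for(&v4);
	remove_proc_entry_for(&v6);
	return 0;
}
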
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index 4518a57aa5fe..713671ae45af 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -74,9 +74,12 @@ static struct list_head family_ht[GENL_FAM_TAB_SIZE];
74 * Bit 17 is marked as already used since the VFS quota code 74 * Bit 17 is marked as already used since the VFS quota code
75 * also abused this API and relied on family == group ID, we 75 * also abused this API and relied on family == group ID, we
76 * cater to that by giving it a static family and group ID. 76 * cater to that by giving it a static family and group ID.
77 * Bit 18 is marked as already used since the PMCRAID driver
78 * did the same thing as the VFS quota code (maybe copied?)
77 */ 79 */
78static unsigned long mc_group_start = 0x3 | BIT(GENL_ID_CTRL) | 80static unsigned long mc_group_start = 0x3 | BIT(GENL_ID_CTRL) |
79 BIT(GENL_ID_VFS_DQUOT); 81 BIT(GENL_ID_VFS_DQUOT) |
82 BIT(GENL_ID_PMCRAID);
80static unsigned long *mc_groups = &mc_group_start; 83static unsigned long *mc_groups = &mc_group_start;
81static unsigned long mc_groups_longs = 1; 84static unsigned long mc_groups_longs = 1;
82 85
@@ -139,6 +142,7 @@ static u16 genl_generate_id(void)
139 142
140 for (i = 0; i <= GENL_MAX_ID - GENL_MIN_ID; i++) { 143 for (i = 0; i <= GENL_MAX_ID - GENL_MIN_ID; i++) {
141 if (id_gen_idx != GENL_ID_VFS_DQUOT && 144 if (id_gen_idx != GENL_ID_VFS_DQUOT &&
145 id_gen_idx != GENL_ID_PMCRAID &&
142 !genl_family_find_byid(id_gen_idx)) 146 !genl_family_find_byid(id_gen_idx))
143 return id_gen_idx; 147 return id_gen_idx;
144 if (++id_gen_idx > GENL_MAX_ID) 148 if (++id_gen_idx > GENL_MAX_ID)
@@ -214,7 +218,7 @@ static int genl_validate_assign_mc_groups(struct genl_family *family)
214{ 218{
215 int first_id; 219 int first_id;
216 int n_groups = family->n_mcgrps; 220 int n_groups = family->n_mcgrps;
217 int err, i; 221 int err = 0, i;
218 bool groups_allocated = false; 222 bool groups_allocated = false;
219 223
220 if (!n_groups) 224 if (!n_groups)
@@ -236,9 +240,12 @@ static int genl_validate_assign_mc_groups(struct genl_family *family)
236 } else if (strcmp(family->name, "NET_DM") == 0) { 240 } else if (strcmp(family->name, "NET_DM") == 0) {
237 first_id = 1; 241 first_id = 1;
238 BUG_ON(n_groups != 1); 242 BUG_ON(n_groups != 1);
239 } else if (strcmp(family->name, "VFS_DQUOT") == 0) { 243 } else if (family->id == GENL_ID_VFS_DQUOT) {
240 first_id = GENL_ID_VFS_DQUOT; 244 first_id = GENL_ID_VFS_DQUOT;
241 BUG_ON(n_groups != 1); 245 BUG_ON(n_groups != 1);
246 } else if (family->id == GENL_ID_PMCRAID) {
247 first_id = GENL_ID_PMCRAID;
248 BUG_ON(n_groups != 1);
242 } else { 249 } else {
243 groups_allocated = true; 250 groups_allocated = true;
244 err = genl_allocate_reserve_groups(n_groups, &first_id); 251 err = genl_allocate_reserve_groups(n_groups, &first_id);
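
The comment above documents why bits 16 through 18 are pre-marked in the multicast group bitmap; a small standalone sketch of that reservation (constants as in the genetlink uapi header, allocation loop simplified) shows that those group IDs are never handed out to dynamically allocated groups.

#include <stdio.h>

#define BIT(n)            (1UL << (n))
#define GENL_ID_CTRL      16   /* NLMSG_MIN_TYPE */
#define GENL_ID_VFS_DQUOT 17
#define GENL_ID_PMCRAID   18

int main(void)
{
	/* Bits 0 and 1 plus the three static families are reserved up front. */
	unsigned long mc_groups = 0x3 | BIT(GENL_ID_CTRL) |
				  BIT(GENL_ID_VFS_DQUOT) | BIT(GENL_ID_PMCRAID);

	for (unsigned int id = 0; id < 8 * sizeof(mc_groups); id++) {
		if (!(mc_groups & BIT(id))) {
			printf("first group id free for allocation: %u\n", id);
			break;
		}
	}
	return 0;
}
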
diff --git a/net/nfc/core.c b/net/nfc/core.c
index 872529105abc..83b9927e7d19 100644
--- a/net/nfc/core.c
+++ b/net/nfc/core.c
@@ -384,7 +384,7 @@ int nfc_dep_link_is_up(struct nfc_dev *dev, u32 target_idx,
384{ 384{
385 dev->dep_link_up = true; 385 dev->dep_link_up = true;
386 386
387 if (!dev->active_target) { 387 if (!dev->active_target && rf_mode == NFC_RF_INITIATOR) {
388 struct nfc_target *target; 388 struct nfc_target *target;
389 389
390 target = nfc_find_target(dev, target_idx); 390 target = nfc_find_target(dev, target_idx);
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index ac27c86ef6d1..88cfbc189558 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -237,6 +237,30 @@ struct packet_skb_cb {
237static void __fanout_unlink(struct sock *sk, struct packet_sock *po); 237static void __fanout_unlink(struct sock *sk, struct packet_sock *po);
238static void __fanout_link(struct sock *sk, struct packet_sock *po); 238static void __fanout_link(struct sock *sk, struct packet_sock *po);
239 239
240static struct net_device *packet_cached_dev_get(struct packet_sock *po)
241{
242 struct net_device *dev;
243
244 rcu_read_lock();
245 dev = rcu_dereference(po->cached_dev);
246 if (likely(dev))
247 dev_hold(dev);
248 rcu_read_unlock();
249
250 return dev;
251}
252
253static void packet_cached_dev_assign(struct packet_sock *po,
254 struct net_device *dev)
255{
256 rcu_assign_pointer(po->cached_dev, dev);
257}
258
259static void packet_cached_dev_reset(struct packet_sock *po)
260{
261 RCU_INIT_POINTER(po->cached_dev, NULL);
262}
263
240/* register_prot_hook must be invoked with the po->bind_lock held, 264/* register_prot_hook must be invoked with the po->bind_lock held,
241 * or from a context in which asynchronous accesses to the packet 265 * or from a context in which asynchronous accesses to the packet
242 * socket is not possible (packet_create()). 266 * socket is not possible (packet_create()).
@@ -246,12 +270,10 @@ static void register_prot_hook(struct sock *sk)
246 struct packet_sock *po = pkt_sk(sk); 270 struct packet_sock *po = pkt_sk(sk);
247 271
248 if (!po->running) { 272 if (!po->running) {
249 if (po->fanout) { 273 if (po->fanout)
250 __fanout_link(sk, po); 274 __fanout_link(sk, po);
251 } else { 275 else
252 dev_add_pack(&po->prot_hook); 276 dev_add_pack(&po->prot_hook);
253 rcu_assign_pointer(po->cached_dev, po->prot_hook.dev);
254 }
255 277
256 sock_hold(sk); 278 sock_hold(sk);
257 po->running = 1; 279 po->running = 1;
@@ -270,12 +292,11 @@ static void __unregister_prot_hook(struct sock *sk, bool sync)
270 struct packet_sock *po = pkt_sk(sk); 292 struct packet_sock *po = pkt_sk(sk);
271 293
272 po->running = 0; 294 po->running = 0;
273 if (po->fanout) { 295
296 if (po->fanout)
274 __fanout_unlink(sk, po); 297 __fanout_unlink(sk, po);
275 } else { 298 else
276 __dev_remove_pack(&po->prot_hook); 299 __dev_remove_pack(&po->prot_hook);
277 RCU_INIT_POINTER(po->cached_dev, NULL);
278 }
279 300
280 __sock_put(sk); 301 __sock_put(sk);
281 302
@@ -439,9 +460,9 @@ static void prb_shutdown_retire_blk_timer(struct packet_sock *po,
439 460
440 pkc = tx_ring ? &po->tx_ring.prb_bdqc : &po->rx_ring.prb_bdqc; 461 pkc = tx_ring ? &po->tx_ring.prb_bdqc : &po->rx_ring.prb_bdqc;
441 462
442 spin_lock(&rb_queue->lock); 463 spin_lock_bh(&rb_queue->lock);
443 pkc->delete_blk_timer = 1; 464 pkc->delete_blk_timer = 1;
444 spin_unlock(&rb_queue->lock); 465 spin_unlock_bh(&rb_queue->lock);
445 466
446 prb_del_retire_blk_timer(pkc); 467 prb_del_retire_blk_timer(pkc);
447} 468}
@@ -2059,19 +2080,6 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
2059 return tp_len; 2080 return tp_len;
2060} 2081}
2061 2082
2062static struct net_device *packet_cached_dev_get(struct packet_sock *po)
2063{
2064 struct net_device *dev;
2065
2066 rcu_read_lock();
2067 dev = rcu_dereference(po->cached_dev);
2068 if (dev)
2069 dev_hold(dev);
2070 rcu_read_unlock();
2071
2072 return dev;
2073}
2074
2075static int tpacket_snd(struct packet_sock *po, struct msghdr *msg) 2083static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
2076{ 2084{
2077 struct sk_buff *skb; 2085 struct sk_buff *skb;
@@ -2088,7 +2096,7 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
2088 2096
2089 mutex_lock(&po->pg_vec_lock); 2097 mutex_lock(&po->pg_vec_lock);
2090 2098
2091 if (saddr == NULL) { 2099 if (likely(saddr == NULL)) {
2092 dev = packet_cached_dev_get(po); 2100 dev = packet_cached_dev_get(po);
2093 proto = po->num; 2101 proto = po->num;
2094 addr = NULL; 2102 addr = NULL;
@@ -2242,7 +2250,7 @@ static int packet_snd(struct socket *sock,
2242 * Get and verify the address. 2250 * Get and verify the address.
2243 */ 2251 */
2244 2252
2245 if (saddr == NULL) { 2253 if (likely(saddr == NULL)) {
2246 dev = packet_cached_dev_get(po); 2254 dev = packet_cached_dev_get(po);
2247 proto = po->num; 2255 proto = po->num;
2248 addr = NULL; 2256 addr = NULL;
@@ -2451,6 +2459,8 @@ static int packet_release(struct socket *sock)
2451 2459
2452 spin_lock(&po->bind_lock); 2460 spin_lock(&po->bind_lock);
2453 unregister_prot_hook(sk, false); 2461 unregister_prot_hook(sk, false);
2462 packet_cached_dev_reset(po);
2463
2454 if (po->prot_hook.dev) { 2464 if (po->prot_hook.dev) {
2455 dev_put(po->prot_hook.dev); 2465 dev_put(po->prot_hook.dev);
2456 po->prot_hook.dev = NULL; 2466 po->prot_hook.dev = NULL;
@@ -2506,14 +2516,17 @@ static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 protoc
2506 2516
2507 spin_lock(&po->bind_lock); 2517 spin_lock(&po->bind_lock);
2508 unregister_prot_hook(sk, true); 2518 unregister_prot_hook(sk, true);
2519
2509 po->num = protocol; 2520 po->num = protocol;
2510 po->prot_hook.type = protocol; 2521 po->prot_hook.type = protocol;
2511 if (po->prot_hook.dev) 2522 if (po->prot_hook.dev)
2512 dev_put(po->prot_hook.dev); 2523 dev_put(po->prot_hook.dev);
2513 po->prot_hook.dev = dev;
2514 2524
2525 po->prot_hook.dev = dev;
2515 po->ifindex = dev ? dev->ifindex : 0; 2526 po->ifindex = dev ? dev->ifindex : 0;
2516 2527
2528 packet_cached_dev_assign(po, dev);
2529
2517 if (protocol == 0) 2530 if (protocol == 0)
2518 goto out_unlock; 2531 goto out_unlock;
2519 2532
@@ -2626,7 +2639,8 @@ static int packet_create(struct net *net, struct socket *sock, int protocol,
2626 po = pkt_sk(sk); 2639 po = pkt_sk(sk);
2627 sk->sk_family = PF_PACKET; 2640 sk->sk_family = PF_PACKET;
2628 po->num = proto; 2641 po->num = proto;
2629 RCU_INIT_POINTER(po->cached_dev, NULL); 2642
2643 packet_cached_dev_reset(po);
2630 2644
2631 sk->sk_destruct = packet_sock_destruct; 2645 sk->sk_destruct = packet_sock_destruct;
2632 sk_refcnt_debug_inc(sk); 2646 sk_refcnt_debug_inc(sk);
@@ -3337,6 +3351,7 @@ static int packet_notifier(struct notifier_block *this,
3337 sk->sk_error_report(sk); 3351 sk->sk_error_report(sk);
3338 } 3352 }
3339 if (msg == NETDEV_UNREGISTER) { 3353 if (msg == NETDEV_UNREGISTER) {
3354 packet_cached_dev_reset(po);
3340 po->ifindex = -1; 3355 po->ifindex = -1;
3341 if (po->prot_hook.dev) 3356 if (po->prot_hook.dev)
3342 dev_put(po->prot_hook.dev); 3357 dev_put(po->prot_hook.dev);
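
A userspace model of the cached-device helpers consolidated above, with RCU and the device refcount reduced to plain fields: bind publishes the device, the transmit paths take a reference only when something is cached, and release or NETDEV_UNREGISTER clears it again.

#include <stddef.h>
#include <stdio.h>

/* Stand-in types; this only mirrors the shape of the kernel helpers. */
struct net_device { const char *name; int refcnt; };
struct packet_sock { struct net_device *cached_dev; };

static void dev_hold(struct net_device *dev) { dev->refcnt++; }
static void dev_put(struct net_device *dev)  { dev->refcnt--; }

static struct net_device *cached_dev_get(struct packet_sock *po)
{
	struct net_device *dev = po->cached_dev;   /* rcu_dereference() in the kernel */

	if (dev)
		dev_hold(dev);
	return dev;
}

static void cached_dev_assign(struct packet_sock *po, struct net_device *dev)
{
	po->cached_dev = dev;                      /* rcu_assign_pointer() in the kernel */
}

static void cached_dev_reset(struct packet_sock *po)
{
	po->cached_dev = NULL;                     /* RCU_INIT_POINTER() in the kernel */
}

int main(void)
{
	struct net_device eth0 = { "eth0", 1 };
	struct packet_sock po = { NULL };

	cached_dev_assign(&po, &eth0);                /* as in packet_do_bind() */
	struct net_device *dev = cached_dev_get(&po); /* as in tpacket_snd()/packet_snd() */
	if (dev) {
		printf("transmit via cached %s (refcnt %d)\n", dev->name, dev->refcnt);
		dev_put(dev);
	}
	cached_dev_reset(&po);                        /* as in packet_release() */
	return 0;
}
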
diff --git a/net/rds/ib.c b/net/rds/ib.c
index b4c8b0022fee..ba2dffeff608 100644
--- a/net/rds/ib.c
+++ b/net/rds/ib.c
@@ -338,7 +338,8 @@ static int rds_ib_laddr_check(__be32 addr)
338 ret = rdma_bind_addr(cm_id, (struct sockaddr *)&sin); 338 ret = rdma_bind_addr(cm_id, (struct sockaddr *)&sin);
339 /* due to this, we will claim to support iWARP devices unless we 339 /* due to this, we will claim to support iWARP devices unless we
340 check node_type. */ 340 check node_type. */
341 if (ret || cm_id->device->node_type != RDMA_NODE_IB_CA) 341 if (ret || !cm_id->device ||
342 cm_id->device->node_type != RDMA_NODE_IB_CA)
342 ret = -EADDRNOTAVAIL; 343 ret = -EADDRNOTAVAIL;
343 344
344 rdsdebug("addr %pI4 ret %d node type %d\n", 345 rdsdebug("addr %pI4 ret %d node type %d\n",
diff --git a/net/rds/ib_send.c b/net/rds/ib_send.c
index e59094981175..37be6e226d1b 100644
--- a/net/rds/ib_send.c
+++ b/net/rds/ib_send.c
@@ -552,9 +552,8 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
552 && rm->m_inc.i_hdr.h_flags & RDS_FLAG_CONG_BITMAP) { 552 && rm->m_inc.i_hdr.h_flags & RDS_FLAG_CONG_BITMAP) {
553 rds_cong_map_updated(conn->c_fcong, ~(u64) 0); 553 rds_cong_map_updated(conn->c_fcong, ~(u64) 0);
554 scat = &rm->data.op_sg[sg]; 554 scat = &rm->data.op_sg[sg];
555 ret = sizeof(struct rds_header) + RDS_CONG_MAP_BYTES; 555 ret = max_t(int, RDS_CONG_MAP_BYTES, scat->length);
556 ret = min_t(int, ret, scat->length - conn->c_xmit_data_off); 556 return sizeof(struct rds_header) + ret;
557 return ret;
558 } 557 }
559 558
560 /* FIXME we may overallocate here */ 559 /* FIXME we may overallocate here */
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index 33af77246bfe..62ced6516c58 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -1253,6 +1253,7 @@ static int rose_recvmsg(struct kiocb *iocb, struct socket *sock,
1253 1253
1254 if (msg->msg_name) { 1254 if (msg->msg_name) {
1255 struct sockaddr_rose *srose; 1255 struct sockaddr_rose *srose;
1256 struct full_sockaddr_rose *full_srose = msg->msg_name;
1256 1257
1257 memset(msg->msg_name, 0, sizeof(struct full_sockaddr_rose)); 1258 memset(msg->msg_name, 0, sizeof(struct full_sockaddr_rose));
1258 srose = msg->msg_name; 1259 srose = msg->msg_name;
@@ -1260,18 +1261,9 @@ static int rose_recvmsg(struct kiocb *iocb, struct socket *sock,
1260 srose->srose_addr = rose->dest_addr; 1261 srose->srose_addr = rose->dest_addr;
1261 srose->srose_call = rose->dest_call; 1262 srose->srose_call = rose->dest_call;
1262 srose->srose_ndigis = rose->dest_ndigis; 1263 srose->srose_ndigis = rose->dest_ndigis;
1263 if (msg->msg_namelen >= sizeof(struct full_sockaddr_rose)) { 1264 for (n = 0 ; n < rose->dest_ndigis ; n++)
1264 struct full_sockaddr_rose *full_srose = (struct full_sockaddr_rose *)msg->msg_name; 1265 full_srose->srose_digis[n] = rose->dest_digis[n];
1265 for (n = 0 ; n < rose->dest_ndigis ; n++) 1266 msg->msg_namelen = sizeof(struct full_sockaddr_rose);
1266 full_srose->srose_digis[n] = rose->dest_digis[n];
1267 msg->msg_namelen = sizeof(struct full_sockaddr_rose);
1268 } else {
1269 if (rose->dest_ndigis >= 1) {
1270 srose->srose_ndigis = 1;
1271 srose->srose_digi = rose->dest_digis[0];
1272 }
1273 msg->msg_namelen = sizeof(struct sockaddr_rose);
1274 }
1275 } 1267 }
1276 1268
1277 skb_free_datagram(sk, skb); 1269 skb_free_datagram(sk, skb);
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index fd7072827a40..69cb848e8345 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -270,6 +270,16 @@ int tcf_register_action(struct tc_action_ops *act)
270{ 270{
271 struct tc_action_ops *a, **ap; 271 struct tc_action_ops *a, **ap;
272 272
273 /* Must supply act, dump, cleanup and init */
274 if (!act->act || !act->dump || !act->cleanup || !act->init)
275 return -EINVAL;
276
277 /* Supply defaults */
278 if (!act->lookup)
279 act->lookup = tcf_hash_search;
280 if (!act->walk)
281 act->walk = tcf_generic_walker;
282
273 write_lock(&act_mod_lock); 283 write_lock(&act_mod_lock);
274 for (ap = &act_base; (a = *ap) != NULL; ap = &a->next) { 284 for (ap = &act_base; (a = *ap) != NULL; ap = &a->next) {
275 if (act->type == a->type || (strcmp(act->kind, a->kind) == 0)) { 285 if (act->type == a->type || (strcmp(act->kind, a->kind) == 0)) {
@@ -381,7 +391,7 @@ int tcf_action_exec(struct sk_buff *skb, const struct tc_action *act,
381 } 391 }
382 while ((a = act) != NULL) { 392 while ((a = act) != NULL) {
383repeat: 393repeat:
384 if (a->ops && a->ops->act) { 394 if (a->ops) {
385 ret = a->ops->act(skb, a, res); 395 ret = a->ops->act(skb, a, res);
386 if (TC_MUNGED & skb->tc_verd) { 396 if (TC_MUNGED & skb->tc_verd) {
387 /* copied already, allow trampling */ 397 /* copied already, allow trampling */
@@ -405,7 +415,7 @@ void tcf_action_destroy(struct tc_action *act, int bind)
405 struct tc_action *a; 415 struct tc_action *a;
406 416
407 for (a = act; a; a = act) { 417 for (a = act; a; a = act) {
408 if (a->ops && a->ops->cleanup) { 418 if (a->ops) {
409 if (a->ops->cleanup(a, bind) == ACT_P_DELETED) 419 if (a->ops->cleanup(a, bind) == ACT_P_DELETED)
410 module_put(a->ops->owner); 420 module_put(a->ops->owner);
411 act = act->next; 421 act = act->next;
@@ -424,7 +434,7 @@ tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
424{ 434{
425 int err = -EINVAL; 435 int err = -EINVAL;
426 436
427 if (a->ops == NULL || a->ops->dump == NULL) 437 if (a->ops == NULL)
428 return err; 438 return err;
429 return a->ops->dump(skb, a, bind, ref); 439 return a->ops->dump(skb, a, bind, ref);
430} 440}
@@ -436,7 +446,7 @@ tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
436 unsigned char *b = skb_tail_pointer(skb); 446 unsigned char *b = skb_tail_pointer(skb);
437 struct nlattr *nest; 447 struct nlattr *nest;
438 448
439 if (a->ops == NULL || a->ops->dump == NULL) 449 if (a->ops == NULL)
440 return err; 450 return err;
441 451
442 if (nla_put_string(skb, TCA_KIND, a->ops->kind)) 452 if (nla_put_string(skb, TCA_KIND, a->ops->kind))
@@ -723,8 +733,6 @@ tcf_action_get_1(struct nlattr *nla, struct nlmsghdr *n, u32 portid)
723 a->ops = tc_lookup_action(tb[TCA_ACT_KIND]); 733 a->ops = tc_lookup_action(tb[TCA_ACT_KIND]);
724 if (a->ops == NULL) 734 if (a->ops == NULL)
725 goto err_free; 735 goto err_free;
726 if (a->ops->lookup == NULL)
727 goto err_mod;
728 err = -ENOENT; 736 err = -ENOENT;
729 if (a->ops->lookup(a, index) == 0) 737 if (a->ops->lookup(a, index) == 0)
730 goto err_mod; 738 goto err_mod;
@@ -1084,12 +1092,6 @@ tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
1084 memset(&a, 0, sizeof(struct tc_action)); 1092 memset(&a, 0, sizeof(struct tc_action));
1085 a.ops = a_o; 1093 a.ops = a_o;
1086 1094
1087 if (a_o->walk == NULL) {
1088 WARN(1, "tc_dump_action: %s !capable of dumping table\n",
1089 a_o->kind);
1090 goto out_module_put;
1091 }
1092
1093 nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, 1095 nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
1094 cb->nlh->nlmsg_type, sizeof(*t), 0); 1096 cb->nlh->nlmsg_type, sizeof(*t), 0);
1095 if (!nlh) 1097 if (!nlh)
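
A standalone sketch of the registration contract added above, with stand-in types: act, dump, cleanup and init are mandatory, while lookup and walk default to the generic helpers, which is what lets the later NULL checks and the per-action .lookup/.walk entries in this patch go away.

#include <stdio.h>

struct tc_action_ops {
	const char *kind;
	int (*act)(void);
	int (*dump)(void);
	int (*cleanup)(void);
	int (*init)(void);
	int (*lookup)(void);
	int (*walk)(void);
};

static int generic_lookup(void) { return 0; }
static int generic_walker(void) { return 0; }
static int dummy(void)          { return 0; }

static int register_action(struct tc_action_ops *ops)
{
	/* Must supply act, dump, cleanup and init; -EINVAL in the kernel. */
	if (!ops->act || !ops->dump || !ops->cleanup || !ops->init)
		return -1;
	if (!ops->lookup)
		ops->lookup = generic_lookup;
	if (!ops->walk)
		ops->walk = generic_walker;
	return 0;
}

int main(void)
{
	struct tc_action_ops ok  = { "csum",   dummy, dummy, dummy, dummy, NULL, NULL };
	struct tc_action_ops bad = { "broken", dummy, NULL,  dummy, dummy, NULL, NULL };

	printf("register ok:  %d\n", register_action(&ok));
	printf("register bad: %d\n", register_action(&bad));
	return 0;
}
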
diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c
index 3a4c0caa1f7d..11fe1a416433 100644
--- a/net/sched/act_csum.c
+++ b/net/sched/act_csum.c
@@ -77,16 +77,16 @@ static int tcf_csum_init(struct net *n, struct nlattr *nla, struct nlattr *est,
77 &csum_idx_gen, &csum_hash_info); 77 &csum_idx_gen, &csum_hash_info);
78 if (IS_ERR(pc)) 78 if (IS_ERR(pc))
79 return PTR_ERR(pc); 79 return PTR_ERR(pc);
80 p = to_tcf_csum(pc);
81 ret = ACT_P_CREATED; 80 ret = ACT_P_CREATED;
82 } else { 81 } else {
83 p = to_tcf_csum(pc); 82 if (bind)/* dont override defaults */
84 if (!ovr) { 83 return 0;
85 tcf_hash_release(pc, bind, &csum_hash_info); 84 tcf_hash_release(pc, bind, &csum_hash_info);
85 if (!ovr)
86 return -EEXIST; 86 return -EEXIST;
87 }
88 } 87 }
89 88
89 p = to_tcf_csum(pc);
90 spin_lock_bh(&p->tcf_lock); 90 spin_lock_bh(&p->tcf_lock);
91 p->tcf_action = parm->action; 91 p->tcf_action = parm->action;
92 p->update_flags = parm->update_flags; 92 p->update_flags = parm->update_flags;
@@ -585,9 +585,7 @@ static struct tc_action_ops act_csum_ops = {
585 .act = tcf_csum, 585 .act = tcf_csum,
586 .dump = tcf_csum_dump, 586 .dump = tcf_csum_dump,
587 .cleanup = tcf_csum_cleanup, 587 .cleanup = tcf_csum_cleanup,
588 .lookup = tcf_hash_search,
589 .init = tcf_csum_init, 588 .init = tcf_csum_init,
590 .walk = tcf_generic_walker
591}; 589};
592 590
593MODULE_DESCRIPTION("Checksum updating actions"); 591MODULE_DESCRIPTION("Checksum updating actions");
diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c
index fd2b3cff5fa2..eb9ba60ebab4 100644
--- a/net/sched/act_gact.c
+++ b/net/sched/act_gact.c
@@ -102,10 +102,11 @@ static int tcf_gact_init(struct net *net, struct nlattr *nla,
102 return PTR_ERR(pc); 102 return PTR_ERR(pc);
103 ret = ACT_P_CREATED; 103 ret = ACT_P_CREATED;
104 } else { 104 } else {
105 if (!ovr) { 105 if (bind)/* dont override defaults */
106 tcf_hash_release(pc, bind, &gact_hash_info); 106 return 0;
107 tcf_hash_release(pc, bind, &gact_hash_info);
108 if (!ovr)
107 return -EEXIST; 109 return -EEXIST;
108 }
109 } 110 }
110 111
111 gact = to_gact(pc); 112 gact = to_gact(pc);
@@ -206,9 +207,7 @@ static struct tc_action_ops act_gact_ops = {
206 .act = tcf_gact, 207 .act = tcf_gact,
207 .dump = tcf_gact_dump, 208 .dump = tcf_gact_dump,
208 .cleanup = tcf_gact_cleanup, 209 .cleanup = tcf_gact_cleanup,
209 .lookup = tcf_hash_search,
210 .init = tcf_gact_init, 210 .init = tcf_gact_init,
211 .walk = tcf_generic_walker
212}; 211};
213 212
214MODULE_AUTHOR("Jamal Hadi Salim(2002-4)"); 213MODULE_AUTHOR("Jamal Hadi Salim(2002-4)");
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index 60d88b6b9560..dcbfe8ce04a6 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -141,10 +141,12 @@ static int tcf_ipt_init(struct net *net, struct nlattr *nla, struct nlattr *est,
141 return PTR_ERR(pc); 141 return PTR_ERR(pc);
142 ret = ACT_P_CREATED; 142 ret = ACT_P_CREATED;
143 } else { 143 } else {
144 if (!ovr) { 144 if (bind)/* dont override defaults */
145 tcf_ipt_release(to_ipt(pc), bind); 145 return 0;
146 tcf_ipt_release(to_ipt(pc), bind);
147
148 if (!ovr)
146 return -EEXIST; 149 return -EEXIST;
147 }
148 } 150 }
149 ipt = to_ipt(pc); 151 ipt = to_ipt(pc);
150 152
@@ -298,9 +300,7 @@ static struct tc_action_ops act_ipt_ops = {
298 .act = tcf_ipt, 300 .act = tcf_ipt,
299 .dump = tcf_ipt_dump, 301 .dump = tcf_ipt_dump,
300 .cleanup = tcf_ipt_cleanup, 302 .cleanup = tcf_ipt_cleanup,
301 .lookup = tcf_hash_search,
302 .init = tcf_ipt_init, 303 .init = tcf_ipt_init,
303 .walk = tcf_generic_walker
304}; 304};
305 305
306static struct tc_action_ops act_xt_ops = { 306static struct tc_action_ops act_xt_ops = {
@@ -312,9 +312,7 @@ static struct tc_action_ops act_xt_ops = {
312 .act = tcf_ipt, 312 .act = tcf_ipt,
313 .dump = tcf_ipt_dump, 313 .dump = tcf_ipt_dump,
314 .cleanup = tcf_ipt_cleanup, 314 .cleanup = tcf_ipt_cleanup,
315 .lookup = tcf_hash_search,
316 .init = tcf_ipt_init, 315 .init = tcf_ipt_init,
317 .walk = tcf_generic_walker
318}; 316};
319 317
320MODULE_AUTHOR("Jamal Hadi Salim(2002-13)"); 318MODULE_AUTHOR("Jamal Hadi Salim(2002-13)");
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index 977c10e0631b..252378121ce7 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -271,9 +271,7 @@ static struct tc_action_ops act_mirred_ops = {
271 .act = tcf_mirred, 271 .act = tcf_mirred,
272 .dump = tcf_mirred_dump, 272 .dump = tcf_mirred_dump,
273 .cleanup = tcf_mirred_cleanup, 273 .cleanup = tcf_mirred_cleanup,
274 .lookup = tcf_hash_search,
275 .init = tcf_mirred_init, 274 .init = tcf_mirred_init,
276 .walk = tcf_generic_walker
277}; 275};
278 276
279MODULE_AUTHOR("Jamal Hadi Salim(2002)"); 277MODULE_AUTHOR("Jamal Hadi Salim(2002)");
diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c
index 876f0ef29694..76869538d028 100644
--- a/net/sched/act_nat.c
+++ b/net/sched/act_nat.c
@@ -70,15 +70,15 @@ static int tcf_nat_init(struct net *net, struct nlattr *nla, struct nlattr *est,
70 &nat_idx_gen, &nat_hash_info); 70 &nat_idx_gen, &nat_hash_info);
71 if (IS_ERR(pc)) 71 if (IS_ERR(pc))
72 return PTR_ERR(pc); 72 return PTR_ERR(pc);
73 p = to_tcf_nat(pc);
74 ret = ACT_P_CREATED; 73 ret = ACT_P_CREATED;
75 } else { 74 } else {
76 p = to_tcf_nat(pc); 75 if (bind)
77 if (!ovr) { 76 return 0;
78 tcf_hash_release(pc, bind, &nat_hash_info); 77 tcf_hash_release(pc, bind, &nat_hash_info);
78 if (!ovr)
79 return -EEXIST; 79 return -EEXIST;
80 }
81 } 80 }
81 p = to_tcf_nat(pc);
82 82
83 spin_lock_bh(&p->tcf_lock); 83 spin_lock_bh(&p->tcf_lock);
84 p->old_addr = parm->old_addr; 84 p->old_addr = parm->old_addr;
@@ -308,9 +308,7 @@ static struct tc_action_ops act_nat_ops = {
308 .act = tcf_nat, 308 .act = tcf_nat,
309 .dump = tcf_nat_dump, 309 .dump = tcf_nat_dump,
310 .cleanup = tcf_nat_cleanup, 310 .cleanup = tcf_nat_cleanup,
311 .lookup = tcf_hash_search,
312 .init = tcf_nat_init, 311 .init = tcf_nat_init,
313 .walk = tcf_generic_walker
314}; 312};
315 313
316MODULE_DESCRIPTION("Stateless NAT actions"); 314MODULE_DESCRIPTION("Stateless NAT actions");
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index 7ed78c9e505c..7aa2dcd989f8 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -84,10 +84,12 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
84 ret = ACT_P_CREATED; 84 ret = ACT_P_CREATED;
85 } else { 85 } else {
86 p = to_pedit(pc); 86 p = to_pedit(pc);
87 if (!ovr) { 87 tcf_hash_release(pc, bind, &pedit_hash_info);
88 tcf_hash_release(pc, bind, &pedit_hash_info); 88 if (bind)
89 return 0;
90 if (!ovr)
89 return -EEXIST; 91 return -EEXIST;
90 } 92
91 if (p->tcfp_nkeys && p->tcfp_nkeys != parm->nkeys) { 93 if (p->tcfp_nkeys && p->tcfp_nkeys != parm->nkeys) {
92 keys = kmalloc(ksize, GFP_KERNEL); 94 keys = kmalloc(ksize, GFP_KERNEL);
93 if (keys == NULL) 95 if (keys == NULL)
@@ -243,9 +245,7 @@ static struct tc_action_ops act_pedit_ops = {
243 .act = tcf_pedit, 245 .act = tcf_pedit,
244 .dump = tcf_pedit_dump, 246 .dump = tcf_pedit_dump,
245 .cleanup = tcf_pedit_cleanup, 247 .cleanup = tcf_pedit_cleanup,
246 .lookup = tcf_hash_search,
247 .init = tcf_pedit_init, 248 .init = tcf_pedit_init,
248 .walk = tcf_generic_walker
249}; 249};
250 250
251MODULE_AUTHOR("Jamal Hadi Salim(2002-4)"); 251MODULE_AUTHOR("Jamal Hadi Salim(2002-4)");
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index 272d8e924cf6..ef246d87e68b 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -177,10 +177,12 @@ static int tcf_act_police_locate(struct net *net, struct nlattr *nla,
177 if (bind) { 177 if (bind) {
178 police->tcf_bindcnt += 1; 178 police->tcf_bindcnt += 1;
179 police->tcf_refcnt += 1; 179 police->tcf_refcnt += 1;
180 return 0;
180 } 181 }
181 if (ovr) 182 if (ovr)
182 goto override; 183 goto override;
183 return ret; 184 /* not replacing */
185 return -EEXIST;
184 } 186 }
185 } 187 }
186 188
@@ -407,7 +409,6 @@ static struct tc_action_ops act_police_ops = {
407 .act = tcf_act_police, 409 .act = tcf_act_police,
408 .dump = tcf_act_police_dump, 410 .dump = tcf_act_police_dump,
409 .cleanup = tcf_act_police_cleanup, 411 .cleanup = tcf_act_police_cleanup,
410 .lookup = tcf_hash_search,
411 .init = tcf_act_police_locate, 412 .init = tcf_act_police_locate,
412 .walk = tcf_act_police_walker 413 .walk = tcf_act_police_walker
413}; 414};
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
index 7725eb4ab756..f7b45ab85388 100644
--- a/net/sched/act_simple.c
+++ b/net/sched/act_simple.c
@@ -142,10 +142,13 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla,
142 ret = ACT_P_CREATED; 142 ret = ACT_P_CREATED;
143 } else { 143 } else {
144 d = to_defact(pc); 144 d = to_defact(pc);
145 if (!ovr) { 145
146 tcf_simp_release(d, bind); 146 if (bind)
147 return 0;
148 tcf_simp_release(d, bind);
149 if (!ovr)
147 return -EEXIST; 150 return -EEXIST;
148 } 151
149 reset_policy(d, defdata, parm); 152 reset_policy(d, defdata, parm);
150 } 153 }
151 154
@@ -201,7 +204,6 @@ static struct tc_action_ops act_simp_ops = {
201 .dump = tcf_simp_dump, 204 .dump = tcf_simp_dump,
202 .cleanup = tcf_simp_cleanup, 205 .cleanup = tcf_simp_cleanup,
203 .init = tcf_simp_init, 206 .init = tcf_simp_init,
204 .walk = tcf_generic_walker,
205}; 207};
206 208
207MODULE_AUTHOR("Jamal Hadi Salim(2005)"); 209MODULE_AUTHOR("Jamal Hadi Salim(2005)");
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
index cb4221171f93..8fe9d25c3008 100644
--- a/net/sched/act_skbedit.c
+++ b/net/sched/act_skbedit.c
@@ -120,10 +120,11 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
120 ret = ACT_P_CREATED; 120 ret = ACT_P_CREATED;
121 } else { 121 } else {
122 d = to_skbedit(pc); 122 d = to_skbedit(pc);
123 if (!ovr) { 123 if (bind)
124 tcf_hash_release(pc, bind, &skbedit_hash_info); 124 return 0;
125 tcf_hash_release(pc, bind, &skbedit_hash_info);
126 if (!ovr)
125 return -EEXIST; 127 return -EEXIST;
126 }
127 } 128 }
128 129
129 spin_lock_bh(&d->tcf_lock); 130 spin_lock_bh(&d->tcf_lock);
@@ -203,7 +204,6 @@ static struct tc_action_ops act_skbedit_ops = {
203 .dump = tcf_skbedit_dump, 204 .dump = tcf_skbedit_dump,
204 .cleanup = tcf_skbedit_cleanup, 205 .cleanup = tcf_skbedit_cleanup,
205 .init = tcf_skbedit_init, 206 .init = tcf_skbedit_init,
206 .walk = tcf_generic_walker,
207}; 207};
208 208
209MODULE_AUTHOR("Alexander Duyck, <alexander.h.duyck@intel.com>"); 209MODULE_AUTHOR("Alexander Duyck, <alexander.h.duyck@intel.com>");
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 922a09406ba7..7fc899a943a8 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -126,7 +126,7 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
126 126
127 HARD_TX_LOCK(dev, txq, smp_processor_id()); 127 HARD_TX_LOCK(dev, txq, smp_processor_id());
128 if (!netif_xmit_frozen_or_stopped(txq)) 128 if (!netif_xmit_frozen_or_stopped(txq))
129 ret = dev_hard_start_xmit(skb, dev, txq, NULL); 129 ret = dev_hard_start_xmit(skb, dev, txq);
130 130
131 HARD_TX_UNLOCK(dev, txq); 131 HARD_TX_UNLOCK(dev, txq);
132 132
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 0e1e38b40025..717b2108f852 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -1477,11 +1477,22 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
1477 sch_tree_lock(sch); 1477 sch_tree_lock(sch);
1478 } 1478 }
1479 1479
1480 rate64 = tb[TCA_HTB_RATE64] ? nla_get_u64(tb[TCA_HTB_RATE64]) : 0;
1481
1482 ceil64 = tb[TCA_HTB_CEIL64] ? nla_get_u64(tb[TCA_HTB_CEIL64]) : 0;
1483
1484 psched_ratecfg_precompute(&cl->rate, &hopt->rate, rate64);
1485 psched_ratecfg_precompute(&cl->ceil, &hopt->ceil, ceil64);
1486
1480 /* it used to be a nasty bug here, we have to check that node 1487 /* it used to be a nasty bug here, we have to check that node
1481 * is really leaf before changing cl->un.leaf ! 1488 * is really leaf before changing cl->un.leaf !
1482 */ 1489 */
1483 if (!cl->level) { 1490 if (!cl->level) {
1484 cl->quantum = hopt->rate.rate / q->rate2quantum; 1491 u64 quantum = cl->rate.rate_bytes_ps;
1492
1493 do_div(quantum, q->rate2quantum);
1494 cl->quantum = min_t(u64, quantum, INT_MAX);
1495
1485 if (!hopt->quantum && cl->quantum < 1000) { 1496 if (!hopt->quantum && cl->quantum < 1000) {
1486 pr_warning( 1497 pr_warning(
1487 "HTB: quantum of class %X is small. Consider r2q change.\n", 1498 "HTB: quantum of class %X is small. Consider r2q change.\n",
@@ -1500,13 +1511,6 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
1500 cl->prio = TC_HTB_NUMPRIO - 1; 1511 cl->prio = TC_HTB_NUMPRIO - 1;
1501 } 1512 }
1502 1513
1503 rate64 = tb[TCA_HTB_RATE64] ? nla_get_u64(tb[TCA_HTB_RATE64]) : 0;
1504
1505 ceil64 = tb[TCA_HTB_CEIL64] ? nla_get_u64(tb[TCA_HTB_CEIL64]) : 0;
1506
1507 psched_ratecfg_precompute(&cl->rate, &hopt->rate, rate64);
1508 psched_ratecfg_precompute(&cl->ceil, &hopt->ceil, ceil64);
1509
1510 cl->buffer = PSCHED_TICKS2NS(hopt->buffer); 1514 cl->buffer = PSCHED_TICKS2NS(hopt->buffer);
1511 cl->cbuffer = PSCHED_TICKS2NS(hopt->cbuffer); 1515 cl->cbuffer = PSCHED_TICKS2NS(hopt->cbuffer);
1512 1516
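
A worked example of the quantum computation moved and widened above: the old expression divided the 32-bit rate field, which tops out around 34 Gbit/s, while the new one divides the 64-bit bytes-per-second value and clamps the result to fit the int quantum. The numbers below are illustrative.

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t rate_bytes_ps = 50000000000ULL; /* 400 Gbit/s expressed in bytes per second */
	unsigned int rate2quantum = 1;           /* r2q, deliberately small for the example */

	uint64_t quantum = rate_bytes_ps / rate2quantum;  /* do_div() in the kernel */
	int cl_quantum = quantum > INT_MAX ? INT_MAX : (int)quantum;

	printf("64-bit quantum %llu clamped to cl->quantum %d\n",
	       (unsigned long long)quantum, cl_quantum);
	return 0;
}
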
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 75c94e59a3bd..bccd52b36e97 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -215,10 +215,10 @@ static bool loss_4state(struct netem_sched_data *q)
215 if (rnd < clg->a4) { 215 if (rnd < clg->a4) {
216 clg->state = 4; 216 clg->state = 4;
217 return true; 217 return true;
218 } else if (clg->a4 < rnd && rnd < clg->a1) { 218 } else if (clg->a4 < rnd && rnd < clg->a1 + clg->a4) {
219 clg->state = 3; 219 clg->state = 3;
220 return true; 220 return true;
221 } else if (clg->a1 < rnd) 221 } else if (clg->a1 + clg->a4 < rnd)
222 clg->state = 1; 222 clg->state = 1;
223 223
224 break; 224 break;
@@ -268,10 +268,11 @@ static bool loss_gilb_ell(struct netem_sched_data *q)
268 clg->state = 2; 268 clg->state = 2;
269 if (net_random() < clg->a4) 269 if (net_random() < clg->a4)
270 return true; 270 return true;
271 break;
271 case 2: 272 case 2:
272 if (net_random() < clg->a2) 273 if (net_random() < clg->a2)
273 clg->state = 1; 274 clg->state = 1;
274 if (clg->a3 > net_random()) 275 if (net_random() > clg->a3)
275 return true; 276 return true;
276 } 277 }
277 278
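
A worked example of the corrected thresholds in loss_4state() above: from the good state a single uniform draw now falls into three disjoint bands, a4 for an isolated loss, a1 for entering a loss burst, and the remainder for staying in good reception, so the two loss probabilities no longer overlap. The probabilities below are made up for illustration.

#include <stdio.h>

int main(void)
{
	double a1 = 0.05, a4 = 0.01;            /* transition probabilities out of state 1 */
	double rnd[] = { 0.005, 0.030, 0.500 }; /* sample uniform draws */

	for (int i = 0; i < 3; i++) {
		if (rnd[i] < a4)
			printf("rnd=%.3f -> state 4, isolated loss\n", rnd[i]);
		else if (rnd[i] < a1 + a4)
			printf("rnd=%.3f -> state 3, loss burst\n", rnd[i]);
		else
			printf("rnd=%.3f -> stay in state 1, packet delivered\n", rnd[i]);
	}
	return 0;
}
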
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index 68f98595819c..887e672f9d7d 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -21,6 +21,7 @@
21#include <net/netlink.h> 21#include <net/netlink.h>
22#include <net/sch_generic.h> 22#include <net/sch_generic.h>
23#include <net/pkt_sched.h> 23#include <net/pkt_sched.h>
24#include <net/tcp.h>
24 25
25 26
26/* Simple Token Bucket Filter. 27/* Simple Token Bucket Filter.
@@ -117,6 +118,48 @@ struct tbf_sched_data {
117}; 118};
118 119
119 120
121/* Time to Length, convert time in ns to length in bytes
122 * to determinate how many bytes can be sent in given time.
123 */
124static u64 psched_ns_t2l(const struct psched_ratecfg *r,
125 u64 time_in_ns)
126{
127 /* The formula is :
128 * len = (time_in_ns * r->rate_bytes_ps) / NSEC_PER_SEC
129 */
130 u64 len = time_in_ns * r->rate_bytes_ps;
131
132 do_div(len, NSEC_PER_SEC);
133
134 if (unlikely(r->linklayer == TC_LINKLAYER_ATM)) {
135 do_div(len, 53);
136 len = len * 48;
137 }
138
139 if (len > r->overhead)
140 len -= r->overhead;
141 else
142 len = 0;
143
144 return len;
145}
146
147/*
148 * Return length of individual segments of a gso packet,
149 * including all headers (MAC, IP, TCP/UDP)
150 */
151static unsigned int skb_gso_seglen(const struct sk_buff *skb)
152{
153 unsigned int hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
154 const struct skb_shared_info *shinfo = skb_shinfo(skb);
155
156 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
157 hdr_len += tcp_hdrlen(skb);
158 else
159 hdr_len += sizeof(struct udphdr);
160 return hdr_len + shinfo->gso_size;
161}
162
120/* GSO packet is too big, segment it so that tbf can transmit 163/* GSO packet is too big, segment it so that tbf can transmit
121 * each segment in time 164 * each segment in time
122 */ 165 */
@@ -136,12 +179,8 @@ static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch)
136 while (segs) { 179 while (segs) {
137 nskb = segs->next; 180 nskb = segs->next;
138 segs->next = NULL; 181 segs->next = NULL;
139 if (likely(segs->len <= q->max_size)) { 182 qdisc_skb_cb(segs)->pkt_len = segs->len;
140 qdisc_skb_cb(segs)->pkt_len = segs->len; 183 ret = qdisc_enqueue(segs, q->qdisc);
141 ret = qdisc_enqueue(segs, q->qdisc);
142 } else {
143 ret = qdisc_reshape_fail(skb, sch);
144 }
145 if (ret != NET_XMIT_SUCCESS) { 184 if (ret != NET_XMIT_SUCCESS) {
146 if (net_xmit_drop_count(ret)) 185 if (net_xmit_drop_count(ret))
147 sch->qstats.drops++; 186 sch->qstats.drops++;
@@ -163,7 +202,7 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
163 int ret; 202 int ret;
164 203
165 if (qdisc_pkt_len(skb) > q->max_size) { 204 if (qdisc_pkt_len(skb) > q->max_size) {
166 if (skb_is_gso(skb)) 205 if (skb_is_gso(skb) && skb_gso_seglen(skb) <= q->max_size)
167 return tbf_segment(skb, sch); 206 return tbf_segment(skb, sch);
168 return qdisc_reshape_fail(skb, sch); 207 return qdisc_reshape_fail(skb, sch);
169 } 208 }
@@ -276,10 +315,11 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
276 struct tbf_sched_data *q = qdisc_priv(sch); 315 struct tbf_sched_data *q = qdisc_priv(sch);
277 struct nlattr *tb[TCA_TBF_MAX + 1]; 316 struct nlattr *tb[TCA_TBF_MAX + 1];
278 struct tc_tbf_qopt *qopt; 317 struct tc_tbf_qopt *qopt;
279 struct qdisc_rate_table *rtab = NULL;
280 struct qdisc_rate_table *ptab = NULL;
281 struct Qdisc *child = NULL; 318 struct Qdisc *child = NULL;
282 int max_size, n; 319 struct psched_ratecfg rate;
320 struct psched_ratecfg peak;
321 u64 max_size;
322 s64 buffer, mtu;
283 u64 rate64 = 0, prate64 = 0; 323 u64 rate64 = 0, prate64 = 0;
284 324
285 err = nla_parse_nested(tb, TCA_TBF_MAX, opt, tbf_policy); 325 err = nla_parse_nested(tb, TCA_TBF_MAX, opt, tbf_policy);
@@ -291,33 +331,13 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
291 goto done; 331 goto done;
292 332
293 qopt = nla_data(tb[TCA_TBF_PARMS]); 333 qopt = nla_data(tb[TCA_TBF_PARMS]);
294 rtab = qdisc_get_rtab(&qopt->rate, tb[TCA_TBF_RTAB]); 334 if (qopt->rate.linklayer == TC_LINKLAYER_UNAWARE)
295 if (rtab == NULL) 335 qdisc_put_rtab(qdisc_get_rtab(&qopt->rate,
296 goto done; 336 tb[TCA_TBF_RTAB]));
297 337
298 if (qopt->peakrate.rate) { 338 if (qopt->peakrate.linklayer == TC_LINKLAYER_UNAWARE)
299 if (qopt->peakrate.rate > qopt->rate.rate) 339 qdisc_put_rtab(qdisc_get_rtab(&qopt->peakrate,
300 ptab = qdisc_get_rtab(&qopt->peakrate, tb[TCA_TBF_PTAB]); 340 tb[TCA_TBF_PTAB]));
301 if (ptab == NULL)
302 goto done;
303 }
304
305 for (n = 0; n < 256; n++)
306 if (rtab->data[n] > qopt->buffer)
307 break;
308 max_size = (n << qopt->rate.cell_log) - 1;
309 if (ptab) {
310 int size;
311
312 for (n = 0; n < 256; n++)
313 if (ptab->data[n] > qopt->mtu)
314 break;
315 size = (n << qopt->peakrate.cell_log) - 1;
316 if (size < max_size)
317 max_size = size;
318 }
319 if (max_size < 0)
320 goto done;
321 341
322 if (q->qdisc != &noop_qdisc) { 342 if (q->qdisc != &noop_qdisc) {
323 err = fifo_set_limit(q->qdisc, qopt->limit); 343 err = fifo_set_limit(q->qdisc, qopt->limit);
@@ -331,6 +351,39 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
331 } 351 }
332 } 352 }
333 353
354 buffer = min_t(u64, PSCHED_TICKS2NS(qopt->buffer), ~0U);
355 mtu = min_t(u64, PSCHED_TICKS2NS(qopt->mtu), ~0U);
356
357 if (tb[TCA_TBF_RATE64])
358 rate64 = nla_get_u64(tb[TCA_TBF_RATE64]);
359 psched_ratecfg_precompute(&rate, &qopt->rate, rate64);
360
361 max_size = min_t(u64, psched_ns_t2l(&rate, buffer), ~0U);
362
363 if (qopt->peakrate.rate) {
364 if (tb[TCA_TBF_PRATE64])
365 prate64 = nla_get_u64(tb[TCA_TBF_PRATE64]);
366 psched_ratecfg_precompute(&peak, &qopt->peakrate, prate64);
367 if (peak.rate_bytes_ps <= rate.rate_bytes_ps) {
368 pr_warn_ratelimited("sch_tbf: peakrate %llu is lower than or equals to rate %llu !\n",
369 peak.rate_bytes_ps, rate.rate_bytes_ps);
370 err = -EINVAL;
371 goto done;
372 }
373
374 max_size = min_t(u64, max_size, psched_ns_t2l(&peak, mtu));
375 }
376
377 if (max_size < psched_mtu(qdisc_dev(sch)))
378 pr_warn_ratelimited("sch_tbf: burst %llu is lower than device %s mtu (%u) !\n",
379 max_size, qdisc_dev(sch)->name,
380 psched_mtu(qdisc_dev(sch)));
381
382 if (!max_size) {
383 err = -EINVAL;
384 goto done;
385 }
386
334 sch_tree_lock(sch); 387 sch_tree_lock(sch);
335 if (child) { 388 if (child) {
336 qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen); 389 qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen);
@@ -344,13 +397,9 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
344 q->tokens = q->buffer; 397 q->tokens = q->buffer;
345 q->ptokens = q->mtu; 398 q->ptokens = q->mtu;
346 399
347 if (tb[TCA_TBF_RATE64]) 400 memcpy(&q->rate, &rate, sizeof(struct psched_ratecfg));
348 rate64 = nla_get_u64(tb[TCA_TBF_RATE64]); 401 if (qopt->peakrate.rate) {
349 psched_ratecfg_precompute(&q->rate, &rtab->rate, rate64); 402 memcpy(&q->peak, &peak, sizeof(struct psched_ratecfg));
350 if (ptab) {
351 if (tb[TCA_TBF_PRATE64])
352 prate64 = nla_get_u64(tb[TCA_TBF_PRATE64]);
353 psched_ratecfg_precompute(&q->peak, &ptab->rate, prate64);
354 q->peak_present = true; 403 q->peak_present = true;
355 } else { 404 } else {
356 q->peak_present = false; 405 q->peak_present = false;
@@ -359,10 +408,6 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
359 sch_tree_unlock(sch); 408 sch_tree_unlock(sch);
360 err = 0; 409 err = 0;
361done: 410done:
362 if (rtab)
363 qdisc_put_rtab(rtab);
364 if (ptab)
365 qdisc_put_rtab(ptab);
366 return err; 411 return err;
367} 412}
368 413
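
A worked example of psched_ns_t2l() introduced above: burst and mtu arrive as times, and the largest packet the bucket can pass is however many bytes the configured rate sends in that time, len = t_ns * rate_bytes_ps / NSEC_PER_SEC, shrunk by the 48-in-53 ATM cell framing and the per-packet overhead when those are configured. The patch then takes the smaller of the rate and peak-rate results as max_size and rejects a peak rate that does not exceed the rate. This is a standalone restatement of the formula, not the kernel function.

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

static uint64_t ns_to_len(uint64_t rate_bytes_ps, uint64_t time_ns,
			  int atm_linklayer, uint64_t overhead)
{
	uint64_t len = time_ns * rate_bytes_ps / NSEC_PER_SEC;

	if (atm_linklayer)
		len = (len / 53) * 48;   /* 48 payload bytes per 53-byte ATM cell */
	return len > overhead ? len - overhead : 0;
}

int main(void)
{
	uint64_t rate = 125000000;                /* 1 Gbit/s in bytes per second */
	uint64_t buffer_ns = 10ULL * 1000 * 1000; /* a 10 ms burst allowance */

	printf("max_size = %llu bytes\n",
	       (unsigned long long)ns_to_len(rate, buffer_ns, 0, 0));
	return 0;
}
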
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 68a27f9796d2..31ed008c8e13 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -154,8 +154,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
154 154
155 asoc->timeouts[SCTP_EVENT_TIMEOUT_HEARTBEAT] = 0; 155 asoc->timeouts[SCTP_EVENT_TIMEOUT_HEARTBEAT] = 0;
156 asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = asoc->sackdelay; 156 asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = asoc->sackdelay;
157 asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] = 157 asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] = sp->autoclose * HZ;
158 min_t(unsigned long, sp->autoclose, net->sctp.max_autoclose) * HZ;
159 158
160 /* Initializes the timers */ 159 /* Initializes the timers */
161 for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i) 160 for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i)
@@ -291,8 +290,6 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
291 asoc->peer.ipv6_address = 1; 290 asoc->peer.ipv6_address = 1;
292 INIT_LIST_HEAD(&asoc->asocs); 291 INIT_LIST_HEAD(&asoc->asocs);
293 292
294 asoc->autoclose = sp->autoclose;
295
296 asoc->default_stream = sp->default_stream; 293 asoc->default_stream = sp->default_stream;
297 asoc->default_ppid = sp->default_ppid; 294 asoc->default_ppid = sp->default_ppid;
298 asoc->default_flags = sp->default_flags; 295 asoc->default_flags = sp->default_flags;
diff --git a/net/sctp/output.c b/net/sctp/output.c
index e650978daf27..0fb140f8f088 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -474,10 +474,11 @@ int sctp_packet_transmit(struct sctp_packet *packet)
474 * for a given destination transport address. 474 * for a given destination transport address.
475 */ 475 */
476 476
477 if (!tp->rto_pending) { 477 if (!chunk->resent && !tp->rto_pending) {
478 chunk->rtt_in_progress = 1; 478 chunk->rtt_in_progress = 1;
479 tp->rto_pending = 1; 479 tp->rto_pending = 1;
480 } 480 }
481
481 has_data = 1; 482 has_data = 1;
482 } 483 }
483 484
@@ -580,7 +581,8 @@ int sctp_packet_transmit(struct sctp_packet *packet)
580 unsigned long timeout; 581 unsigned long timeout;
581 582
582 /* Restart the AUTOCLOSE timer when sending data. */ 583 /* Restart the AUTOCLOSE timer when sending data. */
583 if (sctp_state(asoc, ESTABLISHED) && asoc->autoclose) { 584 if (sctp_state(asoc, ESTABLISHED) &&
585 asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE]) {
584 timer = &asoc->timers[SCTP_EVENT_TIMEOUT_AUTOCLOSE]; 586 timer = &asoc->timers[SCTP_EVENT_TIMEOUT_AUTOCLOSE];
585 timeout = asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE]; 587 timeout = asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE];
586 588
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index 94df75877869..59268f6e2c36 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -208,8 +208,6 @@ void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q)
208 INIT_LIST_HEAD(&q->retransmit); 208 INIT_LIST_HEAD(&q->retransmit);
209 INIT_LIST_HEAD(&q->sacked); 209 INIT_LIST_HEAD(&q->sacked);
210 INIT_LIST_HEAD(&q->abandoned); 210 INIT_LIST_HEAD(&q->abandoned);
211
212 q->empty = 1;
213} 211}
214 212
215/* Free the outqueue structure and any related pending chunks. 213/* Free the outqueue structure and any related pending chunks.
@@ -332,7 +330,6 @@ int sctp_outq_tail(struct sctp_outq *q, struct sctp_chunk *chunk)
332 SCTP_INC_STATS(net, SCTP_MIB_OUTUNORDERCHUNKS); 330 SCTP_INC_STATS(net, SCTP_MIB_OUTUNORDERCHUNKS);
333 else 331 else
334 SCTP_INC_STATS(net, SCTP_MIB_OUTORDERCHUNKS); 332 SCTP_INC_STATS(net, SCTP_MIB_OUTORDERCHUNKS);
335 q->empty = 0;
336 break; 333 break;
337 } 334 }
338 } else { 335 } else {
@@ -446,6 +443,8 @@ void sctp_retransmit_mark(struct sctp_outq *q,
446 transport->rto_pending = 0; 443 transport->rto_pending = 0;
447 } 444 }
448 445
446 chunk->resent = 1;
447
449 /* Move the chunk to the retransmit queue. The chunks 448 /* Move the chunk to the retransmit queue. The chunks
450 * on the retransmit queue are always kept in order. 449 * on the retransmit queue are always kept in order.
451 */ 450 */
@@ -652,7 +651,6 @@ redo:
652 if (chunk->fast_retransmit == SCTP_NEED_FRTX) 651 if (chunk->fast_retransmit == SCTP_NEED_FRTX)
653 chunk->fast_retransmit = SCTP_DONT_FRTX; 652 chunk->fast_retransmit = SCTP_DONT_FRTX;
654 653
655 q->empty = 0;
656 q->asoc->stats.rtxchunks++; 654 q->asoc->stats.rtxchunks++;
657 break; 655 break;
658 } 656 }
@@ -1063,8 +1061,6 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
1063 1061
1064 sctp_transport_reset_timers(transport); 1062 sctp_transport_reset_timers(transport);
1065 1063
1066 q->empty = 0;
1067
1068 /* Only let one DATA chunk get bundled with a 1064 /* Only let one DATA chunk get bundled with a
1069 * COOKIE-ECHO chunk. 1065 * COOKIE-ECHO chunk.
1070 */ 1066 */
@@ -1273,29 +1269,17 @@ int sctp_outq_sack(struct sctp_outq *q, struct sctp_chunk *chunk)
1273 "advertised peer ack point:0x%x\n", __func__, asoc, ctsn, 1269 "advertised peer ack point:0x%x\n", __func__, asoc, ctsn,
1274 asoc->adv_peer_ack_point); 1270 asoc->adv_peer_ack_point);
1275 1271
1276 /* See if all chunks are acked. 1272 return sctp_outq_is_empty(q);
1277 * Make sure the empty queue handler will get run later.
1278 */
1279 q->empty = (list_empty(&q->out_chunk_list) &&
1280 list_empty(&q->retransmit));
1281 if (!q->empty)
1282 goto finish;
1283
1284 list_for_each_entry(transport, transport_list, transports) {
1285 q->empty = q->empty && list_empty(&transport->transmitted);
1286 if (!q->empty)
1287 goto finish;
1288 }
1289
1290 pr_debug("%s: sack queue is empty\n", __func__);
1291finish:
1292 return q->empty;
1293} 1273}
1294 1274
1295/* Is the outqueue empty? */ 1275/* Is the outqueue empty?
 1276 * The queue is empty when we have no pending data, no in-flight data
 1277 * and no pending retransmissions.
1278 */
1296int sctp_outq_is_empty(const struct sctp_outq *q) 1279int sctp_outq_is_empty(const struct sctp_outq *q)
1297{ 1280{
1298 return q->empty; 1281 return q->out_qlen == 0 && q->outstanding_bytes == 0 &&
1282 list_empty(&q->retransmit);
1299} 1283}
1300 1284
1301/******************************************************************** 1285/********************************************************************
@@ -1375,6 +1359,7 @@ static void sctp_check_transmitted(struct sctp_outq *q,
1375 * instance). 1359 * instance).
1376 */ 1360 */
1377 if (!tchunk->tsn_gap_acked && 1361 if (!tchunk->tsn_gap_acked &&
1362 !tchunk->resent &&
1378 tchunk->rtt_in_progress) { 1363 tchunk->rtt_in_progress) {
1379 tchunk->rtt_in_progress = 0; 1364 tchunk->rtt_in_progress = 0;
1380 rtt = jiffies - tchunk->sent_at; 1365 rtt = jiffies - tchunk->sent_at;
@@ -1391,7 +1376,8 @@ static void sctp_check_transmitted(struct sctp_outq *q,
1391 */ 1376 */
1392 if (!tchunk->tsn_gap_acked) { 1377 if (!tchunk->tsn_gap_acked) {
1393 tchunk->tsn_gap_acked = 1; 1378 tchunk->tsn_gap_acked = 1;
1394 *highest_new_tsn_in_sack = tsn; 1379 if (TSN_lt(*highest_new_tsn_in_sack, tsn))
1380 *highest_new_tsn_in_sack = tsn;
1395 bytes_acked += sctp_data_size(tchunk); 1381 bytes_acked += sctp_data_size(tchunk);
1396 if (!tchunk->transport) 1382 if (!tchunk->transport)
1397 migrate_bytes += sctp_data_size(tchunk); 1383 migrate_bytes += sctp_data_size(tchunk);
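
The outqueue changes above drop the cached q->empty flag and compute emptiness on demand from the queued-byte counters and the retransmit list, so the state can no longer drift out of sync with the lists it describes. A minimal userspace sketch of that idea follows; the struct and field names are illustrative stand-ins, not the kernel's types.

	#include <stdbool.h>
	#include <stddef.h>
	#include <stdio.h>

	/* Illustrative stand-in for the output queue state. */
	struct outq {
		size_t out_qlen;           /* bytes queued, not yet transmitted */
		size_t outstanding_bytes;  /* bytes transmitted, not yet acked */
		size_t retransmit_len;     /* chunks waiting for retransmission */
	};

	/* Emptiness is derived from the counters every time it is asked for,
	 * so it cannot go stale the way a cached flag could. */
	static bool outq_is_empty(const struct outq *q)
	{
		return q->out_qlen == 0 && q->outstanding_bytes == 0 &&
		       q->retransmit_len == 0;
	}

	int main(void)
	{
		struct outq q = { .out_qlen = 0, .outstanding_bytes = 100,
				  .retransmit_len = 0 };

		printf("empty: %d\n", outq_is_empty(&q)); /* 0: data in flight */
		q.outstanding_bytes = 0;
		printf("empty: %d\n", outq_is_empty(&q)); /* 1 */
		return 0;
	}
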
diff --git a/net/sctp/probe.c b/net/sctp/probe.c
index 53c452efb40b..5e68b94ee640 100644
--- a/net/sctp/probe.c
+++ b/net/sctp/probe.c
@@ -38,6 +38,7 @@
38#include <net/sctp/sctp.h> 38#include <net/sctp/sctp.h>
39#include <net/sctp/sm.h> 39#include <net/sctp/sm.h>
40 40
41MODULE_SOFTDEP("pre: sctp");
41MODULE_AUTHOR("Wei Yongjun <yjwei@cn.fujitsu.com>"); 42MODULE_AUTHOR("Wei Yongjun <yjwei@cn.fujitsu.com>");
42MODULE_DESCRIPTION("SCTP snooper"); 43MODULE_DESCRIPTION("SCTP snooper");
43MODULE_LICENSE("GPL"); 44MODULE_LICENSE("GPL");
@@ -182,6 +183,20 @@ static struct jprobe sctp_recv_probe = {
182 .entry = jsctp_sf_eat_sack, 183 .entry = jsctp_sf_eat_sack,
183}; 184};
184 185
186static __init int sctp_setup_jprobe(void)
187{
188 int ret = register_jprobe(&sctp_recv_probe);
189
190 if (ret) {
191 if (request_module("sctp"))
192 goto out;
193 ret = register_jprobe(&sctp_recv_probe);
194 }
195
196out:
197 return ret;
198}
199
185static __init int sctpprobe_init(void) 200static __init int sctpprobe_init(void)
186{ 201{
187 int ret = -ENOMEM; 202 int ret = -ENOMEM;
@@ -202,7 +217,7 @@ static __init int sctpprobe_init(void)
202 &sctpprobe_fops)) 217 &sctpprobe_fops))
203 goto free_kfifo; 218 goto free_kfifo;
204 219
205 ret = register_jprobe(&sctp_recv_probe); 220 ret = sctp_setup_jprobe();
206 if (ret) 221 if (ret)
207 goto remove_proc; 222 goto remove_proc;
208 223
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index dfe3f36ff2aa..a26065be7289 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -820,7 +820,7 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(struct net *net,
820 SCTP_INC_STATS(net, SCTP_MIB_PASSIVEESTABS); 820 SCTP_INC_STATS(net, SCTP_MIB_PASSIVEESTABS);
821 sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START, SCTP_NULL()); 821 sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START, SCTP_NULL());
822 822
823 if (new_asoc->autoclose) 823 if (new_asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE])
824 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START, 824 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START,
825 SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE)); 825 SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE));
826 826
@@ -908,7 +908,7 @@ sctp_disposition_t sctp_sf_do_5_1E_ca(struct net *net,
908 SCTP_INC_STATS(net, SCTP_MIB_CURRESTAB); 908 SCTP_INC_STATS(net, SCTP_MIB_CURRESTAB);
909 SCTP_INC_STATS(net, SCTP_MIB_ACTIVEESTABS); 909 SCTP_INC_STATS(net, SCTP_MIB_ACTIVEESTABS);
910 sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START, SCTP_NULL()); 910 sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START, SCTP_NULL());
911 if (asoc->autoclose) 911 if (asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE])
912 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START, 912 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START,
913 SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE)); 913 SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE));
914 914
@@ -2970,7 +2970,7 @@ sctp_disposition_t sctp_sf_eat_data_6_2(struct net *net,
2970 if (chunk->chunk_hdr->flags & SCTP_DATA_SACK_IMM) 2970 if (chunk->chunk_hdr->flags & SCTP_DATA_SACK_IMM)
2971 force = SCTP_FORCE(); 2971 force = SCTP_FORCE();
2972 2972
2973 if (asoc->autoclose) { 2973 if (asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE]) {
2974 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART, 2974 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
2975 SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE)); 2975 SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE));
2976 } 2976 }
@@ -3878,7 +3878,7 @@ sctp_disposition_t sctp_sf_eat_fwd_tsn(struct net *net,
3878 SCTP_CHUNK(chunk)); 3878 SCTP_CHUNK(chunk));
3879 3879
3880 /* Count this as receiving DATA. */ 3880 /* Count this as receiving DATA. */
3881 if (asoc->autoclose) { 3881 if (asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE]) {
3882 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART, 3882 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
3883 SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE)); 3883 SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE));
3884 } 3884 }
@@ -5267,7 +5267,7 @@ sctp_disposition_t sctp_sf_do_9_2_start_shutdown(
5267 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART, 5267 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
5268 SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD)); 5268 SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD));
5269 5269
5270 if (asoc->autoclose) 5270 if (asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE])
5271 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, 5271 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
5272 SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE)); 5272 SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE));
5273 5273
@@ -5346,7 +5346,7 @@ sctp_disposition_t sctp_sf_do_9_2_shutdown_ack(
5346 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART, 5346 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
5347 SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN)); 5347 SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN));
5348 5348
5349 if (asoc->autoclose) 5349 if (asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE])
5350 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, 5350 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
5351 SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE)); 5351 SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE));
5352 5352
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 72046b9729a8..42b709c95cf3 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -2196,6 +2196,7 @@ static int sctp_setsockopt_autoclose(struct sock *sk, char __user *optval,
2196 unsigned int optlen) 2196 unsigned int optlen)
2197{ 2197{
2198 struct sctp_sock *sp = sctp_sk(sk); 2198 struct sctp_sock *sp = sctp_sk(sk);
2199 struct net *net = sock_net(sk);
2199 2200
2200 /* Applicable to UDP-style socket only */ 2201 /* Applicable to UDP-style socket only */
2201 if (sctp_style(sk, TCP)) 2202 if (sctp_style(sk, TCP))
@@ -2205,6 +2206,9 @@ static int sctp_setsockopt_autoclose(struct sock *sk, char __user *optval,
2205 if (copy_from_user(&sp->autoclose, optval, optlen)) 2206 if (copy_from_user(&sp->autoclose, optval, optlen))
2206 return -EFAULT; 2207 return -EFAULT;
2207 2208
2209 if (sp->autoclose > net->sctp.max_autoclose)
2210 sp->autoclose = net->sctp.max_autoclose;
2211
2208 return 0; 2212 return 0;
2209} 2213}
2210 2214
@@ -2811,6 +2815,8 @@ static int sctp_setsockopt_rtoinfo(struct sock *sk, char __user *optval, unsigne
2811{ 2815{
2812 struct sctp_rtoinfo rtoinfo; 2816 struct sctp_rtoinfo rtoinfo;
2813 struct sctp_association *asoc; 2817 struct sctp_association *asoc;
2818 unsigned long rto_min, rto_max;
2819 struct sctp_sock *sp = sctp_sk(sk);
2814 2820
2815 if (optlen != sizeof (struct sctp_rtoinfo)) 2821 if (optlen != sizeof (struct sctp_rtoinfo))
2816 return -EINVAL; 2822 return -EINVAL;
@@ -2824,26 +2830,36 @@ static int sctp_setsockopt_rtoinfo(struct sock *sk, char __user *optval, unsigne
2824 if (!asoc && rtoinfo.srto_assoc_id && sctp_style(sk, UDP)) 2830 if (!asoc && rtoinfo.srto_assoc_id && sctp_style(sk, UDP))
2825 return -EINVAL; 2831 return -EINVAL;
2826 2832
2833 rto_max = rtoinfo.srto_max;
2834 rto_min = rtoinfo.srto_min;
2835
2836 if (rto_max)
2837 rto_max = asoc ? msecs_to_jiffies(rto_max) : rto_max;
2838 else
2839 rto_max = asoc ? asoc->rto_max : sp->rtoinfo.srto_max;
2840
2841 if (rto_min)
2842 rto_min = asoc ? msecs_to_jiffies(rto_min) : rto_min;
2843 else
2844 rto_min = asoc ? asoc->rto_min : sp->rtoinfo.srto_min;
2845
2846 if (rto_min > rto_max)
2847 return -EINVAL;
2848
2827 if (asoc) { 2849 if (asoc) {
2828 if (rtoinfo.srto_initial != 0) 2850 if (rtoinfo.srto_initial != 0)
2829 asoc->rto_initial = 2851 asoc->rto_initial =
2830 msecs_to_jiffies(rtoinfo.srto_initial); 2852 msecs_to_jiffies(rtoinfo.srto_initial);
2831 if (rtoinfo.srto_max != 0) 2853 asoc->rto_max = rto_max;
2832 asoc->rto_max = msecs_to_jiffies(rtoinfo.srto_max); 2854 asoc->rto_min = rto_min;
2833 if (rtoinfo.srto_min != 0)
2834 asoc->rto_min = msecs_to_jiffies(rtoinfo.srto_min);
2835 } else { 2855 } else {
2836 /* If there is no association or the association-id = 0 2856 /* If there is no association or the association-id = 0
2837 * set the values to the endpoint. 2857 * set the values to the endpoint.
2838 */ 2858 */
2839 struct sctp_sock *sp = sctp_sk(sk);
2840
2841 if (rtoinfo.srto_initial != 0) 2859 if (rtoinfo.srto_initial != 0)
2842 sp->rtoinfo.srto_initial = rtoinfo.srto_initial; 2860 sp->rtoinfo.srto_initial = rtoinfo.srto_initial;
2843 if (rtoinfo.srto_max != 0) 2861 sp->rtoinfo.srto_max = rto_max;
2844 sp->rtoinfo.srto_max = rtoinfo.srto_max; 2862 sp->rtoinfo.srto_min = rto_min;
2845 if (rtoinfo.srto_min != 0)
2846 sp->rtoinfo.srto_min = rtoinfo.srto_min;
2847 } 2863 }
2848 2864
2849 return 0; 2865 return 0;
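
The socket.c hunks above move the autoclose clamp to setsockopt time (capping the value at the per-net max_autoclose) and make SCTP_RTOINFO reject settings where rto_min would exceed rto_max. A rough sketch of the same validation, with invented helper names and plain unsigned ints standing in for the socket fields:

	#include <errno.h>
	#include <stdio.h>

	static int set_autoclose(unsigned int *dst, unsigned int requested,
				 unsigned int max_autoclose)
	{
		/* Silently clamp rather than fail, matching the patch above. */
		*dst = requested > max_autoclose ? max_autoclose : requested;
		return 0;
	}

	static int set_rto_bounds(unsigned int *rto_min, unsigned int *rto_max,
				  unsigned int new_min, unsigned int new_max)
	{
		/* A zero means "keep the current value". */
		unsigned int min = new_min ? new_min : *rto_min;
		unsigned int max = new_max ? new_max : *rto_max;

		if (min > max)
			return -EINVAL;	/* never allow an inverted range */
		*rto_min = min;
		*rto_max = max;
		return 0;
	}

	int main(void)
	{
		unsigned int autoclose, rto_min = 1000, rto_max = 60000;

		set_autoclose(&autoclose, 4000000000u, 2147483u);
		printf("autoclose clamped to %u\n", autoclose);
		printf("inverted range rejected: %d\n",
		       set_rto_bounds(&rto_min, &rto_max, 70000, 0));
		return 0;
	}
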
diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
index 6b36561a1b3b..b0565afb61c7 100644
--- a/net/sctp/sysctl.c
+++ b/net/sctp/sysctl.c
@@ -56,11 +56,16 @@ extern long sysctl_sctp_mem[3];
56extern int sysctl_sctp_rmem[3]; 56extern int sysctl_sctp_rmem[3];
57extern int sysctl_sctp_wmem[3]; 57extern int sysctl_sctp_wmem[3];
58 58
59static int proc_sctp_do_hmac_alg(struct ctl_table *ctl, 59static int proc_sctp_do_hmac_alg(struct ctl_table *ctl, int write,
60 int write, 60 void __user *buffer, size_t *lenp,
61 loff_t *ppos);
62static int proc_sctp_do_rto_min(struct ctl_table *ctl, int write,
63 void __user *buffer, size_t *lenp,
64 loff_t *ppos);
65static int proc_sctp_do_rto_max(struct ctl_table *ctl, int write,
61 void __user *buffer, size_t *lenp, 66 void __user *buffer, size_t *lenp,
62
63 loff_t *ppos); 67 loff_t *ppos);
68
64static struct ctl_table sctp_table[] = { 69static struct ctl_table sctp_table[] = {
65 { 70 {
66 .procname = "sctp_mem", 71 .procname = "sctp_mem",
@@ -102,17 +107,17 @@ static struct ctl_table sctp_net_table[] = {
102 .data = &init_net.sctp.rto_min, 107 .data = &init_net.sctp.rto_min,
103 .maxlen = sizeof(unsigned int), 108 .maxlen = sizeof(unsigned int),
104 .mode = 0644, 109 .mode = 0644,
105 .proc_handler = proc_dointvec_minmax, 110 .proc_handler = proc_sctp_do_rto_min,
106 .extra1 = &one, 111 .extra1 = &one,
107 .extra2 = &timer_max 112 .extra2 = &init_net.sctp.rto_max
108 }, 113 },
109 { 114 {
110 .procname = "rto_max", 115 .procname = "rto_max",
111 .data = &init_net.sctp.rto_max, 116 .data = &init_net.sctp.rto_max,
112 .maxlen = sizeof(unsigned int), 117 .maxlen = sizeof(unsigned int),
113 .mode = 0644, 118 .mode = 0644,
114 .proc_handler = proc_dointvec_minmax, 119 .proc_handler = proc_sctp_do_rto_max,
115 .extra1 = &one, 120 .extra1 = &init_net.sctp.rto_min,
116 .extra2 = &timer_max 121 .extra2 = &timer_max
117 }, 122 },
118 { 123 {
@@ -294,8 +299,7 @@ static struct ctl_table sctp_net_table[] = {
294 { /* sentinel */ } 299 { /* sentinel */ }
295}; 300};
296 301
297static int proc_sctp_do_hmac_alg(struct ctl_table *ctl, 302static int proc_sctp_do_hmac_alg(struct ctl_table *ctl, int write,
298 int write,
299 void __user *buffer, size_t *lenp, 303 void __user *buffer, size_t *lenp,
300 loff_t *ppos) 304 loff_t *ppos)
301{ 305{
@@ -342,6 +346,60 @@ static int proc_sctp_do_hmac_alg(struct ctl_table *ctl,
342 return ret; 346 return ret;
343} 347}
344 348
349static int proc_sctp_do_rto_min(struct ctl_table *ctl, int write,
350 void __user *buffer, size_t *lenp,
351 loff_t *ppos)
352{
353 struct net *net = current->nsproxy->net_ns;
354 int new_value;
355 struct ctl_table tbl;
356 unsigned int min = *(unsigned int *) ctl->extra1;
357 unsigned int max = *(unsigned int *) ctl->extra2;
358 int ret;
359
360 memset(&tbl, 0, sizeof(struct ctl_table));
361 tbl.maxlen = sizeof(unsigned int);
362
363 if (write)
364 tbl.data = &new_value;
365 else
366 tbl.data = &net->sctp.rto_min;
367 ret = proc_dointvec(&tbl, write, buffer, lenp, ppos);
368 if (write) {
369 if (ret || new_value > max || new_value < min)
370 return -EINVAL;
371 net->sctp.rto_min = new_value;
372 }
373 return ret;
374}
375
376static int proc_sctp_do_rto_max(struct ctl_table *ctl, int write,
377 void __user *buffer, size_t *lenp,
378 loff_t *ppos)
379{
380 struct net *net = current->nsproxy->net_ns;
381 int new_value;
382 struct ctl_table tbl;
383 unsigned int min = *(unsigned int *) ctl->extra1;
384 unsigned int max = *(unsigned int *) ctl->extra2;
385 int ret;
386
387 memset(&tbl, 0, sizeof(struct ctl_table));
388 tbl.maxlen = sizeof(unsigned int);
389
390 if (write)
391 tbl.data = &new_value;
392 else
393 tbl.data = &net->sctp.rto_max;
394 ret = proc_dointvec(&tbl, write, buffer, lenp, ppos);
395 if (write) {
396 if (ret || new_value > max || new_value < min)
397 return -EINVAL;
398 net->sctp.rto_max = new_value;
399 }
400 return ret;
401}
402
345int sctp_sysctl_net_register(struct net *net) 403int sctp_sysctl_net_register(struct net *net)
346{ 404{
347 struct ctl_table *table; 405 struct ctl_table *table;
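
The sysctl.c hunks above replace proc_dointvec_minmax with dedicated handlers whose bounds are live pointers, so rto_min is always checked against the current rto_max and vice versa, and the new value is only committed after validation. A small userspace sketch of that parse-into-a-temporary-then-validate pattern (the parsing helper below is illustrative, not the procfs interface):

	#include <errno.h>
	#include <stdio.h>
	#include <stdlib.h>

	/* Live bounds: the limit on rto_min is rto_max itself, not a constant. */
	static unsigned int rto_min = 1000, rto_max = 60000;

	static int write_bounded(unsigned int *target, const char *buf,
				 const unsigned int *lo, const unsigned int *hi)
	{
		char *end;
		unsigned long val = strtoul(buf, &end, 10);

		if (end == buf || val < *lo || val > *hi)
			return -EINVAL;       /* validate the temporary first */
		*target = (unsigned int)val;  /* commit only after validation */
		return 0;
	}

	int main(void)
	{
		static const unsigned int one = 1;

		printf("%d\n", write_bounded(&rto_min, "70000", &one, &rto_max));
		printf("%d\n", write_bounded(&rto_min, "200", &one, &rto_max));
		printf("rto_min=%u\n", rto_min);
		return 0;
	}
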
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index e332efb124cc..efc46ffed1fd 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -573,7 +573,7 @@ void sctp_transport_burst_limited(struct sctp_transport *t)
573 u32 old_cwnd = t->cwnd; 573 u32 old_cwnd = t->cwnd;
574 u32 max_burst_bytes; 574 u32 max_burst_bytes;
575 575
576 if (t->burst_limited) 576 if (t->burst_limited || asoc->max_burst == 0)
577 return; 577 return;
578 578
579 max_burst_bytes = t->flight_size + (asoc->max_burst * asoc->pathmtu); 579 max_burst_bytes = t->flight_size + (asoc->max_burst * asoc->pathmtu);
diff --git a/net/socket.c b/net/socket.c
index 0b18693f2be6..e83c416708af 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -1973,7 +1973,7 @@ static int copy_msghdr_from_user(struct msghdr *kmsg,
1973 if (copy_from_user(kmsg, umsg, sizeof(struct msghdr))) 1973 if (copy_from_user(kmsg, umsg, sizeof(struct msghdr)))
1974 return -EFAULT; 1974 return -EFAULT;
1975 if (kmsg->msg_namelen > sizeof(struct sockaddr_storage)) 1975 if (kmsg->msg_namelen > sizeof(struct sockaddr_storage))
1976 return -EINVAL; 1976 kmsg->msg_namelen = sizeof(struct sockaddr_storage);
1977 return 0; 1977 return 0;
1978} 1978}
1979 1979
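
The copy_msghdr_from_user change above caps an oversized msg_namelen at sizeof(struct sockaddr_storage) instead of failing with -EINVAL. A tiny sketch of the clamp; struct kmsg here is a made-up stand-in for the kernel msghdr:

	#include <stdio.h>
	#include <sys/socket.h>

	struct kmsg { int msg_namelen; };

	static void cap_namelen(struct kmsg *m)
	{
		/* Cap a userspace-supplied length at the size of the buffer
		 * the address will actually be copied into. */
		if (m->msg_namelen > (int)sizeof(struct sockaddr_storage))
			m->msg_namelen = sizeof(struct sockaddr_storage);
	}

	int main(void)
	{
		struct kmsg m = { .msg_namelen = 4096 };

		cap_namelen(&m);
		printf("namelen capped to %d\n", m.msg_namelen);
		return 0;
	}
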
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index 97912b40c254..42fdfc634e56 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -1517,7 +1517,7 @@ out:
1517static int 1517static int
1518gss_refresh_null(struct rpc_task *task) 1518gss_refresh_null(struct rpc_task *task)
1519{ 1519{
1520 return -EACCES; 1520 return 0;
1521} 1521}
1522 1522
1523static __be32 * 1523static __be32 *
diff --git a/net/tipc/core.c b/net/tipc/core.c
index fd4eeeaa972a..c6d3f75a9e1b 100644
--- a/net/tipc/core.c
+++ b/net/tipc/core.c
@@ -113,7 +113,6 @@ err:
113static void tipc_core_stop(void) 113static void tipc_core_stop(void)
114{ 114{
115 tipc_netlink_stop(); 115 tipc_netlink_stop();
116 tipc_handler_stop();
117 tipc_cfg_stop(); 116 tipc_cfg_stop();
118 tipc_subscr_stop(); 117 tipc_subscr_stop();
119 tipc_nametbl_stop(); 118 tipc_nametbl_stop();
@@ -146,9 +145,10 @@ static int tipc_core_start(void)
146 res = tipc_subscr_start(); 145 res = tipc_subscr_start();
147 if (!res) 146 if (!res)
148 res = tipc_cfg_init(); 147 res = tipc_cfg_init();
149 if (res) 148 if (res) {
149 tipc_handler_stop();
150 tipc_core_stop(); 150 tipc_core_stop();
151 151 }
152 return res; 152 return res;
153} 153}
154 154
@@ -178,6 +178,7 @@ static int __init tipc_init(void)
178 178
179static void __exit tipc_exit(void) 179static void __exit tipc_exit(void)
180{ 180{
181 tipc_handler_stop();
181 tipc_core_stop_net(); 182 tipc_core_stop_net();
182 tipc_core_stop(); 183 tipc_core_stop();
183 pr_info("Deactivated\n"); 184 pr_info("Deactivated\n");
diff --git a/net/tipc/handler.c b/net/tipc/handler.c
index b36f0fcd9bdf..e4bc8a296744 100644
--- a/net/tipc/handler.c
+++ b/net/tipc/handler.c
@@ -56,12 +56,13 @@ unsigned int tipc_k_signal(Handler routine, unsigned long argument)
56{ 56{
57 struct queue_item *item; 57 struct queue_item *item;
58 58
59 spin_lock_bh(&qitem_lock);
59 if (!handler_enabled) { 60 if (!handler_enabled) {
60 pr_err("Signal request ignored by handler\n"); 61 pr_err("Signal request ignored by handler\n");
62 spin_unlock_bh(&qitem_lock);
61 return -ENOPROTOOPT; 63 return -ENOPROTOOPT;
62 } 64 }
63 65
64 spin_lock_bh(&qitem_lock);
65 item = kmem_cache_alloc(tipc_queue_item_cache, GFP_ATOMIC); 66 item = kmem_cache_alloc(tipc_queue_item_cache, GFP_ATOMIC);
66 if (!item) { 67 if (!item) {
67 pr_err("Signal queue out of memory\n"); 68 pr_err("Signal queue out of memory\n");
@@ -112,10 +113,14 @@ void tipc_handler_stop(void)
112 struct list_head *l, *n; 113 struct list_head *l, *n;
113 struct queue_item *item; 114 struct queue_item *item;
114 115
115 if (!handler_enabled) 116 spin_lock_bh(&qitem_lock);
117 if (!handler_enabled) {
118 spin_unlock_bh(&qitem_lock);
116 return; 119 return;
117 120 }
118 handler_enabled = 0; 121 handler_enabled = 0;
122 spin_unlock_bh(&qitem_lock);
123
119 tasklet_kill(&tipc_tasklet); 124 tasklet_kill(&tipc_tasklet);
120 125
121 spin_lock_bh(&qitem_lock); 126 spin_lock_bh(&qitem_lock);
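
The tipc handler.c changes above move the handler_enabled test inside qitem_lock, so the check and the queueing (or the teardown in tipc_handler_stop) are atomic with respect to each other. A short pthread sketch of the same check-under-lock pattern; all names are illustrative:

	#include <errno.h>
	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t qlock = PTHREAD_MUTEX_INITIALIZER;
	static int handler_enabled = 1;
	static int queued;

	static int signal_handler_work(void)
	{
		pthread_mutex_lock(&qlock);
		if (!handler_enabled) {       /* tested under the same lock  */
			pthread_mutex_unlock(&qlock); /* that handler_stop() takes */
			return -ENOPROTOOPT;
		}
		queued++;                     /* safe: still enabled here */
		pthread_mutex_unlock(&qlock);
		return 0;
	}

	static void handler_stop(void)
	{
		pthread_mutex_lock(&qlock);
		handler_enabled = 0;          /* no new work queued after this */
		pthread_mutex_unlock(&qlock);
	}

	int main(void)
	{
		printf("%d\n", signal_handler_work()); /* 0 */
		handler_stop();
		printf("%d\n", signal_handler_work()); /* -ENOPROTOOPT */
		return 0;
	}
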
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 69cd9bf3f561..13b987745820 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -1498,6 +1498,7 @@ void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *b_ptr)
1498 int type; 1498 int type;
1499 1499
1500 head = head->next; 1500 head = head->next;
1501 buf->next = NULL;
1501 1502
1502 /* Ensure bearer is still enabled */ 1503 /* Ensure bearer is still enabled */
1503 if (unlikely(!b_ptr->active)) 1504 if (unlikely(!b_ptr->active))
diff --git a/net/tipc/port.c b/net/tipc/port.c
index c081a7632302..d43f3182b1d4 100644
--- a/net/tipc/port.c
+++ b/net/tipc/port.c
@@ -251,18 +251,15 @@ struct tipc_port *tipc_createport(struct sock *sk,
251 return p_ptr; 251 return p_ptr;
252} 252}
253 253
254int tipc_deleteport(u32 ref) 254int tipc_deleteport(struct tipc_port *p_ptr)
255{ 255{
256 struct tipc_port *p_ptr;
257 struct sk_buff *buf = NULL; 256 struct sk_buff *buf = NULL;
258 257
259 tipc_withdraw(ref, 0, NULL); 258 tipc_withdraw(p_ptr, 0, NULL);
260 p_ptr = tipc_port_lock(ref);
261 if (!p_ptr)
262 return -EINVAL;
263 259
264 tipc_ref_discard(ref); 260 spin_lock_bh(p_ptr->lock);
265 tipc_port_unlock(p_ptr); 261 tipc_ref_discard(p_ptr->ref);
262 spin_unlock_bh(p_ptr->lock);
266 263
267 k_cancel_timer(&p_ptr->timer); 264 k_cancel_timer(&p_ptr->timer);
268 if (p_ptr->connected) { 265 if (p_ptr->connected) {
@@ -704,47 +701,36 @@ int tipc_set_portimportance(u32 ref, unsigned int imp)
704} 701}
705 702
706 703
707int tipc_publish(u32 ref, unsigned int scope, struct tipc_name_seq const *seq) 704int tipc_publish(struct tipc_port *p_ptr, unsigned int scope,
705 struct tipc_name_seq const *seq)
708{ 706{
709 struct tipc_port *p_ptr;
710 struct publication *publ; 707 struct publication *publ;
711 u32 key; 708 u32 key;
712 int res = -EINVAL;
713 709
714 p_ptr = tipc_port_lock(ref); 710 if (p_ptr->connected)
715 if (!p_ptr)
716 return -EINVAL; 711 return -EINVAL;
712 key = p_ptr->ref + p_ptr->pub_count + 1;
713 if (key == p_ptr->ref)
714 return -EADDRINUSE;
717 715
718 if (p_ptr->connected)
719 goto exit;
720 key = ref + p_ptr->pub_count + 1;
721 if (key == ref) {
722 res = -EADDRINUSE;
723 goto exit;
724 }
725 publ = tipc_nametbl_publish(seq->type, seq->lower, seq->upper, 716 publ = tipc_nametbl_publish(seq->type, seq->lower, seq->upper,
726 scope, p_ptr->ref, key); 717 scope, p_ptr->ref, key);
727 if (publ) { 718 if (publ) {
728 list_add(&publ->pport_list, &p_ptr->publications); 719 list_add(&publ->pport_list, &p_ptr->publications);
729 p_ptr->pub_count++; 720 p_ptr->pub_count++;
730 p_ptr->published = 1; 721 p_ptr->published = 1;
731 res = 0; 722 return 0;
732 } 723 }
733exit: 724 return -EINVAL;
734 tipc_port_unlock(p_ptr);
735 return res;
736} 725}
737 726
738int tipc_withdraw(u32 ref, unsigned int scope, struct tipc_name_seq const *seq) 727int tipc_withdraw(struct tipc_port *p_ptr, unsigned int scope,
728 struct tipc_name_seq const *seq)
739{ 729{
740 struct tipc_port *p_ptr;
741 struct publication *publ; 730 struct publication *publ;
742 struct publication *tpubl; 731 struct publication *tpubl;
743 int res = -EINVAL; 732 int res = -EINVAL;
744 733
745 p_ptr = tipc_port_lock(ref);
746 if (!p_ptr)
747 return -EINVAL;
748 if (!seq) { 734 if (!seq) {
749 list_for_each_entry_safe(publ, tpubl, 735 list_for_each_entry_safe(publ, tpubl,
750 &p_ptr->publications, pport_list) { 736 &p_ptr->publications, pport_list) {
@@ -771,7 +757,6 @@ int tipc_withdraw(u32 ref, unsigned int scope, struct tipc_name_seq const *seq)
771 } 757 }
772 if (list_empty(&p_ptr->publications)) 758 if (list_empty(&p_ptr->publications))
773 p_ptr->published = 0; 759 p_ptr->published = 0;
774 tipc_port_unlock(p_ptr);
775 return res; 760 return res;
776} 761}
777 762
diff --git a/net/tipc/port.h b/net/tipc/port.h
index 912253597343..34f12bd4074e 100644
--- a/net/tipc/port.h
+++ b/net/tipc/port.h
@@ -116,7 +116,7 @@ int tipc_reject_msg(struct sk_buff *buf, u32 err);
116 116
117void tipc_acknowledge(u32 port_ref, u32 ack); 117void tipc_acknowledge(u32 port_ref, u32 ack);
118 118
119int tipc_deleteport(u32 portref); 119int tipc_deleteport(struct tipc_port *p_ptr);
120 120
121int tipc_portimportance(u32 portref, unsigned int *importance); 121int tipc_portimportance(u32 portref, unsigned int *importance);
122int tipc_set_portimportance(u32 portref, unsigned int importance); 122int tipc_set_portimportance(u32 portref, unsigned int importance);
@@ -127,9 +127,9 @@ int tipc_set_portunreliable(u32 portref, unsigned int isunreliable);
127int tipc_portunreturnable(u32 portref, unsigned int *isunreturnable); 127int tipc_portunreturnable(u32 portref, unsigned int *isunreturnable);
128int tipc_set_portunreturnable(u32 portref, unsigned int isunreturnable); 128int tipc_set_portunreturnable(u32 portref, unsigned int isunreturnable);
129 129
130int tipc_publish(u32 portref, unsigned int scope, 130int tipc_publish(struct tipc_port *p_ptr, unsigned int scope,
131 struct tipc_name_seq const *name_seq); 131 struct tipc_name_seq const *name_seq);
132int tipc_withdraw(u32 portref, unsigned int scope, 132int tipc_withdraw(struct tipc_port *p_ptr, unsigned int scope,
133 struct tipc_name_seq const *name_seq); 133 struct tipc_name_seq const *name_seq);
134 134
135int tipc_connect(u32 portref, struct tipc_portid const *port); 135int tipc_connect(u32 portref, struct tipc_portid const *port);
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 3b61851bb927..e741416d1d24 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -354,7 +354,7 @@ static int release(struct socket *sock)
354 * Delete TIPC port; this ensures no more messages are queued 354 * Delete TIPC port; this ensures no more messages are queued
355 * (also disconnects an active connection & sends a 'FIN-' to peer) 355 * (also disconnects an active connection & sends a 'FIN-' to peer)
356 */ 356 */
357 res = tipc_deleteport(tport->ref); 357 res = tipc_deleteport(tport);
358 358
359 /* Discard any remaining (connection-based) messages in receive queue */ 359 /* Discard any remaining (connection-based) messages in receive queue */
360 __skb_queue_purge(&sk->sk_receive_queue); 360 __skb_queue_purge(&sk->sk_receive_queue);
@@ -386,30 +386,46 @@ static int release(struct socket *sock)
386 */ 386 */
387static int bind(struct socket *sock, struct sockaddr *uaddr, int uaddr_len) 387static int bind(struct socket *sock, struct sockaddr *uaddr, int uaddr_len)
388{ 388{
389 struct sock *sk = sock->sk;
389 struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr; 390 struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
390 u32 portref = tipc_sk_port(sock->sk)->ref; 391 struct tipc_port *tport = tipc_sk_port(sock->sk);
392 int res = -EINVAL;
391 393
392 if (unlikely(!uaddr_len)) 394 lock_sock(sk);
393 return tipc_withdraw(portref, 0, NULL); 395 if (unlikely(!uaddr_len)) {
396 res = tipc_withdraw(tport, 0, NULL);
397 goto exit;
398 }
394 399
395 if (uaddr_len < sizeof(struct sockaddr_tipc)) 400 if (uaddr_len < sizeof(struct sockaddr_tipc)) {
396 return -EINVAL; 401 res = -EINVAL;
397 if (addr->family != AF_TIPC) 402 goto exit;
398 return -EAFNOSUPPORT; 403 }
404 if (addr->family != AF_TIPC) {
405 res = -EAFNOSUPPORT;
406 goto exit;
407 }
399 408
400 if (addr->addrtype == TIPC_ADDR_NAME) 409 if (addr->addrtype == TIPC_ADDR_NAME)
401 addr->addr.nameseq.upper = addr->addr.nameseq.lower; 410 addr->addr.nameseq.upper = addr->addr.nameseq.lower;
402 else if (addr->addrtype != TIPC_ADDR_NAMESEQ) 411 else if (addr->addrtype != TIPC_ADDR_NAMESEQ) {
403 return -EAFNOSUPPORT; 412 res = -EAFNOSUPPORT;
413 goto exit;
414 }
404 415
405 if ((addr->addr.nameseq.type < TIPC_RESERVED_TYPES) && 416 if ((addr->addr.nameseq.type < TIPC_RESERVED_TYPES) &&
406 (addr->addr.nameseq.type != TIPC_TOP_SRV) && 417 (addr->addr.nameseq.type != TIPC_TOP_SRV) &&
407 (addr->addr.nameseq.type != TIPC_CFG_SRV)) 418 (addr->addr.nameseq.type != TIPC_CFG_SRV)) {
408 return -EACCES; 419 res = -EACCES;
420 goto exit;
421 }
409 422
410 return (addr->scope > 0) ? 423 res = (addr->scope > 0) ?
411 tipc_publish(portref, addr->scope, &addr->addr.nameseq) : 424 tipc_publish(tport, addr->scope, &addr->addr.nameseq) :
412 tipc_withdraw(portref, -addr->scope, &addr->addr.nameseq); 425 tipc_withdraw(tport, -addr->scope, &addr->addr.nameseq);
426exit:
427 release_sock(sk);
428 return res;
413} 429}
414 430
415/** 431/**
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 01625ccc3ae6..a427623ee574 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -530,13 +530,17 @@ static int unix_seqpacket_sendmsg(struct kiocb *, struct socket *,
530static int unix_seqpacket_recvmsg(struct kiocb *, struct socket *, 530static int unix_seqpacket_recvmsg(struct kiocb *, struct socket *,
531 struct msghdr *, size_t, int); 531 struct msghdr *, size_t, int);
532 532
533static void unix_set_peek_off(struct sock *sk, int val) 533static int unix_set_peek_off(struct sock *sk, int val)
534{ 534{
535 struct unix_sock *u = unix_sk(sk); 535 struct unix_sock *u = unix_sk(sk);
536 536
537 mutex_lock(&u->readlock); 537 if (mutex_lock_interruptible(&u->readlock))
538 return -EINTR;
539
538 sk->sk_peek_off = val; 540 sk->sk_peek_off = val;
539 mutex_unlock(&u->readlock); 541 mutex_unlock(&u->readlock);
542
543 return 0;
540} 544}
541 545
542 546
@@ -714,7 +718,9 @@ static int unix_autobind(struct socket *sock)
714 int err; 718 int err;
715 unsigned int retries = 0; 719 unsigned int retries = 0;
716 720
717 mutex_lock(&u->readlock); 721 err = mutex_lock_interruptible(&u->readlock);
722 if (err)
723 return err;
718 724
719 err = 0; 725 err = 0;
720 if (u->addr) 726 if (u->addr)
@@ -873,7 +879,9 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
873 goto out; 879 goto out;
874 addr_len = err; 880 addr_len = err;
875 881
876 mutex_lock(&u->readlock); 882 err = mutex_lock_interruptible(&u->readlock);
883 if (err)
884 goto out;
877 885
878 err = -EINVAL; 886 err = -EINVAL;
879 if (u->addr) 887 if (u->addr)
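
The af_unix hunks above switch mutex_lock() to mutex_lock_interruptible() so a pending signal aborts the wait and the error is pushed back to the caller instead of blocking unkillably. Sketched below with a userspace trylock standing in for the interruptible acquire; the point is the error propagation, not the lock primitive itself:

	#include <errno.h>
	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t readlock = PTHREAD_MUTEX_INITIALIZER;
	static int peek_off;

	static int set_peek_off(int val)
	{
		/* If the lock cannot be taken, fail and let the caller retry,
		 * mirroring how mutex_lock_interruptible() returns -EINTR. */
		if (pthread_mutex_trylock(&readlock))
			return -EINTR;

		peek_off = val;
		pthread_mutex_unlock(&readlock);
		return 0;
	}

	int main(void)
	{
		printf("%d\n", set_peek_off(16)); /* 0 */
		pthread_mutex_lock(&readlock);
		printf("%d\n", set_peek_off(32)); /* -EINTR: lock held elsewhere */
		pthread_mutex_unlock(&readlock);
		return 0;
	}
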
diff --git a/net/wireless/core.c b/net/wireless/core.c
index aff959e5a1b3..52b865fb7351 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -451,6 +451,15 @@ int wiphy_register(struct wiphy *wiphy)
451 int i; 451 int i;
452 u16 ifmodes = wiphy->interface_modes; 452 u16 ifmodes = wiphy->interface_modes;
453 453
454 /* support for 5/10 MHz is broken due to nl80211 API mess - disable */
455 wiphy->flags &= ~WIPHY_FLAG_SUPPORTS_5_10_MHZ;
456
457 /*
458 * There are major locking problems in nl80211/mac80211 for CSA,
459 * disable for all drivers until this has been reworked.
460 */
461 wiphy->flags &= ~WIPHY_FLAG_HAS_CHANNEL_SWITCH;
462
454#ifdef CONFIG_PM 463#ifdef CONFIG_PM
455 if (WARN_ON(wiphy->wowlan && 464 if (WARN_ON(wiphy->wowlan &&
456 (wiphy->wowlan->flags & WIPHY_WOWLAN_GTK_REKEY_FAILURE) && 465 (wiphy->wowlan->flags & WIPHY_WOWLAN_GTK_REKEY_FAILURE) &&
diff --git a/net/wireless/ibss.c b/net/wireless/ibss.c
index 9d797df56649..89737ee2669a 100644
--- a/net/wireless/ibss.c
+++ b/net/wireless/ibss.c
@@ -262,7 +262,7 @@ int cfg80211_ibss_wext_join(struct cfg80211_registered_device *rdev,
262 262
263 /* try to find an IBSS channel if none requested ... */ 263 /* try to find an IBSS channel if none requested ... */
264 if (!wdev->wext.ibss.chandef.chan) { 264 if (!wdev->wext.ibss.chandef.chan) {
265 wdev->wext.ibss.chandef.width = NL80211_CHAN_WIDTH_20_NOHT; 265 struct ieee80211_channel *new_chan = NULL;
266 266
267 for (band = 0; band < IEEE80211_NUM_BANDS; band++) { 267 for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
268 struct ieee80211_supported_band *sband; 268 struct ieee80211_supported_band *sband;
@@ -278,18 +278,19 @@ int cfg80211_ibss_wext_join(struct cfg80211_registered_device *rdev,
278 continue; 278 continue;
279 if (chan->flags & IEEE80211_CHAN_DISABLED) 279 if (chan->flags & IEEE80211_CHAN_DISABLED)
280 continue; 280 continue;
281 wdev->wext.ibss.chandef.chan = chan; 281 new_chan = chan;
282 wdev->wext.ibss.chandef.center_freq1 =
283 chan->center_freq;
284 break; 282 break;
285 } 283 }
286 284
287 if (wdev->wext.ibss.chandef.chan) 285 if (new_chan)
288 break; 286 break;
289 } 287 }
290 288
291 if (!wdev->wext.ibss.chandef.chan) 289 if (!new_chan)
292 return -EINVAL; 290 return -EINVAL;
291
292 cfg80211_chandef_create(&wdev->wext.ibss.chandef, new_chan,
293 NL80211_CHAN_NO_HT);
293 } 294 }
294 295
295 /* don't join -- SSID is not there */ 296 /* don't join -- SSID is not there */
@@ -363,9 +364,8 @@ int cfg80211_ibss_wext_siwfreq(struct net_device *dev,
363 return err; 364 return err;
364 365
365 if (chan) { 366 if (chan) {
366 wdev->wext.ibss.chandef.chan = chan; 367 cfg80211_chandef_create(&wdev->wext.ibss.chandef, chan,
367 wdev->wext.ibss.chandef.width = NL80211_CHAN_WIDTH_20_NOHT; 368 NL80211_CHAN_NO_HT);
368 wdev->wext.ibss.chandef.center_freq1 = freq;
369 wdev->wext.ibss.channel_fixed = true; 369 wdev->wext.ibss.channel_fixed = true;
370 } else { 370 } else {
371 /* cfg80211_ibss_wext_join will pick one if needed */ 371 /* cfg80211_ibss_wext_join will pick one if needed */
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index a1eb21073176..138dc3bb8b67 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -2687,7 +2687,7 @@ static int nl80211_get_key(struct sk_buff *skb, struct genl_info *info)
2687 hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0, 2687 hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0,
2688 NL80211_CMD_NEW_KEY); 2688 NL80211_CMD_NEW_KEY);
2689 if (!hdr) 2689 if (!hdr)
2690 return -ENOBUFS; 2690 goto nla_put_failure;
2691 2691
2692 cookie.msg = msg; 2692 cookie.msg = msg;
2693 cookie.idx = key_idx; 2693 cookie.idx = key_idx;
@@ -5349,6 +5349,10 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
5349 err = -EINVAL; 5349 err = -EINVAL;
5350 goto out_free; 5350 goto out_free;
5351 } 5351 }
5352
5353 if (!wiphy->bands[band])
5354 continue;
5355
5352 err = ieee80211_get_ratemask(wiphy->bands[band], 5356 err = ieee80211_get_ratemask(wiphy->bands[band],
5353 nla_data(attr), 5357 nla_data(attr),
5354 nla_len(attr), 5358 nla_len(attr),
@@ -9633,8 +9637,9 @@ static int nl80211_add_scan_req(struct sk_buff *msg,
9633 nla_put(msg, NL80211_ATTR_IE, req->ie_len, req->ie)) 9637 nla_put(msg, NL80211_ATTR_IE, req->ie_len, req->ie))
9634 goto nla_put_failure; 9638 goto nla_put_failure;
9635 9639
9636 if (req->flags) 9640 if (req->flags &&
9637 nla_put_u32(msg, NL80211_ATTR_SCAN_FLAGS, req->flags); 9641 nla_put_u32(msg, NL80211_ATTR_SCAN_FLAGS, req->flags))
9642 goto nla_put_failure;
9638 9643
9639 return 0; 9644 return 0;
9640 nla_put_failure: 9645 nla_put_failure:
@@ -11093,6 +11098,8 @@ void cfg80211_report_wowlan_wakeup(struct wireless_dev *wdev,
11093 struct nlattr *reasons; 11098 struct nlattr *reasons;
11094 11099
11095 reasons = nla_nest_start(msg, NL80211_ATTR_WOWLAN_TRIGGERS); 11100 reasons = nla_nest_start(msg, NL80211_ATTR_WOWLAN_TRIGGERS);
11101 if (!reasons)
11102 goto free_msg;
11096 11103
11097 if (wakeup->disconnect && 11104 if (wakeup->disconnect &&
11098 nla_put_flag(msg, NL80211_WOWLAN_TRIG_DISCONNECT)) 11105 nla_put_flag(msg, NL80211_WOWLAN_TRIG_DISCONNECT))
@@ -11118,16 +11125,18 @@ void cfg80211_report_wowlan_wakeup(struct wireless_dev *wdev,
11118 wakeup->pattern_idx)) 11125 wakeup->pattern_idx))
11119 goto free_msg; 11126 goto free_msg;
11120 11127
11121 if (wakeup->tcp_match) 11128 if (wakeup->tcp_match &&
11122 nla_put_flag(msg, NL80211_WOWLAN_TRIG_WAKEUP_TCP_MATCH); 11129 nla_put_flag(msg, NL80211_WOWLAN_TRIG_WAKEUP_TCP_MATCH))
11130 goto free_msg;
11123 11131
11124 if (wakeup->tcp_connlost) 11132 if (wakeup->tcp_connlost &&
11125 nla_put_flag(msg, 11133 nla_put_flag(msg, NL80211_WOWLAN_TRIG_WAKEUP_TCP_CONNLOST))
11126 NL80211_WOWLAN_TRIG_WAKEUP_TCP_CONNLOST); 11134 goto free_msg;
11127 11135
11128 if (wakeup->tcp_nomoretokens) 11136 if (wakeup->tcp_nomoretokens &&
11129 nla_put_flag(msg, 11137 nla_put_flag(msg,
11130 NL80211_WOWLAN_TRIG_WAKEUP_TCP_NOMORETOKENS); 11138 NL80211_WOWLAN_TRIG_WAKEUP_TCP_NOMORETOKENS))
11139 goto free_msg;
11131 11140
11132 if (wakeup->packet) { 11141 if (wakeup->packet) {
11133 u32 pkt_attr = NL80211_WOWLAN_TRIG_WAKEUP_PKT_80211; 11142 u32 pkt_attr = NL80211_WOWLAN_TRIG_WAKEUP_PKT_80211;
@@ -11263,24 +11272,29 @@ void cfg80211_ft_event(struct net_device *netdev,
11263 return; 11272 return;
11264 11273
11265 hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_FT_EVENT); 11274 hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_FT_EVENT);
11266 if (!hdr) { 11275 if (!hdr)
11267 nlmsg_free(msg); 11276 goto out;
11268 return;
11269 }
11270 11277
11271 nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); 11278 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
11272 nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex); 11279 nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
11273 nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, ft_event->target_ap); 11280 nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, ft_event->target_ap))
11274 if (ft_event->ies) 11281 goto out;
11275 nla_put(msg, NL80211_ATTR_IE, ft_event->ies_len, ft_event->ies); 11282
11276 if (ft_event->ric_ies) 11283 if (ft_event->ies &&
11277 nla_put(msg, NL80211_ATTR_IE_RIC, ft_event->ric_ies_len, 11284 nla_put(msg, NL80211_ATTR_IE, ft_event->ies_len, ft_event->ies))
11278 ft_event->ric_ies); 11285 goto out;
11286 if (ft_event->ric_ies &&
11287 nla_put(msg, NL80211_ATTR_IE_RIC, ft_event->ric_ies_len,
11288 ft_event->ric_ies))
11289 goto out;
11279 11290
11280 genlmsg_end(msg, hdr); 11291 genlmsg_end(msg, hdr);
11281 11292
11282 genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, 11293 genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0,
11283 NL80211_MCGRP_MLME, GFP_KERNEL); 11294 NL80211_MCGRP_MLME, GFP_KERNEL);
11295 return;
11296 out:
11297 nlmsg_free(msg);
11284} 11298}
11285EXPORT_SYMBOL(cfg80211_ft_event); 11299EXPORT_SYMBOL(cfg80211_ft_event);
11286 11300
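
The cfg80211_ft_event rework above checks the return value of every nla_put*() and funnels all failures to one label that frees the partially built message. A compact sketch of that goto-cleanup pattern; the message type and put helper are stand-ins, not the nl80211 API:

	#include <stdio.h>
	#include <stdlib.h>

	struct msg { char *buf; size_t used; };

	static struct msg *msg_alloc(void)
	{
		struct msg *m = calloc(1, sizeof(*m));

		if (m && !(m->buf = malloc(64))) {
			free(m);
			m = NULL;
		}
		return m;
	}

	static void msg_free(struct msg *m)
	{
		if (m) {
			free(m->buf);
			free(m);
		}
	}

	/* Pretend attribute put: fails once the (tiny) buffer is full. */
	static int put_u32(struct msg *m, unsigned int v)
	{
		if (m->used + sizeof(v) > 64)
			return -1;
		m->used += sizeof(v);
		return 0;
	}

	static int build_event(int nattrs)
	{
		struct msg *m = msg_alloc();
		int i;

		if (!m)
			return -1;
		/* Every put is checked; any failure takes the one cleanup path,
		 * so a half-built message is never sent or leaked. */
		for (i = 0; i < nattrs; i++)
			if (put_u32(m, i))
				goto out;
		msg_free(m);
		return 0;
	out:
		msg_free(m);
		return -1;
	}

	int main(void)
	{
		printf("%d\n", build_event(3));   /* 0 */
		printf("%d\n", build_event(100)); /* -1: cleanup path taken */
		return 0;
	}
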
diff --git a/net/wireless/radiotap.c b/net/wireless/radiotap.c
index a271c27fac77..722da616438c 100644
--- a/net/wireless/radiotap.c
+++ b/net/wireless/radiotap.c
@@ -124,6 +124,10 @@ int ieee80211_radiotap_iterator_init(
124 /* find payload start allowing for extended bitmap(s) */ 124 /* find payload start allowing for extended bitmap(s) */
125 125
126 if (iterator->_bitmap_shifter & (1<<IEEE80211_RADIOTAP_EXT)) { 126 if (iterator->_bitmap_shifter & (1<<IEEE80211_RADIOTAP_EXT)) {
127 if ((unsigned long)iterator->_arg -
128 (unsigned long)iterator->_rtheader + sizeof(uint32_t) >
129 (unsigned long)iterator->_max_length)
130 return -EINVAL;
127 while (get_unaligned_le32(iterator->_arg) & 131 while (get_unaligned_le32(iterator->_arg) &
128 (1 << IEEE80211_RADIOTAP_EXT)) { 132 (1 << IEEE80211_RADIOTAP_EXT)) {
129 iterator->_arg += sizeof(uint32_t); 133 iterator->_arg += sizeof(uint32_t);
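
The radiotap fix above verifies that the next 32-bit bitmap word still lies inside the header before it is read. The same bounds check on a plain byte buffer, with illustrative names:

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	/* Returns 0 and stores the word only if the 4 bytes at 'off' fit in
	 * the buffer; the length check happens before any access. */
	static int read_u32_checked(const uint8_t *buf, size_t len, size_t off,
				    uint32_t *out)
	{
		if (len < sizeof(*out) || off > len - sizeof(*out))
			return -1;
		memcpy(out, buf + off, sizeof(*out)); /* safe: bounds verified */
		return 0;
	}

	int main(void)
	{
		uint8_t hdr[6] = { 0x01, 0x00, 0x00, 0x80, 0xaa, 0xbb };
		uint32_t word;

		printf("%d\n", read_u32_checked(hdr, sizeof(hdr), 0, &word)); /* 0 */
		printf("%d\n", read_u32_checked(hdr, sizeof(hdr), 4, &word)); /* -1 */
		return 0;
	}
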
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index 65f800890d70..d3c5bd7c6b51 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -632,6 +632,16 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
632 } 632 }
633#endif 633#endif
634 634
635 if (!bss && (status == WLAN_STATUS_SUCCESS)) {
636 WARN_ON_ONCE(!wiphy_to_dev(wdev->wiphy)->ops->connect);
637 bss = cfg80211_get_bss(wdev->wiphy, NULL, bssid,
638 wdev->ssid, wdev->ssid_len,
639 WLAN_CAPABILITY_ESS,
640 WLAN_CAPABILITY_ESS);
641 if (bss)
642 cfg80211_hold_bss(bss_from_pub(bss));
643 }
644
635 if (wdev->current_bss) { 645 if (wdev->current_bss) {
636 cfg80211_unhold_bss(wdev->current_bss); 646 cfg80211_unhold_bss(wdev->current_bss);
637 cfg80211_put_bss(wdev->wiphy, &wdev->current_bss->pub); 647 cfg80211_put_bss(wdev->wiphy, &wdev->current_bss->pub);
@@ -649,16 +659,8 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
649 return; 659 return;
650 } 660 }
651 661
652 if (!bss) { 662 if (WARN_ON(!bss))
653 WARN_ON_ONCE(!wiphy_to_dev(wdev->wiphy)->ops->connect); 663 return;
654 bss = cfg80211_get_bss(wdev->wiphy, NULL, bssid,
655 wdev->ssid, wdev->ssid_len,
656 WLAN_CAPABILITY_ESS,
657 WLAN_CAPABILITY_ESS);
658 if (WARN_ON(!bss))
659 return;
660 cfg80211_hold_bss(bss_from_pub(bss));
661 }
662 664
663 wdev->current_bss = bss_from_pub(bss); 665 wdev->current_bss = bss_from_pub(bss);
664 666
diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh
index 32b10f53d0b4..2dcb37736d84 100644
--- a/scripts/link-vmlinux.sh
+++ b/scripts/link-vmlinux.sh
@@ -82,7 +82,9 @@ kallsyms()
82 kallsymopt="${kallsymopt} --all-symbols" 82 kallsymopt="${kallsymopt} --all-symbols"
83 fi 83 fi
84 84
85 kallsymopt="${kallsymopt} --page-offset=$CONFIG_PAGE_OFFSET" 85 if [ -n "${CONFIG_ARM}" ] && [ -n "${CONFIG_PAGE_OFFSET}" ]; then
86 kallsymopt="${kallsymopt} --page-offset=$CONFIG_PAGE_OFFSET"
87 fi
86 88
87 local aflags="${KBUILD_AFLAGS} ${KBUILD_AFLAGS_KERNEL} \ 89 local aflags="${KBUILD_AFLAGS} ${KBUILD_AFLAGS_KERNEL} \
88 ${NOSTDINC_FLAGS} ${LINUXINCLUDE} ${KBUILD_CPPFLAGS}" 90 ${NOSTDINC_FLAGS} ${LINUXINCLUDE} ${KBUILD_CPPFLAGS}"
diff --git a/scripts/recordmcount.pl b/scripts/recordmcount.pl
index d0da66396f62..91280b82da08 100755
--- a/scripts/recordmcount.pl
+++ b/scripts/recordmcount.pl
@@ -364,7 +364,8 @@ if ($arch eq "x86_64") {
364} elsif ($arch eq "blackfin") { 364} elsif ($arch eq "blackfin") {
365 $mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\s__mcount\$"; 365 $mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\s__mcount\$";
366 $mcount_adjust = -4; 366 $mcount_adjust = -4;
367} elsif ($arch eq "tilegx") { 367} elsif ($arch eq "tilegx" || $arch eq "tile") {
368 # Default to the newer TILE-Gx architecture if only "tile" is given.
368 $mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\s__mcount\$"; 369 $mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\s__mcount\$";
369 $type = ".quad"; 370 $type = ".quad";
370 $alignment = 8; 371 $alignment = 8;
diff --git a/scripts/sortextable.c b/scripts/sortextable.c
index 5f7a8b663cb9..7941fbdfb050 100644
--- a/scripts/sortextable.c
+++ b/scripts/sortextable.c
@@ -31,6 +31,10 @@
31#include <tools/be_byteshift.h> 31#include <tools/be_byteshift.h>
32#include <tools/le_byteshift.h> 32#include <tools/le_byteshift.h>
33 33
34#ifndef EM_ARCOMPACT
35#define EM_ARCOMPACT 93
36#endif
37
34#ifndef EM_AARCH64 38#ifndef EM_AARCH64
35#define EM_AARCH64 183 39#define EM_AARCH64 183
36#endif 40#endif
@@ -268,6 +272,7 @@ do_file(char const *const fname)
268 case EM_S390: 272 case EM_S390:
269 custom_sort = sort_relative_table; 273 custom_sort = sort_relative_table;
270 break; 274 break;
275 case EM_ARCOMPACT:
271 case EM_ARM: 276 case EM_ARM:
272 case EM_AARCH64: 277 case EM_AARCH64:
273 case EM_MIPS: 278 case EM_MIPS:
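
sortextable.c above gains EM_ARCOMPACT together with a fallback #define, so the tool still builds against older elf.h headers that lack the constant. The same guard pattern as a standalone file; the value 93 is the one added by the patch, the rest is illustrative:

	#include <elf.h>
	#include <stdio.h>

	/* Older toolchain headers may not define this yet; provide the value
	 * ourselves so the switch below compiles everywhere. */
	#ifndef EM_ARCOMPACT
	#define EM_ARCOMPACT 93
	#endif

	static const char *machine_name(unsigned int em)
	{
		switch (em) {
		case EM_ARCOMPACT: return "ARCompact";
		case EM_ARM:       return "ARM";
		default:           return "unknown";
		}
	}

	int main(void)
	{
		printf("%s\n", machine_name(EM_ARCOMPACT));
		return 0;
	}
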
diff --git a/security/integrity/digsig.c b/security/integrity/digsig.c
index 77ca965ab684..b4af4ebc5be2 100644
--- a/security/integrity/digsig.c
+++ b/security/integrity/digsig.c
@@ -13,9 +13,7 @@
13#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 13#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14 14
15#include <linux/err.h> 15#include <linux/err.h>
16#include <linux/sched.h>
17#include <linux/rbtree.h> 16#include <linux/rbtree.h>
18#include <linux/cred.h>
19#include <linux/key-type.h> 17#include <linux/key-type.h>
20#include <linux/digsig.h> 18#include <linux/digsig.h>
21 19
@@ -23,19 +21,11 @@
23 21
24static struct key *keyring[INTEGRITY_KEYRING_MAX]; 22static struct key *keyring[INTEGRITY_KEYRING_MAX];
25 23
26#ifdef CONFIG_IMA_TRUSTED_KEYRING
27static const char *keyring_name[INTEGRITY_KEYRING_MAX] = {
28 ".evm",
29 ".module",
30 ".ima",
31};
32#else
33static const char *keyring_name[INTEGRITY_KEYRING_MAX] = { 24static const char *keyring_name[INTEGRITY_KEYRING_MAX] = {
34 "_evm", 25 "_evm",
35 "_module", 26 "_module",
36 "_ima", 27 "_ima",
37}; 28};
38#endif
39 29
40int integrity_digsig_verify(const unsigned int id, const char *sig, int siglen, 30int integrity_digsig_verify(const unsigned int id, const char *sig, int siglen,
41 const char *digest, int digestlen) 31 const char *digest, int digestlen)
@@ -45,7 +35,7 @@ int integrity_digsig_verify(const unsigned int id, const char *sig, int siglen,
45 35
46 if (!keyring[id]) { 36 if (!keyring[id]) {
47 keyring[id] = 37 keyring[id] =
48 request_key(&key_type_keyring, keyring_name[id], NULL); 38 request_key(&key_type_keyring, keyring_name[id], NULL);
49 if (IS_ERR(keyring[id])) { 39 if (IS_ERR(keyring[id])) {
50 int err = PTR_ERR(keyring[id]); 40 int err = PTR_ERR(keyring[id]);
51 pr_err("no %s keyring: %d\n", keyring_name[id], err); 41 pr_err("no %s keyring: %d\n", keyring_name[id], err);
@@ -66,21 +56,3 @@ int integrity_digsig_verify(const unsigned int id, const char *sig, int siglen,
66 56
67 return -EOPNOTSUPP; 57 return -EOPNOTSUPP;
68} 58}
69
70int integrity_init_keyring(const unsigned int id)
71{
72 const struct cred *cred = current_cred();
73 const struct user_struct *user = cred->user;
74
75 keyring[id] = keyring_alloc(keyring_name[id], KUIDT_INIT(0),
76 KGIDT_INIT(0), cred,
77 ((KEY_POS_ALL & ~KEY_POS_SETATTR) |
78 KEY_USR_VIEW | KEY_USR_READ),
79 KEY_ALLOC_NOT_IN_QUOTA, user->uid_keyring);
80 if (!IS_ERR(keyring[id]))
81 set_bit(KEY_FLAG_TRUSTED_ONLY, &keyring[id]->flags);
82 else
83 pr_info("Can't allocate %s keyring (%ld)\n",
84 keyring_name[id], PTR_ERR(keyring[id]));
85 return 0;
86}
diff --git a/security/integrity/ima/Kconfig b/security/integrity/ima/Kconfig
index dad8d4ca2437..81a27971d884 100644
--- a/security/integrity/ima/Kconfig
+++ b/security/integrity/ima/Kconfig
@@ -123,11 +123,3 @@ config IMA_APPRAISE
123 For more information on integrity appraisal refer to: 123 For more information on integrity appraisal refer to:
124 <http://linux-ima.sourceforge.net> 124 <http://linux-ima.sourceforge.net>
125 If unsure, say N. 125 If unsure, say N.
126
127config IMA_TRUSTED_KEYRING
128 bool "Require all keys on the _ima keyring be signed"
129 depends on IMA_APPRAISE && SYSTEM_TRUSTED_KEYRING
130 default y
131 help
132 This option requires that all keys added to the _ima
133 keyring be signed by a key on the system trusted keyring.
diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
index bf03c6a16cc8..0356e1d437ca 100644
--- a/security/integrity/ima/ima.h
+++ b/security/integrity/ima/ima.h
@@ -26,7 +26,8 @@
26 26
27#include "../integrity.h" 27#include "../integrity.h"
28 28
29enum ima_show_type { IMA_SHOW_BINARY, IMA_SHOW_ASCII }; 29enum ima_show_type { IMA_SHOW_BINARY, IMA_SHOW_BINARY_NO_FIELD_LEN,
30 IMA_SHOW_ASCII };
30enum tpm_pcrs { TPM_PCR0 = 0, TPM_PCR8 = 8 }; 31enum tpm_pcrs { TPM_PCR0 = 0, TPM_PCR8 = 8 };
31 32
32/* digest size for IMA, fits SHA1 or MD5 */ 33/* digest size for IMA, fits SHA1 or MD5 */
@@ -97,7 +98,8 @@ int ima_add_template_entry(struct ima_template_entry *entry, int violation,
97 const char *op, struct inode *inode, 98 const char *op, struct inode *inode,
98 const unsigned char *filename); 99 const unsigned char *filename);
99int ima_calc_file_hash(struct file *file, struct ima_digest_data *hash); 100int ima_calc_file_hash(struct file *file, struct ima_digest_data *hash);
100int ima_calc_field_array_hash(struct ima_field_data *field_data, int num_fields, 101int ima_calc_field_array_hash(struct ima_field_data *field_data,
102 struct ima_template_desc *desc, int num_fields,
101 struct ima_digest_data *hash); 103 struct ima_digest_data *hash);
102int __init ima_calc_boot_aggregate(struct ima_digest_data *hash); 104int __init ima_calc_boot_aggregate(struct ima_digest_data *hash);
103void ima_add_violation(struct file *file, const unsigned char *filename, 105void ima_add_violation(struct file *file, const unsigned char *filename,
@@ -146,6 +148,7 @@ int ima_alloc_init_template(struct integrity_iint_cache *iint,
146 int xattr_len, struct ima_template_entry **entry); 148 int xattr_len, struct ima_template_entry **entry);
147int ima_store_template(struct ima_template_entry *entry, int violation, 149int ima_store_template(struct ima_template_entry *entry, int violation,
148 struct inode *inode, const unsigned char *filename); 150 struct inode *inode, const unsigned char *filename);
151void ima_free_template_entry(struct ima_template_entry *entry);
149const char *ima_d_path(struct path *path, char **pathbuf); 152const char *ima_d_path(struct path *path, char **pathbuf);
150 153
151/* rbtree tree calls to lookup, insert, delete 154/* rbtree tree calls to lookup, insert, delete
diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c
index 0e7540863fc2..c38bbce8c6a6 100644
--- a/security/integrity/ima/ima_api.c
+++ b/security/integrity/ima/ima_api.c
@@ -22,6 +22,19 @@
22#include "ima.h" 22#include "ima.h"
23 23
24/* 24/*
25 * ima_free_template_entry - free an existing template entry
26 */
27void ima_free_template_entry(struct ima_template_entry *entry)
28{
29 int i;
30
31 for (i = 0; i < entry->template_desc->num_fields; i++)
32 kfree(entry->template_data[i].data);
33
34 kfree(entry);
35}
36
37/*
25 * ima_alloc_init_template - create and initialize a new template entry 38 * ima_alloc_init_template - create and initialize a new template entry
26 */ 39 */
27int ima_alloc_init_template(struct integrity_iint_cache *iint, 40int ima_alloc_init_template(struct integrity_iint_cache *iint,
@@ -37,6 +50,7 @@ int ima_alloc_init_template(struct integrity_iint_cache *iint,
37 if (!*entry) 50 if (!*entry)
38 return -ENOMEM; 51 return -ENOMEM;
39 52
53 (*entry)->template_desc = template_desc;
40 for (i = 0; i < template_desc->num_fields; i++) { 54 for (i = 0; i < template_desc->num_fields; i++) {
41 struct ima_template_field *field = template_desc->fields[i]; 55 struct ima_template_field *field = template_desc->fields[i];
42 u32 len; 56 u32 len;
@@ -51,10 +65,9 @@ int ima_alloc_init_template(struct integrity_iint_cache *iint,
51 (*entry)->template_data_len += sizeof(len); 65 (*entry)->template_data_len += sizeof(len);
52 (*entry)->template_data_len += len; 66 (*entry)->template_data_len += len;
53 } 67 }
54 (*entry)->template_desc = template_desc;
55 return 0; 68 return 0;
56out: 69out:
57 kfree(*entry); 70 ima_free_template_entry(*entry);
58 *entry = NULL; 71 *entry = NULL;
59 return result; 72 return result;
60} 73}
@@ -94,6 +107,7 @@ int ima_store_template(struct ima_template_entry *entry,
94 /* this function uses default algo */ 107 /* this function uses default algo */
95 hash.hdr.algo = HASH_ALGO_SHA1; 108 hash.hdr.algo = HASH_ALGO_SHA1;
96 result = ima_calc_field_array_hash(&entry->template_data[0], 109 result = ima_calc_field_array_hash(&entry->template_data[0],
110 entry->template_desc,
97 num_fields, &hash.hdr); 111 num_fields, &hash.hdr);
98 if (result < 0) { 112 if (result < 0) {
99 integrity_audit_msg(AUDIT_INTEGRITY_PCR, inode, 113 integrity_audit_msg(AUDIT_INTEGRITY_PCR, inode,
@@ -133,7 +147,7 @@ void ima_add_violation(struct file *file, const unsigned char *filename,
133 } 147 }
134 result = ima_store_template(entry, violation, inode, filename); 148 result = ima_store_template(entry, violation, inode, filename);
135 if (result < 0) 149 if (result < 0)
136 kfree(entry); 150 ima_free_template_entry(entry);
137err_out: 151err_out:
138 integrity_audit_msg(AUDIT_INTEGRITY_PCR, inode, filename, 152 integrity_audit_msg(AUDIT_INTEGRITY_PCR, inode, filename,
139 op, cause, result, 0); 153 op, cause, result, 0);
@@ -268,7 +282,7 @@ void ima_store_measurement(struct integrity_iint_cache *iint,
268 if (!result || result == -EEXIST) 282 if (!result || result == -EEXIST)
269 iint->flags |= IMA_MEASURED; 283 iint->flags |= IMA_MEASURED;
270 if (result < 0) 284 if (result < 0)
271 kfree(entry); 285 ima_free_template_entry(entry);
272} 286}
273 287
274void ima_audit_measurement(struct integrity_iint_cache *iint, 288void ima_audit_measurement(struct integrity_iint_cache *iint,
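
The ima_api.c changes above introduce ima_free_template_entry() so every error path releases the per-field buffers as well as the entry itself, instead of the bare kfree() that leaked them. A compact userspace sketch of the same single-teardown-helper idea; the structures below are invented for the example:

	#include <stdlib.h>
	#include <string.h>

	struct entry {
		int nfields;
		char **field_data;
	};

	/* Single teardown helper: frees every per-field allocation, then the
	 * field array, then the entry itself. All error paths call this. */
	static void free_entry(struct entry *e)
	{
		int i;

		if (!e)
			return;
		for (i = 0; i < e->nfields; i++)
			free(e->field_data[i]);
		free(e->field_data);
		free(e);
	}

	static struct entry *alloc_entry(int nfields)
	{
		struct entry *e = calloc(1, sizeof(*e));
		int i;

		if (!e)
			return NULL;
		e->field_data = calloc(nfields, sizeof(*e->field_data));
		if (!e->field_data)
			goto err;
		for (i = 0; i < nfields; i++) {
			e->field_data[i] = strdup("field");
			if (!e->field_data[i])
				goto err;
			e->nfields++;	/* count only what was allocated */
		}
		return e;
	err:
		free_entry(e);	/* releases whatever was allocated so far */
		return NULL;
	}

	int main(void)
	{
		free_entry(alloc_entry(3));
		return 0;
	}
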
diff --git a/security/integrity/ima/ima_appraise.c b/security/integrity/ima/ima_appraise.c
index 46353ee517f6..734e9468aca0 100644
--- a/security/integrity/ima/ima_appraise.c
+++ b/security/integrity/ima/ima_appraise.c
@@ -381,14 +381,3 @@ int ima_inode_removexattr(struct dentry *dentry, const char *xattr_name)
381 } 381 }
382 return result; 382 return result;
383} 383}
384
385#ifdef CONFIG_IMA_TRUSTED_KEYRING
386static int __init init_ima_keyring(void)
387{
388 int ret;
389
390 ret = integrity_init_keyring(INTEGRITY_KEYRING_IMA);
391 return 0;
392}
393late_initcall(init_ima_keyring);
394#endif
diff --git a/security/integrity/ima/ima_crypto.c b/security/integrity/ima/ima_crypto.c
index 676e0292dfec..fdf60def52e9 100644
--- a/security/integrity/ima/ima_crypto.c
+++ b/security/integrity/ima/ima_crypto.c
@@ -140,6 +140,7 @@ int ima_calc_file_hash(struct file *file, struct ima_digest_data *hash)
140 * Calculate the hash of template data 140 * Calculate the hash of template data
141 */ 141 */
142static int ima_calc_field_array_hash_tfm(struct ima_field_data *field_data, 142static int ima_calc_field_array_hash_tfm(struct ima_field_data *field_data,
143 struct ima_template_desc *td,
143 int num_fields, 144 int num_fields,
144 struct ima_digest_data *hash, 145 struct ima_digest_data *hash,
145 struct crypto_shash *tfm) 146 struct crypto_shash *tfm)
@@ -160,9 +161,13 @@ static int ima_calc_field_array_hash_tfm(struct ima_field_data *field_data,
160 return rc; 161 return rc;
161 162
162 for (i = 0; i < num_fields; i++) { 163 for (i = 0; i < num_fields; i++) {
163 rc = crypto_shash_update(&desc.shash, 164 if (strcmp(td->name, IMA_TEMPLATE_IMA_NAME) != 0) {
164 (const u8 *) &field_data[i].len, 165 rc = crypto_shash_update(&desc.shash,
165 sizeof(field_data[i].len)); 166 (const u8 *) &field_data[i].len,
167 sizeof(field_data[i].len));
168 if (rc)
169 break;
170 }
166 rc = crypto_shash_update(&desc.shash, field_data[i].data, 171 rc = crypto_shash_update(&desc.shash, field_data[i].data,
167 field_data[i].len); 172 field_data[i].len);
168 if (rc) 173 if (rc)
@@ -175,7 +180,8 @@ static int ima_calc_field_array_hash_tfm(struct ima_field_data *field_data,
175 return rc; 180 return rc;
176} 181}
177 182
178int ima_calc_field_array_hash(struct ima_field_data *field_data, int num_fields, 183int ima_calc_field_array_hash(struct ima_field_data *field_data,
184 struct ima_template_desc *desc, int num_fields,
179 struct ima_digest_data *hash) 185 struct ima_digest_data *hash)
180{ 186{
181 struct crypto_shash *tfm; 187 struct crypto_shash *tfm;
@@ -185,7 +191,8 @@ int ima_calc_field_array_hash(struct ima_field_data *field_data, int num_fields,
185 if (IS_ERR(tfm)) 191 if (IS_ERR(tfm))
186 return PTR_ERR(tfm); 192 return PTR_ERR(tfm);
187 193
188 rc = ima_calc_field_array_hash_tfm(field_data, num_fields, hash, tfm); 194 rc = ima_calc_field_array_hash_tfm(field_data, desc, num_fields,
195 hash, tfm);
189 196
190 ima_free_tfm(tfm); 197 ima_free_tfm(tfm);
191 198
diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c
index d47a7c86a21d..db01125926bd 100644
--- a/security/integrity/ima/ima_fs.c
+++ b/security/integrity/ima/ima_fs.c
@@ -120,6 +120,7 @@ static int ima_measurements_show(struct seq_file *m, void *v)
120 struct ima_template_entry *e; 120 struct ima_template_entry *e;
121 int namelen; 121 int namelen;
122 u32 pcr = CONFIG_IMA_MEASURE_PCR_IDX; 122 u32 pcr = CONFIG_IMA_MEASURE_PCR_IDX;
123 bool is_ima_template = false;
123 int i; 124 int i;
124 125
125 /* get entry */ 126 /* get entry */
@@ -145,14 +146,21 @@ static int ima_measurements_show(struct seq_file *m, void *v)
145 ima_putc(m, e->template_desc->name, namelen); 146 ima_putc(m, e->template_desc->name, namelen);
146 147
147 /* 5th: template length (except for 'ima' template) */ 148 /* 5th: template length (except for 'ima' template) */
148 if (strcmp(e->template_desc->name, IMA_TEMPLATE_IMA_NAME) != 0) 149 if (strcmp(e->template_desc->name, IMA_TEMPLATE_IMA_NAME) == 0)
150 is_ima_template = true;
151
152 if (!is_ima_template)
149 ima_putc(m, &e->template_data_len, 153 ima_putc(m, &e->template_data_len,
150 sizeof(e->template_data_len)); 154 sizeof(e->template_data_len));
151 155
152 /* 6th: template specific data */ 156 /* 6th: template specific data */
153 for (i = 0; i < e->template_desc->num_fields; i++) { 157 for (i = 0; i < e->template_desc->num_fields; i++) {
154 e->template_desc->fields[i]->field_show(m, IMA_SHOW_BINARY, 158 enum ima_show_type show = IMA_SHOW_BINARY;
155 &e->template_data[i]); 159 struct ima_template_field *field = e->template_desc->fields[i];
160
161 if (is_ima_template && strcmp(field->field_id, "d") == 0)
162 show = IMA_SHOW_BINARY_NO_FIELD_LEN;
163 field->field_show(m, show, &e->template_data[i]);
156 } 164 }
157 return 0; 165 return 0;
158} 166}
diff --git a/security/integrity/ima/ima_init.c b/security/integrity/ima/ima_init.c
index 15f34bd40abe..37122768554a 100644
--- a/security/integrity/ima/ima_init.c
+++ b/security/integrity/ima/ima_init.c
@@ -63,7 +63,6 @@ static void __init ima_add_boot_aggregate(void)
63 result = ima_calc_boot_aggregate(&hash.hdr); 63 result = ima_calc_boot_aggregate(&hash.hdr);
64 if (result < 0) { 64 if (result < 0) {
65 audit_cause = "hashing_error"; 65 audit_cause = "hashing_error";
66 kfree(entry);
67 goto err_out; 66 goto err_out;
68 } 67 }
69 } 68 }
@@ -76,7 +75,7 @@ static void __init ima_add_boot_aggregate(void)
76 result = ima_store_template(entry, violation, NULL, 75 result = ima_store_template(entry, violation, NULL,
77 boot_aggregate_name); 76 boot_aggregate_name);
78 if (result < 0) 77 if (result < 0)
79 kfree(entry); 78 ima_free_template_entry(entry);
80 return; 79 return;
81err_out: 80err_out:
82 integrity_audit_msg(AUDIT_INTEGRITY_PCR, NULL, boot_aggregate_name, op, 81 integrity_audit_msg(AUDIT_INTEGRITY_PCR, NULL, boot_aggregate_name, op,
diff --git a/security/integrity/ima/ima_template.c b/security/integrity/ima/ima_template.c
index 4e5da990630b..635695f6a185 100644
--- a/security/integrity/ima/ima_template.c
+++ b/security/integrity/ima/ima_template.c
@@ -90,7 +90,7 @@ static struct ima_template_field *lookup_template_field(const char *field_id)
90 return NULL; 90 return NULL;
91} 91}
92 92
93static int template_fmt_size(char *template_fmt) 93static int template_fmt_size(const char *template_fmt)
94{ 94{
95 char c; 95 char c;
96 int template_fmt_len = strlen(template_fmt); 96 int template_fmt_len = strlen(template_fmt);
@@ -106,22 +106,29 @@ static int template_fmt_size(char *template_fmt)
106 return j + 1; 106 return j + 1;
107} 107}
108 108
109static int template_desc_init_fields(char *template_fmt, 109static int template_desc_init_fields(const char *template_fmt,
110 struct ima_template_field ***fields, 110 struct ima_template_field ***fields,
111 int *num_fields) 111 int *num_fields)
112{ 112{
113 char *c, *template_fmt_ptr = template_fmt; 113 char *c, *template_fmt_copy, *template_fmt_ptr;
114 int template_num_fields = template_fmt_size(template_fmt); 114 int template_num_fields = template_fmt_size(template_fmt);
115 int i, result = 0; 115 int i, result = 0;
116 116
117 if (template_num_fields > IMA_TEMPLATE_NUM_FIELDS_MAX) 117 if (template_num_fields > IMA_TEMPLATE_NUM_FIELDS_MAX)
118 return -EINVAL; 118 return -EINVAL;
119 119
120 /* copying is needed as strsep() modifies the original buffer */
121 template_fmt_copy = kstrdup(template_fmt, GFP_KERNEL);
122 if (template_fmt_copy == NULL)
123 return -ENOMEM;
124
120 *fields = kzalloc(template_num_fields * sizeof(*fields), GFP_KERNEL); 125 *fields = kzalloc(template_num_fields * sizeof(*fields), GFP_KERNEL);
121 if (*fields == NULL) { 126 if (*fields == NULL) {
122 result = -ENOMEM; 127 result = -ENOMEM;
123 goto out; 128 goto out;
124 } 129 }
130
131 template_fmt_ptr = template_fmt_copy;
125 for (i = 0; (c = strsep(&template_fmt_ptr, "|")) != NULL && 132 for (i = 0; (c = strsep(&template_fmt_ptr, "|")) != NULL &&
126 i < template_num_fields; i++) { 133 i < template_num_fields; i++) {
127 struct ima_template_field *f = lookup_template_field(c); 134 struct ima_template_field *f = lookup_template_field(c);
@@ -133,10 +140,12 @@ static int template_desc_init_fields(char *template_fmt,
133 (*fields)[i] = f; 140 (*fields)[i] = f;
134 } 141 }
135 *num_fields = i; 142 *num_fields = i;
136 return 0;
137out: 143out:
138 kfree(*fields); 144 if (result < 0) {
139 *fields = NULL; 145 kfree(*fields);
146 *fields = NULL;
147 }
148 kfree(template_fmt_copy);
140 return result; 149 return result;
141} 150}
142 151
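Note: strsep() writes NUL bytes into the buffer it scans, which is why the hunk above duplicates the now-const format string with kstrdup() before tokenizing and frees the copy on every exit path. A minimal userspace sketch of the same pattern (glibc strdup()/strsep(), not the kernel helpers):

#define _DEFAULT_SOURCE   /* for strsep() with glibc */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Tokenize a '|'-separated descriptor without touching the caller's
 * (possibly read-only) string: work on a private copy, free it at the end. */
static int parse_fields(const char *fmt)
{
        char *copy, *cursor, *tok;
        int n = 0;

        copy = strdup(fmt);          /* strsep() will modify this copy */
        if (!copy)
                return -1;

        cursor = copy;
        while ((tok = strsep(&cursor, "|")) != NULL) {
                printf("field %d: %s\n", n, tok);
                n++;
        }

        free(copy);                  /* freed on every path, as in the patch */
        return n;
}

int main(void)
{
        return parse_fields("d-ng|n-ng") < 0;
}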
diff --git a/security/integrity/ima/ima_template_lib.c b/security/integrity/ima/ima_template_lib.c
index 6d66ad6ed265..c38adcc910fb 100644
--- a/security/integrity/ima/ima_template_lib.c
+++ b/security/integrity/ima/ima_template_lib.c
@@ -109,9 +109,12 @@ static void ima_show_template_data_binary(struct seq_file *m,
109 enum data_formats datafmt, 109 enum data_formats datafmt,
110 struct ima_field_data *field_data) 110 struct ima_field_data *field_data)
111{ 111{
112 ima_putc(m, &field_data->len, sizeof(u32)); 112 if (show != IMA_SHOW_BINARY_NO_FIELD_LEN)
113 ima_putc(m, &field_data->len, sizeof(u32));
114
113 if (!field_data->len) 115 if (!field_data->len)
114 return; 116 return;
117
115 ima_putc(m, field_data->data, field_data->len); 118 ima_putc(m, field_data->data, field_data->len);
116} 119}
117 120
@@ -125,6 +128,7 @@ static void ima_show_template_field_data(struct seq_file *m,
125 ima_show_template_data_ascii(m, show, datafmt, field_data); 128 ima_show_template_data_ascii(m, show, datafmt, field_data);
126 break; 129 break;
127 case IMA_SHOW_BINARY: 130 case IMA_SHOW_BINARY:
131 case IMA_SHOW_BINARY_NO_FIELD_LEN:
128 ima_show_template_data_binary(m, show, datafmt, field_data); 132 ima_show_template_data_binary(m, show, datafmt, field_data);
129 break; 133 break;
130 default: 134 default:
diff --git a/security/integrity/integrity.h b/security/integrity/integrity.h
index b9e7c133734a..2fb5e53e927f 100644
--- a/security/integrity/integrity.h
+++ b/security/integrity/integrity.h
@@ -137,19 +137,12 @@ static inline int integrity_digsig_verify(const unsigned int id,
137#ifdef CONFIG_INTEGRITY_ASYMMETRIC_KEYS 137#ifdef CONFIG_INTEGRITY_ASYMMETRIC_KEYS
138int asymmetric_verify(struct key *keyring, const char *sig, 138int asymmetric_verify(struct key *keyring, const char *sig,
139 int siglen, const char *data, int datalen); 139 int siglen, const char *data, int datalen);
140
141int integrity_init_keyring(const unsigned int id);
142#else 140#else
143static inline int asymmetric_verify(struct key *keyring, const char *sig, 141static inline int asymmetric_verify(struct key *keyring, const char *sig,
144 int siglen, const char *data, int datalen) 142 int siglen, const char *data, int datalen)
145{ 143{
146 return -EOPNOTSUPP; 144 return -EOPNOTSUPP;
147} 145}
148
149static int integrity_init_keyring(const unsigned int id)
150{
151 return 0;
152}
153#endif 146#endif
154 147
155#ifdef CONFIG_INTEGRITY_AUDIT 148#ifdef CONFIG_INTEGRITY_AUDIT
diff --git a/security/keys/big_key.c b/security/keys/big_key.c
index 7f44c3207a9b..8137b27d641d 100644
--- a/security/keys/big_key.c
+++ b/security/keys/big_key.c
@@ -70,7 +70,7 @@ int big_key_instantiate(struct key *key, struct key_preparsed_payload *prep)
70 * 70 *
71 * TODO: Encrypt the stored data with a temporary key. 71 * TODO: Encrypt the stored data with a temporary key.
72 */ 72 */
73 file = shmem_file_setup("", datalen, 0); 73 file = shmem_kernel_file_setup("", datalen, 0);
74 if (IS_ERR(file)) { 74 if (IS_ERR(file)) {
75 ret = PTR_ERR(file); 75 ret = PTR_ERR(file);
76 goto err_quota; 76 goto err_quota;
diff --git a/security/keys/key.c b/security/keys/key.c
index 55d110f0aced..6e21c11e48bc 100644
--- a/security/keys/key.c
+++ b/security/keys/key.c
@@ -272,7 +272,7 @@ struct key *key_alloc(struct key_type *type, const char *desc,
272 } 272 }
273 273
274 /* allocate and initialise the key and its description */ 274 /* allocate and initialise the key and its description */
275 key = kmem_cache_alloc(key_jar, GFP_KERNEL); 275 key = kmem_cache_zalloc(key_jar, GFP_KERNEL);
276 if (!key) 276 if (!key)
277 goto no_memory_2; 277 goto no_memory_2;
278 278
@@ -293,18 +293,12 @@ struct key *key_alloc(struct key_type *type, const char *desc,
293 key->uid = uid; 293 key->uid = uid;
294 key->gid = gid; 294 key->gid = gid;
295 key->perm = perm; 295 key->perm = perm;
296 key->flags = 0;
297 key->expiry = 0;
298 key->payload.data = NULL;
299 key->security = NULL;
300 296
301 if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) 297 if (!(flags & KEY_ALLOC_NOT_IN_QUOTA))
302 key->flags |= 1 << KEY_FLAG_IN_QUOTA; 298 key->flags |= 1 << KEY_FLAG_IN_QUOTA;
303 if (flags & KEY_ALLOC_TRUSTED) 299 if (flags & KEY_ALLOC_TRUSTED)
304 key->flags |= 1 << KEY_FLAG_TRUSTED; 300 key->flags |= 1 << KEY_FLAG_TRUSTED;
305 301
306 memset(&key->type_data, 0, sizeof(key->type_data));
307
308#ifdef KEY_DEBUGGING 302#ifdef KEY_DEBUGGING
309 key->magic = KEY_DEBUG_MAGIC; 303 key->magic = KEY_DEBUG_MAGIC;
310#endif 304#endif
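Note: switching key_alloc() to kmem_cache_zalloc() lets the per-member zeroing above disappear. The same trade-off exists in userspace with calloc() versus malloc() plus piecemeal assignments; a small sketch (hypothetical struct, not the real struct key):

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical object whose members must all start out zeroed. */
struct obj {
        unsigned long flags;
        long expiry;
        void *payload;
        void *security;
};

/* Error-prone: every new member has to be remembered here. */
static struct obj *obj_alloc_manual(void)
{
        struct obj *o = malloc(sizeof(*o));

        if (!o)
                return NULL;
        o->flags = 0;
        o->expiry = 0;
        o->payload = NULL;
        o->security = NULL;
        return o;
}

/* Safer: the allocator hands back zeroed storage, like kmem_cache_zalloc(). */
static struct obj *obj_alloc_zeroed(void)
{
        return calloc(1, sizeof(struct obj));
}

int main(void)
{
        struct obj *a = obj_alloc_manual();
        struct obj *b = obj_alloc_zeroed();

        printf("%d %d\n", a && a->payload == NULL, b && b->security == NULL);
        free(a);
        free(b);
        return 0;
}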
diff --git a/security/keys/keyring.c b/security/keys/keyring.c
index 69f0cb7bab7e..d46cbc5e335e 100644
--- a/security/keys/keyring.c
+++ b/security/keys/keyring.c
@@ -160,7 +160,7 @@ static u64 mult_64x32_and_fold(u64 x, u32 y)
160static unsigned long hash_key_type_and_desc(const struct keyring_index_key *index_key) 160static unsigned long hash_key_type_and_desc(const struct keyring_index_key *index_key)
161{ 161{
162 const unsigned level_shift = ASSOC_ARRAY_LEVEL_STEP; 162 const unsigned level_shift = ASSOC_ARRAY_LEVEL_STEP;
163 const unsigned long level_mask = ASSOC_ARRAY_LEVEL_STEP_MASK; 163 const unsigned long fan_mask = ASSOC_ARRAY_FAN_MASK;
164 const char *description = index_key->description; 164 const char *description = index_key->description;
165 unsigned long hash, type; 165 unsigned long hash, type;
166 u32 piece; 166 u32 piece;
@@ -194,10 +194,10 @@ static unsigned long hash_key_type_and_desc(const struct keyring_index_key *inde
194 * ordinary keys by making sure the lowest level segment in the hash is 194 * ordinary keys by making sure the lowest level segment in the hash is
195 * zero for keyrings and non-zero otherwise. 195 * zero for keyrings and non-zero otherwise.
196 */ 196 */
197 if (index_key->type != &key_type_keyring && (hash & level_mask) == 0) 197 if (index_key->type != &key_type_keyring && (hash & fan_mask) == 0)
198 return hash | (hash >> (ASSOC_ARRAY_KEY_CHUNK_SIZE - level_shift)) | 1; 198 return hash | (hash >> (ASSOC_ARRAY_KEY_CHUNK_SIZE - level_shift)) | 1;
199 if (index_key->type == &key_type_keyring && (hash & level_mask) != 0) 199 if (index_key->type == &key_type_keyring && (hash & fan_mask) != 0)
200 return (hash + (hash << level_shift)) & ~level_mask; 200 return (hash + (hash << level_shift)) & ~fan_mask;
201 return hash; 201 return hash;
202} 202}
203 203
@@ -279,12 +279,11 @@ static bool keyring_compare_object(const void *object, const void *data)
279 * Compare the index keys of a pair of objects and determine the bit position 279 * Compare the index keys of a pair of objects and determine the bit position
280 * at which they differ - if they differ. 280 * at which they differ - if they differ.
281 */ 281 */
282static int keyring_diff_objects(const void *_a, const void *_b) 282static int keyring_diff_objects(const void *object, const void *data)
283{ 283{
284 const struct key *key_a = keyring_ptr_to_key(_a); 284 const struct key *key_a = keyring_ptr_to_key(object);
285 const struct key *key_b = keyring_ptr_to_key(_b);
286 const struct keyring_index_key *a = &key_a->index_key; 285 const struct keyring_index_key *a = &key_a->index_key;
287 const struct keyring_index_key *b = &key_b->index_key; 286 const struct keyring_index_key *b = data;
288 unsigned long seg_a, seg_b; 287 unsigned long seg_a, seg_b;
289 int level, i; 288 int level, i;
290 289
@@ -691,8 +690,8 @@ descend_to_node:
691 smp_read_barrier_depends(); 690 smp_read_barrier_depends();
692 ptr = ACCESS_ONCE(shortcut->next_node); 691 ptr = ACCESS_ONCE(shortcut->next_node);
693 BUG_ON(!assoc_array_ptr_is_node(ptr)); 692 BUG_ON(!assoc_array_ptr_is_node(ptr));
694 node = assoc_array_ptr_to_node(ptr);
695 } 693 }
694 node = assoc_array_ptr_to_node(ptr);
696 695
697begin_node: 696begin_node:
698 kdebug("begin_node"); 697 kdebug("begin_node");
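Note: the hash_key_type_and_desc() change above masks against the fan-out width of an associative-array node rather than the level step, so keyrings (lowest segment forced to zero) and ordinary keys (lowest segment forced non-zero) never collide in slot 0. An illustrative userspace sketch of that adjustment, with made-up constants rather than the real ASSOC_ARRAY values:

#include <stdio.h>

#define FAN_BITS       4UL                          /* illustrative only */
#define FAN_MASK       ((1UL << FAN_BITS) - 1)
#define KEY_CHUNK_SIZE (sizeof(unsigned long) * 8)

/* Force the lowest fan-out segment non-zero for ordinary keys and zero for
 * keyrings, so the two kinds land in disjoint slots of a node. */
static unsigned long adjust_hash(unsigned long hash, int is_keyring)
{
        if (!is_keyring && (hash & FAN_MASK) == 0)
                return hash | (hash >> (KEY_CHUNK_SIZE - FAN_BITS)) | 1;
        if (is_keyring && (hash & FAN_MASK) != 0)
                return (hash + (hash << FAN_BITS)) & ~FAN_MASK;
        return hash;
}

int main(void)
{
        printf("%lx\n", adjust_hash(0x1230UL, 0)); /* key: bottom made non-zero */
        printf("%lx\n", adjust_hash(0x1234UL, 1)); /* keyring: bottom cleared   */
        return 0;
}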
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 794c3ca49eac..57b0b49f4e6e 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -53,6 +53,7 @@
53#include <net/ip.h> /* for local_port_range[] */ 53#include <net/ip.h> /* for local_port_range[] */
54#include <net/sock.h> 54#include <net/sock.h>
55#include <net/tcp.h> /* struct or_callable used in sock_rcv_skb */ 55#include <net/tcp.h> /* struct or_callable used in sock_rcv_skb */
56#include <net/inet_connection_sock.h>
56#include <net/net_namespace.h> 57#include <net/net_namespace.h>
57#include <net/netlabel.h> 58#include <net/netlabel.h>
58#include <linux/uaccess.h> 59#include <linux/uaccess.h>
@@ -95,10 +96,6 @@
95#include "audit.h" 96#include "audit.h"
96#include "avc_ss.h" 97#include "avc_ss.h"
97 98
98#define SB_TYPE_FMT "%s%s%s"
99#define SB_SUBTYPE(sb) (sb->s_subtype && sb->s_subtype[0])
100#define SB_TYPE_ARGS(sb) sb->s_type->name, SB_SUBTYPE(sb) ? "." : "", SB_SUBTYPE(sb) ? sb->s_subtype : ""
101
102extern struct security_operations *security_ops; 99extern struct security_operations *security_ops;
103 100
104/* SECMARK reference count */ 101/* SECMARK reference count */
@@ -237,6 +234,14 @@ static int inode_alloc_security(struct inode *inode)
237 return 0; 234 return 0;
238} 235}
239 236
237static void inode_free_rcu(struct rcu_head *head)
238{
239 struct inode_security_struct *isec;
240
241 isec = container_of(head, struct inode_security_struct, rcu);
242 kmem_cache_free(sel_inode_cache, isec);
243}
244
240static void inode_free_security(struct inode *inode) 245static void inode_free_security(struct inode *inode)
241{ 246{
242 struct inode_security_struct *isec = inode->i_security; 247 struct inode_security_struct *isec = inode->i_security;
@@ -247,8 +252,16 @@ static void inode_free_security(struct inode *inode)
247 list_del_init(&isec->list); 252 list_del_init(&isec->list);
248 spin_unlock(&sbsec->isec_lock); 253 spin_unlock(&sbsec->isec_lock);
249 254
250 inode->i_security = NULL; 255 /*
251 kmem_cache_free(sel_inode_cache, isec); 256 * The inode may still be referenced in a path walk and
257 * a call to selinux_inode_permission() can be made
258 * after inode_free_security() is called. Ideally, the VFS
259 * wouldn't do this, but fixing that is a much harder
260 * job. For now, simply free the i_security via RCU, and
261 * leave the current inode->i_security pointer intact.
262 * The inode will be freed after the RCU grace period too.
263 */
264 call_rcu(&isec->rcu, inode_free_rcu);
252} 265}
253 266
254static int file_alloc_security(struct file *file) 267static int file_alloc_security(struct file *file)
@@ -413,8 +426,8 @@ static int sb_finish_set_opts(struct super_block *sb)
413 the first boot of the SELinux kernel before we have 426 the first boot of the SELinux kernel before we have
414 assigned xattr values to the filesystem. */ 427 assigned xattr values to the filesystem. */
415 if (!root_inode->i_op->getxattr) { 428 if (!root_inode->i_op->getxattr) {
416 printk(KERN_WARNING "SELinux: (dev %s, type "SB_TYPE_FMT") has no " 429 printk(KERN_WARNING "SELinux: (dev %s, type %s) has no "
417 "xattr support\n", sb->s_id, SB_TYPE_ARGS(sb)); 430 "xattr support\n", sb->s_id, sb->s_type->name);
418 rc = -EOPNOTSUPP; 431 rc = -EOPNOTSUPP;
419 goto out; 432 goto out;
420 } 433 }
@@ -422,22 +435,22 @@ static int sb_finish_set_opts(struct super_block *sb)
422 if (rc < 0 && rc != -ENODATA) { 435 if (rc < 0 && rc != -ENODATA) {
423 if (rc == -EOPNOTSUPP) 436 if (rc == -EOPNOTSUPP)
424 printk(KERN_WARNING "SELinux: (dev %s, type " 437 printk(KERN_WARNING "SELinux: (dev %s, type "
425 SB_TYPE_FMT") has no security xattr handler\n", 438 "%s) has no security xattr handler\n",
426 sb->s_id, SB_TYPE_ARGS(sb)); 439 sb->s_id, sb->s_type->name);
427 else 440 else
428 printk(KERN_WARNING "SELinux: (dev %s, type " 441 printk(KERN_WARNING "SELinux: (dev %s, type "
429 SB_TYPE_FMT") getxattr errno %d\n", sb->s_id, 442 "%s) getxattr errno %d\n", sb->s_id,
430 SB_TYPE_ARGS(sb), -rc); 443 sb->s_type->name, -rc);
431 goto out; 444 goto out;
432 } 445 }
433 } 446 }
434 447
435 if (sbsec->behavior > ARRAY_SIZE(labeling_behaviors)) 448 if (sbsec->behavior > ARRAY_SIZE(labeling_behaviors))
436 printk(KERN_ERR "SELinux: initialized (dev %s, type "SB_TYPE_FMT"), unknown behavior\n", 449 printk(KERN_ERR "SELinux: initialized (dev %s, type %s), unknown behavior\n",
437 sb->s_id, SB_TYPE_ARGS(sb)); 450 sb->s_id, sb->s_type->name);
438 else 451 else
439 printk(KERN_DEBUG "SELinux: initialized (dev %s, type "SB_TYPE_FMT"), %s\n", 452 printk(KERN_DEBUG "SELinux: initialized (dev %s, type %s), %s\n",
440 sb->s_id, SB_TYPE_ARGS(sb), 453 sb->s_id, sb->s_type->name,
441 labeling_behaviors[sbsec->behavior-1]); 454 labeling_behaviors[sbsec->behavior-1]);
442 455
443 sbsec->flags |= SE_SBINITIALIZED; 456 sbsec->flags |= SE_SBINITIALIZED;
@@ -600,6 +613,7 @@ static int selinux_set_mnt_opts(struct super_block *sb,
600 const struct cred *cred = current_cred(); 613 const struct cred *cred = current_cred();
601 int rc = 0, i; 614 int rc = 0, i;
602 struct superblock_security_struct *sbsec = sb->s_security; 615 struct superblock_security_struct *sbsec = sb->s_security;
616 const char *name = sb->s_type->name;
603 struct inode *inode = sbsec->sb->s_root->d_inode; 617 struct inode *inode = sbsec->sb->s_root->d_inode;
604 struct inode_security_struct *root_isec = inode->i_security; 618 struct inode_security_struct *root_isec = inode->i_security;
605 u32 fscontext_sid = 0, context_sid = 0, rootcontext_sid = 0; 619 u32 fscontext_sid = 0, context_sid = 0, rootcontext_sid = 0;
@@ -658,8 +672,8 @@ static int selinux_set_mnt_opts(struct super_block *sb,
658 strlen(mount_options[i]), &sid); 672 strlen(mount_options[i]), &sid);
659 if (rc) { 673 if (rc) {
660 printk(KERN_WARNING "SELinux: security_context_to_sid" 674 printk(KERN_WARNING "SELinux: security_context_to_sid"
661 "(%s) failed for (dev %s, type "SB_TYPE_FMT") errno=%d\n", 675 "(%s) failed for (dev %s, type %s) errno=%d\n",
662 mount_options[i], sb->s_id, SB_TYPE_ARGS(sb), rc); 676 mount_options[i], sb->s_id, name, rc);
663 goto out; 677 goto out;
664 } 678 }
665 switch (flags[i]) { 679 switch (flags[i]) {
@@ -806,8 +820,7 @@ out:
806out_double_mount: 820out_double_mount:
807 rc = -EINVAL; 821 rc = -EINVAL;
808 printk(KERN_WARNING "SELinux: mount invalid. Same superblock, different " 822 printk(KERN_WARNING "SELinux: mount invalid. Same superblock, different "
809 "security settings for (dev %s, type "SB_TYPE_FMT")\n", sb->s_id, 823 "security settings for (dev %s, type %s)\n", sb->s_id, name);
810 SB_TYPE_ARGS(sb));
811 goto out; 824 goto out;
812} 825}
813 826
@@ -2480,8 +2493,8 @@ static int selinux_sb_remount(struct super_block *sb, void *data)
2480 rc = security_context_to_sid(mount_options[i], len, &sid); 2493 rc = security_context_to_sid(mount_options[i], len, &sid);
2481 if (rc) { 2494 if (rc) {
2482 printk(KERN_WARNING "SELinux: security_context_to_sid" 2495 printk(KERN_WARNING "SELinux: security_context_to_sid"
2483 "(%s) failed for (dev %s, type "SB_TYPE_FMT") errno=%d\n", 2496 "(%s) failed for (dev %s, type %s) errno=%d\n",
2484 mount_options[i], sb->s_id, SB_TYPE_ARGS(sb), rc); 2497 mount_options[i], sb->s_id, sb->s_type->name, rc);
2485 goto out_free_opts; 2498 goto out_free_opts;
2486 } 2499 }
2487 rc = -EINVAL; 2500 rc = -EINVAL;
@@ -2519,8 +2532,8 @@ out_free_secdata:
2519 return rc; 2532 return rc;
2520out_bad_option: 2533out_bad_option:
2521 printk(KERN_WARNING "SELinux: unable to change security options " 2534 printk(KERN_WARNING "SELinux: unable to change security options "
2522 "during remount (dev %s, type "SB_TYPE_FMT")\n", sb->s_id, 2535 "during remount (dev %s, type=%s)\n", sb->s_id,
2523 SB_TYPE_ARGS(sb)); 2536 sb->s_type->name);
2524 goto out_free_opts; 2537 goto out_free_opts;
2525} 2538}
2526 2539
@@ -3828,7 +3841,7 @@ static int selinux_skb_peerlbl_sid(struct sk_buff *skb, u16 family, u32 *sid)
3828 u32 nlbl_sid; 3841 u32 nlbl_sid;
3829 u32 nlbl_type; 3842 u32 nlbl_type;
3830 3843
3831 err = selinux_skb_xfrm_sid(skb, &xfrm_sid); 3844 err = selinux_xfrm_skb_sid(skb, &xfrm_sid);
3832 if (unlikely(err)) 3845 if (unlikely(err))
3833 return -EACCES; 3846 return -EACCES;
3834 err = selinux_netlbl_skbuff_getsid(skb, family, &nlbl_type, &nlbl_sid); 3847 err = selinux_netlbl_skbuff_getsid(skb, family, &nlbl_type, &nlbl_sid);
@@ -3846,6 +3859,30 @@ static int selinux_skb_peerlbl_sid(struct sk_buff *skb, u16 family, u32 *sid)
3846 return 0; 3859 return 0;
3847} 3860}
3848 3861
3862/**
3863 * selinux_conn_sid - Determine the child socket label for a connection
3864 * @sk_sid: the parent socket's SID
3865 * @skb_sid: the packet's SID
3866 * @conn_sid: the resulting connection SID
3867 *
3868 * If @skb_sid is valid then the user:role:type information from @sk_sid is
3869 * combined with the MLS information from @skb_sid in order to create
 3870 * @conn_sid. If @skb_sid is not valid then @conn_sid is simply a copy
3871 * of @sk_sid. Returns zero on success, negative values on failure.
3872 *
3873 */
3874static int selinux_conn_sid(u32 sk_sid, u32 skb_sid, u32 *conn_sid)
3875{
3876 int err = 0;
3877
3878 if (skb_sid != SECSID_NULL)
3879 err = security_sid_mls_copy(sk_sid, skb_sid, conn_sid);
3880 else
3881 *conn_sid = sk_sid;
3882
3883 return err;
3884}
3885
3849/* socket security operations */ 3886/* socket security operations */
3850 3887
3851static int socket_sockcreate_sid(const struct task_security_struct *tsec, 3888static int socket_sockcreate_sid(const struct task_security_struct *tsec,
@@ -4313,8 +4350,10 @@ static int selinux_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
4313 } 4350 }
4314 err = avc_has_perm(sk_sid, peer_sid, SECCLASS_PEER, 4351 err = avc_has_perm(sk_sid, peer_sid, SECCLASS_PEER,
4315 PEER__RECV, &ad); 4352 PEER__RECV, &ad);
4316 if (err) 4353 if (err) {
4317 selinux_netlbl_err(skb, err, 0); 4354 selinux_netlbl_err(skb, err, 0);
4355 return err;
4356 }
4318 } 4357 }
4319 4358
4320 if (secmark_active) { 4359 if (secmark_active) {
@@ -4452,7 +4491,7 @@ static int selinux_inet_conn_request(struct sock *sk, struct sk_buff *skb,
4452 struct sk_security_struct *sksec = sk->sk_security; 4491 struct sk_security_struct *sksec = sk->sk_security;
4453 int err; 4492 int err;
4454 u16 family = sk->sk_family; 4493 u16 family = sk->sk_family;
4455 u32 newsid; 4494 u32 connsid;
4456 u32 peersid; 4495 u32 peersid;
4457 4496
4458 /* handle mapped IPv4 packets arriving via IPv6 sockets */ 4497 /* handle mapped IPv4 packets arriving via IPv6 sockets */
@@ -4462,16 +4501,11 @@ static int selinux_inet_conn_request(struct sock *sk, struct sk_buff *skb,
4462 err = selinux_skb_peerlbl_sid(skb, family, &peersid); 4501 err = selinux_skb_peerlbl_sid(skb, family, &peersid);
4463 if (err) 4502 if (err)
4464 return err; 4503 return err;
4465 if (peersid == SECSID_NULL) { 4504 err = selinux_conn_sid(sksec->sid, peersid, &connsid);
4466 req->secid = sksec->sid; 4505 if (err)
4467 req->peer_secid = SECSID_NULL; 4506 return err;
4468 } else { 4507 req->secid = connsid;
4469 err = security_sid_mls_copy(sksec->sid, peersid, &newsid); 4508 req->peer_secid = peersid;
4470 if (err)
4471 return err;
4472 req->secid = newsid;
4473 req->peer_secid = peersid;
4474 }
4475 4509
4476 return selinux_netlbl_inet_conn_request(req, family); 4510 return selinux_netlbl_inet_conn_request(req, family);
4477} 4511}
@@ -4731,6 +4765,7 @@ static unsigned int selinux_ipv6_forward(const struct nf_hook_ops *ops,
4731static unsigned int selinux_ip_output(struct sk_buff *skb, 4765static unsigned int selinux_ip_output(struct sk_buff *skb,
4732 u16 family) 4766 u16 family)
4733{ 4767{
4768 struct sock *sk;
4734 u32 sid; 4769 u32 sid;
4735 4770
4736 if (!netlbl_enabled()) 4771 if (!netlbl_enabled())
@@ -4739,8 +4774,27 @@ static unsigned int selinux_ip_output(struct sk_buff *skb,
4739 /* we do this in the LOCAL_OUT path and not the POST_ROUTING path 4774 /* we do this in the LOCAL_OUT path and not the POST_ROUTING path
4740 * because we want to make sure we apply the necessary labeling 4775 * because we want to make sure we apply the necessary labeling
4741 * before IPsec is applied so we can leverage AH protection */ 4776 * before IPsec is applied so we can leverage AH protection */
4742 if (skb->sk) { 4777 sk = skb->sk;
4743 struct sk_security_struct *sksec = skb->sk->sk_security; 4778 if (sk) {
4779 struct sk_security_struct *sksec;
4780
4781 if (sk->sk_state == TCP_LISTEN)
 4782 /* if the socket is in the listening state then this
4783 * packet is a SYN-ACK packet which means it needs to
4784 * be labeled based on the connection/request_sock and
4785 * not the parent socket. unfortunately, we can't
4786 * lookup the request_sock yet as it isn't queued on
4787 * the parent socket until after the SYN-ACK is sent.
4788 * the "solution" is to simply pass the packet as-is
4789 * as any IP option based labeling should be copied
4790 * from the initial connection request (in the IP
4791 * layer). it is far from ideal, but until we get a
4792 * security label in the packet itself this is the
4793 * best we can do. */
4794 return NF_ACCEPT;
4795
4796 /* standard practice, label using the parent socket */
4797 sksec = sk->sk_security;
4744 sid = sksec->sid; 4798 sid = sksec->sid;
4745 } else 4799 } else
4746 sid = SECINITSID_KERNEL; 4800 sid = SECINITSID_KERNEL;
@@ -4810,27 +4864,36 @@ static unsigned int selinux_ip_postroute(struct sk_buff *skb, int ifindex,
4810 * as fast and as clean as possible. */ 4864 * as fast and as clean as possible. */
4811 if (!selinux_policycap_netpeer) 4865 if (!selinux_policycap_netpeer)
4812 return selinux_ip_postroute_compat(skb, ifindex, family); 4866 return selinux_ip_postroute_compat(skb, ifindex, family);
4867
4868 secmark_active = selinux_secmark_enabled();
4869 peerlbl_active = selinux_peerlbl_enabled();
4870 if (!secmark_active && !peerlbl_active)
4871 return NF_ACCEPT;
4872
4873 sk = skb->sk;
4874
4813#ifdef CONFIG_XFRM 4875#ifdef CONFIG_XFRM
4814 /* If skb->dst->xfrm is non-NULL then the packet is undergoing an IPsec 4876 /* If skb->dst->xfrm is non-NULL then the packet is undergoing an IPsec
4815 * packet transformation so allow the packet to pass without any checks 4877 * packet transformation so allow the packet to pass without any checks
4816 * since we'll have another chance to perform access control checks 4878 * since we'll have another chance to perform access control checks
 4817 * when the packet is on its final way out. 4879 * when the packet is on its final way out.
4818 * NOTE: there appear to be some IPv6 multicast cases where skb->dst 4880 * NOTE: there appear to be some IPv6 multicast cases where skb->dst
4819 * is NULL, in this case go ahead and apply access control. */ 4881 * is NULL, in this case go ahead and apply access control.
4820 if (skb_dst(skb) != NULL && skb_dst(skb)->xfrm != NULL) 4882 * NOTE: if this is a local socket (skb->sk != NULL) that is in the
4883 * TCP listening state we cannot wait until the XFRM processing
4884 * is done as we will miss out on the SA label if we do;
4885 * unfortunately, this means more work, but it is only once per
4886 * connection. */
4887 if (skb_dst(skb) != NULL && skb_dst(skb)->xfrm != NULL &&
4888 !(sk != NULL && sk->sk_state == TCP_LISTEN))
4821 return NF_ACCEPT; 4889 return NF_ACCEPT;
4822#endif 4890#endif
4823 secmark_active = selinux_secmark_enabled();
4824 peerlbl_active = selinux_peerlbl_enabled();
4825 if (!secmark_active && !peerlbl_active)
4826 return NF_ACCEPT;
4827 4891
4828 /* if the packet is being forwarded then get the peer label from the
4829 * packet itself; otherwise check to see if it is from a local
4830 * application or the kernel, if from an application get the peer label
4831 * from the sending socket, otherwise use the kernel's sid */
4832 sk = skb->sk;
4833 if (sk == NULL) { 4892 if (sk == NULL) {
4893 /* Without an associated socket the packet is either coming
4894 * from the kernel or it is being forwarded; check the packet
4895 * to determine which and if the packet is being forwarded
4896 * query the packet directly to determine the security label. */
4834 if (skb->skb_iif) { 4897 if (skb->skb_iif) {
4835 secmark_perm = PACKET__FORWARD_OUT; 4898 secmark_perm = PACKET__FORWARD_OUT;
4836 if (selinux_skb_peerlbl_sid(skb, family, &peer_sid)) 4899 if (selinux_skb_peerlbl_sid(skb, family, &peer_sid))
@@ -4839,7 +4902,45 @@ static unsigned int selinux_ip_postroute(struct sk_buff *skb, int ifindex,
4839 secmark_perm = PACKET__SEND; 4902 secmark_perm = PACKET__SEND;
4840 peer_sid = SECINITSID_KERNEL; 4903 peer_sid = SECINITSID_KERNEL;
4841 } 4904 }
4905 } else if (sk->sk_state == TCP_LISTEN) {
4906 /* Locally generated packet but the associated socket is in the
4907 * listening state which means this is a SYN-ACK packet. In
4908 * this particular case the correct security label is assigned
4909 * to the connection/request_sock but unfortunately we can't
4910 * query the request_sock as it isn't queued on the parent
4911 * socket until after the SYN-ACK packet is sent; the only
4912 * viable choice is to regenerate the label like we do in
4913 * selinux_inet_conn_request(). See also selinux_ip_output()
4914 * for similar problems. */
4915 u32 skb_sid;
4916 struct sk_security_struct *sksec = sk->sk_security;
4917 if (selinux_skb_peerlbl_sid(skb, family, &skb_sid))
4918 return NF_DROP;
4919 /* At this point, if the returned skb peerlbl is SECSID_NULL
4920 * and the packet has been through at least one XFRM
4921 * transformation then we must be dealing with the "final"
4922 * form of labeled IPsec packet; since we've already applied
4923 * all of our access controls on this packet we can safely
4924 * pass the packet. */
4925 if (skb_sid == SECSID_NULL) {
4926 switch (family) {
4927 case PF_INET:
4928 if (IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED)
4929 return NF_ACCEPT;
4930 break;
4931 case PF_INET6:
4932 if (IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED)
4933 return NF_ACCEPT;
4934 default:
4935 return NF_DROP_ERR(-ECONNREFUSED);
4936 }
4937 }
4938 if (selinux_conn_sid(sksec->sid, skb_sid, &peer_sid))
4939 return NF_DROP;
4940 secmark_perm = PACKET__SEND;
4842 } else { 4941 } else {
4942 /* Locally generated packet, fetch the security label from the
4943 * associated socket. */
4843 struct sk_security_struct *sksec = sk->sk_security; 4944 struct sk_security_struct *sksec = sk->sk_security;
4844 peer_sid = sksec->sid; 4945 peer_sid = sksec->sid;
4845 secmark_perm = PACKET__SEND; 4946 secmark_perm = PACKET__SEND;
@@ -5503,11 +5604,11 @@ static int selinux_setprocattr(struct task_struct *p,
5503 /* Check for ptracing, and update the task SID if ok. 5604 /* Check for ptracing, and update the task SID if ok.
5504 Otherwise, leave SID unchanged and fail. */ 5605 Otherwise, leave SID unchanged and fail. */
5505 ptsid = 0; 5606 ptsid = 0;
5506 task_lock(p); 5607 rcu_read_lock();
5507 tracer = ptrace_parent(p); 5608 tracer = ptrace_parent(p);
5508 if (tracer) 5609 if (tracer)
5509 ptsid = task_sid(tracer); 5610 ptsid = task_sid(tracer);
5510 task_unlock(p); 5611 rcu_read_unlock();
5511 5612
5512 if (tracer) { 5613 if (tracer) {
5513 error = avc_has_perm(ptsid, sid, SECCLASS_PROCESS, 5614 error = avc_has_perm(ptsid, sid, SECCLASS_PROCESS,
diff --git a/security/selinux/include/objsec.h b/security/selinux/include/objsec.h
index b1dfe1049450..078e553f52f2 100644
--- a/security/selinux/include/objsec.h
+++ b/security/selinux/include/objsec.h
@@ -38,7 +38,10 @@ struct task_security_struct {
38 38
39struct inode_security_struct { 39struct inode_security_struct {
40 struct inode *inode; /* back pointer to inode object */ 40 struct inode *inode; /* back pointer to inode object */
41 struct list_head list; /* list of inode_security_struct */ 41 union {
42 struct list_head list; /* list of inode_security_struct */
43 struct rcu_head rcu; /* for freeing the inode_security_struct */
44 };
42 u32 task_sid; /* SID of creating task */ 45 u32 task_sid; /* SID of creating task */
43 u32 sid; /* SID of this object */ 46 u32 sid; /* SID of this object */
44 u16 sclass; /* security class of this object */ 47 u16 sclass; /* security class of this object */
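Note: the union above works because an inode_security_struct is never both on the superblock's isec list and queued for deferred freeing at the same time, so the list linkage and the RCU callback head can share storage. A toy userspace analogue of that reuse (hand-rolled list and deferred-free heads instead of list_head/rcu_head; a real RCU layer would defer the callback):

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

struct list_node  { struct list_node *next, *prev; };
struct defer_node { void (*func)(struct defer_node *); };

/* While alive the object sits on a list; during teardown the list linkage
 * is dead weight, so the same bytes carry the deferred-free callback. */
struct item {
        union {
                struct list_node  list;
                struct defer_node defer;
        };
        int id;
};

static void item_free_cb(struct defer_node *d)
{
        struct item *it = (struct item *)((char *)d - offsetof(struct item, defer));

        printf("freeing item %d\n", it->id);
        free(it);
}

int main(void)
{
        struct item *it = calloc(1, sizeof(*it));

        if (!it)
                return 1;
        it->id = 42;

        printf("list %zu bytes, defer %zu bytes, item %zu bytes\n",
               sizeof(it->list), sizeof(it->defer), sizeof(*it));

        /* ... object would live on a list via it->list here ... */

        it->defer.func = item_free_cb;
        it->defer.func(&it->defer);   /* call_rcu() would defer this call */
        return 0;
}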
diff --git a/security/selinux/include/xfrm.h b/security/selinux/include/xfrm.h
index 0dec76c64cf5..48c3cc94c168 100644
--- a/security/selinux/include/xfrm.h
+++ b/security/selinux/include/xfrm.h
@@ -39,6 +39,7 @@ int selinux_xfrm_sock_rcv_skb(u32 sk_sid, struct sk_buff *skb,
39int selinux_xfrm_postroute_last(u32 sk_sid, struct sk_buff *skb, 39int selinux_xfrm_postroute_last(u32 sk_sid, struct sk_buff *skb,
40 struct common_audit_data *ad, u8 proto); 40 struct common_audit_data *ad, u8 proto);
41int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall); 41int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall);
42int selinux_xfrm_skb_sid(struct sk_buff *skb, u32 *sid);
42 43
43static inline void selinux_xfrm_notify_policyload(void) 44static inline void selinux_xfrm_notify_policyload(void)
44{ 45{
@@ -79,11 +80,12 @@ static inline int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid,
79static inline void selinux_xfrm_notify_policyload(void) 80static inline void selinux_xfrm_notify_policyload(void)
80{ 81{
81} 82}
82#endif
83 83
84static inline int selinux_skb_xfrm_sid(struct sk_buff *skb, u32 *sid) 84static inline int selinux_xfrm_skb_sid(struct sk_buff *skb, u32 *sid)
85{ 85{
86 return selinux_xfrm_decode_session(skb, sid, 0); 86 *sid = SECSID_NULL;
87 return 0;
87} 88}
89#endif
88 90
89#endif /* _SELINUX_XFRM_H_ */ 91#endif /* _SELINUX_XFRM_H_ */
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
index ee470a0b5c27..d106733ad987 100644
--- a/security/selinux/ss/services.c
+++ b/security/selinux/ss/services.c
@@ -2334,50 +2334,16 @@ int security_fs_use(struct super_block *sb)
2334 struct ocontext *c; 2334 struct ocontext *c;
2335 struct superblock_security_struct *sbsec = sb->s_security; 2335 struct superblock_security_struct *sbsec = sb->s_security;
2336 const char *fstype = sb->s_type->name; 2336 const char *fstype = sb->s_type->name;
2337 const char *subtype = (sb->s_subtype && sb->s_subtype[0]) ? sb->s_subtype : NULL;
2338 struct ocontext *base = NULL;
2339 2337
2340 read_lock(&policy_rwlock); 2338 read_lock(&policy_rwlock);
2341 2339
2342 for (c = policydb.ocontexts[OCON_FSUSE]; c; c = c->next) { 2340 c = policydb.ocontexts[OCON_FSUSE];
2343 char *sub; 2341 while (c) {
2344 int baselen; 2342 if (strcmp(fstype, c->u.name) == 0)
2345
2346 baselen = strlen(fstype);
2347
2348 /* if base does not match, this is not the one */
2349 if (strncmp(fstype, c->u.name, baselen))
2350 continue;
2351
2352 /* if there is no subtype, this is the one! */
2353 if (!subtype)
2354 break;
2355
2356 /* skip past the base in this entry */
2357 sub = c->u.name + baselen;
2358
2359 /* entry is only a base. save it. keep looking for subtype */
2360 if (sub[0] == '\0') {
2361 base = c;
2362 continue;
2363 }
2364
2365 /* entry is not followed by a subtype, so it is not a match */
2366 if (sub[0] != '.')
2367 continue;
2368
2369 /* whew, we found a subtype of this fstype */
2370 sub++; /* move past '.' */
2371
2372 /* exact match of fstype AND subtype */
2373 if (!strcmp(subtype, sub))
2374 break; 2343 break;
2344 c = c->next;
2375 } 2345 }
2376 2346
2377 /* in case we had found an fstype match but no subtype match */
2378 if (!c)
2379 c = base;
2380
2381 if (c) { 2347 if (c) {
2382 sbsec->behavior = c->v.behavior; 2348 sbsec->behavior = c->v.behavior;
2383 if (!c->sid[0]) { 2349 if (!c->sid[0]) {
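Note: with the subtype handling removed, security_fs_use() collapses to a first-exact-match walk of the OCON_FSUSE list. A trivial userspace sketch of the resulting lookup (hypothetical node type, plain strings instead of ocontexts):

#include <stdio.h>
#include <string.h>

struct entry {
        const char   *name;
        struct entry *next;
};

/* First entry whose name matches the fstype exactly; subtypes such as
 * "fuse.sshfs" no longer get special treatment. */
static const struct entry *lookup_exact(const struct entry *c, const char *fstype)
{
        for (; c; c = c->next)
                if (strcmp(fstype, c->name) == 0)
                        return c;
        return NULL;
}

int main(void)
{
        struct entry c = { "ext4", NULL };
        struct entry b = { "fuse.sshfs", &c };
        struct entry a = { "fuse", &b };
        const struct entry *hit = lookup_exact(&a, "fuse");

        printf("%s\n", hit ? hit->name : "(none)");  /* prints "fuse" */
        return 0;
}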
diff --git a/security/selinux/xfrm.c b/security/selinux/xfrm.c
index a91d205ec0c6..0462cb3ff0a7 100644
--- a/security/selinux/xfrm.c
+++ b/security/selinux/xfrm.c
@@ -209,19 +209,26 @@ int selinux_xfrm_state_pol_flow_match(struct xfrm_state *x,
209 NULL) ? 0 : 1); 209 NULL) ? 0 : 1);
210} 210}
211 211
212/* 212static u32 selinux_xfrm_skb_sid_egress(struct sk_buff *skb)
213 * LSM hook implementation that checks and/or returns the xfrm sid for the
214 * incoming packet.
215 */
216int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall)
217{ 213{
218 u32 sid_session = SECSID_NULL; 214 struct dst_entry *dst = skb_dst(skb);
219 struct sec_path *sp; 215 struct xfrm_state *x;
220 216
221 if (skb == NULL) 217 if (dst == NULL)
222 goto out; 218 return SECSID_NULL;
219 x = dst->xfrm;
220 if (x == NULL || !selinux_authorizable_xfrm(x))
221 return SECSID_NULL;
222
223 return x->security->ctx_sid;
224}
225
226static int selinux_xfrm_skb_sid_ingress(struct sk_buff *skb,
227 u32 *sid, int ckall)
228{
229 u32 sid_session = SECSID_NULL;
230 struct sec_path *sp = skb->sp;
223 231
224 sp = skb->sp;
225 if (sp) { 232 if (sp) {
226 int i; 233 int i;
227 234
@@ -248,6 +255,30 @@ out:
248} 255}
249 256
250/* 257/*
258 * LSM hook implementation that checks and/or returns the xfrm sid for the
259 * incoming packet.
260 */
261int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall)
262{
263 if (skb == NULL) {
264 *sid = SECSID_NULL;
265 return 0;
266 }
267 return selinux_xfrm_skb_sid_ingress(skb, sid, ckall);
268}
269
270int selinux_xfrm_skb_sid(struct sk_buff *skb, u32 *sid)
271{
272 int rc;
273
274 rc = selinux_xfrm_skb_sid_ingress(skb, sid, 0);
275 if (rc == 0 && *sid == SECSID_NULL)
276 *sid = selinux_xfrm_skb_sid_egress(skb);
277
278 return rc;
279}
280
281/*
251 * LSM hook implementation that allocs and transfers uctx spec to xfrm_policy. 282 * LSM hook implementation that allocs and transfers uctx spec to xfrm_policy.
252 */ 283 */
253int selinux_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp, 284int selinux_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp,
@@ -327,19 +358,22 @@ int selinux_xfrm_state_alloc_acquire(struct xfrm_state *x,
327 return rc; 358 return rc;
328 359
329 ctx = kmalloc(sizeof(*ctx) + str_len, GFP_ATOMIC); 360 ctx = kmalloc(sizeof(*ctx) + str_len, GFP_ATOMIC);
330 if (!ctx) 361 if (!ctx) {
331 return -ENOMEM; 362 rc = -ENOMEM;
363 goto out;
364 }
332 365
333 ctx->ctx_doi = XFRM_SC_DOI_LSM; 366 ctx->ctx_doi = XFRM_SC_DOI_LSM;
334 ctx->ctx_alg = XFRM_SC_ALG_SELINUX; 367 ctx->ctx_alg = XFRM_SC_ALG_SELINUX;
335 ctx->ctx_sid = secid; 368 ctx->ctx_sid = secid;
336 ctx->ctx_len = str_len; 369 ctx->ctx_len = str_len;
337 memcpy(ctx->ctx_str, ctx_str, str_len); 370 memcpy(ctx->ctx_str, ctx_str, str_len);
338 kfree(ctx_str);
339 371
340 x->security = ctx; 372 x->security = ctx;
341 atomic_inc(&selinux_xfrm_refcount); 373 atomic_inc(&selinux_xfrm_refcount);
342 return 0; 374out:
375 kfree(ctx_str);
376 return rc;
343} 377}
344 378
345/* 379/*
diff --git a/sound/atmel/abdac.c b/sound/atmel/abdac.c
index 872d59e35ee2..721d8fd45685 100644
--- a/sound/atmel/abdac.c
+++ b/sound/atmel/abdac.c
@@ -357,7 +357,8 @@ static int set_sample_rates(struct atmel_abdac *dac)
357 if (new_rate < 0) 357 if (new_rate < 0)
358 break; 358 break;
359 /* make sure we are below the ABDAC clock */ 359 /* make sure we are below the ABDAC clock */
360 if (new_rate <= clk_get_rate(dac->pclk)) { 360 if (index < MAX_NUM_RATES &&
361 new_rate <= clk_get_rate(dac->pclk)) {
361 dac->rates[index] = new_rate / 256; 362 dac->rates[index] = new_rate / 256;
362 index++; 363 index++;
363 } 364 }
diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
index 6e03b465e44e..a2104671f51d 100644
--- a/sound/core/pcm_lib.c
+++ b/sound/core/pcm_lib.c
@@ -1937,6 +1937,8 @@ static int wait_for_avail(struct snd_pcm_substream *substream,
1937 case SNDRV_PCM_STATE_DISCONNECTED: 1937 case SNDRV_PCM_STATE_DISCONNECTED:
1938 err = -EBADFD; 1938 err = -EBADFD;
1939 goto _endloop; 1939 goto _endloop;
1940 case SNDRV_PCM_STATE_PAUSED:
1941 continue;
1940 } 1942 }
1941 if (!tout) { 1943 if (!tout) {
1942 snd_printd("%s write error (DMA or IRQ trouble?)\n", 1944 snd_printd("%s write error (DMA or IRQ trouble?)\n",
diff --git a/sound/firewire/amdtp.c b/sound/firewire/amdtp.c
index d3226892ad6b..9048777228e2 100644
--- a/sound/firewire/amdtp.c
+++ b/sound/firewire/amdtp.c
@@ -434,17 +434,14 @@ static void queue_out_packet(struct amdtp_out_stream *s, unsigned int cycle)
434 return; 434 return;
435 index = s->packet_index; 435 index = s->packet_index;
436 436
 437 /* this module generates an empty packet for 'no data' */
437 syt = calculate_syt(s, cycle); 438 syt = calculate_syt(s, cycle);
438 if (!(s->flags & CIP_BLOCKING)) { 439 if (!(s->flags & CIP_BLOCKING))
439 data_blocks = calculate_data_blocks(s); 440 data_blocks = calculate_data_blocks(s);
440 } else { 441 else if (syt != 0xffff)
441 if (syt != 0xffff) { 442 data_blocks = s->syt_interval;
442 data_blocks = s->syt_interval; 443 else
443 } else { 444 data_blocks = 0;
444 data_blocks = 0;
445 syt = 0xffffff;
446 }
447 }
448 445
449 buffer = s->buffer.packets[index].buffer; 446 buffer = s->buffer.packets[index].buffer;
450 buffer[0] = cpu_to_be32(ACCESS_ONCE(s->source_node_id_field) | 447 buffer[0] = cpu_to_be32(ACCESS_ONCE(s->source_node_id_field) |
diff --git a/sound/firewire/dice.c b/sound/firewire/dice.c
index 57bcd31fcc12..c0aa64941cee 100644
--- a/sound/firewire/dice.c
+++ b/sound/firewire/dice.c
@@ -1019,7 +1019,7 @@ static void dice_proc_read(struct snd_info_entry *entry,
1019 1019
1020 if (dice_proc_read_mem(dice, &tx_rx_header, sections[2], 2) < 0) 1020 if (dice_proc_read_mem(dice, &tx_rx_header, sections[2], 2) < 0)
1021 return; 1021 return;
1022 quadlets = min_t(u32, tx_rx_header.size, sizeof(buf.tx)); 1022 quadlets = min_t(u32, tx_rx_header.size, sizeof(buf.tx) / 4);
1023 for (stream = 0; stream < tx_rx_header.number; ++stream) { 1023 for (stream = 0; stream < tx_rx_header.number; ++stream) {
1024 if (dice_proc_read_mem(dice, &buf.tx, sections[2] + 2 + 1024 if (dice_proc_read_mem(dice, &buf.tx, sections[2] + 2 +
1025 stream * tx_rx_header.size, 1025 stream * tx_rx_header.size,
@@ -1045,7 +1045,7 @@ static void dice_proc_read(struct snd_info_entry *entry,
1045 1045
1046 if (dice_proc_read_mem(dice, &tx_rx_header, sections[4], 2) < 0) 1046 if (dice_proc_read_mem(dice, &tx_rx_header, sections[4], 2) < 0)
1047 return; 1047 return;
1048 quadlets = min_t(u32, tx_rx_header.size, sizeof(buf.rx)); 1048 quadlets = min_t(u32, tx_rx_header.size, sizeof(buf.rx) / 4);
1049 for (stream = 0; stream < tx_rx_header.number; ++stream) { 1049 for (stream = 0; stream < tx_rx_header.number; ++stream) {
1050 if (dice_proc_read_mem(dice, &buf.rx, sections[4] + 2 + 1050 if (dice_proc_read_mem(dice, &buf.rx, sections[4] + 2 +
1051 stream * tx_rx_header.size, 1051 stream * tx_rx_header.size,
diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h
index 77db69480c19..7aa9870040c1 100644
--- a/sound/pci/hda/hda_codec.h
+++ b/sound/pci/hda/hda_codec.h
@@ -698,7 +698,6 @@ struct hda_bus {
698 unsigned int in_reset:1; /* during reset operation */ 698 unsigned int in_reset:1; /* during reset operation */
699 unsigned int power_keep_link_on:1; /* don't power off HDA link */ 699 unsigned int power_keep_link_on:1; /* don't power off HDA link */
700 unsigned int no_response_fallback:1; /* don't fallback at RIRB error */ 700 unsigned int no_response_fallback:1; /* don't fallback at RIRB error */
701 unsigned int avoid_link_reset:1; /* don't reset link at runtime PM */
702 701
703 int primary_dig_out_type; /* primary digital out PCM type */ 702 int primary_dig_out_type; /* primary digital out PCM type */
704}; 703};
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
index 3067ed4fe3b2..c7f6d1cab606 100644
--- a/sound/pci/hda/hda_generic.c
+++ b/sound/pci/hda/hda_generic.c
@@ -474,6 +474,20 @@ static void invalidate_nid_path(struct hda_codec *codec, int idx)
474 memset(path, 0, sizeof(*path)); 474 memset(path, 0, sizeof(*path));
475} 475}
476 476
477/* return a DAC if paired to the given pin by codec driver */
478static hda_nid_t get_preferred_dac(struct hda_codec *codec, hda_nid_t pin)
479{
480 struct hda_gen_spec *spec = codec->spec;
481 const hda_nid_t *list = spec->preferred_dacs;
482
483 if (!list)
484 return 0;
485 for (; *list; list += 2)
486 if (*list == pin)
487 return list[1];
488 return 0;
489}
490
477/* look for an empty DAC slot */ 491/* look for an empty DAC slot */
478static hda_nid_t look_for_dac(struct hda_codec *codec, hda_nid_t pin, 492static hda_nid_t look_for_dac(struct hda_codec *codec, hda_nid_t pin,
479 bool is_digital) 493 bool is_digital)
@@ -1192,7 +1206,14 @@ static int try_assign_dacs(struct hda_codec *codec, int num_outs,
1192 continue; 1206 continue;
1193 } 1207 }
1194 1208
1195 dacs[i] = look_for_dac(codec, pin, false); 1209 dacs[i] = get_preferred_dac(codec, pin);
1210 if (dacs[i]) {
1211 if (is_dac_already_used(codec, dacs[i]))
1212 badness += bad->shared_primary;
1213 }
1214
1215 if (!dacs[i])
1216 dacs[i] = look_for_dac(codec, pin, false);
1196 if (!dacs[i] && !i) { 1217 if (!dacs[i] && !i) {
1197 /* try to steal the DAC of surrounds for the front */ 1218 /* try to steal the DAC of surrounds for the front */
1198 for (j = 1; j < num_outs; j++) { 1219 for (j = 1; j < num_outs; j++) {
@@ -2506,12 +2527,8 @@ static int create_out_jack_modes(struct hda_codec *codec, int num_pins,
2506 2527
2507 for (i = 0; i < num_pins; i++) { 2528 for (i = 0; i < num_pins; i++) {
2508 hda_nid_t pin = pins[i]; 2529 hda_nid_t pin = pins[i];
2509 if (pin == spec->hp_mic_pin) { 2530 if (pin == spec->hp_mic_pin)
2510 int ret = create_hp_mic_jack_mode(codec, pin);
2511 if (ret < 0)
2512 return ret;
2513 continue; 2531 continue;
2514 }
2515 if (get_out_jack_num_items(codec, pin) > 1) { 2532 if (get_out_jack_num_items(codec, pin) > 1) {
2516 struct snd_kcontrol_new *knew; 2533 struct snd_kcontrol_new *knew;
2517 char name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN]; 2534 char name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
@@ -2764,7 +2781,7 @@ static int hp_mic_jack_mode_put(struct snd_kcontrol *kcontrol,
2764 val &= ~(AC_PINCTL_VREFEN | PIN_HP); 2781 val &= ~(AC_PINCTL_VREFEN | PIN_HP);
2765 val |= get_vref_idx(vref_caps, idx) | PIN_IN; 2782 val |= get_vref_idx(vref_caps, idx) | PIN_IN;
2766 } else 2783 } else
2767 val = snd_hda_get_default_vref(codec, nid); 2784 val = snd_hda_get_default_vref(codec, nid) | PIN_IN;
2768 } 2785 }
2769 snd_hda_set_pin_ctl_cache(codec, nid, val); 2786 snd_hda_set_pin_ctl_cache(codec, nid, val);
2770 call_hp_automute(codec, NULL); 2787 call_hp_automute(codec, NULL);
@@ -2784,9 +2801,6 @@ static int create_hp_mic_jack_mode(struct hda_codec *codec, hda_nid_t pin)
2784 struct hda_gen_spec *spec = codec->spec; 2801 struct hda_gen_spec *spec = codec->spec;
2785 struct snd_kcontrol_new *knew; 2802 struct snd_kcontrol_new *knew;
2786 2803
2787 if (get_out_jack_num_items(codec, pin) <= 1 &&
2788 get_in_jack_num_items(codec, pin) <= 1)
2789 return 0; /* no need */
2790 knew = snd_hda_gen_add_kctl(spec, "Headphone Mic Jack Mode", 2804 knew = snd_hda_gen_add_kctl(spec, "Headphone Mic Jack Mode",
2791 &hp_mic_jack_mode_enum); 2805 &hp_mic_jack_mode_enum);
2792 if (!knew) 2806 if (!knew)
@@ -2815,6 +2829,42 @@ static int add_loopback_list(struct hda_gen_spec *spec, hda_nid_t mix, int idx)
2815 return 0; 2829 return 0;
2816} 2830}
2817 2831
2832/* return true if either a volume or a mute amp is found for the given
2833 * aamix path; the amp has to be either in the mixer node or its direct leaf
2834 */
2835static bool look_for_mix_leaf_ctls(struct hda_codec *codec, hda_nid_t mix_nid,
2836 hda_nid_t pin, unsigned int *mix_val,
2837 unsigned int *mute_val)
2838{
2839 int idx, num_conns;
2840 const hda_nid_t *list;
2841 hda_nid_t nid;
2842
2843 idx = snd_hda_get_conn_index(codec, mix_nid, pin, true);
2844 if (idx < 0)
2845 return false;
2846
2847 *mix_val = *mute_val = 0;
2848 if (nid_has_volume(codec, mix_nid, HDA_INPUT))
2849 *mix_val = HDA_COMPOSE_AMP_VAL(mix_nid, 3, idx, HDA_INPUT);
2850 if (nid_has_mute(codec, mix_nid, HDA_INPUT))
2851 *mute_val = HDA_COMPOSE_AMP_VAL(mix_nid, 3, idx, HDA_INPUT);
2852 if (*mix_val && *mute_val)
2853 return true;
2854
2855 /* check leaf node */
2856 num_conns = snd_hda_get_conn_list(codec, mix_nid, &list);
2857 if (num_conns < idx)
2858 return false;
2859 nid = list[idx];
2860 if (!*mix_val && nid_has_volume(codec, nid, HDA_OUTPUT))
2861 *mix_val = HDA_COMPOSE_AMP_VAL(nid, 3, 0, HDA_OUTPUT);
2862 if (!*mute_val && nid_has_mute(codec, nid, HDA_OUTPUT))
2863 *mute_val = HDA_COMPOSE_AMP_VAL(nid, 3, 0, HDA_OUTPUT);
2864
2865 return *mix_val || *mute_val;
2866}
2867
2818/* create input playback/capture controls for the given pin */ 2868/* create input playback/capture controls for the given pin */
2819static int new_analog_input(struct hda_codec *codec, int input_idx, 2869static int new_analog_input(struct hda_codec *codec, int input_idx,
2820 hda_nid_t pin, const char *ctlname, int ctlidx, 2870 hda_nid_t pin, const char *ctlname, int ctlidx,
@@ -2822,12 +2872,11 @@ static int new_analog_input(struct hda_codec *codec, int input_idx,
2822{ 2872{
2823 struct hda_gen_spec *spec = codec->spec; 2873 struct hda_gen_spec *spec = codec->spec;
2824 struct nid_path *path; 2874 struct nid_path *path;
2825 unsigned int val; 2875 unsigned int mix_val, mute_val;
2826 int err, idx; 2876 int err, idx;
2827 2877
2828 if (!nid_has_volume(codec, mix_nid, HDA_INPUT) && 2878 if (!look_for_mix_leaf_ctls(codec, mix_nid, pin, &mix_val, &mute_val))
2829 !nid_has_mute(codec, mix_nid, HDA_INPUT)) 2879 return 0;
2830 return 0; /* no need for analog loopback */
2831 2880
2832 path = snd_hda_add_new_path(codec, pin, mix_nid, 0); 2881 path = snd_hda_add_new_path(codec, pin, mix_nid, 0);
2833 if (!path) 2882 if (!path)
@@ -2836,20 +2885,18 @@ static int new_analog_input(struct hda_codec *codec, int input_idx,
2836 spec->loopback_paths[input_idx] = snd_hda_get_path_idx(codec, path); 2885 spec->loopback_paths[input_idx] = snd_hda_get_path_idx(codec, path);
2837 2886
2838 idx = path->idx[path->depth - 1]; 2887 idx = path->idx[path->depth - 1];
2839 if (nid_has_volume(codec, mix_nid, HDA_INPUT)) { 2888 if (mix_val) {
2840 val = HDA_COMPOSE_AMP_VAL(mix_nid, 3, idx, HDA_INPUT); 2889 err = __add_pb_vol_ctrl(spec, HDA_CTL_WIDGET_VOL, ctlname, ctlidx, mix_val);
2841 err = __add_pb_vol_ctrl(spec, HDA_CTL_WIDGET_VOL, ctlname, ctlidx, val);
2842 if (err < 0) 2890 if (err < 0)
2843 return err; 2891 return err;
2844 path->ctls[NID_PATH_VOL_CTL] = val; 2892 path->ctls[NID_PATH_VOL_CTL] = mix_val;
2845 } 2893 }
2846 2894
2847 if (nid_has_mute(codec, mix_nid, HDA_INPUT)) { 2895 if (mute_val) {
2848 val = HDA_COMPOSE_AMP_VAL(mix_nid, 3, idx, HDA_INPUT); 2896 err = __add_pb_sw_ctrl(spec, HDA_CTL_WIDGET_MUTE, ctlname, ctlidx, mute_val);
2849 err = __add_pb_sw_ctrl(spec, HDA_CTL_WIDGET_MUTE, ctlname, ctlidx, val);
2850 if (err < 0) 2897 if (err < 0)
2851 return err; 2898 return err;
2852 path->ctls[NID_PATH_MUTE_CTL] = val; 2899 path->ctls[NID_PATH_MUTE_CTL] = mute_val;
2853 } 2900 }
2854 2901
2855 path->active = true; 2902 path->active = true;
@@ -4271,6 +4318,26 @@ static unsigned int snd_hda_gen_path_power_filter(struct hda_codec *codec,
4271 return AC_PWRST_D3; 4318 return AC_PWRST_D3;
4272} 4319}
4273 4320
4321/* mute all aamix inputs initially; parse up to the first leaves */
4322static void mute_all_mixer_nid(struct hda_codec *codec, hda_nid_t mix)
4323{
4324 int i, nums;
4325 const hda_nid_t *conn;
4326 bool has_amp;
4327
4328 nums = snd_hda_get_conn_list(codec, mix, &conn);
4329 has_amp = nid_has_mute(codec, mix, HDA_INPUT);
4330 for (i = 0; i < nums; i++) {
4331 if (has_amp)
4332 snd_hda_codec_amp_stereo(codec, mix,
4333 HDA_INPUT, i,
4334 0xff, HDA_AMP_MUTE);
4335 else if (nid_has_volume(codec, conn[i], HDA_OUTPUT))
4336 snd_hda_codec_amp_stereo(codec, conn[i],
4337 HDA_OUTPUT, 0,
4338 0xff, HDA_AMP_MUTE);
4339 }
4340}
4274 4341
4275/* 4342/*
4276 * Parse the given BIOS configuration and set up the hda_gen_spec 4343 * Parse the given BIOS configuration and set up the hda_gen_spec
@@ -4383,6 +4450,17 @@ int snd_hda_gen_parse_auto_config(struct hda_codec *codec,
4383 if (err < 0) 4450 if (err < 0)
4384 return err; 4451 return err;
4385 4452
4453 /* create "Headphone Mic Jack Mode" if no input selection is
4454 * available (or user specifies add_jack_modes hint)
4455 */
4456 if (spec->hp_mic_pin &&
4457 (spec->auto_mic || spec->input_mux.num_items == 1 ||
4458 spec->add_jack_modes)) {
4459 err = create_hp_mic_jack_mode(codec, spec->hp_mic_pin);
4460 if (err < 0)
4461 return err;
4462 }
4463
4386 if (spec->add_jack_modes) { 4464 if (spec->add_jack_modes) {
4387 if (cfg->line_out_type != AUTO_PIN_SPEAKER_OUT) { 4465 if (cfg->line_out_type != AUTO_PIN_SPEAKER_OUT) {
4388 err = create_out_jack_modes(codec, cfg->line_outs, 4466 err = create_out_jack_modes(codec, cfg->line_outs,
@@ -4398,6 +4476,10 @@ int snd_hda_gen_parse_auto_config(struct hda_codec *codec,
4398 } 4476 }
4399 } 4477 }
4400 4478
 4479 /* mute all aamix inputs initially */
4480 if (spec->mixer_nid)
4481 mute_all_mixer_nid(codec, spec->mixer_nid);
4482
4401 dig_only: 4483 dig_only:
4402 parse_digital(codec); 4484 parse_digital(codec);
4403 4485
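Note: the preferred_dacs table consumed by get_preferred_dac() above is a flat array of (pin, DAC) pairs terminated by a 0 entry, as the patch_ad1986a() hunk later in this diff shows. A userspace sketch of the same lookup idiom (plain ints standing in for hda_nid_t, pair values borrowed from that hunk):

#include <stdio.h>

/* (pin, dac) pairs, terminated by a 0 pin -- same layout the codec driver
 * hands to the generic parser via spec->preferred_dacs. */
static const int preferred_pairs[] = {
        0x1a, 0x03,
        0x1b, 0x03,
        0x1c, 0x04,
        0
};

static int get_preferred(const int *list, int pin)
{
        if (!list)
                return 0;
        for (; *list; list += 2)
                if (*list == pin)
                        return list[1];
        return 0;
}

int main(void)
{
        printf("pin 0x1b -> dac 0x%x\n", get_preferred(preferred_pairs, 0x1b));
        printf("pin 0x1f -> dac 0x%x\n", get_preferred(preferred_pairs, 0x1f));
        return 0;
}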
diff --git a/sound/pci/hda/hda_generic.h b/sound/pci/hda/hda_generic.h
index 7e45cb44d151..0929a06df812 100644
--- a/sound/pci/hda/hda_generic.h
+++ b/sound/pci/hda/hda_generic.h
@@ -249,6 +249,9 @@ struct hda_gen_spec {
249 const struct badness_table *main_out_badness; 249 const struct badness_table *main_out_badness;
250 const struct badness_table *extra_out_badness; 250 const struct badness_table *extra_out_badness;
251 251
252 /* preferred pin/DAC pairs; an array of paired NIDs */
253 const hda_nid_t *preferred_dacs;
254
252 /* loopback mixing mode */ 255 /* loopback mixing mode */
253 bool aamix_mode; 256 bool aamix_mode;
254 257
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 7a09404579a7..956871d8b3d2 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -2994,8 +2994,7 @@ static int azx_runtime_suspend(struct device *dev)
2994 STATESTS_INT_MASK); 2994 STATESTS_INT_MASK);
2995 2995
2996 azx_stop_chip(chip); 2996 azx_stop_chip(chip);
2997 if (!chip->bus->avoid_link_reset) 2997 azx_enter_link_reset(chip);
2998 azx_enter_link_reset(chip);
2999 azx_clear_irq_pending(chip); 2998 azx_clear_irq_pending(chip);
3000 if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) 2999 if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL)
3001 hda_display_power(false); 3000 hda_display_power(false);
@@ -3434,6 +3433,10 @@ static void check_probe_mask(struct azx *chip, int dev)
3434 * white/black-list for enable_msi 3433 * white/black-list for enable_msi
3435 */ 3434 */
3436static struct snd_pci_quirk msi_black_list[] = { 3435static struct snd_pci_quirk msi_black_list[] = {
3436 SND_PCI_QUIRK(0x103c, 0x2191, "HP", 0), /* AMD Hudson */
3437 SND_PCI_QUIRK(0x103c, 0x2192, "HP", 0), /* AMD Hudson */
3438 SND_PCI_QUIRK(0x103c, 0x21f7, "HP", 0), /* AMD Hudson */
3439 SND_PCI_QUIRK(0x103c, 0x21fa, "HP", 0), /* AMD Hudson */
3437 SND_PCI_QUIRK(0x1043, 0x81f2, "ASUS", 0), /* Athlon64 X2 + nvidia */ 3440 SND_PCI_QUIRK(0x1043, 0x81f2, "ASUS", 0), /* Athlon64 X2 + nvidia */
3438 SND_PCI_QUIRK(0x1043, 0x81f6, "ASUS", 0), /* nvidia */ 3441 SND_PCI_QUIRK(0x1043, 0x81f6, "ASUS", 0), /* nvidia */
3439 SND_PCI_QUIRK(0x1043, 0x822d, "ASUS", 0), /* Athlon64 X2 + nvidia MCP55 */ 3442 SND_PCI_QUIRK(0x1043, 0x822d, "ASUS", 0), /* Athlon64 X2 + nvidia MCP55 */
@@ -3877,7 +3880,8 @@ static int azx_probe(struct pci_dev *pci,
3877 } 3880 }
3878 3881
3879 dev++; 3882 dev++;
3880 complete_all(&chip->probe_wait); 3883 if (chip->disabled)
3884 complete_all(&chip->probe_wait);
3881 return 0; 3885 return 0;
3882 3886
3883out_free: 3887out_free:
@@ -3954,10 +3958,10 @@ static int azx_probe_continue(struct azx *chip)
3954 if ((chip->driver_caps & AZX_DCAPS_PM_RUNTIME) || chip->use_vga_switcheroo) 3958 if ((chip->driver_caps & AZX_DCAPS_PM_RUNTIME) || chip->use_vga_switcheroo)
3955 pm_runtime_put_noidle(&pci->dev); 3959 pm_runtime_put_noidle(&pci->dev);
3956 3960
3957 return 0;
3958
3959out_free: 3961out_free:
3960 chip->init_failed = 1; 3962 if (err < 0)
3963 chip->init_failed = 1;
3964 complete_all(&chip->probe_wait);
3961 return err; 3965 return err;
3962} 3966}
3963 3967
diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c
index 1a83559f4cbd..699262a3e07a 100644
--- a/sound/pci/hda/patch_analog.c
+++ b/sound/pci/hda/patch_analog.c
@@ -147,6 +147,8 @@ static void ad_vmaster_eapd_hook(void *private_data, int enabled)
147 147
148 if (!spec->eapd_nid) 148 if (!spec->eapd_nid)
149 return; 149 return;
150 if (codec->inv_eapd)
151 enabled = !enabled;
150 snd_hda_codec_update_cache(codec, spec->eapd_nid, 0, 152 snd_hda_codec_update_cache(codec, spec->eapd_nid, 0,
151 AC_VERB_SET_EAPD_BTLENABLE, 153 AC_VERB_SET_EAPD_BTLENABLE,
152 enabled ? 0x02 : 0x00); 154 enabled ? 0x02 : 0x00);
@@ -338,6 +340,14 @@ static int patch_ad1986a(struct hda_codec *codec)
338{ 340{
339 int err; 341 int err;
340 struct ad198x_spec *spec; 342 struct ad198x_spec *spec;
343 static hda_nid_t preferred_pairs[] = {
344 0x1a, 0x03,
345 0x1b, 0x03,
346 0x1c, 0x04,
347 0x1d, 0x05,
348 0x1e, 0x03,
349 0
350 };
341 351
342 err = alloc_ad_spec(codec); 352 err = alloc_ad_spec(codec);
343 if (err < 0) 353 if (err < 0)
@@ -358,6 +368,11 @@ static int patch_ad1986a(struct hda_codec *codec)
358 * So, let's disable the shared stream. 368 * So, let's disable the shared stream.
359 */ 369 */
360 spec->gen.multiout.no_share_stream = 1; 370 spec->gen.multiout.no_share_stream = 1;
371 /* give fixed DAC/pin pairs */
372 spec->gen.preferred_dacs = preferred_pairs;
373
374 /* AD1986A can't manage the dynamic pin on/off smoothly */
375 spec->gen.auto_mute_via_amp = 1;
361 376
362 snd_hda_pick_fixup(codec, ad1986a_fixup_models, ad1986a_fixup_tbl, 377 snd_hda_pick_fixup(codec, ad1986a_fixup_models, ad1986a_fixup_tbl,
363 ad1986a_fixups); 378 ad1986a_fixups);
@@ -962,6 +977,7 @@ static void ad1884_fixup_hp_eapd(struct hda_codec *codec,
962 switch (action) { 977 switch (action) {
963 case HDA_FIXUP_ACT_PRE_PROBE: 978 case HDA_FIXUP_ACT_PRE_PROBE:
964 spec->gen.vmaster_mute.hook = ad1884_vmaster_hp_gpio_hook; 979 spec->gen.vmaster_mute.hook = ad1884_vmaster_hp_gpio_hook;
980 spec->gen.own_eapd_ctl = 1;
965 snd_hda_sequence_write_cache(codec, gpio_init_verbs); 981 snd_hda_sequence_write_cache(codec, gpio_init_verbs);
966 break; 982 break;
967 case HDA_FIXUP_ACT_PROBE: 983 case HDA_FIXUP_ACT_PROBE:
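The new inv_eapd test in ad_vmaster_eapd_hook() flips the requested state before the EAPD verb is cached, for AD codecs whose external-amp enable is active-low. A small standalone illustration of the payload that logic produces (the helper name is made up for the example):

#include <stdbool.h>
#include <stdio.h>

/* Compute the EAPD payload the hook would cache, honouring an
 * "inverted EAPD" flag. 0x02 is the BTL-enable data byte. */
static unsigned int eapd_payload(bool enabled, bool inv_eapd)
{
        if (inv_eapd)
                enabled = !enabled;
        return enabled ? 0x02 : 0x00;
}

int main(void)
{
        printf("normal on  : 0x%02x\n", eapd_payload(true, false));
        printf("inverted on: 0x%02x\n", eapd_payload(true, true));
        return 0;
}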
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index c205bb1747fd..3fbf2883e06e 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -2936,7 +2936,6 @@ static const struct snd_pci_quirk cxt5066_cfg_tbl[] = {
2936 SND_PCI_QUIRK(0x1028, 0x0401, "Dell Vostro 1014", CXT5066_DELL_VOSTRO), 2936 SND_PCI_QUIRK(0x1028, 0x0401, "Dell Vostro 1014", CXT5066_DELL_VOSTRO),
2937 SND_PCI_QUIRK(0x1028, 0x0408, "Dell Inspiron One 19T", CXT5066_IDEAPAD), 2937 SND_PCI_QUIRK(0x1028, 0x0408, "Dell Inspiron One 19T", CXT5066_IDEAPAD),
2938 SND_PCI_QUIRK(0x1028, 0x050f, "Dell Inspiron", CXT5066_IDEAPAD), 2938 SND_PCI_QUIRK(0x1028, 0x050f, "Dell Inspiron", CXT5066_IDEAPAD),
2939 SND_PCI_QUIRK(0x1028, 0x0510, "Dell Vostro", CXT5066_IDEAPAD),
2940 SND_PCI_QUIRK(0x103c, 0x360b, "HP G60", CXT5066_HP_LAPTOP), 2939 SND_PCI_QUIRK(0x103c, 0x360b, "HP G60", CXT5066_HP_LAPTOP),
2941 SND_PCI_QUIRK(0x1043, 0x13f3, "Asus A52J", CXT5066_ASUS), 2940 SND_PCI_QUIRK(0x1043, 0x13f3, "Asus A52J", CXT5066_ASUS),
2942 SND_PCI_QUIRK(0x1043, 0x1643, "Asus K52JU", CXT5066_ASUS), 2941 SND_PCI_QUIRK(0x1043, 0x1643, "Asus K52JU", CXT5066_ASUS),
@@ -3244,9 +3243,29 @@ enum {
3244#if IS_ENABLED(CONFIG_THINKPAD_ACPI) 3243#if IS_ENABLED(CONFIG_THINKPAD_ACPI)
3245 3244
3246#include <linux/thinkpad_acpi.h> 3245#include <linux/thinkpad_acpi.h>
3246#include <acpi/acpi.h>
3247 3247
3248static int (*led_set_func)(int, bool); 3248static int (*led_set_func)(int, bool);
3249 3249
3250static acpi_status acpi_check_cb(acpi_handle handle, u32 lvl, void *context,
3251 void **rv)
3252{
3253 bool *found = context;
3254 *found = true;
3255 return AE_OK;
3256}
3257
3258static bool is_thinkpad(struct hda_codec *codec)
3259{
3260 bool found = false;
3261 if (codec->subsystem_id >> 16 != 0x17aa)
3262 return false;
3263 if (ACPI_SUCCESS(acpi_get_devices("LEN0068", acpi_check_cb, &found, NULL)) && found)
3264 return true;
3265 found = false;
3266 return ACPI_SUCCESS(acpi_get_devices("IBM0068", acpi_check_cb, &found, NULL)) && found;
3267}
3268
3250static void update_tpacpi_mute_led(void *private_data, int enabled) 3269static void update_tpacpi_mute_led(void *private_data, int enabled)
3251{ 3270{
3252 struct hda_codec *codec = private_data; 3271 struct hda_codec *codec = private_data;
@@ -3279,6 +3298,8 @@ static void cxt_fixup_thinkpad_acpi(struct hda_codec *codec,
3279 bool removefunc = false; 3298 bool removefunc = false;
3280 3299
3281 if (action == HDA_FIXUP_ACT_PROBE) { 3300 if (action == HDA_FIXUP_ACT_PROBE) {
3301 if (!is_thinkpad(codec))
3302 return;
3282 if (!led_set_func) 3303 if (!led_set_func)
3283 led_set_func = symbol_request(tpacpi_led_set); 3304 led_set_func = symbol_request(tpacpi_led_set);
3284 if (!led_set_func) { 3305 if (!led_set_func) {
@@ -3494,6 +3515,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
3494 SND_PCI_QUIRK(0x17aa, 0x3975, "Lenovo U300s", CXT_FIXUP_STEREO_DMIC), 3515 SND_PCI_QUIRK(0x17aa, 0x3975, "Lenovo U300s", CXT_FIXUP_STEREO_DMIC),
3495 SND_PCI_QUIRK(0x17aa, 0x3977, "Lenovo IdeaPad U310", CXT_FIXUP_STEREO_DMIC), 3516 SND_PCI_QUIRK(0x17aa, 0x3977, "Lenovo IdeaPad U310", CXT_FIXUP_STEREO_DMIC),
3496 SND_PCI_QUIRK(0x17aa, 0x397b, "Lenovo S205", CXT_FIXUP_STEREO_DMIC), 3517 SND_PCI_QUIRK(0x17aa, 0x397b, "Lenovo S205", CXT_FIXUP_STEREO_DMIC),
3518 SND_PCI_QUIRK_VENDOR(0x17aa, "Thinkpad", CXT_FIXUP_THINKPAD_ACPI),
3497 SND_PCI_QUIRK(0x1c06, 0x2011, "Lemote A1004", CXT_PINCFG_LEMOTE_A1004), 3519 SND_PCI_QUIRK(0x1c06, 0x2011, "Lemote A1004", CXT_PINCFG_LEMOTE_A1004),
3498 SND_PCI_QUIRK(0x1c06, 0x2012, "Lemote A1205", CXT_PINCFG_LEMOTE_A1205), 3520 SND_PCI_QUIRK(0x1c06, 0x2012, "Lemote A1205", CXT_PINCFG_LEMOTE_A1205),
3499 {} 3521 {}
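is_thinkpad() above combines a subsystem-vendor check with acpi_get_devices(), which walks the ACPI namespace and reports a match back through a context pointer handed to the callback. A userspace sketch of that callback-with-context pattern (the walker and the device table are invented for the example):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

typedef int (*walk_cb)(const char *hid, void *context);

/* Walk a NULL-terminated table of device IDs and invoke the callback
 * for every entry matching the requested HID. */
static int walk_devices(const char *const *table, const char *hid,
                        walk_cb cb, void *context)
{
        for (; *table; table++)
                if (!strcmp(*table, hid))
                        cb(*table, context);
        return 0;
}

static int check_cb(const char *hid, void *context)
{
        bool *found = context;          /* report back through the context */
        *found = true;
        return 0;
}

int main(void)
{
        static const char *const acpi_devs[] = { "LEN0068", "PNP0C0A", NULL };
        bool found = false;

        walk_devices(acpi_devs, "LEN0068", check_cb, &found);
        printf("ThinkPad EC present: %s\n", found ? "yes" : "no");
        return 0;
}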
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 08407bed093e..f281c8068557 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -1142,32 +1142,34 @@ static void hdmi_setup_audio_infoframe(struct hda_codec *codec,
1142 1142
1143static bool hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll); 1143static bool hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll);
1144 1144
1145static void hdmi_intrinsic_event(struct hda_codec *codec, unsigned int res) 1145static void jack_callback(struct hda_codec *codec, struct hda_jack_tbl *jack)
1146{ 1146{
1147 struct hdmi_spec *spec = codec->spec; 1147 struct hdmi_spec *spec = codec->spec;
1148 int pin_idx = pin_nid_to_pin_index(spec, jack->nid);
1149 if (pin_idx < 0)
1150 return;
1151
1152 if (hdmi_present_sense(get_pin(spec, pin_idx), 1))
1153 snd_hda_jack_report_sync(codec);
1154}
1155
1156static void hdmi_intrinsic_event(struct hda_codec *codec, unsigned int res)
1157{
1148 int tag = res >> AC_UNSOL_RES_TAG_SHIFT; 1158 int tag = res >> AC_UNSOL_RES_TAG_SHIFT;
1149 int pin_nid;
1150 int pin_idx;
1151 struct hda_jack_tbl *jack; 1159 struct hda_jack_tbl *jack;
1152 int dev_entry = (res & AC_UNSOL_RES_DE) >> AC_UNSOL_RES_DE_SHIFT; 1160 int dev_entry = (res & AC_UNSOL_RES_DE) >> AC_UNSOL_RES_DE_SHIFT;
1153 1161
1154 jack = snd_hda_jack_tbl_get_from_tag(codec, tag); 1162 jack = snd_hda_jack_tbl_get_from_tag(codec, tag);
1155 if (!jack) 1163 if (!jack)
1156 return; 1164 return;
1157 pin_nid = jack->nid;
1158 jack->jack_dirty = 1; 1165 jack->jack_dirty = 1;
1159 1166
1160 _snd_printd(SND_PR_VERBOSE, 1167 _snd_printd(SND_PR_VERBOSE,
1161 "HDMI hot plug event: Codec=%d Pin=%d Device=%d Inactive=%d Presence_Detect=%d ELD_Valid=%d\n", 1168 "HDMI hot plug event: Codec=%d Pin=%d Device=%d Inactive=%d Presence_Detect=%d ELD_Valid=%d\n",
1162 codec->addr, pin_nid, dev_entry, !!(res & AC_UNSOL_RES_IA), 1169 codec->addr, jack->nid, dev_entry, !!(res & AC_UNSOL_RES_IA),
1163 !!(res & AC_UNSOL_RES_PD), !!(res & AC_UNSOL_RES_ELDV)); 1170 !!(res & AC_UNSOL_RES_PD), !!(res & AC_UNSOL_RES_ELDV));
1164 1171
1165 pin_idx = pin_nid_to_pin_index(spec, pin_nid); 1172 jack_callback(codec, jack);
1166 if (pin_idx < 0)
1167 return;
1168
1169 if (hdmi_present_sense(get_pin(spec, pin_idx), 1))
1170 snd_hda_jack_report_sync(codec);
1171} 1173}
1172 1174
1173static void hdmi_non_intrinsic_event(struct hda_codec *codec, unsigned int res) 1175static void hdmi_non_intrinsic_event(struct hda_codec *codec, unsigned int res)
@@ -2095,7 +2097,8 @@ static int generic_hdmi_init(struct hda_codec *codec)
2095 hda_nid_t pin_nid = per_pin->pin_nid; 2097 hda_nid_t pin_nid = per_pin->pin_nid;
2096 2098
2097 hdmi_init_pin(codec, pin_nid); 2099 hdmi_init_pin(codec, pin_nid);
2098 snd_hda_jack_detect_enable(codec, pin_nid, pin_nid); 2100 snd_hda_jack_detect_enable_callback(codec, pin_nid, pin_nid,
2101 codec->jackpoll_interval > 0 ? jack_callback : NULL);
2099 } 2102 }
2100 return 0; 2103 return 0;
2101} 2104}
@@ -2334,8 +2337,9 @@ static int simple_playback_build_controls(struct hda_codec *codec)
2334 int err; 2337 int err;
2335 2338
2336 per_cvt = get_cvt(spec, 0); 2339 per_cvt = get_cvt(spec, 0);
2337 err = snd_hda_create_spdif_out_ctls(codec, per_cvt->cvt_nid, 2340 err = snd_hda_create_dig_out_ctls(codec, per_cvt->cvt_nid,
2338 per_cvt->cvt_nid); 2341 per_cvt->cvt_nid,
2342 HDA_PCM_TYPE_HDMI);
2339 if (err < 0) 2343 if (err < 0)
2340 return err; 2344 return err;
2341 return simple_hdmi_build_jack(codec, 0); 2345 return simple_hdmi_build_jack(codec, 0);
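The patch_hdmi.c hunks fold the presence-sense work into a single jack_callback() that the unsolicited-event handler calls and that generic_hdmi_init() now also registers through snd_hda_jack_detect_enable_callback() when jack polling is enabled. A compact sketch of sharing one callback between the interrupt and the polling path (types and names here are placeholders, not the driver's):

#include <stdio.h>

struct jack { int nid; };

static void jack_callback(struct jack *jack)
{
        printf("re-sense pin NID 0x%02x and report jack state\n", jack->nid);
}

static void intrinsic_event(struct jack *jack)
{
        /* interrupt-driven hot-plug notification */
        jack_callback(jack);
}

static void poll_timer(struct jack *jack)
{
        /* periodic jack-poll worker */
        jack_callback(jack);
}

int main(void)
{
        struct jack hdmi_pin = { .nid = 0x05 };

        intrinsic_event(&hdmi_pin);
        poll_timer(&hdmi_pin);
        return 0;
}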
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 5e42059f10a1..c5646941539a 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -1780,8 +1780,11 @@ enum {
1780 ALC889_FIXUP_DAC_ROUTE, 1780 ALC889_FIXUP_DAC_ROUTE,
1781 ALC889_FIXUP_MBP_VREF, 1781 ALC889_FIXUP_MBP_VREF,
1782 ALC889_FIXUP_IMAC91_VREF, 1782 ALC889_FIXUP_IMAC91_VREF,
1783 ALC889_FIXUP_MBA21_VREF,
1783 ALC882_FIXUP_INV_DMIC, 1784 ALC882_FIXUP_INV_DMIC,
1784 ALC882_FIXUP_NO_PRIMARY_HP, 1785 ALC882_FIXUP_NO_PRIMARY_HP,
1786 ALC887_FIXUP_ASUS_BASS,
1787 ALC887_FIXUP_BASS_CHMAP,
1785}; 1788};
1786 1789
1787static void alc889_fixup_coef(struct hda_codec *codec, 1790static void alc889_fixup_coef(struct hda_codec *codec,
@@ -1882,17 +1885,13 @@ static void alc889_fixup_mbp_vref(struct hda_codec *codec,
1882 } 1885 }
1883} 1886}
1884 1887
1885/* Set VREF on speaker pins on imac91 */ 1888static void alc889_fixup_mac_pins(struct hda_codec *codec,
1886static void alc889_fixup_imac91_vref(struct hda_codec *codec, 1889 const hda_nid_t *nids, int num_nids)
1887 const struct hda_fixup *fix, int action)
1888{ 1890{
1889 struct alc_spec *spec = codec->spec; 1891 struct alc_spec *spec = codec->spec;
1890 static hda_nid_t nids[2] = { 0x18, 0x1a };
1891 int i; 1892 int i;
1892 1893
1893 if (action != HDA_FIXUP_ACT_INIT) 1894 for (i = 0; i < num_nids; i++) {
1894 return;
1895 for (i = 0; i < ARRAY_SIZE(nids); i++) {
1896 unsigned int val; 1895 unsigned int val;
1897 val = snd_hda_codec_get_pin_target(codec, nids[i]); 1896 val = snd_hda_codec_get_pin_target(codec, nids[i]);
1898 val |= AC_PINCTL_VREF_50; 1897 val |= AC_PINCTL_VREF_50;
@@ -1901,6 +1900,26 @@ static void alc889_fixup_imac91_vref(struct hda_codec *codec,
1901 spec->gen.keep_vref_in_automute = 1; 1900 spec->gen.keep_vref_in_automute = 1;
1902} 1901}
1903 1902
1903/* Set VREF on speaker pins on imac91 */
1904static void alc889_fixup_imac91_vref(struct hda_codec *codec,
1905 const struct hda_fixup *fix, int action)
1906{
1907 static hda_nid_t nids[2] = { 0x18, 0x1a };
1908
1909 if (action == HDA_FIXUP_ACT_INIT)
1910 alc889_fixup_mac_pins(codec, nids, ARRAY_SIZE(nids));
1911}
1912
1913/* Set VREF on speaker pins on mba21 */
1914static void alc889_fixup_mba21_vref(struct hda_codec *codec,
1915 const struct hda_fixup *fix, int action)
1916{
1917 static hda_nid_t nids[2] = { 0x18, 0x19 };
1918
1919 if (action == HDA_FIXUP_ACT_INIT)
1920 alc889_fixup_mac_pins(codec, nids, ARRAY_SIZE(nids));
1921}
1922
1904/* Don't take HP output as primary 1923/* Don't take HP output as primary
1905 * Strangely, the speaker output doesn't work on Vaio Z and some Vaio 1924 * Strangely, the speaker output doesn't work on Vaio Z and some Vaio
1906 * all-in-one desktop PCs (for example VGC-LN51JGB) through DAC 0x05 1925 * all-in-one desktop PCs (for example VGC-LN51JGB) through DAC 0x05
@@ -1915,6 +1934,9 @@ static void alc882_fixup_no_primary_hp(struct hda_codec *codec,
1915 } 1934 }
1916} 1935}
1917 1936
1937static void alc_fixup_bass_chmap(struct hda_codec *codec,
1938 const struct hda_fixup *fix, int action);
1939
1918static const struct hda_fixup alc882_fixups[] = { 1940static const struct hda_fixup alc882_fixups[] = {
1919 [ALC882_FIXUP_ABIT_AW9D_MAX] = { 1941 [ALC882_FIXUP_ABIT_AW9D_MAX] = {
1920 .type = HDA_FIXUP_PINS, 1942 .type = HDA_FIXUP_PINS,
@@ -2097,6 +2119,12 @@ static const struct hda_fixup alc882_fixups[] = {
2097 .chained = true, 2119 .chained = true,
2098 .chain_id = ALC882_FIXUP_GPIO1, 2120 .chain_id = ALC882_FIXUP_GPIO1,
2099 }, 2121 },
2122 [ALC889_FIXUP_MBA21_VREF] = {
2123 .type = HDA_FIXUP_FUNC,
2124 .v.func = alc889_fixup_mba21_vref,
2125 .chained = true,
2126 .chain_id = ALC889_FIXUP_MBP_VREF,
2127 },
2100 [ALC882_FIXUP_INV_DMIC] = { 2128 [ALC882_FIXUP_INV_DMIC] = {
2101 .type = HDA_FIXUP_FUNC, 2129 .type = HDA_FIXUP_FUNC,
2102 .v.func = alc_fixup_inv_dmic_0x12, 2130 .v.func = alc_fixup_inv_dmic_0x12,
@@ -2105,6 +2133,19 @@ static const struct hda_fixup alc882_fixups[] = {
2105 .type = HDA_FIXUP_FUNC, 2133 .type = HDA_FIXUP_FUNC,
2106 .v.func = alc882_fixup_no_primary_hp, 2134 .v.func = alc882_fixup_no_primary_hp,
2107 }, 2135 },
2136 [ALC887_FIXUP_ASUS_BASS] = {
2137 .type = HDA_FIXUP_PINS,
2138 .v.pins = (const struct hda_pintbl[]) {
2139 {0x16, 0x99130130}, /* bass speaker */
2140 {}
2141 },
2142 .chained = true,
2143 .chain_id = ALC887_FIXUP_BASS_CHMAP,
2144 },
2145 [ALC887_FIXUP_BASS_CHMAP] = {
2146 .type = HDA_FIXUP_FUNC,
2147 .v.func = alc_fixup_bass_chmap,
2148 },
2108}; 2149};
2109 2150
2110static const struct snd_pci_quirk alc882_fixup_tbl[] = { 2151static const struct snd_pci_quirk alc882_fixup_tbl[] = {
@@ -2138,6 +2179,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
2138 SND_PCI_QUIRK(0x1043, 0x1873, "ASUS W90V", ALC882_FIXUP_ASUS_W90V), 2179 SND_PCI_QUIRK(0x1043, 0x1873, "ASUS W90V", ALC882_FIXUP_ASUS_W90V),
2139 SND_PCI_QUIRK(0x1043, 0x1971, "Asus W2JC", ALC882_FIXUP_ASUS_W2JC), 2180 SND_PCI_QUIRK(0x1043, 0x1971, "Asus W2JC", ALC882_FIXUP_ASUS_W2JC),
2140 SND_PCI_QUIRK(0x1043, 0x835f, "Asus Eee 1601", ALC888_FIXUP_EEE1601), 2181 SND_PCI_QUIRK(0x1043, 0x835f, "Asus Eee 1601", ALC888_FIXUP_EEE1601),
2182 SND_PCI_QUIRK(0x1043, 0x84bc, "ASUS ET2700", ALC887_FIXUP_ASUS_BASS),
2141 SND_PCI_QUIRK(0x104d, 0x9047, "Sony Vaio TT", ALC889_FIXUP_VAIO_TT), 2183 SND_PCI_QUIRK(0x104d, 0x9047, "Sony Vaio TT", ALC889_FIXUP_VAIO_TT),
2142 SND_PCI_QUIRK(0x104d, 0x905a, "Sony Vaio Z", ALC882_FIXUP_NO_PRIMARY_HP), 2184 SND_PCI_QUIRK(0x104d, 0x905a, "Sony Vaio Z", ALC882_FIXUP_NO_PRIMARY_HP),
2143 SND_PCI_QUIRK(0x104d, 0x9043, "Sony Vaio VGC-LN51JGB", ALC882_FIXUP_NO_PRIMARY_HP), 2185 SND_PCI_QUIRK(0x104d, 0x9043, "Sony Vaio VGC-LN51JGB", ALC882_FIXUP_NO_PRIMARY_HP),
@@ -2153,7 +2195,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
2153 SND_PCI_QUIRK(0x106b, 0x3000, "iMac", ALC889_FIXUP_MBP_VREF), 2195 SND_PCI_QUIRK(0x106b, 0x3000, "iMac", ALC889_FIXUP_MBP_VREF),
2154 SND_PCI_QUIRK(0x106b, 0x3200, "iMac 7,1 Aluminum", ALC882_FIXUP_EAPD), 2196 SND_PCI_QUIRK(0x106b, 0x3200, "iMac 7,1 Aluminum", ALC882_FIXUP_EAPD),
2155 SND_PCI_QUIRK(0x106b, 0x3400, "MacBookAir 1,1", ALC889_FIXUP_MBP_VREF), 2197 SND_PCI_QUIRK(0x106b, 0x3400, "MacBookAir 1,1", ALC889_FIXUP_MBP_VREF),
2156 SND_PCI_QUIRK(0x106b, 0x3500, "MacBookAir 2,1", ALC889_FIXUP_MBP_VREF), 2198 SND_PCI_QUIRK(0x106b, 0x3500, "MacBookAir 2,1", ALC889_FIXUP_MBA21_VREF),
2157 SND_PCI_QUIRK(0x106b, 0x3600, "Macbook 3,1", ALC889_FIXUP_MBP_VREF), 2199 SND_PCI_QUIRK(0x106b, 0x3600, "Macbook 3,1", ALC889_FIXUP_MBP_VREF),
2158 SND_PCI_QUIRK(0x106b, 0x3800, "MacbookPro 4,1", ALC889_FIXUP_MBP_VREF), 2200 SND_PCI_QUIRK(0x106b, 0x3800, "MacbookPro 4,1", ALC889_FIXUP_MBP_VREF),
2159 SND_PCI_QUIRK(0x106b, 0x3e00, "iMac 24 Aluminum", ALC885_FIXUP_MACPRO_GPIO), 2201 SND_PCI_QUIRK(0x106b, 0x3e00, "iMac 24 Aluminum", ALC885_FIXUP_MACPRO_GPIO),
@@ -3268,6 +3310,7 @@ static void alc_headset_mode_ctia(struct hda_codec *codec)
3268 alc_write_coef_idx(codec, 0x18, 0x7388); 3310 alc_write_coef_idx(codec, 0x18, 0x7388);
3269 break; 3311 break;
3270 case 0x10ec0668: 3312 case 0x10ec0668:
3313 alc_write_coef_idx(codec, 0x11, 0x0001);
3271 alc_write_coef_idx(codec, 0x15, 0x0d60); 3314 alc_write_coef_idx(codec, 0x15, 0x0d60);
3272 alc_write_coef_idx(codec, 0xc3, 0x0000); 3315 alc_write_coef_idx(codec, 0xc3, 0x0000);
3273 break; 3316 break;
@@ -3296,6 +3339,7 @@ static void alc_headset_mode_omtp(struct hda_codec *codec)
3296 alc_write_coef_idx(codec, 0x18, 0x7388); 3339 alc_write_coef_idx(codec, 0x18, 0x7388);
3297 break; 3340 break;
3298 case 0x10ec0668: 3341 case 0x10ec0668:
3342 alc_write_coef_idx(codec, 0x11, 0x0001);
3299 alc_write_coef_idx(codec, 0x15, 0x0d50); 3343 alc_write_coef_idx(codec, 0x15, 0x0d50);
3300 alc_write_coef_idx(codec, 0xc3, 0x0000); 3344 alc_write_coef_idx(codec, 0xc3, 0x0000);
3301 break; 3345 break;
@@ -3581,11 +3625,6 @@ static void alc283_hp_automute_hook(struct hda_codec *codec,
3581 vref); 3625 vref);
3582} 3626}
3583 3627
3584static void alc283_chromebook_caps(struct hda_codec *codec)
3585{
3586 snd_hda_override_wcaps(codec, 0x03, 0);
3587}
3588
3589static void alc283_fixup_chromebook(struct hda_codec *codec, 3628static void alc283_fixup_chromebook(struct hda_codec *codec,
3590 const struct hda_fixup *fix, int action) 3629 const struct hda_fixup *fix, int action)
3591{ 3630{
@@ -3594,9 +3633,26 @@ static void alc283_fixup_chromebook(struct hda_codec *codec,
3594 3633
3595 switch (action) { 3634 switch (action) {
3596 case HDA_FIXUP_ACT_PRE_PROBE: 3635 case HDA_FIXUP_ACT_PRE_PROBE:
3597 alc283_chromebook_caps(codec); 3636 snd_hda_override_wcaps(codec, 0x03, 0);
3598 /* Disable AA-loopback as it causes white noise */ 3637 /* Disable AA-loopback as it causes white noise */
3599 spec->gen.mixer_nid = 0; 3638 spec->gen.mixer_nid = 0;
3639 break;
3640 case HDA_FIXUP_ACT_INIT:
3641 /* Enable Line1 input control by verb */
3642 val = alc_read_coef_idx(codec, 0x1a);
3643 alc_write_coef_idx(codec, 0x1a, val | (1 << 4));
3644 break;
3645 }
3646}
3647
3648static void alc283_fixup_sense_combo_jack(struct hda_codec *codec,
3649 const struct hda_fixup *fix, int action)
3650{
3651 struct alc_spec *spec = codec->spec;
3652 int val;
3653
3654 switch (action) {
3655 case HDA_FIXUP_ACT_PRE_PROBE:
3600 spec->gen.hp_automute_hook = alc283_hp_automute_hook; 3656 spec->gen.hp_automute_hook = alc283_hp_automute_hook;
3601 break; 3657 break;
3602 case HDA_FIXUP_ACT_INIT: 3658 case HDA_FIXUP_ACT_INIT:
@@ -3604,9 +3660,6 @@ static void alc283_fixup_chromebook(struct hda_codec *codec,
3604 /* Set to manual mode */ 3660 /* Set to manual mode */
3605 val = alc_read_coef_idx(codec, 0x06); 3661 val = alc_read_coef_idx(codec, 0x06);
3606 alc_write_coef_idx(codec, 0x06, val & ~0x000c); 3662 alc_write_coef_idx(codec, 0x06, val & ~0x000c);
3607 /* Enable Line1 input control by verb */
3608 val = alc_read_coef_idx(codec, 0x1a);
3609 alc_write_coef_idx(codec, 0x1a, val | (1 << 4));
3610 break; 3663 break;
3611 } 3664 }
3612} 3665}
@@ -3796,11 +3849,14 @@ enum {
3796 ALC269_FIXUP_ASUS_X101, 3849 ALC269_FIXUP_ASUS_X101,
3797 ALC271_FIXUP_AMIC_MIC2, 3850 ALC271_FIXUP_AMIC_MIC2,
3798 ALC271_FIXUP_HP_GATE_MIC_JACK, 3851 ALC271_FIXUP_HP_GATE_MIC_JACK,
3852 ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572,
3799 ALC269_FIXUP_ACER_AC700, 3853 ALC269_FIXUP_ACER_AC700,
3800 ALC269_FIXUP_LIMIT_INT_MIC_BOOST, 3854 ALC269_FIXUP_LIMIT_INT_MIC_BOOST,
3855 ALC269VB_FIXUP_ASUS_ZENBOOK,
3801 ALC269_FIXUP_LIMIT_INT_MIC_BOOST_MUTE_LED, 3856 ALC269_FIXUP_LIMIT_INT_MIC_BOOST_MUTE_LED,
3802 ALC269VB_FIXUP_ORDISSIMO_EVE2, 3857 ALC269VB_FIXUP_ORDISSIMO_EVE2,
3803 ALC283_FIXUP_CHROME_BOOK, 3858 ALC283_FIXUP_CHROME_BOOK,
3859 ALC283_FIXUP_SENSE_COMBO_JACK,
3804 ALC282_FIXUP_ASUS_TX300, 3860 ALC282_FIXUP_ASUS_TX300,
3805 ALC283_FIXUP_INT_MIC, 3861 ALC283_FIXUP_INT_MIC,
3806 ALC290_FIXUP_MONO_SPEAKERS, 3862 ALC290_FIXUP_MONO_SPEAKERS,
@@ -4056,6 +4112,12 @@ static const struct hda_fixup alc269_fixups[] = {
4056 .chained = true, 4112 .chained = true,
4057 .chain_id = ALC271_FIXUP_AMIC_MIC2, 4113 .chain_id = ALC271_FIXUP_AMIC_MIC2,
4058 }, 4114 },
4115 [ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572] = {
4116 .type = HDA_FIXUP_FUNC,
4117 .v.func = alc269_fixup_limit_int_mic_boost,
4118 .chained = true,
4119 .chain_id = ALC271_FIXUP_HP_GATE_MIC_JACK,
4120 },
4059 [ALC269_FIXUP_ACER_AC700] = { 4121 [ALC269_FIXUP_ACER_AC700] = {
4060 .type = HDA_FIXUP_PINS, 4122 .type = HDA_FIXUP_PINS,
4061 .v.pins = (const struct hda_pintbl[]) { 4123 .v.pins = (const struct hda_pintbl[]) {
@@ -4075,6 +4137,12 @@ static const struct hda_fixup alc269_fixups[] = {
4075 .chained = true, 4137 .chained = true,
4076 .chain_id = ALC269_FIXUP_THINKPAD_ACPI, 4138 .chain_id = ALC269_FIXUP_THINKPAD_ACPI,
4077 }, 4139 },
4140 [ALC269VB_FIXUP_ASUS_ZENBOOK] = {
4141 .type = HDA_FIXUP_FUNC,
4142 .v.func = alc269_fixup_limit_int_mic_boost,
4143 .chained = true,
4144 .chain_id = ALC269VB_FIXUP_DMIC,
4145 },
4078 [ALC269_FIXUP_LIMIT_INT_MIC_BOOST_MUTE_LED] = { 4146 [ALC269_FIXUP_LIMIT_INT_MIC_BOOST_MUTE_LED] = {
4079 .type = HDA_FIXUP_FUNC, 4147 .type = HDA_FIXUP_FUNC,
4080 .v.func = alc269_fixup_limit_int_mic_boost, 4148 .v.func = alc269_fixup_limit_int_mic_boost,
@@ -4094,6 +4162,12 @@ static const struct hda_fixup alc269_fixups[] = {
4094 .type = HDA_FIXUP_FUNC, 4162 .type = HDA_FIXUP_FUNC,
4095 .v.func = alc283_fixup_chromebook, 4163 .v.func = alc283_fixup_chromebook,
4096 }, 4164 },
4165 [ALC283_FIXUP_SENSE_COMBO_JACK] = {
4166 .type = HDA_FIXUP_FUNC,
4167 .v.func = alc283_fixup_sense_combo_jack,
4168 .chained = true,
4169 .chain_id = ALC283_FIXUP_CHROME_BOOK,
4170 },
4097 [ALC282_FIXUP_ASUS_TX300] = { 4171 [ALC282_FIXUP_ASUS_TX300] = {
4098 .type = HDA_FIXUP_FUNC, 4172 .type = HDA_FIXUP_FUNC,
4099 .v.func = alc282_fixup_asus_tx300, 4173 .v.func = alc282_fixup_asus_tx300,
@@ -4141,6 +4215,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
4141 SND_PCI_QUIRK(0x1025, 0x0740, "Acer AO725", ALC271_FIXUP_HP_GATE_MIC_JACK), 4215 SND_PCI_QUIRK(0x1025, 0x0740, "Acer AO725", ALC271_FIXUP_HP_GATE_MIC_JACK),
4142 SND_PCI_QUIRK(0x1025, 0x0742, "Acer AO756", ALC271_FIXUP_HP_GATE_MIC_JACK), 4216 SND_PCI_QUIRK(0x1025, 0x0742, "Acer AO756", ALC271_FIXUP_HP_GATE_MIC_JACK),
4143 SND_PCI_QUIRK_VENDOR(0x1025, "Acer Aspire", ALC271_FIXUP_DMIC), 4217 SND_PCI_QUIRK_VENDOR(0x1025, "Acer Aspire", ALC271_FIXUP_DMIC),
4218 SND_PCI_QUIRK(0x1025, 0x0775, "Acer Aspire E1-572", ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572),
4144 SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z), 4219 SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z),
4145 SND_PCI_QUIRK(0x1028, 0x05bd, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE), 4220 SND_PCI_QUIRK(0x1028, 0x05bd, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
4146 SND_PCI_QUIRK(0x1028, 0x05be, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE), 4221 SND_PCI_QUIRK(0x1028, 0x05be, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
@@ -4172,11 +4247,16 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
4172 SND_PCI_QUIRK(0x1028, 0x0606, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE), 4247 SND_PCI_QUIRK(0x1028, 0x0606, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
4173 SND_PCI_QUIRK(0x1028, 0x0608, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE), 4248 SND_PCI_QUIRK(0x1028, 0x0608, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
4174 SND_PCI_QUIRK(0x1028, 0x0609, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE), 4249 SND_PCI_QUIRK(0x1028, 0x0609, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
4250 SND_PCI_QUIRK(0x1028, 0x0610, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
4175 SND_PCI_QUIRK(0x1028, 0x0613, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE), 4251 SND_PCI_QUIRK(0x1028, 0x0613, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
4176 SND_PCI_QUIRK(0x1028, 0x0614, "Dell Inspiron 3135", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE), 4252 SND_PCI_QUIRK(0x1028, 0x0614, "Dell Inspiron 3135", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
4177 SND_PCI_QUIRK(0x1028, 0x0616, "Dell Vostro 5470", ALC290_FIXUP_MONO_SPEAKERS), 4253 SND_PCI_QUIRK(0x1028, 0x0616, "Dell Vostro 5470", ALC290_FIXUP_MONO_SPEAKERS),
4178 SND_PCI_QUIRK(0x1028, 0x061f, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE), 4254 SND_PCI_QUIRK(0x1028, 0x061f, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
4255 SND_PCI_QUIRK(0x1028, 0x0629, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
4256 SND_PCI_QUIRK(0x1028, 0x0638, "Dell Inspiron 5439", ALC290_FIXUP_MONO_SPEAKERS),
4257 SND_PCI_QUIRK(0x1028, 0x063e, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
4179 SND_PCI_QUIRK(0x1028, 0x063f, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE), 4258 SND_PCI_QUIRK(0x1028, 0x063f, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
4259 SND_PCI_QUIRK(0x1028, 0x0640, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
4180 SND_PCI_QUIRK(0x1028, 0x15cc, "Dell X5 Precision", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE), 4260 SND_PCI_QUIRK(0x1028, 0x15cc, "Dell X5 Precision", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
4181 SND_PCI_QUIRK(0x1028, 0x15cd, "Dell X5 Precision", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE), 4261 SND_PCI_QUIRK(0x1028, 0x15cd, "Dell X5 Precision", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
4182 SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2), 4262 SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
@@ -4184,13 +4264,12 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
4184 SND_PCI_QUIRK(0x103c, 0x1973, "HP Pavilion", ALC269_FIXUP_HP_MUTE_LED_MIC1), 4264 SND_PCI_QUIRK(0x103c, 0x1973, "HP Pavilion", ALC269_FIXUP_HP_MUTE_LED_MIC1),
4185 SND_PCI_QUIRK(0x103c, 0x1983, "HP Pavilion", ALC269_FIXUP_HP_MUTE_LED_MIC1), 4265 SND_PCI_QUIRK(0x103c, 0x1983, "HP Pavilion", ALC269_FIXUP_HP_MUTE_LED_MIC1),
4186 SND_PCI_QUIRK(0x103c, 0x218b, "HP", ALC269_FIXUP_LIMIT_INT_MIC_BOOST_MUTE_LED), 4266 SND_PCI_QUIRK(0x103c, 0x218b, "HP", ALC269_FIXUP_LIMIT_INT_MIC_BOOST_MUTE_LED),
4187 SND_PCI_QUIRK(0x103c, 0x21ed, "HP Falco Chromebook", ALC283_FIXUP_CHROME_BOOK),
4188 SND_PCI_QUIRK_VENDOR(0x103c, "HP", ALC269_FIXUP_HP_MUTE_LED), 4267 SND_PCI_QUIRK_VENDOR(0x103c, "HP", ALC269_FIXUP_HP_MUTE_LED),
4189 SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300), 4268 SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
4190 SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), 4269 SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
4191 SND_PCI_QUIRK(0x1043, 0x115d, "Asus 1015E", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), 4270 SND_PCI_QUIRK(0x1043, 0x115d, "Asus 1015E", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
4192 SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_DMIC), 4271 SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_ASUS_ZENBOOK),
4193 SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_DMIC), 4272 SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK),
4194 SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC), 4273 SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC),
4195 SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW), 4274 SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW),
4196 SND_PCI_QUIRK(0x1043, 0x1b13, "Asus U41SV", ALC269_FIXUP_INV_DMIC), 4275 SND_PCI_QUIRK(0x1043, 0x1b13, "Asus U41SV", ALC269_FIXUP_INV_DMIC),
@@ -4292,6 +4371,8 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
4292 {.id = ALC269_FIXUP_HP_GPIO_LED, .name = "hp-gpio-led"}, 4371 {.id = ALC269_FIXUP_HP_GPIO_LED, .name = "hp-gpio-led"},
4293 {.id = ALC269_FIXUP_DELL1_MIC_NO_PRESENCE, .name = "dell-headset-multi"}, 4372 {.id = ALC269_FIXUP_DELL1_MIC_NO_PRESENCE, .name = "dell-headset-multi"},
4294 {.id = ALC269_FIXUP_DELL2_MIC_NO_PRESENCE, .name = "dell-headset-dock"}, 4373 {.id = ALC269_FIXUP_DELL2_MIC_NO_PRESENCE, .name = "dell-headset-dock"},
4374 {.id = ALC283_FIXUP_CHROME_BOOK, .name = "alc283-chrome"},
4375 {.id = ALC283_FIXUP_SENSE_COMBO_JACK, .name = "alc283-sense-combo"},
4295 {} 4376 {}
4296}; 4377};
4297 4378
@@ -4467,6 +4548,7 @@ enum {
4467 ALC861_FIXUP_AMP_VREF_0F, 4548 ALC861_FIXUP_AMP_VREF_0F,
4468 ALC861_FIXUP_NO_JACK_DETECT, 4549 ALC861_FIXUP_NO_JACK_DETECT,
4469 ALC861_FIXUP_ASUS_A6RP, 4550 ALC861_FIXUP_ASUS_A6RP,
4551 ALC660_FIXUP_ASUS_W7J,
4470}; 4552};
4471 4553
4472/* On some laptops, VREF of pin 0x0f is abused for controlling the main amp */ 4554/* On some laptops, VREF of pin 0x0f is abused for controlling the main amp */
@@ -4516,10 +4598,22 @@ static const struct hda_fixup alc861_fixups[] = {
4516 .v.func = alc861_fixup_asus_amp_vref_0f, 4598 .v.func = alc861_fixup_asus_amp_vref_0f,
4517 .chained = true, 4599 .chained = true,
4518 .chain_id = ALC861_FIXUP_NO_JACK_DETECT, 4600 .chain_id = ALC861_FIXUP_NO_JACK_DETECT,
4601 },
4602 [ALC660_FIXUP_ASUS_W7J] = {
4603 .type = HDA_FIXUP_VERBS,
4604 .v.verbs = (const struct hda_verb[]) {
4605 /* ASUS W7J needs a magic pin setup on unused NID 0x10
4606 * for enabling outputs
4607 */
4608 {0x10, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x24},
4609 { }
4610 },
4519 } 4611 }
4520}; 4612};
4521 4613
4522static const struct snd_pci_quirk alc861_fixup_tbl[] = { 4614static const struct snd_pci_quirk alc861_fixup_tbl[] = {
4615 SND_PCI_QUIRK(0x1043, 0x1253, "ASUS W7J", ALC660_FIXUP_ASUS_W7J),
4616 SND_PCI_QUIRK(0x1043, 0x1263, "ASUS Z35HL", ALC660_FIXUP_ASUS_W7J),
4523 SND_PCI_QUIRK(0x1043, 0x1393, "ASUS A6Rp", ALC861_FIXUP_ASUS_A6RP), 4617 SND_PCI_QUIRK(0x1043, 0x1393, "ASUS A6Rp", ALC861_FIXUP_ASUS_A6RP),
4524 SND_PCI_QUIRK_VENDOR(0x1043, "ASUS laptop", ALC861_FIXUP_AMP_VREF_0F), 4618 SND_PCI_QUIRK_VENDOR(0x1043, "ASUS laptop", ALC861_FIXUP_AMP_VREF_0F),
4525 SND_PCI_QUIRK(0x1462, 0x7254, "HP DX2200", ALC861_FIXUP_NO_JACK_DETECT), 4619 SND_PCI_QUIRK(0x1462, 0x7254, "HP DX2200", ALC861_FIXUP_NO_JACK_DETECT),
@@ -4715,7 +4809,7 @@ static const struct snd_pcm_chmap_elem asus_pcm_2_1_chmaps[] = {
4715}; 4809};
4716 4810
4717/* override the 2.1 chmap */ 4811/* override the 2.1 chmap */
4718static void alc662_fixup_bass_chmap(struct hda_codec *codec, 4812static void alc_fixup_bass_chmap(struct hda_codec *codec,
4719 const struct hda_fixup *fix, int action) 4813 const struct hda_fixup *fix, int action)
4720{ 4814{
4721 if (action == HDA_FIXUP_ACT_BUILD) { 4815 if (action == HDA_FIXUP_ACT_BUILD) {
@@ -4923,7 +5017,7 @@ static const struct hda_fixup alc662_fixups[] = {
4923 }, 5017 },
4924 [ALC662_FIXUP_BASS_CHMAP] = { 5018 [ALC662_FIXUP_BASS_CHMAP] = {
4925 .type = HDA_FIXUP_FUNC, 5019 .type = HDA_FIXUP_FUNC,
4926 .v.func = alc662_fixup_bass_chmap, 5020 .v.func = alc_fixup_bass_chmap,
4927 .chained = true, 5021 .chained = true,
4928 .chain_id = ALC662_FIXUP_ASUS_MODE4 5022 .chain_id = ALC662_FIXUP_ASUS_MODE4
4929 }, 5023 },
@@ -4936,7 +5030,7 @@ static const struct hda_fixup alc662_fixups[] = {
4936 }, 5030 },
4937 [ALC662_FIXUP_BASS_1A_CHMAP] = { 5031 [ALC662_FIXUP_BASS_1A_CHMAP] = {
4938 .type = HDA_FIXUP_FUNC, 5032 .type = HDA_FIXUP_FUNC,
4939 .v.func = alc662_fixup_bass_chmap, 5033 .v.func = alc_fixup_bass_chmap,
4940 .chained = true, 5034 .chained = true,
4941 .chain_id = ALC662_FIXUP_BASS_1A, 5035 .chain_id = ALC662_FIXUP_BASS_1A,
4942 }, 5036 },
@@ -4952,8 +5046,11 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
4952 SND_PCI_QUIRK(0x1025, 0x038b, "Acer Aspire 8943G", ALC662_FIXUP_ASPIRE), 5046 SND_PCI_QUIRK(0x1025, 0x038b, "Acer Aspire 8943G", ALC662_FIXUP_ASPIRE),
4953 SND_PCI_QUIRK(0x1028, 0x05d8, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE), 5047 SND_PCI_QUIRK(0x1028, 0x05d8, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
4954 SND_PCI_QUIRK(0x1028, 0x05db, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE), 5048 SND_PCI_QUIRK(0x1028, 0x05db, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
5049 SND_PCI_QUIRK(0x1028, 0x0623, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
5050 SND_PCI_QUIRK(0x1028, 0x0624, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
4955 SND_PCI_QUIRK(0x1028, 0x0625, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE), 5051 SND_PCI_QUIRK(0x1028, 0x0625, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
4956 SND_PCI_QUIRK(0x1028, 0x0626, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE), 5052 SND_PCI_QUIRK(0x1028, 0x0626, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
5053 SND_PCI_QUIRK(0x1028, 0x0628, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
4957 SND_PCI_QUIRK(0x103c, 0x1632, "HP RP5800", ALC662_FIXUP_HP_RP5800), 5054 SND_PCI_QUIRK(0x103c, 0x1632, "HP RP5800", ALC662_FIXUP_HP_RP5800),
4958 SND_PCI_QUIRK(0x1043, 0x11cd, "Asus N550", ALC662_FIXUP_BASS_1A_CHMAP), 5055 SND_PCI_QUIRK(0x1043, 0x11cd, "Asus N550", ALC662_FIXUP_BASS_1A_CHMAP),
4959 SND_PCI_QUIRK(0x1043, 0x1477, "ASUS N56VZ", ALC662_FIXUP_BASS_CHMAP), 5056 SND_PCI_QUIRK(0x1043, 0x1477, "ASUS N56VZ", ALC662_FIXUP_BASS_CHMAP),
@@ -5118,6 +5215,7 @@ static int patch_alc662(struct hda_codec *codec)
5118 case 0x10ec0272: 5215 case 0x10ec0272:
5119 case 0x10ec0663: 5216 case 0x10ec0663:
5120 case 0x10ec0665: 5217 case 0x10ec0665:
5218 case 0x10ec0668:
5121 set_beep_amp(spec, 0x0b, 0x04, HDA_INPUT); 5219 set_beep_amp(spec, 0x0b, 0x04, HDA_INPUT);
5122 break; 5220 break;
5123 case 0x10ec0273: 5221 case 0x10ec0273:
@@ -5175,6 +5273,7 @@ static int patch_alc680(struct hda_codec *codec)
5175 */ 5273 */
5176static const struct hda_codec_preset snd_hda_preset_realtek[] = { 5274static const struct hda_codec_preset snd_hda_preset_realtek[] = {
5177 { .id = 0x10ec0221, .name = "ALC221", .patch = patch_alc269 }, 5275 { .id = 0x10ec0221, .name = "ALC221", .patch = patch_alc269 },
5276 { .id = 0x10ec0231, .name = "ALC231", .patch = patch_alc269 },
5178 { .id = 0x10ec0233, .name = "ALC233", .patch = patch_alc269 }, 5277 { .id = 0x10ec0233, .name = "ALC233", .patch = patch_alc269 },
5179 { .id = 0x10ec0255, .name = "ALC255", .patch = patch_alc269 }, 5278 { .id = 0x10ec0255, .name = "ALC255", .patch = patch_alc269 },
5180 { .id = 0x10ec0260, .name = "ALC260", .patch = patch_alc260 }, 5279 { .id = 0x10ec0260, .name = "ALC260", .patch = patch_alc260 },
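Most of the new patch_realtek.c entries reuse the chained-fixup mechanism: an entry such as ALC889_FIXUP_MBA21_VREF carries chained = true and a chain_id, so applying it also applies ALC889_FIXUP_MBP_VREF. A reduced model of that table walk (IDs and handlers are invented for the sketch):

#include <stdio.h>

enum { FIX_NONE = -1, FIX_VREF, FIX_MBA21, FIX_COUNT };

struct fixup {
        void (*func)(void);
        int chain_id;           /* next fixup to apply, or FIX_NONE */
};

static void set_vref(void) { printf("set VREF on speaker pins\n"); }
static void mba21(void)    { printf("apply MacBook Air 2,1 pin setup\n"); }

static const struct fixup fixups[FIX_COUNT] = {
        [FIX_VREF]  = { .func = set_vref, .chain_id = FIX_NONE },
        [FIX_MBA21] = { .func = mba21,    .chain_id = FIX_VREF },
};

static void apply_fixup(int id)
{
        while (id != FIX_NONE) {
                fixups[id].func();
                id = fixups[id].chain_id;
        }
}

int main(void)
{
        apply_fixup(FIX_MBA21);  /* runs mba21, then the chained vref fixup */
        return 0;
}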
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index d2cc0041d9d3..088a5afbd1b9 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -2094,7 +2094,8 @@ static void stac92hd83xxx_fixup_hp_mic_led(struct hda_codec *codec,
2094 2094
2095 if (action == HDA_FIXUP_ACT_PRE_PROBE) { 2095 if (action == HDA_FIXUP_ACT_PRE_PROBE) {
2096 spec->mic_mute_led_gpio = 0x08; /* GPIO3 */ 2096 spec->mic_mute_led_gpio = 0x08; /* GPIO3 */
2097 codec->bus->avoid_link_reset = 1; 2097 /* resetting controller clears GPIO, so we need to keep on */
2098 codec->bus->power_keep_link_on = 1;
2098 } 2099 }
2099} 2100}
2100 2101
diff --git a/sound/soc/atmel/atmel_ssc_dai.c b/sound/soc/atmel/atmel_ssc_dai.c
index 8697cedccd21..1ead3c977a51 100644
--- a/sound/soc/atmel/atmel_ssc_dai.c
+++ b/sound/soc/atmel/atmel_ssc_dai.c
@@ -648,7 +648,7 @@ static int atmel_ssc_prepare(struct snd_pcm_substream *substream,
648 648
649 dma_params = ssc_p->dma_params[dir]; 649 dma_params = ssc_p->dma_params[dir];
650 650
651 ssc_writel(ssc_p->ssc->regs, CR, dma_params->mask->ssc_enable); 651 ssc_writel(ssc_p->ssc->regs, CR, dma_params->mask->ssc_disable);
652 ssc_writel(ssc_p->ssc->regs, IDR, dma_params->mask->ssc_error); 652 ssc_writel(ssc_p->ssc->regs, IDR, dma_params->mask->ssc_error);
653 653
654 pr_debug("%s enabled SSC_SR=0x%08x\n", 654 pr_debug("%s enabled SSC_SR=0x%08x\n",
@@ -657,6 +657,33 @@ static int atmel_ssc_prepare(struct snd_pcm_substream *substream,
657 return 0; 657 return 0;
658} 658}
659 659
660static int atmel_ssc_trigger(struct snd_pcm_substream *substream,
661 int cmd, struct snd_soc_dai *dai)
662{
663 struct atmel_ssc_info *ssc_p = &ssc_info[dai->id];
664 struct atmel_pcm_dma_params *dma_params;
665 int dir;
666
667 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
668 dir = 0;
669 else
670 dir = 1;
671
672 dma_params = ssc_p->dma_params[dir];
673
674 switch (cmd) {
675 case SNDRV_PCM_TRIGGER_START:
676 case SNDRV_PCM_TRIGGER_RESUME:
677 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
678 ssc_writel(ssc_p->ssc->regs, CR, dma_params->mask->ssc_enable);
679 break;
680 default:
681 ssc_writel(ssc_p->ssc->regs, CR, dma_params->mask->ssc_disable);
682 break;
683 }
684
685 return 0;
686}
660 687
661#ifdef CONFIG_PM 688#ifdef CONFIG_PM
662static int atmel_ssc_suspend(struct snd_soc_dai *cpu_dai) 689static int atmel_ssc_suspend(struct snd_soc_dai *cpu_dai)
@@ -731,6 +758,7 @@ static const struct snd_soc_dai_ops atmel_ssc_dai_ops = {
731 .startup = atmel_ssc_startup, 758 .startup = atmel_ssc_startup,
732 .shutdown = atmel_ssc_shutdown, 759 .shutdown = atmel_ssc_shutdown,
733 .prepare = atmel_ssc_prepare, 760 .prepare = atmel_ssc_prepare,
761 .trigger = atmel_ssc_trigger,
734 .hw_params = atmel_ssc_hw_params, 762 .hw_params = atmel_ssc_hw_params,
735 .set_fmt = atmel_ssc_set_dai_fmt, 763 .set_fmt = atmel_ssc_set_dai_fmt,
736 .set_clkdiv = atmel_ssc_set_dai_clkdiv, 764 .set_clkdiv = atmel_ssc_set_dai_clkdiv,
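The atmel_ssc_dai.c change moves the SSC enable out of prepare() into a new trigger() callback, so the receiver or transmitter only runs between the start and stop trigger commands. A standalone sketch of that command handling (the command constants and the register write are stand-ins for the ALSA and SSC definitions):

#include <stdio.h>

enum { TRIGGER_START, TRIGGER_RESUME, TRIGGER_PAUSE_RELEASE,
       TRIGGER_STOP, TRIGGER_SUSPEND, TRIGGER_PAUSE_PUSH };

static void ssc_write_cr(unsigned int mask) { printf("CR <- 0x%08x\n", mask); }

/* Start-type commands enable the interface, everything else disables it. */
static int ssc_trigger(int cmd, unsigned int enable_mask,
                       unsigned int disable_mask)
{
        switch (cmd) {
        case TRIGGER_START:
        case TRIGGER_RESUME:
        case TRIGGER_PAUSE_RELEASE:
                ssc_write_cr(enable_mask);
                break;
        default:
                ssc_write_cr(disable_mask);
                break;
        }
        return 0;
}

int main(void)
{
        ssc_trigger(TRIGGER_START, 0x1, 0x2);
        ssc_trigger(TRIGGER_STOP, 0x1, 0x2);
        return 0;
}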
diff --git a/sound/soc/atmel/sam9x5_wm8731.c b/sound/soc/atmel/sam9x5_wm8731.c
index 992ae38d5a15..7d6a9055874b 100644
--- a/sound/soc/atmel/sam9x5_wm8731.c
+++ b/sound/soc/atmel/sam9x5_wm8731.c
@@ -97,6 +97,8 @@ static int sam9x5_wm8731_driver_probe(struct platform_device *pdev)
97 goto out; 97 goto out;
98 } 98 }
99 99
100 snd_soc_card_set_drvdata(card, priv);
101
100 card->dev = &pdev->dev; 102 card->dev = &pdev->dev;
101 card->owner = THIS_MODULE; 103 card->owner = THIS_MODULE;
102 card->dai_link = dai; 104 card->dai_link = dai;
@@ -107,7 +109,7 @@ static int sam9x5_wm8731_driver_probe(struct platform_device *pdev)
107 dai->stream_name = "WM8731 PCM"; 109 dai->stream_name = "WM8731 PCM";
108 dai->codec_dai_name = "wm8731-hifi"; 110 dai->codec_dai_name = "wm8731-hifi";
109 dai->init = sam9x5_wm8731_init; 111 dai->init = sam9x5_wm8731_init;
110 dai->dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF 112 dai->dai_fmt = SND_SOC_DAIFMT_DSP_A | SND_SOC_DAIFMT_NB_NF
111 | SND_SOC_DAIFMT_CBM_CFM; 113 | SND_SOC_DAIFMT_CBM_CFM;
112 114
113 ret = snd_soc_of_parse_card_name(card, "atmel,model"); 115 ret = snd_soc_of_parse_card_name(card, "atmel,model");
diff --git a/sound/soc/codecs/wm5110.c b/sound/soc/codecs/wm5110.c
index c3c7396a6181..0ab2dc296474 100644
--- a/sound/soc/codecs/wm5110.c
+++ b/sound/soc/codecs/wm5110.c
@@ -248,19 +248,6 @@ ARIZONA_MIXER_CONTROLS("SPKDAT1R", ARIZONA_OUT5RMIX_INPUT_1_SOURCE),
248ARIZONA_MIXER_CONTROLS("SPKDAT2L", ARIZONA_OUT6LMIX_INPUT_1_SOURCE), 248ARIZONA_MIXER_CONTROLS("SPKDAT2L", ARIZONA_OUT6LMIX_INPUT_1_SOURCE),
249ARIZONA_MIXER_CONTROLS("SPKDAT2R", ARIZONA_OUT6RMIX_INPUT_1_SOURCE), 249ARIZONA_MIXER_CONTROLS("SPKDAT2R", ARIZONA_OUT6RMIX_INPUT_1_SOURCE),
250 250
251SOC_SINGLE("HPOUT1 High Performance Switch", ARIZONA_OUTPUT_PATH_CONFIG_1L,
252 ARIZONA_OUT1_OSR_SHIFT, 1, 0),
253SOC_SINGLE("HPOUT2 High Performance Switch", ARIZONA_OUTPUT_PATH_CONFIG_2L,
254 ARIZONA_OUT2_OSR_SHIFT, 1, 0),
255SOC_SINGLE("HPOUT3 High Performance Switch", ARIZONA_OUTPUT_PATH_CONFIG_3L,
256 ARIZONA_OUT3_OSR_SHIFT, 1, 0),
257SOC_SINGLE("Speaker High Performance Switch", ARIZONA_OUTPUT_PATH_CONFIG_4L,
258 ARIZONA_OUT4_OSR_SHIFT, 1, 0),
259SOC_SINGLE("SPKDAT1 High Performance Switch", ARIZONA_OUTPUT_PATH_CONFIG_5L,
260 ARIZONA_OUT5_OSR_SHIFT, 1, 0),
261SOC_SINGLE("SPKDAT2 High Performance Switch", ARIZONA_OUTPUT_PATH_CONFIG_6L,
262 ARIZONA_OUT6_OSR_SHIFT, 1, 0),
263
264SOC_DOUBLE_R("HPOUT1 Digital Switch", ARIZONA_DAC_DIGITAL_VOLUME_1L, 251SOC_DOUBLE_R("HPOUT1 Digital Switch", ARIZONA_DAC_DIGITAL_VOLUME_1L,
265 ARIZONA_DAC_DIGITAL_VOLUME_1R, ARIZONA_OUT1L_MUTE_SHIFT, 1, 1), 252 ARIZONA_DAC_DIGITAL_VOLUME_1R, ARIZONA_OUT1L_MUTE_SHIFT, 1, 1),
266SOC_DOUBLE_R("HPOUT2 Digital Switch", ARIZONA_DAC_DIGITAL_VOLUME_2L, 253SOC_DOUBLE_R("HPOUT2 Digital Switch", ARIZONA_DAC_DIGITAL_VOLUME_2L,
@@ -293,18 +280,6 @@ SOC_DOUBLE_R_TLV("SPKDAT2 Digital Volume", ARIZONA_DAC_DIGITAL_VOLUME_6L,
293 ARIZONA_DAC_DIGITAL_VOLUME_6R, ARIZONA_OUT6L_VOL_SHIFT, 280 ARIZONA_DAC_DIGITAL_VOLUME_6R, ARIZONA_OUT6L_VOL_SHIFT,
294 0xbf, 0, digital_tlv), 281 0xbf, 0, digital_tlv),
295 282
296SOC_DOUBLE_R_RANGE_TLV("HPOUT1 Volume", ARIZONA_OUTPUT_PATH_CONFIG_1L,
297 ARIZONA_OUTPUT_PATH_CONFIG_1R,
298 ARIZONA_OUT1L_PGA_VOL_SHIFT,
299 0x34, 0x40, 0, ana_tlv),
300SOC_DOUBLE_R_RANGE_TLV("HPOUT2 Volume", ARIZONA_OUTPUT_PATH_CONFIG_2L,
301 ARIZONA_OUTPUT_PATH_CONFIG_2R,
302 ARIZONA_OUT2L_PGA_VOL_SHIFT,
303 0x34, 0x40, 0, ana_tlv),
304SOC_DOUBLE_R_RANGE_TLV("HPOUT3 Volume", ARIZONA_OUTPUT_PATH_CONFIG_3L,
305 ARIZONA_OUTPUT_PATH_CONFIG_3R,
306 ARIZONA_OUT3L_PGA_VOL_SHIFT, 0x34, 0x40, 0, ana_tlv),
307
308SOC_DOUBLE("SPKDAT1 Switch", ARIZONA_PDM_SPK1_CTRL_1, ARIZONA_SPK1L_MUTE_SHIFT, 283SOC_DOUBLE("SPKDAT1 Switch", ARIZONA_PDM_SPK1_CTRL_1, ARIZONA_SPK1L_MUTE_SHIFT,
309 ARIZONA_SPK1R_MUTE_SHIFT, 1, 1), 284 ARIZONA_SPK1R_MUTE_SHIFT, 1, 1),
310SOC_DOUBLE("SPKDAT2 Switch", ARIZONA_PDM_SPK2_CTRL_1, ARIZONA_SPK2L_MUTE_SHIFT, 285SOC_DOUBLE("SPKDAT2 Switch", ARIZONA_PDM_SPK2_CTRL_1, ARIZONA_SPK2L_MUTE_SHIFT,
@@ -1037,7 +1012,7 @@ static const struct snd_soc_dapm_route wm5110_dapm_routes[] = {
1037 { "AEC Loopback", "HPOUT3L", "OUT3L" }, 1012 { "AEC Loopback", "HPOUT3L", "OUT3L" },
1038 { "AEC Loopback", "HPOUT3R", "OUT3R" }, 1013 { "AEC Loopback", "HPOUT3R", "OUT3R" },
1039 { "HPOUT3L", NULL, "OUT3L" }, 1014 { "HPOUT3L", NULL, "OUT3L" },
1040 { "HPOUT3R", NULL, "OUT3L" }, 1015 { "HPOUT3R", NULL, "OUT3R" },
1041 1016
1042 { "AEC Loopback", "SPKOUTL", "OUT4L" }, 1017 { "AEC Loopback", "SPKOUTL", "OUT4L" },
1043 { "SPKOUTLN", NULL, "OUT4L" }, 1018 { "SPKOUTLN", NULL, "OUT4L" },
diff --git a/sound/soc/codecs/wm8731.c b/sound/soc/codecs/wm8731.c
index 456bb8c6d759..bc7472c968e3 100644
--- a/sound/soc/codecs/wm8731.c
+++ b/sound/soc/codecs/wm8731.c
@@ -447,10 +447,10 @@ static int wm8731_set_dai_fmt(struct snd_soc_dai *codec_dai,
447 iface |= 0x0001; 447 iface |= 0x0001;
448 break; 448 break;
449 case SND_SOC_DAIFMT_DSP_A: 449 case SND_SOC_DAIFMT_DSP_A:
450 iface |= 0x0003; 450 iface |= 0x0013;
451 break; 451 break;
452 case SND_SOC_DAIFMT_DSP_B: 452 case SND_SOC_DAIFMT_DSP_B:
453 iface |= 0x0013; 453 iface |= 0x0003;
454 break; 454 break;
455 default: 455 default:
456 return -EINVAL; 456 return -EINVAL;
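The wm8731.c hunk swaps the interface-format values written for DSP mode A and B: both select the DSP format (0x3) and bit 4 picks which of the two framings is used, so the two cases had simply been reversed. The corrected mapping, written out as plain C purely as a reading aid (the helper is not driver code):

#include <stdio.h>

enum dai_fmt { FMT_DSP_A, FMT_DSP_B };

/* Corrected mapping from the patch: 0x3 selects DSP mode, bit 4
 * distinguishes the two DSP framings. */
static unsigned int wm8731_iface_bits(enum dai_fmt fmt)
{
        switch (fmt) {
        case FMT_DSP_A:
                return 0x0013;
        case FMT_DSP_B:
                return 0x0003;
        }
        return 0;
}

int main(void)
{
        printf("DSP_A -> 0x%04x\n", wm8731_iface_bits(FMT_DSP_A));
        printf("DSP_B -> 0x%04x\n", wm8731_iface_bits(FMT_DSP_B));
        return 0;
}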
diff --git a/sound/soc/codecs/wm8904.c b/sound/soc/codecs/wm8904.c
index 3938fb1c203e..53bbfac6a83a 100644
--- a/sound/soc/codecs/wm8904.c
+++ b/sound/soc/codecs/wm8904.c
@@ -1444,7 +1444,7 @@ static int wm8904_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
1444 1444
1445 switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { 1445 switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
1446 case SND_SOC_DAIFMT_DSP_B: 1446 case SND_SOC_DAIFMT_DSP_B:
1447 aif1 |= WM8904_AIF_LRCLK_INV; 1447 aif1 |= 0x3 | WM8904_AIF_LRCLK_INV;
1448 case SND_SOC_DAIFMT_DSP_A: 1448 case SND_SOC_DAIFMT_DSP_A:
1449 aif1 |= 0x3; 1449 aif1 |= 0x3;
1450 break; 1450 break;
diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c
index 543c5c2631b6..0f17ed3e29f4 100644
--- a/sound/soc/codecs/wm8962.c
+++ b/sound/soc/codecs/wm8962.c
@@ -2439,7 +2439,20 @@ static void wm8962_configure_bclk(struct snd_soc_codec *codec)
2439 snd_soc_update_bits(codec, WM8962_CLOCKING_4, 2439 snd_soc_update_bits(codec, WM8962_CLOCKING_4,
2440 WM8962_SYSCLK_RATE_MASK, clocking4); 2440 WM8962_SYSCLK_RATE_MASK, clocking4);
2441 2441
2442 /* DSPCLK_DIV can be only generated correctly after enabling SYSCLK.
2443 * So we here provisionally enable it and then disable it afterward
2444 * if current bias_level hasn't reached SND_SOC_BIAS_ON.
2445 */
2446 if (codec->dapm.bias_level != SND_SOC_BIAS_ON)
2447 snd_soc_update_bits(codec, WM8962_CLOCKING2,
2448 WM8962_SYSCLK_ENA_MASK, WM8962_SYSCLK_ENA);
2449
2442 dspclk = snd_soc_read(codec, WM8962_CLOCKING1); 2450 dspclk = snd_soc_read(codec, WM8962_CLOCKING1);
2451
2452 if (codec->dapm.bias_level != SND_SOC_BIAS_ON)
2453 snd_soc_update_bits(codec, WM8962_CLOCKING2,
2454 WM8962_SYSCLK_ENA_MASK, 0);
2455
2443 if (dspclk < 0) { 2456 if (dspclk < 0) {
2444 dev_err(codec->dev, "Failed to read DSPCLK: %d\n", dspclk); 2457 dev_err(codec->dev, "Failed to read DSPCLK: %d\n", dspclk);
2445 return; 2458 return;
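The wm8962.c hunk wraps the DSPCLK read in a provisional SYSCLK enable, since the divider only reads back correctly while SYSCLK runs, and restores the previous state when the codec has not yet reached full bias. The pattern in isolation, with an invented register model:

#include <stdbool.h>
#include <stdio.h>

static bool sysclk_ena;

static void set_sysclk(bool on)   { sysclk_ena = on; }
static int  read_dspclk_div(void) { return sysclk_ena ? 2 : -1; }

/* Enable the clock just long enough to read the divider, then put the
 * enable bit back the way it was if the device is not fully powered. */
static int probe_dspclk_div(bool bias_on)
{
        int div;

        if (!bias_on)
                set_sysclk(true);       /* provisional enable */

        div = read_dspclk_div();

        if (!bias_on)
                set_sysclk(false);      /* restore previous state */

        return div;
}

int main(void)
{
        printf("divider while idle: %d\n", probe_dspclk_div(false));
        return 0;
}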
diff --git a/sound/soc/codecs/wm8990.c b/sound/soc/codecs/wm8990.c
index 253c88bb7a4c..4f05fb88bddf 100644
--- a/sound/soc/codecs/wm8990.c
+++ b/sound/soc/codecs/wm8990.c
@@ -1259,6 +1259,8 @@ static int wm8990_set_bias_level(struct snd_soc_codec *codec,
1259 1259
1260 /* disable POBCTRL, SOFT_ST and BUFDCOPEN */ 1260 /* disable POBCTRL, SOFT_ST and BUFDCOPEN */
1261 snd_soc_write(codec, WM8990_ANTIPOP2, 0x0); 1261 snd_soc_write(codec, WM8990_ANTIPOP2, 0x0);
1262
1263 codec->cache_sync = 1;
1262 break; 1264 break;
1263 } 1265 }
1264 1266
diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c
index 46ec0e9744d4..4fbcab63e61f 100644
--- a/sound/soc/codecs/wm_adsp.c
+++ b/sound/soc/codecs/wm_adsp.c
@@ -1474,13 +1474,17 @@ static int wm_adsp2_ena(struct wm_adsp *dsp)
1474 return ret; 1474 return ret;
1475 1475
1476 /* Wait for the RAM to start, should be near instantaneous */ 1476 /* Wait for the RAM to start, should be near instantaneous */
1477 count = 0; 1477 for (count = 0; count < 10; ++count) {
1478 do {
1479 ret = regmap_read(dsp->regmap, dsp->base + ADSP2_STATUS1, 1478 ret = regmap_read(dsp->regmap, dsp->base + ADSP2_STATUS1,
1480 &val); 1479 &val);
1481 if (ret != 0) 1480 if (ret != 0)
1482 return ret; 1481 return ret;
1483 } while (!(val & ADSP2_RAM_RDY) && ++count < 10); 1482
1483 if (val & ADSP2_RAM_RDY)
1484 break;
1485
1486 msleep(1);
1487 }
1484 1488
1485 if (!(val & ADSP2_RAM_RDY)) { 1489 if (!(val & ADSP2_RAM_RDY)) {
1486 adsp_err(dsp, "Failed to start DSP RAM\n"); 1490 adsp_err(dsp, "Failed to start DSP RAM\n");
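The wm_adsp.c rewrite turns the tight do/while poll into a bounded for loop that sleeps about a millisecond between reads of the status register and gives up after ten attempts. The same shape as a runnable userspace program (the register read is stubbed out):

#include <stdio.h>
#include <unistd.h>

#define RAM_RDY         0x1
#define MAX_TRIES       10

static unsigned int read_status(int attempt)
{
        return attempt >= 3 ? RAM_RDY : 0;      /* pretend RAM comes up late */
}

int main(void)
{
        unsigned int val = 0;
        int count;

        for (count = 0; count < MAX_TRIES; ++count) {
                val = read_status(count);
                if (val & RAM_RDY)
                        break;
                usleep(1000);           /* roughly msleep(1) */
        }

        if (!(val & RAM_RDY)) {
                fprintf(stderr, "DSP RAM failed to start\n");
                return 1;
        }
        printf("RAM ready after %d polls\n", count + 1);
        return 0;
}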
diff --git a/sound/soc/fsl/imx-wm8962.c b/sound/soc/fsl/imx-wm8962.c
index 61e48852b9e8..3fd76bc391de 100644
--- a/sound/soc/fsl/imx-wm8962.c
+++ b/sound/soc/fsl/imx-wm8962.c
@@ -130,8 +130,6 @@ static int imx_wm8962_set_bias_level(struct snd_soc_card *card,
130 break; 130 break;
131 } 131 }
132 132
133 dapm->bias_level = level;
134
135 return 0; 133 return 0;
136} 134}
137 135
diff --git a/sound/soc/fsl/pcm030-audio-fabric.c b/sound/soc/fsl/pcm030-audio-fabric.c
index eb4373840bb6..3665f612819d 100644
--- a/sound/soc/fsl/pcm030-audio-fabric.c
+++ b/sound/soc/fsl/pcm030-audio-fabric.c
@@ -69,7 +69,6 @@ static int pcm030_fabric_probe(struct platform_device *op)
69 return -ENOMEM; 69 return -ENOMEM;
70 70
71 card->dev = &op->dev; 71 card->dev = &op->dev;
72 platform_set_drvdata(op, pdata);
73 72
74 pdata->card = card; 73 pdata->card = card;
75 74
@@ -98,6 +97,8 @@ static int pcm030_fabric_probe(struct platform_device *op)
98 if (ret) 97 if (ret)
99 dev_err(&op->dev, "snd_soc_register_card() failed: %d\n", ret); 98 dev_err(&op->dev, "snd_soc_register_card() failed: %d\n", ret);
100 99
100 platform_set_drvdata(op, pdata);
101
101 return ret; 102 return ret;
102} 103}
103 104
diff --git a/sound/soc/kirkwood/kirkwood-i2s.c b/sound/soc/kirkwood/kirkwood-i2s.c
index d34d91743e3f..3920a5e8125f 100644
--- a/sound/soc/kirkwood/kirkwood-i2s.c
+++ b/sound/soc/kirkwood/kirkwood-i2s.c
@@ -33,6 +33,10 @@
33 SNDRV_PCM_FMTBIT_S24_LE | \ 33 SNDRV_PCM_FMTBIT_S24_LE | \
34 SNDRV_PCM_FMTBIT_S32_LE) 34 SNDRV_PCM_FMTBIT_S32_LE)
35 35
36#define KIRKWOOD_SPDIF_FORMATS \
37 (SNDRV_PCM_FMTBIT_S16_LE | \
38 SNDRV_PCM_FMTBIT_S24_LE)
39
36static int kirkwood_i2s_set_fmt(struct snd_soc_dai *cpu_dai, 40static int kirkwood_i2s_set_fmt(struct snd_soc_dai *cpu_dai,
37 unsigned int fmt) 41 unsigned int fmt)
38{ 42{
@@ -244,15 +248,15 @@ static int kirkwood_i2s_play_trigger(struct snd_pcm_substream *substream,
244 ctl); 248 ctl);
245 } 249 }
246 250
247 if (dai->id == 0)
248 ctl &= ~KIRKWOOD_PLAYCTL_SPDIF_EN; /* i2s */
249 else
250 ctl &= ~KIRKWOOD_PLAYCTL_I2S_EN; /* spdif */
251
252 switch (cmd) { 251 switch (cmd) {
253 case SNDRV_PCM_TRIGGER_START: 252 case SNDRV_PCM_TRIGGER_START:
254 /* configure */ 253 /* configure */
255 ctl = priv->ctl_play; 254 ctl = priv->ctl_play;
255 if (dai->id == 0)
256 ctl &= ~KIRKWOOD_PLAYCTL_SPDIF_EN; /* i2s */
257 else
258 ctl &= ~KIRKWOOD_PLAYCTL_I2S_EN; /* spdif */
259
256 value = ctl & ~KIRKWOOD_PLAYCTL_ENABLE_MASK; 260 value = ctl & ~KIRKWOOD_PLAYCTL_ENABLE_MASK;
257 writel(value, priv->io + KIRKWOOD_PLAYCTL); 261 writel(value, priv->io + KIRKWOOD_PLAYCTL);
258 262
@@ -449,14 +453,14 @@ static struct snd_soc_dai_driver kirkwood_i2s_dai[2] = {
449 .channels_max = 2, 453 .channels_max = 2,
450 .rates = SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000 | 454 .rates = SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000 |
451 SNDRV_PCM_RATE_96000, 455 SNDRV_PCM_RATE_96000,
452 .formats = KIRKWOOD_I2S_FORMATS, 456 .formats = KIRKWOOD_SPDIF_FORMATS,
453 }, 457 },
454 .capture = { 458 .capture = {
455 .channels_min = 1, 459 .channels_min = 1,
456 .channels_max = 2, 460 .channels_max = 2,
457 .rates = SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000 | 461 .rates = SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000 |
458 SNDRV_PCM_RATE_96000, 462 SNDRV_PCM_RATE_96000,
459 .formats = KIRKWOOD_I2S_FORMATS, 463 .formats = KIRKWOOD_SPDIF_FORMATS,
460 }, 464 },
461 .ops = &kirkwood_i2s_dai_ops, 465 .ops = &kirkwood_i2s_dai_ops,
462 }, 466 },
@@ -469,17 +473,17 @@ static struct snd_soc_dai_driver kirkwood_i2s_dai_extclk[2] = {
469 .playback = { 473 .playback = {
470 .channels_min = 1, 474 .channels_min = 1,
471 .channels_max = 2, 475 .channels_max = 2,
472 .rates = SNDRV_PCM_RATE_8000_192000 | 476 .rates = SNDRV_PCM_RATE_CONTINUOUS,
473 SNDRV_PCM_RATE_CONTINUOUS | 477 .rate_min = 5512,
474 SNDRV_PCM_RATE_KNOT, 478 .rate_max = 192000,
475 .formats = KIRKWOOD_I2S_FORMATS, 479 .formats = KIRKWOOD_I2S_FORMATS,
476 }, 480 },
477 .capture = { 481 .capture = {
478 .channels_min = 1, 482 .channels_min = 1,
479 .channels_max = 2, 483 .channels_max = 2,
480 .rates = SNDRV_PCM_RATE_8000_192000 | 484 .rates = SNDRV_PCM_RATE_CONTINUOUS,
481 SNDRV_PCM_RATE_CONTINUOUS | 485 .rate_min = 5512,
482 SNDRV_PCM_RATE_KNOT, 486 .rate_max = 192000,
483 .formats = KIRKWOOD_I2S_FORMATS, 487 .formats = KIRKWOOD_I2S_FORMATS,
484 }, 488 },
485 .ops = &kirkwood_i2s_dai_ops, 489 .ops = &kirkwood_i2s_dai_ops,
@@ -490,18 +494,18 @@ static struct snd_soc_dai_driver kirkwood_i2s_dai_extclk[2] = {
490 .playback = { 494 .playback = {
491 .channels_min = 1, 495 .channels_min = 1,
492 .channels_max = 2, 496 .channels_max = 2,
493 .rates = SNDRV_PCM_RATE_8000_192000 | 497 .rates = SNDRV_PCM_RATE_CONTINUOUS,
494 SNDRV_PCM_RATE_CONTINUOUS | 498 .rate_min = 5512,
495 SNDRV_PCM_RATE_KNOT, 499 .rate_max = 192000,
496 .formats = KIRKWOOD_I2S_FORMATS, 500 .formats = KIRKWOOD_SPDIF_FORMATS,
497 }, 501 },
498 .capture = { 502 .capture = {
499 .channels_min = 1, 503 .channels_min = 1,
500 .channels_max = 2, 504 .channels_max = 2,
501 .rates = SNDRV_PCM_RATE_8000_192000 | 505 .rates = SNDRV_PCM_RATE_CONTINUOUS,
502 SNDRV_PCM_RATE_CONTINUOUS | 506 .rate_min = 5512,
503 SNDRV_PCM_RATE_KNOT, 507 .rate_max = 192000,
504 .formats = KIRKWOOD_I2S_FORMATS, 508 .formats = KIRKWOOD_SPDIF_FORMATS,
505 }, 509 },
506 .ops = &kirkwood_i2s_dai_ops, 510 .ops = &kirkwood_i2s_dai_ops,
507 }, 511 },
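kirkwood-i2s.c now advertises SNDRV_PCM_RATE_CONTINUOUS with explicit rate_min/rate_max instead of a fixed rate bitmask plus RATE_KNOT, and splits a narrower KIRKWOOD_SPDIF_FORMATS out of the I2S format mask. The practical effect of the continuous range, sketched with invented types (the limits mirror the patch, the check itself is illustrative):

#include <stdbool.h>
#include <stdio.h>

struct stream_caps {
        unsigned int rate_min;
        unsigned int rate_max;
};

/* Any rate inside the window is accepted, not only the rates named in
 * a fixed bitmask. */
static bool rate_ok(const struct stream_caps *caps, unsigned int rate)
{
        return rate >= caps->rate_min && rate <= caps->rate_max;
}

int main(void)
{
        struct stream_caps extclk = { .rate_min = 5512, .rate_max = 192000 };

        printf("44100: %s\n", rate_ok(&extclk, 44100) ? "ok" : "rejected");
        printf("44056: %s\n", rate_ok(&extclk, 44056) ? "ok" : "rejected");
        return 0;
}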
diff --git a/sound/soc/omap/n810.c b/sound/soc/omap/n810.c
index 6d216cb6c19b..3fde9e402710 100644
--- a/sound/soc/omap/n810.c
+++ b/sound/soc/omap/n810.c
@@ -100,12 +100,12 @@ static int n810_startup(struct snd_pcm_substream *substream)
100 SNDRV_PCM_HW_PARAM_CHANNELS, 2, 2); 100 SNDRV_PCM_HW_PARAM_CHANNELS, 2, 2);
101 101
102 n810_ext_control(&codec->dapm); 102 n810_ext_control(&codec->dapm);
103 return clk_enable(sys_clkout2); 103 return clk_prepare_enable(sys_clkout2);
104} 104}
105 105
106static void n810_shutdown(struct snd_pcm_substream *substream) 106static void n810_shutdown(struct snd_pcm_substream *substream)
107{ 107{
108 clk_disable(sys_clkout2); 108 clk_disable_unprepare(sys_clkout2);
109} 109}
110 110
111static int n810_hw_params(struct snd_pcm_substream *substream, 111static int n810_hw_params(struct snd_pcm_substream *substream,
diff --git a/sound/soc/sh/Kconfig b/sound/soc/sh/Kconfig
index 14011d90d70a..ff60e11ecb56 100644
--- a/sound/soc/sh/Kconfig
+++ b/sound/soc/sh/Kconfig
@@ -37,6 +37,7 @@ config SND_SOC_SH4_SIU
37config SND_SOC_RCAR 37config SND_SOC_RCAR
38 tristate "R-Car series SRU/SCU/SSIU/SSI support" 38 tristate "R-Car series SRU/SCU/SSIU/SSI support"
39 select SND_SIMPLE_CARD 39 select SND_SIMPLE_CARD
40 select REGMAP
40 help 41 help
41 This option enables R-Car SUR/SCU/SSIU/SSI sound support 42 This option enables R-Car SUR/SCU/SSIU/SSI sound support
42 43
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index 4e53d87e881d..a66783e13a9c 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -3212,11 +3212,11 @@ int snd_soc_bytes_get(struct snd_kcontrol *kcontrol,
3212 break; 3212 break;
3213 case 2: 3213 case 2:
3214 ((u16 *)(&ucontrol->value.bytes.data))[0] 3214 ((u16 *)(&ucontrol->value.bytes.data))[0]
3215 &= ~params->mask; 3215 &= cpu_to_be16(~params->mask);
3216 break; 3216 break;
3217 case 4: 3217 case 4:
3218 ((u32 *)(&ucontrol->value.bytes.data))[0] 3218 ((u32 *)(&ucontrol->value.bytes.data))[0]
3219 &= ~params->mask; 3219 &= cpu_to_be32(~params->mask);
3220 break; 3220 break;
3221 default: 3221 default:
3222 return -EINVAL; 3222 return -EINVAL;
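The soc-core.c fix byte-swaps the mask with cpu_to_be16()/cpu_to_be32() before ANDing it into the byte-control payload, which is stored big-endian. A self-contained demonstration for a little-endian host, using htonl() as a stand-in for cpu_to_be32() (on a big-endian machine both lines print the same value):

#include <arpa/inet.h>  /* htonl()/ntohl() stand in for cpu_to_be32() */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t payload = htonl(0x12345678);   /* big-endian payload word */
        uint32_t mask = 0x000000ff;             /* intended to clear the low byte */

        uint32_t old_way = payload & ~mask;             /* pre-patch */
        uint32_t new_way = payload & htonl(~mask);      /* patched */

        /* old clears the wrong byte (0x12), new clears the intended 0x78 */
        printf("old: 0x%08x  new: 0x%08x\n", ntohl(old_way), ntohl(new_way));
        return 0;
}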
diff --git a/sound/soc/soc-devres.c b/sound/soc/soc-devres.c
index b1d732255c02..3449c1e909ae 100644
--- a/sound/soc/soc-devres.c
+++ b/sound/soc/soc-devres.c
@@ -66,7 +66,7 @@ static void devm_card_release(struct device *dev, void *res)
66 */ 66 */
67int devm_snd_soc_register_card(struct device *dev, struct snd_soc_card *card) 67int devm_snd_soc_register_card(struct device *dev, struct snd_soc_card *card)
68{ 68{
69 struct device **ptr; 69 struct snd_soc_card **ptr;
70 int ret; 70 int ret;
71 71
72 ptr = devres_alloc(devm_card_release, sizeof(*ptr), GFP_KERNEL); 72 ptr = devres_alloc(devm_card_release, sizeof(*ptr), GFP_KERNEL);
@@ -75,7 +75,7 @@ int devm_snd_soc_register_card(struct device *dev, struct snd_soc_card *card)
75 75
76 ret = snd_soc_register_card(card); 76 ret = snd_soc_register_card(card);
77 if (ret == 0) { 77 if (ret == 0) {
78 *ptr = dev; 78 *ptr = card;
79 devres_add(dev, ptr); 79 devres_add(dev, ptr);
80 } else { 80 } else {
81 devres_free(ptr); 81 devres_free(ptr);
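soc-devres.c now stores the card pointer, rather than the parent device, in the devres slot, because devm_card_release() dereferences that slot as a struct snd_soc_card **. A much-reduced model of why the slot type matters (everything below is invented for the illustration):

#include <stdio.h>
#include <stdlib.h>

struct card { const char *name; };

/* The managed release callback expects a slot holding a card pointer. */
static void card_release(void *res)
{
        struct card **ptr = res;
        printf("unregistering card %s\n", (*ptr)->name);
}

int main(void)
{
        struct card my_card = { .name = "demo-card" };
        struct card **slot = malloc(sizeof(*slot));

        if (!slot)
                return 1;
        *slot = &my_card;       /* store the card, as the fix does */
        card_release(slot);     /* the release now sees the right object */
        free(slot);
        return 0;
}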
diff --git a/sound/soc/soc-generic-dmaengine-pcm.c b/sound/soc/soc-generic-dmaengine-pcm.c
index cbc9c96ce1f4..41949af3baae 100644
--- a/sound/soc/soc-generic-dmaengine-pcm.c
+++ b/sound/soc/soc-generic-dmaengine-pcm.c
@@ -305,6 +305,20 @@ static void dmaengine_pcm_request_chan_of(struct dmaengine_pcm *pcm,
305 } 305 }
306} 306}
307 307
308static void dmaengine_pcm_release_chan(struct dmaengine_pcm *pcm)
309{
310 unsigned int i;
311
312 for (i = SNDRV_PCM_STREAM_PLAYBACK; i <= SNDRV_PCM_STREAM_CAPTURE;
313 i++) {
314 if (!pcm->chan[i])
315 continue;
316 dma_release_channel(pcm->chan[i]);
317 if (pcm->flags & SND_DMAENGINE_PCM_FLAG_HALF_DUPLEX)
318 break;
319 }
320}
321
308/** 322/**
309 * snd_dmaengine_pcm_register - Register a dmaengine based PCM device 323 * snd_dmaengine_pcm_register - Register a dmaengine based PCM device
310 * @dev: The parent device for the PCM device 324 * @dev: The parent device for the PCM device
@@ -315,6 +329,7 @@ int snd_dmaengine_pcm_register(struct device *dev,
315 const struct snd_dmaengine_pcm_config *config, unsigned int flags) 329 const struct snd_dmaengine_pcm_config *config, unsigned int flags)
316{ 330{
317 struct dmaengine_pcm *pcm; 331 struct dmaengine_pcm *pcm;
332 int ret;
318 333
319 pcm = kzalloc(sizeof(*pcm), GFP_KERNEL); 334 pcm = kzalloc(sizeof(*pcm), GFP_KERNEL);
320 if (!pcm) 335 if (!pcm)
@@ -326,11 +341,20 @@ int snd_dmaengine_pcm_register(struct device *dev,
326 dmaengine_pcm_request_chan_of(pcm, dev); 341 dmaengine_pcm_request_chan_of(pcm, dev);
327 342
328 if (flags & SND_DMAENGINE_PCM_FLAG_NO_RESIDUE) 343 if (flags & SND_DMAENGINE_PCM_FLAG_NO_RESIDUE)
329 return snd_soc_add_platform(dev, &pcm->platform, 344 ret = snd_soc_add_platform(dev, &pcm->platform,
330 &dmaengine_no_residue_pcm_platform); 345 &dmaengine_no_residue_pcm_platform);
331 else 346 else
332 return snd_soc_add_platform(dev, &pcm->platform, 347 ret = snd_soc_add_platform(dev, &pcm->platform,
333 &dmaengine_pcm_platform); 348 &dmaengine_pcm_platform);
349 if (ret)
350 goto err_free_dma;
351
352 return 0;
353
354err_free_dma:
355 dmaengine_pcm_release_chan(pcm);
356 kfree(pcm);
357 return ret;
334} 358}
335EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_register); 359EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_register);
336 360
@@ -345,7 +369,6 @@ void snd_dmaengine_pcm_unregister(struct device *dev)
345{ 369{
346 struct snd_soc_platform *platform; 370 struct snd_soc_platform *platform;
347 struct dmaengine_pcm *pcm; 371 struct dmaengine_pcm *pcm;
348 unsigned int i;
349 372
350 platform = snd_soc_lookup_platform(dev); 373 platform = snd_soc_lookup_platform(dev);
351 if (!platform) 374 if (!platform)
@@ -353,15 +376,8 @@ void snd_dmaengine_pcm_unregister(struct device *dev)
353 376
354 pcm = soc_platform_to_pcm(platform); 377 pcm = soc_platform_to_pcm(platform);
355 378
356 for (i = SNDRV_PCM_STREAM_PLAYBACK; i <= SNDRV_PCM_STREAM_CAPTURE; i++) {
357 if (pcm->chan[i]) {
358 dma_release_channel(pcm->chan[i]);
359 if (pcm->flags & SND_DMAENGINE_PCM_FLAG_HALF_DUPLEX)
360 break;
361 }
362 }
363
364 snd_soc_remove_platform(platform); 379 snd_soc_remove_platform(platform);
380 dmaengine_pcm_release_chan(pcm);
365 kfree(pcm); 381 kfree(pcm);
366} 382}
367EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_unregister); 383EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_unregister);
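soc-generic-dmaengine-pcm.c factors the channel teardown into dmaengine_pcm_release_chan() and calls it from the error path of snd_dmaengine_pcm_register() as well as from unregister, so a failed snd_soc_add_platform() no longer leaks the requested DMA channels. The general shape of that unwind, with plain allocations standing in for channels:

#include <stdio.h>
#include <stdlib.h>

#define NUM_STREAMS 2

struct pcm { void *chan[NUM_STREAMS]; };

/* One helper releases whatever was acquired; both the failure path of
 * register and the normal unregister path call it. */
static void pcm_release_chan(struct pcm *pcm)
{
        for (int i = 0; i < NUM_STREAMS; i++) {
                free(pcm->chan[i]);
                pcm->chan[i] = NULL;
        }
}

static int pcm_register(struct pcm *pcm, int simulate_failure)
{
        for (int i = 0; i < NUM_STREAMS; i++)
                pcm->chan[i] = malloc(64);      /* "request" the channels */

        if (simulate_failure)
                goto err_free_dma;
        return 0;

err_free_dma:
        pcm_release_chan(pcm);
        return -1;
}

int main(void)
{
        struct pcm pcm = { { NULL, NULL } };

        printf("register: %d\n", pcm_register(&pcm, 1));
        return 0;
}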
diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
index 42782c01e413..891b9a9bcbf8 100644
--- a/sound/soc/soc-pcm.c
+++ b/sound/soc/soc-pcm.c
@@ -148,12 +148,12 @@ static void soc_pcm_apply_msb(struct snd_pcm_substream *substream,
 	}
 }
 
-static void soc_pcm_init_runtime_hw(struct snd_pcm_hardware *hw,
+static void soc_pcm_init_runtime_hw(struct snd_pcm_runtime *runtime,
 	struct snd_soc_pcm_stream *codec_stream,
 	struct snd_soc_pcm_stream *cpu_stream)
 {
-	hw->rate_min = max(codec_stream->rate_min, cpu_stream->rate_min);
-	hw->rate_max = max(codec_stream->rate_max, cpu_stream->rate_max);
+	struct snd_pcm_hardware *hw = &runtime->hw;
+
 	hw->channels_min = max(codec_stream->channels_min,
 			cpu_stream->channels_min);
 	hw->channels_max = min(codec_stream->channels_max,
@@ -166,6 +166,13 @@ static void soc_pcm_init_runtime_hw(struct snd_pcm_hardware *hw,
 	if (cpu_stream->rates
 		& (SNDRV_PCM_RATE_KNOT | SNDRV_PCM_RATE_CONTINUOUS))
 		hw->rates |= codec_stream->rates;
+
+	snd_pcm_limit_hw_rates(runtime);
+
+	hw->rate_min = max(hw->rate_min, cpu_stream->rate_min);
+	hw->rate_min = max(hw->rate_min, codec_stream->rate_min);
+	hw->rate_max = min_not_zero(hw->rate_max, cpu_stream->rate_max);
+	hw->rate_max = min_not_zero(hw->rate_max, codec_stream->rate_max);
 }
 
 /*
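A quick user-space illustration of the rate-window arithmetic added above, with stand-in helpers and made-up numbers (8 kHz-96 kHz codec, 8 kHz-48 kHz CPU DAI); min_not_zero() is assumed to treat 0 as "no limit", matching its use here:

#include <stdio.h>

/* user-space stand-ins for the kernel helpers used above */
static unsigned int max_u(unsigned int a, unsigned int b) { return a > b ? a : b; }
static unsigned int min_not_zero(unsigned int a, unsigned int b)
{
	if (!a)
		return b;
	if (!b)
		return a;
	return a < b ? a : b;
}

int main(void)
{
	/* runtime starts out unconstrained (rate_max == 0 means "no limit") */
	unsigned int rate_min = 0, rate_max = 0;

	rate_min = max_u(rate_min, 8000);		/* cpu_stream->rate_min   */
	rate_min = max_u(rate_min, 8000);		/* codec_stream->rate_min */
	rate_max = min_not_zero(rate_max, 48000);	/* cpu_stream->rate_max   */
	rate_max = min_not_zero(rate_max, 96000);	/* codec_stream->rate_max */

	printf("usable rate window: %u..%u Hz\n", rate_min, rate_max); /* 8000..48000 */
	return 0;
}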
@@ -235,15 +242,14 @@ static int soc_pcm_open(struct snd_pcm_substream *substream)
 
 	/* Check that the codec and cpu DAIs are compatible */
 	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
-		soc_pcm_init_runtime_hw(&runtime->hw, &codec_dai_drv->playback,
+		soc_pcm_init_runtime_hw(runtime, &codec_dai_drv->playback,
 			&cpu_dai_drv->playback);
 	} else {
-		soc_pcm_init_runtime_hw(&runtime->hw, &codec_dai_drv->capture,
+		soc_pcm_init_runtime_hw(runtime, &codec_dai_drv->capture,
 			&cpu_dai_drv->capture);
 	}
 
 	ret = -EINVAL;
-	snd_pcm_limit_hw_rates(runtime);
 	if (!runtime->hw.rates) {
 		printk(KERN_ERR "ASoC: %s <-> %s No matching rates\n",
 			codec_dai->name, cpu_dai->name);
@@ -594,12 +600,13 @@ static int soc_pcm_hw_free(struct snd_pcm_substream *substream)
 	struct snd_soc_platform *platform = rtd->platform;
 	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
 	struct snd_soc_dai *codec_dai = rtd->codec_dai;
-	struct snd_soc_codec *codec = rtd->codec;
+	bool playback = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
 
 	mutex_lock_nested(&rtd->pcm_mutex, rtd->pcm_subclass);
 
 	/* apply codec digital mute */
-	if (!codec->active)
+	if ((playback && codec_dai->playback_active == 1) ||
+		(!playback && codec_dai->capture_active == 1))
 		snd_soc_dai_digital_mute(codec_dai, 1, substream->stream);
 
 	/* free any machine hw params */
diff --git a/sound/soc/tegra/tegra20_i2s.c b/sound/soc/tegra/tegra20_i2s.c
index 364bf6a907e1..8c819f811470 100644
--- a/sound/soc/tegra/tegra20_i2s.c
+++ b/sound/soc/tegra/tegra20_i2s.c
@@ -74,7 +74,7 @@ static int tegra20_i2s_set_fmt(struct snd_soc_dai *dai,
 		unsigned int fmt)
 {
 	struct tegra20_i2s *i2s = snd_soc_dai_get_drvdata(dai);
-	unsigned int mask, val;
+	unsigned int mask = 0, val = 0;
 
 	switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
 	case SND_SOC_DAIFMT_NB_NF:
@@ -83,10 +83,10 @@ static int tegra20_i2s_set_fmt(struct snd_soc_dai *dai,
 		return -EINVAL;
 	}
 
-	mask = TEGRA20_I2S_CTRL_MASTER_ENABLE;
+	mask |= TEGRA20_I2S_CTRL_MASTER_ENABLE;
 	switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
 	case SND_SOC_DAIFMT_CBS_CFS:
-		val = TEGRA20_I2S_CTRL_MASTER_ENABLE;
+		val |= TEGRA20_I2S_CTRL_MASTER_ENABLE;
 		break;
 	case SND_SOC_DAIFMT_CBM_CFM:
 		break;
diff --git a/sound/soc/tegra/tegra20_spdif.c b/sound/soc/tegra/tegra20_spdif.c
index 08bc6931c7c7..8c7c1028e579 100644
--- a/sound/soc/tegra/tegra20_spdif.c
+++ b/sound/soc/tegra/tegra20_spdif.c
@@ -67,15 +67,15 @@ static int tegra20_spdif_hw_params(struct snd_pcm_substream *substream,
 {
 	struct device *dev = dai->dev;
 	struct tegra20_spdif *spdif = snd_soc_dai_get_drvdata(dai);
-	unsigned int mask, val;
+	unsigned int mask = 0, val = 0;
 	int ret, spdifclock;
 
-	mask = TEGRA20_SPDIF_CTRL_PACK |
+	mask |= TEGRA20_SPDIF_CTRL_PACK |
 		TEGRA20_SPDIF_CTRL_BIT_MODE_MASK;
 	switch (params_format(params)) {
 	case SNDRV_PCM_FORMAT_S16_LE:
-		val = TEGRA20_SPDIF_CTRL_PACK |
+		val |= TEGRA20_SPDIF_CTRL_PACK |
 			TEGRA20_SPDIF_CTRL_BIT_MODE_16BIT;
 		break;
 	default:
 		return -EINVAL;
diff --git a/sound/soc/tegra/tegra30_i2s.c b/sound/soc/tegra/tegra30_i2s.c
index 231a785b3921..02247fee1cf7 100644
--- a/sound/soc/tegra/tegra30_i2s.c
+++ b/sound/soc/tegra/tegra30_i2s.c
@@ -118,7 +118,7 @@ static int tegra30_i2s_set_fmt(struct snd_soc_dai *dai,
 		unsigned int fmt)
 {
 	struct tegra30_i2s *i2s = snd_soc_dai_get_drvdata(dai);
-	unsigned int mask, val;
+	unsigned int mask = 0, val = 0;
 
 	switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
 	case SND_SOC_DAIFMT_NB_NF:
@@ -127,10 +127,10 @@ static int tegra30_i2s_set_fmt(struct snd_soc_dai *dai,
 		return -EINVAL;
 	}
 
-	mask = TEGRA30_I2S_CTRL_MASTER_ENABLE;
+	mask |= TEGRA30_I2S_CTRL_MASTER_ENABLE;
 	switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
 	case SND_SOC_DAIFMT_CBS_CFS:
-		val = TEGRA30_I2S_CTRL_MASTER_ENABLE;
+		val |= TEGRA30_I2S_CTRL_MASTER_ENABLE;
 		break;
 	case SND_SOC_DAIFMT_CBM_CFM:
 		break;
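The three Tegra changes above address the same pattern: mask and val are built up across several switch statements, and a path that assigns neither (for example the codec-is-master case) previously left them uninitialized before the register update. The "= 0" initializers plus "|=" keep every path well defined. A small stand-alone sketch of the pattern; the bit constant and the update helper are stand-ins, not the Tegra register API:

#include <stdio.h>

#define CTRL_MASTER_ENABLE	(1 << 0)	/* stand-in bit, not the real register layout */

/* stand-in for a read-modify-write register update such as regmap_update_bits() */
static void update_bits(unsigned int *reg, unsigned int mask, unsigned int val)
{
	*reg = (*reg & ~mask) | (val & mask);
}

int main(void)
{
	unsigned int reg = 0xffffffff;
	unsigned int mask = 0, val = 0;	/* the fix: start from a known state */

	/* codec-is-master case: the enable bit goes into the mask but stays 0 in val */
	mask |= CTRL_MASTER_ENABLE;

	update_bits(&reg, mask, val);
	printf("reg = 0x%08x\n", reg);	/* master-enable bit cleared, others untouched */
	return 0;
}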
diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
index b9ba0fcc45df..83aabea259d7 100644
--- a/sound/usb/endpoint.c
+++ b/sound/usb/endpoint.c
@@ -636,8 +636,22 @@ static int data_ep_set_params(struct snd_usb_endpoint *ep,
 	if (usb_pipein(ep->pipe) ||
 	    snd_usb_endpoint_implicit_feedback_sink(ep)) {
 
+		urb_packs = packs_per_ms;
+		/*
+		 * Wireless devices can poll at a max rate of once per 4ms.
+		 * For dataintervals less than 5, increase the packet count to
+		 * allow the host controller to use bursting to fill in the
+		 * gaps.
+		 */
+		if (snd_usb_get_speed(ep->chip->dev) == USB_SPEED_WIRELESS) {
+			int interval = ep->datainterval;
+			while (interval < 5) {
+				urb_packs <<= 1;
+				++interval;
+			}
+		}
 		/* make capture URBs <= 1 ms and smaller than a period */
-		urb_packs = min(max_packs_per_urb, packs_per_ms);
+		urb_packs = min(max_packs_per_urb, urb_packs);
 		while (urb_packs > 1 && urb_packs * maxsize >= period_bytes)
 			urb_packs >>= 1;
 		ep->nurbs = MAX_URBS;
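The new wireless-USB branch effectively multiplies the per-URB packet count by 2^(5 - datainterval) before the existing clamping runs. A worked user-space example of the sizing logic; packs_per_ms, the cap and the period numbers are made up for illustration:

#include <stdio.h>

int main(void)
{
	unsigned int packs_per_ms = 8;		/* assumed value for illustration */
	unsigned int max_packs_per_urb = 64;	/* assumed cap */
	unsigned int period_bytes = 4096, maxsize = 192;
	int datainterval = 1;			/* low datainterval reported by the device */
	unsigned int urb_packs = packs_per_ms;
	int interval = datainterval;

	/* same doubling loop as the patch: 1->2->3->4->5 doubles four times (x16) */
	while (interval < 5) {
		urb_packs <<= 1;
		++interval;
	}

	/* then the pre-existing clamps: cap per URB, keep URBs smaller than a period */
	urb_packs = urb_packs < max_packs_per_urb ? urb_packs : max_packs_per_urb;
	while (urb_packs > 1 && urb_packs * maxsize >= period_bytes)
		urb_packs >>= 1;

	printf("urb_packs = %u\n", urb_packs);
	return 0;
}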
diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
index 3454262358b3..f4b12c216f1c 100644
--- a/sound/usb/mixer_quirks.c
+++ b/sound/usb/mixer_quirks.c
@@ -1603,7 +1603,7 @@ static int snd_microii_controls_create(struct usb_mixer_interface *mixer)
 		return err;
 	}
 
-	return err;
+	return 0;
 }
 
 int snd_usb_mixer_apply_create_quirk(struct usb_mixer_interface *mixer)
diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c
index 0362d575de7d..217c82ee3665 100644
--- a/tools/lib/traceevent/event-parse.c
+++ b/tools/lib/traceevent/event-parse.c
@@ -1606,6 +1606,24 @@ process_arg(struct event_format *event, struct print_arg *arg, char **tok)
 static enum event_type
 process_op(struct event_format *event, struct print_arg *arg, char **tok);
 
+/*
+ * For __print_symbolic() and __print_flags, we need to completely
+ * evaluate the first argument, which defines what to print next.
+ */
+static enum event_type
+process_field_arg(struct event_format *event, struct print_arg *arg, char **tok)
+{
+	enum event_type type;
+
+	type = process_arg(event, arg, tok);
+
+	while (type == EVENT_OP) {
+		type = process_op(event, arg, tok);
+	}
+
+	return type;
+}
+
 static enum event_type
 process_cond(struct event_format *event, struct print_arg *top, char **tok)
 {
@@ -2371,7 +2389,7 @@ process_flags(struct event_format *event, struct print_arg *arg, char **tok)
 		goto out_free;
 	}
 
-	type = process_arg(event, field, &token);
+	type = process_field_arg(event, field, &token);
 
 	/* Handle operations in the first argument */
 	while (type == EVENT_OP)
@@ -2424,7 +2442,8 @@ process_symbols(struct event_format *event, struct print_arg *arg, char **tok)
 		goto out_free;
 	}
 
-	type = process_arg(event, field, &token);
+	type = process_field_arg(event, field, &token);
+
 	if (test_type_token(type, token, EVENT_DELIM, ","))
 		goto out_free_field;
 
@@ -3446,7 +3465,7 @@ eval_num_arg(void *data, int size, struct event_format *event, struct print_arg
 		 * is in the bottom half of the 32 bit field.
 		 */
 		offset &= 0xffff;
-		val = (unsigned long long)(data + offset);
+		val = (unsigned long long)((unsigned long)data + offset);
 		break;
 	default: /* not sure what to do there */
 		return 0;
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 369c03648f88..1cd035708931 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -2078,8 +2078,10 @@ static int process_group_desc(struct perf_file_section *section __maybe_unused,
 		if (evsel->idx == (int) desc[i].leader_idx) {
 			evsel->leader = evsel;
 			/* {anon_group} is a dummy name */
-			if (strcmp(desc[i].name, "{anon_group}"))
+			if (strcmp(desc[i].name, "{anon_group}")) {
 				evsel->group_name = desc[i].name;
+				desc[i].name = NULL;
+			}
 			evsel->nr_members = desc[i].nr_members;
 
 			if (i >= nr_groups || nr > 0) {
@@ -2105,7 +2107,7 @@ static int process_group_desc(struct perf_file_section *section __maybe_unused,
 
 	ret = 0;
 out_free:
-	while ((int) --i >= 0)
+	for (i = 0; i < nr_groups; i++)
 		free(desc[i].name);
 	free(desc);
 
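The header.c change is an ownership hand-off: once desc[i].name has been stored in evsel->group_name it is set to NULL, so the rewritten cleanup loop can unconditionally free every slot without freeing a string that is still in use. A small stand-alone sketch of the transfer-then-NULL pattern; the struct and the names are made up:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct desc { char *name; };

int main(void)
{
	struct desc desc[3] = {
		{ strdup("{anon_group}") }, { strdup("cache") }, { strdup("mem") },
	};
	char *group_name = NULL;
	int i, nr_groups = 3;

	for (i = 0; i < nr_groups; i++) {
		if (strcmp(desc[i].name, "{anon_group}")) {
			group_name = desc[i].name;	/* hand the string over... */
			desc[i].name = NULL;		/* ...and drop our reference */
			break;
		}
	}

	/* cleanup may now free every slot: handed-off slots are NULL and free(NULL) is a no-op */
	for (i = 0; i < nr_groups; i++)
		free(desc[i].name);

	printf("kept group name: %s\n", group_name);
	free(group_name);
	return 0;
}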
diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c
index cd8e2f592719..49eaf1d7d89d 100644
--- a/tools/perf/util/thread.c
+++ b/tools/perf/util/thread.c
@@ -70,14 +70,13 @@ int thread__set_comm(struct thread *thread, const char *str, u64 timestamp)
 	/* Override latest entry if it had no specific time coverage */
 	if (!curr->start) {
 		comm__override(curr, str, timestamp);
-		return 0;
+	} else {
+		new = comm__new(str, timestamp);
+		if (!new)
+			return -ENOMEM;
+		list_add(&new->list, &thread->comm_list);
 	}
 
-	new = comm__new(str, timestamp);
-	if (!new)
-		return -ENOMEM;
-
-	list_add(&new->list, &thread->comm_list);
 	thread->comm_set = true;
 
 	return 0;
diff --git a/tools/power/cpupower/man/cpupower-idle-info.1 b/tools/power/cpupower/man/cpupower-idle-info.1
index 4178effd9e99..7b3646adb92f 100644
--- a/tools/power/cpupower/man/cpupower-idle-info.1
+++ b/tools/power/cpupower/man/cpupower-idle-info.1
@@ -87,4 +87,5 @@ Thomas Renninger <trenn@suse.de>
 .fi
 .SH "SEE ALSO"
 .LP
-cpupower(1), cpupower\-monitor(1), cpupower\-info(1), cpupower\-set(1)
+cpupower(1), cpupower\-monitor(1), cpupower\-info(1), cpupower\-set(1),
+cpupower\-idle\-set(1)
diff --git a/tools/power/cpupower/man/cpupower-idle-set.1 b/tools/power/cpupower/man/cpupower-idle-set.1
new file mode 100644
index 000000000000..6b1607272a5b
--- /dev/null
+++ b/tools/power/cpupower/man/cpupower-idle-set.1
@@ -0,0 +1,71 @@
1.TH "CPUPOWER-IDLE-SET" "1" "0.1" "" "cpupower Manual"
2.SH "NAME"
3.LP
4cpupower idle\-set \- Utility to set cpu idle state specific kernel options
5.SH "SYNTAX"
6.LP
7cpupower [ \-c cpulist ] idle\-info [\fIoptions\fP]
8.SH "DESCRIPTION"
9.LP
10The cpupower idle\-set subcommand allows to set cpu idle, also called cpu
11sleep state, specific options offered by the kernel. One example is disabling
12sleep states. This can be handy for power vs performance tuning.
13.SH "OPTIONS"
14.LP
15.TP
16\fB\-d\fR \fB\-\-disable\fR
17Disable a specific processor sleep state.
18.TP
19\fB\-e\fR \fB\-\-enable\fR
20Enable a specific processor sleep state.
21
22.SH "REMARKS"
23.LP
24Cpuidle Governors Policy on Disabling Sleep States
25
26.RS 4
27Depending on the used cpuidle governor, implementing the kernel policy
28how to choose sleep states, subsequent sleep states on this core, might get
29disabled as well.
30
31There are two cpuidle governors ladder and menu. While the ladder
32governor is always available, if CONFIG_CPU_IDLE is selected, the
33menu governor additionally requires CONFIG_NO_HZ.
34
35The behavior and the effect of the disable variable depends on the
36implementation of a particular governor. In the ladder governor, for
37example, it is not coherent, i.e. if one is disabling a light state,
38then all deeper states are disabled as well. Likewise, if one enables a
39deep state but a lighter state still is disabled, then this has no effect.
40.RE
41.LP
42Disabling the Lightest Sleep State may not have any Affect
43
44.RS 4
45If criteria are not met to enter deeper sleep states and the lightest sleep
46state is chosen when idle, the kernel may still enter this sleep state,
47irrespective of whether it is disabled or not. This is also reflected in
48the usage count of the disabled sleep state when using the cpupower idle-info
49command.
50.RE
51.LP
52Selecting specific CPU Cores
53
54.RS 4
55By default processor sleep states of all CPU cores are set. Please refer
56to the cpupower(1) manpage in the \-\-cpu option section how to disable
57C-states of specific cores.
58.RE
59.SH "FILES"
60.nf
61\fI/sys/devices/system/cpu/cpu*/cpuidle/state*\fP
62\fI/sys/devices/system/cpu/cpuidle/*\fP
63.fi
64.SH "AUTHORS"
65.nf
66Thomas Renninger <trenn@suse.de>
67.fi
68.SH "SEE ALSO"
69.LP
70cpupower(1), cpupower\-monitor(1), cpupower\-info(1), cpupower\-set(1),
71cpupower\-idle\-info(1)
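For reference, the per-state knob that idle\-set toggles lives under the state directories listed in the FILES section above; a minimal user-space reader, assuming the usual per-state "disable" attribute (0 = enabled, 1 = disabled):

#include <stdio.h>

/* Read /sys/devices/system/cpu/cpu<cpu>/cpuidle/state<state>/disable. */
static int idlestate_disabled(unsigned int cpu, unsigned int state)
{
	char path[256];
	FILE *f;
	int val;

	snprintf(path, sizeof(path),
		 "/sys/devices/system/cpu/cpu%u/cpuidle/state%u/disable",
		 cpu, state);
	f = fopen(path, "r");
	if (!f)
		return -1;
	if (fscanf(f, "%d", &val) != 1)
		val = -1;
	fclose(f);
	return val;
}

int main(void)
{
	int d = idlestate_disabled(0, 1);

	if (d < 0)
		printf("state not present or unreadable\n");
	else
		printf("cpu0 state1 disabled: %d\n", d);
	return 0;
}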
diff --git a/tools/power/cpupower/utils/cpupower-set.c b/tools/power/cpupower/utils/cpupower-set.c
index dc4de3762111..bcf1d2f0b791 100644
--- a/tools/power/cpupower/utils/cpupower-set.c
+++ b/tools/power/cpupower/utils/cpupower-set.c
@@ -18,9 +18,9 @@
 #include "helpers/bitmask.h"
 
 static struct option set_opts[] = {
-	{ .name = "perf-bias", .has_arg = optional_argument, .flag = NULL, .val = 'b'},
-	{ .name = "sched-mc", .has_arg = optional_argument, .flag = NULL, .val = 'm'},
-	{ .name = "sched-smt", .has_arg = optional_argument, .flag = NULL, .val = 's'},
+	{ .name = "perf-bias", .has_arg = required_argument, .flag = NULL, .val = 'b'},
+	{ .name = "sched-mc", .has_arg = required_argument, .flag = NULL, .val = 'm'},
+	{ .name = "sched-smt", .has_arg = required_argument, .flag = NULL, .val = 's'},
 	{ },
 };
 
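The optional_argument to required_argument switch matters because getopt_long() only attaches the next separate argv word to an option when the argument is required; with optional_argument, an invocation like "cpupower set -b 10" leaves optarg NULL and "10" unparsed. A stand-alone sketch of that behaviour, reusing one option name from the table above:

#include <getopt.h>
#include <stdio.h>

int main(int argc, char **argv)
{
	static struct option opts[] = {
		/* with optional_argument here (and "b::"), "-b 10" would leave optarg == NULL */
		{ .name = "perf-bias", .has_arg = required_argument, .flag = NULL, .val = 'b' },
		{ },
	};
	int c;

	while ((c = getopt_long(argc, argv, "b:", opts, NULL)) != -1) {
		if (c == 'b')
			printf("perf-bias argument: %s\n", optarg);
	}
	return 0;
}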
diff --git a/tools/power/cpupower/utils/helpers/sysfs.c b/tools/power/cpupower/utils/helpers/sysfs.c
index 5cdc600e8152..851c7a16ca49 100644
--- a/tools/power/cpupower/utils/helpers/sysfs.c
+++ b/tools/power/cpupower/utils/helpers/sysfs.c
@@ -278,7 +278,7 @@ static char *sysfs_idlestate_get_one_string(unsigned int cpu,
 int sysfs_is_idlestate_disabled(unsigned int cpu,
 				unsigned int idlestate)
 {
-	if (sysfs_get_idlestate_count(cpu) < idlestate)
+	if (sysfs_get_idlestate_count(cpu) <= idlestate)
 		return -1;
 
 	if (!sysfs_idlestate_file_exists(cpu, idlestate,
@@ -303,7 +303,7 @@ int sysfs_idlestate_disable(unsigned int cpu,
 	char value[SYSFS_PATH_MAX];
 	int bytes_written;
 
-	if (sysfs_get_idlestate_count(cpu) < idlestate)
+	if (sysfs_get_idlestate_count(cpu) <= idlestate)
 		return -1;
 
 	if (!sysfs_idlestate_file_exists(cpu, idlestate,
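Both hunks fix the same off-by-one: sysfs_get_idlestate_count() returns the number of states, so valid indices run from 0 to count-1 and a request for idlestate == count must be rejected. A tiny illustration with a stand-in count function:

#include <stdio.h>

/* stand-in: pretend the CPU exposes states 0..3 */
static unsigned int get_idlestate_count(unsigned int cpu)
{
	(void)cpu;
	return 4;
}

static int idlestate_in_range(unsigned int cpu, unsigned int idlestate)
{
	/* "<" would wrongly accept idlestate == 4 here; "<=" rejects it */
	if (get_idlestate_count(cpu) <= idlestate)
		return 0;
	return 1;
}

int main(void)
{
	printf("state 3 valid: %d\n", idlestate_in_range(0, 3));	/* 1 */
	printf("state 4 valid: %d\n", idlestate_in_range(0, 4));	/* 0 */
	return 0;
}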
diff --git a/tools/usb/Makefile b/tools/usb/Makefile
index 396d6c44e9d7..acf2165c04e6 100644
--- a/tools/usb/Makefile
+++ b/tools/usb/Makefile
@@ -3,11 +3,12 @@
 CC = $(CROSS_COMPILE)gcc
 PTHREAD_LIBS = -lpthread
 WARNINGS = -Wall -Wextra
-CFLAGS = $(WARNINGS) -g $(PTHREAD_LIBS) -I../include
+CFLAGS = $(WARNINGS) -g -I../include
+LDFLAGS = $(PTHREAD_LIBS)
 
 all: testusb ffs-test
 %: %.c
-	$(CC) $(CFLAGS) -o $@ $^
+	$(CC) $(CFLAGS) -o $@ $^ $(LDFLAGS)
 
 clean:
 	$(RM) testusb ffs-test
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index a0aa84b5941a..4f588bc94186 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1898,6 +1898,9 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
 	int r;
 	struct kvm_vcpu *vcpu, *v;
 
+	if (id >= KVM_MAX_VCPUS)
+		return -EINVAL;
+
 	vcpu = kvm_arch_vcpu_create(kvm, id);
 	if (IS_ERR(vcpu))
 		return PTR_ERR(vcpu);