aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorDavid S. Miller <davem@davemloft.net>2015-09-26 19:08:27 -0400
committerDavid S. Miller <davem@davemloft.net>2015-09-26 19:08:27 -0400
commit4963ed48f2c20196d51a447ee87dc2815584fee4 (patch)
treea1902f466dafa00453889a4f1e66b00249ce0529
parent4d54d86546f62c7c4a0fe3b36a64c5e3b98ce1a9 (diff)
parent518a7cb6980cd640c7f979d29021ad870f60d7d7 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts: net/ipv4/arp.c The net/ipv4/arp.c conflict was one commit adding a new local variable while another commit was deleting one. Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--Documentation/devicetree/bindings/arm/gic-v3.txt5
-rw-r--r--Documentation/devicetree/bindings/arm/idle-states.txt2
-rw-r--r--Documentation/devicetree/bindings/gpio/gpio.txt4
-rw-r--r--Documentation/devicetree/bindings/iio/accel/bma180.txt8
-rw-r--r--Documentation/devicetree/bindings/input/touchscreen/colibri-vf50-ts.txt36
-rw-r--r--Documentation/devicetree/bindings/input/touchscreen/imx6ul_tsc.txt36
-rw-r--r--Documentation/devicetree/bindings/pci/pci-rcar-gen2.txt3
-rw-r--r--Documentation/devicetree/bindings/regulator/pbias-regulator.txt7
-rw-r--r--Documentation/devicetree/bindings/spi/spi-mt65xx.txt16
-rw-r--r--Documentation/devicetree/bindings/thermal/thermal.txt27
-rw-r--r--Documentation/devicetree/bindings/vendor-prefixes.txt1
-rw-r--r--Documentation/devicetree/bindings/watchdog/atmel-sama5d4-wdt.txt35
-rw-r--r--Documentation/devicetree/bindings/watchdog/lpc18xx-wdt.txt19
-rw-r--r--Documentation/gpio/board.txt40
-rw-r--r--Documentation/gpio/consumer.txt3
-rw-r--r--Documentation/hwmon/nct67754
-rw-r--r--Documentation/networking/vrf.txt96
-rw-r--r--Documentation/static-keys.txt4
-rw-r--r--Documentation/sysctl/net.txt16
-rw-r--r--Documentation/thermal/power_allocator.txt2
-rw-r--r--Documentation/thermal/sysfs-api.txt6
-rw-r--r--Documentation/watchdog/src/watchdog-test.c22
-rw-r--r--MAINTAINERS42
-rw-r--r--Makefile4
-rw-r--r--arch/alpha/include/asm/io.h4
-rw-r--r--arch/alpha/kernel/irq.c2
-rw-r--r--arch/alpha/kernel/pci.c7
-rw-r--r--arch/alpha/lib/udelay.c1
-rw-r--r--arch/arc/kernel/mcip.c2
-rw-r--r--arch/arc/plat-axs10x/axs10x.c2
-rw-r--r--arch/arm/Makefile8
-rw-r--r--arch/arm/common/it8152.c2
-rw-r--r--arch/arm/common/locomo.c2
-rw-r--r--arch/arm/common/sa1111.c6
-rw-r--r--arch/arm/include/asm/assembler.h5
-rw-r--r--arch/arm/include/asm/bug.h1
-rw-r--r--arch/arm/include/asm/domain.h6
-rw-r--r--arch/arm/include/asm/hardware/it8152.h2
-rw-r--r--arch/arm/include/asm/hw_irq.h6
-rw-r--r--arch/arm/include/asm/kvm_host.h10
-rw-r--r--arch/arm/include/asm/mach/irq.h4
-rw-r--r--arch/arm/include/asm/thread_info.h1
-rw-r--r--arch/arm/kernel/irq.c20
-rw-r--r--arch/arm/kernel/kgdb.c8
-rw-r--r--arch/arm/kernel/process.c2
-rw-r--r--arch/arm/kernel/signal.c15
-rw-r--r--arch/arm/kvm/Kconfig11
-rw-r--r--arch/arm/kvm/arm.c2
-rw-r--r--arch/arm/kvm/interrupts_head.S6
-rw-r--r--arch/arm/kvm/mmu.c6
-rw-r--r--arch/arm/kvm/psci.c12
-rw-r--r--arch/arm/mach-dove/irq.c6
-rw-r--r--arch/arm/mach-footbridge/isa-irq.c5
-rw-r--r--arch/arm/mach-gemini/gpio.c2
-rw-r--r--arch/arm/mach-imx/3ds_debugboard.c2
-rw-r--r--arch/arm/mach-imx/mach-mx31ads.c2
-rw-r--r--arch/arm/mach-iop13xx/msi.c2
-rw-r--r--arch/arm/mach-lpc32xx/irq.c4
-rw-r--r--arch/arm/mach-netx/generic.c3
-rw-r--r--arch/arm/mach-omap1/fpga.c2
-rw-r--r--arch/arm/mach-omap2/prm_common.c2
-rw-r--r--arch/arm/mach-pxa/balloon3.c2
-rw-r--r--arch/arm/mach-pxa/cm-x2xx-pci.c5
-rw-r--r--arch/arm/mach-pxa/lpd270.c2
-rw-r--r--arch/arm/mach-pxa/pcm990-baseboard.c2
-rw-r--r--arch/arm/mach-pxa/viper.c2
-rw-r--r--arch/arm/mach-pxa/zeus.c2
-rw-r--r--arch/arm/mach-rpc/ecard.c3
-rw-r--r--arch/arm/mach-s3c24xx/bast-irq.c4
-rw-r--r--arch/arm/mach-s3c64xx/common.c8
-rw-r--r--arch/arm/mach-sa1100/neponset.c2
-rw-r--r--arch/arm/mm/dma-mapping.c4
-rw-r--r--arch/arm/nwfpe/entry.S3
-rw-r--r--arch/arm/plat-orion/gpio.c2
-rw-r--r--arch/arm/xen/hypercall.S15
-rw-r--r--arch/arm64/Kconfig17
-rw-r--r--arch/arm64/Makefile4
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8173.dtsi2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3368.dtsi2
-rw-r--r--arch/arm64/include/asm/hardirq.h5
-rw-r--r--arch/arm64/include/asm/kvm_arm.h11
-rw-r--r--arch/arm64/include/asm/kvm_asm.h4
-rw-r--r--arch/arm64/include/asm/kvm_host.h10
-rw-r--r--arch/arm64/include/asm/pgtable.h12
-rw-r--r--arch/arm64/kernel/debug-monitors.c2
-rw-r--r--arch/arm64/kernel/head.S5
-rw-r--r--arch/arm64/kernel/hw_breakpoint.c2
-rw-r--r--arch/arm64/kernel/module.c2
-rw-r--r--arch/arm64/kernel/signal32.c47
-rw-r--r--arch/arm64/kvm/Kconfig11
-rw-r--r--arch/arm64/kvm/hyp.S31
-rw-r--r--arch/arm64/kvm/sys_regs.c15
-rw-r--r--arch/arm64/mm/dma-mapping.c2
-rw-r--r--arch/avr32/mach-at32ap/extint.c2
-rw-r--r--arch/avr32/mach-at32ap/pio.c2
-rw-r--r--arch/blackfin/include/asm/irq_handler.h4
-rw-r--r--arch/blackfin/kernel/irqchip.c2
-rw-r--r--arch/blackfin/mach-bf537/ints-priority.c10
-rw-r--r--arch/blackfin/mach-common/ints-priority.c5
-rw-r--r--arch/c6x/platforms/megamod-pic.c2
-rw-r--r--arch/cris/Kconfig12
-rw-r--r--arch/cris/arch-v10/kernel/entry.S8
-rw-r--r--arch/cris/arch-v10/lib/dmacopy.c42
-rw-r--r--arch/cris/arch-v10/lib/old_checksum.c86
-rw-r--r--arch/cris/arch-v32/drivers/Kconfig16
-rw-r--r--arch/cris/arch-v32/drivers/axisflashmap.c9
-rw-r--r--arch/cris/arch-v32/drivers/mach-a3/gpio.c4
-rw-r--r--arch/cris/arch-v32/drivers/mach-fs/gpio.c3
-rw-r--r--arch/cris/arch-v32/kernel/entry.S19
-rw-r--r--arch/cris/arch-v32/kernel/process.c4
-rw-r--r--arch/cris/arch-v32/kernel/signal.c1
-rw-r--r--arch/cris/arch-v32/mach-fs/pinmux.c8
-rw-r--r--arch/cris/configs/artpec_3_defconfig5
-rw-r--r--arch/cris/configs/etraxfs_defconfig1
-rw-r--r--arch/cris/include/arch-v32/arch/bug.h11
-rw-r--r--arch/cris/include/arch-v32/arch/irqflags.h2
-rw-r--r--arch/cris/include/asm/Kbuild17
-rw-r--r--arch/cris/include/asm/mmu_context.h9
-rw-r--r--arch/cris/include/asm/stacktrace.h8
-rw-r--r--arch/cris/include/asm/types.h12
-rw-r--r--arch/cris/include/asm/unistd.h2
-rw-r--r--arch/cris/include/uapi/asm/Kbuild5
-rw-r--r--arch/cris/include/uapi/asm/auxvec.h4
-rw-r--r--arch/cris/include/uapi/asm/bitsperlong.h1
-rw-r--r--arch/cris/include/uapi/asm/elf.h (renamed from arch/cris/include/asm/elf.h)9
-rw-r--r--arch/cris/include/uapi/asm/elf_v10.h (renamed from arch/cris/include/arch-v10/arch/elf.h)5
-rw-r--r--arch/cris/include/uapi/asm/elf_v32.h (renamed from arch/cris/include/arch-v32/arch/elf.h)5
-rw-r--r--arch/cris/include/uapi/asm/errno.h6
-rw-r--r--arch/cris/include/uapi/asm/fcntl.h1
-rw-r--r--arch/cris/include/uapi/asm/ioctl.h1
-rw-r--r--arch/cris/include/uapi/asm/ipcbuf.h1
-rw-r--r--arch/cris/include/uapi/asm/kvm_para.h1
-rw-r--r--arch/cris/include/uapi/asm/mman.h1
-rw-r--r--arch/cris/include/uapi/asm/msgbuf.h33
-rw-r--r--arch/cris/include/uapi/asm/poll.h1
-rw-r--r--arch/cris/include/uapi/asm/ptrace.h6
-rw-r--r--arch/cris/include/uapi/asm/ptrace_v10.h (renamed from arch/cris/include/arch-v10/arch/ptrace.h)0
-rw-r--r--arch/cris/include/uapi/asm/ptrace_v32.h (renamed from arch/cris/include/arch-v32/arch/ptrace.h)0
-rw-r--r--arch/cris/include/uapi/asm/resource.h6
-rw-r--r--arch/cris/include/uapi/asm/sembuf.h25
-rw-r--r--arch/cris/include/uapi/asm/shmbuf.h42
-rw-r--r--arch/cris/include/uapi/asm/siginfo.h6
-rw-r--r--arch/cris/include/uapi/asm/socket.h92
-rw-r--r--arch/cris/include/uapi/asm/sockios.h13
-rw-r--r--arch/cris/include/uapi/asm/statfs.h6
-rw-r--r--arch/cris/include/uapi/asm/types.h1
-rw-r--r--arch/cris/include/uapi/asm/unistd.h8
-rw-r--r--arch/cris/kernel/Makefile1
-rw-r--r--arch/cris/kernel/irq.c6
-rw-r--r--arch/cris/kernel/stacktrace.c76
-rw-r--r--arch/frv/mb93090-mb00/pci-vdk.c2
-rw-r--r--arch/ia64/include/asm/unistd.h2
-rw-r--r--arch/ia64/include/uapi/asm/unistd.h2
-rw-r--r--arch/ia64/kernel/entry.S2
-rw-r--r--arch/ia64/pci/pci.c5
-rw-r--r--arch/m68k/amiga/amiints.c8
-rw-r--r--arch/m68k/coldfire/intc-5272.c6
-rw-r--r--arch/m68k/include/asm/irq.h3
-rw-r--r--arch/m68k/include/asm/mac_via.h2
-rw-r--r--arch/m68k/mac/baboon.c2
-rw-r--r--arch/m68k/mac/oss.c4
-rw-r--r--arch/m68k/mac/psc.c2
-rw-r--r--arch/m68k/mac/via.c6
-rw-r--r--arch/metag/kernel/irq.c4
-rw-r--r--arch/microblaze/pci/pci-common.c9
-rw-r--r--arch/mips/alchemy/common/irq.c4
-rw-r--r--arch/mips/alchemy/devboards/bcsr.c2
-rw-r--r--arch/mips/ath25/ar2315.c2
-rw-r--r--arch/mips/ath25/ar5312.c2
-rw-r--r--arch/mips/ath79/irq.c8
-rw-r--r--arch/mips/cavium-octeon/octeon-irq.c2
-rw-r--r--arch/mips/include/asm/kvm_host.h2
-rw-r--r--arch/mips/include/asm/netlogic/common.h4
-rw-r--r--arch/mips/jz4740/gpio.c2
-rw-r--r--arch/mips/kvm/mips.c1
-rw-r--r--arch/mips/netlogic/common/smp.c4
-rw-r--r--arch/mips/pci/pci-ar2315.c2
-rw-r--r--arch/mips/pci/pci-ar71xx.c2
-rw-r--r--arch/mips/pci/pci-ar724x.c2
-rw-r--r--arch/mips/pci/pci-rt3883.c2
-rw-r--r--arch/mips/pci/pci.c6
-rw-r--r--arch/mips/ralink/irq.c2
-rw-r--r--arch/mn10300/unit-asb2305/pci.c1
-rw-r--r--arch/powerpc/boot/Makefile3
-rw-r--r--arch/powerpc/include/asm/kvm_host.h2
-rw-r--r--arch/powerpc/include/asm/qe_ic.h23
-rw-r--r--arch/powerpc/include/asm/systbl.h2
-rw-r--r--arch/powerpc/include/asm/tsi108_pci.h2
-rw-r--r--arch/powerpc/include/asm/unistd.h2
-rw-r--r--arch/powerpc/include/uapi/asm/unistd.h2
-rw-r--r--arch/powerpc/kernel/irq.c2
-rw-r--r--arch/powerpc/kernel/pci-common.c8
-rw-r--r--arch/powerpc/kernel/setup_32.c6
-rw-r--r--arch/powerpc/kvm/book3s.c7
-rw-r--r--arch/powerpc/kvm/book3s_hv.c6
-rw-r--r--arch/powerpc/kvm/book3s_hv_rmhandlers.S1
-rw-r--r--arch/powerpc/kvm/booke.c1
-rw-r--r--arch/powerpc/lib/copy_32.S11
-rw-r--r--arch/powerpc/mm/hugepage-hash64.c3
-rw-r--r--arch/powerpc/platforms/512x/mpc5121_ads_cpld.c5
-rw-r--r--arch/powerpc/platforms/52xx/media5200.c2
-rw-r--r--arch/powerpc/platforms/52xx/mpc52xx_gpt.c2
-rw-r--r--arch/powerpc/platforms/52xx/mpc52xx_pic.c2
-rw-r--r--arch/powerpc/platforms/82xx/pq2ads-pci-pic.c2
-rw-r--r--arch/powerpc/platforms/85xx/common.c2
-rw-r--r--arch/powerpc/platforms/85xx/mpc85xx_cds.c5
-rw-r--r--arch/powerpc/platforms/85xx/mpc85xx_ds.c2
-rw-r--r--arch/powerpc/platforms/85xx/socrates_fpga_pic.c3
-rw-r--r--arch/powerpc/platforms/86xx/pic.c2
-rw-r--r--arch/powerpc/platforms/8xx/m8xx_setup.c2
-rw-r--r--arch/powerpc/platforms/cell/axon_msi.c2
-rw-r--r--arch/powerpc/platforms/cell/interrupt.c3
-rw-r--r--arch/powerpc/platforms/cell/spider-pic.c2
-rw-r--r--arch/powerpc/platforms/chrp/setup.c2
-rw-r--r--arch/powerpc/platforms/embedded6xx/hlwd-pic.c3
-rw-r--r--arch/powerpc/platforms/embedded6xx/mvme5100.c2
-rw-r--r--arch/powerpc/platforms/pasemi/msi.c5
-rw-r--r--arch/powerpc/platforms/powernv/pci-ioda.c16
-rw-r--r--arch/powerpc/platforms/powernv/pci.c5
-rw-r--r--arch/powerpc/platforms/pseries/dlpar.c4
-rw-r--r--arch/powerpc/platforms/pseries/setup.c2
-rw-r--r--arch/powerpc/sysdev/cpm2_pic.c4
-rw-r--r--arch/powerpc/sysdev/fsl_msi.c5
-rw-r--r--arch/powerpc/sysdev/ge/ge_pic.c2
-rw-r--r--arch/powerpc/sysdev/ge/ge_pic.h2
-rw-r--r--arch/powerpc/sysdev/ipic.c4
-rw-r--r--arch/powerpc/sysdev/mpc8xx_pic.c2
-rw-r--r--arch/powerpc/sysdev/mpic.c2
-rw-r--r--arch/powerpc/sysdev/mpic_u3msi.c5
-rw-r--r--arch/powerpc/sysdev/ppc4xx_msi.c5
-rw-r--r--arch/powerpc/sysdev/qe_lib/qe_ic.c4
-rw-r--r--arch/powerpc/sysdev/tsi108_pci.c2
-rw-r--r--arch/powerpc/sysdev/uic.c2
-rw-r--r--arch/powerpc/sysdev/xics/ics-opal.c2
-rw-r--r--arch/powerpc/sysdev/xics/ics-rtas.c2
-rw-r--r--arch/powerpc/sysdev/xilinx_intc.c2
-rw-r--r--arch/s390/configs/zfcpdump_defconfig5
-rw-r--r--arch/s390/include/asm/kvm_host.h2
-rw-r--r--arch/s390/include/asm/unistd.h20
-rw-r--r--arch/s390/include/uapi/asm/unistd.h21
-rw-r--r--arch/s390/kernel/compat_signal.c27
-rw-r--r--arch/s390/kernel/compat_wrapper.c70
-rw-r--r--arch/s390/kernel/entry.S2
-rw-r--r--arch/s390/kernel/perf_cpum_cf.c12
-rw-r--r--arch/s390/kernel/swsusp.S38
-rw-r--r--arch/s390/kernel/syscalls.S121
-rw-r--r--arch/s390/kernel/vtime.c12
-rw-r--r--arch/s390/kvm/kvm-s390.c3
-rw-r--r--arch/sh/boards/mach-se/7343/irq.c2
-rw-r--r--arch/sh/boards/mach-se/7722/irq.c2
-rw-r--r--arch/sh/boards/mach-se/7724/irq.c2
-rw-r--r--arch/sh/boards/mach-x3proto/gpio.c2
-rw-r--r--arch/sh/cchips/hd6446x/hd64461.c2
-rw-r--r--arch/sparc/kernel/leon_kernel.c2
-rw-r--r--arch/sparc/kernel/leon_pci_grpci1.c2
-rw-r--r--arch/sparc/kernel/leon_pci_grpci2.c2
-rw-r--r--arch/tile/kernel/pci_gx.c5
-rw-r--r--arch/unicore32/kernel/irq.c2
-rw-r--r--arch/x86/Kconfig23
-rw-r--r--arch/x86/entry/syscalls/syscall_32.tbl1
-rw-r--r--arch/x86/entry/syscalls/syscall_64.tbl1
-rw-r--r--arch/x86/include/asm/cpufeature.h1
-rw-r--r--arch/x86/include/asm/efi.h10
-rw-r--r--arch/x86/include/asm/kvm_host.h2
-rw-r--r--arch/x86/include/asm/msr-index.h1
-rw-r--r--arch/x86/include/asm/paravirt_types.h1
-rw-r--r--arch/x86/include/asm/qspinlock.h19
-rw-r--r--arch/x86/kernel/alternative.c5
-rw-r--r--arch/x86/kernel/apic/apic.c7
-rw-r--r--arch/x86/kernel/apic/io_apic.c5
-rw-r--r--arch/x86/kernel/apic/vector.c4
-rw-r--r--arch/x86/kernel/cpu/common.c4
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel.c5
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_bts.c1
-rw-r--r--arch/x86/kernel/irq_32.c19
-rw-r--r--arch/x86/kernel/irq_64.c2
-rw-r--r--arch/x86/kernel/ldt.c4
-rw-r--r--arch/x86/kernel/pci-dma.c2
-rw-r--r--arch/x86/kernel/tsc.c17
-rw-r--r--arch/x86/kernel/vm86_32.c27
-rw-r--r--arch/x86/kvm/mmu.c25
-rw-r--r--arch/x86/kvm/svm.c4
-rw-r--r--arch/x86/kvm/vmx.c2
-rw-r--r--arch/x86/kvm/x86.c3
-rw-r--r--arch/x86/lguest/boot.c2
-rw-r--r--arch/x86/mm/srat.c5
-rw-r--r--arch/x86/pci/common.c1
-rw-r--r--arch/xtensa/kernel/pci.c4
-rw-r--r--block/bio-integrity.c5
-rw-r--r--block/blk-cgroup.c3
-rw-r--r--block/blk-integrity.c3
-rw-r--r--block/blk-map.c26
-rw-r--r--block/blk-merge.c59
-rw-r--r--block/bounce.c4
-rw-r--r--crypto/testmgr.c5
-rw-r--r--drivers/acpi/bus.c12
-rw-r--r--drivers/acpi/int340x_thermal.c9
-rw-r--r--drivers/acpi/thermal.c12
-rw-r--r--drivers/atm/he.c7
-rw-r--r--drivers/atm/solos-pci.c12
-rw-r--r--drivers/base/platform-msi.c18
-rw-r--r--drivers/base/power/domain.c38
-rw-r--r--drivers/base/power/opp.c28
-rw-r--r--drivers/block/null_blk.c36
-rw-r--r--drivers/block/rbd.c6
-rw-r--r--drivers/block/zram/zcomp.c12
-rw-r--r--drivers/clk/clk.c3
-rw-r--r--drivers/clk/h8300/clk-h8s2678.c1
-rw-r--r--drivers/clk/hisilicon/Kconfig8
-rw-r--r--drivers/clk/hisilicon/Makefile3
-rw-r--r--drivers/clk/rockchip/clk-rk3188.c9
-rw-r--r--drivers/clk/rockchip/clk-rk3368.c6
-rw-r--r--drivers/clk/st/clkgen-fsyn.c8
-rw-r--r--drivers/clk/st/clkgen-pll.c12
-rw-r--r--drivers/clk/tegra/clk-dfll.c8
-rw-r--r--drivers/cpufreq/Kconfig.arm1
-rw-r--r--drivers/cpufreq/acpi-cpufreq.c3
-rw-r--r--drivers/cpufreq/cpufreq-dt.c39
-rw-r--r--drivers/cpufreq/cpufreq.c9
-rw-r--r--drivers/cpufreq/intel_pstate.c38
-rw-r--r--drivers/cpuidle/coupled.c22
-rw-r--r--drivers/cpuidle/cpuidle.h6
-rw-r--r--drivers/cpuidle/driver.c4
-rw-r--r--drivers/crypto/Kconfig2
-rw-r--r--drivers/crypto/sunxi-ss/sun4i-ss-cipher.c2
-rw-r--r--drivers/devfreq/devfreq.c12
-rw-r--r--drivers/devfreq/event/exynos-ppmu.c3
-rw-r--r--drivers/devfreq/governor_simpleondemand.c33
-rw-r--r--drivers/devfreq/tegra-devfreq.c8
-rw-r--r--drivers/dma/ipu/ipu_irq.c2
-rw-r--r--drivers/edac/sb_edac.c72
-rw-r--r--drivers/firmware/efi/libstub/efistub.h4
-rw-r--r--drivers/gpio/Kconfig2
-rw-r--r--drivers/gpio/gpio-altera.c6
-rw-r--r--drivers/gpio/gpio-bcm-kona.c2
-rw-r--r--drivers/gpio/gpio-brcmstb.c2
-rw-r--r--drivers/gpio/gpio-davinci.c3
-rw-r--r--drivers/gpio/gpio-dwapb.c2
-rw-r--r--drivers/gpio/gpio-ep93xx.c5
-rw-r--r--drivers/gpio/gpio-intel-mid.c2
-rw-r--r--drivers/gpio/gpio-lynxpoint.c2
-rw-r--r--drivers/gpio/gpio-mpc8xxx.c2
-rw-r--r--drivers/gpio/gpio-msic.c2
-rw-r--r--drivers/gpio/gpio-msm-v2.c2
-rw-r--r--drivers/gpio/gpio-mvebu.c2
-rw-r--r--drivers/gpio/gpio-mxc.c16
-rw-r--r--drivers/gpio/gpio-mxs.c15
-rw-r--r--drivers/gpio/gpio-omap.c11
-rw-r--r--drivers/gpio/gpio-pl061.c2
-rw-r--r--drivers/gpio/gpio-pxa.c2
-rw-r--r--drivers/gpio/gpio-sa1100.c3
-rw-r--r--drivers/gpio/gpio-sx150x.c1
-rw-r--r--drivers/gpio/gpio-tegra.c2
-rw-r--r--drivers/gpio/gpio-timberdale.c2
-rw-r--r--drivers/gpio/gpio-tz1090.c4
-rw-r--r--drivers/gpio/gpio-vf610.c6
-rw-r--r--drivers/gpio/gpio-zx.c2
-rw-r--r--drivers/gpio/gpio-zynq.c2
-rw-r--r--drivers/gpio/gpiolib.c21
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c137
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c25
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.c19
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c47
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c65
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c27
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_test.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c80
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c45
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cz_smc.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/fiji_smc.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c74
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c79
-rw-r--r--drivers/gpu/drm/amd/amdgpu/iceland_smc.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/tonga_smc.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vi.c3
-rw-r--r--drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h41
-rw-r--r--drivers/gpu/drm/amd/scheduler/gpu_scheduler.c155
-rw-r--r--drivers/gpu/drm/amd/scheduler/gpu_scheduler.h41
-rw-r--r--drivers/gpu/drm/amd/scheduler/sched_fence.c4
-rw-r--r--drivers/gpu/drm/drm_ioctl.c3
-rw-r--r--drivers/gpu/drm/exynos/Kconfig1
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_g2d.c89
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.c97
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c3
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c26
-rw-r--r--drivers/gpu/drm/i915/intel_audio.c2
-rw-r--r--drivers/gpu/drm/i915/intel_bios.c12
-rw-r--r--drivers/gpu/drm/i915/intel_display.c7
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_fb.c31
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_main.c36
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c1
-rw-r--r--drivers/gpu/drm/qxl/qxl_display.c12
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c4
-rw-r--r--drivers/gpu/drm/radeon/si_dpm.c1
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c28
-rw-r--r--drivers/gpu/drm/vmwgfx/Kconfig2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c3
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c10
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h7
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c6
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c3
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource.c29
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_shader.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_surface.c11
-rw-r--r--drivers/gpu/ipu-v3/ipu-common.c7
-rw-r--r--drivers/hwmon/Kconfig4
-rw-r--r--drivers/hwmon/lm75.c2
-rw-r--r--drivers/hwmon/nct6775.c64
-rw-r--r--drivers/hwmon/ntc_thermistor.c2
-rw-r--r--drivers/hwmon/tmp102.c2
-rw-r--r--drivers/infiniband/Kconfig1
-rw-r--r--drivers/infiniband/hw/Makefile1
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.c29
-rw-r--r--drivers/input/evdev.c13
-rw-r--r--drivers/input/keyboard/imx_keypad.c2
-rw-r--r--drivers/input/misc/ab8500-ponkey.c1
-rw-r--r--drivers/input/misc/pwm-beeper.c1
-rw-r--r--drivers/input/misc/regulator-haptic.c1
-rw-r--r--drivers/input/misc/sparcspkr.c2
-rw-r--r--drivers/input/mouse/elan_i2c_core.c1
-rw-r--r--drivers/input/serio/i8042.c2
-rw-r--r--drivers/input/touchscreen/Kconfig24
-rw-r--r--drivers/input/touchscreen/Makefile2
-rw-r--r--drivers/input/touchscreen/colibri-vf50-ts.c386
-rw-r--r--drivers/input/touchscreen/cyttsp4_i2c.c1
-rw-r--r--drivers/input/touchscreen/cyttsp_i2c.c1
-rw-r--r--drivers/input/touchscreen/elants_i2c.c2
-rw-r--r--drivers/input/touchscreen/imx6ul_tsc.c523
-rw-r--r--drivers/input/touchscreen/sun4i-ts.c8
-rw-r--r--drivers/iommu/omap-iommu-debug.c3
-rw-r--r--drivers/irqchip/exynos-combiner.c8
-rw-r--r--drivers/irqchip/irq-armada-370-xp.c6
-rw-r--r--drivers/irqchip/irq-bcm2835.c6
-rw-r--r--drivers/irqchip/irq-bcm7038-l1.c2
-rw-r--r--drivers/irqchip/irq-bcm7120-l2.c2
-rw-r--r--drivers/irqchip/irq-brcmstb-l2.c7
-rw-r--r--drivers/irqchip/irq-clps711x.c6
-rw-r--r--drivers/irqchip/irq-dw-apb-ictl.c2
-rw-r--r--drivers/irqchip/irq-gic-v2m.c4
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c6
-rw-r--r--drivers/irqchip/irq-gic-v3.c19
-rw-r--r--drivers/irqchip/irq-gic.c86
-rw-r--r--drivers/irqchip/irq-hip04.c4
-rw-r--r--drivers/irqchip/irq-i8259.c2
-rw-r--r--drivers/irqchip/irq-imgpdc.c4
-rw-r--r--drivers/irqchip/irq-keystone.c4
-rw-r--r--drivers/irqchip/irq-metag-ext.c2
-rw-r--r--drivers/irqchip/irq-metag.c2
-rw-r--r--drivers/irqchip/irq-mips-gic.c2
-rw-r--r--drivers/irqchip/irq-mmp.c5
-rw-r--r--drivers/irqchip/irq-mxs.c1
-rw-r--r--drivers/irqchip/irq-orion.c2
-rw-r--r--drivers/irqchip/irq-renesas-intc-irqpin.c11
-rw-r--r--drivers/irqchip/irq-renesas-irqc.c10
-rw-r--r--drivers/irqchip/irq-s3c24xx.c16
-rw-r--r--drivers/irqchip/irq-sun4i.c2
-rw-r--r--drivers/irqchip/irq-sunxi-nmi.c2
-rw-r--r--drivers/irqchip/irq-tb10x.c2
-rw-r--r--drivers/irqchip/irq-versatile-fpga.c10
-rw-r--r--drivers/irqchip/irq-vic.c4
-rw-r--r--drivers/irqchip/irq-vt8500.c1
-rw-r--r--drivers/irqchip/spear-shirq.c3
-rw-r--r--drivers/leds/Kconfig3
-rw-r--r--drivers/leds/leds-aat1290.c3
-rw-r--r--drivers/leds/leds-bcm6328.c1
-rw-r--r--drivers/leds/leds-bcm6358.c1
-rw-r--r--drivers/leds/leds-ktd2692.c1
-rw-r--r--drivers/leds/leds-max77693.c1
-rw-r--r--drivers/leds/leds-ns2.c1
-rw-r--r--drivers/md/Kconfig2
-rw-r--r--drivers/md/dm-crypt.c17
-rw-r--r--drivers/md/dm-mpath.c27
-rw-r--r--drivers/md/dm-thin.c4
-rw-r--r--drivers/media/platform/omap/Kconfig1
-rw-r--r--drivers/media/platform/omap/omap_vout.c69
-rw-r--r--drivers/media/v4l2-core/Kconfig1
-rw-r--r--drivers/media/v4l2-core/videobuf2-core.c2
-rw-r--r--drivers/media/v4l2-core/videobuf2-dma-contig.c207
-rw-r--r--drivers/media/v4l2-core/videobuf2-dma-sg.c91
-rw-r--r--drivers/media/v4l2-core/videobuf2-memops.c148
-rw-r--r--drivers/media/v4l2-core/videobuf2-vmalloc.c90
-rw-r--r--drivers/mfd/asic3.c2
-rw-r--r--drivers/mfd/ezx-pcap.c2
-rw-r--r--drivers/mfd/htc-egpio.c2
-rw-r--r--drivers/mfd/jz4740-adc.c2
-rw-r--r--drivers/mfd/pm8921-core.c2
-rw-r--r--drivers/mfd/t7l66xb.c2
-rw-r--r--drivers/mfd/tc6393xb.c3
-rw-r--r--drivers/mfd/ucb1x00-core.c2
-rw-r--r--drivers/misc/cxl/Makefile2
-rw-r--r--drivers/misc/cxl/pci.c2
-rw-r--r--drivers/misc/cxl/sysfs.c2
-rw-r--r--drivers/misc/cxl/vphb.c6
-rw-r--r--drivers/misc/mei/wd.c1
-rw-r--r--drivers/net/arcnet/arcnet.c2
-rw-r--r--drivers/net/dsa/mv88e6xxx.c1
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_hw.c24
-rw-r--r--drivers/net/ethernet/arc/emac_arc.c1
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c1
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h1
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c20
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c12
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c1
-rw-r--r--drivers/net/ethernet/brocade/bna/bna_tx_rx.c2
-rw-r--r--drivers/net/ethernet/brocade/bna/bna_types.h1
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.c29
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.h2
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad_ethtool.c4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h5
-rw-r--r--drivers/net/ethernet/emulex/benet/be.h1
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c10
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c15
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ptp.c1
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.c8
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c8
-rw-r--r--drivers/net/ethernet/micrel/ks8851.c1
-rw-r--r--drivers/net/ethernet/moxa/moxart_ether.c1
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic.h1
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c18
-rw-r--r--drivers/net/ethernet/realtek/8139cp.c111
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c11
-rw-r--r--drivers/net/ethernet/sun/sunvnet.c17
-rw-r--r--drivers/net/ethernet/ti/netcp_core.c74
-rw-r--r--drivers/net/ethernet/ti/netcp_ethss.c47
-rw-r--r--drivers/net/ethernet/via/Kconfig2
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_emaclite.c2
-rw-r--r--drivers/net/fjes/fjes_hw.c8
-rw-r--r--drivers/net/geneve.c32
-rw-r--r--drivers/net/irda/ali-ircc.c6
-rw-r--r--drivers/net/macvtap.c4
-rw-r--r--drivers/net/ntb_netdev.c77
-rw-r--r--drivers/net/phy/fixed_phy.c2
-rw-r--r--drivers/net/phy/marvell.c9
-rw-r--r--drivers/net/phy/mdio-bcm-unimac.c1
-rw-r--r--drivers/net/phy/mdio-gpio.c1
-rw-r--r--drivers/net/phy/mdio-mux.c19
-rw-r--r--drivers/net/phy/mdio_bus.c31
-rw-r--r--drivers/net/phy/phy_device.c62
-rw-r--r--drivers/net/phy/vitesse.c14
-rw-r--r--drivers/net/ppp/ppp_generic.c4
-rw-r--r--drivers/net/usb/Kconfig11
-rw-r--r--drivers/net/usb/Makefile2
-rw-r--r--drivers/net/usb/ch9200.c432
-rw-r--r--drivers/net/vrf.c3
-rw-r--r--drivers/net/vxlan.c15
-rw-r--r--drivers/ntb/hw/intel/ntb_hw_intel.c39
-rw-r--r--drivers/ntb/hw/intel/ntb_hw_intel.h3
-rw-r--r--drivers/ntb/ntb_transport.c126
-rw-r--r--drivers/nvdimm/btt_devs.c4
-rw-r--r--drivers/nvdimm/pfn_devs.c4
-rw-r--r--drivers/nvdimm/pmem.c2
-rw-r--r--drivers/of/of_mdio.c27
-rw-r--r--drivers/of/of_pci_irq.c22
-rw-r--r--drivers/parisc/dino.c3
-rw-r--r--drivers/parisc/lba_pci.c1
-rw-r--r--drivers/pci/access.c27
-rw-r--r--drivers/pci/bus.c2
-rw-r--r--drivers/pci/host/pci-keystone.c5
-rw-r--r--drivers/pci/host/pci-rcar-gen2.c1
-rw-r--r--drivers/pci/host/pci-xgene-msi.c2
-rw-r--r--drivers/pci/probe.c23
-rw-r--r--drivers/pci/quirks.c20
-rw-r--r--drivers/pinctrl/bcm/pinctrl-cygnus-gpio.c2
-rw-r--r--drivers/pinctrl/core.c3
-rw-r--r--drivers/pinctrl/intel/pinctrl-baytrail.c2
-rw-r--r--drivers/pinctrl/intel/pinctrl-cherryview.c2
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel.c2
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-common.c2
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-nomadik.c4
-rw-r--r--drivers/pinctrl/pinctrl-adi2.c3
-rw-r--r--drivers/pinctrl/pinctrl-amd.c6
-rw-r--r--drivers/pinctrl/pinctrl-at91.c2
-rw-r--r--drivers/pinctrl/pinctrl-coh901.c2
-rw-r--r--drivers/pinctrl/pinctrl-digicolor.c4
-rw-r--r--drivers/pinctrl/pinctrl-pistachio.c10
-rw-r--r--drivers/pinctrl/pinctrl-rockchip.c2
-rw-r--r--drivers/pinctrl/pinctrl-single.c2
-rw-r--r--drivers/pinctrl/pinctrl-st.c4
-rw-r--r--drivers/pinctrl/pinmux.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm.c5
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c4
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c4
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos.c4
-rw-r--r--drivers/pinctrl/samsung/pinctrl-s3c24xx.c10
-rw-r--r--drivers/pinctrl/samsung/pinctrl-s3c64xx.c10
-rw-r--r--drivers/pinctrl/sirf/pinctrl-atlas7.c4
-rw-r--r--drivers/pinctrl/sirf/pinctrl-sirf.c4
-rw-r--r--drivers/pinctrl/spear/pinctrl-plgpio.c2
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sunxi.c12
-rw-r--r--drivers/platform/x86/acerhdf.c9
-rw-r--r--drivers/platform/x86/asus-nb-wmi.c18
-rw-r--r--drivers/platform/x86/hp-wmi.c31
-rw-r--r--drivers/platform/x86/intel_mid_thermal.c9
-rw-r--r--drivers/platform/x86/toshiba_acpi.c10
-rw-r--r--drivers/platform/x86/wmi.c51
-rw-r--r--drivers/power/charger-manager.c2
-rw-r--r--drivers/power/power_supply_core.c2
-rw-r--r--drivers/power/twl4030_charger.c8
-rw-r--r--drivers/regulator/anatop-regulator.c1
-rw-r--r--drivers/regulator/core.c21
-rw-r--r--drivers/regulator/gpio-regulator.c1
-rw-r--r--drivers/regulator/pbias-regulator.c56
-rw-r--r--drivers/regulator/tps65218-regulator.c2
-rw-r--r--drivers/regulator/vexpress.c1
-rw-r--r--drivers/s390/virtio/virtio_ccw.c10
-rw-r--r--drivers/scsi/Makefile1
-rw-r--r--drivers/scsi/aic94xx/aic94xx_sds.c5
-rw-r--r--drivers/scsi/bfa/bfa_ioc.c24
-rw-r--r--drivers/scsi/device_handler/Kconfig2
-rw-r--r--drivers/scsi/device_handler/Makefile1
-rw-r--r--drivers/scsi/device_handler/scsi_dh.c621
-rw-r--r--drivers/scsi/device_handler/scsi_dh_alua.c31
-rw-r--r--drivers/scsi/device_handler/scsi_dh_emc.c58
-rw-r--r--drivers/scsi/device_handler/scsi_dh_hp_sw.c55
-rw-r--r--drivers/scsi/device_handler/scsi_dh_rdac.c80
-rw-r--r--drivers/scsi/fcoe/fcoe.c2
-rw-r--r--drivers/scsi/ipr.c8
-rw-r--r--drivers/scsi/libiscsi.c9
-rw-r--r--drivers/scsi/lpfc/lpfc_mbox.c7
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_base.c6
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_base.h41
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_ctl.c38
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_scsih.c605
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_transport.c12
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2.h8
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h52
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2_ioc.h4
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2_tool.h4
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c310
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.h57
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c343
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_transport.c22
-rw-r--r--drivers/scsi/pm8001/pm8001_hwi.c1
-rw-r--r--drivers/scsi/pm8001/pm80xx_hwi.c1
-rw-r--r--drivers/scsi/qla2xxx/Kconfig4
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.c13
-rw-r--r--drivers/scsi/scsi_common.c109
-rw-r--r--drivers/scsi/scsi_debug.c158
-rw-r--r--drivers/scsi/scsi_dh.c437
-rw-r--r--drivers/scsi/scsi_error.c85
-rw-r--r--drivers/scsi/scsi_lib.c6
-rw-r--r--drivers/scsi/scsi_priv.h9
-rw-r--r--drivers/scsi/scsi_sysfs.c10
-rw-r--r--drivers/scsi/scsi_transport_sas.c10
-rw-r--r--drivers/sh/intc/core.c2
-rw-r--r--drivers/sh/intc/internals.h10
-rw-r--r--drivers/sh/intc/virq.c4
-rw-r--r--drivers/sh/pm_runtime.c19
-rw-r--r--drivers/soc/dove/pmu.c6
-rw-r--r--drivers/spi/spi-atmel.c2
-rw-r--r--drivers/spi/spi-bcm2835.c6
-rw-r--r--drivers/spi/spi-meson-spifc.c1
-rw-r--r--drivers/spi/spi-mt65xx.c53
-rw-r--r--drivers/spi/spi-pxa2xx.c4
-rw-r--r--drivers/spi/spi-xtensa-xtfpga.c4
-rw-r--r--drivers/spi/spi.c3
-rw-r--r--drivers/spi/spidev.c3
-rw-r--r--drivers/spmi/spmi-pmic-arb.c2
-rw-r--r--drivers/staging/board/armadillo800eva.c2
-rw-r--r--drivers/staging/board/board.c36
-rw-r--r--drivers/staging/rdma/Kconfig2
-rw-r--r--drivers/staging/rdma/Makefile1
-rw-r--r--drivers/staging/rdma/ehca/Kconfig (renamed from drivers/infiniband/hw/ehca/Kconfig)3
-rw-r--r--drivers/staging/rdma/ehca/Makefile (renamed from drivers/infiniband/hw/ehca/Makefile)0
-rw-r--r--drivers/staging/rdma/ehca/TODO4
-rw-r--r--drivers/staging/rdma/ehca/ehca_av.c (renamed from drivers/infiniband/hw/ehca/ehca_av.c)0
-rw-r--r--drivers/staging/rdma/ehca/ehca_classes.h (renamed from drivers/infiniband/hw/ehca/ehca_classes.h)0
-rw-r--r--drivers/staging/rdma/ehca/ehca_classes_pSeries.h (renamed from drivers/infiniband/hw/ehca/ehca_classes_pSeries.h)0
-rw-r--r--drivers/staging/rdma/ehca/ehca_cq.c (renamed from drivers/infiniband/hw/ehca/ehca_cq.c)0
-rw-r--r--drivers/staging/rdma/ehca/ehca_eq.c (renamed from drivers/infiniband/hw/ehca/ehca_eq.c)0
-rw-r--r--drivers/staging/rdma/ehca/ehca_hca.c (renamed from drivers/infiniband/hw/ehca/ehca_hca.c)0
-rw-r--r--drivers/staging/rdma/ehca/ehca_irq.c (renamed from drivers/infiniband/hw/ehca/ehca_irq.c)0
-rw-r--r--drivers/staging/rdma/ehca/ehca_irq.h (renamed from drivers/infiniband/hw/ehca/ehca_irq.h)0
-rw-r--r--drivers/staging/rdma/ehca/ehca_iverbs.h (renamed from drivers/infiniband/hw/ehca/ehca_iverbs.h)0
-rw-r--r--drivers/staging/rdma/ehca/ehca_main.c (renamed from drivers/infiniband/hw/ehca/ehca_main.c)0
-rw-r--r--drivers/staging/rdma/ehca/ehca_mcast.c (renamed from drivers/infiniband/hw/ehca/ehca_mcast.c)0
-rw-r--r--drivers/staging/rdma/ehca/ehca_mrmw.c (renamed from drivers/infiniband/hw/ehca/ehca_mrmw.c)0
-rw-r--r--drivers/staging/rdma/ehca/ehca_mrmw.h (renamed from drivers/infiniband/hw/ehca/ehca_mrmw.h)0
-rw-r--r--drivers/staging/rdma/ehca/ehca_pd.c (renamed from drivers/infiniband/hw/ehca/ehca_pd.c)0
-rw-r--r--drivers/staging/rdma/ehca/ehca_qes.h (renamed from drivers/infiniband/hw/ehca/ehca_qes.h)0
-rw-r--r--drivers/staging/rdma/ehca/ehca_qp.c (renamed from drivers/infiniband/hw/ehca/ehca_qp.c)0
-rw-r--r--drivers/staging/rdma/ehca/ehca_reqs.c (renamed from drivers/infiniband/hw/ehca/ehca_reqs.c)0
-rw-r--r--drivers/staging/rdma/ehca/ehca_sqp.c (renamed from drivers/infiniband/hw/ehca/ehca_sqp.c)0
-rw-r--r--drivers/staging/rdma/ehca/ehca_tools.h (renamed from drivers/infiniband/hw/ehca/ehca_tools.h)0
-rw-r--r--drivers/staging/rdma/ehca/ehca_uverbs.c (renamed from drivers/infiniband/hw/ehca/ehca_uverbs.c)0
-rw-r--r--drivers/staging/rdma/ehca/hcp_if.c (renamed from drivers/infiniband/hw/ehca/hcp_if.c)0
-rw-r--r--drivers/staging/rdma/ehca/hcp_if.h (renamed from drivers/infiniband/hw/ehca/hcp_if.h)0
-rw-r--r--drivers/staging/rdma/ehca/hcp_phyp.c (renamed from drivers/infiniband/hw/ehca/hcp_phyp.c)0
-rw-r--r--drivers/staging/rdma/ehca/hcp_phyp.h (renamed from drivers/infiniband/hw/ehca/hcp_phyp.h)0
-rw-r--r--drivers/staging/rdma/ehca/hipz_fns.h (renamed from drivers/infiniband/hw/ehca/hipz_fns.h)0
-rw-r--r--drivers/staging/rdma/ehca/hipz_fns_core.h (renamed from drivers/infiniband/hw/ehca/hipz_fns_core.h)0
-rw-r--r--drivers/staging/rdma/ehca/hipz_hw.h (renamed from drivers/infiniband/hw/ehca/hipz_hw.h)0
-rw-r--r--drivers/staging/rdma/ehca/ipz_pt_fn.c (renamed from drivers/infiniband/hw/ehca/ipz_pt_fn.c)0
-rw-r--r--drivers/staging/rdma/ehca/ipz_pt_fn.h (renamed from drivers/infiniband/hw/ehca/ipz_pt_fn.h)0
-rw-r--r--drivers/staging/rdma/hfi1/chip.c4
-rw-r--r--drivers/staging/rdma/hfi1/device.c54
-rw-r--r--drivers/staging/rdma/hfi1/device.h3
-rw-r--r--drivers/staging/rdma/hfi1/diag.c36
-rw-r--r--drivers/staging/rdma/hfi1/file_ops.c10
-rw-r--r--drivers/staging/rdma/hfi1/mad.c4
-rw-r--r--drivers/staging/rdma/hfi1/sdma.c6
-rw-r--r--drivers/staging/rdma/hfi1/sdma.h36
-rw-r--r--drivers/staging/rdma/hfi1/verbs.c8
-rw-r--r--drivers/target/iscsi/iscsi_target.c86
-rw-r--r--drivers/target/iscsi/iscsi_target.h6
-rw-r--r--drivers/target/iscsi/iscsi_target_configfs.c34
-rw-r--r--drivers/target/iscsi/iscsi_target_device.c10
-rw-r--r--drivers/target/iscsi/iscsi_target_login.c78
-rw-r--r--drivers/target/iscsi/iscsi_target_login.h4
-rw-r--r--drivers/target/iscsi/iscsi_target_nego.c7
-rw-r--r--drivers/target/iscsi/iscsi_target_stat.c2
-rw-r--r--drivers/target/iscsi/iscsi_target_tmr.c2
-rw-r--r--drivers/target/iscsi/iscsi_target_tpg.c38
-rw-r--r--drivers/target/iscsi/iscsi_target_tpg.h3
-rw-r--r--drivers/target/iscsi/iscsi_target_util.c39
-rw-r--r--drivers/target/loopback/tcm_loop.c22
-rw-r--r--drivers/target/target_core_device.c11
-rw-r--r--drivers/target/target_core_fabric_configfs.c2
-rw-r--r--drivers/target/target_core_hba.c5
-rw-r--r--drivers/target/target_core_sbc.c49
-rw-r--r--drivers/target/target_core_spc.c55
-rw-r--r--drivers/target/target_core_tpg.c17
-rw-r--r--drivers/target/target_core_transport.c507
-rw-r--r--drivers/target/target_core_user.c14
-rw-r--r--drivers/target/target_core_xcopy.c6
-rw-r--r--drivers/target/tcm_fc/tfc_cmd.c2
-rw-r--r--drivers/thermal/Kconfig25
-rw-r--r--drivers/thermal/Makefile1
-rw-r--r--drivers/thermal/armada_thermal.c2
-rw-r--r--drivers/thermal/cpu_cooling.c52
-rw-r--r--drivers/thermal/db8500_cpufreq_cooling.c1
-rw-r--r--drivers/thermal/db8500_thermal.c7
-rw-r--r--drivers/thermal/dove_thermal.c2
-rw-r--r--drivers/thermal/fair_share.c2
-rw-r--r--drivers/thermal/gov_bang_bang.c5
-rw-r--r--drivers/thermal/hisi_thermal.c4
-rw-r--r--drivers/thermal/imx_thermal.c27
-rw-r--r--drivers/thermal/int340x_thermal/int3400_thermal.c2
-rw-r--r--drivers/thermal/int340x_thermal/int340x_thermal_zone.c10
-rw-r--r--drivers/thermal/int340x_thermal/int340x_thermal_zone.h8
-rw-r--r--drivers/thermal/int340x_thermal/processor_thermal_device.c4
-rw-r--r--drivers/thermal/intel_pch_thermal.c283
-rw-r--r--drivers/thermal/intel_powerclamp.c3
-rw-r--r--drivers/thermal/intel_quark_dts_thermal.c13
-rw-r--r--drivers/thermal/intel_soc_dts_iosf.c8
-rw-r--r--drivers/thermal/kirkwood_thermal.c2
-rw-r--r--drivers/thermal/of-thermal.c14
-rw-r--r--drivers/thermal/power_allocator.c257
-rw-r--r--drivers/thermal/qcom-spmi-temp-alarm.c2
-rw-r--r--drivers/thermal/rcar_thermal.c7
-rw-r--r--drivers/thermal/rockchip_thermal.c10
-rw-r--r--drivers/thermal/samsung/exynos_tmu.c23
-rw-r--r--drivers/thermal/spear_thermal.c2
-rw-r--r--drivers/thermal/st/st_thermal.c5
-rw-r--r--drivers/thermal/step_wise.c4
-rw-r--r--drivers/thermal/tegra_soctherm.c4
-rw-r--r--drivers/thermal/thermal_core.c137
-rw-r--r--drivers/thermal/thermal_hwmon.c10
-rw-r--r--drivers/thermal/ti-soc-thermal/Kconfig8
-rw-r--r--drivers/thermal/ti-soc-thermal/ti-thermal-common.c10
-rw-r--r--drivers/thermal/x86_pkg_temp_thermal.c10
-rw-r--r--drivers/vhost/net.c3
-rw-r--r--drivers/vhost/scsi.c4
-rw-r--r--drivers/vhost/test.c3
-rw-r--r--drivers/vhost/vhost.h4
-rw-r--r--drivers/watchdog/Kconfig22
-rw-r--r--drivers/watchdog/Makefile2
-rw-r--r--drivers/watchdog/at91rm9200_wdt.c2
-rw-r--r--drivers/watchdog/at91sam9_wdt.c22
-rw-r--r--drivers/watchdog/at91sam9_wdt.h2
-rw-r--r--drivers/watchdog/bcm2835_wdt.c1
-rw-r--r--drivers/watchdog/bcm47xx_wdt.c1
-rw-r--r--drivers/watchdog/bcm_kona_wdt.c1
-rw-r--r--drivers/watchdog/booke_wdt.c4
-rw-r--r--drivers/watchdog/coh901327_wdt.c1
-rw-r--r--drivers/watchdog/da9052_wdt.c1
-rw-r--r--drivers/watchdog/da9055_wdt.c1
-rw-r--r--drivers/watchdog/da9062_wdt.c1
-rw-r--r--drivers/watchdog/da9063_wdt.c1
-rw-r--r--drivers/watchdog/davinci_wdt.c1
-rw-r--r--drivers/watchdog/digicolor_wdt.c1
-rw-r--r--drivers/watchdog/ep93xx_wdt.c1
-rw-r--r--drivers/watchdog/gpio_wdt.c65
-rw-r--r--drivers/watchdog/ie6xx_wdt.c1
-rw-r--r--drivers/watchdog/imgpdc_wdt.c1
-rw-r--r--drivers/watchdog/intel-mid_wdt.c1
-rw-r--r--drivers/watchdog/jz4740_wdt.c1
-rw-r--r--drivers/watchdog/lpc18xx_wdt.c340
-rw-r--r--drivers/watchdog/mena21_wdt.c1
-rw-r--r--drivers/watchdog/menf21bmc_wdt.c1
-rw-r--r--drivers/watchdog/mpc8xxx_wdt.c156
-rw-r--r--drivers/watchdog/mtk_wdt.c39
-rw-r--r--drivers/watchdog/nv_tco.c2
-rw-r--r--drivers/watchdog/omap_wdt.c1
-rw-r--r--drivers/watchdog/orion_wdt.c1
-rw-r--r--drivers/watchdog/pnx4008_wdt.c1
-rw-r--r--drivers/watchdog/qcom-wdt.c1
-rw-r--r--drivers/watchdog/retu_wdt.c1
-rw-r--r--drivers/watchdog/rt2880_wdt.c1
-rw-r--r--drivers/watchdog/s3c2410_wdt.c1
-rw-r--r--drivers/watchdog/sama5d4_wdt.c280
-rw-r--r--drivers/watchdog/shwdt.c1
-rw-r--r--drivers/watchdog/sirfsoc_wdt.c1
-rw-r--r--drivers/watchdog/sp805_wdt.c1
-rw-r--r--drivers/watchdog/st_lpc_wdt.c1
-rw-r--r--drivers/watchdog/stmp3xxx_rtc_wdt.c1
-rw-r--r--drivers/watchdog/sunxi_wdt.c2
-rw-r--r--drivers/watchdog/tegra_wdt.c1
-rw-r--r--drivers/watchdog/twl4030_wdt.c1
-rw-r--r--drivers/watchdog/txx9wdt.c1
-rw-r--r--drivers/watchdog/ux500_wdt.c1
-rw-r--r--drivers/watchdog/via_wdt.c1
-rw-r--r--drivers/watchdog/wm831x_wdt.c1
-rw-r--r--drivers/watchdog/wm8350_wdt.c1
-rw-r--r--fs/block_dev.c7
-rw-r--r--fs/btrfs/async-thread.c57
-rw-r--r--fs/btrfs/async-thread.h2
-rw-r--r--fs/btrfs/btrfs_inode.h2
-rw-r--r--fs/btrfs/dev-replace.c3
-rw-r--r--fs/btrfs/disk-io.c78
-rw-r--r--fs/btrfs/disk-io.h1
-rw-r--r--fs/btrfs/extent-tree.c7
-rw-r--r--fs/btrfs/extent_io.c65
-rw-r--r--fs/btrfs/inode.c48
-rw-r--r--fs/btrfs/scrub.c12
-rw-r--r--fs/btrfs/super.c2
-rw-r--r--fs/btrfs/transaction.c32
-rw-r--r--fs/btrfs/transaction.h5
-rw-r--r--fs/btrfs/tree-defrag.c3
-rw-r--r--fs/btrfs/volumes.c21
-rw-r--r--fs/ceph/addr.c6
-rw-r--r--fs/ceph/caps.c8
-rw-r--r--fs/ceph/file.c14
-rw-r--r--fs/ceph/mds_client.c59
-rw-r--r--fs/ceph/mds_client.h1
-rw-r--r--fs/ceph/snap.c7
-rw-r--r--fs/ceph/super.c1
-rw-r--r--fs/cifs/cifsfs.c5
-rw-r--r--fs/cifs/ioctl.c6
-rw-r--r--fs/dax.c3
-rw-r--r--fs/fs-writeback.c30
-rw-r--r--fs/gfs2/glock.c348
-rw-r--r--fs/gfs2/glops.c38
-rw-r--r--fs/gfs2/incore.h15
-rw-r--r--fs/gfs2/lock_dlm.c12
-rw-r--r--fs/gfs2/lops.c6
-rw-r--r--fs/gfs2/meta_io.c6
-rw-r--r--fs/gfs2/meta_io.h2
-rw-r--r--fs/gfs2/quota.c22
-rw-r--r--fs/gfs2/rgrp.c10
-rw-r--r--fs/gfs2/trace_gfs2.h34
-rw-r--r--fs/gfs2/trans.c4
-rw-r--r--fs/nfs/delegation.c8
-rw-r--r--fs/nfs/delegation.h2
-rw-r--r--fs/nfs/direct.c7
-rw-r--r--fs/nfs/filelayout/filelayout.c31
-rw-r--r--fs/nfs/nfs42proc.c4
-rw-r--r--fs/nfs/nfs4proc.c127
-rw-r--r--fs/nfs/nfs4state.c2
-rw-r--r--fs/nfs/pagelist.c2
-rw-r--r--fs/nfs/pnfs.c35
-rw-r--r--fs/nfs/pnfs.h7
-rw-r--r--fs/nfs/read.c3
-rw-r--r--fs/nfs/write.c3
-rw-r--r--fs/nsfs.c3
-rw-r--r--fs/ocfs2/dlm/dlmmaster.c9
-rw-r--r--fs/ocfs2/dlm/dlmrecovery.c14
-rw-r--r--fs/seq_file.c70
-rw-r--r--fs/userfaultfd.c12
-rw-r--r--include/acpi/button.h4
-rw-r--r--include/acpi/video.h2
-rw-r--r--include/asm-generic/memory_model.h2
-rw-r--r--include/asm-generic/qspinlock.h4
-rw-r--r--include/kvm/arm_vgic.h6
-rw-r--r--include/linux/backing-dev.h11
-rw-r--r--include/linux/blkdev.h52
-rw-r--r--include/linux/ceph/ceph_features.h1
-rw-r--r--include/linux/ceph/libceph.h2
-rw-r--r--include/linux/ceph/messenger.h6
-rw-r--r--include/linux/ceph/msgr.h4
-rw-r--r--include/linux/cgroup-defs.h27
-rw-r--r--include/linux/clockchips.h29
-rw-r--r--include/linux/cpufreq.h5
-rw-r--r--include/linux/devfreq.h24
-rw-r--r--include/linux/init_task.h8
-rw-r--r--include/linux/irq.h81
-rw-r--r--include/linux/irqdesc.h41
-rw-r--r--include/linux/irqhandler.h2
-rw-r--r--include/linux/jump_label.h10
-rw-r--r--include/linux/mm.h44
-rw-r--r--include/linux/netdevice.h1
-rw-r--r--include/linux/ntb.h9
-rw-r--r--include/linux/ntb_transport.h1
-rw-r--r--include/linux/phy.h6
-rw-r--r--include/linux/pm_opp.h6
-rw-r--r--include/linux/sched.h12
-rw-r--r--include/linux/security.h2
-rw-r--r--include/linux/seq_file.h19
-rw-r--r--include/linux/skbuff.h9
-rw-r--r--include/linux/spi/spi.h2
-rw-r--r--include/linux/sunrpc/xprtsock.h3
-rw-r--r--include/linux/syscalls.h2
-rw-r--r--include/linux/thermal.h34
-rw-r--r--include/linux/tick.h9
-rw-r--r--include/linux/wait.h5
-rw-r--r--include/media/videobuf2-memops.h11
-rw-r--r--include/net/flow.h1
-rw-r--r--include/net/inet_timewait_sock.h14
-rw-r--r--include/net/ip6_fib.h3
-rw-r--r--include/net/ip6_tunnel.h17
-rw-r--r--include/net/ip_fib.h30
-rw-r--r--include/net/ip_tunnels.h2
-rw-r--r--include/net/route.h2
-rw-r--r--include/rdma/opa_port_info.h4
-rw-r--r--include/scsi/scsi_common.h5
-rw-r--r--include/scsi/scsi_device.h27
-rw-r--r--include/scsi/scsi_dh.h29
-rw-r--r--include/scsi/scsi_eh.h6
-rw-r--r--include/target/iscsi/iscsi_target_core.h15
-rw-r--r--include/target/iscsi/iscsi_target_stat.h2
-rw-r--r--include/target/iscsi/iscsi_transport.h2
-rw-r--r--include/target/target_core_backend.h2
-rw-r--r--include/target/target_core_base.h27
-rw-r--r--include/target/target_core_fabric.h14
-rw-r--r--include/trace/events/thermal_power_allocator.h6
-rw-r--r--include/uapi/asm-generic/unistd.h8
-rw-r--r--include/uapi/linux/Kbuild1
-rw-r--r--include/uapi/linux/lwtunnel.h4
-rw-r--r--include/uapi/linux/membarrier.h53
-rw-r--r--include/uapi/linux/target_core_user.h4
-rw-r--r--init/Kconfig12
-rw-r--r--kernel/Makefile1
-rw-r--r--kernel/cgroup.c110
-rw-r--r--kernel/cpu_pm.c2
-rw-r--r--kernel/fork.c4
-rw-r--r--kernel/irq/chip.c33
-rw-r--r--kernel/irq/handle.c4
-rw-r--r--kernel/irq/internals.h2
-rw-r--r--kernel/irq/irqdesc.c20
-rw-r--r--kernel/irq/irqdomain.c1
-rw-r--r--kernel/irq/manage.c12
-rw-r--r--kernel/irq/proc.c2
-rw-r--r--kernel/irq/resend.c2
-rw-r--r--kernel/locking/qspinlock.c2
-rw-r--r--kernel/membarrier.c66
-rw-r--r--kernel/sched/core.c51
-rw-r--r--kernel/sched/wait.c7
-rw-r--r--kernel/sys_ni.c3
-rw-r--r--kernel/time/clockevents.c42
-rw-r--r--kernel/time/tick-common.c1
-rw-r--r--kernel/time/tick-sched.c15
-rw-r--r--kernel/time/timekeeping.c2
-rw-r--r--kernel/time/timer_list.c54
-rw-r--r--lib/iommu-common.c6
-rw-r--r--lib/rhashtable.c5
-rw-r--r--lib/string_helpers.c6
-rw-r--r--mm/Kconfig3
-rw-r--r--mm/Makefile1
-rw-r--r--mm/early_ioremap.c1
-rw-r--r--mm/frame_vector.c230
-rw-r--r--mm/kasan/kasan.c3
-rw-r--r--mm/migrate.c2
-rw-r--r--mm/mmap.c11
-rw-r--r--mm/vmscan.c2
-rw-r--r--net/atm/clip.c3
-rw-r--r--net/bluetooth/smp.c12
-rw-r--r--net/bridge/br_multicast.c4
-rw-r--r--net/ceph/ceph_common.c1
-rw-r--r--net/ceph/crypto.c4
-rw-r--r--net/ceph/messenger.c83
-rw-r--r--net/ceph/mon_client.c37
-rw-r--r--net/ceph/osd_client.c51
-rw-r--r--net/ceph/osdmap.c2
-rw-r--r--net/core/dev.c2
-rw-r--r--net/core/fib_rules.c14
-rw-r--r--net/core/filter.c2
-rw-r--r--net/core/net-sysfs.c9
-rw-r--r--net/core/netpoll.c10
-rw-r--r--net/core/rtnetlink.c26
-rw-r--r--net/core/sock.c12
-rw-r--r--net/dccp/ackvec.c12
-rw-r--r--net/dccp/ccid.c3
-rw-r--r--net/dccp/minisocks.c4
-rw-r--r--net/dsa/dsa.c41
-rw-r--r--net/dsa/tag_trailer.c2
-rw-r--r--net/ipv4/arp.c39
-rw-r--r--net/ipv4/fib_trie.c2
-rw-r--r--net/ipv4/icmp.c4
-rw-r--r--net/ipv4/inet_connection_sock.c8
-rw-r--r--net/ipv4/inet_timewait_sock.c16
-rw-r--r--net/ipv4/ip_tunnel_core.c54
-rw-r--r--net/ipv4/route.c6
-rw-r--r--net/ipv4/tcp_cubic.c10
-rw-r--r--net/ipv4/tcp_minisocks.c13
-rw-r--r--net/ipv4/tcp_output.c1
-rw-r--r--net/ipv4/udp.c3
-rw-r--r--net/ipv4/xfrm4_policy.c2
-rw-r--r--net/ipv6/addrconf.c7
-rw-r--r--net/ipv6/ip6_fib.c26
-rw-r--r--net/ipv6/ip6_gre.c93
-rw-r--r--net/ipv6/ip6_output.c14
-rw-r--r--net/ipv6/ip6_tunnel.c147
-rw-r--r--net/ipv6/route.c16
-rw-r--r--net/mac80211/cfg.c13
-rw-r--r--net/netfilter/nf_log.c9
-rw-r--r--net/netfilter/nft_compat.c24
-rw-r--r--net/netlink/af_netlink.c63
-rw-r--r--net/netlink/af_netlink.h10
-rw-r--r--net/openvswitch/Kconfig3
-rw-r--r--net/openvswitch/conntrack.c8
-rw-r--r--net/openvswitch/datapath.c4
-rw-r--r--net/openvswitch/flow_netlink.c82
-rw-r--r--net/openvswitch/flow_table.c23
-rw-r--r--net/openvswitch/flow_table.h2
-rw-r--r--net/packet/af_packet.c32
-rw-r--r--net/sched/cls_fw.c30
-rw-r--r--net/sctp/protocol.c64
-rw-r--r--net/sunrpc/sched.c14
-rw-r--r--net/sunrpc/xprt.c6
-rw-r--r--net/sunrpc/xprtsock.c15
-rw-r--r--net/tipc/msg.c1
-rw-r--r--scripts/extract-cert.c12
-rwxr-xr-xscripts/sign-file.c13
-rw-r--r--security/device_cgroup.c2
-rw-r--r--sound/arm/Kconfig15
-rw-r--r--sound/pci/hda/hda_tegra.c30
-rw-r--r--sound/pci/hda/patch_realtek.c31
-rw-r--r--sound/soc/au1x/psc-i2s.c1
-rw-r--r--sound/soc/codecs/rt5645.c22
-rw-r--r--sound/soc/codecs/wm0010.c23
-rw-r--r--sound/soc/codecs/wm8960.c26
-rw-r--r--sound/soc/codecs/wm8962.c3
-rw-r--r--sound/soc/davinci/davinci-mcasp.c14
-rw-r--r--sound/soc/fsl/fsl-asoc-card.c3
-rw-r--r--sound/soc/fsl/fsl_ssi.c5
-rw-r--r--sound/soc/intel/haswell/sst-haswell-ipc.c20
-rw-r--r--sound/soc/mediatek/mtk-afe-pcm.c17
-rw-r--r--sound/soc/pxa/Kconfig2
-rw-r--r--sound/soc/pxa/pxa2xx-ac97.c4
-rw-r--r--sound/soc/soc-dapm.c2
-rw-r--r--sound/soc/soc-utils.c9
-rw-r--r--sound/soc/spear/Kconfig2
-rw-r--r--sound/soc/sti/uniperif_player.c14
-rw-r--r--sound/soc/sti/uniperif_reader.c6
-rw-r--r--tools/perf/builtin-script.c4
-rw-r--r--tools/perf/tests/sw-clock.c18
-rw-r--r--tools/perf/tests/task-exit.c18
-rw-r--r--tools/perf/ui/browsers/hists.c12
-rw-r--r--tools/perf/util/evlist.c138
-rw-r--r--tools/perf/util/evlist.h9
-rw-r--r--tools/perf/util/evsel.c1
-rw-r--r--tools/perf/util/evsel.h1
-rw-r--r--tools/perf/util/header.c4
-rw-r--r--tools/perf/util/intel-bts.c2
-rw-r--r--tools/perf/util/intel-pt.c2
-rw-r--r--tools/perf/util/parse-events.c7
-rw-r--r--tools/perf/util/parse-events.y2
-rw-r--r--tools/testing/selftests/Makefile3
-rw-r--r--tools/testing/selftests/exec/Makefile4
-rw-r--r--tools/testing/selftests/ftrace/Makefile2
-rw-r--r--tools/testing/selftests/lib.mk11
-rw-r--r--tools/testing/selftests/membarrier/.gitignore1
-rw-r--r--tools/testing/selftests/membarrier/Makefile10
-rw-r--r--tools/testing/selftests/membarrier/membarrier_test.c118
-rw-r--r--tools/testing/selftests/mqueue/Makefile10
-rw-r--r--tools/testing/selftests/seccomp/seccomp_bpf.c37
-rw-r--r--tools/testing/selftests/seccomp/test_harness.h7
-rw-r--r--tools/testing/selftests/vm/Makefile9
-rw-r--r--tools/testing/selftests/vm/userfaultfd.c52
-rw-r--r--tools/testing/selftests/x86/entry_from_vm86.c5
-rwxr-xr-xtools/testing/selftests/zram/zram.sh10
-rwxr-xr-xtools/testing/selftests/zram/zram_lib.sh3
-rw-r--r--tools/virtio/Makefile2
-rw-r--r--tools/virtio/asm/barrier.h2
-rw-r--r--tools/virtio/linux/export.h3
-rw-r--r--tools/virtio/linux/kernel.h8
-rw-r--r--virt/kvm/arm/arch_timer.c8
-rw-r--r--virt/kvm/arm/vgic-v3.c2
-rw-r--r--virt/kvm/arm/vgic.c42
-rw-r--r--virt/kvm/coalesced_mmio.h4
-rw-r--r--virt/kvm/eventfd.c124
-rw-r--r--virt/kvm/kvm_main.c27
1105 files changed, 12640 insertions, 7316 deletions
diff --git a/Documentation/devicetree/bindings/arm/gic-v3.txt b/Documentation/devicetree/bindings/arm/gic-v3.txt
index ddfade40ac59..7803e77d85cb 100644
--- a/Documentation/devicetree/bindings/arm/gic-v3.txt
+++ b/Documentation/devicetree/bindings/arm/gic-v3.txt
@@ -57,6 +57,8 @@ used to route Message Signalled Interrupts (MSI) to the CPUs.
57These nodes must have the following properties: 57These nodes must have the following properties:
58- compatible : Should at least contain "arm,gic-v3-its". 58- compatible : Should at least contain "arm,gic-v3-its".
59- msi-controller : Boolean property. Identifies the node as an MSI controller 59- msi-controller : Boolean property. Identifies the node as an MSI controller
60- #msi-cells: Must be <1>. The single msi-cell is the DeviceID of the device
61 which will generate the MSI.
60- reg: Specifies the base physical address and size of the ITS 62- reg: Specifies the base physical address and size of the ITS
61 registers. 63 registers.
62 64
@@ -83,6 +85,7 @@ Examples:
83 gic-its@2c200000 { 85 gic-its@2c200000 {
84 compatible = "arm,gic-v3-its"; 86 compatible = "arm,gic-v3-its";
85 msi-controller; 87 msi-controller;
88 #msi-cells = <1>;
86 reg = <0x0 0x2c200000 0 0x200000>; 89 reg = <0x0 0x2c200000 0 0x200000>;
87 }; 90 };
88 }; 91 };
@@ -107,12 +110,14 @@ Examples:
107 gic-its@2c200000 { 110 gic-its@2c200000 {
108 compatible = "arm,gic-v3-its"; 111 compatible = "arm,gic-v3-its";
109 msi-controller; 112 msi-controller;
113 #msi-cells = <1>;
110 reg = <0x0 0x2c200000 0 0x200000>; 114 reg = <0x0 0x2c200000 0 0x200000>;
111 }; 115 };
112 116
113 gic-its@2c400000 { 117 gic-its@2c400000 {
114 compatible = "arm,gic-v3-its"; 118 compatible = "arm,gic-v3-its";
115 msi-controller; 119 msi-controller;
120 #msi-cells = <1>;
116 reg = <0x0 0x2c400000 0 0x200000>; 121 reg = <0x0 0x2c400000 0 0x200000>;
117 }; 122 };
118 }; 123 };
diff --git a/Documentation/devicetree/bindings/arm/idle-states.txt b/Documentation/devicetree/bindings/arm/idle-states.txt
index a8274eabae2e..b8e41c148a3c 100644
--- a/Documentation/devicetree/bindings/arm/idle-states.txt
+++ b/Documentation/devicetree/bindings/arm/idle-states.txt
@@ -497,7 +497,7 @@ cpus {
497 }; 497 };
498 498
499 idle-states { 499 idle-states {
500 entry-method = "arm,psci"; 500 entry-method = "psci";
501 501
502 CPU_RETENTION_0_0: cpu-retention-0-0 { 502 CPU_RETENTION_0_0: cpu-retention-0-0 {
503 compatible = "arm,idle-state"; 503 compatible = "arm,idle-state";
diff --git a/Documentation/devicetree/bindings/gpio/gpio.txt b/Documentation/devicetree/bindings/gpio/gpio.txt
index 5788d5cf1252..82d40e2505f6 100644
--- a/Documentation/devicetree/bindings/gpio/gpio.txt
+++ b/Documentation/devicetree/bindings/gpio/gpio.txt
@@ -16,7 +16,9 @@ properties, each containing a 'gpio-list':
16GPIO properties should be named "[<name>-]gpios", with <name> being the purpose 16GPIO properties should be named "[<name>-]gpios", with <name> being the purpose
17of this GPIO for the device. While a non-existent <name> is considered valid 17of this GPIO for the device. While a non-existent <name> is considered valid
18for compatibility reasons (resolving to the "gpios" property), it is not allowed 18for compatibility reasons (resolving to the "gpios" property), it is not allowed
19for new bindings. 19for new bindings. Also, GPIO properties named "[<name>-]gpio" are valid and old
20bindings use it, but are only supported for compatibility reasons and should not
21be used for newer bindings since it has been deprecated.
20 22
21GPIO properties can contain one or more GPIO phandles, but only in exceptional 23GPIO properties can contain one or more GPIO phandles, but only in exceptional
22cases should they contain more than one. If your device uses several GPIOs with 24cases should they contain more than one. If your device uses several GPIOs with
diff --git a/Documentation/devicetree/bindings/iio/accel/bma180.txt b/Documentation/devicetree/bindings/iio/accel/bma180.txt
index c5933573e0f6..4a3679d54457 100644
--- a/Documentation/devicetree/bindings/iio/accel/bma180.txt
+++ b/Documentation/devicetree/bindings/iio/accel/bma180.txt
@@ -1,10 +1,11 @@
1* Bosch BMA180 triaxial acceleration sensor 1* Bosch BMA180 / BMA250 triaxial acceleration sensor
2 2
3http://omapworld.com/BMA180_111_1002839.pdf 3http://omapworld.com/BMA180_111_1002839.pdf
4http://ae-bst.resource.bosch.com/media/products/dokumente/bma250/bst-bma250-ds002-05.pdf
4 5
5Required properties: 6Required properties:
6 7
7 - compatible : should be "bosch,bma180" 8 - compatible : should be "bosch,bma180" or "bosch,bma250"
8 - reg : the I2C address of the sensor 9 - reg : the I2C address of the sensor
9 10
10Optional properties: 11Optional properties:
@@ -13,6 +14,9 @@ Optional properties:
13 14
14 - interrupts : interrupt mapping for GPIO IRQ, it should by configured with 15 - interrupts : interrupt mapping for GPIO IRQ, it should by configured with
15 flags IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_EDGE_RISING 16 flags IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_EDGE_RISING
17 For the bma250 the first interrupt listed must be the one
18 connected to the INT1 pin, the second (optional) interrupt
19 listed must be the one connected to the INT2 pin.
16 20
17Example: 21Example:
18 22
diff --git a/Documentation/devicetree/bindings/input/touchscreen/colibri-vf50-ts.txt b/Documentation/devicetree/bindings/input/touchscreen/colibri-vf50-ts.txt
new file mode 100644
index 000000000000..9d9e930f3251
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/touchscreen/colibri-vf50-ts.txt
@@ -0,0 +1,36 @@
1* Toradex Colibri VF50 Touchscreen driver
2
3Required Properties:
4- compatible must be toradex,vf50-touchscreen
5- io-channels: adc channels being used by the Colibri VF50 module
6- xp-gpios: FET gate driver for input of X+
7- xm-gpios: FET gate driver for input of X-
8- yp-gpios: FET gate driver for input of Y+
9- ym-gpios: FET gate driver for input of Y-
10- interrupt-parent: phandle for the interrupt controller
11- interrupts: pen irq interrupt for touch detection
12- pinctrl-names: "idle", "default", "gpios"
13- pinctrl-0: pinctrl node for pen/touch detection state pinmux
14- pinctrl-1: pinctrl node for X/Y and pressure measurement (ADC) state pinmux
15- pinctrl-2: pinctrl node for gpios functioning as FET gate drivers
16- vf50-ts-min-pressure: pressure level at which to stop measuring X/Y values
17
18Example:
19
20 touchctrl: vf50_touchctrl {
21 compatible = "toradex,vf50-touchscreen";
22 io-channels = <&adc1 0>,<&adc0 0>,
23 <&adc0 1>,<&adc1 2>;
24 xp-gpios = <&gpio0 13 GPIO_ACTIVE_LOW>;
25 xm-gpios = <&gpio2 29 GPIO_ACTIVE_HIGH>;
26 yp-gpios = <&gpio0 12 GPIO_ACTIVE_LOW>;
27 ym-gpios = <&gpio0 4 GPIO_ACTIVE_HIGH>;
28 interrupt-parent = <&gpio0>;
29 interrupts = <8 IRQ_TYPE_LEVEL_LOW>;
30 pinctrl-names = "idle","default","gpios";
31 pinctrl-0 = <&pinctrl_touchctrl_idle>;
32 pinctrl-1 = <&pinctrl_touchctrl_default>;
33 pinctrl-2 = <&pinctrl_touchctrl_gpios>;
34 vf50-ts-min-pressure = <200>;
35 status = "disabled";
36 };
diff --git a/Documentation/devicetree/bindings/input/touchscreen/imx6ul_tsc.txt b/Documentation/devicetree/bindings/input/touchscreen/imx6ul_tsc.txt
new file mode 100644
index 000000000000..853dff96dd9f
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/touchscreen/imx6ul_tsc.txt
@@ -0,0 +1,36 @@
1* Freescale i.MX6UL Touch Controller
2
3Required properties:
4- compatible: must be "fsl,imx6ul-tsc".
5- reg: this touch controller address and the ADC2 address.
6- interrupts: the interrupt of this touch controller and ADC2.
7- clocks: the root clock of touch controller and ADC2.
8- clock-names; must be "tsc" and "adc".
9- xnur-gpio: the X- gpio this controller connect to.
10 This xnur-gpio returns to low once the finger leave the touch screen (The
11 last touch event the touch controller capture).
12
13Optional properties:
14- measure-delay-time: the value of measure delay time.
15 Before X-axis or Y-axis measurement, the screen need some time before
16 even potential distribution ready.
17 This value depends on the touch screen.
18- pre-charge-time: the touch screen need some time to precharge.
19 This value depends on the touch screen.
20
21Example:
22 tsc: tsc@02040000 {
23 compatible = "fsl,imx6ul-tsc";
24 reg = <0x02040000 0x4000>, <0x0219c000 0x4000>;
25 interrupts = <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>,
26 <GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>;
27 clocks = <&clks IMX6UL_CLK_IPG>,
28 <&clks IMX6UL_CLK_ADC2>;
29 clock-names = "tsc", "adc";
30 pinctrl-names = "default";
31 pinctrl-0 = <&pinctrl_tsc>;
32 xnur-gpio = <&gpio1 3 GPIO_ACTIVE_LOW>;
33 measure-delay-time = <0xfff>;
34 pre-charge-time = <0xffff>;
35 status = "okay";
36 };
diff --git a/Documentation/devicetree/bindings/pci/pci-rcar-gen2.txt b/Documentation/devicetree/bindings/pci/pci-rcar-gen2.txt
index d8ef5bf50f11..7fab84b33531 100644
--- a/Documentation/devicetree/bindings/pci/pci-rcar-gen2.txt
+++ b/Documentation/devicetree/bindings/pci/pci-rcar-gen2.txt
@@ -7,7 +7,8 @@ OHCI and EHCI controllers.
7 7
8Required properties: 8Required properties:
9- compatible: "renesas,pci-r8a7790" for the R8A7790 SoC; 9- compatible: "renesas,pci-r8a7790" for the R8A7790 SoC;
10 "renesas,pci-r8a7791" for the R8A7791 SoC. 10 "renesas,pci-r8a7791" for the R8A7791 SoC;
11 "renesas,pci-r8a7794" for the R8A7794 SoC.
11- reg: A list of physical regions to access the device: the first is 12- reg: A list of physical regions to access the device: the first is
12 the operational registers for the OHCI/EHCI controllers and the 13 the operational registers for the OHCI/EHCI controllers and the
13 second is for the bridge configuration and control registers. 14 second is for the bridge configuration and control registers.
diff --git a/Documentation/devicetree/bindings/regulator/pbias-regulator.txt b/Documentation/devicetree/bindings/regulator/pbias-regulator.txt
index 32aa26f1e434..acbcb452a69a 100644
--- a/Documentation/devicetree/bindings/regulator/pbias-regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/pbias-regulator.txt
@@ -2,7 +2,12 @@ PBIAS internal regulator for SD card dual voltage i/o pads on OMAP SoCs.
2 2
3Required properties: 3Required properties:
4- compatible: 4- compatible:
5 - "ti,pbias-omap" for OMAP2, OMAP3, OMAP4, OMAP5, DRA7. 5 - should be "ti,pbias-dra7" for DRA7
6 - should be "ti,pbias-omap2" for OMAP2
7 - should be "ti,pbias-omap3" for OMAP3
8 - should be "ti,pbias-omap4" for OMAP4
9 - should be "ti,pbias-omap5" for OMAP5
10 - "ti,pbias-omap" is deprecated
6- reg: pbias register offset from syscon base and size of pbias register. 11- reg: pbias register offset from syscon base and size of pbias register.
7- syscon : phandle of the system control module 12- syscon : phandle of the system control module
8- regulator-name : should be 13- regulator-name : should be
diff --git a/Documentation/devicetree/bindings/spi/spi-mt65xx.txt b/Documentation/devicetree/bindings/spi/spi-mt65xx.txt
index dcefc438272f..6160ffbcb3d3 100644
--- a/Documentation/devicetree/bindings/spi/spi-mt65xx.txt
+++ b/Documentation/devicetree/bindings/spi/spi-mt65xx.txt
@@ -15,17 +15,18 @@ Required properties:
15- interrupts: Should contain spi interrupt 15- interrupts: Should contain spi interrupt
16 16
17- clocks: phandles to input clocks. 17- clocks: phandles to input clocks.
18 The first should be <&topckgen CLK_TOP_SPI_SEL>. 18 The first should be one of the following. It's PLL.
19 The second should be one of the following.
20 - <&clk26m>: specify parent clock 26MHZ. 19 - <&clk26m>: specify parent clock 26MHZ.
21 - <&topckgen CLK_TOP_SYSPLL3_D2>: specify parent clock 109MHZ. 20 - <&topckgen CLK_TOP_SYSPLL3_D2>: specify parent clock 109MHZ.
22 It's the default one. 21 It's the default one.
23 - <&topckgen CLK_TOP_SYSPLL4_D2>: specify parent clock 78MHZ. 22 - <&topckgen CLK_TOP_SYSPLL4_D2>: specify parent clock 78MHZ.
24 - <&topckgen CLK_TOP_UNIVPLL2_D4>: specify parent clock 104MHZ. 23 - <&topckgen CLK_TOP_UNIVPLL2_D4>: specify parent clock 104MHZ.
25 - <&topckgen CLK_TOP_UNIVPLL1_D8>: specify parent clock 78MHZ. 24 - <&topckgen CLK_TOP_UNIVPLL1_D8>: specify parent clock 78MHZ.
25 The second should be <&topckgen CLK_TOP_SPI_SEL>. It's clock mux.
26 The third is <&pericfg CLK_PERI_SPI0>. It's clock gate.
26 27
27- clock-names: shall be "spi-clk" for the controller clock, and 28- clock-names: shall be "parent-clk" for the parent clock, "sel-clk" for the
28 "parent-clk" for the parent clock. 29 muxes clock, and "spi-clk" for the clock gate.
29 30
30Optional properties: 31Optional properties:
31- mediatek,pad-select: specify which pins group(ck/mi/mo/cs) spi 32- mediatek,pad-select: specify which pins group(ck/mi/mo/cs) spi
@@ -44,8 +45,11 @@ spi: spi@1100a000 {
44 #size-cells = <0>; 45 #size-cells = <0>;
45 reg = <0 0x1100a000 0 0x1000>; 46 reg = <0 0x1100a000 0 0x1000>;
46 interrupts = <GIC_SPI 110 IRQ_TYPE_LEVEL_LOW>; 47 interrupts = <GIC_SPI 110 IRQ_TYPE_LEVEL_LOW>;
47 clocks = <&topckgen CLK_TOP_SPI_SEL>, <&topckgen CLK_TOP_SYSPLL3_D2>; 48 clocks = <&topckgen CLK_TOP_SYSPLL3_D2>,
48 clock-names = "spi-clk", "parent-clk"; 49 <&topckgen CLK_TOP_SPI_SEL>,
50 <&pericfg CLK_PERI_SPI0>;
51 clock-names = "parent-clk", "sel-clk", "spi-clk";
52
49 mediatek,pad-select = <0>; 53 mediatek,pad-select = <0>;
50 status = "disabled"; 54 status = "disabled";
51}; 55};
diff --git a/Documentation/devicetree/bindings/thermal/thermal.txt b/Documentation/devicetree/bindings/thermal/thermal.txt
index 8a49362dea6e..41b817f7b670 100644
--- a/Documentation/devicetree/bindings/thermal/thermal.txt
+++ b/Documentation/devicetree/bindings/thermal/thermal.txt
@@ -55,19 +55,11 @@ of heat dissipation). For example a fan's cooling states correspond to
55the different fan speeds possible. Cooling states are referred to by 55the different fan speeds possible. Cooling states are referred to by
56single unsigned integers, where larger numbers mean greater heat 56single unsigned integers, where larger numbers mean greater heat
57dissipation. The precise set of cooling states associated with a device 57dissipation. The precise set of cooling states associated with a device
58(as referred to be the cooling-min-state and cooling-max-state 58(as referred to by the cooling-min-level and cooling-max-level
59properties) should be defined in a particular device's binding. 59properties) should be defined in a particular device's binding.
60For more examples of cooling devices, refer to the example sections below. 60For more examples of cooling devices, refer to the example sections below.
61 61
62Required properties: 62Required properties:
63- cooling-min-state: An integer indicating the smallest
64 Type: unsigned cooling state accepted. Typically 0.
65 Size: one cell
66
67- cooling-max-state: An integer indicating the largest
68 Type: unsigned cooling state accepted.
69 Size: one cell
70
71- #cooling-cells: Used to provide cooling device specific information 63- #cooling-cells: Used to provide cooling device specific information
72 Type: unsigned while referring to it. Must be at least 2, in order 64 Type: unsigned while referring to it. Must be at least 2, in order
73 Size: one cell to specify minimum and maximum cooling state used 65 Size: one cell to specify minimum and maximum cooling state used
@@ -77,6 +69,15 @@ Required properties:
77 See Cooling device maps section below for more details 69 See Cooling device maps section below for more details
78 on how consumers refer to cooling devices. 70 on how consumers refer to cooling devices.
79 71
72Optional properties:
73- cooling-min-level: An integer indicating the smallest
74 Type: unsigned cooling state accepted. Typically 0.
75 Size: one cell
76
77- cooling-max-level: An integer indicating the largest
78 Type: unsigned cooling state accepted.
79 Size: one cell
80
80* Trip points 81* Trip points
81 82
82The trip node is a node to describe a point in the temperature domain 83The trip node is a node to describe a point in the temperature domain
@@ -225,8 +226,8 @@ cpus {
225 396000 950000 226 396000 950000
226 198000 850000 227 198000 850000
227 >; 228 >;
228 cooling-min-state = <0>; 229 cooling-min-level = <0>;
229 cooling-max-state = <3>; 230 cooling-max-level = <3>;
230 #cooling-cells = <2>; /* min followed by max */ 231 #cooling-cells = <2>; /* min followed by max */
231 }; 232 };
232 ... 233 ...
@@ -240,8 +241,8 @@ cpus {
240 */ 241 */
241 fan0: fan@0x48 { 242 fan0: fan@0x48 {
242 ... 243 ...
243 cooling-min-state = <0>; 244 cooling-min-level = <0>;
244 cooling-max-state = <9>; 245 cooling-max-level = <9>;
245 #cooling-cells = <2>; /* min followed by max */ 246 #cooling-cells = <2>; /* min followed by max */
246 }; 247 };
247}; 248};
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index ac5f0c34ae00..82d2ac97af74 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -203,6 +203,7 @@ sitronix Sitronix Technology Corporation
203skyworks Skyworks Solutions, Inc. 203skyworks Skyworks Solutions, Inc.
204smsc Standard Microsystems Corporation 204smsc Standard Microsystems Corporation
205snps Synopsys, Inc. 205snps Synopsys, Inc.
206socionext Socionext Inc.
206solidrun SolidRun 207solidrun SolidRun
207solomon Solomon Systech Limited 208solomon Solomon Systech Limited
208sony Sony Corporation 209sony Sony Corporation
diff --git a/Documentation/devicetree/bindings/watchdog/atmel-sama5d4-wdt.txt b/Documentation/devicetree/bindings/watchdog/atmel-sama5d4-wdt.txt
new file mode 100644
index 000000000000..f7cc7c060910
--- /dev/null
+++ b/Documentation/devicetree/bindings/watchdog/atmel-sama5d4-wdt.txt
@@ -0,0 +1,35 @@
1* Atmel SAMA5D4 Watchdog Timer (WDT) Controller
2
3Required properties:
4- compatible: "atmel,sama5d4-wdt"
5- reg: base physical address and length of memory mapped region.
6
7Optional properties:
8- timeout-sec: watchdog timeout value (in seconds).
9- interrupts: interrupt number to the CPU.
10- atmel,watchdog-type: should be "hardware" or "software".
11 "hardware": enable watchdog fault reset. A watchdog fault triggers
12 watchdog reset.
13 "software": enable watchdog fault interrupt. A watchdog fault asserts
14 watchdog interrupt.
15- atmel,idle-halt: present if you want to stop the watchdog when the CPU is
16 in idle state.
17 CAUTION: This property should be used with care, it actually makes the
18 watchdog not counting when the CPU is in idle state, therefore the
19 watchdog reset time depends on mean CPU usage and will not reset at all
20 if the CPU stop working while it is in idle state, which is probably
21 not what you want.
22- atmel,dbg-halt: present if you want to stop the watchdog when the CPU is
23 in debug state.
24
25Example:
26 watchdog@fc068640 {
27 compatible = "atmel,sama5d4-wdt";
28 reg = <0xfc068640 0x10>;
29 interrupts = <4 IRQ_TYPE_LEVEL_HIGH 5>;
30 timeout-sec = <10>;
31 atmel,watchdog-type = "hardware";
32 atmel,dbg-halt;
33 atmel,idle-halt;
34 status = "okay";
35 };
diff --git a/Documentation/devicetree/bindings/watchdog/lpc18xx-wdt.txt b/Documentation/devicetree/bindings/watchdog/lpc18xx-wdt.txt
new file mode 100644
index 000000000000..09f6b24969e0
--- /dev/null
+++ b/Documentation/devicetree/bindings/watchdog/lpc18xx-wdt.txt
@@ -0,0 +1,19 @@
1* NXP LPC18xx Watchdog Timer (WDT)
2
3Required properties:
4- compatible: Should be "nxp,lpc1850-wwdt"
5- reg: Should contain WDT registers location and length
6- clocks: Must contain an entry for each entry in clock-names.
7- clock-names: Should contain "wdtclk" and "reg"; the watchdog counter
8 clock and register interface clock respectively.
9- interrupts: Should contain WDT interrupt
10
11Examples:
12
13watchdog@40080000 {
14 compatible = "nxp,lpc1850-wwdt";
15 reg = <0x40080000 0x24>;
16 clocks = <&cgu BASE_SAFE_CLK>, <&ccu1 CLK_CPU_WWDT>;
17 clock-names = "wdtclk", "reg";
18 interrupts = <49>;
19};
diff --git a/Documentation/gpio/board.txt b/Documentation/gpio/board.txt
index b80606de545a..f59c43b6411b 100644
--- a/Documentation/gpio/board.txt
+++ b/Documentation/gpio/board.txt
@@ -21,8 +21,8 @@ exact way to do it depends on the GPIO controller providing the GPIOs, see the
21device tree bindings for your controller. 21device tree bindings for your controller.
22 22
23GPIOs mappings are defined in the consumer device's node, in a property named 23GPIOs mappings are defined in the consumer device's node, in a property named
24<function>-gpios, where <function> is the function the driver will request 24either <function>-gpios or <function>-gpio, where <function> is the function
25through gpiod_get(). For example: 25the driver will request through gpiod_get(). For example:
26 26
27 foo_device { 27 foo_device {
28 compatible = "acme,foo"; 28 compatible = "acme,foo";
@@ -31,7 +31,7 @@ through gpiod_get(). For example:
31 <&gpio 16 GPIO_ACTIVE_HIGH>, /* green */ 31 <&gpio 16 GPIO_ACTIVE_HIGH>, /* green */
32 <&gpio 17 GPIO_ACTIVE_HIGH>; /* blue */ 32 <&gpio 17 GPIO_ACTIVE_HIGH>; /* blue */
33 33
34 power-gpios = <&gpio 1 GPIO_ACTIVE_LOW>; 34 power-gpio = <&gpio 1 GPIO_ACTIVE_LOW>;
35 }; 35 };
36 36
37This property will make GPIOs 15, 16 and 17 available to the driver under the 37This property will make GPIOs 15, 16 and 17 available to the driver under the
@@ -39,15 +39,24 @@ This property will make GPIOs 15, 16 and 17 available to the driver under the
39 39
40 struct gpio_desc *red, *green, *blue, *power; 40 struct gpio_desc *red, *green, *blue, *power;
41 41
42 red = gpiod_get_index(dev, "led", 0); 42 red = gpiod_get_index(dev, "led", 0, GPIOD_OUT_HIGH);
43 green = gpiod_get_index(dev, "led", 1); 43 green = gpiod_get_index(dev, "led", 1, GPIOD_OUT_HIGH);
44 blue = gpiod_get_index(dev, "led", 2); 44 blue = gpiod_get_index(dev, "led", 2, GPIOD_OUT_HIGH);
45 45
46 power = gpiod_get(dev, "power"); 46 power = gpiod_get(dev, "power", GPIOD_OUT_HIGH);
47 47
48The led GPIOs will be active-high, while the power GPIO will be active-low (i.e. 48The led GPIOs will be active-high, while the power GPIO will be active-low (i.e.
49gpiod_is_active_low(power) will be true). 49gpiod_is_active_low(power) will be true).
50 50
51The second parameter of the gpiod_get() functions, the con_id string, has to be
52the <function>-prefix of the GPIO suffixes ("gpios" or "gpio", automatically
53looked up by the gpiod functions internally) used in the device tree. With above
54"led-gpios" example, use the prefix without the "-" as con_id parameter: "led".
55
56Internally, the GPIO subsystem prefixes the GPIO suffix ("gpios" or "gpio")
57with the string passed in con_id to get the resulting string
58(snprintf(... "%s-%s", con_id, gpio_suffixes[]).
59
51ACPI 60ACPI
52---- 61----
53ACPI also supports function names for GPIOs in a similar fashion to DT. 62ACPI also supports function names for GPIOs in a similar fashion to DT.
@@ -142,13 +151,14 @@ The driver controlling "foo.0" will then be able to obtain its GPIOs as follows:
142 151
143 struct gpio_desc *red, *green, *blue, *power; 152 struct gpio_desc *red, *green, *blue, *power;
144 153
145 red = gpiod_get_index(dev, "led", 0); 154 red = gpiod_get_index(dev, "led", 0, GPIOD_OUT_HIGH);
146 green = gpiod_get_index(dev, "led", 1); 155 green = gpiod_get_index(dev, "led", 1, GPIOD_OUT_HIGH);
147 blue = gpiod_get_index(dev, "led", 2); 156 blue = gpiod_get_index(dev, "led", 2, GPIOD_OUT_HIGH);
148 157
149 power = gpiod_get(dev, "power"); 158 power = gpiod_get(dev, "power", GPIOD_OUT_HIGH);
150 gpiod_direction_output(power, 1);
151 159
152Since the "power" GPIO is mapped as active-low, its actual signal will be 0 160Since the "led" GPIOs are mapped as active-high, this example will switch their
153after this code. Contrary to the legacy integer GPIO interface, the active-low 161signals to 1, i.e. enabling the LEDs. And for the "power" GPIO, which is mapped
154property is handled during mapping and is thus transparent to GPIO consumers. 162as active-low, its actual signal will be 0 after this code. Contrary to the legacy
163integer GPIO interface, the active-low property is handled during mapping and is
164thus transparent to GPIO consumers.
diff --git a/Documentation/gpio/consumer.txt b/Documentation/gpio/consumer.txt
index a206639454ab..e000502fde20 100644
--- a/Documentation/gpio/consumer.txt
+++ b/Documentation/gpio/consumer.txt
@@ -39,6 +39,9 @@ device that displays digits), an additional index argument can be specified:
39 const char *con_id, unsigned int idx, 39 const char *con_id, unsigned int idx,
40 enum gpiod_flags flags) 40 enum gpiod_flags flags)
41 41
42For a more detailed description of the con_id parameter in the DeviceTree case
43see Documentation/gpio/board.txt
44
42The flags parameter is used to optionally specify a direction and initial value 45The flags parameter is used to optionally specify a direction and initial value
43for the GPIO. Values can be: 46for the GPIO. Values can be:
44 47
diff --git a/Documentation/hwmon/nct6775 b/Documentation/hwmon/nct6775
index f0dd3d2fec96..76add4c9cd68 100644
--- a/Documentation/hwmon/nct6775
+++ b/Documentation/hwmon/nct6775
@@ -32,6 +32,10 @@ Supported chips:
32 Prefix: 'nct6792' 32 Prefix: 'nct6792'
33 Addresses scanned: ISA address retrieved from Super I/O registers 33 Addresses scanned: ISA address retrieved from Super I/O registers
34 Datasheet: Available from Nuvoton upon request 34 Datasheet: Available from Nuvoton upon request
35 * Nuvoton NCT6793D
36 Prefix: 'nct6793'
37 Addresses scanned: ISA address retrieved from Super I/O registers
38 Datasheet: Available from Nuvoton upon request
35 39
36Authors: 40Authors:
37 Guenter Roeck <linux@roeck-us.net> 41 Guenter Roeck <linux@roeck-us.net>
diff --git a/Documentation/networking/vrf.txt b/Documentation/networking/vrf.txt
new file mode 100644
index 000000000000..031ef4a63485
--- /dev/null
+++ b/Documentation/networking/vrf.txt
@@ -0,0 +1,96 @@
1Virtual Routing and Forwarding (VRF)
2====================================
3The VRF device combined with ip rules provides the ability to create virtual
4routing and forwarding domains (aka VRFs, VRF-lite to be specific) in the
5Linux network stack. One use case is the multi-tenancy problem where each
6tenant has their own unique routing tables and in the very least need
7different default gateways.
8
9Processes can be "VRF aware" by binding a socket to the VRF device. Packets
10through the socket then use the routing table associated with the VRF
11device. An important feature of the VRF device implementation is that it
12impacts only Layer 3 and above so L2 tools (e.g., LLDP) are not affected
13(ie., they do not need to be run in each VRF). The design also allows
14the use of higher priority ip rules (Policy Based Routing, PBR) to take
15precedence over the VRF device rules directing specific traffic as desired.
16
17In addition, VRF devices allow VRFs to be nested within namespaces. For
18example network namespaces provide separation of network interfaces at L1
19(Layer 1 separation), VLANs on the interfaces within a namespace provide
20L2 separation and then VRF devices provide L3 separation.
21
22Design
23------
24A VRF device is created with an associated route table. Network interfaces
25are then enslaved to a VRF device:
26
27 +-----------------------------+
28 | vrf-blue | ===> route table 10
29 +-----------------------------+
30 | | |
31 +------+ +------+ +-------------+
32 | eth1 | | eth2 | ... | bond1 |
33 +------+ +------+ +-------------+
34 | |
35 +------+ +------+
36 | eth8 | | eth9 |
37 +------+ +------+
38
39Packets received on an enslaved device and are switched to the VRF device
40using an rx_handler which gives the impression that packets flow through
41the VRF device. Similarly on egress routing rules are used to send packets
42to the VRF device driver before getting sent out the actual interface. This
43allows tcpdump on a VRF device to capture all packets into and out of the
44VRF as a whole.[1] Similiarly, netfilter [2] and tc rules can be applied
45using the VRF device to specify rules that apply to the VRF domain as a whole.
46
47[1] Packets in the forwarded state do not flow through the device, so those
48 packets are not seen by tcpdump. Will revisit this limitation in a
49 future release.
50
51[2] Iptables on ingress is limited to NF_INET_PRE_ROUTING only with skb->dev
52 set to real ingress device and egress is limited to NF_INET_POST_ROUTING.
53 Will revisit this limitation in a future release.
54
55
56Setup
57-----
581. VRF device is created with an association to a FIB table.
59 e.g, ip link add vrf-blue type vrf table 10
60 ip link set dev vrf-blue up
61
622. Rules are added that send lookups to the associated FIB table when the
63 iif or oif is the VRF device. e.g.,
64 ip ru add oif vrf-blue table 10
65 ip ru add iif vrf-blue table 10
66
67 Set the default route for the table (and hence default route for the VRF).
68 e.g, ip route add table 10 prohibit default
69
703. Enslave L3 interfaces to a VRF device.
71 e.g, ip link set dev eth1 master vrf-blue
72
73 Local and connected routes for enslaved devices are automatically moved to
74 the table associated with VRF device. Any additional routes depending on
75 the enslaved device will need to be reinserted following the enslavement.
76
774. Additional VRF routes are added to associated table.
78 e.g., ip route add table 10 ...
79
80
81Applications
82------------
83Applications that are to work within a VRF need to bind their socket to the
84VRF device:
85
86 setsockopt(sd, SOL_SOCKET, SO_BINDTODEVICE, dev, strlen(dev)+1);
87
88or to specify the output device using cmsg and IP_PKTINFO.
89
90
91Limitations
92-----------
93VRF device currently only works for IPv4. Support for IPv6 is under development.
94
95Index of original ingress interface is not available via cmsg. Will address
96soon.
diff --git a/Documentation/static-keys.txt b/Documentation/static-keys.txt
index f4cb0b2d5cd7..477927becacb 100644
--- a/Documentation/static-keys.txt
+++ b/Documentation/static-keys.txt
@@ -15,8 +15,8 @@ The updated API replacements are:
15 15
16DEFINE_STATIC_KEY_TRUE(key); 16DEFINE_STATIC_KEY_TRUE(key);
17DEFINE_STATIC_KEY_FALSE(key); 17DEFINE_STATIC_KEY_FALSE(key);
18static_key_likely() 18static_branch_likely()
19statick_key_unlikely() 19static_branch_unlikely()
20 20
210) Abstract 210) Abstract
22 22
diff --git a/Documentation/sysctl/net.txt b/Documentation/sysctl/net.txt
index 6294b5186ae5..809ab6efcc74 100644
--- a/Documentation/sysctl/net.txt
+++ b/Documentation/sysctl/net.txt
@@ -54,13 +54,15 @@ default_qdisc
54-------------- 54--------------
55 55
56The default queuing discipline to use for network devices. This allows 56The default queuing discipline to use for network devices. This allows
57overriding the default queue discipline of pfifo_fast with an 57overriding the default of pfifo_fast with an alternative. Since the default
58alternative. Since the default queuing discipline is created with the 58queuing discipline is created without additional parameters so is best suited
59no additional parameters so is best suited to queuing disciplines that 59to queuing disciplines that work well without configuration like stochastic
60work well without configuration like stochastic fair queue (sfq), 60fair queue (sfq), CoDel (codel) or fair queue CoDel (fq_codel). Don't use
61CoDel (codel) or fair queue CoDel (fq_codel). Don't use queuing disciplines 61queuing disciplines like Hierarchical Token Bucket or Deficit Round Robin
62like Hierarchical Token Bucket or Deficit Round Robin which require setting 62which require setting up classes and bandwidths. Note that physical multiqueue
63up classes and bandwidths. 63interfaces still use mq as root qdisc, which in turn uses this default for its
64leaves. Virtual devices (like e.g. lo or veth) ignore this setting and instead
65default to noqueue.
64Default: pfifo_fast 66Default: pfifo_fast
65 67
66busy_read 68busy_read
diff --git a/Documentation/thermal/power_allocator.txt b/Documentation/thermal/power_allocator.txt
index c3797b529991..a1ce2235f121 100644
--- a/Documentation/thermal/power_allocator.txt
+++ b/Documentation/thermal/power_allocator.txt
@@ -4,7 +4,7 @@ Power allocator governor tunables
4Trip points 4Trip points
5----------- 5-----------
6 6
7The governor requires the following two passive trip points: 7The governor works optimally with the following two passive trip points:
8 8
91. "switch on" trip point: temperature above which the governor 91. "switch on" trip point: temperature above which the governor
10 control loop starts operating. This is the first passive trip 10 control loop starts operating. This is the first passive trip
diff --git a/Documentation/thermal/sysfs-api.txt b/Documentation/thermal/sysfs-api.txt
index c1f6864a8c5d..10f062ea6bc2 100644
--- a/Documentation/thermal/sysfs-api.txt
+++ b/Documentation/thermal/sysfs-api.txt
@@ -180,6 +180,7 @@ Thermal zone device sys I/F, created once it's registered:
180 |---temp: Current temperature 180 |---temp: Current temperature
181 |---mode: Working mode of the thermal zone 181 |---mode: Working mode of the thermal zone
182 |---policy: Thermal governor used for this zone 182 |---policy: Thermal governor used for this zone
183 |---available_policies: Available thermal governors for this zone
183 |---trip_point_[0-*]_temp: Trip point temperature 184 |---trip_point_[0-*]_temp: Trip point temperature
184 |---trip_point_[0-*]_type: Trip point type 185 |---trip_point_[0-*]_type: Trip point type
185 |---trip_point_[0-*]_hyst: Hysteresis value for this trip point 186 |---trip_point_[0-*]_hyst: Hysteresis value for this trip point
@@ -256,6 +257,10 @@ policy
256 One of the various thermal governors used for a particular zone. 257 One of the various thermal governors used for a particular zone.
257 RW, Required 258 RW, Required
258 259
260available_policies
261 Available thermal governors which can be used for a particular zone.
262 RO, Required
263
259trip_point_[0-*]_temp 264trip_point_[0-*]_temp
260 The temperature above which trip point will be fired. 265 The temperature above which trip point will be fired.
261 Unit: millidegree Celsius 266 Unit: millidegree Celsius
@@ -417,6 +422,7 @@ method, the sys I/F structure will be built like this:
417 |---temp: 37000 422 |---temp: 37000
418 |---mode: enabled 423 |---mode: enabled
419 |---policy: step_wise 424 |---policy: step_wise
425 |---available_policies: step_wise fair_share
420 |---trip_point_0_temp: 100000 426 |---trip_point_0_temp: 100000
421 |---trip_point_0_type: critical 427 |---trip_point_0_type: critical
422 |---trip_point_1_temp: 80000 428 |---trip_point_1_temp: 80000
diff --git a/Documentation/watchdog/src/watchdog-test.c b/Documentation/watchdog/src/watchdog-test.c
index 3da822967ee0..fcdde8fc98be 100644
--- a/Documentation/watchdog/src/watchdog-test.c
+++ b/Documentation/watchdog/src/watchdog-test.c
@@ -41,6 +41,7 @@ static void term(int sig)
41int main(int argc, char *argv[]) 41int main(int argc, char *argv[])
42{ 42{
43 int flags; 43 int flags;
44 unsigned int ping_rate = 1;
44 45
45 fd = open("/dev/watchdog", O_WRONLY); 46 fd = open("/dev/watchdog", O_WRONLY);
46 47
@@ -63,22 +64,33 @@ int main(int argc, char *argv[])
63 fprintf(stderr, "Watchdog card enabled.\n"); 64 fprintf(stderr, "Watchdog card enabled.\n");
64 fflush(stderr); 65 fflush(stderr);
65 goto end; 66 goto end;
67 } else if (!strncasecmp(argv[1], "-t", 2) && argv[2]) {
68 flags = atoi(argv[2]);
69 ioctl(fd, WDIOC_SETTIMEOUT, &flags);
70 fprintf(stderr, "Watchdog timeout set to %u seconds.\n", flags);
71 fflush(stderr);
72 goto end;
73 } else if (!strncasecmp(argv[1], "-p", 2) && argv[2]) {
74 ping_rate = strtoul(argv[2], NULL, 0);
75 fprintf(stderr, "Watchdog ping rate set to %u seconds.\n", ping_rate);
76 fflush(stderr);
66 } else { 77 } else {
67 fprintf(stderr, "-d to disable, -e to enable.\n"); 78 fprintf(stderr, "-d to disable, -e to enable, -t <n> to set " \
79 "the timeout,\n-p <n> to set the ping rate, and \n");
68 fprintf(stderr, "run by itself to tick the card.\n"); 80 fprintf(stderr, "run by itself to tick the card.\n");
69 fflush(stderr); 81 fflush(stderr);
70 goto end; 82 goto end;
71 } 83 }
72 } else {
73 fprintf(stderr, "Watchdog Ticking Away!\n");
74 fflush(stderr);
75 } 84 }
76 85
86 fprintf(stderr, "Watchdog Ticking Away!\n");
87 fflush(stderr);
88
77 signal(SIGINT, term); 89 signal(SIGINT, term);
78 90
79 while(1) { 91 while(1) {
80 keep_alive(); 92 keep_alive();
81 sleep(1); 93 sleep(ping_rate);
82 } 94 }
83end: 95end:
84 close(fd); 96 close(fd);
diff --git a/MAINTAINERS b/MAINTAINERS
index c978a257f4aa..bcd263de4827 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -808,6 +808,13 @@ S: Maintained
808F: drivers/video/fbdev/arcfb.c 808F: drivers/video/fbdev/arcfb.c
809F: drivers/video/fbdev/core/fb_defio.c 809F: drivers/video/fbdev/core/fb_defio.c
810 810
811ARCNET NETWORK LAYER
812M: Michael Grzeschik <m.grzeschik@pengutronix.de>
813L: netdev@vger.kernel.org
814S: Maintained
815F: drivers/net/arcnet/
816F: include/uapi/linux/if_arcnet.h
817
811ARM MFM AND FLOPPY DRIVERS 818ARM MFM AND FLOPPY DRIVERS
812M: Ian Molton <spyro@f2s.com> 819M: Ian Molton <spyro@f2s.com>
813S: Maintained 820S: Maintained
@@ -6452,11 +6459,11 @@ F: drivers/hwmon/ltc4261.c
6452LTP (Linux Test Project) 6459LTP (Linux Test Project)
6453M: Mike Frysinger <vapier@gentoo.org> 6460M: Mike Frysinger <vapier@gentoo.org>
6454M: Cyril Hrubis <chrubis@suse.cz> 6461M: Cyril Hrubis <chrubis@suse.cz>
6455M: Wanlong Gao <gaowanlong@cn.fujitsu.com> 6462M: Wanlong Gao <wanlong.gao@gmail.com>
6456M: Jan Stancek <jstancek@redhat.com> 6463M: Jan Stancek <jstancek@redhat.com>
6457M: Stanislav Kholmanskikh <stanislav.kholmanskikh@oracle.com> 6464M: Stanislav Kholmanskikh <stanislav.kholmanskikh@oracle.com>
6458M: Alexey Kodanev <alexey.kodanev@oracle.com> 6465M: Alexey Kodanev <alexey.kodanev@oracle.com>
6459L: ltp-list@lists.sourceforge.net (subscribers-only) 6466L: ltp@lists.linux.it (subscribers-only)
6460W: http://linux-test-project.github.io/ 6467W: http://linux-test-project.github.io/
6461T: git git://github.com/linux-test-project/ltp.git 6468T: git git://github.com/linux-test-project/ltp.git
6462S: Maintained 6469S: Maintained
@@ -6789,6 +6796,14 @@ W: http://www.mellanox.com
6789Q: http://patchwork.ozlabs.org/project/netdev/list/ 6796Q: http://patchwork.ozlabs.org/project/netdev/list/
6790F: drivers/net/ethernet/mellanox/mlxsw/ 6797F: drivers/net/ethernet/mellanox/mlxsw/
6791 6798
6799MEMBARRIER SUPPORT
6800M: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
6801M: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
6802L: linux-kernel@vger.kernel.org
6803S: Supported
6804F: kernel/membarrier.c
6805F: include/uapi/linux/membarrier.h
6806
6792MEMORY MANAGEMENT 6807MEMORY MANAGEMENT
6793L: linux-mm@kvack.org 6808L: linux-mm@kvack.org
6794W: http://www.linux-mm.org 6809W: http://www.linux-mm.org
@@ -7395,6 +7410,7 @@ NTB DRIVER CORE
7395M: Jon Mason <jdmason@kudzu.us> 7410M: Jon Mason <jdmason@kudzu.us>
7396M: Dave Jiang <dave.jiang@intel.com> 7411M: Dave Jiang <dave.jiang@intel.com>
7397M: Allen Hubbe <Allen.Hubbe@emc.com> 7412M: Allen Hubbe <Allen.Hubbe@emc.com>
7413L: linux-ntb@googlegroups.com
7398S: Supported 7414S: Supported
7399W: https://github.com/jonmason/ntb/wiki 7415W: https://github.com/jonmason/ntb/wiki
7400T: git git://github.com/jonmason/ntb.git 7416T: git git://github.com/jonmason/ntb.git
@@ -7406,6 +7422,7 @@ F: include/linux/ntb_transport.h
7406NTB INTEL DRIVER 7422NTB INTEL DRIVER
7407M: Jon Mason <jdmason@kudzu.us> 7423M: Jon Mason <jdmason@kudzu.us>
7408M: Dave Jiang <dave.jiang@intel.com> 7424M: Dave Jiang <dave.jiang@intel.com>
7425L: linux-ntb@googlegroups.com
7409S: Supported 7426S: Supported
7410W: https://github.com/jonmason/ntb/wiki 7427W: https://github.com/jonmason/ntb/wiki
7411T: git git://github.com/jonmason/ntb.git 7428T: git git://github.com/jonmason/ntb.git
@@ -8489,7 +8506,6 @@ F: Documentation/networking/LICENSE.qla3xxx
8489F: drivers/net/ethernet/qlogic/qla3xxx.* 8506F: drivers/net/ethernet/qlogic/qla3xxx.*
8490 8507
8491QLOGIC QLCNIC (1/10)Gb ETHERNET DRIVER 8508QLOGIC QLCNIC (1/10)Gb ETHERNET DRIVER
8492M: Shahed Shaikh <shahed.shaikh@qlogic.com>
8493M: Dept-GELinuxNICDev@qlogic.com 8509M: Dept-GELinuxNICDev@qlogic.com
8494L: netdev@vger.kernel.org 8510L: netdev@vger.kernel.org
8495S: Supported 8511S: Supported
@@ -10327,6 +10343,16 @@ F: include/uapi/linux/thermal.h
10327F: include/linux/cpu_cooling.h 10343F: include/linux/cpu_cooling.h
10328F: Documentation/devicetree/bindings/thermal/ 10344F: Documentation/devicetree/bindings/thermal/
10329 10345
10346THERMAL/CPU_COOLING
10347M: Amit Daniel Kachhap <amit.kachhap@gmail.com>
10348M: Viresh Kumar <viresh.kumar@linaro.org>
10349M: Javi Merino <javi.merino@arm.com>
10350L: linux-pm@vger.kernel.org
10351S: Supported
10352F: Documentation/thermal/cpu-cooling-api.txt
10353F: drivers/thermal/cpu_cooling.c
10354F: include/linux/cpu_cooling.h
10355
10330THINGM BLINK(1) USB RGB LED DRIVER 10356THINGM BLINK(1) USB RGB LED DRIVER
10331M: Vivien Didelot <vivien.didelot@savoirfairelinux.com> 10357M: Vivien Didelot <vivien.didelot@savoirfairelinux.com>
10332S: Maintained 10358S: Maintained
@@ -11228,7 +11254,6 @@ VOLTAGE AND CURRENT REGULATOR FRAMEWORK
11228M: Liam Girdwood <lgirdwood@gmail.com> 11254M: Liam Girdwood <lgirdwood@gmail.com>
11229M: Mark Brown <broonie@kernel.org> 11255M: Mark Brown <broonie@kernel.org>
11230L: linux-kernel@vger.kernel.org 11256L: linux-kernel@vger.kernel.org
11231W: http://opensource.wolfsonmicro.com/node/15
11232W: http://www.slimlogic.co.uk/?p=48 11257W: http://www.slimlogic.co.uk/?p=48
11233T: git git://git.kernel.org/pub/scm/linux/kernel/git/broonie/regulator.git 11258T: git git://git.kernel.org/pub/scm/linux/kernel/git/broonie/regulator.git
11234S: Supported 11259S: Supported
@@ -11242,6 +11267,7 @@ L: netdev@vger.kernel.org
11242S: Maintained 11267S: Maintained
11243F: drivers/net/vrf.c 11268F: drivers/net/vrf.c
11244F: include/net/vrf.h 11269F: include/net/vrf.h
11270F: Documentation/networking/vrf.txt
11245 11271
11246VT1211 HARDWARE MONITOR DRIVER 11272VT1211 HARDWARE MONITOR DRIVER
11247M: Juerg Haefliger <juergh@gmail.com> 11273M: Juerg Haefliger <juergh@gmail.com>
@@ -11357,17 +11383,15 @@ WM97XX TOUCHSCREEN DRIVERS
11357M: Mark Brown <broonie@kernel.org> 11383M: Mark Brown <broonie@kernel.org>
11358M: Liam Girdwood <lrg@slimlogic.co.uk> 11384M: Liam Girdwood <lrg@slimlogic.co.uk>
11359L: linux-input@vger.kernel.org 11385L: linux-input@vger.kernel.org
11360T: git git://opensource.wolfsonmicro.com/linux-2.6-touch 11386W: https://github.com/CirrusLogic/linux-drivers/wiki
11361W: http://opensource.wolfsonmicro.com/node/7
11362S: Supported 11387S: Supported
11363F: drivers/input/touchscreen/*wm97* 11388F: drivers/input/touchscreen/*wm97*
11364F: include/linux/wm97xx.h 11389F: include/linux/wm97xx.h
11365 11390
11366WOLFSON MICROELECTRONICS DRIVERS 11391WOLFSON MICROELECTRONICS DRIVERS
11367L: patches@opensource.wolfsonmicro.com 11392L: patches@opensource.wolfsonmicro.com
11368T: git git://opensource.wolfsonmicro.com/linux-2.6-asoc 11393T: git https://github.com/CirrusLogic/linux-drivers.git
11369T: git git://opensource.wolfsonmicro.com/linux-2.6-audioplus 11394W: https://github.com/CirrusLogic/linux-drivers/wiki
11370W: http://opensource.wolfsonmicro.com/content/linux-drivers-wolfson-devices
11371S: Supported 11395S: Supported
11372F: Documentation/hwmon/wm83?? 11396F: Documentation/hwmon/wm83??
11373F: arch/arm/mach-s3c64xx/mach-crag6410* 11397F: arch/arm/mach-s3c64xx/mach-crag6410*
diff --git a/Makefile b/Makefile
index f2d27061e5f7..84f4b31e3c6e 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
1VERSION = 4 1VERSION = 4
2PATCHLEVEL = 2 2PATCHLEVEL = 3
3SUBLEVEL = 0 3SUBLEVEL = 0
4EXTRAVERSION = 4EXTRAVERSION = -rc2
5NAME = Hurr durr I'ma sheep 5NAME = Hurr durr I'ma sheep
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
diff --git a/arch/alpha/include/asm/io.h b/arch/alpha/include/asm/io.h
index f05bdb4b1cb9..ff4049155c84 100644
--- a/arch/alpha/include/asm/io.h
+++ b/arch/alpha/include/asm/io.h
@@ -297,7 +297,9 @@ static inline void __iomem * ioremap_nocache(unsigned long offset,
297 unsigned long size) 297 unsigned long size)
298{ 298{
299 return ioremap(offset, size); 299 return ioremap(offset, size);
300} 300}
301
302#define ioremap_uc ioremap_nocache
301 303
302static inline void iounmap(volatile void __iomem *addr) 304static inline void iounmap(volatile void __iomem *addr)
303{ 305{
diff --git a/arch/alpha/kernel/irq.c b/arch/alpha/kernel/irq.c
index 2804648c8ff4..2d6efcff3bf3 100644
--- a/arch/alpha/kernel/irq.c
+++ b/arch/alpha/kernel/irq.c
@@ -117,6 +117,6 @@ handle_irq(int irq)
117 } 117 }
118 118
119 irq_enter(); 119 irq_enter();
120 generic_handle_irq_desc(irq, desc); 120 generic_handle_irq_desc(desc);
121 irq_exit(); 121 irq_exit();
122} 122}
diff --git a/arch/alpha/kernel/pci.c b/arch/alpha/kernel/pci.c
index cded02c890aa..5f387ee5b5c5 100644
--- a/arch/alpha/kernel/pci.c
+++ b/arch/alpha/kernel/pci.c
@@ -242,7 +242,12 @@ pci_restore_srm_config(void)
242 242
243void pcibios_fixup_bus(struct pci_bus *bus) 243void pcibios_fixup_bus(struct pci_bus *bus)
244{ 244{
245 struct pci_dev *dev; 245 struct pci_dev *dev = bus->self;
246
247 if (pci_has_flag(PCI_PROBE_ONLY) && dev &&
248 (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
249 pci_read_bridge_bases(bus);
250 }
246 251
247 list_for_each_entry(dev, &bus->devices, bus_list) { 252 list_for_each_entry(dev, &bus->devices, bus_list) {
248 pdev_save_srm_config(dev); 253 pdev_save_srm_config(dev);
diff --git a/arch/alpha/lib/udelay.c b/arch/alpha/lib/udelay.c
index 69d52aa37bae..f2d81ff38aa6 100644
--- a/arch/alpha/lib/udelay.c
+++ b/arch/alpha/lib/udelay.c
@@ -30,6 +30,7 @@ __delay(int loops)
30 " bgt %0,1b" 30 " bgt %0,1b"
31 : "=&r" (tmp), "=r" (loops) : "1"(loops)); 31 : "=&r" (tmp), "=r" (loops) : "1"(loops));
32} 32}
33EXPORT_SYMBOL(__delay);
33 34
34#ifdef CONFIG_SMP 35#ifdef CONFIG_SMP
35#define LPJ cpu_data[smp_processor_id()].loops_per_jiffy 36#define LPJ cpu_data[smp_processor_id()].loops_per_jiffy
diff --git a/arch/arc/kernel/mcip.c b/arch/arc/kernel/mcip.c
index d9e44b62df05..4ffd1855f1bd 100644
--- a/arch/arc/kernel/mcip.c
+++ b/arch/arc/kernel/mcip.c
@@ -252,7 +252,7 @@ static struct irq_chip idu_irq_chip = {
252 252
253static int idu_first_irq; 253static int idu_first_irq;
254 254
255static void idu_cascade_isr(unsigned int __core_irq, struct irq_desc *desc) 255static void idu_cascade_isr(struct irq_desc *desc)
256{ 256{
257 struct irq_domain *domain = irq_desc_get_handler_data(desc); 257 struct irq_domain *domain = irq_desc_get_handler_data(desc);
258 unsigned int core_irq = irq_desc_get_irq(desc); 258 unsigned int core_irq = irq_desc_get_irq(desc);
diff --git a/arch/arc/plat-axs10x/axs10x.c b/arch/arc/plat-axs10x/axs10x.c
index ad9825d4026a..0a77b19e1df8 100644
--- a/arch/arc/plat-axs10x/axs10x.c
+++ b/arch/arc/plat-axs10x/axs10x.c
@@ -402,6 +402,8 @@ static void __init axs103_early_init(void)
402 unsigned int num_cores = (read_aux_reg(ARC_REG_MCIP_BCR) >> 16) & 0x3F; 402 unsigned int num_cores = (read_aux_reg(ARC_REG_MCIP_BCR) >> 16) & 0x3F;
403 if (num_cores > 2) 403 if (num_cores > 2)
404 arc_set_core_freq(50 * 1000000); 404 arc_set_core_freq(50 * 1000000);
405 else if (num_cores == 2)
406 arc_set_core_freq(75 * 1000000);
405#endif 407#endif
406 408
407 switch (arc_get_core_freq()/1000000) { 409 switch (arc_get_core_freq()/1000000) {
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 7451b447cc2d..2c2b28ee4811 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -54,6 +54,14 @@ AS += -EL
54LD += -EL 54LD += -EL
55endif 55endif
56 56
57#
58# The Scalar Replacement of Aggregates (SRA) optimization pass in GCC 4.9 and
59# later may result in code being generated that handles signed short and signed
60# char struct members incorrectly. So disable it.
61# (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=65932)
62#
63KBUILD_CFLAGS += $(call cc-option,-fno-ipa-sra)
64
57# This selects which instruction set is used. 65# This selects which instruction set is used.
58# Note that GCC does not numerically define an architecture version 66# Note that GCC does not numerically define an architecture version
59# macro, but instead defines a whole series of macros which makes 67# macro, but instead defines a whole series of macros which makes
diff --git a/arch/arm/common/it8152.c b/arch/arm/common/it8152.c
index 96dabcb6c621..996aed3b4eee 100644
--- a/arch/arm/common/it8152.c
+++ b/arch/arm/common/it8152.c
@@ -95,7 +95,7 @@ void it8152_init_irq(void)
95 } 95 }
96} 96}
97 97
98void it8152_irq_demux(unsigned int irq, struct irq_desc *desc) 98void it8152_irq_demux(struct irq_desc *desc)
99{ 99{
100 int bits_pd, bits_lp, bits_ld; 100 int bits_pd, bits_lp, bits_ld;
101 int i; 101 int i;
diff --git a/arch/arm/common/locomo.c b/arch/arm/common/locomo.c
index 304adea4bc52..0e97b4b871f9 100644
--- a/arch/arm/common/locomo.c
+++ b/arch/arm/common/locomo.c
@@ -138,7 +138,7 @@ static struct locomo_dev_info locomo_devices[] = {
138 }, 138 },
139}; 139};
140 140
141static void locomo_handler(unsigned int __irq, struct irq_desc *desc) 141static void locomo_handler(struct irq_desc *desc)
142{ 142{
143 struct locomo *lchip = irq_desc_get_chip_data(desc); 143 struct locomo *lchip = irq_desc_get_chip_data(desc);
144 int req, i; 144 int req, i;
diff --git a/arch/arm/common/sa1111.c b/arch/arm/common/sa1111.c
index 4f290250fa93..3d224941b541 100644
--- a/arch/arm/common/sa1111.c
+++ b/arch/arm/common/sa1111.c
@@ -196,10 +196,8 @@ static struct sa1111_dev_info sa1111_devices[] = {
196 * active IRQs causes the interrupt output to pulse, the upper levels 196 * active IRQs causes the interrupt output to pulse, the upper levels
197 * will call us again if there are more interrupts to process. 197 * will call us again if there are more interrupts to process.
198 */ 198 */
199static void 199static void sa1111_irq_handler(struct irq_desc *desc)
200sa1111_irq_handler(unsigned int __irq, struct irq_desc *desc)
201{ 200{
202 unsigned int irq = irq_desc_get_irq(desc);
203 unsigned int stat0, stat1, i; 201 unsigned int stat0, stat1, i;
204 struct sa1111 *sachip = irq_desc_get_handler_data(desc); 202 struct sa1111 *sachip = irq_desc_get_handler_data(desc);
205 void __iomem *mapbase = sachip->base + SA1111_INTC; 203 void __iomem *mapbase = sachip->base + SA1111_INTC;
@@ -214,7 +212,7 @@ sa1111_irq_handler(unsigned int __irq, struct irq_desc *desc)
214 sa1111_writel(stat1, mapbase + SA1111_INTSTATCLR1); 212 sa1111_writel(stat1, mapbase + SA1111_INTSTATCLR1);
215 213
216 if (stat0 == 0 && stat1 == 0) { 214 if (stat0 == 0 && stat1 == 0) {
217 do_bad_IRQ(irq, desc); 215 do_bad_IRQ(desc);
218 return; 216 return;
219 } 217 }
220 218
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index 7bbf325a4f31..b2bc8e11471d 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -491,11 +491,6 @@ THUMB( orr \reg , \reg , #PSR_T_BIT )
491#endif 491#endif
492 .endm 492 .endm
493 493
494 .macro uaccess_save_and_disable, tmp
495 uaccess_save \tmp
496 uaccess_disable \tmp
497 .endm
498
499 .irp c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo 494 .irp c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
500 .macro ret\c, reg 495 .macro ret\c, reg
501#if __LINUX_ARM_ARCH__ < 6 496#if __LINUX_ARM_ARCH__ < 6
diff --git a/arch/arm/include/asm/bug.h b/arch/arm/include/asm/bug.h
index b274bde24905..e7335a92144e 100644
--- a/arch/arm/include/asm/bug.h
+++ b/arch/arm/include/asm/bug.h
@@ -40,6 +40,7 @@ do { \
40 "2:\t.asciz " #__file "\n" \ 40 "2:\t.asciz " #__file "\n" \
41 ".popsection\n" \ 41 ".popsection\n" \
42 ".pushsection __bug_table,\"a\"\n" \ 42 ".pushsection __bug_table,\"a\"\n" \
43 ".align 2\n" \
43 "3:\t.word 1b, 2b\n" \ 44 "3:\t.word 1b, 2b\n" \
44 "\t.hword " #__line ", 0\n" \ 45 "\t.hword " #__line ", 0\n" \
45 ".popsection"); \ 46 ".popsection"); \
diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h
index e878129f2fee..fc8ba1663601 100644
--- a/arch/arm/include/asm/domain.h
+++ b/arch/arm/include/asm/domain.h
@@ -12,6 +12,7 @@
12 12
13#ifndef __ASSEMBLY__ 13#ifndef __ASSEMBLY__
14#include <asm/barrier.h> 14#include <asm/barrier.h>
15#include <asm/thread_info.h>
15#endif 16#endif
16 17
17/* 18/*
@@ -89,7 +90,8 @@ static inline unsigned int get_domain(void)
89 90
90 asm( 91 asm(
91 "mrc p15, 0, %0, c3, c0 @ get domain" 92 "mrc p15, 0, %0, c3, c0 @ get domain"
92 : "=r" (domain)); 93 : "=r" (domain)
94 : "m" (current_thread_info()->cpu_domain));
93 95
94 return domain; 96 return domain;
95} 97}
@@ -98,7 +100,7 @@ static inline void set_domain(unsigned val)
98{ 100{
99 asm volatile( 101 asm volatile(
100 "mcr p15, 0, %0, c3, c0 @ set domain" 102 "mcr p15, 0, %0, c3, c0 @ set domain"
101 : : "r" (val)); 103 : : "r" (val) : "memory");
102 isb(); 104 isb();
103} 105}
104 106
diff --git a/arch/arm/include/asm/hardware/it8152.h b/arch/arm/include/asm/hardware/it8152.h
index d36a73d7c0e8..076777ff3daa 100644
--- a/arch/arm/include/asm/hardware/it8152.h
+++ b/arch/arm/include/asm/hardware/it8152.h
@@ -106,7 +106,7 @@ extern void __iomem *it8152_base_address;
106struct pci_dev; 106struct pci_dev;
107struct pci_sys_data; 107struct pci_sys_data;
108 108
109extern void it8152_irq_demux(unsigned int irq, struct irq_desc *desc); 109extern void it8152_irq_demux(struct irq_desc *desc);
110extern void it8152_init_irq(void); 110extern void it8152_init_irq(void);
111extern int it8152_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin); 111extern int it8152_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin);
112extern int it8152_pci_setup(int nr, struct pci_sys_data *sys); 112extern int it8152_pci_setup(int nr, struct pci_sys_data *sys);
diff --git a/arch/arm/include/asm/hw_irq.h b/arch/arm/include/asm/hw_irq.h
index af79da40af2a..9beb92914f4d 100644
--- a/arch/arm/include/asm/hw_irq.h
+++ b/arch/arm/include/asm/hw_irq.h
@@ -11,12 +11,6 @@ static inline void ack_bad_irq(int irq)
11 pr_crit("unexpected IRQ trap at vector %02x\n", irq); 11 pr_crit("unexpected IRQ trap at vector %02x\n", irq);
12} 12}
13 13
14void set_irq_flags(unsigned int irq, unsigned int flags);
15
16#define IRQF_VALID (1 << 0)
17#define IRQF_PROBE (1 << 1)
18#define IRQF_NOAUTOEN (1 << 2)
19
20#define ARCH_IRQ_INIT_FLAGS (IRQ_NOREQUEST | IRQ_NOPROBE) 14#define ARCH_IRQ_INIT_FLAGS (IRQ_NOREQUEST | IRQ_NOPROBE)
21 15
22#endif 16#endif
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index dcba0fa5176e..c4072d9f32c7 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -29,21 +29,18 @@
29 29
30#define __KVM_HAVE_ARCH_INTC_INITIALIZED 30#define __KVM_HAVE_ARCH_INTC_INITIALIZED
31 31
32#if defined(CONFIG_KVM_ARM_MAX_VCPUS)
33#define KVM_MAX_VCPUS CONFIG_KVM_ARM_MAX_VCPUS
34#else
35#define KVM_MAX_VCPUS 0
36#endif
37
38#define KVM_USER_MEM_SLOTS 32 32#define KVM_USER_MEM_SLOTS 32
39#define KVM_PRIVATE_MEM_SLOTS 4 33#define KVM_PRIVATE_MEM_SLOTS 4
40#define KVM_COALESCED_MMIO_PAGE_OFFSET 1 34#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
41#define KVM_HAVE_ONE_REG 35#define KVM_HAVE_ONE_REG
36#define KVM_HALT_POLL_NS_DEFAULT 500000
42 37
43#define KVM_VCPU_MAX_FEATURES 2 38#define KVM_VCPU_MAX_FEATURES 2
44 39
45#include <kvm/arm_vgic.h> 40#include <kvm/arm_vgic.h>
46 41
42#define KVM_MAX_VCPUS VGIC_V2_MAX_CPUS
43
47u32 *kvm_vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num, u32 mode); 44u32 *kvm_vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num, u32 mode);
48int __attribute_const__ kvm_target_cpu(void); 45int __attribute_const__ kvm_target_cpu(void);
49int kvm_reset_vcpu(struct kvm_vcpu *vcpu); 46int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
@@ -148,6 +145,7 @@ struct kvm_vm_stat {
148 145
149struct kvm_vcpu_stat { 146struct kvm_vcpu_stat {
150 u32 halt_successful_poll; 147 u32 halt_successful_poll;
148 u32 halt_attempted_poll;
151 u32 halt_wakeup; 149 u32 halt_wakeup;
152}; 150};
153 151
diff --git a/arch/arm/include/asm/mach/irq.h b/arch/arm/include/asm/mach/irq.h
index 2092ee1e1300..de4634b51456 100644
--- a/arch/arm/include/asm/mach/irq.h
+++ b/arch/arm/include/asm/mach/irq.h
@@ -23,10 +23,10 @@ extern int show_fiq_list(struct seq_file *, int);
23/* 23/*
24 * This is for easy migration, but should be changed in the source 24 * This is for easy migration, but should be changed in the source
25 */ 25 */
26#define do_bad_IRQ(irq,desc) \ 26#define do_bad_IRQ(desc) \
27do { \ 27do { \
28 raw_spin_lock(&desc->lock); \ 28 raw_spin_lock(&desc->lock); \
29 handle_bad_irq(irq, desc); \ 29 handle_bad_irq(desc); \
30 raw_spin_unlock(&desc->lock); \ 30 raw_spin_unlock(&desc->lock); \
31} while(0) 31} while(0)
32 32
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index d0a1119dcaf3..776757d1604a 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -25,7 +25,6 @@
25struct task_struct; 25struct task_struct;
26 26
27#include <asm/types.h> 27#include <asm/types.h>
28#include <asm/domain.h>
29 28
30typedef unsigned long mm_segment_t; 29typedef unsigned long mm_segment_t;
31 30
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index 5ff4826cb154..2766183e69df 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -79,26 +79,6 @@ asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
79 handle_IRQ(irq, regs); 79 handle_IRQ(irq, regs);
80} 80}
81 81
82void set_irq_flags(unsigned int irq, unsigned int iflags)
83{
84 unsigned long clr = 0, set = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
85
86 if (irq >= nr_irqs) {
87 pr_err("Trying to set irq flags for IRQ%d\n", irq);
88 return;
89 }
90
91 if (iflags & IRQF_VALID)
92 clr |= IRQ_NOREQUEST;
93 if (iflags & IRQF_PROBE)
94 clr |= IRQ_NOPROBE;
95 if (!(iflags & IRQF_NOAUTOEN))
96 clr |= IRQ_NOAUTOEN;
97 /* Order is clear bits in "clr" then set bits in "set" */
98 irq_modify_status(irq, clr, set & ~clr);
99}
100EXPORT_SYMBOL_GPL(set_irq_flags);
101
102void __init init_IRQ(void) 82void __init init_IRQ(void)
103{ 83{
104 int ret; 84 int ret;
diff --git a/arch/arm/kernel/kgdb.c b/arch/arm/kernel/kgdb.c
index a6ad93c9bce3..fd9eefce0a7b 100644
--- a/arch/arm/kernel/kgdb.c
+++ b/arch/arm/kernel/kgdb.c
@@ -259,15 +259,17 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
259 if (err) 259 if (err)
260 return err; 260 return err;
261 261
262 patch_text((void *)bpt->bpt_addr, 262 /* Machine is already stopped, so we can use __patch_text() directly */
263 *(unsigned int *)arch_kgdb_ops.gdb_bpt_instr); 263 __patch_text((void *)bpt->bpt_addr,
264 *(unsigned int *)arch_kgdb_ops.gdb_bpt_instr);
264 265
265 return err; 266 return err;
266} 267}
267 268
268int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt) 269int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
269{ 270{
270 patch_text((void *)bpt->bpt_addr, *(unsigned int *)bpt->saved_instr); 271 /* Machine is already stopped, so we can use __patch_text() directly */
272 __patch_text((void *)bpt->bpt_addr, *(unsigned int *)bpt->saved_instr);
271 273
272 return 0; 274 return 0;
273} 275}
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index a3089bacb8d8..7a7c4cea5523 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -226,6 +226,7 @@ copy_thread(unsigned long clone_flags, unsigned long stack_start,
226 226
227 memset(&thread->cpu_context, 0, sizeof(struct cpu_context_save)); 227 memset(&thread->cpu_context, 0, sizeof(struct cpu_context_save));
228 228
229#ifdef CONFIG_CPU_USE_DOMAINS
229 /* 230 /*
230 * Copy the initial value of the domain access control register 231 * Copy the initial value of the domain access control register
231 * from the current thread: thread->addr_limit will have been 232 * from the current thread: thread->addr_limit will have been
@@ -233,6 +234,7 @@ copy_thread(unsigned long clone_flags, unsigned long stack_start,
233 * kernel/fork.c 234 * kernel/fork.c
234 */ 235 */
235 thread->cpu_domain = get_domain(); 236 thread->cpu_domain = get_domain();
237#endif
236 238
237 if (likely(!(p->flags & PF_KTHREAD))) { 239 if (likely(!(p->flags & PF_KTHREAD))) {
238 *childregs = *current_pt_regs(); 240 *childregs = *current_pt_regs();
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index b6cda06b455f..7b8f2141427b 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -343,15 +343,18 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig,
343 */ 343 */
344 thumb = handler & 1; 344 thumb = handler & 1;
345 345
346#if __LINUX_ARM_ARCH__ >= 7
347 /* 346 /*
348 * Clear the If-Then Thumb-2 execution state 347 * Clear the If-Then Thumb-2 execution state. ARM spec
349 * ARM spec requires this to be all 000s in ARM mode 348 * requires this to be all 000s in ARM mode. Snapdragon
350 * Snapdragon S4/Krait misbehaves on a Thumb=>ARM 349 * S4/Krait misbehaves on a Thumb=>ARM signal transition
351 * signal transition without this. 350 * without this.
351 *
352 * We must do this whenever we are running on a Thumb-2
353 * capable CPU, which includes ARMv6T2. However, we elect
354 * to always do this to simplify the code; this field is
355 * marked UNK/SBZP for older architectures.
352 */ 356 */
353 cpsr &= ~PSR_IT_MASK; 357 cpsr &= ~PSR_IT_MASK;
354#endif
355 358
356 if (thumb) { 359 if (thumb) {
357 cpsr |= PSR_T_BIT; 360 cpsr |= PSR_T_BIT;
diff --git a/arch/arm/kvm/Kconfig b/arch/arm/kvm/Kconfig
index bfb915d05665..210eccadb69a 100644
--- a/arch/arm/kvm/Kconfig
+++ b/arch/arm/kvm/Kconfig
@@ -45,15 +45,4 @@ config KVM_ARM_HOST
45 ---help--- 45 ---help---
46 Provides host support for ARM processors. 46 Provides host support for ARM processors.
47 47
48config KVM_ARM_MAX_VCPUS
49 int "Number maximum supported virtual CPUs per VM"
50 depends on KVM_ARM_HOST
51 default 4
52 help
53 Static number of max supported virtual CPUs per VM.
54
55 If you choose a high number, the vcpu structures will be quite
56 large, so only choose a reasonable number that you expect to
57 actually use.
58
59endif # VIRTUALIZATION 48endif # VIRTUALIZATION
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index ce404a5c3062..dc017adfddc8 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -446,7 +446,7 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
446 * Map the VGIC hardware resources before running a vcpu the first 446 * Map the VGIC hardware resources before running a vcpu the first
447 * time on this VM. 447 * time on this VM.
448 */ 448 */
449 if (unlikely(!vgic_ready(kvm))) { 449 if (unlikely(irqchip_in_kernel(kvm) && !vgic_ready(kvm))) {
450 ret = kvm_vgic_map_resources(kvm); 450 ret = kvm_vgic_map_resources(kvm);
451 if (ret) 451 if (ret)
452 return ret; 452 return ret;
diff --git a/arch/arm/kvm/interrupts_head.S b/arch/arm/kvm/interrupts_head.S
index 702740d37465..51a59504bef4 100644
--- a/arch/arm/kvm/interrupts_head.S
+++ b/arch/arm/kvm/interrupts_head.S
@@ -515,8 +515,7 @@ ARM_BE8(rev r6, r6 )
515 515
516 mrc p15, 0, r2, c14, c3, 1 @ CNTV_CTL 516 mrc p15, 0, r2, c14, c3, 1 @ CNTV_CTL
517 str r2, [vcpu, #VCPU_TIMER_CNTV_CTL] 517 str r2, [vcpu, #VCPU_TIMER_CNTV_CTL]
518 bic r2, #1 @ Clear ENABLE 518
519 mcr p15, 0, r2, c14, c3, 1 @ CNTV_CTL
520 isb 519 isb
521 520
522 mrrc p15, 3, rr_lo_hi(r2, r3), c14 @ CNTV_CVAL 521 mrrc p15, 3, rr_lo_hi(r2, r3), c14 @ CNTV_CVAL
@@ -529,6 +528,9 @@ ARM_BE8(rev r6, r6 )
529 mcrr p15, 4, r2, r2, c14 @ CNTVOFF 528 mcrr p15, 4, r2, r2, c14 @ CNTVOFF
530 529
5311: 5301:
531 mov r2, #0 @ Clear ENABLE
532 mcr p15, 0, r2, c14, c3, 1 @ CNTV_CTL
533
532 @ Allow physical timer/counter access for the host 534 @ Allow physical timer/counter access for the host
533 mrc p15, 4, r2, c14, c1, 0 @ CNTHCTL 535 mrc p15, 4, r2, c14, c1, 0 @ CNTHCTL
534 orr r2, r2, #(CNTHCTL_PL1PCEN | CNTHCTL_PL1PCTEN) 536 orr r2, r2, #(CNTHCTL_PL1PCEN | CNTHCTL_PL1PCTEN)
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 7b4201294187..6984342da13d 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -1792,8 +1792,10 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
1792 if (vma->vm_flags & VM_PFNMAP) { 1792 if (vma->vm_flags & VM_PFNMAP) {
1793 gpa_t gpa = mem->guest_phys_addr + 1793 gpa_t gpa = mem->guest_phys_addr +
1794 (vm_start - mem->userspace_addr); 1794 (vm_start - mem->userspace_addr);
1795 phys_addr_t pa = (vma->vm_pgoff << PAGE_SHIFT) + 1795 phys_addr_t pa;
1796 vm_start - vma->vm_start; 1796
1797 pa = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
1798 pa += vm_start - vma->vm_start;
1797 1799
1798 /* IO region dirty page logging not allowed */ 1800 /* IO region dirty page logging not allowed */
1799 if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES) 1801 if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES)
diff --git a/arch/arm/kvm/psci.c b/arch/arm/kvm/psci.c
index 4b94b513168d..ad6f6424f1d1 100644
--- a/arch/arm/kvm/psci.c
+++ b/arch/arm/kvm/psci.c
@@ -126,7 +126,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
126 126
127static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu) 127static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu)
128{ 128{
129 int i; 129 int i, matching_cpus = 0;
130 unsigned long mpidr; 130 unsigned long mpidr;
131 unsigned long target_affinity; 131 unsigned long target_affinity;
132 unsigned long target_affinity_mask; 132 unsigned long target_affinity_mask;
@@ -151,12 +151,16 @@ static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu)
151 */ 151 */
152 kvm_for_each_vcpu(i, tmp, kvm) { 152 kvm_for_each_vcpu(i, tmp, kvm) {
153 mpidr = kvm_vcpu_get_mpidr_aff(tmp); 153 mpidr = kvm_vcpu_get_mpidr_aff(tmp);
154 if (((mpidr & target_affinity_mask) == target_affinity) && 154 if ((mpidr & target_affinity_mask) == target_affinity) {
155 !tmp->arch.pause) { 155 matching_cpus++;
156 return PSCI_0_2_AFFINITY_LEVEL_ON; 156 if (!tmp->arch.pause)
157 return PSCI_0_2_AFFINITY_LEVEL_ON;
157 } 158 }
158 } 159 }
159 160
161 if (!matching_cpus)
162 return PSCI_RET_INVALID_PARAMS;
163
160 return PSCI_0_2_AFFINITY_LEVEL_OFF; 164 return PSCI_0_2_AFFINITY_LEVEL_OFF;
161} 165}
162 166
diff --git a/arch/arm/mach-dove/irq.c b/arch/arm/mach-dove/irq.c
index 305d7c6242bb..bfb3703357c5 100644
--- a/arch/arm/mach-dove/irq.c
+++ b/arch/arm/mach-dove/irq.c
@@ -69,14 +69,14 @@ static struct irq_chip pmu_irq_chip = {
69 .irq_ack = pmu_irq_ack, 69 .irq_ack = pmu_irq_ack,
70}; 70};
71 71
72static void pmu_irq_handler(unsigned int __irq, struct irq_desc *desc) 72static void pmu_irq_handler(struct irq_desc *desc)
73{ 73{
74 unsigned int irq = irq_desc_get_irq(desc);
75 unsigned long cause = readl(PMU_INTERRUPT_CAUSE); 74 unsigned long cause = readl(PMU_INTERRUPT_CAUSE);
75 unsigned int irq;
76 76
77 cause &= readl(PMU_INTERRUPT_MASK); 77 cause &= readl(PMU_INTERRUPT_MASK);
78 if (cause == 0) { 78 if (cause == 0) {
79 do_bad_IRQ(irq, desc); 79 do_bad_IRQ(desc);
80 return; 80 return;
81 } 81 }
82 82
diff --git a/arch/arm/mach-footbridge/isa-irq.c b/arch/arm/mach-footbridge/isa-irq.c
index fcd79bc3a3e1..c01fca11b224 100644
--- a/arch/arm/mach-footbridge/isa-irq.c
+++ b/arch/arm/mach-footbridge/isa-irq.c
@@ -87,13 +87,12 @@ static struct irq_chip isa_hi_chip = {
87 .irq_unmask = isa_unmask_pic_hi_irq, 87 .irq_unmask = isa_unmask_pic_hi_irq,
88}; 88};
89 89
90static void 90static void isa_irq_handler(struct irq_desc *desc)
91isa_irq_handler(unsigned int irq, struct irq_desc *desc)
92{ 91{
93 unsigned int isa_irq = *(unsigned char *)PCIIACK_BASE; 92 unsigned int isa_irq = *(unsigned char *)PCIIACK_BASE;
94 93
95 if (isa_irq < _ISA_IRQ(0) || isa_irq >= _ISA_IRQ(16)) { 94 if (isa_irq < _ISA_IRQ(0) || isa_irq >= _ISA_IRQ(16)) {
96 do_bad_IRQ(isa_irq, desc); 95 do_bad_IRQ(desc);
97 return; 96 return;
98 } 97 }
99 98
diff --git a/arch/arm/mach-gemini/gpio.c b/arch/arm/mach-gemini/gpio.c
index 220333ed741d..2478d9f4d92d 100644
--- a/arch/arm/mach-gemini/gpio.c
+++ b/arch/arm/mach-gemini/gpio.c
@@ -126,7 +126,7 @@ static int gpio_set_irq_type(struct irq_data *d, unsigned int type)
126 return 0; 126 return 0;
127} 127}
128 128
129static void gpio_irq_handler(unsigned int irq, struct irq_desc *desc) 129static void gpio_irq_handler(struct irq_desc *desc)
130{ 130{
131 unsigned int port = (unsigned int)irq_desc_get_handler_data(desc); 131 unsigned int port = (unsigned int)irq_desc_get_handler_data(desc);
132 unsigned int gpio_irq_no, irq_stat; 132 unsigned int gpio_irq_no, irq_stat;
diff --git a/arch/arm/mach-imx/3ds_debugboard.c b/arch/arm/mach-imx/3ds_debugboard.c
index 45903be6e7b3..16496a071ecb 100644
--- a/arch/arm/mach-imx/3ds_debugboard.c
+++ b/arch/arm/mach-imx/3ds_debugboard.c
@@ -85,7 +85,7 @@ static struct platform_device smsc_lan9217_device = {
85 .resource = smsc911x_resources, 85 .resource = smsc911x_resources,
86}; 86};
87 87
88static void mxc_expio_irq_handler(u32 irq, struct irq_desc *desc) 88static void mxc_expio_irq_handler(struct irq_desc *desc)
89{ 89{
90 u32 imr_val; 90 u32 imr_val;
91 u32 int_valid; 91 u32 int_valid;
diff --git a/arch/arm/mach-imx/mach-mx31ads.c b/arch/arm/mach-imx/mach-mx31ads.c
index 2c0853560bd2..2b147e4bf9c9 100644
--- a/arch/arm/mach-imx/mach-mx31ads.c
+++ b/arch/arm/mach-imx/mach-mx31ads.c
@@ -154,7 +154,7 @@ static inline void mxc_init_imx_uart(void)
154 imx31_add_imx_uart0(&uart_pdata); 154 imx31_add_imx_uart0(&uart_pdata);
155} 155}
156 156
157static void mx31ads_expio_irq_handler(u32 irq, struct irq_desc *desc) 157static void mx31ads_expio_irq_handler(struct irq_desc *desc)
158{ 158{
159 u32 imr_val; 159 u32 imr_val;
160 u32 int_valid; 160 u32 int_valid;
diff --git a/arch/arm/mach-iop13xx/msi.c b/arch/arm/mach-iop13xx/msi.c
index 9f89e76dfbb9..f6235b28578c 100644
--- a/arch/arm/mach-iop13xx/msi.c
+++ b/arch/arm/mach-iop13xx/msi.c
@@ -91,7 +91,7 @@ static void (*write_imipr[])(u32) = {
91 write_imipr_3, 91 write_imipr_3,
92}; 92};
93 93
94static void iop13xx_msi_handler(unsigned int irq, struct irq_desc *desc) 94static void iop13xx_msi_handler(struct irq_desc *desc)
95{ 95{
96 int i, j; 96 int i, j;
97 unsigned long status; 97 unsigned long status;
diff --git a/arch/arm/mach-lpc32xx/irq.c b/arch/arm/mach-lpc32xx/irq.c
index cce4cef12b6e..2ae431e8bc1b 100644
--- a/arch/arm/mach-lpc32xx/irq.c
+++ b/arch/arm/mach-lpc32xx/irq.c
@@ -370,7 +370,7 @@ static struct irq_chip lpc32xx_irq_chip = {
370 .irq_set_wake = lpc32xx_irq_wake 370 .irq_set_wake = lpc32xx_irq_wake
371}; 371};
372 372
373static void lpc32xx_sic1_handler(unsigned int irq, struct irq_desc *desc) 373static void lpc32xx_sic1_handler(struct irq_desc *desc)
374{ 374{
375 unsigned long ints = __raw_readl(LPC32XX_INTC_STAT(LPC32XX_SIC1_BASE)); 375 unsigned long ints = __raw_readl(LPC32XX_INTC_STAT(LPC32XX_SIC1_BASE));
376 376
@@ -383,7 +383,7 @@ static void lpc32xx_sic1_handler(unsigned int irq, struct irq_desc *desc)
383 } 383 }
384} 384}
385 385
386static void lpc32xx_sic2_handler(unsigned int irq, struct irq_desc *desc) 386static void lpc32xx_sic2_handler(struct irq_desc *desc)
387{ 387{
388 unsigned long ints = __raw_readl(LPC32XX_INTC_STAT(LPC32XX_SIC2_BASE)); 388 unsigned long ints = __raw_readl(LPC32XX_INTC_STAT(LPC32XX_SIC2_BASE));
389 389
diff --git a/arch/arm/mach-netx/generic.c b/arch/arm/mach-netx/generic.c
index 6373e2bff203..842302df99c1 100644
--- a/arch/arm/mach-netx/generic.c
+++ b/arch/arm/mach-netx/generic.c
@@ -69,8 +69,7 @@ static struct platform_device *devices[] __initdata = {
69#define DEBUG_IRQ(fmt...) while (0) {} 69#define DEBUG_IRQ(fmt...) while (0) {}
70#endif 70#endif
71 71
72static void 72static void netx_hif_demux_handler(struct irq_desc *desc)
73netx_hif_demux_handler(unsigned int irq_unused, struct irq_desc *desc)
74{ 73{
75 unsigned int irq = NETX_IRQ_HIF_CHAINED(0); 74 unsigned int irq = NETX_IRQ_HIF_CHAINED(0);
76 unsigned int stat; 75 unsigned int stat;
diff --git a/arch/arm/mach-omap1/fpga.c b/arch/arm/mach-omap1/fpga.c
index dfec671b1639..39e20d0ead08 100644
--- a/arch/arm/mach-omap1/fpga.c
+++ b/arch/arm/mach-omap1/fpga.c
@@ -87,7 +87,7 @@ static void fpga_mask_ack_irq(struct irq_data *d)
87 fpga_ack_irq(d); 87 fpga_ack_irq(d);
88} 88}
89 89
90static void innovator_fpga_IRQ_demux(unsigned int irq, struct irq_desc *desc) 90static void innovator_fpga_IRQ_demux(struct irq_desc *desc)
91{ 91{
92 u32 stat; 92 u32 stat;
93 int fpga_irq; 93 int fpga_irq;
diff --git a/arch/arm/mach-omap2/prm_common.c b/arch/arm/mach-omap2/prm_common.c
index 257e98c26618..3fc2cbe52113 100644
--- a/arch/arm/mach-omap2/prm_common.c
+++ b/arch/arm/mach-omap2/prm_common.c
@@ -102,7 +102,7 @@ static void omap_prcm_events_filter_priority(unsigned long *events,
102 * dispatched accordingly. Clearing of the wakeup events should be 102 * dispatched accordingly. Clearing of the wakeup events should be
103 * done by the SoC specific individual handlers. 103 * done by the SoC specific individual handlers.
104 */ 104 */
105static void omap_prcm_irq_handler(unsigned int irq, struct irq_desc *desc) 105static void omap_prcm_irq_handler(struct irq_desc *desc)
106{ 106{
107 unsigned long pending[OMAP_PRCM_MAX_NR_PENDING_REG]; 107 unsigned long pending[OMAP_PRCM_MAX_NR_PENDING_REG];
108 unsigned long priority_pending[OMAP_PRCM_MAX_NR_PENDING_REG]; 108 unsigned long priority_pending[OMAP_PRCM_MAX_NR_PENDING_REG];
diff --git a/arch/arm/mach-pxa/balloon3.c b/arch/arm/mach-pxa/balloon3.c
index 70366b35d299..a3ebb517cca1 100644
--- a/arch/arm/mach-pxa/balloon3.c
+++ b/arch/arm/mach-pxa/balloon3.c
@@ -496,7 +496,7 @@ static struct irq_chip balloon3_irq_chip = {
496 .irq_unmask = balloon3_unmask_irq, 496 .irq_unmask = balloon3_unmask_irq,
497}; 497};
498 498
499static void balloon3_irq_handler(unsigned int __irq, struct irq_desc *desc) 499static void balloon3_irq_handler(struct irq_desc *desc)
500{ 500{
501 unsigned long pending = __raw_readl(BALLOON3_INT_CONTROL_REG) & 501 unsigned long pending = __raw_readl(BALLOON3_INT_CONTROL_REG) &
502 balloon3_irq_enabled; 502 balloon3_irq_enabled;
diff --git a/arch/arm/mach-pxa/cm-x2xx-pci.c b/arch/arm/mach-pxa/cm-x2xx-pci.c
index 1fa79f1f832d..3221ae15bef7 100644
--- a/arch/arm/mach-pxa/cm-x2xx-pci.c
+++ b/arch/arm/mach-pxa/cm-x2xx-pci.c
@@ -29,13 +29,12 @@
29void __iomem *it8152_base_address; 29void __iomem *it8152_base_address;
30static int cmx2xx_it8152_irq_gpio; 30static int cmx2xx_it8152_irq_gpio;
31 31
32static void cmx2xx_it8152_irq_demux(unsigned int __irq, struct irq_desc *desc) 32static void cmx2xx_it8152_irq_demux(struct irq_desc *desc)
33{ 33{
34 unsigned int irq = irq_desc_get_irq(desc);
35 /* clear our parent irq */ 34 /* clear our parent irq */
36 desc->irq_data.chip->irq_ack(&desc->irq_data); 35 desc->irq_data.chip->irq_ack(&desc->irq_data);
37 36
38 it8152_irq_demux(irq, desc); 37 it8152_irq_demux(desc);
39} 38}
40 39
41void __cmx2xx_pci_init_irq(int irq_gpio) 40void __cmx2xx_pci_init_irq(int irq_gpio)
diff --git a/arch/arm/mach-pxa/lpd270.c b/arch/arm/mach-pxa/lpd270.c
index b070167deef2..4823d972e647 100644
--- a/arch/arm/mach-pxa/lpd270.c
+++ b/arch/arm/mach-pxa/lpd270.c
@@ -120,7 +120,7 @@ static struct irq_chip lpd270_irq_chip = {
120 .irq_unmask = lpd270_unmask_irq, 120 .irq_unmask = lpd270_unmask_irq,
121}; 121};
122 122
123static void lpd270_irq_handler(unsigned int __irq, struct irq_desc *desc) 123static void lpd270_irq_handler(struct irq_desc *desc)
124{ 124{
125 unsigned int irq; 125 unsigned int irq;
126 unsigned long pending; 126 unsigned long pending;
diff --git a/arch/arm/mach-pxa/pcm990-baseboard.c b/arch/arm/mach-pxa/pcm990-baseboard.c
index 9a0c8affdadb..d8319b54299a 100644
--- a/arch/arm/mach-pxa/pcm990-baseboard.c
+++ b/arch/arm/mach-pxa/pcm990-baseboard.c
@@ -284,7 +284,7 @@ static struct irq_chip pcm990_irq_chip = {
284 .irq_unmask = pcm990_unmask_irq, 284 .irq_unmask = pcm990_unmask_irq,
285}; 285};
286 286
287static void pcm990_irq_handler(unsigned int __irq, struct irq_desc *desc) 287static void pcm990_irq_handler(struct irq_desc *desc)
288{ 288{
289 unsigned int irq; 289 unsigned int irq;
290 unsigned long pending; 290 unsigned long pending;
diff --git a/arch/arm/mach-pxa/viper.c b/arch/arm/mach-pxa/viper.c
index 4841d6cefe76..8ab26370107e 100644
--- a/arch/arm/mach-pxa/viper.c
+++ b/arch/arm/mach-pxa/viper.c
@@ -276,7 +276,7 @@ static inline unsigned long viper_irq_pending(void)
276 viper_irq_enabled_mask; 276 viper_irq_enabled_mask;
277} 277}
278 278
279static void viper_irq_handler(unsigned int __irq, struct irq_desc *desc) 279static void viper_irq_handler(struct irq_desc *desc)
280{ 280{
281 unsigned int irq; 281 unsigned int irq;
282 unsigned long pending; 282 unsigned long pending;
diff --git a/arch/arm/mach-pxa/zeus.c b/arch/arm/mach-pxa/zeus.c
index 6f94dd7b4dee..30e62a3f0701 100644
--- a/arch/arm/mach-pxa/zeus.c
+++ b/arch/arm/mach-pxa/zeus.c
@@ -105,7 +105,7 @@ static inline unsigned long zeus_irq_pending(void)
105 return __raw_readw(ZEUS_CPLD_ISA_IRQ) & zeus_irq_enabled_mask; 105 return __raw_readw(ZEUS_CPLD_ISA_IRQ) & zeus_irq_enabled_mask;
106} 106}
107 107
108static void zeus_irq_handler(unsigned int __irq, struct irq_desc *desc) 108static void zeus_irq_handler(struct irq_desc *desc)
109{ 109{
110 unsigned int irq; 110 unsigned int irq;
111 unsigned long pending; 111 unsigned long pending;
diff --git a/arch/arm/mach-rpc/ecard.c b/arch/arm/mach-rpc/ecard.c
index f726d4c4e6dd..dc67a7fb3831 100644
--- a/arch/arm/mach-rpc/ecard.c
+++ b/arch/arm/mach-rpc/ecard.c
@@ -551,8 +551,7 @@ static void ecard_check_lockup(struct irq_desc *desc)
551 } 551 }
552} 552}
553 553
554static void 554static void ecard_irq_handler(struct irq_desc *desc)
555ecard_irq_handler(unsigned int irq, struct irq_desc *desc)
556{ 555{
557 ecard_t *ec; 556 ecard_t *ec;
558 int called = 0; 557 int called = 0;
diff --git a/arch/arm/mach-s3c24xx/bast-irq.c b/arch/arm/mach-s3c24xx/bast-irq.c
index ced1ab86ac83..2bb08961e934 100644
--- a/arch/arm/mach-s3c24xx/bast-irq.c
+++ b/arch/arm/mach-s3c24xx/bast-irq.c
@@ -100,9 +100,7 @@ static struct irq_chip bast_pc104_chip = {
100 .irq_ack = bast_pc104_maskack 100 .irq_ack = bast_pc104_maskack
101}; 101};
102 102
103static void 103static void bast_irq_pc104_demux(struct irq_desc *desc)
104bast_irq_pc104_demux(unsigned int irq,
105 struct irq_desc *desc)
106{ 104{
107 unsigned int stat; 105 unsigned int stat;
108 unsigned int irqno; 106 unsigned int irqno;
diff --git a/arch/arm/mach-s3c64xx/common.c b/arch/arm/mach-s3c64xx/common.c
index fd63ecfb2f81..ddb30b8434c5 100644
--- a/arch/arm/mach-s3c64xx/common.c
+++ b/arch/arm/mach-s3c64xx/common.c
@@ -388,22 +388,22 @@ static inline void s3c_irq_demux_eint(unsigned int start, unsigned int end)
388 } 388 }
389} 389}
390 390
391static void s3c_irq_demux_eint0_3(unsigned int irq, struct irq_desc *desc) 391static void s3c_irq_demux_eint0_3(struct irq_desc *desc)
392{ 392{
393 s3c_irq_demux_eint(0, 3); 393 s3c_irq_demux_eint(0, 3);
394} 394}
395 395
396static void s3c_irq_demux_eint4_11(unsigned int irq, struct irq_desc *desc) 396static void s3c_irq_demux_eint4_11(struct irq_desc *desc)
397{ 397{
398 s3c_irq_demux_eint(4, 11); 398 s3c_irq_demux_eint(4, 11);
399} 399}
400 400
401static void s3c_irq_demux_eint12_19(unsigned int irq, struct irq_desc *desc) 401static void s3c_irq_demux_eint12_19(struct irq_desc *desc)
402{ 402{
403 s3c_irq_demux_eint(12, 19); 403 s3c_irq_demux_eint(12, 19);
404} 404}
405 405
406static void s3c_irq_demux_eint20_27(unsigned int irq, struct irq_desc *desc) 406static void s3c_irq_demux_eint20_27(struct irq_desc *desc)
407{ 407{
408 s3c_irq_demux_eint(20, 27); 408 s3c_irq_demux_eint(20, 27);
409} 409}
diff --git a/arch/arm/mach-sa1100/neponset.c b/arch/arm/mach-sa1100/neponset.c
index 6d237b4f7a8e..8411985af9ff 100644
--- a/arch/arm/mach-sa1100/neponset.c
+++ b/arch/arm/mach-sa1100/neponset.c
@@ -166,7 +166,7 @@ static struct sa1100_port_fns neponset_port_fns = {
166 * ensure that the IRQ signal is deasserted before returning. This 166 * ensure that the IRQ signal is deasserted before returning. This
167 * is rather unfortunate. 167 * is rather unfortunate.
168 */ 168 */
169static void neponset_irq_handler(unsigned int irq, struct irq_desc *desc) 169static void neponset_irq_handler(struct irq_desc *desc)
170{ 170{
171 struct neponset_drvdata *d = irq_desc_get_handler_data(desc); 171 struct neponset_drvdata *d = irq_desc_get_handler_data(desc);
172 unsigned int irr; 172 unsigned int irr;
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index e62604384945..1a7815e5421b 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -1249,7 +1249,7 @@ __iommu_create_mapping(struct device *dev, struct page **pages, size_t size)
1249 struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); 1249 struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
1250 unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; 1250 unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
1251 dma_addr_t dma_addr, iova; 1251 dma_addr_t dma_addr, iova;
1252 int i, ret = DMA_ERROR_CODE; 1252 int i;
1253 1253
1254 dma_addr = __alloc_iova(mapping, size); 1254 dma_addr = __alloc_iova(mapping, size);
1255 if (dma_addr == DMA_ERROR_CODE) 1255 if (dma_addr == DMA_ERROR_CODE)
@@ -1257,6 +1257,8 @@ __iommu_create_mapping(struct device *dev, struct page **pages, size_t size)
1257 1257
1258 iova = dma_addr; 1258 iova = dma_addr;
1259 for (i = 0; i < count; ) { 1259 for (i = 0; i < count; ) {
1260 int ret;
1261
1260 unsigned int next_pfn = page_to_pfn(pages[i]) + 1; 1262 unsigned int next_pfn = page_to_pfn(pages[i]) + 1;
1261 phys_addr_t phys = page_to_phys(pages[i]); 1263 phys_addr_t phys = page_to_phys(pages[i]);
1262 unsigned int len, j; 1264 unsigned int len, j;
diff --git a/arch/arm/nwfpe/entry.S b/arch/arm/nwfpe/entry.S
index 71df43547659..39c20afad7ed 100644
--- a/arch/arm/nwfpe/entry.S
+++ b/arch/arm/nwfpe/entry.S
@@ -95,9 +95,10 @@ emulate:
95 reteq r4 @ no, return failure 95 reteq r4 @ no, return failure
96 96
97next: 97next:
98 uaccess_enable r3
98.Lx1: ldrt r6, [r5], #4 @ get the next instruction and 99.Lx1: ldrt r6, [r5], #4 @ get the next instruction and
99 @ increment PC 100 @ increment PC
100 101 uaccess_disable r3
101 and r2, r6, #0x0F000000 @ test for FP insns 102 and r2, r6, #0x0F000000 @ test for FP insns
102 teq r2, #0x0C000000 103 teq r2, #0x0C000000
103 teqne r2, #0x0D000000 104 teqne r2, #0x0D000000
diff --git a/arch/arm/plat-orion/gpio.c b/arch/arm/plat-orion/gpio.c
index 79c33eca09a3..7bd22d8e5b11 100644
--- a/arch/arm/plat-orion/gpio.c
+++ b/arch/arm/plat-orion/gpio.c
@@ -407,7 +407,7 @@ static int gpio_irq_set_type(struct irq_data *d, u32 type)
407 return 0; 407 return 0;
408} 408}
409 409
410static void gpio_irq_handler(unsigned __irq, struct irq_desc *desc) 410static void gpio_irq_handler(struct irq_desc *desc)
411{ 411{
412 struct orion_gpio_chip *ochip = irq_desc_get_handler_data(desc); 412 struct orion_gpio_chip *ochip = irq_desc_get_handler_data(desc);
413 u32 cause, type; 413 u32 cause, type;
diff --git a/arch/arm/xen/hypercall.S b/arch/arm/xen/hypercall.S
index f00e08075938..10fd99c568c6 100644
--- a/arch/arm/xen/hypercall.S
+++ b/arch/arm/xen/hypercall.S
@@ -98,8 +98,23 @@ ENTRY(privcmd_call)
98 mov r1, r2 98 mov r1, r2
99 mov r2, r3 99 mov r2, r3
100 ldr r3, [sp, #8] 100 ldr r3, [sp, #8]
101 /*
102 * Privcmd calls are issued by the userspace. We need to allow the
103 * kernel to access the userspace memory before issuing the hypercall.
104 */
105 uaccess_enable r4
106
107 /* r4 is loaded now as we use it as scratch register before */
101 ldr r4, [sp, #4] 108 ldr r4, [sp, #4]
102 __HVC(XEN_IMM) 109 __HVC(XEN_IMM)
110
111 /*
112 * Disable userspace access from kernel. This is fine to do it
113 * unconditionally as no set_fs(KERNEL_DS)/set_fs(get_ds()) is
114 * called before.
115 */
116 uaccess_disable r4
117
103 ldm sp!, {r4} 118 ldm sp!, {r4}
104 ret lr 119 ret lr
105ENDPROC(privcmd_call); 120ENDPROC(privcmd_call);
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 7d95663c0160..07d1811aa03f 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -32,6 +32,7 @@ config ARM64
32 select GENERIC_CLOCKEVENTS_BROADCAST 32 select GENERIC_CLOCKEVENTS_BROADCAST
33 select GENERIC_CPU_AUTOPROBE 33 select GENERIC_CPU_AUTOPROBE
34 select GENERIC_EARLY_IOREMAP 34 select GENERIC_EARLY_IOREMAP
35 select GENERIC_IDLE_POLL_SETUP
35 select GENERIC_IRQ_PROBE 36 select GENERIC_IRQ_PROBE
36 select GENERIC_IRQ_SHOW 37 select GENERIC_IRQ_SHOW
37 select GENERIC_IRQ_SHOW_LEVEL 38 select GENERIC_IRQ_SHOW_LEVEL
@@ -331,6 +332,22 @@ config ARM64_ERRATUM_845719
331 332
332 If unsure, say Y. 333 If unsure, say Y.
333 334
335config ARM64_ERRATUM_843419
336 bool "Cortex-A53: 843419: A load or store might access an incorrect address"
337 depends on MODULES
338 default y
339 help
340 This option builds kernel modules using the large memory model in
341 order to avoid the use of the ADRP instruction, which can cause
342 a subsequent memory access to use an incorrect address on Cortex-A53
343 parts up to r0p4.
344
345 Note that the kernel itself must be linked with a version of ld
346 which fixes potentially affected ADRP instructions through the
347 use of veneers.
348
349 If unsure, say Y.
350
334endmenu 351endmenu
335 352
336 353
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 15ff5b4156fd..f9914d7c1bb0 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -41,6 +41,10 @@ endif
41 41
42CHECKFLAGS += -D__aarch64__ 42CHECKFLAGS += -D__aarch64__
43 43
44ifeq ($(CONFIG_ARM64_ERRATUM_843419), y)
45CFLAGS_MODULE += -mcmodel=large
46endif
47
44# Default value 48# Default value
45head-y := arch/arm64/kernel/head.o 49head-y := arch/arm64/kernel/head.o
46 50
diff --git a/arch/arm64/boot/dts/mediatek/mt8173.dtsi b/arch/arm64/boot/dts/mediatek/mt8173.dtsi
index d18ee4259ee5..06a15644be38 100644
--- a/arch/arm64/boot/dts/mediatek/mt8173.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8173.dtsi
@@ -81,7 +81,7 @@
81 }; 81 };
82 82
83 idle-states { 83 idle-states {
84 entry-method = "arm,psci"; 84 entry-method = "psci";
85 85
86 CPU_SLEEP_0: cpu-sleep-0 { 86 CPU_SLEEP_0: cpu-sleep-0 {
87 compatible = "arm,idle-state"; 87 compatible = "arm,idle-state";
diff --git a/arch/arm64/boot/dts/rockchip/rk3368.dtsi b/arch/arm64/boot/dts/rockchip/rk3368.dtsi
index a712bea3bf2c..cc093a482aa4 100644
--- a/arch/arm64/boot/dts/rockchip/rk3368.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3368.dtsi
@@ -106,7 +106,7 @@
106 }; 106 };
107 107
108 idle-states { 108 idle-states {
109 entry-method = "arm,psci"; 109 entry-method = "psci";
110 110
111 cpu_sleep: cpu-sleep-0 { 111 cpu_sleep: cpu-sleep-0 {
112 compatible = "arm,idle-state"; 112 compatible = "arm,idle-state";
diff --git a/arch/arm64/include/asm/hardirq.h b/arch/arm64/include/asm/hardirq.h
index 2bb7009bdac7..a57601f9d17c 100644
--- a/arch/arm64/include/asm/hardirq.h
+++ b/arch/arm64/include/asm/hardirq.h
@@ -43,9 +43,4 @@ static inline void ack_bad_irq(unsigned int irq)
43 irq_err_count++; 43 irq_err_count++;
44} 44}
45 45
46/*
47 * No arch-specific IRQ flags.
48 */
49#define set_irq_flags(irq, flags)
50
51#endif /* __ASM_HARDIRQ_H */ 46#endif /* __ASM_HARDIRQ_H */
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index 7605e095217f..9694f2654593 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -95,6 +95,7 @@
95 SCTLR_EL2_SA | SCTLR_EL2_I) 95 SCTLR_EL2_SA | SCTLR_EL2_I)
96 96
97/* TCR_EL2 Registers bits */ 97/* TCR_EL2 Registers bits */
98#define TCR_EL2_RES1 ((1 << 31) | (1 << 23))
98#define TCR_EL2_TBI (1 << 20) 99#define TCR_EL2_TBI (1 << 20)
99#define TCR_EL2_PS (7 << 16) 100#define TCR_EL2_PS (7 << 16)
100#define TCR_EL2_PS_40B (2 << 16) 101#define TCR_EL2_PS_40B (2 << 16)
@@ -106,9 +107,10 @@
106#define TCR_EL2_MASK (TCR_EL2_TG0 | TCR_EL2_SH0 | \ 107#define TCR_EL2_MASK (TCR_EL2_TG0 | TCR_EL2_SH0 | \
107 TCR_EL2_ORGN0 | TCR_EL2_IRGN0 | TCR_EL2_T0SZ) 108 TCR_EL2_ORGN0 | TCR_EL2_IRGN0 | TCR_EL2_T0SZ)
108 109
109#define TCR_EL2_FLAGS (TCR_EL2_PS_40B) 110#define TCR_EL2_FLAGS (TCR_EL2_RES1 | TCR_EL2_PS_40B)
110 111
111/* VTCR_EL2 Registers bits */ 112/* VTCR_EL2 Registers bits */
113#define VTCR_EL2_RES1 (1 << 31)
112#define VTCR_EL2_PS_MASK (7 << 16) 114#define VTCR_EL2_PS_MASK (7 << 16)
113#define VTCR_EL2_TG0_MASK (1 << 14) 115#define VTCR_EL2_TG0_MASK (1 << 14)
114#define VTCR_EL2_TG0_4K (0 << 14) 116#define VTCR_EL2_TG0_4K (0 << 14)
@@ -147,7 +149,8 @@
147 */ 149 */
148#define VTCR_EL2_FLAGS (VTCR_EL2_TG0_64K | VTCR_EL2_SH0_INNER | \ 150#define VTCR_EL2_FLAGS (VTCR_EL2_TG0_64K | VTCR_EL2_SH0_INNER | \
149 VTCR_EL2_ORGN0_WBWA | VTCR_EL2_IRGN0_WBWA | \ 151 VTCR_EL2_ORGN0_WBWA | VTCR_EL2_IRGN0_WBWA | \
150 VTCR_EL2_SL0_LVL1 | VTCR_EL2_T0SZ_40B) 152 VTCR_EL2_SL0_LVL1 | VTCR_EL2_T0SZ_40B | \
153 VTCR_EL2_RES1)
151#define VTTBR_X (38 - VTCR_EL2_T0SZ_40B) 154#define VTTBR_X (38 - VTCR_EL2_T0SZ_40B)
152#else 155#else
153/* 156/*
@@ -158,7 +161,8 @@
158 */ 161 */
159#define VTCR_EL2_FLAGS (VTCR_EL2_TG0_4K | VTCR_EL2_SH0_INNER | \ 162#define VTCR_EL2_FLAGS (VTCR_EL2_TG0_4K | VTCR_EL2_SH0_INNER | \
160 VTCR_EL2_ORGN0_WBWA | VTCR_EL2_IRGN0_WBWA | \ 163 VTCR_EL2_ORGN0_WBWA | VTCR_EL2_IRGN0_WBWA | \
161 VTCR_EL2_SL0_LVL1 | VTCR_EL2_T0SZ_40B) 164 VTCR_EL2_SL0_LVL1 | VTCR_EL2_T0SZ_40B | \
165 VTCR_EL2_RES1)
162#define VTTBR_X (37 - VTCR_EL2_T0SZ_40B) 166#define VTTBR_X (37 - VTCR_EL2_T0SZ_40B)
163#endif 167#endif
164 168
@@ -168,7 +172,6 @@
168#define VTTBR_VMID_MASK (UL(0xFF) << VTTBR_VMID_SHIFT) 172#define VTTBR_VMID_MASK (UL(0xFF) << VTTBR_VMID_SHIFT)
169 173
170/* Hyp System Trap Register */ 174/* Hyp System Trap Register */
171#define HSTR_EL2_TTEE (1 << 16)
172#define HSTR_EL2_T(x) (1 << x) 175#define HSTR_EL2_T(x) (1 << x)
173 176
174/* Hyp Coproccessor Trap Register Shifts */ 177/* Hyp Coproccessor Trap Register Shifts */
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index 67fa0de3d483..5e377101f919 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -53,9 +53,7 @@
53#define IFSR32_EL2 25 /* Instruction Fault Status Register */ 53#define IFSR32_EL2 25 /* Instruction Fault Status Register */
54#define FPEXC32_EL2 26 /* Floating-Point Exception Control Register */ 54#define FPEXC32_EL2 26 /* Floating-Point Exception Control Register */
55#define DBGVCR32_EL2 27 /* Debug Vector Catch Register */ 55#define DBGVCR32_EL2 27 /* Debug Vector Catch Register */
56#define TEECR32_EL1 28 /* ThumbEE Configuration Register */ 56#define NR_SYS_REGS 28
57#define TEEHBR32_EL1 29 /* ThumbEE Handler Base Register */
58#define NR_SYS_REGS 30
59 57
60/* 32bit mapping */ 58/* 32bit mapping */
61#define c0_MPIDR (MPIDR_EL1 * 2) /* MultiProcessor ID Register */ 59#define c0_MPIDR (MPIDR_EL1 * 2) /* MultiProcessor ID Register */
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 415938dc45cf..ed039688c221 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -30,19 +30,16 @@
30 30
31#define __KVM_HAVE_ARCH_INTC_INITIALIZED 31#define __KVM_HAVE_ARCH_INTC_INITIALIZED
32 32
33#if defined(CONFIG_KVM_ARM_MAX_VCPUS)
34#define KVM_MAX_VCPUS CONFIG_KVM_ARM_MAX_VCPUS
35#else
36#define KVM_MAX_VCPUS 0
37#endif
38
39#define KVM_USER_MEM_SLOTS 32 33#define KVM_USER_MEM_SLOTS 32
40#define KVM_PRIVATE_MEM_SLOTS 4 34#define KVM_PRIVATE_MEM_SLOTS 4
41#define KVM_COALESCED_MMIO_PAGE_OFFSET 1 35#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
36#define KVM_HALT_POLL_NS_DEFAULT 500000
42 37
43#include <kvm/arm_vgic.h> 38#include <kvm/arm_vgic.h>
44#include <kvm/arm_arch_timer.h> 39#include <kvm/arm_arch_timer.h>
45 40
41#define KVM_MAX_VCPUS VGIC_V3_MAX_CPUS
42
46#define KVM_VCPU_MAX_FEATURES 3 43#define KVM_VCPU_MAX_FEATURES 3
47 44
48int __attribute_const__ kvm_target_cpu(void); 45int __attribute_const__ kvm_target_cpu(void);
@@ -195,6 +192,7 @@ struct kvm_vm_stat {
195 192
196struct kvm_vcpu_stat { 193struct kvm_vcpu_stat {
197 u32 halt_successful_poll; 194 u32 halt_successful_poll;
195 u32 halt_attempted_poll;
198 u32 halt_wakeup; 196 u32 halt_wakeup;
199}; 197};
200 198
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 6900b2d95371..b0329be95cb1 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -26,13 +26,9 @@
26 * Software defined PTE bits definition. 26 * Software defined PTE bits definition.
27 */ 27 */
28#define PTE_VALID (_AT(pteval_t, 1) << 0) 28#define PTE_VALID (_AT(pteval_t, 1) << 0)
29#define PTE_WRITE (PTE_DBM) /* same as DBM (51) */
29#define PTE_DIRTY (_AT(pteval_t, 1) << 55) 30#define PTE_DIRTY (_AT(pteval_t, 1) << 55)
30#define PTE_SPECIAL (_AT(pteval_t, 1) << 56) 31#define PTE_SPECIAL (_AT(pteval_t, 1) << 56)
31#ifdef CONFIG_ARM64_HW_AFDBM
32#define PTE_WRITE (PTE_DBM) /* same as DBM */
33#else
34#define PTE_WRITE (_AT(pteval_t, 1) << 57)
35#endif
36#define PTE_PROT_NONE (_AT(pteval_t, 1) << 58) /* only when !PTE_VALID */ 32#define PTE_PROT_NONE (_AT(pteval_t, 1) << 58) /* only when !PTE_VALID */
37 33
38/* 34/*
@@ -146,7 +142,7 @@ extern struct page *empty_zero_page;
146#define pte_exec(pte) (!(pte_val(pte) & PTE_UXN)) 142#define pte_exec(pte) (!(pte_val(pte) & PTE_UXN))
147 143
148#ifdef CONFIG_ARM64_HW_AFDBM 144#ifdef CONFIG_ARM64_HW_AFDBM
149#define pte_hw_dirty(pte) (!(pte_val(pte) & PTE_RDONLY)) 145#define pte_hw_dirty(pte) (pte_write(pte) && !(pte_val(pte) & PTE_RDONLY))
150#else 146#else
151#define pte_hw_dirty(pte) (0) 147#define pte_hw_dirty(pte) (0)
152#endif 148#endif
@@ -238,7 +234,7 @@ extern void __sync_icache_dcache(pte_t pteval, unsigned long addr);
238 * When hardware DBM is not present, the sofware PTE_DIRTY bit is updated via 234 * When hardware DBM is not present, the sofware PTE_DIRTY bit is updated via
239 * the page fault mechanism. Checking the dirty status of a pte becomes: 235 * the page fault mechanism. Checking the dirty status of a pte becomes:
240 * 236 *
241 * PTE_DIRTY || !PTE_RDONLY 237 * PTE_DIRTY || (PTE_WRITE && !PTE_RDONLY)
242 */ 238 */
243static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, 239static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
244 pte_t *ptep, pte_t pte) 240 pte_t *ptep, pte_t pte)
@@ -503,7 +499,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
503 PTE_PROT_NONE | PTE_WRITE | PTE_TYPE_MASK; 499 PTE_PROT_NONE | PTE_WRITE | PTE_TYPE_MASK;
504 /* preserve the hardware dirty information */ 500 /* preserve the hardware dirty information */
505 if (pte_hw_dirty(pte)) 501 if (pte_hw_dirty(pte))
506 newprot |= PTE_DIRTY; 502 pte = pte_mkdirty(pte);
507 pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask); 503 pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
508 return pte; 504 return pte;
509} 505}
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
index 9b3b62ac9c24..cebf78661a55 100644
--- a/arch/arm64/kernel/debug-monitors.c
+++ b/arch/arm64/kernel/debug-monitors.c
@@ -134,7 +134,7 @@ static int os_lock_notify(struct notifier_block *self,
134 unsigned long action, void *data) 134 unsigned long action, void *data)
135{ 135{
136 int cpu = (unsigned long)data; 136 int cpu = (unsigned long)data;
137 if (action == CPU_ONLINE) 137 if ((action & ~CPU_TASKS_FROZEN) == CPU_ONLINE)
138 smp_call_function_single(cpu, clear_os_lock, NULL, 1); 138 smp_call_function_single(cpu, clear_os_lock, NULL, 1);
139 return NOTIFY_OK; 139 return NOTIFY_OK;
140} 140}
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index a055be6125cf..90d09eddd5b2 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -523,6 +523,11 @@ CPU_LE( movk x0, #0x30d0, lsl #16 ) // Clear EE and E0E on LE systems
523 msr hstr_el2, xzr // Disable CP15 traps to EL2 523 msr hstr_el2, xzr // Disable CP15 traps to EL2
524#endif 524#endif
525 525
526 /* EL2 debug */
527 mrs x0, pmcr_el0 // Disable debug access traps
528 ubfx x0, x0, #11, #5 // to EL2 and allow access to
529 msr mdcr_el2, x0 // all PMU counters from EL1
530
526 /* Stage-2 translation */ 531 /* Stage-2 translation */
527 msr vttbr_el2, xzr 532 msr vttbr_el2, xzr
528 533
diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c
index c97040ecf838..bba85c8f8037 100644
--- a/arch/arm64/kernel/hw_breakpoint.c
+++ b/arch/arm64/kernel/hw_breakpoint.c
@@ -872,7 +872,7 @@ static int hw_breakpoint_reset_notify(struct notifier_block *self,
872 void *hcpu) 872 void *hcpu)
873{ 873{
874 int cpu = (long)hcpu; 874 int cpu = (long)hcpu;
875 if (action == CPU_ONLINE) 875 if ((action & ~CPU_TASKS_FROZEN) == CPU_ONLINE)
876 smp_call_function_single(cpu, hw_breakpoint_reset, NULL, 1); 876 smp_call_function_single(cpu, hw_breakpoint_reset, NULL, 1);
877 return NOTIFY_OK; 877 return NOTIFY_OK;
878} 878}
diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
index 67bf4107f6ef..876eb8df50bf 100644
--- a/arch/arm64/kernel/module.c
+++ b/arch/arm64/kernel/module.c
@@ -332,12 +332,14 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
332 ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 0, 21, 332 ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 0, 21,
333 AARCH64_INSN_IMM_ADR); 333 AARCH64_INSN_IMM_ADR);
334 break; 334 break;
335#ifndef CONFIG_ARM64_ERRATUM_843419
335 case R_AARCH64_ADR_PREL_PG_HI21_NC: 336 case R_AARCH64_ADR_PREL_PG_HI21_NC:
336 overflow_check = false; 337 overflow_check = false;
337 case R_AARCH64_ADR_PREL_PG_HI21: 338 case R_AARCH64_ADR_PREL_PG_HI21:
338 ovf = reloc_insn_imm(RELOC_OP_PAGE, loc, val, 12, 21, 339 ovf = reloc_insn_imm(RELOC_OP_PAGE, loc, val, 12, 21,
339 AARCH64_INSN_IMM_ADR); 340 AARCH64_INSN_IMM_ADR);
340 break; 341 break;
342#endif
341 case R_AARCH64_ADD_ABS_LO12_NC: 343 case R_AARCH64_ADD_ABS_LO12_NC:
342 case R_AARCH64_LDST8_ABS_LO12_NC: 344 case R_AARCH64_LDST8_ABS_LO12_NC:
343 overflow_check = false; 345 overflow_check = false;
diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c
index 948f0ad2de23..71ef6dc89ae5 100644
--- a/arch/arm64/kernel/signal32.c
+++ b/arch/arm64/kernel/signal32.c
@@ -212,14 +212,32 @@ int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
212 212
213/* 213/*
214 * VFP save/restore code. 214 * VFP save/restore code.
215 *
216 * We have to be careful with endianness, since the fpsimd context-switch
217 * code operates on 128-bit (Q) register values whereas the compat ABI
218 * uses an array of 64-bit (D) registers. Consequently, we need to swap
219 * the two halves of each Q register when running on a big-endian CPU.
215 */ 220 */
221union __fpsimd_vreg {
222 __uint128_t raw;
223 struct {
224#ifdef __AARCH64EB__
225 u64 hi;
226 u64 lo;
227#else
228 u64 lo;
229 u64 hi;
230#endif
231 };
232};
233
216static int compat_preserve_vfp_context(struct compat_vfp_sigframe __user *frame) 234static int compat_preserve_vfp_context(struct compat_vfp_sigframe __user *frame)
217{ 235{
218 struct fpsimd_state *fpsimd = &current->thread.fpsimd_state; 236 struct fpsimd_state *fpsimd = &current->thread.fpsimd_state;
219 compat_ulong_t magic = VFP_MAGIC; 237 compat_ulong_t magic = VFP_MAGIC;
220 compat_ulong_t size = VFP_STORAGE_SIZE; 238 compat_ulong_t size = VFP_STORAGE_SIZE;
221 compat_ulong_t fpscr, fpexc; 239 compat_ulong_t fpscr, fpexc;
222 int err = 0; 240 int i, err = 0;
223 241
224 /* 242 /*
225 * Save the hardware registers to the fpsimd_state structure. 243 * Save the hardware registers to the fpsimd_state structure.
@@ -235,10 +253,15 @@ static int compat_preserve_vfp_context(struct compat_vfp_sigframe __user *frame)
235 /* 253 /*
236 * Now copy the FP registers. Since the registers are packed, 254 * Now copy the FP registers. Since the registers are packed,
237 * we can copy the prefix we want (V0-V15) as it is. 255 * we can copy the prefix we want (V0-V15) as it is.
238 * FIXME: Won't work if big endian.
239 */ 256 */
240 err |= __copy_to_user(&frame->ufp.fpregs, fpsimd->vregs, 257 for (i = 0; i < ARRAY_SIZE(frame->ufp.fpregs); i += 2) {
241 sizeof(frame->ufp.fpregs)); 258 union __fpsimd_vreg vreg = {
259 .raw = fpsimd->vregs[i >> 1],
260 };
261
262 __put_user_error(vreg.lo, &frame->ufp.fpregs[i], err);
263 __put_user_error(vreg.hi, &frame->ufp.fpregs[i + 1], err);
264 }
242 265
243 /* Create an AArch32 fpscr from the fpsr and the fpcr. */ 266 /* Create an AArch32 fpscr from the fpsr and the fpcr. */
244 fpscr = (fpsimd->fpsr & VFP_FPSCR_STAT_MASK) | 267 fpscr = (fpsimd->fpsr & VFP_FPSCR_STAT_MASK) |
@@ -263,7 +286,7 @@ static int compat_restore_vfp_context(struct compat_vfp_sigframe __user *frame)
263 compat_ulong_t magic = VFP_MAGIC; 286 compat_ulong_t magic = VFP_MAGIC;
264 compat_ulong_t size = VFP_STORAGE_SIZE; 287 compat_ulong_t size = VFP_STORAGE_SIZE;
265 compat_ulong_t fpscr; 288 compat_ulong_t fpscr;
266 int err = 0; 289 int i, err = 0;
267 290
268 __get_user_error(magic, &frame->magic, err); 291 __get_user_error(magic, &frame->magic, err);
269 __get_user_error(size, &frame->size, err); 292 __get_user_error(size, &frame->size, err);
@@ -273,12 +296,14 @@ static int compat_restore_vfp_context(struct compat_vfp_sigframe __user *frame)
273 if (magic != VFP_MAGIC || size != VFP_STORAGE_SIZE) 296 if (magic != VFP_MAGIC || size != VFP_STORAGE_SIZE)
274 return -EINVAL; 297 return -EINVAL;
275 298
276 /* 299 /* Copy the FP registers into the start of the fpsimd_state. */
277 * Copy the FP registers into the start of the fpsimd_state. 300 for (i = 0; i < ARRAY_SIZE(frame->ufp.fpregs); i += 2) {
278 * FIXME: Won't work if big endian. 301 union __fpsimd_vreg vreg;
279 */ 302
280 err |= __copy_from_user(fpsimd.vregs, frame->ufp.fpregs, 303 __get_user_error(vreg.lo, &frame->ufp.fpregs[i], err);
281 sizeof(frame->ufp.fpregs)); 304 __get_user_error(vreg.hi, &frame->ufp.fpregs[i + 1], err);
305 fpsimd.vregs[i >> 1] = vreg.raw;
306 }
282 307
283 /* Extract the fpsr and the fpcr from the fpscr */ 308 /* Extract the fpsr and the fpcr from the fpscr */
284 __get_user_error(fpscr, &frame->ufp.fpscr, err); 309 __get_user_error(fpscr, &frame->ufp.fpscr, err);
diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig
index bfffe8f4bd53..5c7e920e4861 100644
--- a/arch/arm64/kvm/Kconfig
+++ b/arch/arm64/kvm/Kconfig
@@ -41,15 +41,4 @@ config KVM_ARM_HOST
41 ---help--- 41 ---help---
42 Provides host support for ARM processors. 42 Provides host support for ARM processors.
43 43
44config KVM_ARM_MAX_VCPUS
45 int "Number maximum supported virtual CPUs per VM"
46 depends on KVM_ARM_HOST
47 default 4
48 help
49 Static number of max supported virtual CPUs per VM.
50
51 If you choose a high number, the vcpu structures will be quite
52 large, so only choose a reasonable number that you expect to
53 actually use.
54
55endif # VIRTUALIZATION 44endif # VIRTUALIZATION
diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
index 37c89ea2c572..e5836138ec42 100644
--- a/arch/arm64/kvm/hyp.S
+++ b/arch/arm64/kvm/hyp.S
@@ -433,20 +433,13 @@
433 mrs x5, ifsr32_el2 433 mrs x5, ifsr32_el2
434 stp x4, x5, [x3] 434 stp x4, x5, [x3]
435 435
436 skip_fpsimd_state x8, 3f 436 skip_fpsimd_state x8, 2f
437 mrs x6, fpexc32_el2 437 mrs x6, fpexc32_el2
438 str x6, [x3, #16] 438 str x6, [x3, #16]
4393: 4392:
440 skip_debug_state x8, 2f 440 skip_debug_state x8, 1f
441 mrs x7, dbgvcr32_el2 441 mrs x7, dbgvcr32_el2
442 str x7, [x3, #24] 442 str x7, [x3, #24]
4432:
444 skip_tee_state x8, 1f
445
446 add x3, x2, #CPU_SYSREG_OFFSET(TEECR32_EL1)
447 mrs x4, teecr32_el1
448 mrs x5, teehbr32_el1
449 stp x4, x5, [x3]
4501: 4431:
451.endm 444.endm
452 445
@@ -466,16 +459,9 @@
466 msr dacr32_el2, x4 459 msr dacr32_el2, x4
467 msr ifsr32_el2, x5 460 msr ifsr32_el2, x5
468 461
469 skip_debug_state x8, 2f 462 skip_debug_state x8, 1f
470 ldr x7, [x3, #24] 463 ldr x7, [x3, #24]
471 msr dbgvcr32_el2, x7 464 msr dbgvcr32_el2, x7
4722:
473 skip_tee_state x8, 1f
474
475 add x3, x2, #CPU_SYSREG_OFFSET(TEECR32_EL1)
476 ldp x4, x5, [x3]
477 msr teecr32_el1, x4
478 msr teehbr32_el1, x5
4791: 4651:
480.endm 466.endm
481 467
@@ -570,8 +556,6 @@ alternative_endif
570 mrs x3, cntv_ctl_el0 556 mrs x3, cntv_ctl_el0
571 and x3, x3, #3 557 and x3, x3, #3
572 str w3, [x0, #VCPU_TIMER_CNTV_CTL] 558 str w3, [x0, #VCPU_TIMER_CNTV_CTL]
573 bic x3, x3, #1 // Clear Enable
574 msr cntv_ctl_el0, x3
575 559
576 isb 560 isb
577 561
@@ -579,6 +563,9 @@ alternative_endif
579 str x3, [x0, #VCPU_TIMER_CNTV_CVAL] 563 str x3, [x0, #VCPU_TIMER_CNTV_CVAL]
580 564
5811: 5651:
566 // Disable the virtual timer
567 msr cntv_ctl_el0, xzr
568
582 // Allow physical timer/counter access for the host 569 // Allow physical timer/counter access for the host
583 mrs x2, cnthctl_el2 570 mrs x2, cnthctl_el2
584 orr x2, x2, #3 571 orr x2, x2, #3
@@ -753,6 +740,9 @@ ENTRY(__kvm_vcpu_run)
753 // Guest context 740 // Guest context
754 add x2, x0, #VCPU_CONTEXT 741 add x2, x0, #VCPU_CONTEXT
755 742
743 // We must restore the 32-bit state before the sysregs, thanks
744 // to Cortex-A57 erratum #852523.
745 restore_guest_32bit_state
756 bl __restore_sysregs 746 bl __restore_sysregs
757 747
758 skip_debug_state x3, 1f 748 skip_debug_state x3, 1f
@@ -760,7 +750,6 @@ ENTRY(__kvm_vcpu_run)
760 kern_hyp_va x3 750 kern_hyp_va x3
761 bl __restore_debug 751 bl __restore_debug
7621: 7521:
763 restore_guest_32bit_state
764 restore_guest_regs 753 restore_guest_regs
765 754
766 // That's it, no more messing around. 755 // That's it, no more messing around.
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index b41607d270ac..d03d3af17e7e 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -272,7 +272,7 @@ static int set_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
272{ 272{
273 __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg]; 273 __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
274 274
275 if (copy_from_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0) 275 if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
276 return -EFAULT; 276 return -EFAULT;
277 return 0; 277 return 0;
278} 278}
@@ -314,7 +314,7 @@ static int set_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
314{ 314{
315 __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg]; 315 __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
316 316
317 if (copy_from_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0) 317 if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
318 return -EFAULT; 318 return -EFAULT;
319 319
320 return 0; 320 return 0;
@@ -358,7 +358,7 @@ static int set_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
358{ 358{
359 __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg]; 359 __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
360 360
361 if (copy_from_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0) 361 if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
362 return -EFAULT; 362 return -EFAULT;
363 return 0; 363 return 0;
364} 364}
@@ -400,7 +400,7 @@ static int set_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
400{ 400{
401 __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg]; 401 __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
402 402
403 if (copy_from_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0) 403 if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
404 return -EFAULT; 404 return -EFAULT;
405 return 0; 405 return 0;
406} 406}
@@ -539,13 +539,6 @@ static const struct sys_reg_desc sys_reg_descs[] = {
539 { Op0(0b10), Op1(0b000), CRn(0b0111), CRm(0b1110), Op2(0b110), 539 { Op0(0b10), Op1(0b000), CRn(0b0111), CRm(0b1110), Op2(0b110),
540 trap_dbgauthstatus_el1 }, 540 trap_dbgauthstatus_el1 },
541 541
542 /* TEECR32_EL1 */
543 { Op0(0b10), Op1(0b010), CRn(0b0000), CRm(0b0000), Op2(0b000),
544 NULL, reset_val, TEECR32_EL1, 0 },
545 /* TEEHBR32_EL1 */
546 { Op0(0b10), Op1(0b010), CRn(0b0001), CRm(0b0000), Op2(0b000),
547 NULL, reset_val, TEEHBR32_EL1, 0 },
548
549 /* MDCCSR_EL1 */ 542 /* MDCCSR_EL1 */
550 { Op0(0b10), Op1(0b011), CRn(0b0000), CRm(0b0001), Op2(0b000), 543 { Op0(0b10), Op1(0b011), CRn(0b0000), CRm(0b0001), Op2(0b000),
551 trap_raz_wi }, 544 trap_raz_wi },
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 0bcc4bc94b4a..99224dcebdc5 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -100,7 +100,7 @@ static void *__dma_alloc_coherent(struct device *dev, size_t size,
100 if (IS_ENABLED(CONFIG_ZONE_DMA) && 100 if (IS_ENABLED(CONFIG_ZONE_DMA) &&
101 dev->coherent_dma_mask <= DMA_BIT_MASK(32)) 101 dev->coherent_dma_mask <= DMA_BIT_MASK(32))
102 flags |= GFP_DMA; 102 flags |= GFP_DMA;
103 if (IS_ENABLED(CONFIG_DMA_CMA) && (flags & __GFP_WAIT)) { 103 if (dev_get_cma_area(dev) && (flags & __GFP_WAIT)) {
104 struct page *page; 104 struct page *page;
105 void *addr; 105 void *addr;
106 106
diff --git a/arch/avr32/mach-at32ap/extint.c b/arch/avr32/mach-at32ap/extint.c
index d51ff8f1c541..96cabad68489 100644
--- a/arch/avr32/mach-at32ap/extint.c
+++ b/arch/avr32/mach-at32ap/extint.c
@@ -144,7 +144,7 @@ static struct irq_chip eic_chip = {
144 .irq_set_type = eic_set_irq_type, 144 .irq_set_type = eic_set_irq_type,
145}; 145};
146 146
147static void demux_eic_irq(unsigned int irq, struct irq_desc *desc) 147static void demux_eic_irq(struct irq_desc *desc)
148{ 148{
149 struct eic *eic = irq_desc_get_handler_data(desc); 149 struct eic *eic = irq_desc_get_handler_data(desc);
150 unsigned long status, pending; 150 unsigned long status, pending;
diff --git a/arch/avr32/mach-at32ap/pio.c b/arch/avr32/mach-at32ap/pio.c
index 157a5e0e789f..4f61378c3453 100644
--- a/arch/avr32/mach-at32ap/pio.c
+++ b/arch/avr32/mach-at32ap/pio.c
@@ -281,7 +281,7 @@ static struct irq_chip gpio_irqchip = {
281 .irq_set_type = gpio_irq_type, 281 .irq_set_type = gpio_irq_type,
282}; 282};
283 283
284static void gpio_irq_handler(unsigned irq, struct irq_desc *desc) 284static void gpio_irq_handler(struct irq_desc *desc)
285{ 285{
286 struct pio_device *pio = irq_desc_get_chip_data(desc); 286 struct pio_device *pio = irq_desc_get_chip_data(desc);
287 unsigned gpio_irq; 287 unsigned gpio_irq;
diff --git a/arch/blackfin/include/asm/irq_handler.h b/arch/blackfin/include/asm/irq_handler.h
index 4b2a992794d7..d2f90c72378e 100644
--- a/arch/blackfin/include/asm/irq_handler.h
+++ b/arch/blackfin/include/asm/irq_handler.h
@@ -60,7 +60,7 @@ extern void bfin_internal_mask_irq(unsigned int irq);
60extern void bfin_internal_unmask_irq(unsigned int irq); 60extern void bfin_internal_unmask_irq(unsigned int irq);
61 61
62struct irq_desc; 62struct irq_desc;
63extern void bfin_demux_mac_status_irq(unsigned int, struct irq_desc *); 63extern void bfin_demux_mac_status_irq(struct irq_desc *);
64extern void bfin_demux_gpio_irq(unsigned int, struct irq_desc *); 64extern void bfin_demux_gpio_irq(struct irq_desc *);
65 65
66#endif 66#endif
diff --git a/arch/blackfin/kernel/irqchip.c b/arch/blackfin/kernel/irqchip.c
index 0ba25764b8c0..052cde5ed2e4 100644
--- a/arch/blackfin/kernel/irqchip.c
+++ b/arch/blackfin/kernel/irqchip.c
@@ -107,7 +107,7 @@ asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
107 * than crashing, do something sensible. 107 * than crashing, do something sensible.
108 */ 108 */
109 if (irq >= NR_IRQS) 109 if (irq >= NR_IRQS)
110 handle_bad_irq(irq, &bad_irq_desc); 110 handle_bad_irq(&bad_irq_desc);
111 else 111 else
112 generic_handle_irq(irq); 112 generic_handle_irq(irq);
113 113
diff --git a/arch/blackfin/mach-bf537/ints-priority.c b/arch/blackfin/mach-bf537/ints-priority.c
index 14b2f74554dc..a48baae4384d 100644
--- a/arch/blackfin/mach-bf537/ints-priority.c
+++ b/arch/blackfin/mach-bf537/ints-priority.c
@@ -89,8 +89,7 @@ static struct irq_chip bf537_generic_error_irqchip = {
89 .irq_unmask = bf537_generic_error_unmask_irq, 89 .irq_unmask = bf537_generic_error_unmask_irq,
90}; 90};
91 91
92static void bf537_demux_error_irq(unsigned int int_err_irq, 92static void bf537_demux_error_irq(struct irq_desc *inta_desc)
93 struct irq_desc *inta_desc)
94{ 93{
95 int irq = 0; 94 int irq = 0;
96 95
@@ -182,15 +181,12 @@ static struct irq_chip bf537_mac_rx_irqchip = {
182 .irq_unmask = bf537_mac_rx_unmask_irq, 181 .irq_unmask = bf537_mac_rx_unmask_irq,
183}; 182};
184 183
185static void bf537_demux_mac_rx_irq(unsigned int __int_irq, 184static void bf537_demux_mac_rx_irq(struct irq_desc *desc)
186 struct irq_desc *desc)
187{ 185{
188 unsigned int int_irq = irq_desc_get_irq(desc);
189
190 if (bfin_read_DMA1_IRQ_STATUS() & (DMA_DONE | DMA_ERR)) 186 if (bfin_read_DMA1_IRQ_STATUS() & (DMA_DONE | DMA_ERR))
191 bfin_handle_irq(IRQ_MAC_RX); 187 bfin_handle_irq(IRQ_MAC_RX);
192 else 188 else
193 bfin_demux_gpio_irq(int_irq, desc); 189 bfin_demux_gpio_irq(desc);
194} 190}
195#endif 191#endif
196 192
diff --git a/arch/blackfin/mach-common/ints-priority.c b/arch/blackfin/mach-common/ints-priority.c
index a6d1b03cdf36..e8d4d748d0fd 100644
--- a/arch/blackfin/mach-common/ints-priority.c
+++ b/arch/blackfin/mach-common/ints-priority.c
@@ -656,8 +656,7 @@ static struct irq_chip bfin_mac_status_irqchip = {
656 .irq_set_wake = bfin_mac_status_set_wake, 656 .irq_set_wake = bfin_mac_status_set_wake,
657}; 657};
658 658
659void bfin_demux_mac_status_irq(unsigned int int_err_irq, 659void bfin_demux_mac_status_irq(struct irq_desc *inta_desc)
660 struct irq_desc *inta_desc)
661{ 660{
662 int i, irq = 0; 661 int i, irq = 0;
663 u32 status = bfin_read_EMAC_SYSTAT(); 662 u32 status = bfin_read_EMAC_SYSTAT();
@@ -825,7 +824,7 @@ static void bfin_demux_gpio_block(unsigned int irq)
825 } 824 }
826} 825}
827 826
828void bfin_demux_gpio_irq(unsigned int __inta_irq, struct irq_desc *desc) 827void bfin_demux_gpio_irq(struct irq_desc *desc)
829{ 828{
830 unsigned int inta_irq = irq_desc_get_irq(desc); 829 unsigned int inta_irq = irq_desc_get_irq(desc);
831 unsigned int irq; 830 unsigned int irq;
diff --git a/arch/c6x/platforms/megamod-pic.c b/arch/c6x/platforms/megamod-pic.c
index d487698e978a..ddcb45d7dfa7 100644
--- a/arch/c6x/platforms/megamod-pic.c
+++ b/arch/c6x/platforms/megamod-pic.c
@@ -93,7 +93,7 @@ static struct irq_chip megamod_chip = {
93 .irq_unmask = unmask_megamod, 93 .irq_unmask = unmask_megamod,
94}; 94};
95 95
96static void megamod_irq_cascade(unsigned int __irq, struct irq_desc *desc) 96static void megamod_irq_cascade(struct irq_desc *desc)
97{ 97{
98 struct megamod_cascade_data *cascade; 98 struct megamod_cascade_data *cascade;
99 struct megamod_pic *pic; 99 struct megamod_pic *pic;
diff --git a/arch/cris/Kconfig b/arch/cris/Kconfig
index 0314e325a669..8da5653bd895 100644
--- a/arch/cris/Kconfig
+++ b/arch/cris/Kconfig
@@ -36,6 +36,17 @@ config FORCE_MAX_ZONEORDER
36 int 36 int
37 default 6 37 default 6
38 38
39config TRACE_IRQFLAGS_SUPPORT
40 depends on ETRAX_ARCH_V32
41 def_bool y
42
43config STACKTRACE_SUPPORT
44 def_bool y
45
46config LOCKDEP_SUPPORT
47 depends on ETRAX_ARCH_V32
48 def_bool y
49
39config CRIS 50config CRIS
40 bool 51 bool
41 default y 52 default y
@@ -58,6 +69,7 @@ config CRIS
58 select CLKSRC_MMIO if ETRAX_ARCH_V32 69 select CLKSRC_MMIO if ETRAX_ARCH_V32
59 select GENERIC_CLOCKEVENTS if ETRAX_ARCH_V32 70 select GENERIC_CLOCKEVENTS if ETRAX_ARCH_V32
60 select GENERIC_SCHED_CLOCK if ETRAX_ARCH_V32 71 select GENERIC_SCHED_CLOCK if ETRAX_ARCH_V32
72 select HAVE_DEBUG_BUGVERBOSE if ETRAX_ARCH_V32
61 73
62config HZ 74config HZ
63 int 75 int
diff --git a/arch/cris/arch-v10/kernel/entry.S b/arch/cris/arch-v10/kernel/entry.S
index 81570fcd0412..b5622521dad5 100644
--- a/arch/cris/arch-v10/kernel/entry.S
+++ b/arch/cris/arch-v10/kernel/entry.S
@@ -955,6 +955,14 @@ sys_call_table:
955 .long sys_process_vm_writev 955 .long sys_process_vm_writev
956 .long sys_kcmp /* 350 */ 956 .long sys_kcmp /* 350 */
957 .long sys_finit_module 957 .long sys_finit_module
958 .long sys_sched_setattr
959 .long sys_sched_getattr
960 .long sys_renameat2
961 .long sys_seccomp /* 355 */
962 .long sys_getrandom
963 .long sys_memfd_create
964 .long sys_bpf
965 .long sys_execveat
958 966
959 /* 967 /*
960 * NOTE!! This doesn't have to be exact - we just have 968 * NOTE!! This doesn't have to be exact - we just have
diff --git a/arch/cris/arch-v10/lib/dmacopy.c b/arch/cris/arch-v10/lib/dmacopy.c
deleted file mode 100644
index 49f5b8ca5b47..000000000000
--- a/arch/cris/arch-v10/lib/dmacopy.c
+++ /dev/null
@@ -1,42 +0,0 @@
1/*
2 * memcpy for large blocks, using memory-memory DMA channels 6 and 7 in Etrax
3 */
4
5#include <asm/svinto.h>
6#include <asm/io.h>
7
8#define D(x)
9
10void *dma_memcpy(void *pdst,
11 const void *psrc,
12 unsigned int pn)
13{
14 static etrax_dma_descr indma, outdma;
15
16 D(printk(KERN_DEBUG "dma_memcpy %d bytes... ", pn));
17
18#if 0
19 *R_GEN_CONFIG = genconfig_shadow =
20 (genconfig_shadow & ~0x3c0000) |
21 IO_STATE(R_GEN_CONFIG, dma6, intdma7) |
22 IO_STATE(R_GEN_CONFIG, dma7, intdma6);
23#endif
24 indma.sw_len = outdma.sw_len = pn;
25 indma.ctrl = d_eol | d_eop;
26 outdma.ctrl = d_eol;
27 indma.buf = psrc;
28 outdma.buf = pdst;
29
30 *R_DMA_CH6_FIRST = &indma;
31 *R_DMA_CH7_FIRST = &outdma;
32 *R_DMA_CH6_CMD = IO_STATE(R_DMA_CH6_CMD, cmd, start);
33 *R_DMA_CH7_CMD = IO_STATE(R_DMA_CH7_CMD, cmd, start);
34
35 while (*R_DMA_CH7_CMD == 1)
36 /* wait for completion */;
37
38 D(printk(KERN_DEBUG "done\n"));
39}
40
41
42
diff --git a/arch/cris/arch-v10/lib/old_checksum.c b/arch/cris/arch-v10/lib/old_checksum.c
deleted file mode 100644
index 8f79163f1394..000000000000
--- a/arch/cris/arch-v10/lib/old_checksum.c
+++ /dev/null
@@ -1,86 +0,0 @@
1/*
2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
5 *
6 * IP/TCP/UDP checksumming routines
7 *
8 * Authors: Jorge Cwik, <jorge@laser.satlink.net>
9 * Arnt Gulbrandsen, <agulbra@nvg.unit.no>
10 * Tom May, <ftom@netcom.com>
11 * Lots of code moved from tcp.c and ip.c; see those files
12 * for more names.
13 *
14 * This program is free software; you can redistribute it and/or
15 * modify it under the terms of the GNU General Public License
16 * as published by the Free Software Foundation; either version
17 * 2 of the License, or (at your option) any later version.
18 */
19
20#include <net/checksum.h>
21#include <net/module.h>
22
23#undef PROFILE_CHECKSUM
24
25#ifdef PROFILE_CHECKSUM
26/* these are just for profiling the checksum code with an oscillioscope.. uh */
27#if 0
28#define BITOFF *((unsigned char *)0xb0000030) = 0xff
29#define BITON *((unsigned char *)0xb0000030) = 0x0
30#endif
31#include <asm/io.h>
32#define CBITON LED_ACTIVE_SET(1)
33#define CBITOFF LED_ACTIVE_SET(0)
34#define BITOFF
35#define BITON
36#else
37#define BITOFF
38#define BITON
39#define CBITOFF
40#define CBITON
41#endif
42
43/*
44 * computes a partial checksum, e.g. for TCP/UDP fragments
45 */
46
47#include <asm/delay.h>
48
49__wsum csum_partial(const void *p, int len, __wsum __sum)
50{
51 u32 sum = (__force u32)__sum;
52 const u16 *buff = p;
53 /*
54 * Experiments with ethernet and slip connections show that buff
55 * is aligned on either a 2-byte or 4-byte boundary.
56 */
57 const void *endMarker = p + len;
58 const void *marker = endMarker - (len % 16);
59#if 0
60 if((int)buff & 0x3)
61 printk("unaligned buff %p\n", buff);
62 __delay(900); /* extra delay of 90 us to test performance hit */
63#endif
64 BITON;
65 while (buff < marker) {
66 sum += *buff++;
67 sum += *buff++;
68 sum += *buff++;
69 sum += *buff++;
70 sum += *buff++;
71 sum += *buff++;
72 sum += *buff++;
73 sum += *buff++;
74 }
75 marker = endMarker - (len % 2);
76 while (buff < marker)
77 sum += *buff++;
78
79 if (endMarker > buff)
80 sum += *(const u8 *)buff; /* add extra byte separately */
81
82 BITOFF;
83 return (__force __wsum)sum;
84}
85
86EXPORT_SYMBOL(csum_partial);
diff --git a/arch/cris/arch-v32/drivers/Kconfig b/arch/cris/arch-v32/drivers/Kconfig
index 4fc16b44fff2..e6c523cc40bc 100644
--- a/arch/cris/arch-v32/drivers/Kconfig
+++ b/arch/cris/arch-v32/drivers/Kconfig
@@ -202,7 +202,7 @@ config ETRAX_PA_CHANGEABLE_DIR
202 default "0x00" if ETRAXFS 202 default "0x00" if ETRAXFS
203 default "0x00000000" if !ETRAXFS 203 default "0x00000000" if !ETRAXFS
204 help 204 help
205 This is a bitmask (8 bits) with information of what bits in PA that a 205 This is a bitmask with information of what bits in PA that a
206 user can change direction on using ioctl's. 206 user can change direction on using ioctl's.
207 Bit set = changeable. 207 Bit set = changeable.
208 You probably want 0 here, but it depends on your hardware. 208 You probably want 0 here, but it depends on your hardware.
@@ -213,7 +213,7 @@ config ETRAX_PA_CHANGEABLE_BITS
213 default "0x00" if ETRAXFS 213 default "0x00" if ETRAXFS
214 default "0x00000000" if !ETRAXFS 214 default "0x00000000" if !ETRAXFS
215 help 215 help
216 This is a bitmask (8 bits) with information of what bits in PA 216 This is a bitmask with information of what bits in PA
217 that a user can change the value on using ioctl's. 217 that a user can change the value on using ioctl's.
218 Bit set = changeable. 218 Bit set = changeable.
219 219
@@ -223,7 +223,7 @@ config ETRAX_PB_CHANGEABLE_DIR
223 default "0x00000" if ETRAXFS 223 default "0x00000" if ETRAXFS
224 default "0x00000000" if !ETRAXFS 224 default "0x00000000" if !ETRAXFS
225 help 225 help
226 This is a bitmask (18 bits) with information of what bits in PB 226 This is a bitmask with information of what bits in PB
227 that a user can change direction on using ioctl's. 227 that a user can change direction on using ioctl's.
228 Bit set = changeable. 228 Bit set = changeable.
229 You probably want 0 here, but it depends on your hardware. 229 You probably want 0 here, but it depends on your hardware.
@@ -234,7 +234,7 @@ config ETRAX_PB_CHANGEABLE_BITS
234 default "0x00000" if ETRAXFS 234 default "0x00000" if ETRAXFS
235 default "0x00000000" if !ETRAXFS 235 default "0x00000000" if !ETRAXFS
236 help 236 help
237 This is a bitmask (18 bits) with information of what bits in PB 237 This is a bitmask with information of what bits in PB
238 that a user can change the value on using ioctl's. 238 that a user can change the value on using ioctl's.
239 Bit set = changeable. 239 Bit set = changeable.
240 240
@@ -244,7 +244,7 @@ config ETRAX_PC_CHANGEABLE_DIR
244 default "0x00000" if ETRAXFS 244 default "0x00000" if ETRAXFS
245 default "0x00000000" if !ETRAXFS 245 default "0x00000000" if !ETRAXFS
246 help 246 help
247 This is a bitmask (18 bits) with information of what bits in PC 247 This is a bitmask with information of what bits in PC
248 that a user can change direction on using ioctl's. 248 that a user can change direction on using ioctl's.
249 Bit set = changeable. 249 Bit set = changeable.
250 You probably want 0 here, but it depends on your hardware. 250 You probably want 0 here, but it depends on your hardware.
@@ -253,9 +253,9 @@ config ETRAX_PC_CHANGEABLE_BITS
253 hex "PC user changeable bits mask" 253 hex "PC user changeable bits mask"
254 depends on ETRAX_GPIO 254 depends on ETRAX_GPIO
255 default "0x00000" if ETRAXFS 255 default "0x00000" if ETRAXFS
256 default "0x00000000" if ETRAXFS 256 default "0x00000000" if !ETRAXFS
257 help 257 help
258 This is a bitmask (18 bits) with information of what bits in PC 258 This is a bitmask with information of what bits in PC
259 that a user can change the value on using ioctl's. 259 that a user can change the value on using ioctl's.
260 Bit set = changeable. 260 Bit set = changeable.
261 261
@@ -264,7 +264,7 @@ config ETRAX_PD_CHANGEABLE_DIR
264 depends on ETRAX_GPIO && ETRAXFS 264 depends on ETRAX_GPIO && ETRAXFS
265 default "0x00000" 265 default "0x00000"
266 help 266 help
267 This is a bitmask (18 bits) with information of what bits in PD 267 This is a bitmask with information of what bits in PD
268 that a user can change direction on using ioctl's. 268 that a user can change direction on using ioctl's.
269 Bit set = changeable. 269 Bit set = changeable.
270 You probably want 0x00000 here, but it depends on your hardware. 270 You probably want 0x00000 here, but it depends on your hardware.
diff --git a/arch/cris/arch-v32/drivers/axisflashmap.c b/arch/cris/arch-v32/drivers/axisflashmap.c
index 28dd77144e8f..5387424683cc 100644
--- a/arch/cris/arch-v32/drivers/axisflashmap.c
+++ b/arch/cris/arch-v32/drivers/axisflashmap.c
@@ -313,6 +313,7 @@ static int __init init_axis_flash(void)
313 size_t len; 313 size_t len;
314 int ram_rootfs_partition = -1; /* -1 => no RAM rootfs partition */ 314 int ram_rootfs_partition = -1; /* -1 => no RAM rootfs partition */
315 int part; 315 int part;
316 struct mtd_partition *partition;
316 317
317 /* We need a root fs. If it resides in RAM, we need to use an 318 /* We need a root fs. If it resides in RAM, we need to use an
318 * MTDRAM device, so it must be enabled in the kernel config, 319 * MTDRAM device, so it must be enabled in the kernel config,
@@ -329,7 +330,7 @@ static int __init init_axis_flash(void)
329 330
330 main_mtd = flash_probe(); 331 main_mtd = flash_probe();
331 if (main_mtd) 332 if (main_mtd)
332 printk(KERN_INFO "%s: 0x%08x bytes of NOR flash memory.\n", 333 printk(KERN_INFO "%s: 0x%08llx bytes of NOR flash memory.\n",
333 main_mtd->name, main_mtd->size); 334 main_mtd->name, main_mtd->size);
334 335
335#ifdef CONFIG_ETRAX_NANDFLASH 336#ifdef CONFIG_ETRAX_NANDFLASH
@@ -388,10 +389,10 @@ static int __init init_axis_flash(void)
388#endif 389#endif
389 390
390 if (main_mtd) { 391 if (main_mtd) {
392 loff_t ptable_sector = CONFIG_ETRAX_PTABLE_SECTOR;
391 main_mtd->owner = THIS_MODULE; 393 main_mtd->owner = THIS_MODULE;
392 axisflash_mtd = main_mtd; 394 axisflash_mtd = main_mtd;
393 395
394 loff_t ptable_sector = CONFIG_ETRAX_PTABLE_SECTOR;
395 396
396 /* First partition (rescue) is always set to the default. */ 397 /* First partition (rescue) is always set to the default. */
397 pidx++; 398 pidx++;
@@ -517,7 +518,7 @@ static int __init init_axis_flash(void)
517 /* Decide whether to use default partition table. */ 518 /* Decide whether to use default partition table. */
518 /* Only use default table if we actually have a device (main_mtd) */ 519 /* Only use default table if we actually have a device (main_mtd) */
519 520
520 struct mtd_partition *partition = &axis_partitions[0]; 521 partition = &axis_partitions[0];
521 if (main_mtd && !ptable_ok) { 522 if (main_mtd && !ptable_ok) {
522 memcpy(axis_partitions, axis_default_partitions, 523 memcpy(axis_partitions, axis_default_partitions,
523 sizeof(axis_default_partitions)); 524 sizeof(axis_default_partitions));
@@ -580,7 +581,7 @@ static int __init init_axis_flash(void)
580 printk(KERN_INFO "axisflashmap: Adding RAM partition " 581 printk(KERN_INFO "axisflashmap: Adding RAM partition "
581 "for rootfs image.\n"); 582 "for rootfs image.\n");
582 err = mtdram_init_device(mtd_ram, 583 err = mtdram_init_device(mtd_ram,
583 (void *)partition[part].offset, 584 (void *)(u_int32_t)partition[part].offset,
584 partition[part].size, 585 partition[part].size,
585 partition[part].name); 586 partition[part].name);
586 if (err) 587 if (err)
diff --git a/arch/cris/arch-v32/drivers/mach-a3/gpio.c b/arch/cris/arch-v32/drivers/mach-a3/gpio.c
index 74f9fe80940c..c92e1da3684d 100644
--- a/arch/cris/arch-v32/drivers/mach-a3/gpio.c
+++ b/arch/cris/arch-v32/drivers/mach-a3/gpio.c
@@ -957,7 +957,7 @@ static void __init virtual_gpio_init(void)
957 957
958static int __init gpio_init(void) 958static int __init gpio_init(void)
959{ 959{
960 int res; 960 int res, res2;
961 961
962 printk(KERN_INFO "ETRAX FS GPIO driver v2.7, (c) 2003-2008 " 962 printk(KERN_INFO "ETRAX FS GPIO driver v2.7, (c) 2003-2008 "
963 "Axis Communications AB\n"); 963 "Axis Communications AB\n");
@@ -977,7 +977,7 @@ static int __init gpio_init(void)
977 CRIS_LED_DISK_READ(0); 977 CRIS_LED_DISK_READ(0);
978 CRIS_LED_DISK_WRITE(0); 978 CRIS_LED_DISK_WRITE(0);
979 979
980 int res2 = request_irq(GIO_INTR_VECT, gpio_interrupt, 980 res2 = request_irq(GIO_INTR_VECT, gpio_interrupt,
981 IRQF_SHARED, "gpio", &alarmlist); 981 IRQF_SHARED, "gpio", &alarmlist);
982 if (res2) { 982 if (res2) {
983 printk(KERN_ERR "err: irq for gpio\n"); 983 printk(KERN_ERR "err: irq for gpio\n");
diff --git a/arch/cris/arch-v32/drivers/mach-fs/gpio.c b/arch/cris/arch-v32/drivers/mach-fs/gpio.c
index 009f4ee1bd09..72968fbf814b 100644
--- a/arch/cris/arch-v32/drivers/mach-fs/gpio.c
+++ b/arch/cris/arch-v32/drivers/mach-fs/gpio.c
@@ -425,12 +425,11 @@ gpio_open(struct inode *inode, struct file *filp)
425 if (p > GPIO_MINOR_LAST) 425 if (p > GPIO_MINOR_LAST)
426 return -EINVAL; 426 return -EINVAL;
427 427
428 priv = kmalloc(sizeof(struct gpio_private), GFP_KERNEL); 428 priv = kzalloc(sizeof(struct gpio_private), GFP_KERNEL);
429 if (!priv) 429 if (!priv)
430 return -ENOMEM; 430 return -ENOMEM;
431 431
432 mutex_lock(&gpio_mutex); 432 mutex_lock(&gpio_mutex);
433 memset(priv, 0, sizeof(*priv));
434 433
435 priv->minor = p; 434 priv->minor = p;
436 435
diff --git a/arch/cris/arch-v32/kernel/entry.S b/arch/cris/arch-v32/kernel/entry.S
index 026a0b21b8f0..b17a20999f87 100644
--- a/arch/cris/arch-v32/kernel/entry.S
+++ b/arch/cris/arch-v32/kernel/entry.S
@@ -240,6 +240,17 @@ ret_from_sys_call:
240 240
241 .type _Rexit,@function 241 .type _Rexit,@function
242_Rexit: 242_Rexit:
243#if defined(CONFIG_TRACE_IRQFLAGS)
244 addoq +PT_ccs, $sp, $acr
245 move.d [$acr], $r0
246 btstq 15, $r0 ; I1
247 bpl 1f
248 nop
249 jsr trace_hardirqs_on
250 nop
2511:
252#endif
253
243 ;; This epilogue MUST match the prologues in multiple_interrupt, irq.h 254 ;; This epilogue MUST match the prologues in multiple_interrupt, irq.h
244 ;; and ptregs.h. 255 ;; and ptregs.h.
245 addq 4, $sp ; Skip orig_r10. 256 addq 4, $sp ; Skip orig_r10.
@@ -875,6 +886,14 @@ sys_call_table:
875 .long sys_process_vm_writev 886 .long sys_process_vm_writev
876 .long sys_kcmp /* 350 */ 887 .long sys_kcmp /* 350 */
877 .long sys_finit_module 888 .long sys_finit_module
889 .long sys_sched_setattr
890 .long sys_sched_getattr
891 .long sys_renameat2
892 .long sys_seccomp /* 355 */
893 .long sys_getrandom
894 .long sys_memfd_create
895 .long sys_bpf
896 .long sys_execveat
878 897
879 /* 898 /*
880 * NOTE!! This doesn't have to be exact - we just have 899 * NOTE!! This doesn't have to be exact - we just have
diff --git a/arch/cris/arch-v32/kernel/process.c b/arch/cris/arch-v32/kernel/process.c
index cebd32e2a8fb..c7ce784a393c 100644
--- a/arch/cris/arch-v32/kernel/process.c
+++ b/arch/cris/arch-v32/kernel/process.c
@@ -23,9 +23,9 @@ extern void stop_watchdog(void);
23/* We use this if we don't have any better idle routine. */ 23/* We use this if we don't have any better idle routine. */
24void default_idle(void) 24void default_idle(void)
25{ 25{
26 local_irq_enable();
26 /* Halt until exception. */ 27 /* Halt until exception. */
27 __asm__ volatile("ei \n\t" 28 __asm__ volatile("halt");
28 "halt ");
29} 29}
30 30
31/* 31/*
diff --git a/arch/cris/arch-v32/kernel/signal.c b/arch/cris/arch-v32/kernel/signal.c
index 3a36ae6b79d5..150d1d76c29d 100644
--- a/arch/cris/arch-v32/kernel/signal.c
+++ b/arch/cris/arch-v32/kernel/signal.c
@@ -19,7 +19,6 @@
19#include <asm/processor.h> 19#include <asm/processor.h>
20#include <asm/ucontext.h> 20#include <asm/ucontext.h>
21#include <asm/uaccess.h> 21#include <asm/uaccess.h>
22#include <arch/ptrace.h>
23#include <arch/hwregs/cpu_vect.h> 22#include <arch/hwregs/cpu_vect.h>
24 23
25extern unsigned long cris_signal_return_page; 24extern unsigned long cris_signal_return_page;
diff --git a/arch/cris/arch-v32/mach-fs/pinmux.c b/arch/cris/arch-v32/mach-fs/pinmux.c
index 05a04708b8eb..d8a3a3c439dd 100644
--- a/arch/cris/arch-v32/mach-fs/pinmux.c
+++ b/arch/cris/arch-v32/mach-fs/pinmux.c
@@ -46,6 +46,8 @@ static int __crisv32_pinmux_alloc(int port, int first_pin, int last_pin,
46 pins[port][i] = mode; 46 pins[port][i] = mode;
47 47
48 crisv32_pinmux_set(port); 48 crisv32_pinmux_set(port);
49
50 return 0;
49} 51}
50 52
51static int crisv32_pinmux_init(void) 53static int crisv32_pinmux_init(void)
@@ -93,6 +95,7 @@ int crisv32_pinmux_alloc_fixed(enum fixed_function function)
93 int ret = -EINVAL; 95 int ret = -EINVAL;
94 char saved[sizeof pins]; 96 char saved[sizeof pins];
95 unsigned long flags; 97 unsigned long flags;
98 reg_pinmux_rw_hwprot hwprot;
96 99
97 spin_lock_irqsave(&pinmux_lock, flags); 100 spin_lock_irqsave(&pinmux_lock, flags);
98 101
@@ -101,7 +104,7 @@ int crisv32_pinmux_alloc_fixed(enum fixed_function function)
101 104
102 crisv32_pinmux_init(); /* Must be done before we read rw_hwprot */ 105 crisv32_pinmux_init(); /* Must be done before we read rw_hwprot */
103 106
104 reg_pinmux_rw_hwprot hwprot = REG_RD(pinmux, regi_pinmux, rw_hwprot); 107 hwprot = REG_RD(pinmux, regi_pinmux, rw_hwprot);
105 108
106 switch (function) { 109 switch (function) {
107 case pinmux_ser1: 110 case pinmux_ser1:
@@ -227,6 +230,7 @@ int crisv32_pinmux_dealloc_fixed(enum fixed_function function)
227 int ret = -EINVAL; 230 int ret = -EINVAL;
228 char saved[sizeof pins]; 231 char saved[sizeof pins];
229 unsigned long flags; 232 unsigned long flags;
233 reg_pinmux_rw_hwprot hwprot;
230 234
231 spin_lock_irqsave(&pinmux_lock, flags); 235 spin_lock_irqsave(&pinmux_lock, flags);
232 236
@@ -235,7 +239,7 @@ int crisv32_pinmux_dealloc_fixed(enum fixed_function function)
235 239
236 crisv32_pinmux_init(); /* Must be done before we read rw_hwprot */ 240 crisv32_pinmux_init(); /* Must be done before we read rw_hwprot */
237 241
238 reg_pinmux_rw_hwprot hwprot = REG_RD(pinmux, regi_pinmux, rw_hwprot); 242 hwprot = REG_RD(pinmux, regi_pinmux, rw_hwprot);
239 243
240 switch (function) { 244 switch (function) {
241 case pinmux_ser1: 245 case pinmux_ser1:
diff --git a/arch/cris/configs/artpec_3_defconfig b/arch/cris/configs/artpec_3_defconfig
index 71854d41c5a0..70e497e0b03e 100644
--- a/arch/cris/configs/artpec_3_defconfig
+++ b/arch/cris/configs/artpec_3_defconfig
@@ -12,10 +12,6 @@ CONFIG_ETRAX_FAST_TIMER=y
12CONFIG_CRIS_MACH_ARTPEC3=y 12CONFIG_CRIS_MACH_ARTPEC3=y
13CONFIG_ETRAX_DRAM_SIZE=32 13CONFIG_ETRAX_DRAM_SIZE=32
14CONFIG_ETRAX_FLASH1_SIZE=4 14CONFIG_ETRAX_FLASH1_SIZE=4
15CONFIG_ETRAX_DEF_GIO_PA_OE=1c
16CONFIG_ETRAX_DEF_GIO_PA_OUT=00
17CONFIG_ETRAX_DEF_GIO_PB_OE=00000
18CONFIG_ETRAX_DEF_GIO_PB_OUT=00000
19CONFIG_NET=y 15CONFIG_NET=y
20CONFIG_PACKET=y 16CONFIG_PACKET=y
21CONFIG_UNIX=y 17CONFIG_UNIX=y
@@ -42,3 +38,4 @@ CONFIG_JFFS2_FS=y
42CONFIG_CRAMFS=y 38CONFIG_CRAMFS=y
43CONFIG_NFS_FS=y 39CONFIG_NFS_FS=y
44CONFIG_NFS_V3=y 40CONFIG_NFS_V3=y
41CONFIG_ETRAX_GPIO=y
diff --git a/arch/cris/configs/etraxfs_defconfig b/arch/cris/configs/etraxfs_defconfig
index 87c7227fecb2..91232680d6c8 100644
--- a/arch/cris/configs/etraxfs_defconfig
+++ b/arch/cris/configs/etraxfs_defconfig
@@ -38,3 +38,4 @@ CONFIG_JFFS2_FS=y
38CONFIG_CRAMFS=y 38CONFIG_CRAMFS=y
39CONFIG_NFS_FS=y 39CONFIG_NFS_FS=y
40CONFIG_NFS_V3=y 40CONFIG_NFS_V3=y
41CONFIG_ETRAX_GPIO=y
diff --git a/arch/cris/include/arch-v32/arch/bug.h b/arch/cris/include/arch-v32/arch/bug.h
index 0f211e135248..fb59faaaae0a 100644
--- a/arch/cris/include/arch-v32/arch/bug.h
+++ b/arch/cris/include/arch-v32/arch/bug.h
@@ -10,6 +10,7 @@
10 * All other stuff is done out-of-band with exception handlers. 10 * All other stuff is done out-of-band with exception handlers.
11 */ 11 */
12#define BUG() \ 12#define BUG() \
13do { \
13 __asm__ __volatile__ ("0: break 14\n\t" \ 14 __asm__ __volatile__ ("0: break 14\n\t" \
14 ".section .fixup,\"ax\"\n" \ 15 ".section .fixup,\"ax\"\n" \
15 "1:\n\t" \ 16 "1:\n\t" \
@@ -21,9 +22,15 @@
21 ".section __ex_table,\"a\"\n\t" \ 22 ".section __ex_table,\"a\"\n\t" \
22 ".dword 0b, 1b\n\t" \ 23 ".dword 0b, 1b\n\t" \
23 ".previous\n\t" \ 24 ".previous\n\t" \
24 : : "ri" (__FILE__), "i" (__LINE__)) 25 : : "ri" (__FILE__), "i" (__LINE__)); \
26 unreachable(); \
27} while (0)
25#else 28#else
26#define BUG() __asm__ __volatile__ ("break 14\n\t") 29#define BUG() \
30do { \
31 __asm__ __volatile__ ("break 14\n\t"); \
32 unreachable(); \
33} while (0)
27#endif 34#endif
28 35
29#define HAVE_ARCH_BUG 36#define HAVE_ARCH_BUG
diff --git a/arch/cris/include/arch-v32/arch/irqflags.h b/arch/cris/include/arch-v32/arch/irqflags.h
index 041851f8ec6f..5f6fddf99509 100644
--- a/arch/cris/include/arch-v32/arch/irqflags.h
+++ b/arch/cris/include/arch-v32/arch/irqflags.h
@@ -2,7 +2,7 @@
2#define __ASM_CRIS_ARCH_IRQFLAGS_H 2#define __ASM_CRIS_ARCH_IRQFLAGS_H
3 3
4#include <linux/types.h> 4#include <linux/types.h>
5#include <arch/ptrace.h> 5#include <asm/ptrace.h>
6 6
7static inline unsigned long arch_local_save_flags(void) 7static inline unsigned long arch_local_save_flags(void)
8{ 8{
diff --git a/arch/cris/include/asm/Kbuild b/arch/cris/include/asm/Kbuild
index ad2244f35bca..b7f68192d15b 100644
--- a/arch/cris/include/asm/Kbuild
+++ b/arch/cris/include/asm/Kbuild
@@ -1,14 +1,20 @@
1generic-y += atomic.h 1generic-y += atomic.h
2generic-y += auxvec.h
2generic-y += barrier.h 3generic-y += barrier.h
4generic-y += bitsperlong.h
3generic-y += clkdev.h 5generic-y += clkdev.h
4generic-y += cmpxchg.h 6generic-y += cmpxchg.h
5generic-y += cputime.h 7generic-y += cputime.h
6generic-y += device.h 8generic-y += device.h
7generic-y += div64.h 9generic-y += div64.h
10generic-y += errno.h
8generic-y += exec.h 11generic-y += exec.h
9generic-y += emergency-restart.h 12generic-y += emergency-restart.h
13generic-y += fcntl.h
10generic-y += futex.h 14generic-y += futex.h
11generic-y += hardirq.h 15generic-y += hardirq.h
16generic-y += ioctl.h
17generic-y += ipcbuf.h
12generic-y += irq_regs.h 18generic-y += irq_regs.h
13generic-y += irq_work.h 19generic-y += irq_work.h
14generic-y += kdebug.h 20generic-y += kdebug.h
@@ -19,11 +25,22 @@ generic-y += local.h
19generic-y += local64.h 25generic-y += local64.h
20generic-y += mcs_spinlock.h 26generic-y += mcs_spinlock.h
21generic-y += mm-arch-hooks.h 27generic-y += mm-arch-hooks.h
28generic-y += mman.h
22generic-y += module.h 29generic-y += module.h
30generic-y += msgbuf.h
23generic-y += percpu.h 31generic-y += percpu.h
32generic-y += poll.h
24generic-y += preempt.h 33generic-y += preempt.h
34generic-y += resource.h
25generic-y += sections.h 35generic-y += sections.h
36generic-y += sembuf.h
37generic-y += shmbuf.h
38generic-y += siginfo.h
39generic-y += socket.h
40generic-y += sockios.h
41generic-y += statfs.h
26generic-y += topology.h 42generic-y += topology.h
27generic-y += trace_clock.h 43generic-y += trace_clock.h
44generic-y += types.h
28generic-y += vga.h 45generic-y += vga.h
29generic-y += xor.h 46generic-y += xor.h
diff --git a/arch/cris/include/asm/mmu_context.h b/arch/cris/include/asm/mmu_context.h
index 1d45fd6365b7..349acfd25d2f 100644
--- a/arch/cris/include/asm/mmu_context.h
+++ b/arch/cris/include/asm/mmu_context.h
@@ -11,7 +11,14 @@ extern void switch_mm(struct mm_struct *prev, struct mm_struct *next,
11 11
12#define deactivate_mm(tsk,mm) do { } while (0) 12#define deactivate_mm(tsk,mm) do { } while (0)
13 13
14#define activate_mm(prev,next) switch_mm((prev),(next),NULL) 14static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
15{
16 unsigned long flags;
17
18 local_irq_save(flags);
19 switch_mm(prev, next, NULL);
20 local_irq_restore(flags);
21}
15 22
16/* current active pgd - this is similar to other processors pgd 23/* current active pgd - this is similar to other processors pgd
17 * registers like cr3 on the i386 24 * registers like cr3 on the i386
diff --git a/arch/cris/include/asm/stacktrace.h b/arch/cris/include/asm/stacktrace.h
new file mode 100644
index 000000000000..2d90856943ad
--- /dev/null
+++ b/arch/cris/include/asm/stacktrace.h
@@ -0,0 +1,8 @@
1#ifndef __CRIS_STACKTRACE_H
2#define __CRIS_STACKTRACE_H
3
4void walk_stackframe(unsigned long sp,
5 int (*fn)(unsigned long addr, void *data),
6 void *data);
7
8#endif
diff --git a/arch/cris/include/asm/types.h b/arch/cris/include/asm/types.h
deleted file mode 100644
index a3cac7757c7f..000000000000
--- a/arch/cris/include/asm/types.h
+++ /dev/null
@@ -1,12 +0,0 @@
1#ifndef _ETRAX_TYPES_H
2#define _ETRAX_TYPES_H
3
4#include <uapi/asm/types.h>
5
6/*
7 * These aren't exported outside the kernel to avoid name space clashes
8 */
9
10#define BITS_PER_LONG 32
11
12#endif
diff --git a/arch/cris/include/asm/unistd.h b/arch/cris/include/asm/unistd.h
index 0f40fed1ba25..9c23535821c0 100644
--- a/arch/cris/include/asm/unistd.h
+++ b/arch/cris/include/asm/unistd.h
@@ -4,7 +4,7 @@
4#include <uapi/asm/unistd.h> 4#include <uapi/asm/unistd.h>
5 5
6 6
7#define NR_syscalls 360 7#define NR_syscalls 365
8 8
9#include <arch/unistd.h> 9#include <arch/unistd.h>
10 10
diff --git a/arch/cris/include/uapi/asm/Kbuild b/arch/cris/include/uapi/asm/Kbuild
index 01f66b8f15e5..d5564a0ae66a 100644
--- a/arch/cris/include/uapi/asm/Kbuild
+++ b/arch/cris/include/uapi/asm/Kbuild
@@ -6,6 +6,9 @@ header-y += ../arch-v32/arch/
6header-y += auxvec.h 6header-y += auxvec.h
7header-y += bitsperlong.h 7header-y += bitsperlong.h
8header-y += byteorder.h 8header-y += byteorder.h
9header-y += elf.h
10header-y += elf_v10.h
11header-y += elf_v32.h
9header-y += errno.h 12header-y += errno.h
10header-y += ethernet.h 13header-y += ethernet.h
11header-y += etraxgpio.h 14header-y += etraxgpio.h
@@ -19,6 +22,8 @@ header-y += param.h
19header-y += poll.h 22header-y += poll.h
20header-y += posix_types.h 23header-y += posix_types.h
21header-y += ptrace.h 24header-y += ptrace.h
25header-y += ptrace_v10.h
26header-y += ptrace_v32.h
22header-y += resource.h 27header-y += resource.h
23header-y += rs485.h 28header-y += rs485.h
24header-y += sembuf.h 29header-y += sembuf.h
diff --git a/arch/cris/include/uapi/asm/auxvec.h b/arch/cris/include/uapi/asm/auxvec.h
deleted file mode 100644
index cb30b01bf19f..000000000000
--- a/arch/cris/include/uapi/asm/auxvec.h
+++ /dev/null
@@ -1,4 +0,0 @@
1#ifndef __ASMCRIS_AUXVEC_H
2#define __ASMCRIS_AUXVEC_H
3
4#endif
diff --git a/arch/cris/include/uapi/asm/bitsperlong.h b/arch/cris/include/uapi/asm/bitsperlong.h
deleted file mode 100644
index 6dc0bb0c13b2..000000000000
--- a/arch/cris/include/uapi/asm/bitsperlong.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/bitsperlong.h>
diff --git a/arch/cris/include/asm/elf.h b/arch/cris/include/uapi/asm/elf.h
index c2a394ff55ff..a5df05bfee66 100644
--- a/arch/cris/include/asm/elf.h
+++ b/arch/cris/include/uapi/asm/elf.h
@@ -5,7 +5,11 @@
5 * ELF register definitions.. 5 * ELF register definitions..
6 */ 6 */
7 7
8#include <asm/user.h> 8#ifdef __arch_v32
9#include <asm/elf_v32.h>
10#else
11#include <asm/elf_v10.h>
12#endif
9 13
10#define R_CRIS_NONE 0 14#define R_CRIS_NONE 0
11#define R_CRIS_8 1 15#define R_CRIS_8 1
@@ -32,7 +36,6 @@ typedef unsigned long elf_greg_t;
32 36
33/* Note that NGREG is defined to ELF_NGREG in include/linux/elfcore.h, and is 37/* Note that NGREG is defined to ELF_NGREG in include/linux/elfcore.h, and is
34 thus exposed to user-space. */ 38 thus exposed to user-space. */
35#define ELF_NGREG (sizeof (struct user_regs_struct) / sizeof(elf_greg_t))
36typedef elf_greg_t elf_gregset_t[ELF_NGREG]; 39typedef elf_greg_t elf_gregset_t[ELF_NGREG];
37 40
38/* A placeholder; CRIS does not have any fp regs. */ 41/* A placeholder; CRIS does not have any fp regs. */
@@ -45,8 +48,6 @@ typedef unsigned long elf_fpregset_t;
45#define ELF_DATA ELFDATA2LSB 48#define ELF_DATA ELFDATA2LSB
46#define ELF_ARCH EM_CRIS 49#define ELF_ARCH EM_CRIS
47 50
48#include <arch/elf.h>
49
50/* The master for these definitions is {binutils}/include/elf/cris.h: */ 51/* The master for these definitions is {binutils}/include/elf/cris.h: */
51/* User symbols in this file have a leading underscore. */ 52/* User symbols in this file have a leading underscore. */
52#define EF_CRIS_UNDERSCORE 0x00000001 53#define EF_CRIS_UNDERSCORE 0x00000001
diff --git a/arch/cris/include/arch-v10/arch/elf.h b/arch/cris/include/uapi/asm/elf_v10.h
index 1eb638aeddb4..3ea65cef529d 100644
--- a/arch/cris/include/arch-v10/arch/elf.h
+++ b/arch/cris/include/uapi/asm/elf_v10.h
@@ -1,10 +1,11 @@
1#ifndef __ASMCRIS_ARCH_ELF_H 1#ifndef __ASMCRIS_ARCH_ELF_H
2#define __ASMCRIS_ARCH_ELF_H 2#define __ASMCRIS_ARCH_ELF_H
3 3
4#include <arch/system.h>
5
6#define ELF_MACH EF_CRIS_VARIANT_ANY_V0_V10 4#define ELF_MACH EF_CRIS_VARIANT_ANY_V0_V10
7 5
6/* Matches struct user_regs_struct */
7#define ELF_NGREG 35
8
8/* 9/*
9 * This is used to ensure we don't load something for the wrong architecture. 10 * This is used to ensure we don't load something for the wrong architecture.
10 */ 11 */
diff --git a/arch/cris/include/arch-v32/arch/elf.h b/arch/cris/include/uapi/asm/elf_v32.h
index c46d58291166..f09fe49005c0 100644
--- a/arch/cris/include/arch-v32/arch/elf.h
+++ b/arch/cris/include/uapi/asm/elf_v32.h
@@ -1,10 +1,11 @@
1#ifndef _ASM_CRIS_ELF_H 1#ifndef _ASM_CRIS_ELF_H
2#define _ASM_CRIS_ELF_H 2#define _ASM_CRIS_ELF_H
3 3
4#include <arch/system.h>
5
6#define ELF_CORE_EFLAGS EF_CRIS_VARIANT_V32 4#define ELF_CORE_EFLAGS EF_CRIS_VARIANT_V32
7 5
6/* Matches struct user_regs_struct */
7#define ELF_NGREG 32
8
8/* 9/*
9 * This is used to ensure we don't load something for the wrong architecture. 10 * This is used to ensure we don't load something for the wrong architecture.
10 */ 11 */
diff --git a/arch/cris/include/uapi/asm/errno.h b/arch/cris/include/uapi/asm/errno.h
deleted file mode 100644
index 2bf5eb5fa773..000000000000
--- a/arch/cris/include/uapi/asm/errno.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef _CRIS_ERRNO_H
2#define _CRIS_ERRNO_H
3
4#include <asm-generic/errno.h>
5
6#endif
diff --git a/arch/cris/include/uapi/asm/fcntl.h b/arch/cris/include/uapi/asm/fcntl.h
deleted file mode 100644
index 46ab12db5739..000000000000
--- a/arch/cris/include/uapi/asm/fcntl.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/fcntl.h>
diff --git a/arch/cris/include/uapi/asm/ioctl.h b/arch/cris/include/uapi/asm/ioctl.h
deleted file mode 100644
index b279fe06dfe5..000000000000
--- a/arch/cris/include/uapi/asm/ioctl.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/ioctl.h>
diff --git a/arch/cris/include/uapi/asm/ipcbuf.h b/arch/cris/include/uapi/asm/ipcbuf.h
deleted file mode 100644
index 84c7e51cb6d0..000000000000
--- a/arch/cris/include/uapi/asm/ipcbuf.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/ipcbuf.h>
diff --git a/arch/cris/include/uapi/asm/kvm_para.h b/arch/cris/include/uapi/asm/kvm_para.h
deleted file mode 100644
index 14fab8f0b957..000000000000
--- a/arch/cris/include/uapi/asm/kvm_para.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/kvm_para.h>
diff --git a/arch/cris/include/uapi/asm/mman.h b/arch/cris/include/uapi/asm/mman.h
deleted file mode 100644
index 8eebf89f5ab1..000000000000
--- a/arch/cris/include/uapi/asm/mman.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/mman.h>
diff --git a/arch/cris/include/uapi/asm/msgbuf.h b/arch/cris/include/uapi/asm/msgbuf.h
deleted file mode 100644
index ada63df1d574..000000000000
--- a/arch/cris/include/uapi/asm/msgbuf.h
+++ /dev/null
@@ -1,33 +0,0 @@
1#ifndef _CRIS_MSGBUF_H
2#define _CRIS_MSGBUF_H
3
4/* verbatim copy of asm-i386 version */
5
6/*
7 * The msqid64_ds structure for CRIS architecture.
8 * Note extra padding because this structure is passed back and forth
9 * between kernel and user space.
10 *
11 * Pad space is left for:
12 * - 64-bit time_t to solve y2038 problem
13 * - 2 miscellaneous 32-bit values
14 */
15
16struct msqid64_ds {
17 struct ipc64_perm msg_perm;
18 __kernel_time_t msg_stime; /* last msgsnd time */
19 unsigned long __unused1;
20 __kernel_time_t msg_rtime; /* last msgrcv time */
21 unsigned long __unused2;
22 __kernel_time_t msg_ctime; /* last change time */
23 unsigned long __unused3;
24 unsigned long msg_cbytes; /* current number of bytes on queue */
25 unsigned long msg_qnum; /* number of messages in queue */
26 unsigned long msg_qbytes; /* max number of bytes on queue */
27 __kernel_pid_t msg_lspid; /* pid of last msgsnd */
28 __kernel_pid_t msg_lrpid; /* last receive pid */
29 unsigned long __unused4;
30 unsigned long __unused5;
31};
32
33#endif /* _CRIS_MSGBUF_H */
diff --git a/arch/cris/include/uapi/asm/poll.h b/arch/cris/include/uapi/asm/poll.h
deleted file mode 100644
index c98509d3149e..000000000000
--- a/arch/cris/include/uapi/asm/poll.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/poll.h>
diff --git a/arch/cris/include/uapi/asm/ptrace.h b/arch/cris/include/uapi/asm/ptrace.h
index c689c9bbbe50..bd8946f83ed3 100644
--- a/arch/cris/include/uapi/asm/ptrace.h
+++ b/arch/cris/include/uapi/asm/ptrace.h
@@ -1 +1,5 @@
1#include <arch/ptrace.h> 1#ifdef __arch_v32
2#include <asm/ptrace_v32.h>
3#else
4#include <asm/ptrace_v10.h>
5#endif
diff --git a/arch/cris/include/arch-v10/arch/ptrace.h b/arch/cris/include/uapi/asm/ptrace_v10.h
index 1a232739565e..1a232739565e 100644
--- a/arch/cris/include/arch-v10/arch/ptrace.h
+++ b/arch/cris/include/uapi/asm/ptrace_v10.h
diff --git a/arch/cris/include/arch-v32/arch/ptrace.h b/arch/cris/include/uapi/asm/ptrace_v32.h
index 19773d3bd4c4..19773d3bd4c4 100644
--- a/arch/cris/include/arch-v32/arch/ptrace.h
+++ b/arch/cris/include/uapi/asm/ptrace_v32.h
diff --git a/arch/cris/include/uapi/asm/resource.h b/arch/cris/include/uapi/asm/resource.h
deleted file mode 100644
index b5d29448de4e..000000000000
--- a/arch/cris/include/uapi/asm/resource.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef _CRIS_RESOURCE_H
2#define _CRIS_RESOURCE_H
3
4#include <asm-generic/resource.h>
5
6#endif
diff --git a/arch/cris/include/uapi/asm/sembuf.h b/arch/cris/include/uapi/asm/sembuf.h
deleted file mode 100644
index 7fed9843796d..000000000000
--- a/arch/cris/include/uapi/asm/sembuf.h
+++ /dev/null
@@ -1,25 +0,0 @@
1#ifndef _CRIS_SEMBUF_H
2#define _CRIS_SEMBUF_H
3
4/*
5 * The semid64_ds structure for CRIS architecture.
6 * Note extra padding because this structure is passed back and forth
7 * between kernel and user space.
8 *
9 * Pad space is left for:
10 * - 64-bit time_t to solve y2038 problem
11 * - 2 miscellaneous 32-bit values
12 */
13
14struct semid64_ds {
15 struct ipc64_perm sem_perm; /* permissions .. see ipc.h */
16 __kernel_time_t sem_otime; /* last semop time */
17 unsigned long __unused1;
18 __kernel_time_t sem_ctime; /* last change time */
19 unsigned long __unused2;
20 unsigned long sem_nsems; /* no. of semaphores in array */
21 unsigned long __unused3;
22 unsigned long __unused4;
23};
24
25#endif /* _CRIS_SEMBUF_H */
diff --git a/arch/cris/include/uapi/asm/shmbuf.h b/arch/cris/include/uapi/asm/shmbuf.h
deleted file mode 100644
index 3239e3f000e8..000000000000
--- a/arch/cris/include/uapi/asm/shmbuf.h
+++ /dev/null
@@ -1,42 +0,0 @@
1#ifndef _CRIS_SHMBUF_H
2#define _CRIS_SHMBUF_H
3
4/*
5 * The shmid64_ds structure for CRIS architecture (same as for i386)
6 * Note extra padding because this structure is passed back and forth
7 * between kernel and user space.
8 *
9 * Pad space is left for:
10 * - 64-bit time_t to solve y2038 problem
11 * - 2 miscellaneous 32-bit values
12 */
13
14struct shmid64_ds {
15 struct ipc64_perm shm_perm; /* operation perms */
16 size_t shm_segsz; /* size of segment (bytes) */
17 __kernel_time_t shm_atime; /* last attach time */
18 unsigned long __unused1;
19 __kernel_time_t shm_dtime; /* last detach time */
20 unsigned long __unused2;
21 __kernel_time_t shm_ctime; /* last change time */
22 unsigned long __unused3;
23 __kernel_pid_t shm_cpid; /* pid of creator */
24 __kernel_pid_t shm_lpid; /* pid of last operator */
25 unsigned long shm_nattch; /* no. of current attaches */
26 unsigned long __unused4;
27 unsigned long __unused5;
28};
29
30struct shminfo64 {
31 unsigned long shmmax;
32 unsigned long shmmin;
33 unsigned long shmmni;
34 unsigned long shmseg;
35 unsigned long shmall;
36 unsigned long __unused1;
37 unsigned long __unused2;
38 unsigned long __unused3;
39 unsigned long __unused4;
40};
41
42#endif /* _CRIS_SHMBUF_H */
diff --git a/arch/cris/include/uapi/asm/siginfo.h b/arch/cris/include/uapi/asm/siginfo.h
deleted file mode 100644
index c1cd6d16928b..000000000000
--- a/arch/cris/include/uapi/asm/siginfo.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef _CRIS_SIGINFO_H
2#define _CRIS_SIGINFO_H
3
4#include <asm-generic/siginfo.h>
5
6#endif
diff --git a/arch/cris/include/uapi/asm/socket.h b/arch/cris/include/uapi/asm/socket.h
deleted file mode 100644
index e2503d9f1869..000000000000
--- a/arch/cris/include/uapi/asm/socket.h
+++ /dev/null
@@ -1,92 +0,0 @@
1#ifndef _ASM_SOCKET_H
2#define _ASM_SOCKET_H
3
4/* almost the same as asm-i386/socket.h */
5
6#include <asm/sockios.h>
7
8/* For setsockoptions(2) */
9#define SOL_SOCKET 1
10
11#define SO_DEBUG 1
12#define SO_REUSEADDR 2
13#define SO_TYPE 3
14#define SO_ERROR 4
15#define SO_DONTROUTE 5
16#define SO_BROADCAST 6
17#define SO_SNDBUF 7
18#define SO_RCVBUF 8
19#define SO_SNDBUFFORCE 32
20#define SO_RCVBUFFORCE 33
21#define SO_KEEPALIVE 9
22#define SO_OOBINLINE 10
23#define SO_NO_CHECK 11
24#define SO_PRIORITY 12
25#define SO_LINGER 13
26#define SO_BSDCOMPAT 14
27#define SO_REUSEPORT 15
28#define SO_PASSCRED 16
29#define SO_PEERCRED 17
30#define SO_RCVLOWAT 18
31#define SO_SNDLOWAT 19
32#define SO_RCVTIMEO 20
33#define SO_SNDTIMEO 21
34
35/* Security levels - as per NRL IPv6 - don't actually do anything */
36#define SO_SECURITY_AUTHENTICATION 22
37#define SO_SECURITY_ENCRYPTION_TRANSPORT 23
38#define SO_SECURITY_ENCRYPTION_NETWORK 24
39
40#define SO_BINDTODEVICE 25
41
42/* Socket filtering */
43#define SO_ATTACH_FILTER 26
44#define SO_DETACH_FILTER 27
45#define SO_GET_FILTER SO_ATTACH_FILTER
46
47#define SO_PEERNAME 28
48#define SO_TIMESTAMP 29
49#define SCM_TIMESTAMP SO_TIMESTAMP
50
51#define SO_ACCEPTCONN 30
52
53#define SO_PEERSEC 31
54#define SO_PASSSEC 34
55#define SO_TIMESTAMPNS 35
56#define SCM_TIMESTAMPNS SO_TIMESTAMPNS
57
58#define SO_MARK 36
59
60#define SO_TIMESTAMPING 37
61#define SCM_TIMESTAMPING SO_TIMESTAMPING
62
63#define SO_PROTOCOL 38
64#define SO_DOMAIN 39
65
66#define SO_RXQ_OVFL 40
67
68#define SO_WIFI_STATUS 41
69#define SCM_WIFI_STATUS SO_WIFI_STATUS
70#define SO_PEEK_OFF 42
71
72/* Instruct lower device to use last 4-bytes of skb data as FCS */
73#define SO_NOFCS 43
74
75#define SO_LOCK_FILTER 44
76
77#define SO_SELECT_ERR_QUEUE 45
78
79#define SO_BUSY_POLL 46
80
81#define SO_MAX_PACING_RATE 47
82
83#define SO_BPF_EXTENSIONS 48
84
85#define SO_INCOMING_CPU 49
86
87#define SO_ATTACH_BPF 50
88#define SO_DETACH_BPF SO_DETACH_FILTER
89
90#endif /* _ASM_SOCKET_H */
91
92
diff --git a/arch/cris/include/uapi/asm/sockios.h b/arch/cris/include/uapi/asm/sockios.h
deleted file mode 100644
index cfe7bfecf599..000000000000
--- a/arch/cris/include/uapi/asm/sockios.h
+++ /dev/null
@@ -1,13 +0,0 @@
1#ifndef __ARCH_CRIS_SOCKIOS__
2#define __ARCH_CRIS_SOCKIOS__
3
4/* Socket-level I/O control calls. */
5#define FIOSETOWN 0x8901
6#define SIOCSPGRP 0x8902
7#define FIOGETOWN 0x8903
8#define SIOCGPGRP 0x8904
9#define SIOCATMARK 0x8905
10#define SIOCGSTAMP 0x8906 /* Get stamp (timeval) */
11#define SIOCGSTAMPNS 0x8907 /* Get stamp (timespec) */
12
13#endif
diff --git a/arch/cris/include/uapi/asm/statfs.h b/arch/cris/include/uapi/asm/statfs.h
deleted file mode 100644
index fdaf921844bc..000000000000
--- a/arch/cris/include/uapi/asm/statfs.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef _CRIS_STATFS_H
2#define _CRIS_STATFS_H
3
4#include <asm-generic/statfs.h>
5
6#endif
diff --git a/arch/cris/include/uapi/asm/types.h b/arch/cris/include/uapi/asm/types.h
deleted file mode 100644
index 9ec9d4c5ac4d..000000000000
--- a/arch/cris/include/uapi/asm/types.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/int-ll64.h>
diff --git a/arch/cris/include/uapi/asm/unistd.h b/arch/cris/include/uapi/asm/unistd.h
index f3287face443..062b648b27e1 100644
--- a/arch/cris/include/uapi/asm/unistd.h
+++ b/arch/cris/include/uapi/asm/unistd.h
@@ -356,5 +356,13 @@
356#define __NR_process_vm_writev 349 356#define __NR_process_vm_writev 349
357#define __NR_kcmp 350 357#define __NR_kcmp 350
358#define __NR_finit_module 351 358#define __NR_finit_module 351
359#define __NR_sched_setattr 352
360#define __NR_sched_getattr 353
361#define __NR_renameat2 354
362#define __NR_seccomp 355
363#define __NR_getrandom 356
364#define __NR_memfd_create 357
365#define __NR_bpf 358
366#define __NR_execveat 359
359 367
360#endif /* _UAPI_ASM_CRIS_UNISTD_H_ */ 368#endif /* _UAPI_ASM_CRIS_UNISTD_H_ */
diff --git a/arch/cris/kernel/Makefile b/arch/cris/kernel/Makefile
index edef71f12bb8..5fae398ca915 100644
--- a/arch/cris/kernel/Makefile
+++ b/arch/cris/kernel/Makefile
@@ -8,6 +8,7 @@ extra-y := vmlinux.lds
8 8
9obj-y := process.o traps.o irq.o ptrace.o setup.o time.o sys_cris.o 9obj-y := process.o traps.o irq.o ptrace.o setup.o time.o sys_cris.o
10obj-y += devicetree.o 10obj-y += devicetree.o
11obj-y += stacktrace.o
11 12
12obj-$(CONFIG_MODULES) += crisksyms.o 13obj-$(CONFIG_MODULES) += crisksyms.o
13obj-$(CONFIG_MODULES) += module.o 14obj-$(CONFIG_MODULES) += module.o
diff --git a/arch/cris/kernel/irq.c b/arch/cris/kernel/irq.c
index dd0be5de55d5..694850e8f077 100644
--- a/arch/cris/kernel/irq.c
+++ b/arch/cris/kernel/irq.c
@@ -45,7 +45,11 @@
45asmlinkage void do_IRQ(int irq, struct pt_regs * regs) 45asmlinkage void do_IRQ(int irq, struct pt_regs * regs)
46{ 46{
47 unsigned long sp; 47 unsigned long sp;
48 struct pt_regs *old_regs = set_irq_regs(regs); 48 struct pt_regs *old_regs;
49
50 trace_hardirqs_off();
51
52 old_regs = set_irq_regs(regs);
49 irq_enter(); 53 irq_enter();
50 sp = rdsp(); 54 sp = rdsp();
51 if (unlikely((sp & (PAGE_SIZE - 1)) < (PAGE_SIZE/8))) { 55 if (unlikely((sp & (PAGE_SIZE - 1)) < (PAGE_SIZE/8))) {
diff --git a/arch/cris/kernel/stacktrace.c b/arch/cris/kernel/stacktrace.c
new file mode 100644
index 000000000000..99838c74456d
--- /dev/null
+++ b/arch/cris/kernel/stacktrace.c
@@ -0,0 +1,76 @@
1#include <linux/sched.h>
2#include <linux/stacktrace.h>
3#include <linux/stacktrace.h>
4#include <asm/stacktrace.h>
5
6void walk_stackframe(unsigned long sp,
7 int (*fn)(unsigned long addr, void *data),
8 void *data)
9{
10 unsigned long high = ALIGN(sp, THREAD_SIZE);
11
12 for (; sp <= high - 4; sp += 4) {
13 unsigned long addr = *(unsigned long *) sp;
14
15 if (!kernel_text_address(addr))
16 continue;
17
18 if (fn(addr, data))
19 break;
20 }
21}
22
23struct stack_trace_data {
24 struct stack_trace *trace;
25 unsigned int no_sched_functions;
26 unsigned int skip;
27};
28
29#ifdef CONFIG_STACKTRACE
30
31static int save_trace(unsigned long addr, void *d)
32{
33 struct stack_trace_data *data = d;
34 struct stack_trace *trace = data->trace;
35
36 if (data->no_sched_functions && in_sched_functions(addr))
37 return 0;
38
39 if (data->skip) {
40 data->skip--;
41 return 0;
42 }
43
44 trace->entries[trace->nr_entries++] = addr;
45
46 return trace->nr_entries >= trace->max_entries;
47}
48
49void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
50{
51 struct stack_trace_data data;
52 unsigned long sp;
53
54 data.trace = trace;
55 data.skip = trace->skip;
56
57 if (tsk != current) {
58 data.no_sched_functions = 1;
59 sp = tsk->thread.ksp;
60 } else {
61 data.no_sched_functions = 0;
62 sp = rdsp();
63 }
64
65 walk_stackframe(sp, save_trace, &data);
66 if (trace->nr_entries < trace->max_entries)
67 trace->entries[trace->nr_entries++] = ULONG_MAX;
68}
69
70void save_stack_trace(struct stack_trace *trace)
71{
72 save_stack_trace_tsk(current, trace);
73}
74EXPORT_SYMBOL_GPL(save_stack_trace);
75
76#endif /* CONFIG_STACKTRACE */
diff --git a/arch/frv/mb93090-mb00/pci-vdk.c b/arch/frv/mb93090-mb00/pci-vdk.c
index f9c86c475bbd..f211839e2cae 100644
--- a/arch/frv/mb93090-mb00/pci-vdk.c
+++ b/arch/frv/mb93090-mb00/pci-vdk.c
@@ -294,6 +294,8 @@ void pcibios_fixup_bus(struct pci_bus *bus)
294 printk("### PCIBIOS_FIXUP_BUS(%d)\n",bus->number); 294 printk("### PCIBIOS_FIXUP_BUS(%d)\n",bus->number);
295#endif 295#endif
296 296
297 pci_read_bridge_bases(bus);
298
297 if (bus->number == 0) { 299 if (bus->number == 0) {
298 struct pci_dev *dev; 300 struct pci_dev *dev;
299 list_for_each_entry(dev, &bus->devices, bus_list) { 301 list_for_each_entry(dev, &bus->devices, bus_list) {
diff --git a/arch/ia64/include/asm/unistd.h b/arch/ia64/include/asm/unistd.h
index 95c39b95e97e..99c96a5e6016 100644
--- a/arch/ia64/include/asm/unistd.h
+++ b/arch/ia64/include/asm/unistd.h
@@ -11,7 +11,7 @@
11 11
12 12
13 13
14#define NR_syscalls 319 /* length of syscall table */ 14#define NR_syscalls 321 /* length of syscall table */
15 15
16/* 16/*
17 * The following defines stop scripts/checksyscalls.sh from complaining about 17 * The following defines stop scripts/checksyscalls.sh from complaining about
diff --git a/arch/ia64/include/uapi/asm/unistd.h b/arch/ia64/include/uapi/asm/unistd.h
index 461079560c78..98e94e19a5a0 100644
--- a/arch/ia64/include/uapi/asm/unistd.h
+++ b/arch/ia64/include/uapi/asm/unistd.h
@@ -332,5 +332,7 @@
332#define __NR_memfd_create 1340 332#define __NR_memfd_create 1340
333#define __NR_bpf 1341 333#define __NR_bpf 1341
334#define __NR_execveat 1342 334#define __NR_execveat 1342
335#define __NR_userfaultfd 1343
336#define __NR_membarrier 1344
335 337
336#endif /* _UAPI_ASM_IA64_UNISTD_H */ 338#endif /* _UAPI_ASM_IA64_UNISTD_H */
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index ae0de7bf5525..37cc7a65cd3e 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -1768,5 +1768,7 @@ sys_call_table:
1768 data8 sys_memfd_create // 1340 1768 data8 sys_memfd_create // 1340
1769 data8 sys_bpf 1769 data8 sys_bpf
1770 data8 sys_execveat 1770 data8 sys_execveat
1771 data8 sys_userfaultfd
1772 data8 sys_membarrier
1771 1773
1772 .org sys_call_table + 8*NR_syscalls // guard against failures to increase NR_syscalls 1774 .org sys_call_table + 8*NR_syscalls // guard against failures to increase NR_syscalls
diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c
index d89b6013c941..7cc3be9fa7c6 100644
--- a/arch/ia64/pci/pci.c
+++ b/arch/ia64/pci/pci.c
@@ -533,9 +533,10 @@ void pcibios_fixup_bus(struct pci_bus *b)
533{ 533{
534 struct pci_dev *dev; 534 struct pci_dev *dev;
535 535
536 if (b->self) 536 if (b->self) {
537 pci_read_bridge_bases(b);
537 pcibios_fixup_bridge_resources(b->self); 538 pcibios_fixup_bridge_resources(b->self);
538 539 }
539 list_for_each_entry(dev, &b->devices, bus_list) 540 list_for_each_entry(dev, &b->devices, bus_list)
540 pcibios_fixup_device_resources(dev); 541 pcibios_fixup_device_resources(dev);
541 platform_pci_fixup_bus(b); 542 platform_pci_fixup_bus(b);
diff --git a/arch/m68k/amiga/amiints.c b/arch/m68k/amiga/amiints.c
index 47b5f90002ab..7ff739e94896 100644
--- a/arch/m68k/amiga/amiints.c
+++ b/arch/m68k/amiga/amiints.c
@@ -46,7 +46,7 @@ static struct irq_chip amiga_irq_chip = {
46 * The builtin Amiga hardware interrupt handlers. 46 * The builtin Amiga hardware interrupt handlers.
47 */ 47 */
48 48
49static void ami_int1(unsigned int irq, struct irq_desc *desc) 49static void ami_int1(struct irq_desc *desc)
50{ 50{
51 unsigned short ints = amiga_custom.intreqr & amiga_custom.intenar; 51 unsigned short ints = amiga_custom.intreqr & amiga_custom.intenar;
52 52
@@ -69,7 +69,7 @@ static void ami_int1(unsigned int irq, struct irq_desc *desc)
69 } 69 }
70} 70}
71 71
72static void ami_int3(unsigned int irq, struct irq_desc *desc) 72static void ami_int3(struct irq_desc *desc)
73{ 73{
74 unsigned short ints = amiga_custom.intreqr & amiga_custom.intenar; 74 unsigned short ints = amiga_custom.intreqr & amiga_custom.intenar;
75 75
@@ -92,7 +92,7 @@ static void ami_int3(unsigned int irq, struct irq_desc *desc)
92 } 92 }
93} 93}
94 94
95static void ami_int4(unsigned int irq, struct irq_desc *desc) 95static void ami_int4(struct irq_desc *desc)
96{ 96{
97 unsigned short ints = amiga_custom.intreqr & amiga_custom.intenar; 97 unsigned short ints = amiga_custom.intreqr & amiga_custom.intenar;
98 98
@@ -121,7 +121,7 @@ static void ami_int4(unsigned int irq, struct irq_desc *desc)
121 } 121 }
122} 122}
123 123
124static void ami_int5(unsigned int irq, struct irq_desc *desc) 124static void ami_int5(struct irq_desc *desc)
125{ 125{
126 unsigned short ints = amiga_custom.intreqr & amiga_custom.intenar; 126 unsigned short ints = amiga_custom.intreqr & amiga_custom.intenar;
127 127
diff --git a/arch/m68k/coldfire/intc-5272.c b/arch/m68k/coldfire/intc-5272.c
index 47371de60427..b0a19e207a63 100644
--- a/arch/m68k/coldfire/intc-5272.c
+++ b/arch/m68k/coldfire/intc-5272.c
@@ -143,12 +143,10 @@ static int intc_irq_set_type(struct irq_data *d, unsigned int type)
143 * We need to be careful with the masking/acking due to the side effects 143 * We need to be careful with the masking/acking due to the side effects
144 * of masking an interrupt. 144 * of masking an interrupt.
145 */ 145 */
146static void intc_external_irq(unsigned int __irq, struct irq_desc *desc) 146static void intc_external_irq(struct irq_desc *desc)
147{ 147{
148 unsigned int irq = irq_desc_get_irq(desc);
149
150 irq_desc_get_chip(desc)->irq_ack(&desc->irq_data); 148 irq_desc_get_chip(desc)->irq_ack(&desc->irq_data);
151 handle_simple_irq(irq, desc); 149 handle_simple_irq(desc);
152} 150}
153 151
154static struct irq_chip intc_irq_chip = { 152static struct irq_chip intc_irq_chip = {
diff --git a/arch/m68k/include/asm/irq.h b/arch/m68k/include/asm/irq.h
index 81ca118d58af..a644f4a53b94 100644
--- a/arch/m68k/include/asm/irq.h
+++ b/arch/m68k/include/asm/irq.h
@@ -64,8 +64,7 @@ extern void m68k_setup_auto_interrupt(void (*handler)(unsigned int,
64 struct pt_regs *)); 64 struct pt_regs *));
65extern void m68k_setup_user_interrupt(unsigned int vec, unsigned int cnt); 65extern void m68k_setup_user_interrupt(unsigned int vec, unsigned int cnt);
66extern void m68k_setup_irq_controller(struct irq_chip *, 66extern void m68k_setup_irq_controller(struct irq_chip *,
67 void (*handle)(unsigned int irq, 67 void (*handle)(struct irq_desc *desc),
68 struct irq_desc *desc),
69 unsigned int irq, unsigned int cnt); 68 unsigned int irq, unsigned int cnt);
70 69
71extern unsigned int irq_canonicalize(unsigned int irq); 70extern unsigned int irq_canonicalize(unsigned int irq);
diff --git a/arch/m68k/include/asm/mac_via.h b/arch/m68k/include/asm/mac_via.h
index fe3fc9ae1b69..53c632c85b03 100644
--- a/arch/m68k/include/asm/mac_via.h
+++ b/arch/m68k/include/asm/mac_via.h
@@ -261,7 +261,7 @@ extern void via_irq_enable(int);
261extern void via_irq_disable(int); 261extern void via_irq_disable(int);
262extern void via_nubus_irq_startup(int irq); 262extern void via_nubus_irq_startup(int irq);
263extern void via_nubus_irq_shutdown(int irq); 263extern void via_nubus_irq_shutdown(int irq);
264extern void via1_irq(unsigned int irq, struct irq_desc *desc); 264extern void via1_irq(struct irq_desc *desc);
265extern void via1_set_head(int); 265extern void via1_set_head(int);
266extern int via2_scsi_drq_pending(void); 266extern int via2_scsi_drq_pending(void);
267 267
diff --git a/arch/m68k/mac/baboon.c b/arch/m68k/mac/baboon.c
index 3fe0e43d44f6..f6f7d42713ec 100644
--- a/arch/m68k/mac/baboon.c
+++ b/arch/m68k/mac/baboon.c
@@ -45,7 +45,7 @@ void __init baboon_init(void)
45 * Baboon interrupt handler. This works a lot like a VIA. 45 * Baboon interrupt handler. This works a lot like a VIA.
46 */ 46 */
47 47
48static void baboon_irq(unsigned int irq, struct irq_desc *desc) 48static void baboon_irq(struct irq_desc *desc)
49{ 49{
50 int irq_bit, irq_num; 50 int irq_bit, irq_num;
51 unsigned char events; 51 unsigned char events;
diff --git a/arch/m68k/mac/oss.c b/arch/m68k/mac/oss.c
index 191610d97689..55d6592783f5 100644
--- a/arch/m68k/mac/oss.c
+++ b/arch/m68k/mac/oss.c
@@ -63,7 +63,7 @@ void __init oss_nubus_init(void)
63 * Handle miscellaneous OSS interrupts. 63 * Handle miscellaneous OSS interrupts.
64 */ 64 */
65 65
66static void oss_irq(unsigned int __irq, struct irq_desc *desc) 66static void oss_irq(struct irq_desc *desc)
67{ 67{
68 int events = oss->irq_pending & 68 int events = oss->irq_pending &
69 (OSS_IP_IOPSCC | OSS_IP_SCSI | OSS_IP_IOPISM); 69 (OSS_IP_IOPSCC | OSS_IP_SCSI | OSS_IP_IOPISM);
@@ -99,7 +99,7 @@ static void oss_irq(unsigned int __irq, struct irq_desc *desc)
99 * Unlike the VIA/RBV this is on its own autovector interrupt level. 99 * Unlike the VIA/RBV this is on its own autovector interrupt level.
100 */ 100 */
101 101
102static void oss_nubus_irq(unsigned int irq, struct irq_desc *desc) 102static void oss_nubus_irq(struct irq_desc *desc)
103{ 103{
104 int events, irq_bit, i; 104 int events, irq_bit, i;
105 105
diff --git a/arch/m68k/mac/psc.c b/arch/m68k/mac/psc.c
index 3b9e302e7a37..cd38f29955c8 100644
--- a/arch/m68k/mac/psc.c
+++ b/arch/m68k/mac/psc.c
@@ -113,7 +113,7 @@ void __init psc_init(void)
113 * PSC interrupt handler. It's a lot like the VIA interrupt handler. 113 * PSC interrupt handler. It's a lot like the VIA interrupt handler.
114 */ 114 */
115 115
116static void psc_irq(unsigned int __irq, struct irq_desc *desc) 116static void psc_irq(struct irq_desc *desc)
117{ 117{
118 unsigned int offset = (unsigned int)irq_desc_get_handler_data(desc); 118 unsigned int offset = (unsigned int)irq_desc_get_handler_data(desc);
119 unsigned int irq = irq_desc_get_irq(desc); 119 unsigned int irq = irq_desc_get_irq(desc);
diff --git a/arch/m68k/mac/via.c b/arch/m68k/mac/via.c
index e198dec868e4..ce56e04386e7 100644
--- a/arch/m68k/mac/via.c
+++ b/arch/m68k/mac/via.c
@@ -446,7 +446,7 @@ void via_nubus_irq_shutdown(int irq)
446 * via6522.c :-), disable/pending masks added. 446 * via6522.c :-), disable/pending masks added.
447 */ 447 */
448 448
449void via1_irq(unsigned int irq, struct irq_desc *desc) 449void via1_irq(struct irq_desc *desc)
450{ 450{
451 int irq_num; 451 int irq_num;
452 unsigned char irq_bit, events; 452 unsigned char irq_bit, events;
@@ -467,7 +467,7 @@ void via1_irq(unsigned int irq, struct irq_desc *desc)
467 } while (events >= irq_bit); 467 } while (events >= irq_bit);
468} 468}
469 469
470static void via2_irq(unsigned int irq, struct irq_desc *desc) 470static void via2_irq(struct irq_desc *desc)
471{ 471{
472 int irq_num; 472 int irq_num;
473 unsigned char irq_bit, events; 473 unsigned char irq_bit, events;
@@ -493,7 +493,7 @@ static void via2_irq(unsigned int irq, struct irq_desc *desc)
493 * VIA2 dispatcher as a fast interrupt handler. 493 * VIA2 dispatcher as a fast interrupt handler.
494 */ 494 */
495 495
496void via_nubus_irq(unsigned int irq, struct irq_desc *desc) 496static void via_nubus_irq(struct irq_desc *desc)
497{ 497{
498 int slot_irq; 498 int slot_irq;
499 unsigned char slot_bit, events; 499 unsigned char slot_bit, events;
diff --git a/arch/metag/kernel/irq.c b/arch/metag/kernel/irq.c
index a336094a7a6c..3074b64793e6 100644
--- a/arch/metag/kernel/irq.c
+++ b/arch/metag/kernel/irq.c
@@ -94,13 +94,11 @@ void do_IRQ(int irq, struct pt_regs *regs)
94 "MOV D0.5,%0\n" 94 "MOV D0.5,%0\n"
95 "MOV D1Ar1,%1\n" 95 "MOV D1Ar1,%1\n"
96 "MOV D1RtP,%2\n" 96 "MOV D1RtP,%2\n"
97 "MOV D0Ar2,%3\n"
98 "SWAP A0StP,D0.5\n" 97 "SWAP A0StP,D0.5\n"
99 "SWAP PC,D1RtP\n" 98 "SWAP PC,D1RtP\n"
100 "MOV A0StP,D0.5\n" 99 "MOV A0StP,D0.5\n"
101 : 100 :
102 : "r" (isp), "r" (irq), "r" (desc->handle_irq), 101 : "r" (isp), "r" (desc), "r" (desc->handle_irq)
103 "r" (desc)
104 : "memory", "cc", "D1Ar1", "D0Ar2", "D1Ar3", "D0Ar4", 102 : "memory", "cc", "D1Ar1", "D0Ar2", "D1Ar3", "D0Ar4",
105 "D1Ar5", "D0Ar6", "D0Re0", "D1Re0", "D0.4", "D1RtP", 103 "D1Ar5", "D0Ar6", "D0Re0", "D1Re0", "D0.4", "D1RtP",
106 "D0.5" 104 "D0.5"
diff --git a/arch/microblaze/pci/pci-common.c b/arch/microblaze/pci/pci-common.c
index 6b8b75266801..ae838ed5fcf2 100644
--- a/arch/microblaze/pci/pci-common.c
+++ b/arch/microblaze/pci/pci-common.c
@@ -863,7 +863,14 @@ void pcibios_setup_bus_devices(struct pci_bus *bus)
863 863
864void pcibios_fixup_bus(struct pci_bus *bus) 864void pcibios_fixup_bus(struct pci_bus *bus)
865{ 865{
866 /* Fixup the bus */ 866 /* When called from the generic PCI probe, read PCI<->PCI bridge
867 * bases. This is -not- called when generating the PCI tree from
868 * the OF device-tree.
869 */
870 if (bus->self != NULL)
871 pci_read_bridge_bases(bus);
872
873 /* Now fixup the bus bus */
867 pcibios_setup_bus_self(bus); 874 pcibios_setup_bus_self(bus);
868 875
869 /* Now fixup devices on that bus */ 876 /* Now fixup devices on that bus */
diff --git a/arch/mips/alchemy/common/irq.c b/arch/mips/alchemy/common/irq.c
index 4c496c50edf6..da9f9220048f 100644
--- a/arch/mips/alchemy/common/irq.c
+++ b/arch/mips/alchemy/common/irq.c
@@ -851,7 +851,7 @@ static struct syscore_ops alchemy_gpic_pmops = {
851 851
852/* create chained handlers for the 4 IC requests to the MIPS IRQ ctrl */ 852/* create chained handlers for the 4 IC requests to the MIPS IRQ ctrl */
853#define DISP(name, base, addr) \ 853#define DISP(name, base, addr) \
854static void au1000_##name##_dispatch(unsigned int irq, struct irq_desc *d) \ 854static void au1000_##name##_dispatch(struct irq_desc *d) \
855{ \ 855{ \
856 unsigned long r = __raw_readl((void __iomem *)KSEG1ADDR(addr)); \ 856 unsigned long r = __raw_readl((void __iomem *)KSEG1ADDR(addr)); \
857 if (likely(r)) \ 857 if (likely(r)) \
@@ -865,7 +865,7 @@ DISP(ic0r1, AU1000_INTC0_INT_BASE, AU1000_IC0_PHYS_ADDR + IC_REQ1INT)
865DISP(ic1r0, AU1000_INTC1_INT_BASE, AU1000_IC1_PHYS_ADDR + IC_REQ0INT) 865DISP(ic1r0, AU1000_INTC1_INT_BASE, AU1000_IC1_PHYS_ADDR + IC_REQ0INT)
866DISP(ic1r1, AU1000_INTC1_INT_BASE, AU1000_IC1_PHYS_ADDR + IC_REQ1INT) 866DISP(ic1r1, AU1000_INTC1_INT_BASE, AU1000_IC1_PHYS_ADDR + IC_REQ1INT)
867 867
868static void alchemy_gpic_dispatch(unsigned int irq, struct irq_desc *d) 868static void alchemy_gpic_dispatch(struct irq_desc *d)
869{ 869{
870 int i = __raw_readl(AU1300_GPIC_ADDR + AU1300_GPIC_PRIENC); 870 int i = __raw_readl(AU1300_GPIC_ADDR + AU1300_GPIC_PRIENC);
871 generic_handle_irq(ALCHEMY_GPIC_INT_BASE + i); 871 generic_handle_irq(ALCHEMY_GPIC_INT_BASE + i);
diff --git a/arch/mips/alchemy/devboards/bcsr.c b/arch/mips/alchemy/devboards/bcsr.c
index 324ad72d7c36..faeddf119fd4 100644
--- a/arch/mips/alchemy/devboards/bcsr.c
+++ b/arch/mips/alchemy/devboards/bcsr.c
@@ -86,7 +86,7 @@ EXPORT_SYMBOL_GPL(bcsr_mod);
86/* 86/*
87 * DB1200/PB1200 CPLD IRQ muxer 87 * DB1200/PB1200 CPLD IRQ muxer
88 */ 88 */
89static void bcsr_csc_handler(unsigned int irq, struct irq_desc *d) 89static void bcsr_csc_handler(struct irq_desc *d)
90{ 90{
91 unsigned short bisr = __raw_readw(bcsr_virt + BCSR_REG_INTSTAT); 91 unsigned short bisr = __raw_readw(bcsr_virt + BCSR_REG_INTSTAT);
92 struct irq_chip *chip = irq_desc_get_chip(d); 92 struct irq_chip *chip = irq_desc_get_chip(d);
diff --git a/arch/mips/ath25/ar2315.c b/arch/mips/ath25/ar2315.c
index ec9a371f1e62..8da996142d6a 100644
--- a/arch/mips/ath25/ar2315.c
+++ b/arch/mips/ath25/ar2315.c
@@ -69,7 +69,7 @@ static struct irqaction ar2315_ahb_err_interrupt = {
69 .name = "ar2315-ahb-error", 69 .name = "ar2315-ahb-error",
70}; 70};
71 71
72static void ar2315_misc_irq_handler(unsigned irq, struct irq_desc *desc) 72static void ar2315_misc_irq_handler(struct irq_desc *desc)
73{ 73{
74 u32 pending = ar2315_rst_reg_read(AR2315_ISR) & 74 u32 pending = ar2315_rst_reg_read(AR2315_ISR) &
75 ar2315_rst_reg_read(AR2315_IMR); 75 ar2315_rst_reg_read(AR2315_IMR);
diff --git a/arch/mips/ath25/ar5312.c b/arch/mips/ath25/ar5312.c
index e63e38fa4880..acd55a9cffe3 100644
--- a/arch/mips/ath25/ar5312.c
+++ b/arch/mips/ath25/ar5312.c
@@ -73,7 +73,7 @@ static struct irqaction ar5312_ahb_err_interrupt = {
73 .name = "ar5312-ahb-error", 73 .name = "ar5312-ahb-error",
74}; 74};
75 75
76static void ar5312_misc_irq_handler(unsigned irq, struct irq_desc *desc) 76static void ar5312_misc_irq_handler(struct irq_desc *desc)
77{ 77{
78 u32 pending = ar5312_rst_reg_read(AR5312_ISR) & 78 u32 pending = ar5312_rst_reg_read(AR5312_ISR) &
79 ar5312_rst_reg_read(AR5312_IMR); 79 ar5312_rst_reg_read(AR5312_IMR);
diff --git a/arch/mips/ath79/irq.c b/arch/mips/ath79/irq.c
index 807132b838b2..15ecb4831e12 100644
--- a/arch/mips/ath79/irq.c
+++ b/arch/mips/ath79/irq.c
@@ -26,7 +26,7 @@
26#include "common.h" 26#include "common.h"
27#include "machtypes.h" 27#include "machtypes.h"
28 28
29static void ath79_misc_irq_handler(unsigned int irq, struct irq_desc *desc) 29static void ath79_misc_irq_handler(struct irq_desc *desc)
30{ 30{
31 void __iomem *base = ath79_reset_base; 31 void __iomem *base = ath79_reset_base;
32 u32 pending; 32 u32 pending;
@@ -119,7 +119,7 @@ static void __init ath79_misc_irq_init(void)
119 irq_set_chained_handler(ATH79_CPU_IRQ(6), ath79_misc_irq_handler); 119 irq_set_chained_handler(ATH79_CPU_IRQ(6), ath79_misc_irq_handler);
120} 120}
121 121
122static void ar934x_ip2_irq_dispatch(unsigned int irq, struct irq_desc *desc) 122static void ar934x_ip2_irq_dispatch(struct irq_desc *desc)
123{ 123{
124 u32 status; 124 u32 status;
125 125
@@ -148,7 +148,7 @@ static void ar934x_ip2_irq_init(void)
148 irq_set_chained_handler(ATH79_CPU_IRQ(2), ar934x_ip2_irq_dispatch); 148 irq_set_chained_handler(ATH79_CPU_IRQ(2), ar934x_ip2_irq_dispatch);
149} 149}
150 150
151static void qca955x_ip2_irq_dispatch(unsigned int irq, struct irq_desc *desc) 151static void qca955x_ip2_irq_dispatch(struct irq_desc *desc)
152{ 152{
153 u32 status; 153 u32 status;
154 154
@@ -171,7 +171,7 @@ static void qca955x_ip2_irq_dispatch(unsigned int irq, struct irq_desc *desc)
171 } 171 }
172} 172}
173 173
174static void qca955x_ip3_irq_dispatch(unsigned int irq, struct irq_desc *desc) 174static void qca955x_ip3_irq_dispatch(struct irq_desc *desc)
175{ 175{
176 u32 status; 176 u32 status;
177 177
diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c
index f26c3c661cca..0352bc8d56b3 100644
--- a/arch/mips/cavium-octeon/octeon-irq.c
+++ b/arch/mips/cavium-octeon/octeon-irq.c
@@ -2221,7 +2221,7 @@ static irqreturn_t octeon_irq_cib_handler(int my_irq, void *data)
2221 if (irqd_get_trigger_type(irq_data) & 2221 if (irqd_get_trigger_type(irq_data) &
2222 IRQ_TYPE_EDGE_BOTH) 2222 IRQ_TYPE_EDGE_BOTH)
2223 cvmx_write_csr(host_data->raw_reg, 1ull << i); 2223 cvmx_write_csr(host_data->raw_reg, 1ull << i);
2224 generic_handle_irq_desc(irq, desc); 2224 generic_handle_irq_desc(desc);
2225 } 2225 }
2226 } 2226 }
2227 2227
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index e8c8d9d0c45f..5a1a882e0a75 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -61,6 +61,7 @@
61#define KVM_PRIVATE_MEM_SLOTS 0 61#define KVM_PRIVATE_MEM_SLOTS 0
62 62
63#define KVM_COALESCED_MMIO_PAGE_OFFSET 1 63#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
64#define KVM_HALT_POLL_NS_DEFAULT 500000
64 65
65 66
66 67
@@ -128,6 +129,7 @@ struct kvm_vcpu_stat {
128 u32 msa_disabled_exits; 129 u32 msa_disabled_exits;
129 u32 flush_dcache_exits; 130 u32 flush_dcache_exits;
130 u32 halt_successful_poll; 131 u32 halt_successful_poll;
132 u32 halt_attempted_poll;
131 u32 halt_wakeup; 133 u32 halt_wakeup;
132}; 134};
133 135
diff --git a/arch/mips/include/asm/netlogic/common.h b/arch/mips/include/asm/netlogic/common.h
index 2a4c128277e4..be52c2125d71 100644
--- a/arch/mips/include/asm/netlogic/common.h
+++ b/arch/mips/include/asm/netlogic/common.h
@@ -57,8 +57,8 @@
57#include <asm/mach-netlogic/multi-node.h> 57#include <asm/mach-netlogic/multi-node.h>
58 58
59struct irq_desc; 59struct irq_desc;
60void nlm_smp_function_ipi_handler(unsigned int irq, struct irq_desc *desc); 60void nlm_smp_function_ipi_handler(struct irq_desc *desc);
61void nlm_smp_resched_ipi_handler(unsigned int irq, struct irq_desc *desc); 61void nlm_smp_resched_ipi_handler(struct irq_desc *desc);
62void nlm_smp_irq_init(int hwcpuid); 62void nlm_smp_irq_init(int hwcpuid);
63void nlm_boot_secondary_cpus(void); 63void nlm_boot_secondary_cpus(void);
64int nlm_wakeup_secondary_cpus(void); 64int nlm_wakeup_secondary_cpus(void);
diff --git a/arch/mips/jz4740/gpio.c b/arch/mips/jz4740/gpio.c
index 6cd69fdaa1c5..a74e181058b0 100644
--- a/arch/mips/jz4740/gpio.c
+++ b/arch/mips/jz4740/gpio.c
@@ -291,7 +291,7 @@ static void jz_gpio_check_trigger_both(struct jz_gpio_chip *chip, unsigned int i
291 writel(mask, reg); 291 writel(mask, reg);
292} 292}
293 293
294static void jz_gpio_irq_demux_handler(unsigned int irq, struct irq_desc *desc) 294static void jz_gpio_irq_demux_handler(struct irq_desc *desc)
295{ 295{
296 uint32_t flag; 296 uint32_t flag;
297 unsigned int gpio_irq; 297 unsigned int gpio_irq;
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index cd4c129ce743..49ff3bfc007e 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -55,6 +55,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
55 { "msa_disabled", VCPU_STAT(msa_disabled_exits), KVM_STAT_VCPU }, 55 { "msa_disabled", VCPU_STAT(msa_disabled_exits), KVM_STAT_VCPU },
56 { "flush_dcache", VCPU_STAT(flush_dcache_exits), KVM_STAT_VCPU }, 56 { "flush_dcache", VCPU_STAT(flush_dcache_exits), KVM_STAT_VCPU },
57 { "halt_successful_poll", VCPU_STAT(halt_successful_poll), KVM_STAT_VCPU }, 57 { "halt_successful_poll", VCPU_STAT(halt_successful_poll), KVM_STAT_VCPU },
58 { "halt_attempted_poll", VCPU_STAT(halt_attempted_poll), KVM_STAT_VCPU },
58 { "halt_wakeup", VCPU_STAT(halt_wakeup), KVM_STAT_VCPU }, 59 { "halt_wakeup", VCPU_STAT(halt_wakeup), KVM_STAT_VCPU },
59 {NULL} 60 {NULL}
60}; 61};
diff --git a/arch/mips/netlogic/common/smp.c b/arch/mips/netlogic/common/smp.c
index 0136b4f9c9cd..10d86d54880a 100644
--- a/arch/mips/netlogic/common/smp.c
+++ b/arch/mips/netlogic/common/smp.c
@@ -82,7 +82,7 @@ void nlm_send_ipi_mask(const struct cpumask *mask, unsigned int action)
82} 82}
83 83
84/* IRQ_IPI_SMP_FUNCTION Handler */ 84/* IRQ_IPI_SMP_FUNCTION Handler */
85void nlm_smp_function_ipi_handler(unsigned int __irq, struct irq_desc *desc) 85void nlm_smp_function_ipi_handler(struct irq_desc *desc)
86{ 86{
87 unsigned int irq = irq_desc_get_irq(desc); 87 unsigned int irq = irq_desc_get_irq(desc);
88 clear_c0_eimr(irq); 88 clear_c0_eimr(irq);
@@ -92,7 +92,7 @@ void nlm_smp_function_ipi_handler(unsigned int __irq, struct irq_desc *desc)
92} 92}
93 93
94/* IRQ_IPI_SMP_RESCHEDULE handler */ 94/* IRQ_IPI_SMP_RESCHEDULE handler */
95void nlm_smp_resched_ipi_handler(unsigned int __irq, struct irq_desc *desc) 95void nlm_smp_resched_ipi_handler(struct irq_desc *desc)
96{ 96{
97 unsigned int irq = irq_desc_get_irq(desc); 97 unsigned int irq = irq_desc_get_irq(desc);
98 clear_c0_eimr(irq); 98 clear_c0_eimr(irq);
diff --git a/arch/mips/pci/pci-ar2315.c b/arch/mips/pci/pci-ar2315.c
index f8d0acb4f973..b4fa6413c4e5 100644
--- a/arch/mips/pci/pci-ar2315.c
+++ b/arch/mips/pci/pci-ar2315.c
@@ -318,7 +318,7 @@ static int ar2315_pci_host_setup(struct ar2315_pci_ctrl *apc)
318 return 0; 318 return 0;
319} 319}
320 320
321static void ar2315_pci_irq_handler(unsigned irq, struct irq_desc *desc) 321static void ar2315_pci_irq_handler(struct irq_desc *desc)
322{ 322{
323 struct ar2315_pci_ctrl *apc = irq_desc_get_handler_data(desc); 323 struct ar2315_pci_ctrl *apc = irq_desc_get_handler_data(desc);
324 u32 pending = ar2315_pci_reg_read(apc, AR2315_PCI_ISR) & 324 u32 pending = ar2315_pci_reg_read(apc, AR2315_PCI_ISR) &
diff --git a/arch/mips/pci/pci-ar71xx.c b/arch/mips/pci/pci-ar71xx.c
index ad35a5e6a56c..7db963deec73 100644
--- a/arch/mips/pci/pci-ar71xx.c
+++ b/arch/mips/pci/pci-ar71xx.c
@@ -226,7 +226,7 @@ static struct pci_ops ar71xx_pci_ops = {
226 .write = ar71xx_pci_write_config, 226 .write = ar71xx_pci_write_config,
227}; 227};
228 228
229static void ar71xx_pci_irq_handler(unsigned int irq, struct irq_desc *desc) 229static void ar71xx_pci_irq_handler(struct irq_desc *desc)
230{ 230{
231 struct ar71xx_pci_controller *apc; 231 struct ar71xx_pci_controller *apc;
232 void __iomem *base = ath79_reset_base; 232 void __iomem *base = ath79_reset_base;
diff --git a/arch/mips/pci/pci-ar724x.c b/arch/mips/pci/pci-ar724x.c
index 907d11dd921b..2013dad700df 100644
--- a/arch/mips/pci/pci-ar724x.c
+++ b/arch/mips/pci/pci-ar724x.c
@@ -225,7 +225,7 @@ static struct pci_ops ar724x_pci_ops = {
225 .write = ar724x_pci_write, 225 .write = ar724x_pci_write,
226}; 226};
227 227
228static void ar724x_pci_irq_handler(unsigned int irq, struct irq_desc *desc) 228static void ar724x_pci_irq_handler(struct irq_desc *desc)
229{ 229{
230 struct ar724x_pci_controller *apc; 230 struct ar724x_pci_controller *apc;
231 void __iomem *base; 231 void __iomem *base;
diff --git a/arch/mips/pci/pci-rt3883.c b/arch/mips/pci/pci-rt3883.c
index 53c8efaf1572..ed6732f9aa87 100644
--- a/arch/mips/pci/pci-rt3883.c
+++ b/arch/mips/pci/pci-rt3883.c
@@ -129,7 +129,7 @@ static void rt3883_pci_write_cfg32(struct rt3883_pci_controller *rpc,
129 rt3883_pci_w32(rpc, val, RT3883_PCI_REG_CFGDATA); 129 rt3883_pci_w32(rpc, val, RT3883_PCI_REG_CFGDATA);
130} 130}
131 131
132static void rt3883_pci_irq_handler(unsigned int __irq, struct irq_desc *desc) 132static void rt3883_pci_irq_handler(struct irq_desc *desc)
133{ 133{
134 struct rt3883_pci_controller *rpc; 134 struct rt3883_pci_controller *rpc;
135 u32 pending; 135 u32 pending;
diff --git a/arch/mips/pci/pci.c b/arch/mips/pci/pci.c
index c6996cf67a5c..b8a0bf5766f2 100644
--- a/arch/mips/pci/pci.c
+++ b/arch/mips/pci/pci.c
@@ -311,6 +311,12 @@ int pcibios_enable_device(struct pci_dev *dev, int mask)
311 311
312void pcibios_fixup_bus(struct pci_bus *bus) 312void pcibios_fixup_bus(struct pci_bus *bus)
313{ 313{
314 struct pci_dev *dev = bus->self;
315
316 if (pci_has_flag(PCI_PROBE_ONLY) && dev &&
317 (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
318 pci_read_bridge_bases(bus);
319 }
314} 320}
315 321
316EXPORT_SYMBOL(PCIBIOS_MIN_IO); 322EXPORT_SYMBOL(PCIBIOS_MIN_IO);
diff --git a/arch/mips/ralink/irq.c b/arch/mips/ralink/irq.c
index 8c624a8b9ea2..4cf77f358395 100644
--- a/arch/mips/ralink/irq.c
+++ b/arch/mips/ralink/irq.c
@@ -96,7 +96,7 @@ unsigned int get_c0_compare_int(void)
96 return CP0_LEGACY_COMPARE_IRQ; 96 return CP0_LEGACY_COMPARE_IRQ;
97} 97}
98 98
99static void ralink_intc_irq_handler(unsigned int irq, struct irq_desc *desc) 99static void ralink_intc_irq_handler(struct irq_desc *desc)
100{ 100{
101 u32 pending = rt_intc_r32(INTC_REG_STATUS0); 101 u32 pending = rt_intc_r32(INTC_REG_STATUS0);
102 102
diff --git a/arch/mn10300/unit-asb2305/pci.c b/arch/mn10300/unit-asb2305/pci.c
index deaa893efba5..3dfe2d31c67b 100644
--- a/arch/mn10300/unit-asb2305/pci.c
+++ b/arch/mn10300/unit-asb2305/pci.c
@@ -324,6 +324,7 @@ void pcibios_fixup_bus(struct pci_bus *bus)
324 struct pci_dev *dev; 324 struct pci_dev *dev;
325 325
326 if (bus->self) { 326 if (bus->self) {
327 pci_read_bridge_bases(bus);
327 pcibios_fixup_bridge_resources(bus->self); 328 pcibios_fixup_bridge_resources(bus->self);
328 } 329 }
329 330
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
index 73eddda53b8e..4eec430d8fa8 100644
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
@@ -28,6 +28,9 @@ BOOTCFLAGS += -m64
28endif 28endif
29ifdef CONFIG_CPU_BIG_ENDIAN 29ifdef CONFIG_CPU_BIG_ENDIAN
30BOOTCFLAGS += -mbig-endian 30BOOTCFLAGS += -mbig-endian
31else
32BOOTCFLAGS += -mlittle-endian
33BOOTCFLAGS += $(call cc-option,-mabi=elfv2)
31endif 34endif
32 35
33BOOTAFLAGS := -D__ASSEMBLY__ $(BOOTCFLAGS) -traditional -nostdinc 36BOOTAFLAGS := -D__ASSEMBLY__ $(BOOTCFLAGS) -traditional -nostdinc
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 98eebbf66340..827a38d7a9db 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -44,6 +44,7 @@
44#ifdef CONFIG_KVM_MMIO 44#ifdef CONFIG_KVM_MMIO
45#define KVM_COALESCED_MMIO_PAGE_OFFSET 1 45#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
46#endif 46#endif
47#define KVM_HALT_POLL_NS_DEFAULT 500000
47 48
48/* These values are internal and can be increased later */ 49/* These values are internal and can be increased later */
49#define KVM_NR_IRQCHIPS 1 50#define KVM_NR_IRQCHIPS 1
@@ -108,6 +109,7 @@ struct kvm_vcpu_stat {
108 u32 dec_exits; 109 u32 dec_exits;
109 u32 ext_intr_exits; 110 u32 ext_intr_exits;
110 u32 halt_successful_poll; 111 u32 halt_successful_poll;
112 u32 halt_attempted_poll;
111 u32 halt_wakeup; 113 u32 halt_wakeup;
112 u32 dbell_exits; 114 u32 dbell_exits;
113 u32 gdbell_exits; 115 u32 gdbell_exits;
diff --git a/arch/powerpc/include/asm/qe_ic.h b/arch/powerpc/include/asm/qe_ic.h
index 25784cc959a0..1e155ca6d33c 100644
--- a/arch/powerpc/include/asm/qe_ic.h
+++ b/arch/powerpc/include/asm/qe_ic.h
@@ -59,14 +59,14 @@ enum qe_ic_grp_id {
59 59
60#ifdef CONFIG_QUICC_ENGINE 60#ifdef CONFIG_QUICC_ENGINE
61void qe_ic_init(struct device_node *node, unsigned int flags, 61void qe_ic_init(struct device_node *node, unsigned int flags,
62 void (*low_handler)(unsigned int irq, struct irq_desc *desc), 62 void (*low_handler)(struct irq_desc *desc),
63 void (*high_handler)(unsigned int irq, struct irq_desc *desc)); 63 void (*high_handler)(struct irq_desc *desc));
64unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic); 64unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic);
65unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic); 65unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic);
66#else 66#else
67static inline void qe_ic_init(struct device_node *node, unsigned int flags, 67static inline void qe_ic_init(struct device_node *node, unsigned int flags,
68 void (*low_handler)(unsigned int irq, struct irq_desc *desc), 68 void (*low_handler)(struct irq_desc *desc),
69 void (*high_handler)(unsigned int irq, struct irq_desc *desc)) 69 void (*high_handler)(struct irq_desc *desc))
70{} 70{}
71static inline unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic) 71static inline unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic)
72{ return 0; } 72{ return 0; }
@@ -78,8 +78,7 @@ void qe_ic_set_highest_priority(unsigned int virq, int high);
78int qe_ic_set_priority(unsigned int virq, unsigned int priority); 78int qe_ic_set_priority(unsigned int virq, unsigned int priority);
79int qe_ic_set_high_priority(unsigned int virq, unsigned int priority, int high); 79int qe_ic_set_high_priority(unsigned int virq, unsigned int priority, int high);
80 80
81static inline void qe_ic_cascade_low_ipic(unsigned int irq, 81static inline void qe_ic_cascade_low_ipic(struct irq_desc *desc)
82 struct irq_desc *desc)
83{ 82{
84 struct qe_ic *qe_ic = irq_desc_get_handler_data(desc); 83 struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
85 unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic); 84 unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic);
@@ -88,8 +87,7 @@ static inline void qe_ic_cascade_low_ipic(unsigned int irq,
88 generic_handle_irq(cascade_irq); 87 generic_handle_irq(cascade_irq);
89} 88}
90 89
91static inline void qe_ic_cascade_high_ipic(unsigned int irq, 90static inline void qe_ic_cascade_high_ipic(struct irq_desc *desc)
92 struct irq_desc *desc)
93{ 91{
94 struct qe_ic *qe_ic = irq_desc_get_handler_data(desc); 92 struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
95 unsigned int cascade_irq = qe_ic_get_high_irq(qe_ic); 93 unsigned int cascade_irq = qe_ic_get_high_irq(qe_ic);
@@ -98,8 +96,7 @@ static inline void qe_ic_cascade_high_ipic(unsigned int irq,
98 generic_handle_irq(cascade_irq); 96 generic_handle_irq(cascade_irq);
99} 97}
100 98
101static inline void qe_ic_cascade_low_mpic(unsigned int irq, 99static inline void qe_ic_cascade_low_mpic(struct irq_desc *desc)
102 struct irq_desc *desc)
103{ 100{
104 struct qe_ic *qe_ic = irq_desc_get_handler_data(desc); 101 struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
105 unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic); 102 unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic);
@@ -111,8 +108,7 @@ static inline void qe_ic_cascade_low_mpic(unsigned int irq,
111 chip->irq_eoi(&desc->irq_data); 108 chip->irq_eoi(&desc->irq_data);
112} 109}
113 110
114static inline void qe_ic_cascade_high_mpic(unsigned int irq, 111static inline void qe_ic_cascade_high_mpic(struct irq_desc *desc)
115 struct irq_desc *desc)
116{ 112{
117 struct qe_ic *qe_ic = irq_desc_get_handler_data(desc); 113 struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
118 unsigned int cascade_irq = qe_ic_get_high_irq(qe_ic); 114 unsigned int cascade_irq = qe_ic_get_high_irq(qe_ic);
@@ -124,8 +120,7 @@ static inline void qe_ic_cascade_high_mpic(unsigned int irq,
124 chip->irq_eoi(&desc->irq_data); 120 chip->irq_eoi(&desc->irq_data);
125} 121}
126 122
127static inline void qe_ic_cascade_muxed_mpic(unsigned int irq, 123static inline void qe_ic_cascade_muxed_mpic(struct irq_desc *desc)
128 struct irq_desc *desc)
129{ 124{
130 struct qe_ic *qe_ic = irq_desc_get_handler_data(desc); 125 struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
131 unsigned int cascade_irq; 126 unsigned int cascade_irq;
diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h
index 71f2b3f02cf8..126d0c4f9b7d 100644
--- a/arch/powerpc/include/asm/systbl.h
+++ b/arch/powerpc/include/asm/systbl.h
@@ -368,3 +368,5 @@ SYSCALL_SPU(memfd_create)
368SYSCALL_SPU(bpf) 368SYSCALL_SPU(bpf)
369COMPAT_SYS(execveat) 369COMPAT_SYS(execveat)
370PPC64ONLY(switch_endian) 370PPC64ONLY(switch_endian)
371SYSCALL_SPU(userfaultfd)
372SYSCALL_SPU(membarrier)
diff --git a/arch/powerpc/include/asm/tsi108_pci.h b/arch/powerpc/include/asm/tsi108_pci.h
index 5653d7cc3e24..ae59d5b672b0 100644
--- a/arch/powerpc/include/asm/tsi108_pci.h
+++ b/arch/powerpc/include/asm/tsi108_pci.h
@@ -39,7 +39,7 @@
39 39
40extern int tsi108_setup_pci(struct device_node *dev, u32 cfg_phys, int primary); 40extern int tsi108_setup_pci(struct device_node *dev, u32 cfg_phys, int primary);
41extern void tsi108_pci_int_init(struct device_node *node); 41extern void tsi108_pci_int_init(struct device_node *node);
42extern void tsi108_irq_cascade(unsigned int irq, struct irq_desc *desc); 42extern void tsi108_irq_cascade(struct irq_desc *desc);
43extern void tsi108_clear_pci_cfg_error(void); 43extern void tsi108_clear_pci_cfg_error(void);
44 44
45#endif /* _ASM_POWERPC_TSI108_PCI_H */ 45#endif /* _ASM_POWERPC_TSI108_PCI_H */
diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h
index f4f8b667d75b..13411be86041 100644
--- a/arch/powerpc/include/asm/unistd.h
+++ b/arch/powerpc/include/asm/unistd.h
@@ -12,7 +12,7 @@
12#include <uapi/asm/unistd.h> 12#include <uapi/asm/unistd.h>
13 13
14 14
15#define __NR_syscalls 364 15#define __NR_syscalls 366
16 16
17#define __NR__exit __NR_exit 17#define __NR__exit __NR_exit
18#define NR_syscalls __NR_syscalls 18#define NR_syscalls __NR_syscalls
diff --git a/arch/powerpc/include/uapi/asm/unistd.h b/arch/powerpc/include/uapi/asm/unistd.h
index e4aa173dae62..6337738018aa 100644
--- a/arch/powerpc/include/uapi/asm/unistd.h
+++ b/arch/powerpc/include/uapi/asm/unistd.h
@@ -386,5 +386,7 @@
386#define __NR_bpf 361 386#define __NR_bpf 361
387#define __NR_execveat 362 387#define __NR_execveat 362
388#define __NR_switch_endian 363 388#define __NR_switch_endian 363
389#define __NR_userfaultfd 364
390#define __NR_membarrier 365
389 391
390#endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */ 392#endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 45096033d37b..290559df1e8b 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -441,7 +441,7 @@ void migrate_irqs(void)
441 441
442 chip = irq_data_get_irq_chip(data); 442 chip = irq_data_get_irq_chip(data);
443 443
444 cpumask_and(mask, data->affinity, map); 444 cpumask_and(mask, irq_data_get_affinity_mask(data), map);
445 if (cpumask_any(mask) >= nr_cpu_ids) { 445 if (cpumask_any(mask) >= nr_cpu_ids) {
446 pr_warn("Breaking affinity for irq %i\n", irq); 446 pr_warn("Breaking affinity for irq %i\n", irq);
447 cpumask_copy(mask, map); 447 cpumask_copy(mask, map);
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index a1d0632d97c6..7587b2ae5f77 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -1032,7 +1032,13 @@ void pcibios_set_master(struct pci_dev *dev)
1032 1032
1033void pcibios_fixup_bus(struct pci_bus *bus) 1033void pcibios_fixup_bus(struct pci_bus *bus)
1034{ 1034{
1035 /* Fixup the bus */ 1035 /* When called from the generic PCI probe, read PCI<->PCI bridge
1036 * bases. This is -not- called when generating the PCI tree from
1037 * the OF device-tree.
1038 */
1039 pci_read_bridge_bases(bus);
1040
1041 /* Now fixup the bus bus */
1036 pcibios_setup_bus_self(bus); 1042 pcibios_setup_bus_self(bus);
1037 1043
1038 /* Now fixup devices on that bus */ 1044 /* Now fixup devices on that bus */
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index bb02e9f6944e..ad8c9db61237 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -38,6 +38,7 @@
38#include <asm/udbg.h> 38#include <asm/udbg.h>
39#include <asm/mmu_context.h> 39#include <asm/mmu_context.h>
40#include <asm/epapr_hcalls.h> 40#include <asm/epapr_hcalls.h>
41#include <asm/code-patching.h>
41 42
42#define DBG(fmt...) 43#define DBG(fmt...)
43 44
@@ -109,6 +110,8 @@ notrace unsigned long __init early_init(unsigned long dt_ptr)
109 * This is called very early on the boot process, after a minimal 110 * This is called very early on the boot process, after a minimal
110 * MMU environment has been set up but before MMU_init is called. 111 * MMU environment has been set up but before MMU_init is called.
111 */ 112 */
113extern unsigned int memset_nocache_branch; /* Insn to be replaced by NOP */
114
112notrace void __init machine_init(u64 dt_ptr) 115notrace void __init machine_init(u64 dt_ptr)
113{ 116{
114 lockdep_init(); 117 lockdep_init();
@@ -116,6 +119,9 @@ notrace void __init machine_init(u64 dt_ptr)
116 /* Enable early debugging if any specified (see udbg.h) */ 119 /* Enable early debugging if any specified (see udbg.h) */
117 udbg_early_init(); 120 udbg_early_init();
118 121
122 patch_instruction((unsigned int *)&memcpy, PPC_INST_NOP);
123 patch_instruction(&memset_nocache_branch, PPC_INST_NOP);
124
119 /* Do some early initialization based on the flat device tree */ 125 /* Do some early initialization based on the flat device tree */
120 early_init_devtree(__va(dt_ptr)); 126 early_init_devtree(__va(dt_ptr));
121 127
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index d75bf325f54a..099c79d8c160 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -53,6 +53,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
53 { "ext_intr", VCPU_STAT(ext_intr_exits) }, 53 { "ext_intr", VCPU_STAT(ext_intr_exits) },
54 { "queue_intr", VCPU_STAT(queue_intr) }, 54 { "queue_intr", VCPU_STAT(queue_intr) },
55 { "halt_successful_poll", VCPU_STAT(halt_successful_poll), }, 55 { "halt_successful_poll", VCPU_STAT(halt_successful_poll), },
56 { "halt_attempted_poll", VCPU_STAT(halt_attempted_poll), },
56 { "halt_wakeup", VCPU_STAT(halt_wakeup) }, 57 { "halt_wakeup", VCPU_STAT(halt_wakeup) },
57 { "pf_storage", VCPU_STAT(pf_storage) }, 58 { "pf_storage", VCPU_STAT(pf_storage) },
58 { "sp_storage", VCPU_STAT(sp_storage) }, 59 { "sp_storage", VCPU_STAT(sp_storage) },
@@ -828,12 +829,15 @@ int kvmppc_h_logical_ci_load(struct kvm_vcpu *vcpu)
828 unsigned long size = kvmppc_get_gpr(vcpu, 4); 829 unsigned long size = kvmppc_get_gpr(vcpu, 4);
829 unsigned long addr = kvmppc_get_gpr(vcpu, 5); 830 unsigned long addr = kvmppc_get_gpr(vcpu, 5);
830 u64 buf; 831 u64 buf;
832 int srcu_idx;
831 int ret; 833 int ret;
832 834
833 if (!is_power_of_2(size) || (size > sizeof(buf))) 835 if (!is_power_of_2(size) || (size > sizeof(buf)))
834 return H_TOO_HARD; 836 return H_TOO_HARD;
835 837
838 srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
836 ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, addr, size, &buf); 839 ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, addr, size, &buf);
840 srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
837 if (ret != 0) 841 if (ret != 0)
838 return H_TOO_HARD; 842 return H_TOO_HARD;
839 843
@@ -868,6 +872,7 @@ int kvmppc_h_logical_ci_store(struct kvm_vcpu *vcpu)
868 unsigned long addr = kvmppc_get_gpr(vcpu, 5); 872 unsigned long addr = kvmppc_get_gpr(vcpu, 5);
869 unsigned long val = kvmppc_get_gpr(vcpu, 6); 873 unsigned long val = kvmppc_get_gpr(vcpu, 6);
870 u64 buf; 874 u64 buf;
875 int srcu_idx;
871 int ret; 876 int ret;
872 877
873 switch (size) { 878 switch (size) {
@@ -891,7 +896,9 @@ int kvmppc_h_logical_ci_store(struct kvm_vcpu *vcpu)
891 return H_TOO_HARD; 896 return H_TOO_HARD;
892 } 897 }
893 898
899 srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
894 ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, addr, size, &buf); 900 ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, addr, size, &buf);
901 srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
895 if (ret != 0) 902 if (ret != 0)
896 return H_TOO_HARD; 903 return H_TOO_HARD;
897 904
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 9754e6815e52..228049786888 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -2692,9 +2692,13 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
2692 2692
2693 while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE && 2693 while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE &&
2694 (vc->vcore_state == VCORE_RUNNING || 2694 (vc->vcore_state == VCORE_RUNNING ||
2695 vc->vcore_state == VCORE_EXITING)) 2695 vc->vcore_state == VCORE_EXITING ||
2696 vc->vcore_state == VCORE_PIGGYBACK))
2696 kvmppc_wait_for_exec(vc, vcpu, TASK_UNINTERRUPTIBLE); 2697 kvmppc_wait_for_exec(vc, vcpu, TASK_UNINTERRUPTIBLE);
2697 2698
2699 if (vc->vcore_state == VCORE_PREEMPT && vc->runner == NULL)
2700 kvmppc_vcore_end_preempt(vc);
2701
2698 if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) { 2702 if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) {
2699 kvmppc_remove_runnable(vc, vcpu); 2703 kvmppc_remove_runnable(vc, vcpu);
2700 vcpu->stat.signal_exits++; 2704 vcpu->stat.signal_exits++;
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 2273dcacef39..b98889e9851d 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -1257,6 +1257,7 @@ mc_cont:
1257 bl kvmhv_accumulate_time 1257 bl kvmhv_accumulate_time
1258#endif 1258#endif
1259 1259
1260 mr r3, r12
1260 /* Increment exit count, poke other threads to exit */ 1261 /* Increment exit count, poke other threads to exit */
1261 bl kvmhv_commence_exit 1262 bl kvmhv_commence_exit
1262 nop 1263 nop
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index ae458f0fd061..fd5875179e5c 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -63,6 +63,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
63 { "dec", VCPU_STAT(dec_exits) }, 63 { "dec", VCPU_STAT(dec_exits) },
64 { "ext_intr", VCPU_STAT(ext_intr_exits) }, 64 { "ext_intr", VCPU_STAT(ext_intr_exits) },
65 { "halt_successful_poll", VCPU_STAT(halt_successful_poll) }, 65 { "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
66 { "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
66 { "halt_wakeup", VCPU_STAT(halt_wakeup) }, 67 { "halt_wakeup", VCPU_STAT(halt_wakeup) },
67 { "doorbell", VCPU_STAT(dbell_exits) }, 68 { "doorbell", VCPU_STAT(dbell_exits) },
68 { "guest doorbell", VCPU_STAT(gdbell_exits) }, 69 { "guest doorbell", VCPU_STAT(gdbell_exits) },
diff --git a/arch/powerpc/lib/copy_32.S b/arch/powerpc/lib/copy_32.S
index 2ef50c629470..c44df2dbedd5 100644
--- a/arch/powerpc/lib/copy_32.S
+++ b/arch/powerpc/lib/copy_32.S
@@ -73,6 +73,10 @@ CACHELINE_MASK = (L1_CACHE_BYTES-1)
73 * Use dcbz on the complete cache lines in the destination 73 * Use dcbz on the complete cache lines in the destination
74 * to set them to zero. This requires that the destination 74 * to set them to zero. This requires that the destination
75 * area is cacheable. -- paulus 75 * area is cacheable. -- paulus
76 *
77 * During early init, cache might not be active yet, so dcbz cannot be used.
78 * We therefore skip the optimised bloc that uses dcbz. This jump is
79 * replaced by a nop once cache is active. This is done in machine_init()
76 */ 80 */
77_GLOBAL(memset) 81_GLOBAL(memset)
78 rlwimi r4,r4,8,16,23 82 rlwimi r4,r4,8,16,23
@@ -88,6 +92,8 @@ _GLOBAL(memset)
88 subf r6,r0,r6 92 subf r6,r0,r6
89 cmplwi 0,r4,0 93 cmplwi 0,r4,0
90 bne 2f /* Use normal procedure if r4 is not zero */ 94 bne 2f /* Use normal procedure if r4 is not zero */
95_GLOBAL(memset_nocache_branch)
96 b 2f /* Skip optimised bloc until cache is enabled */
91 97
92 clrlwi r7,r6,32-LG_CACHELINE_BYTES 98 clrlwi r7,r6,32-LG_CACHELINE_BYTES
93 add r8,r7,r5 99 add r8,r7,r5
@@ -128,6 +134,10 @@ _GLOBAL(memset)
128 * the destination area is cacheable. 134 * the destination area is cacheable.
129 * We only use this version if the source and dest don't overlap. 135 * We only use this version if the source and dest don't overlap.
130 * -- paulus. 136 * -- paulus.
137 *
138 * During early init, cache might not be active yet, so dcbz cannot be used.
139 * We therefore jump to generic_memcpy which doesn't use dcbz. This jump is
140 * replaced by a nop once cache is active. This is done in machine_init()
131 */ 141 */
132_GLOBAL(memmove) 142_GLOBAL(memmove)
133 cmplw 0,r3,r4 143 cmplw 0,r3,r4
@@ -135,6 +145,7 @@ _GLOBAL(memmove)
135 /* fall through */ 145 /* fall through */
136 146
137_GLOBAL(memcpy) 147_GLOBAL(memcpy)
148 b generic_memcpy
138 add r7,r3,r5 /* test if the src & dst overlap */ 149 add r7,r3,r5 /* test if the src & dst overlap */
139 add r8,r4,r5 150 add r8,r4,r5
140 cmplw 0,r4,r7 151 cmplw 0,r4,r7
diff --git a/arch/powerpc/mm/hugepage-hash64.c b/arch/powerpc/mm/hugepage-hash64.c
index 43dafb9d6a46..4d87122cf6a7 100644
--- a/arch/powerpc/mm/hugepage-hash64.c
+++ b/arch/powerpc/mm/hugepage-hash64.c
@@ -85,7 +85,6 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
85 BUG_ON(index >= 4096); 85 BUG_ON(index >= 4096);
86 86
87 vpn = hpt_vpn(ea, vsid, ssize); 87 vpn = hpt_vpn(ea, vsid, ssize);
88 hash = hpt_hash(vpn, shift, ssize);
89 hpte_slot_array = get_hpte_slot_array(pmdp); 88 hpte_slot_array = get_hpte_slot_array(pmdp);
90 if (psize == MMU_PAGE_4K) { 89 if (psize == MMU_PAGE_4K) {
91 /* 90 /*
@@ -101,6 +100,7 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
101 valid = hpte_valid(hpte_slot_array, index); 100 valid = hpte_valid(hpte_slot_array, index);
102 if (valid) { 101 if (valid) {
103 /* update the hpte bits */ 102 /* update the hpte bits */
103 hash = hpt_hash(vpn, shift, ssize);
104 hidx = hpte_hash_index(hpte_slot_array, index); 104 hidx = hpte_hash_index(hpte_slot_array, index);
105 if (hidx & _PTEIDX_SECONDARY) 105 if (hidx & _PTEIDX_SECONDARY)
106 hash = ~hash; 106 hash = ~hash;
@@ -126,6 +126,7 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
126 if (!valid) { 126 if (!valid) {
127 unsigned long hpte_group; 127 unsigned long hpte_group;
128 128
129 hash = hpt_hash(vpn, shift, ssize);
129 /* insert new entry */ 130 /* insert new entry */
130 pa = pmd_pfn(__pmd(old_pmd)) << PAGE_SHIFT; 131 pa = pmd_pfn(__pmd(old_pmd)) << PAGE_SHIFT;
131 new_pmd |= _PAGE_HASHPTE; 132 new_pmd |= _PAGE_HASHPTE;
diff --git a/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c b/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c
index 11090ab4bf59..0035d146df73 100644
--- a/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c
+++ b/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c
@@ -104,9 +104,10 @@ cpld_pic_get_irq(int offset, u8 ignore, u8 __iomem *statusp,
104 return irq_linear_revmap(cpld_pic_host, cpld_irq); 104 return irq_linear_revmap(cpld_pic_host, cpld_irq);
105} 105}
106 106
107static void 107static void cpld_pic_cascade(struct irq_desc *desc)
108cpld_pic_cascade(unsigned int irq, struct irq_desc *desc)
109{ 108{
109 unsigned int irq;
110
110 irq = cpld_pic_get_irq(0, PCI_IGNORE, &cpld_regs->pci_status, 111 irq = cpld_pic_get_irq(0, PCI_IGNORE, &cpld_regs->pci_status,
111 &cpld_regs->pci_mask); 112 &cpld_regs->pci_mask);
112 if (irq != NO_IRQ) { 113 if (irq != NO_IRQ) {
diff --git a/arch/powerpc/platforms/52xx/media5200.c b/arch/powerpc/platforms/52xx/media5200.c
index 32cae33c4266..8fb95480fd73 100644
--- a/arch/powerpc/platforms/52xx/media5200.c
+++ b/arch/powerpc/platforms/52xx/media5200.c
@@ -80,7 +80,7 @@ static struct irq_chip media5200_irq_chip = {
80 .irq_mask_ack = media5200_irq_mask, 80 .irq_mask_ack = media5200_irq_mask,
81}; 81};
82 82
83void media5200_irq_cascade(unsigned int virq, struct irq_desc *desc) 83static void media5200_irq_cascade(struct irq_desc *desc)
84{ 84{
85 struct irq_chip *chip = irq_desc_get_chip(desc); 85 struct irq_chip *chip = irq_desc_get_chip(desc);
86 int sub_virq, val; 86 int sub_virq, val;
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_gpt.c b/arch/powerpc/platforms/52xx/mpc52xx_gpt.c
index 63016621aff8..78ac19aefa4d 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_gpt.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_gpt.c
@@ -191,7 +191,7 @@ static struct irq_chip mpc52xx_gpt_irq_chip = {
191 .irq_set_type = mpc52xx_gpt_irq_set_type, 191 .irq_set_type = mpc52xx_gpt_irq_set_type,
192}; 192};
193 193
194void mpc52xx_gpt_irq_cascade(unsigned int virq, struct irq_desc *desc) 194static void mpc52xx_gpt_irq_cascade(struct irq_desc *desc)
195{ 195{
196 struct mpc52xx_gpt_priv *gpt = irq_desc_get_handler_data(desc); 196 struct mpc52xx_gpt_priv *gpt = irq_desc_get_handler_data(desc);
197 int sub_virq; 197 int sub_virq;
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pic.c b/arch/powerpc/platforms/52xx/mpc52xx_pic.c
index 2944bc84b9d6..4fe2074c88cb 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_pic.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_pic.c
@@ -196,7 +196,7 @@ static int mpc52xx_extirq_set_type(struct irq_data *d, unsigned int flow_type)
196 ctrl_reg |= (type << (22 - (l2irq * 2))); 196 ctrl_reg |= (type << (22 - (l2irq * 2)));
197 out_be32(&intr->ctrl, ctrl_reg); 197 out_be32(&intr->ctrl, ctrl_reg);
198 198
199 __irq_set_handler_locked(d->irq, handler); 199 irq_set_handler_locked(d, handler);
200 200
201 return 0; 201 return 0;
202} 202}
diff --git a/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c b/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c
index 74861a7fb807..60e89fc9c753 100644
--- a/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c
+++ b/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c
@@ -78,7 +78,7 @@ static struct irq_chip pq2ads_pci_ic = {
78 .irq_disable = pq2ads_pci_mask_irq 78 .irq_disable = pq2ads_pci_mask_irq
79}; 79};
80 80
81static void pq2ads_pci_irq_demux(unsigned int irq, struct irq_desc *desc) 81static void pq2ads_pci_irq_demux(struct irq_desc *desc)
82{ 82{
83 struct pq2ads_pci_pic *priv = irq_desc_get_handler_data(desc); 83 struct pq2ads_pci_pic *priv = irq_desc_get_handler_data(desc);
84 u32 stat, mask, pend; 84 u32 stat, mask, pend;
diff --git a/arch/powerpc/platforms/85xx/common.c b/arch/powerpc/platforms/85xx/common.c
index 7bfb9b184dd4..23791de7b688 100644
--- a/arch/powerpc/platforms/85xx/common.c
+++ b/arch/powerpc/platforms/85xx/common.c
@@ -49,7 +49,7 @@ int __init mpc85xx_common_publish_devices(void)
49 return of_platform_bus_probe(NULL, mpc85xx_common_ids, NULL); 49 return of_platform_bus_probe(NULL, mpc85xx_common_ids, NULL);
50} 50}
51#ifdef CONFIG_CPM2 51#ifdef CONFIG_CPM2
52static void cpm2_cascade(unsigned int irq, struct irq_desc *desc) 52static void cpm2_cascade(struct irq_desc *desc)
53{ 53{
54 struct irq_chip *chip = irq_desc_get_chip(desc); 54 struct irq_chip *chip = irq_desc_get_chip(desc);
55 int cascade_irq; 55 int cascade_irq;
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_cds.c b/arch/powerpc/platforms/85xx/mpc85xx_cds.c
index b0753e222086..5ac70de3e48a 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_cds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_cds.c
@@ -192,8 +192,7 @@ void mpc85xx_cds_fixup_bus(struct pci_bus *bus)
192} 192}
193 193
194#ifdef CONFIG_PPC_I8259 194#ifdef CONFIG_PPC_I8259
195static void mpc85xx_8259_cascade_handler(unsigned int irq, 195static void mpc85xx_8259_cascade_handler(struct irq_desc *desc)
196 struct irq_desc *desc)
197{ 196{
198 unsigned int cascade_irq = i8259_irq(); 197 unsigned int cascade_irq = i8259_irq();
199 198
@@ -202,7 +201,7 @@ static void mpc85xx_8259_cascade_handler(unsigned int irq,
202 generic_handle_irq(cascade_irq); 201 generic_handle_irq(cascade_irq);
203 202
204 /* check for any interrupts from the shared IRQ line */ 203 /* check for any interrupts from the shared IRQ line */
205 handle_fasteoi_irq(irq, desc); 204 handle_fasteoi_irq(desc);
206} 205}
207 206
208static irqreturn_t mpc85xx_8259_cascade_action(int irq, void *dev_id) 207static irqreturn_t mpc85xx_8259_cascade_action(int irq, void *dev_id)
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_ds.c b/arch/powerpc/platforms/85xx/mpc85xx_ds.c
index ffdf02121a7c..f858306dba6a 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_ds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_ds.c
@@ -46,7 +46,7 @@
46#endif 46#endif
47 47
48#ifdef CONFIG_PPC_I8259 48#ifdef CONFIG_PPC_I8259
49static void mpc85xx_8259_cascade(unsigned int irq, struct irq_desc *desc) 49static void mpc85xx_8259_cascade(struct irq_desc *desc)
50{ 50{
51 struct irq_chip *chip = irq_desc_get_chip(desc); 51 struct irq_chip *chip = irq_desc_get_chip(desc);
52 unsigned int cascade_irq = i8259_irq(); 52 unsigned int cascade_irq = i8259_irq();
diff --git a/arch/powerpc/platforms/85xx/socrates_fpga_pic.c b/arch/powerpc/platforms/85xx/socrates_fpga_pic.c
index 55a9682b9529..b02d6a5bb035 100644
--- a/arch/powerpc/platforms/85xx/socrates_fpga_pic.c
+++ b/arch/powerpc/platforms/85xx/socrates_fpga_pic.c
@@ -91,9 +91,10 @@ static inline unsigned int socrates_fpga_pic_get_irq(unsigned int irq)
91 (irq_hw_number_t)i); 91 (irq_hw_number_t)i);
92} 92}
93 93
94void socrates_fpga_pic_cascade(unsigned int irq, struct irq_desc *desc) 94static void socrates_fpga_pic_cascade(struct irq_desc *desc)
95{ 95{
96 struct irq_chip *chip = irq_desc_get_chip(desc); 96 struct irq_chip *chip = irq_desc_get_chip(desc);
97 unsigned int irq = irq_desc_get_irq(desc);
97 unsigned int cascade_irq; 98 unsigned int cascade_irq;
98 99
99 /* 100 /*
diff --git a/arch/powerpc/platforms/86xx/pic.c b/arch/powerpc/platforms/86xx/pic.c
index d5b98c0f958a..845defa1fd19 100644
--- a/arch/powerpc/platforms/86xx/pic.c
+++ b/arch/powerpc/platforms/86xx/pic.c
@@ -17,7 +17,7 @@
17#include <asm/i8259.h> 17#include <asm/i8259.h>
18 18
19#ifdef CONFIG_PPC_I8259 19#ifdef CONFIG_PPC_I8259
20static void mpc86xx_8259_cascade(unsigned int irq, struct irq_desc *desc) 20static void mpc86xx_8259_cascade(struct irq_desc *desc)
21{ 21{
22 struct irq_chip *chip = irq_desc_get_chip(desc); 22 struct irq_chip *chip = irq_desc_get_chip(desc);
23 unsigned int cascade_irq = i8259_irq(); 23 unsigned int cascade_irq = i8259_irq();
diff --git a/arch/powerpc/platforms/8xx/m8xx_setup.c b/arch/powerpc/platforms/8xx/m8xx_setup.c
index d3037747031d..c289fc77b4ba 100644
--- a/arch/powerpc/platforms/8xx/m8xx_setup.c
+++ b/arch/powerpc/platforms/8xx/m8xx_setup.c
@@ -214,7 +214,7 @@ void mpc8xx_restart(char *cmd)
214 panic("Restart failed\n"); 214 panic("Restart failed\n");
215} 215}
216 216
217static void cpm_cascade(unsigned int irq, struct irq_desc *desc) 217static void cpm_cascade(struct irq_desc *desc)
218{ 218{
219 struct irq_chip *chip = irq_desc_get_chip(desc); 219 struct irq_chip *chip = irq_desc_get_chip(desc);
220 int cascade_irq = cpm_get_irq(); 220 int cascade_irq = cpm_get_irq();
diff --git a/arch/powerpc/platforms/cell/axon_msi.c b/arch/powerpc/platforms/cell/axon_msi.c
index 306888acb737..e0e68a1c0d3c 100644
--- a/arch/powerpc/platforms/cell/axon_msi.c
+++ b/arch/powerpc/platforms/cell/axon_msi.c
@@ -93,7 +93,7 @@ static void msic_dcr_write(struct axon_msic *msic, unsigned int dcr_n, u32 val)
93 dcr_write(msic->dcr_host, dcr_n, val); 93 dcr_write(msic->dcr_host, dcr_n, val);
94} 94}
95 95
96static void axon_msi_cascade(unsigned int irq, struct irq_desc *desc) 96static void axon_msi_cascade(struct irq_desc *desc)
97{ 97{
98 struct irq_chip *chip = irq_desc_get_chip(desc); 98 struct irq_chip *chip = irq_desc_get_chip(desc);
99 struct axon_msic *msic = irq_desc_get_handler_data(desc); 99 struct axon_msic *msic = irq_desc_get_handler_data(desc);
diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c
index a15f1efc295f..9f609fc8d331 100644
--- a/arch/powerpc/platforms/cell/interrupt.c
+++ b/arch/powerpc/platforms/cell/interrupt.c
@@ -99,11 +99,12 @@ static void iic_ioexc_eoi(struct irq_data *d)
99{ 99{
100} 100}
101 101
102static void iic_ioexc_cascade(unsigned int irq, struct irq_desc *desc) 102static void iic_ioexc_cascade(struct irq_desc *desc)
103{ 103{
104 struct irq_chip *chip = irq_desc_get_chip(desc); 104 struct irq_chip *chip = irq_desc_get_chip(desc);
105 struct cbe_iic_regs __iomem *node_iic = 105 struct cbe_iic_regs __iomem *node_iic =
106 (void __iomem *)irq_desc_get_handler_data(desc); 106 (void __iomem *)irq_desc_get_handler_data(desc);
107 unsigned int irq = irq_desc_get_irq(desc);
107 unsigned int base = (irq & 0xffffff00) | IIC_IRQ_TYPE_IOEXC; 108 unsigned int base = (irq & 0xffffff00) | IIC_IRQ_TYPE_IOEXC;
108 unsigned long bits, ack; 109 unsigned long bits, ack;
109 int cascade; 110 int cascade;
diff --git a/arch/powerpc/platforms/cell/spider-pic.c b/arch/powerpc/platforms/cell/spider-pic.c
index 1f72f4ab6353..9d27de62dc62 100644
--- a/arch/powerpc/platforms/cell/spider-pic.c
+++ b/arch/powerpc/platforms/cell/spider-pic.c
@@ -199,7 +199,7 @@ static const struct irq_domain_ops spider_host_ops = {
199 .xlate = spider_host_xlate, 199 .xlate = spider_host_xlate,
200}; 200};
201 201
202static void spider_irq_cascade(unsigned int irq, struct irq_desc *desc) 202static void spider_irq_cascade(struct irq_desc *desc)
203{ 203{
204 struct irq_chip *chip = irq_desc_get_chip(desc); 204 struct irq_chip *chip = irq_desc_get_chip(desc);
205 struct spider_pic *pic = irq_desc_get_handler_data(desc); 205 struct spider_pic *pic = irq_desc_get_handler_data(desc);
diff --git a/arch/powerpc/platforms/chrp/setup.c b/arch/powerpc/platforms/chrp/setup.c
index 15ebc4e8a151..987d1b8d68e3 100644
--- a/arch/powerpc/platforms/chrp/setup.c
+++ b/arch/powerpc/platforms/chrp/setup.c
@@ -363,7 +363,7 @@ void __init chrp_setup_arch(void)
363 if (ppc_md.progress) ppc_md.progress("Linux/PPC "UTS_RELEASE"\n", 0x0); 363 if (ppc_md.progress) ppc_md.progress("Linux/PPC "UTS_RELEASE"\n", 0x0);
364} 364}
365 365
366static void chrp_8259_cascade(unsigned int irq, struct irq_desc *desc) 366static void chrp_8259_cascade(struct irq_desc *desc)
367{ 367{
368 struct irq_chip *chip = irq_desc_get_chip(desc); 368 struct irq_chip *chip = irq_desc_get_chip(desc);
369 unsigned int cascade_irq = i8259_irq(); 369 unsigned int cascade_irq = i8259_irq();
diff --git a/arch/powerpc/platforms/embedded6xx/hlwd-pic.c b/arch/powerpc/platforms/embedded6xx/hlwd-pic.c
index 9dd154d6f89a..9b7975706bfc 100644
--- a/arch/powerpc/platforms/embedded6xx/hlwd-pic.c
+++ b/arch/powerpc/platforms/embedded6xx/hlwd-pic.c
@@ -120,8 +120,7 @@ static unsigned int __hlwd_pic_get_irq(struct irq_domain *h)
120 return irq_linear_revmap(h, irq); 120 return irq_linear_revmap(h, irq);
121} 121}
122 122
123static void hlwd_pic_irq_cascade(unsigned int cascade_virq, 123static void hlwd_pic_irq_cascade(struct irq_desc *desc)
124 struct irq_desc *desc)
125{ 124{
126 struct irq_chip *chip = irq_desc_get_chip(desc); 125 struct irq_chip *chip = irq_desc_get_chip(desc);
127 struct irq_domain *irq_domain = irq_desc_get_handler_data(desc); 126 struct irq_domain *irq_domain = irq_desc_get_handler_data(desc);
diff --git a/arch/powerpc/platforms/embedded6xx/mvme5100.c b/arch/powerpc/platforms/embedded6xx/mvme5100.c
index 1613303177e6..8f65aa3747f5 100644
--- a/arch/powerpc/platforms/embedded6xx/mvme5100.c
+++ b/arch/powerpc/platforms/embedded6xx/mvme5100.c
@@ -42,7 +42,7 @@
42static phys_addr_t pci_membase; 42static phys_addr_t pci_membase;
43static u_char *restart; 43static u_char *restart;
44 44
45static void mvme5100_8259_cascade(unsigned int irq, struct irq_desc *desc) 45static void mvme5100_8259_cascade(struct irq_desc *desc)
46{ 46{
47 struct irq_chip *chip = irq_desc_get_chip(desc); 47 struct irq_chip *chip = irq_desc_get_chip(desc);
48 unsigned int cascade_irq = i8259_irq(); 48 unsigned int cascade_irq = i8259_irq();
diff --git a/arch/powerpc/platforms/pasemi/msi.c b/arch/powerpc/platforms/pasemi/msi.c
index e66ef1943338..b304a9fe55cc 100644
--- a/arch/powerpc/platforms/pasemi/msi.c
+++ b/arch/powerpc/platforms/pasemi/msi.c
@@ -63,6 +63,7 @@ static struct irq_chip mpic_pasemi_msi_chip = {
63static void pasemi_msi_teardown_msi_irqs(struct pci_dev *pdev) 63static void pasemi_msi_teardown_msi_irqs(struct pci_dev *pdev)
64{ 64{
65 struct msi_desc *entry; 65 struct msi_desc *entry;
66 irq_hw_number_t hwirq;
66 67
67 pr_debug("pasemi_msi_teardown_msi_irqs, pdev %p\n", pdev); 68 pr_debug("pasemi_msi_teardown_msi_irqs, pdev %p\n", pdev);
68 69
@@ -70,10 +71,10 @@ static void pasemi_msi_teardown_msi_irqs(struct pci_dev *pdev)
70 if (entry->irq == NO_IRQ) 71 if (entry->irq == NO_IRQ)
71 continue; 72 continue;
72 73
74 hwirq = virq_to_hw(entry->irq);
73 irq_set_msi_desc(entry->irq, NULL); 75 irq_set_msi_desc(entry->irq, NULL);
74 msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap,
75 virq_to_hw(entry->irq), ALLOC_CHUNK);
76 irq_dispose_mapping(entry->irq); 76 irq_dispose_mapping(entry->irq);
77 msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap, hwirq, ALLOC_CHUNK);
77 } 78 }
78 79
79 return; 80 return;
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index 2927cd5c8303..414fd1a00fda 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -2049,9 +2049,23 @@ static long pnv_pci_ioda2_setup_default_config(struct pnv_ioda_pe *pe)
2049 struct iommu_table *tbl = NULL; 2049 struct iommu_table *tbl = NULL;
2050 long rc; 2050 long rc;
2051 2051
2052 /*
2053 * crashkernel= specifies the kdump kernel's maximum memory at
2054 * some offset and there is no guaranteed the result is a power
2055 * of 2, which will cause errors later.
2056 */
2057 const u64 max_memory = __rounddown_pow_of_two(memory_hotplug_max());
2058
2059 /*
2060 * In memory constrained environments, e.g. kdump kernel, the
2061 * DMA window can be larger than available memory, which will
2062 * cause errors later.
2063 */
2064 const u64 window_size = min((u64)pe->table_group.tce32_size, max_memory);
2065
2052 rc = pnv_pci_ioda2_create_table(&pe->table_group, 0, 2066 rc = pnv_pci_ioda2_create_table(&pe->table_group, 0,
2053 IOMMU_PAGE_SHIFT_4K, 2067 IOMMU_PAGE_SHIFT_4K,
2054 pe->table_group.tce32_size, 2068 window_size,
2055 POWERNV_IOMMU_DEFAULT_LEVELS, &tbl); 2069 POWERNV_IOMMU_DEFAULT_LEVELS, &tbl);
2056 if (rc) { 2070 if (rc) {
2057 pe_err(pe, "Failed to create 32-bit TCE table, err %ld", 2071 pe_err(pe, "Failed to create 32-bit TCE table, err %ld",
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
index 9b2480b265c0..f2dd77234240 100644
--- a/arch/powerpc/platforms/powernv/pci.c
+++ b/arch/powerpc/platforms/powernv/pci.c
@@ -99,6 +99,7 @@ void pnv_teardown_msi_irqs(struct pci_dev *pdev)
99 struct pci_controller *hose = pci_bus_to_host(pdev->bus); 99 struct pci_controller *hose = pci_bus_to_host(pdev->bus);
100 struct pnv_phb *phb = hose->private_data; 100 struct pnv_phb *phb = hose->private_data;
101 struct msi_desc *entry; 101 struct msi_desc *entry;
102 irq_hw_number_t hwirq;
102 103
103 if (WARN_ON(!phb)) 104 if (WARN_ON(!phb))
104 return; 105 return;
@@ -106,10 +107,10 @@ void pnv_teardown_msi_irqs(struct pci_dev *pdev)
106 for_each_pci_msi_entry(entry, pdev) { 107 for_each_pci_msi_entry(entry, pdev) {
107 if (entry->irq == NO_IRQ) 108 if (entry->irq == NO_IRQ)
108 continue; 109 continue;
110 hwirq = virq_to_hw(entry->irq);
109 irq_set_msi_desc(entry->irq, NULL); 111 irq_set_msi_desc(entry->irq, NULL);
110 msi_bitmap_free_hwirqs(&phb->msi_bmp,
111 virq_to_hw(entry->irq) - phb->msi_base, 1);
112 irq_dispose_mapping(entry->irq); 112 irq_dispose_mapping(entry->irq);
113 msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq - phb->msi_base, 1);
113 } 114 }
114} 115}
115#endif /* CONFIG_PCI_MSI */ 116#endif /* CONFIG_PCI_MSI */
diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c
index 47d9cebe7159..db17827eb746 100644
--- a/arch/powerpc/platforms/pseries/dlpar.c
+++ b/arch/powerpc/platforms/pseries/dlpar.c
@@ -422,8 +422,10 @@ static ssize_t dlpar_cpu_probe(const char *buf, size_t count)
422 422
423 dn = dlpar_configure_connector(cpu_to_be32(drc_index), parent); 423 dn = dlpar_configure_connector(cpu_to_be32(drc_index), parent);
424 of_node_put(parent); 424 of_node_put(parent);
425 if (!dn) 425 if (!dn) {
426 dlpar_release_drc(drc_index);
426 return -EINVAL; 427 return -EINVAL;
428 }
427 429
428 rc = dlpar_attach_node(dn); 430 rc = dlpar_attach_node(dn);
429 if (rc) { 431 if (rc) {
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index 39a74fad3e04..9a83eb71b030 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -111,7 +111,7 @@ static void __init fwnmi_init(void)
111 fwnmi_active = 1; 111 fwnmi_active = 1;
112} 112}
113 113
114static void pseries_8259_cascade(unsigned int irq, struct irq_desc *desc) 114static void pseries_8259_cascade(struct irq_desc *desc)
115{ 115{
116 struct irq_chip *chip = irq_desc_get_chip(desc); 116 struct irq_chip *chip = irq_desc_get_chip(desc);
117 unsigned int cascade_irq = i8259_irq(); 117 unsigned int cascade_irq = i8259_irq();
diff --git a/arch/powerpc/sysdev/cpm2_pic.c b/arch/powerpc/sysdev/cpm2_pic.c
index a11bd1d433ad..9e86074719a9 100644
--- a/arch/powerpc/sysdev/cpm2_pic.c
+++ b/arch/powerpc/sysdev/cpm2_pic.c
@@ -155,9 +155,9 @@ static int cpm2_set_irq_type(struct irq_data *d, unsigned int flow_type)
155 155
156 irqd_set_trigger_type(d, flow_type); 156 irqd_set_trigger_type(d, flow_type);
157 if (flow_type & IRQ_TYPE_LEVEL_LOW) 157 if (flow_type & IRQ_TYPE_LEVEL_LOW)
158 __irq_set_handler_locked(d->irq, handle_level_irq); 158 irq_set_handler_locked(d, handle_level_irq);
159 else 159 else
160 __irq_set_handler_locked(d->irq, handle_edge_irq); 160 irq_set_handler_locked(d, handle_edge_irq);
161 161
162 /* internal IRQ senses are LEVEL_LOW 162 /* internal IRQ senses are LEVEL_LOW
163 * EXT IRQ and Port C IRQ senses are programmable 163 * EXT IRQ and Port C IRQ senses are programmable
diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c
index 5916da1856a7..48a576aa47b9 100644
--- a/arch/powerpc/sysdev/fsl_msi.c
+++ b/arch/powerpc/sysdev/fsl_msi.c
@@ -128,15 +128,16 @@ static void fsl_teardown_msi_irqs(struct pci_dev *pdev)
128{ 128{
129 struct msi_desc *entry; 129 struct msi_desc *entry;
130 struct fsl_msi *msi_data; 130 struct fsl_msi *msi_data;
131 irq_hw_number_t hwirq;
131 132
132 for_each_pci_msi_entry(entry, pdev) { 133 for_each_pci_msi_entry(entry, pdev) {
133 if (entry->irq == NO_IRQ) 134 if (entry->irq == NO_IRQ)
134 continue; 135 continue;
136 hwirq = virq_to_hw(entry->irq);
135 msi_data = irq_get_chip_data(entry->irq); 137 msi_data = irq_get_chip_data(entry->irq);
136 irq_set_msi_desc(entry->irq, NULL); 138 irq_set_msi_desc(entry->irq, NULL);
137 msi_bitmap_free_hwirqs(&msi_data->bitmap,
138 virq_to_hw(entry->irq), 1);
139 irq_dispose_mapping(entry->irq); 139 irq_dispose_mapping(entry->irq);
140 msi_bitmap_free_hwirqs(&msi_data->bitmap, hwirq, 1);
140 } 141 }
141 142
142 return; 143 return;
diff --git a/arch/powerpc/sysdev/ge/ge_pic.c b/arch/powerpc/sysdev/ge/ge_pic.c
index 2bcb78bb3a15..d57b77573068 100644
--- a/arch/powerpc/sysdev/ge/ge_pic.c
+++ b/arch/powerpc/sysdev/ge/ge_pic.c
@@ -91,7 +91,7 @@ static int gef_pic_cascade_irq;
91 * should be masked out. 91 * should be masked out.
92 */ 92 */
93 93
94void gef_pic_cascade(unsigned int irq, struct irq_desc *desc) 94static void gef_pic_cascade(struct irq_desc *desc)
95{ 95{
96 struct irq_chip *chip = irq_desc_get_chip(desc); 96 struct irq_chip *chip = irq_desc_get_chip(desc);
97 unsigned int cascade_irq; 97 unsigned int cascade_irq;
diff --git a/arch/powerpc/sysdev/ge/ge_pic.h b/arch/powerpc/sysdev/ge/ge_pic.h
index 908dbd9826b6..5bf7e4b81e36 100644
--- a/arch/powerpc/sysdev/ge/ge_pic.h
+++ b/arch/powerpc/sysdev/ge/ge_pic.h
@@ -1,8 +1,6 @@
1#ifndef __GEF_PIC_H__ 1#ifndef __GEF_PIC_H__
2#define __GEF_PIC_H__ 2#define __GEF_PIC_H__
3 3
4
5void gef_pic_cascade(unsigned int, struct irq_desc *);
6unsigned int gef_pic_get_irq(void); 4unsigned int gef_pic_get_irq(void);
7void gef_pic_init(struct device_node *); 5void gef_pic_init(struct device_node *);
8 6
diff --git a/arch/powerpc/sysdev/ipic.c b/arch/powerpc/sysdev/ipic.c
index 6b2b68914810..b1297ab1599b 100644
--- a/arch/powerpc/sysdev/ipic.c
+++ b/arch/powerpc/sysdev/ipic.c
@@ -624,10 +624,10 @@ static int ipic_set_irq_type(struct irq_data *d, unsigned int flow_type)
624 624
625 irqd_set_trigger_type(d, flow_type); 625 irqd_set_trigger_type(d, flow_type);
626 if (flow_type & IRQ_TYPE_LEVEL_LOW) { 626 if (flow_type & IRQ_TYPE_LEVEL_LOW) {
627 __irq_set_handler_locked(d->irq, handle_level_irq); 627 irq_set_handler_locked(d, handle_level_irq);
628 d->chip = &ipic_level_irq_chip; 628 d->chip = &ipic_level_irq_chip;
629 } else { 629 } else {
630 __irq_set_handler_locked(d->irq, handle_edge_irq); 630 irq_set_handler_locked(d, handle_edge_irq);
631 d->chip = &ipic_edge_irq_chip; 631 d->chip = &ipic_edge_irq_chip;
632 } 632 }
633 633
diff --git a/arch/powerpc/sysdev/mpc8xx_pic.c b/arch/powerpc/sysdev/mpc8xx_pic.c
index d93a78be4346..9a423975853a 100644
--- a/arch/powerpc/sysdev/mpc8xx_pic.c
+++ b/arch/powerpc/sysdev/mpc8xx_pic.c
@@ -55,7 +55,7 @@ static int mpc8xx_set_irq_type(struct irq_data *d, unsigned int flow_type)
55 unsigned int siel = in_be32(&siu_reg->sc_siel); 55 unsigned int siel = in_be32(&siu_reg->sc_siel);
56 siel |= mpc8xx_irqd_to_bit(d); 56 siel |= mpc8xx_irqd_to_bit(d);
57 out_be32(&siu_reg->sc_siel, siel); 57 out_be32(&siu_reg->sc_siel, siel);
58 __irq_set_handler_locked(d->irq, handle_edge_irq); 58 irq_set_handler_locked(d, handle_edge_irq);
59 } 59 }
60 return 0; 60 return 0;
61} 61}
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index 97a8ae8f94dd..537e5db85a06 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -1181,7 +1181,7 @@ static int mpic_host_xlate(struct irq_domain *h, struct device_node *ct,
1181} 1181}
1182 1182
1183/* IRQ handler for a secondary MPIC cascaded from another IRQ controller */ 1183/* IRQ handler for a secondary MPIC cascaded from another IRQ controller */
1184static void mpic_cascade(unsigned int irq, struct irq_desc *desc) 1184static void mpic_cascade(struct irq_desc *desc)
1185{ 1185{
1186 struct irq_chip *chip = irq_desc_get_chip(desc); 1186 struct irq_chip *chip = irq_desc_get_chip(desc);
1187 struct mpic *mpic = irq_desc_get_handler_data(desc); 1187 struct mpic *mpic = irq_desc_get_handler_data(desc);
diff --git a/arch/powerpc/sysdev/mpic_u3msi.c b/arch/powerpc/sysdev/mpic_u3msi.c
index 70fbd5694a8b..2cbc7e29b85f 100644
--- a/arch/powerpc/sysdev/mpic_u3msi.c
+++ b/arch/powerpc/sysdev/mpic_u3msi.c
@@ -107,15 +107,16 @@ static u64 find_u4_magic_addr(struct pci_dev *pdev, unsigned int hwirq)
107static void u3msi_teardown_msi_irqs(struct pci_dev *pdev) 107static void u3msi_teardown_msi_irqs(struct pci_dev *pdev)
108{ 108{
109 struct msi_desc *entry; 109 struct msi_desc *entry;
110 irq_hw_number_t hwirq;
110 111
111 for_each_pci_msi_entry(entry, pdev) { 112 for_each_pci_msi_entry(entry, pdev) {
112 if (entry->irq == NO_IRQ) 113 if (entry->irq == NO_IRQ)
113 continue; 114 continue;
114 115
116 hwirq = virq_to_hw(entry->irq);
115 irq_set_msi_desc(entry->irq, NULL); 117 irq_set_msi_desc(entry->irq, NULL);
116 msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap,
117 virq_to_hw(entry->irq), 1);
118 irq_dispose_mapping(entry->irq); 118 irq_dispose_mapping(entry->irq);
119 msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap, hwirq, 1);
119 } 120 }
120 121
121 return; 122 return;
diff --git a/arch/powerpc/sysdev/ppc4xx_msi.c b/arch/powerpc/sysdev/ppc4xx_msi.c
index 24d0470c1698..8fb806135043 100644
--- a/arch/powerpc/sysdev/ppc4xx_msi.c
+++ b/arch/powerpc/sysdev/ppc4xx_msi.c
@@ -124,16 +124,17 @@ void ppc4xx_teardown_msi_irqs(struct pci_dev *dev)
124{ 124{
125 struct msi_desc *entry; 125 struct msi_desc *entry;
126 struct ppc4xx_msi *msi_data = &ppc4xx_msi; 126 struct ppc4xx_msi *msi_data = &ppc4xx_msi;
127 irq_hw_number_t hwirq;
127 128
128 dev_dbg(&dev->dev, "PCIE-MSI: tearing down msi irqs\n"); 129 dev_dbg(&dev->dev, "PCIE-MSI: tearing down msi irqs\n");
129 130
130 for_each_pci_msi_entry(entry, dev) { 131 for_each_pci_msi_entry(entry, dev) {
131 if (entry->irq == NO_IRQ) 132 if (entry->irq == NO_IRQ)
132 continue; 133 continue;
134 hwirq = virq_to_hw(entry->irq);
133 irq_set_msi_desc(entry->irq, NULL); 135 irq_set_msi_desc(entry->irq, NULL);
134 msi_bitmap_free_hwirqs(&msi_data->bitmap,
135 virq_to_hw(entry->irq), 1);
136 irq_dispose_mapping(entry->irq); 136 irq_dispose_mapping(entry->irq);
137 msi_bitmap_free_hwirqs(&msi_data->bitmap, hwirq, 1);
137 } 138 }
138} 139}
139 140
diff --git a/arch/powerpc/sysdev/qe_lib/qe_ic.c b/arch/powerpc/sysdev/qe_lib/qe_ic.c
index 47b352e4bc74..fbcc1f855a7f 100644
--- a/arch/powerpc/sysdev/qe_lib/qe_ic.c
+++ b/arch/powerpc/sysdev/qe_lib/qe_ic.c
@@ -311,8 +311,8 @@ unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic)
311} 311}
312 312
313void __init qe_ic_init(struct device_node *node, unsigned int flags, 313void __init qe_ic_init(struct device_node *node, unsigned int flags,
314 void (*low_handler)(unsigned int irq, struct irq_desc *desc), 314 void (*low_handler)(struct irq_desc *desc),
315 void (*high_handler)(unsigned int irq, struct irq_desc *desc)) 315 void (*high_handler)(struct irq_desc *desc))
316{ 316{
317 struct qe_ic *qe_ic; 317 struct qe_ic *qe_ic;
318 struct resource res; 318 struct resource res;
diff --git a/arch/powerpc/sysdev/tsi108_pci.c b/arch/powerpc/sysdev/tsi108_pci.c
index 57b54476e747..379de955aae3 100644
--- a/arch/powerpc/sysdev/tsi108_pci.c
+++ b/arch/powerpc/sysdev/tsi108_pci.c
@@ -428,7 +428,7 @@ void __init tsi108_pci_int_init(struct device_node *node)
428 init_pci_source(); 428 init_pci_source();
429} 429}
430 430
431void tsi108_irq_cascade(unsigned int irq, struct irq_desc *desc) 431void tsi108_irq_cascade(struct irq_desc *desc)
432{ 432{
433 struct irq_chip *chip = irq_desc_get_chip(desc); 433 struct irq_chip *chip = irq_desc_get_chip(desc);
434 unsigned int cascade_irq = get_pci_source(); 434 unsigned int cascade_irq = get_pci_source();
diff --git a/arch/powerpc/sysdev/uic.c b/arch/powerpc/sysdev/uic.c
index d77345338671..6893d8f236df 100644
--- a/arch/powerpc/sysdev/uic.c
+++ b/arch/powerpc/sysdev/uic.c
@@ -194,7 +194,7 @@ static const struct irq_domain_ops uic_host_ops = {
194 .xlate = irq_domain_xlate_twocell, 194 .xlate = irq_domain_xlate_twocell,
195}; 195};
196 196
197void uic_irq_cascade(unsigned int virq, struct irq_desc *desc) 197static void uic_irq_cascade(struct irq_desc *desc)
198{ 198{
199 struct irq_chip *chip = irq_desc_get_chip(desc); 199 struct irq_chip *chip = irq_desc_get_chip(desc);
200 struct irq_data *idata = irq_desc_get_irq_data(desc); 200 struct irq_data *idata = irq_desc_get_irq_data(desc);
diff --git a/arch/powerpc/sysdev/xics/ics-opal.c b/arch/powerpc/sysdev/xics/ics-opal.c
index 11ac964d5175..27c936c080a6 100644
--- a/arch/powerpc/sysdev/xics/ics-opal.c
+++ b/arch/powerpc/sysdev/xics/ics-opal.c
@@ -54,7 +54,7 @@ static void ics_opal_unmask_irq(struct irq_data *d)
54 if (hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS) 54 if (hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS)
55 return; 55 return;
56 56
57 server = xics_get_irq_server(d->irq, d->affinity, 0); 57 server = xics_get_irq_server(d->irq, irq_data_get_affinity_mask(d), 0);
58 server = ics_opal_mangle_server(server); 58 server = ics_opal_mangle_server(server);
59 59
60 rc = opal_set_xive(hw_irq, server, DEFAULT_PRIORITY); 60 rc = opal_set_xive(hw_irq, server, DEFAULT_PRIORITY);
diff --git a/arch/powerpc/sysdev/xics/ics-rtas.c b/arch/powerpc/sysdev/xics/ics-rtas.c
index d1c625c4cc5a..3854dd41558d 100644
--- a/arch/powerpc/sysdev/xics/ics-rtas.c
+++ b/arch/powerpc/sysdev/xics/ics-rtas.c
@@ -47,7 +47,7 @@ static void ics_rtas_unmask_irq(struct irq_data *d)
47 if (hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS) 47 if (hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS)
48 return; 48 return;
49 49
50 server = xics_get_irq_server(d->irq, d->affinity, 0); 50 server = xics_get_irq_server(d->irq, irq_data_get_affinity_mask(d), 0);
51 51
52 call_status = rtas_call(ibm_set_xive, 3, 1, NULL, hw_irq, server, 52 call_status = rtas_call(ibm_set_xive, 3, 1, NULL, hw_irq, server,
53 DEFAULT_PRIORITY); 53 DEFAULT_PRIORITY);
diff --git a/arch/powerpc/sysdev/xilinx_intc.c b/arch/powerpc/sysdev/xilinx_intc.c
index 43b8b275bc5c..0f52d7955796 100644
--- a/arch/powerpc/sysdev/xilinx_intc.c
+++ b/arch/powerpc/sysdev/xilinx_intc.c
@@ -222,7 +222,7 @@ int xilinx_intc_get_irq(void)
222/* 222/*
223 * Support code for cascading to 8259 interrupt controllers 223 * Support code for cascading to 8259 interrupt controllers
224 */ 224 */
225static void xilinx_i8259_cascade(unsigned int irq, struct irq_desc *desc) 225static void xilinx_i8259_cascade(struct irq_desc *desc)
226{ 226{
227 struct irq_chip *chip = irq_desc_get_chip(desc); 227 struct irq_chip *chip = irq_desc_get_chip(desc);
228 unsigned int cascade_irq = i8259_irq(); 228 unsigned int cascade_irq = i8259_irq();
diff --git a/arch/s390/configs/zfcpdump_defconfig b/arch/s390/configs/zfcpdump_defconfig
index 1b0184a0f7f2..92805d604173 100644
--- a/arch/s390/configs/zfcpdump_defconfig
+++ b/arch/s390/configs/zfcpdump_defconfig
@@ -1,7 +1,6 @@
1# CONFIG_SWAP is not set 1# CONFIG_SWAP is not set
2CONFIG_NO_HZ=y 2CONFIG_NO_HZ=y
3CONFIG_HIGH_RES_TIMERS=y 3CONFIG_HIGH_RES_TIMERS=y
4CONFIG_RCU_FAST_NO_HZ=y
5CONFIG_BLK_DEV_INITRD=y 4CONFIG_BLK_DEV_INITRD=y
6CONFIG_CC_OPTIMIZE_FOR_SIZE=y 5CONFIG_CC_OPTIMIZE_FOR_SIZE=y
7# CONFIG_COMPAT_BRK is not set 6# CONFIG_COMPAT_BRK is not set
@@ -54,10 +53,6 @@ CONFIG_RAW_DRIVER=y
54# CONFIG_MONWRITER is not set 53# CONFIG_MONWRITER is not set
55# CONFIG_S390_VMUR is not set 54# CONFIG_S390_VMUR is not set
56# CONFIG_HID is not set 55# CONFIG_HID is not set
57CONFIG_MEMSTICK=y
58CONFIG_MEMSTICK_DEBUG=y
59CONFIG_MEMSTICK_UNSAFE_RESUME=y
60CONFIG_MSPRO_BLOCK=y
61# CONFIG_IOMMU_SUPPORT is not set 56# CONFIG_IOMMU_SUPPORT is not set
62CONFIG_EXT2_FS=y 57CONFIG_EXT2_FS=y
63CONFIG_EXT3_FS=y 58CONFIG_EXT3_FS=y
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 3d012e071647..8ced426091e1 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -35,6 +35,7 @@
35 */ 35 */
36#define KVM_NR_IRQCHIPS 1 36#define KVM_NR_IRQCHIPS 1
37#define KVM_IRQCHIP_NUM_PINS 4096 37#define KVM_IRQCHIP_NUM_PINS 4096
38#define KVM_HALT_POLL_NS_DEFAULT 0
38 39
39#define SIGP_CTRL_C 0x80 40#define SIGP_CTRL_C 0x80
40#define SIGP_CTRL_SCN_MASK 0x3f 41#define SIGP_CTRL_SCN_MASK 0x3f
@@ -210,6 +211,7 @@ struct kvm_vcpu_stat {
210 u32 exit_validity; 211 u32 exit_validity;
211 u32 exit_instruction; 212 u32 exit_instruction;
212 u32 halt_successful_poll; 213 u32 halt_successful_poll;
214 u32 halt_attempted_poll;
213 u32 halt_wakeup; 215 u32 halt_wakeup;
214 u32 instruction_lctl; 216 u32 instruction_lctl;
215 u32 instruction_lctlg; 217 u32 instruction_lctlg;
diff --git a/arch/s390/include/asm/unistd.h b/arch/s390/include/asm/unistd.h
index 525cef73b085..02613bad8bbb 100644
--- a/arch/s390/include/asm/unistd.h
+++ b/arch/s390/include/asm/unistd.h
@@ -8,28 +8,8 @@
8 8
9#include <uapi/asm/unistd.h> 9#include <uapi/asm/unistd.h>
10 10
11
12#define __IGNORE_time 11#define __IGNORE_time
13 12
14/* Ignore system calls that are also reachable via sys_socketcall */
15#define __IGNORE_recvmmsg
16#define __IGNORE_sendmmsg
17#define __IGNORE_socket
18#define __IGNORE_socketpair
19#define __IGNORE_bind
20#define __IGNORE_connect
21#define __IGNORE_listen
22#define __IGNORE_accept4
23#define __IGNORE_getsockopt
24#define __IGNORE_setsockopt
25#define __IGNORE_getsockname
26#define __IGNORE_getpeername
27#define __IGNORE_sendto
28#define __IGNORE_sendmsg
29#define __IGNORE_recvfrom
30#define __IGNORE_recvmsg
31#define __IGNORE_shutdown
32
33#define __ARCH_WANT_OLD_READDIR 13#define __ARCH_WANT_OLD_READDIR
34#define __ARCH_WANT_SYS_ALARM 14#define __ARCH_WANT_SYS_ALARM
35#define __ARCH_WANT_SYS_GETHOSTNAME 15#define __ARCH_WANT_SYS_GETHOSTNAME
diff --git a/arch/s390/include/uapi/asm/unistd.h b/arch/s390/include/uapi/asm/unistd.h
index 59d2bb4e2d0c..a848adba1504 100644
--- a/arch/s390/include/uapi/asm/unistd.h
+++ b/arch/s390/include/uapi/asm/unistd.h
@@ -290,7 +290,26 @@
290#define __NR_s390_pci_mmio_write 352 290#define __NR_s390_pci_mmio_write 352
291#define __NR_s390_pci_mmio_read 353 291#define __NR_s390_pci_mmio_read 353
292#define __NR_execveat 354 292#define __NR_execveat 354
293#define NR_syscalls 355 293#define __NR_userfaultfd 355
294#define __NR_membarrier 356
295#define __NR_recvmmsg 357
296#define __NR_sendmmsg 358
297#define __NR_socket 359
298#define __NR_socketpair 360
299#define __NR_bind 361
300#define __NR_connect 362
301#define __NR_listen 363
302#define __NR_accept4 364
303#define __NR_getsockopt 365
304#define __NR_setsockopt 366
305#define __NR_getsockname 367
306#define __NR_getpeername 368
307#define __NR_sendto 369
308#define __NR_sendmsg 370
309#define __NR_recvfrom 371
310#define __NR_recvmsg 372
311#define __NR_shutdown 373
312#define NR_syscalls 374
294 313
295/* 314/*
296 * There are some system calls that are not present on 64 bit, some 315 * There are some system calls that are not present on 64 bit, some
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
index eb4664238613..e0f9d270b30f 100644
--- a/arch/s390/kernel/compat_signal.c
+++ b/arch/s390/kernel/compat_signal.c
@@ -48,6 +48,19 @@ typedef struct
48 struct ucontext32 uc; 48 struct ucontext32 uc;
49} rt_sigframe32; 49} rt_sigframe32;
50 50
51static inline void sigset_to_sigset32(unsigned long *set64,
52 compat_sigset_word *set32)
53{
54 set32[0] = (compat_sigset_word) set64[0];
55 set32[1] = (compat_sigset_word)(set64[0] >> 32);
56}
57
58static inline void sigset32_to_sigset(compat_sigset_word *set32,
59 unsigned long *set64)
60{
61 set64[0] = (unsigned long) set32[0] | ((unsigned long) set32[1] << 32);
62}
63
51int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from) 64int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
52{ 65{
53 int err; 66 int err;
@@ -281,10 +294,12 @@ COMPAT_SYSCALL_DEFINE0(sigreturn)
281{ 294{
282 struct pt_regs *regs = task_pt_regs(current); 295 struct pt_regs *regs = task_pt_regs(current);
283 sigframe32 __user *frame = (sigframe32 __user *)regs->gprs[15]; 296 sigframe32 __user *frame = (sigframe32 __user *)regs->gprs[15];
297 compat_sigset_t cset;
284 sigset_t set; 298 sigset_t set;
285 299
286 if (__copy_from_user(&set.sig, &frame->sc.oldmask, _SIGMASK_COPY_SIZE32)) 300 if (__copy_from_user(&cset.sig, &frame->sc.oldmask, _SIGMASK_COPY_SIZE32))
287 goto badframe; 301 goto badframe;
302 sigset32_to_sigset(cset.sig, set.sig);
288 set_current_blocked(&set); 303 set_current_blocked(&set);
289 save_fpu_regs(); 304 save_fpu_regs();
290 if (restore_sigregs32(regs, &frame->sregs)) 305 if (restore_sigregs32(regs, &frame->sregs))
@@ -302,10 +317,12 @@ COMPAT_SYSCALL_DEFINE0(rt_sigreturn)
302{ 317{
303 struct pt_regs *regs = task_pt_regs(current); 318 struct pt_regs *regs = task_pt_regs(current);
304 rt_sigframe32 __user *frame = (rt_sigframe32 __user *)regs->gprs[15]; 319 rt_sigframe32 __user *frame = (rt_sigframe32 __user *)regs->gprs[15];
320 compat_sigset_t cset;
305 sigset_t set; 321 sigset_t set;
306 322
307 if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) 323 if (__copy_from_user(&cset, &frame->uc.uc_sigmask, sizeof(cset)))
308 goto badframe; 324 goto badframe;
325 sigset32_to_sigset(cset.sig, set.sig);
309 set_current_blocked(&set); 326 set_current_blocked(&set);
310 if (compat_restore_altstack(&frame->uc.uc_stack)) 327 if (compat_restore_altstack(&frame->uc.uc_stack))
311 goto badframe; 328 goto badframe;
@@ -377,7 +394,7 @@ static int setup_frame32(struct ksignal *ksig, sigset_t *set,
377 return -EFAULT; 394 return -EFAULT;
378 395
379 /* Create struct sigcontext32 on the signal stack */ 396 /* Create struct sigcontext32 on the signal stack */
380 memcpy(&sc.oldmask, &set->sig, _SIGMASK_COPY_SIZE32); 397 sigset_to_sigset32(set->sig, sc.oldmask);
381 sc.sregs = (__u32)(unsigned long __force) &frame->sregs; 398 sc.sregs = (__u32)(unsigned long __force) &frame->sregs;
382 if (__copy_to_user(&frame->sc, &sc, sizeof(frame->sc))) 399 if (__copy_to_user(&frame->sc, &sc, sizeof(frame->sc)))
383 return -EFAULT; 400 return -EFAULT;
@@ -438,6 +455,7 @@ static int setup_frame32(struct ksignal *ksig, sigset_t *set,
438static int setup_rt_frame32(struct ksignal *ksig, sigset_t *set, 455static int setup_rt_frame32(struct ksignal *ksig, sigset_t *set,
439 struct pt_regs *regs) 456 struct pt_regs *regs)
440{ 457{
458 compat_sigset_t cset;
441 rt_sigframe32 __user *frame; 459 rt_sigframe32 __user *frame;
442 unsigned long restorer; 460 unsigned long restorer;
443 size_t frame_size; 461 size_t frame_size;
@@ -485,11 +503,12 @@ static int setup_rt_frame32(struct ksignal *ksig, sigset_t *set,
485 store_sigregs(); 503 store_sigregs();
486 504
487 /* Create ucontext on the signal stack. */ 505 /* Create ucontext on the signal stack. */
506 sigset_to_sigset32(set->sig, cset.sig);
488 if (__put_user(uc_flags, &frame->uc.uc_flags) || 507 if (__put_user(uc_flags, &frame->uc.uc_flags) ||
489 __put_user(0, &frame->uc.uc_link) || 508 __put_user(0, &frame->uc.uc_link) ||
490 __compat_save_altstack(&frame->uc.uc_stack, regs->gprs[15]) || 509 __compat_save_altstack(&frame->uc.uc_stack, regs->gprs[15]) ||
491 save_sigregs32(regs, &frame->uc.uc_mcontext) || 510 save_sigregs32(regs, &frame->uc.uc_mcontext) ||
492 __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)) || 511 __copy_to_user(&frame->uc.uc_sigmask, &cset, sizeof(cset)) ||
493 save_sigregs_ext32(regs, &frame->uc.uc_mcontext_ext)) 512 save_sigregs_ext32(regs, &frame->uc.uc_mcontext_ext))
494 return -EFAULT; 513 return -EFAULT;
495 514
diff --git a/arch/s390/kernel/compat_wrapper.c b/arch/s390/kernel/compat_wrapper.c
index f8498dde67b1..09f194052df3 100644
--- a/arch/s390/kernel/compat_wrapper.c
+++ b/arch/s390/kernel/compat_wrapper.c
@@ -52,15 +52,13 @@
52 * the regular system call wrappers. 52 * the regular system call wrappers.
53 */ 53 */
54#define COMPAT_SYSCALL_WRAPx(x, name, ...) \ 54#define COMPAT_SYSCALL_WRAPx(x, name, ...) \
55 asmlinkage long sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)); \ 55asmlinkage long sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)); \
56 asmlinkage long compat_sys##name(__MAP(x,__SC_COMPAT_TYPE,__VA_ARGS__));\ 56asmlinkage long notrace compat_sys##name(__MAP(x,__SC_COMPAT_TYPE,__VA_ARGS__));\
57 asmlinkage long compat_sys##name(__MAP(x,__SC_COMPAT_TYPE,__VA_ARGS__)) \ 57asmlinkage long notrace compat_sys##name(__MAP(x,__SC_COMPAT_TYPE,__VA_ARGS__)) \
58 { \ 58{ \
59 return sys##name(__MAP(x,__SC_COMPAT_CAST,__VA_ARGS__)); \ 59 return sys##name(__MAP(x,__SC_COMPAT_CAST,__VA_ARGS__)); \
60 } 60}
61 61
62COMPAT_SYSCALL_WRAP1(exit, int, error_code);
63COMPAT_SYSCALL_WRAP1(close, unsigned int, fd);
64COMPAT_SYSCALL_WRAP2(creat, const char __user *, pathname, umode_t, mode); 62COMPAT_SYSCALL_WRAP2(creat, const char __user *, pathname, umode_t, mode);
65COMPAT_SYSCALL_WRAP2(link, const char __user *, oldname, const char __user *, newname); 63COMPAT_SYSCALL_WRAP2(link, const char __user *, oldname, const char __user *, newname);
66COMPAT_SYSCALL_WRAP1(unlink, const char __user *, pathname); 64COMPAT_SYSCALL_WRAP1(unlink, const char __user *, pathname);
@@ -68,23 +66,16 @@ COMPAT_SYSCALL_WRAP1(chdir, const char __user *, filename);
68COMPAT_SYSCALL_WRAP3(mknod, const char __user *, filename, umode_t, mode, unsigned, dev); 66COMPAT_SYSCALL_WRAP3(mknod, const char __user *, filename, umode_t, mode, unsigned, dev);
69COMPAT_SYSCALL_WRAP2(chmod, const char __user *, filename, umode_t, mode); 67COMPAT_SYSCALL_WRAP2(chmod, const char __user *, filename, umode_t, mode);
70COMPAT_SYSCALL_WRAP1(oldumount, char __user *, name); 68COMPAT_SYSCALL_WRAP1(oldumount, char __user *, name);
71COMPAT_SYSCALL_WRAP1(alarm, unsigned int, seconds);
72COMPAT_SYSCALL_WRAP2(access, const char __user *, filename, int, mode); 69COMPAT_SYSCALL_WRAP2(access, const char __user *, filename, int, mode);
73COMPAT_SYSCALL_WRAP1(nice, int, increment);
74COMPAT_SYSCALL_WRAP2(kill, int, pid, int, sig);
75COMPAT_SYSCALL_WRAP2(rename, const char __user *, oldname, const char __user *, newname); 70COMPAT_SYSCALL_WRAP2(rename, const char __user *, oldname, const char __user *, newname);
76COMPAT_SYSCALL_WRAP2(mkdir, const char __user *, pathname, umode_t, mode); 71COMPAT_SYSCALL_WRAP2(mkdir, const char __user *, pathname, umode_t, mode);
77COMPAT_SYSCALL_WRAP1(rmdir, const char __user *, pathname); 72COMPAT_SYSCALL_WRAP1(rmdir, const char __user *, pathname);
78COMPAT_SYSCALL_WRAP1(dup, unsigned int, fildes);
79COMPAT_SYSCALL_WRAP1(pipe, int __user *, fildes); 73COMPAT_SYSCALL_WRAP1(pipe, int __user *, fildes);
80COMPAT_SYSCALL_WRAP1(brk, unsigned long, brk); 74COMPAT_SYSCALL_WRAP1(brk, unsigned long, brk);
81COMPAT_SYSCALL_WRAP2(signal, int, sig, __sighandler_t, handler); 75COMPAT_SYSCALL_WRAP2(signal, int, sig, __sighandler_t, handler);
82COMPAT_SYSCALL_WRAP1(acct, const char __user *, name); 76COMPAT_SYSCALL_WRAP1(acct, const char __user *, name);
83COMPAT_SYSCALL_WRAP2(umount, char __user *, name, int, flags); 77COMPAT_SYSCALL_WRAP2(umount, char __user *, name, int, flags);
84COMPAT_SYSCALL_WRAP2(setpgid, pid_t, pid, pid_t, pgid);
85COMPAT_SYSCALL_WRAP1(umask, int, mask);
86COMPAT_SYSCALL_WRAP1(chroot, const char __user *, filename); 78COMPAT_SYSCALL_WRAP1(chroot, const char __user *, filename);
87COMPAT_SYSCALL_WRAP2(dup2, unsigned int, oldfd, unsigned int, newfd);
88COMPAT_SYSCALL_WRAP3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask); 79COMPAT_SYSCALL_WRAP3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask);
89COMPAT_SYSCALL_WRAP2(sethostname, char __user *, name, int, len); 80COMPAT_SYSCALL_WRAP2(sethostname, char __user *, name, int, len);
90COMPAT_SYSCALL_WRAP2(symlink, const char __user *, old, const char __user *, new); 81COMPAT_SYSCALL_WRAP2(symlink, const char __user *, old, const char __user *, new);
@@ -93,37 +84,23 @@ COMPAT_SYSCALL_WRAP1(uselib, const char __user *, library);
93COMPAT_SYSCALL_WRAP2(swapon, const char __user *, specialfile, int, swap_flags); 84COMPAT_SYSCALL_WRAP2(swapon, const char __user *, specialfile, int, swap_flags);
94COMPAT_SYSCALL_WRAP4(reboot, int, magic1, int, magic2, unsigned int, cmd, void __user *, arg); 85COMPAT_SYSCALL_WRAP4(reboot, int, magic1, int, magic2, unsigned int, cmd, void __user *, arg);
95COMPAT_SYSCALL_WRAP2(munmap, unsigned long, addr, size_t, len); 86COMPAT_SYSCALL_WRAP2(munmap, unsigned long, addr, size_t, len);
96COMPAT_SYSCALL_WRAP2(fchmod, unsigned int, fd, umode_t, mode);
97COMPAT_SYSCALL_WRAP2(getpriority, int, which, int, who);
98COMPAT_SYSCALL_WRAP3(setpriority, int, which, int, who, int, niceval);
99COMPAT_SYSCALL_WRAP3(syslog, int, type, char __user *, buf, int, len); 87COMPAT_SYSCALL_WRAP3(syslog, int, type, char __user *, buf, int, len);
100COMPAT_SYSCALL_WRAP1(swapoff, const char __user *, specialfile); 88COMPAT_SYSCALL_WRAP1(swapoff, const char __user *, specialfile);
101COMPAT_SYSCALL_WRAP1(fsync, unsigned int, fd);
102COMPAT_SYSCALL_WRAP2(setdomainname, char __user *, name, int, len); 89COMPAT_SYSCALL_WRAP2(setdomainname, char __user *, name, int, len);
103COMPAT_SYSCALL_WRAP1(newuname, struct new_utsname __user *, name); 90COMPAT_SYSCALL_WRAP1(newuname, struct new_utsname __user *, name);
104COMPAT_SYSCALL_WRAP3(mprotect, unsigned long, start, size_t, len, unsigned long, prot); 91COMPAT_SYSCALL_WRAP3(mprotect, unsigned long, start, size_t, len, unsigned long, prot);
105COMPAT_SYSCALL_WRAP3(init_module, void __user *, umod, unsigned long, len, const char __user *, uargs); 92COMPAT_SYSCALL_WRAP3(init_module, void __user *, umod, unsigned long, len, const char __user *, uargs);
106COMPAT_SYSCALL_WRAP2(delete_module, const char __user *, name_user, unsigned int, flags); 93COMPAT_SYSCALL_WRAP2(delete_module, const char __user *, name_user, unsigned int, flags);
107COMPAT_SYSCALL_WRAP4(quotactl, unsigned int, cmd, const char __user *, special, qid_t, id, void __user *, addr); 94COMPAT_SYSCALL_WRAP4(quotactl, unsigned int, cmd, const char __user *, special, qid_t, id, void __user *, addr);
108COMPAT_SYSCALL_WRAP1(getpgid, pid_t, pid);
109COMPAT_SYSCALL_WRAP1(fchdir, unsigned int, fd);
110COMPAT_SYSCALL_WRAP2(bdflush, int, func, long, data); 95COMPAT_SYSCALL_WRAP2(bdflush, int, func, long, data);
111COMPAT_SYSCALL_WRAP3(sysfs, int, option, unsigned long, arg1, unsigned long, arg2); 96COMPAT_SYSCALL_WRAP3(sysfs, int, option, unsigned long, arg1, unsigned long, arg2);
112COMPAT_SYSCALL_WRAP1(s390_personality, unsigned int, personality);
113COMPAT_SYSCALL_WRAP5(llseek, unsigned int, fd, unsigned long, high, unsigned long, low, loff_t __user *, result, unsigned int, whence); 97COMPAT_SYSCALL_WRAP5(llseek, unsigned int, fd, unsigned long, high, unsigned long, low, loff_t __user *, result, unsigned int, whence);
114COMPAT_SYSCALL_WRAP2(flock, unsigned int, fd, unsigned int, cmd);
115COMPAT_SYSCALL_WRAP3(msync, unsigned long, start, size_t, len, int, flags); 98COMPAT_SYSCALL_WRAP3(msync, unsigned long, start, size_t, len, int, flags);
116COMPAT_SYSCALL_WRAP1(getsid, pid_t, pid);
117COMPAT_SYSCALL_WRAP1(fdatasync, unsigned int, fd);
118COMPAT_SYSCALL_WRAP2(mlock, unsigned long, start, size_t, len); 99COMPAT_SYSCALL_WRAP2(mlock, unsigned long, start, size_t, len);
119COMPAT_SYSCALL_WRAP2(munlock, unsigned long, start, size_t, len); 100COMPAT_SYSCALL_WRAP2(munlock, unsigned long, start, size_t, len);
120COMPAT_SYSCALL_WRAP1(mlockall, int, flags);
121COMPAT_SYSCALL_WRAP2(sched_setparam, pid_t, pid, struct sched_param __user *, param); 101COMPAT_SYSCALL_WRAP2(sched_setparam, pid_t, pid, struct sched_param __user *, param);
122COMPAT_SYSCALL_WRAP2(sched_getparam, pid_t, pid, struct sched_param __user *, param); 102COMPAT_SYSCALL_WRAP2(sched_getparam, pid_t, pid, struct sched_param __user *, param);
123COMPAT_SYSCALL_WRAP3(sched_setscheduler, pid_t, pid, int, policy, struct sched_param __user *, param); 103COMPAT_SYSCALL_WRAP3(sched_setscheduler, pid_t, pid, int, policy, struct sched_param __user *, param);
124COMPAT_SYSCALL_WRAP1(sched_getscheduler, pid_t, pid);
125COMPAT_SYSCALL_WRAP1(sched_get_priority_max, int, policy);
126COMPAT_SYSCALL_WRAP1(sched_get_priority_min, int, policy);
127COMPAT_SYSCALL_WRAP5(mremap, unsigned long, addr, unsigned long, old_len, unsigned long, new_len, unsigned long, flags, unsigned long, new_addr); 104COMPAT_SYSCALL_WRAP5(mremap, unsigned long, addr, unsigned long, old_len, unsigned long, new_len, unsigned long, flags, unsigned long, new_addr);
128COMPAT_SYSCALL_WRAP3(poll, struct pollfd __user *, ufds, unsigned int, nfds, int, timeout); 105COMPAT_SYSCALL_WRAP3(poll, struct pollfd __user *, ufds, unsigned int, nfds, int, timeout);
129COMPAT_SYSCALL_WRAP5(prctl, int, option, unsigned long, arg2, unsigned long, arg3, unsigned long, arg4, unsigned long, arg5); 106COMPAT_SYSCALL_WRAP5(prctl, int, option, unsigned long, arg2, unsigned long, arg3, unsigned long, arg4, unsigned long, arg5);
@@ -131,20 +108,11 @@ COMPAT_SYSCALL_WRAP2(getcwd, char __user *, buf, unsigned long, size);
131COMPAT_SYSCALL_WRAP2(capget, cap_user_header_t, header, cap_user_data_t, dataptr); 108COMPAT_SYSCALL_WRAP2(capget, cap_user_header_t, header, cap_user_data_t, dataptr);
132COMPAT_SYSCALL_WRAP2(capset, cap_user_header_t, header, const cap_user_data_t, data); 109COMPAT_SYSCALL_WRAP2(capset, cap_user_header_t, header, const cap_user_data_t, data);
133COMPAT_SYSCALL_WRAP3(lchown, const char __user *, filename, uid_t, user, gid_t, group); 110COMPAT_SYSCALL_WRAP3(lchown, const char __user *, filename, uid_t, user, gid_t, group);
134COMPAT_SYSCALL_WRAP2(setreuid, uid_t, ruid, uid_t, euid);
135COMPAT_SYSCALL_WRAP2(setregid, gid_t, rgid, gid_t, egid);
136COMPAT_SYSCALL_WRAP2(getgroups, int, gidsetsize, gid_t __user *, grouplist); 111COMPAT_SYSCALL_WRAP2(getgroups, int, gidsetsize, gid_t __user *, grouplist);
137COMPAT_SYSCALL_WRAP2(setgroups, int, gidsetsize, gid_t __user *, grouplist); 112COMPAT_SYSCALL_WRAP2(setgroups, int, gidsetsize, gid_t __user *, grouplist);
138COMPAT_SYSCALL_WRAP3(fchown, unsigned int, fd, uid_t, user, gid_t, group);
139COMPAT_SYSCALL_WRAP3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid);
140COMPAT_SYSCALL_WRAP3(getresuid, uid_t __user *, ruid, uid_t __user *, euid, uid_t __user *, suid); 113COMPAT_SYSCALL_WRAP3(getresuid, uid_t __user *, ruid, uid_t __user *, euid, uid_t __user *, suid);
141COMPAT_SYSCALL_WRAP3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid);
142COMPAT_SYSCALL_WRAP3(getresgid, gid_t __user *, rgid, gid_t __user *, egid, gid_t __user *, sgid); 114COMPAT_SYSCALL_WRAP3(getresgid, gid_t __user *, rgid, gid_t __user *, egid, gid_t __user *, sgid);
143COMPAT_SYSCALL_WRAP3(chown, const char __user *, filename, uid_t, user, gid_t, group); 115COMPAT_SYSCALL_WRAP3(chown, const char __user *, filename, uid_t, user, gid_t, group);
144COMPAT_SYSCALL_WRAP1(setuid, uid_t, uid);
145COMPAT_SYSCALL_WRAP1(setgid, gid_t, gid);
146COMPAT_SYSCALL_WRAP1(setfsuid, uid_t, uid);
147COMPAT_SYSCALL_WRAP1(setfsgid, gid_t, gid);
148COMPAT_SYSCALL_WRAP2(pivot_root, const char __user *, new_root, const char __user *, put_old); 116COMPAT_SYSCALL_WRAP2(pivot_root, const char __user *, new_root, const char __user *, put_old);
149COMPAT_SYSCALL_WRAP3(mincore, unsigned long, start, size_t, len, unsigned char __user *, vec); 117COMPAT_SYSCALL_WRAP3(mincore, unsigned long, start, size_t, len, unsigned char __user *, vec);
150COMPAT_SYSCALL_WRAP3(madvise, unsigned long, start, size_t, len, int, behavior); 118COMPAT_SYSCALL_WRAP3(madvise, unsigned long, start, size_t, len, int, behavior);
@@ -161,23 +129,16 @@ COMPAT_SYSCALL_WRAP3(flistxattr, int, fd, char __user *, list, size_t, size);
161COMPAT_SYSCALL_WRAP2(removexattr, const char __user *, path, const char __user *, name); 129COMPAT_SYSCALL_WRAP2(removexattr, const char __user *, path, const char __user *, name);
162COMPAT_SYSCALL_WRAP2(lremovexattr, const char __user *, path, const char __user *, name); 130COMPAT_SYSCALL_WRAP2(lremovexattr, const char __user *, path, const char __user *, name);
163COMPAT_SYSCALL_WRAP2(fremovexattr, int, fd, const char __user *, name); 131COMPAT_SYSCALL_WRAP2(fremovexattr, int, fd, const char __user *, name);
164COMPAT_SYSCALL_WRAP1(exit_group, int, error_code);
165COMPAT_SYSCALL_WRAP1(set_tid_address, int __user *, tidptr); 132COMPAT_SYSCALL_WRAP1(set_tid_address, int __user *, tidptr);
166COMPAT_SYSCALL_WRAP1(epoll_create, int, size);
167COMPAT_SYSCALL_WRAP4(epoll_ctl, int, epfd, int, op, int, fd, struct epoll_event __user *, event); 133COMPAT_SYSCALL_WRAP4(epoll_ctl, int, epfd, int, op, int, fd, struct epoll_event __user *, event);
168COMPAT_SYSCALL_WRAP4(epoll_wait, int, epfd, struct epoll_event __user *, events, int, maxevents, int, timeout); 134COMPAT_SYSCALL_WRAP4(epoll_wait, int, epfd, struct epoll_event __user *, events, int, maxevents, int, timeout);
169COMPAT_SYSCALL_WRAP1(timer_getoverrun, timer_t, timer_id);
170COMPAT_SYSCALL_WRAP1(timer_delete, compat_timer_t, compat_timer_id);
171COMPAT_SYSCALL_WRAP1(io_destroy, aio_context_t, ctx); 135COMPAT_SYSCALL_WRAP1(io_destroy, aio_context_t, ctx);
172COMPAT_SYSCALL_WRAP3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb, struct io_event __user *, result); 136COMPAT_SYSCALL_WRAP3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb, struct io_event __user *, result);
173COMPAT_SYSCALL_WRAP1(mq_unlink, const char __user *, name); 137COMPAT_SYSCALL_WRAP1(mq_unlink, const char __user *, name);
174COMPAT_SYSCALL_WRAP5(add_key, const char __user *, tp, const char __user *, dsc, const void __user *, pld, size_t, len, key_serial_t, id); 138COMPAT_SYSCALL_WRAP5(add_key, const char __user *, tp, const char __user *, dsc, const void __user *, pld, size_t, len, key_serial_t, id);
175COMPAT_SYSCALL_WRAP4(request_key, const char __user *, tp, const char __user *, dsc, const char __user *, info, key_serial_t, id); 139COMPAT_SYSCALL_WRAP4(request_key, const char __user *, tp, const char __user *, dsc, const char __user *, info, key_serial_t, id);
176COMPAT_SYSCALL_WRAP5(remap_file_pages, unsigned long, start, unsigned long, size, unsigned long, prot, unsigned long, pgoff, unsigned long, flags); 140COMPAT_SYSCALL_WRAP5(remap_file_pages, unsigned long, start, unsigned long, size, unsigned long, prot, unsigned long, pgoff, unsigned long, flags);
177COMPAT_SYSCALL_WRAP3(ioprio_set, int, which, int, who, int, ioprio);
178COMPAT_SYSCALL_WRAP2(ioprio_get, int, which, int, who);
179COMPAT_SYSCALL_WRAP3(inotify_add_watch, int, fd, const char __user *, path, u32, mask); 141COMPAT_SYSCALL_WRAP3(inotify_add_watch, int, fd, const char __user *, path, u32, mask);
180COMPAT_SYSCALL_WRAP2(inotify_rm_watch, int, fd, __s32, wd);
181COMPAT_SYSCALL_WRAP3(mkdirat, int, dfd, const char __user *, pathname, umode_t, mode); 142COMPAT_SYSCALL_WRAP3(mkdirat, int, dfd, const char __user *, pathname, umode_t, mode);
182COMPAT_SYSCALL_WRAP4(mknodat, int, dfd, const char __user *, filename, umode_t, mode, unsigned, dev); 143COMPAT_SYSCALL_WRAP4(mknodat, int, dfd, const char __user *, filename, umode_t, mode, unsigned, dev);
183COMPAT_SYSCALL_WRAP5(fchownat, int, dfd, const char __user *, filename, uid_t, user, gid_t, group, int, flag); 144COMPAT_SYSCALL_WRAP5(fchownat, int, dfd, const char __user *, filename, uid_t, user, gid_t, group, int, flag);
@@ -192,23 +153,11 @@ COMPAT_SYSCALL_WRAP1(unshare, unsigned long, unshare_flags);
192COMPAT_SYSCALL_WRAP6(splice, int, fd_in, loff_t __user *, off_in, int, fd_out, loff_t __user *, off_out, size_t, len, unsigned int, flags); 153COMPAT_SYSCALL_WRAP6(splice, int, fd_in, loff_t __user *, off_in, int, fd_out, loff_t __user *, off_out, size_t, len, unsigned int, flags);
193COMPAT_SYSCALL_WRAP4(tee, int, fdin, int, fdout, size_t, len, unsigned int, flags); 154COMPAT_SYSCALL_WRAP4(tee, int, fdin, int, fdout, size_t, len, unsigned int, flags);
194COMPAT_SYSCALL_WRAP3(getcpu, unsigned __user *, cpu, unsigned __user *, node, struct getcpu_cache __user *, cache); 155COMPAT_SYSCALL_WRAP3(getcpu, unsigned __user *, cpu, unsigned __user *, node, struct getcpu_cache __user *, cache);
195COMPAT_SYSCALL_WRAP1(eventfd, unsigned int, count);
196COMPAT_SYSCALL_WRAP2(timerfd_create, int, clockid, int, flags);
197COMPAT_SYSCALL_WRAP2(eventfd2, unsigned int, count, int, flags);
198COMPAT_SYSCALL_WRAP1(inotify_init1, int, flags);
199COMPAT_SYSCALL_WRAP2(pipe2, int __user *, fildes, int, flags); 156COMPAT_SYSCALL_WRAP2(pipe2, int __user *, fildes, int, flags);
200COMPAT_SYSCALL_WRAP3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags);
201COMPAT_SYSCALL_WRAP1(epoll_create1, int, flags);
202COMPAT_SYSCALL_WRAP2(tkill, int, pid, int, sig);
203COMPAT_SYSCALL_WRAP3(tgkill, int, tgid, int, pid, int, sig);
204COMPAT_SYSCALL_WRAP5(perf_event_open, struct perf_event_attr __user *, attr_uptr, pid_t, pid, int, cpu, int, group_fd, unsigned long, flags); 157COMPAT_SYSCALL_WRAP5(perf_event_open, struct perf_event_attr __user *, attr_uptr, pid_t, pid, int, cpu, int, group_fd, unsigned long, flags);
205COMPAT_SYSCALL_WRAP5(clone, unsigned long, newsp, unsigned long, clone_flags, int __user *, parent_tidptr, int __user *, child_tidptr, unsigned long, tls); 158COMPAT_SYSCALL_WRAP5(clone, unsigned long, newsp, unsigned long, clone_flags, int __user *, parent_tidptr, int __user *, child_tidptr, unsigned long, tls);
206COMPAT_SYSCALL_WRAP2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags);
207COMPAT_SYSCALL_WRAP4(prlimit64, pid_t, pid, unsigned int, resource, const struct rlimit64 __user *, new_rlim, struct rlimit64 __user *, old_rlim); 159COMPAT_SYSCALL_WRAP4(prlimit64, pid_t, pid, unsigned int, resource, const struct rlimit64 __user *, new_rlim, struct rlimit64 __user *, old_rlim);
208COMPAT_SYSCALL_WRAP5(name_to_handle_at, int, dfd, const char __user *, name, struct file_handle __user *, handle, int __user *, mnt_id, int, flag); 160COMPAT_SYSCALL_WRAP5(name_to_handle_at, int, dfd, const char __user *, name, struct file_handle __user *, handle, int __user *, mnt_id, int, flag);
209COMPAT_SYSCALL_WRAP1(syncfs, int, fd);
210COMPAT_SYSCALL_WRAP2(setns, int, fd, int, nstype);
211COMPAT_SYSCALL_WRAP2(s390_runtime_instr, int, command, int, signum);
212COMPAT_SYSCALL_WRAP5(kcmp, pid_t, pid1, pid_t, pid2, int, type, unsigned long, idx1, unsigned long, idx2); 161COMPAT_SYSCALL_WRAP5(kcmp, pid_t, pid1, pid_t, pid2, int, type, unsigned long, idx1, unsigned long, idx2);
213COMPAT_SYSCALL_WRAP3(finit_module, int, fd, const char __user *, uargs, int, flags); 162COMPAT_SYSCALL_WRAP3(finit_module, int, fd, const char __user *, uargs, int, flags);
214COMPAT_SYSCALL_WRAP3(sched_setattr, pid_t, pid, struct sched_attr __user *, attr, unsigned int, flags); 163COMPAT_SYSCALL_WRAP3(sched_setattr, pid_t, pid, struct sched_attr __user *, attr, unsigned int, flags);
@@ -220,3 +169,10 @@ COMPAT_SYSCALL_WRAP2(memfd_create, const char __user *, uname, unsigned int, fla
220COMPAT_SYSCALL_WRAP3(bpf, int, cmd, union bpf_attr *, attr, unsigned int, size); 169COMPAT_SYSCALL_WRAP3(bpf, int, cmd, union bpf_attr *, attr, unsigned int, size);
221COMPAT_SYSCALL_WRAP3(s390_pci_mmio_write, const unsigned long, mmio_addr, const void __user *, user_buffer, const size_t, length); 170COMPAT_SYSCALL_WRAP3(s390_pci_mmio_write, const unsigned long, mmio_addr, const void __user *, user_buffer, const size_t, length);
222COMPAT_SYSCALL_WRAP3(s390_pci_mmio_read, const unsigned long, mmio_addr, void __user *, user_buffer, const size_t, length); 171COMPAT_SYSCALL_WRAP3(s390_pci_mmio_read, const unsigned long, mmio_addr, void __user *, user_buffer, const size_t, length);
172COMPAT_SYSCALL_WRAP4(socketpair, int, family, int, type, int, protocol, int __user *, usockvec);
173COMPAT_SYSCALL_WRAP3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen);
174COMPAT_SYSCALL_WRAP3(connect, int, fd, struct sockaddr __user *, uservaddr, int, addrlen);
175COMPAT_SYSCALL_WRAP4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr, int __user *, upeer_addrlen, int, flags);
176COMPAT_SYSCALL_WRAP3(getsockname, int, fd, struct sockaddr __user *, usockaddr, int __user *, usockaddr_len);
177COMPAT_SYSCALL_WRAP3(getpeername, int, fd, struct sockaddr __user *, usockaddr, int __user *, usockaddr_len);
178COMPAT_SYSCALL_WRAP6(sendto, int, fd, void __user *, buff, size_t, len, unsigned int, flags, struct sockaddr __user *, addr, int, addr_len);
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 247b7aae4c6d..09b039d7983d 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -1191,6 +1191,7 @@ cleanup_critical:
1191 clg %r9,BASED(.Lcleanup_save_fpu_fpc_end) 1191 clg %r9,BASED(.Lcleanup_save_fpu_fpc_end)
1192 jhe 1f 1192 jhe 1f
1193 lg %r2,__LC_CURRENT 1193 lg %r2,__LC_CURRENT
1194 aghi %r2,__TASK_thread
11940: # Store floating-point controls 11950: # Store floating-point controls
1195 stfpc __THREAD_FPU_fpc(%r2) 1196 stfpc __THREAD_FPU_fpc(%r2)
11961: # Load register save area and check if VX is active 11971: # Load register save area and check if VX is active
@@ -1252,6 +1253,7 @@ cleanup_critical:
1252 clg %r9,BASED(.Lcleanup_load_fpu_regs_vx_ctl) 1253 clg %r9,BASED(.Lcleanup_load_fpu_regs_vx_ctl)
1253 jhe 6f 1254 jhe 6f
1254 lg %r4,__LC_CURRENT 1255 lg %r4,__LC_CURRENT
1256 aghi %r4,__TASK_thread
1255 lfpc __THREAD_FPU_fpc(%r4) 1257 lfpc __THREAD_FPU_fpc(%r4)
1256 tm __THREAD_FPU_flags+3(%r4),FPU_USE_VX # VX-enabled task ? 1258 tm __THREAD_FPU_flags+3(%r4),FPU_USE_VX # VX-enabled task ?
1257 lg %r4,__THREAD_FPU_regs(%r4) # %r4 <- reg save area 1259 lg %r4,__THREAD_FPU_regs(%r4) # %r4 <- reg save area
diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c
index 56fdad479115..a9563409c36e 100644
--- a/arch/s390/kernel/perf_cpum_cf.c
+++ b/arch/s390/kernel/perf_cpum_cf.c
@@ -157,10 +157,14 @@ static int validate_ctr_auth(const struct hw_perf_event *hwc)
157 157
158 cpuhw = &get_cpu_var(cpu_hw_events); 158 cpuhw = &get_cpu_var(cpu_hw_events);
159 159
160 /* check authorization for cpu counter sets */ 160 /* Check authorization for cpu counter sets.
161 * If the particular CPU counter set is not authorized,
162 * return with -ENOENT in order to fall back to other
163 * PMUs that might suffice the event request.
164 */
161 ctrs_state = cpumf_state_ctl[hwc->config_base]; 165 ctrs_state = cpumf_state_ctl[hwc->config_base];
162 if (!(ctrs_state & cpuhw->info.auth_ctl)) 166 if (!(ctrs_state & cpuhw->info.auth_ctl))
163 err = -EPERM; 167 err = -ENOENT;
164 168
165 put_cpu_var(cpu_hw_events); 169 put_cpu_var(cpu_hw_events);
166 return err; 170 return err;
@@ -536,7 +540,7 @@ static int cpumf_pmu_add(struct perf_event *event, int flags)
536 */ 540 */
537 if (!(cpuhw->flags & PERF_EVENT_TXN)) 541 if (!(cpuhw->flags & PERF_EVENT_TXN))
538 if (validate_ctr_auth(&event->hw)) 542 if (validate_ctr_auth(&event->hw))
539 return -EPERM; 543 return -ENOENT;
540 544
541 ctr_set_enable(&cpuhw->state, event->hw.config_base); 545 ctr_set_enable(&cpuhw->state, event->hw.config_base);
542 event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED; 546 event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
@@ -611,7 +615,7 @@ static int cpumf_pmu_commit_txn(struct pmu *pmu)
611 state = cpuhw->state & ~((1 << CPUMF_LCCTL_ENABLE_SHIFT) - 1); 615 state = cpuhw->state & ~((1 << CPUMF_LCCTL_ENABLE_SHIFT) - 1);
612 state >>= CPUMF_LCCTL_ENABLE_SHIFT; 616 state >>= CPUMF_LCCTL_ENABLE_SHIFT;
613 if ((state & cpuhw->info.auth_ctl) != state) 617 if ((state & cpuhw->info.auth_ctl) != state)
614 return -EPERM; 618 return -ENOENT;
615 619
616 cpuhw->flags &= ~PERF_EVENT_TXN; 620 cpuhw->flags &= ~PERF_EVENT_TXN;
617 perf_pmu_enable(pmu); 621 perf_pmu_enable(pmu);
diff --git a/arch/s390/kernel/swsusp.S b/arch/s390/kernel/swsusp.S
index ca6294645dd3..2d6b6e81f812 100644
--- a/arch/s390/kernel/swsusp.S
+++ b/arch/s390/kernel/swsusp.S
@@ -30,6 +30,9 @@ ENTRY(swsusp_arch_suspend)
30 aghi %r15,-STACK_FRAME_OVERHEAD 30 aghi %r15,-STACK_FRAME_OVERHEAD
31 stg %r1,__SF_BACKCHAIN(%r15) 31 stg %r1,__SF_BACKCHAIN(%r15)
32 32
33 /* Store FPU registers */
34 brasl %r14,save_fpu_regs
35
33 /* Deactivate DAT */ 36 /* Deactivate DAT */
34 stnsm __SF_EMPTY(%r15),0xfb 37 stnsm __SF_EMPTY(%r15),0xfb
35 38
@@ -47,23 +50,6 @@ ENTRY(swsusp_arch_suspend)
47 50
48 /* Store registers */ 51 /* Store registers */
49 mvc 0x318(4,%r1),__SF_EMPTY(%r15) /* move prefix to lowcore */ 52 mvc 0x318(4,%r1),__SF_EMPTY(%r15) /* move prefix to lowcore */
50 stfpc 0x31c(%r1) /* store fpu control */
51 std 0,0x200(%r1) /* store f0 */
52 std 1,0x208(%r1) /* store f1 */
53 std 2,0x210(%r1) /* store f2 */
54 std 3,0x218(%r1) /* store f3 */
55 std 4,0x220(%r1) /* store f4 */
56 std 5,0x228(%r1) /* store f5 */
57 std 6,0x230(%r1) /* store f6 */
58 std 7,0x238(%r1) /* store f7 */
59 std 8,0x240(%r1) /* store f8 */
60 std 9,0x248(%r1) /* store f9 */
61 std 10,0x250(%r1) /* store f10 */
62 std 11,0x258(%r1) /* store f11 */
63 std 12,0x260(%r1) /* store f12 */
64 std 13,0x268(%r1) /* store f13 */
65 std 14,0x270(%r1) /* store f14 */
66 std 15,0x278(%r1) /* store f15 */
67 stam %a0,%a15,0x340(%r1) /* store access registers */ 53 stam %a0,%a15,0x340(%r1) /* store access registers */
68 stctg %c0,%c15,0x380(%r1) /* store control registers */ 54 stctg %c0,%c15,0x380(%r1) /* store control registers */
69 stmg %r0,%r15,0x280(%r1) /* store general registers */ 55 stmg %r0,%r15,0x280(%r1) /* store general registers */
@@ -249,24 +235,6 @@ restore_registers:
249 lctlg %c0,%c15,0x380(%r13) /* load control registers */ 235 lctlg %c0,%c15,0x380(%r13) /* load control registers */
250 lam %a0,%a15,0x340(%r13) /* load access registers */ 236 lam %a0,%a15,0x340(%r13) /* load access registers */
251 237
252 lfpc 0x31c(%r13) /* load fpu control */
253 ld 0,0x200(%r13) /* load f0 */
254 ld 1,0x208(%r13) /* load f1 */
255 ld 2,0x210(%r13) /* load f2 */
256 ld 3,0x218(%r13) /* load f3 */
257 ld 4,0x220(%r13) /* load f4 */
258 ld 5,0x228(%r13) /* load f5 */
259 ld 6,0x230(%r13) /* load f6 */
260 ld 7,0x238(%r13) /* load f7 */
261 ld 8,0x240(%r13) /* load f8 */
262 ld 9,0x248(%r13) /* load f9 */
263 ld 10,0x250(%r13) /* load f10 */
264 ld 11,0x258(%r13) /* load f11 */
265 ld 12,0x260(%r13) /* load f12 */
266 ld 13,0x268(%r13) /* load f13 */
267 ld 14,0x270(%r13) /* load f14 */
268 ld 15,0x278(%r13) /* load f15 */
269
270 /* Load old stack */ 238 /* Load old stack */
271 lg %r15,0x2f8(%r13) 239 lg %r15,0x2f8(%r13)
272 240
diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S
index f3f4a137aef6..8c56929c8d82 100644
--- a/arch/s390/kernel/syscalls.S
+++ b/arch/s390/kernel/syscalls.S
@@ -9,12 +9,12 @@
9#define NI_SYSCALL SYSCALL(sys_ni_syscall,sys_ni_syscall) 9#define NI_SYSCALL SYSCALL(sys_ni_syscall,sys_ni_syscall)
10 10
11NI_SYSCALL /* 0 */ 11NI_SYSCALL /* 0 */
12SYSCALL(sys_exit,compat_sys_exit) 12SYSCALL(sys_exit,sys_exit)
13SYSCALL(sys_fork,sys_fork) 13SYSCALL(sys_fork,sys_fork)
14SYSCALL(sys_read,compat_sys_s390_read) 14SYSCALL(sys_read,compat_sys_s390_read)
15SYSCALL(sys_write,compat_sys_s390_write) 15SYSCALL(sys_write,compat_sys_s390_write)
16SYSCALL(sys_open,compat_sys_open) /* 5 */ 16SYSCALL(sys_open,compat_sys_open) /* 5 */
17SYSCALL(sys_close,compat_sys_close) 17SYSCALL(sys_close,sys_close)
18SYSCALL(sys_restart_syscall,sys_restart_syscall) 18SYSCALL(sys_restart_syscall,sys_restart_syscall)
19SYSCALL(sys_creat,compat_sys_creat) 19SYSCALL(sys_creat,compat_sys_creat)
20SYSCALL(sys_link,compat_sys_link) 20SYSCALL(sys_link,compat_sys_link)
@@ -35,21 +35,21 @@ SYSCALL(sys_ni_syscall,compat_sys_s390_setuid16) /* old setuid16 syscall*/
35SYSCALL(sys_ni_syscall,compat_sys_s390_getuid16) /* old getuid16 syscall*/ 35SYSCALL(sys_ni_syscall,compat_sys_s390_getuid16) /* old getuid16 syscall*/
36SYSCALL(sys_ni_syscall,compat_sys_stime) /* 25 old stime syscall */ 36SYSCALL(sys_ni_syscall,compat_sys_stime) /* 25 old stime syscall */
37SYSCALL(sys_ptrace,compat_sys_ptrace) 37SYSCALL(sys_ptrace,compat_sys_ptrace)
38SYSCALL(sys_alarm,compat_sys_alarm) 38SYSCALL(sys_alarm,sys_alarm)
39NI_SYSCALL /* old fstat syscall */ 39NI_SYSCALL /* old fstat syscall */
40SYSCALL(sys_pause,sys_pause) 40SYSCALL(sys_pause,sys_pause)
41SYSCALL(sys_utime,compat_sys_utime) /* 30 */ 41SYSCALL(sys_utime,compat_sys_utime) /* 30 */
42NI_SYSCALL /* old stty syscall */ 42NI_SYSCALL /* old stty syscall */
43NI_SYSCALL /* old gtty syscall */ 43NI_SYSCALL /* old gtty syscall */
44SYSCALL(sys_access,compat_sys_access) 44SYSCALL(sys_access,compat_sys_access)
45SYSCALL(sys_nice,compat_sys_nice) 45SYSCALL(sys_nice,sys_nice)
46NI_SYSCALL /* 35 old ftime syscall */ 46NI_SYSCALL /* 35 old ftime syscall */
47SYSCALL(sys_sync,sys_sync) 47SYSCALL(sys_sync,sys_sync)
48SYSCALL(sys_kill,compat_sys_kill) 48SYSCALL(sys_kill,sys_kill)
49SYSCALL(sys_rename,compat_sys_rename) 49SYSCALL(sys_rename,compat_sys_rename)
50SYSCALL(sys_mkdir,compat_sys_mkdir) 50SYSCALL(sys_mkdir,compat_sys_mkdir)
51SYSCALL(sys_rmdir,compat_sys_rmdir) /* 40 */ 51SYSCALL(sys_rmdir,compat_sys_rmdir) /* 40 */
52SYSCALL(sys_dup,compat_sys_dup) 52SYSCALL(sys_dup,sys_dup)
53SYSCALL(sys_pipe,compat_sys_pipe) 53SYSCALL(sys_pipe,compat_sys_pipe)
54SYSCALL(sys_times,compat_sys_times) 54SYSCALL(sys_times,compat_sys_times)
55NI_SYSCALL /* old prof syscall */ 55NI_SYSCALL /* old prof syscall */
@@ -65,13 +65,13 @@ NI_SYSCALL /* old lock syscall */
65SYSCALL(sys_ioctl,compat_sys_ioctl) 65SYSCALL(sys_ioctl,compat_sys_ioctl)
66SYSCALL(sys_fcntl,compat_sys_fcntl) /* 55 */ 66SYSCALL(sys_fcntl,compat_sys_fcntl) /* 55 */
67NI_SYSCALL /* intel mpx syscall */ 67NI_SYSCALL /* intel mpx syscall */
68SYSCALL(sys_setpgid,compat_sys_setpgid) 68SYSCALL(sys_setpgid,sys_setpgid)
69NI_SYSCALL /* old ulimit syscall */ 69NI_SYSCALL /* old ulimit syscall */
70NI_SYSCALL /* old uname syscall */ 70NI_SYSCALL /* old uname syscall */
71SYSCALL(sys_umask,compat_sys_umask) /* 60 */ 71SYSCALL(sys_umask,sys_umask) /* 60 */
72SYSCALL(sys_chroot,compat_sys_chroot) 72SYSCALL(sys_chroot,compat_sys_chroot)
73SYSCALL(sys_ustat,compat_sys_ustat) 73SYSCALL(sys_ustat,compat_sys_ustat)
74SYSCALL(sys_dup2,compat_sys_dup2) 74SYSCALL(sys_dup2,sys_dup2)
75SYSCALL(sys_getppid,sys_getppid) 75SYSCALL(sys_getppid,sys_getppid)
76SYSCALL(sys_getpgrp,sys_getpgrp) /* 65 */ 76SYSCALL(sys_getpgrp,sys_getpgrp) /* 65 */
77SYSCALL(sys_setsid,sys_setsid) 77SYSCALL(sys_setsid,sys_setsid)
@@ -102,10 +102,10 @@ SYSCALL(sys_old_mmap,compat_sys_s390_old_mmap) /* 90 */
102SYSCALL(sys_munmap,compat_sys_munmap) 102SYSCALL(sys_munmap,compat_sys_munmap)
103SYSCALL(sys_truncate,compat_sys_truncate) 103SYSCALL(sys_truncate,compat_sys_truncate)
104SYSCALL(sys_ftruncate,compat_sys_ftruncate) 104SYSCALL(sys_ftruncate,compat_sys_ftruncate)
105SYSCALL(sys_fchmod,compat_sys_fchmod) 105SYSCALL(sys_fchmod,sys_fchmod)
106SYSCALL(sys_ni_syscall,compat_sys_s390_fchown16) /* 95 old fchown16 syscall*/ 106SYSCALL(sys_ni_syscall,compat_sys_s390_fchown16) /* 95 old fchown16 syscall*/
107SYSCALL(sys_getpriority,compat_sys_getpriority) 107SYSCALL(sys_getpriority,sys_getpriority)
108SYSCALL(sys_setpriority,compat_sys_setpriority) 108SYSCALL(sys_setpriority,sys_setpriority)
109NI_SYSCALL /* old profil syscall */ 109NI_SYSCALL /* old profil syscall */
110SYSCALL(sys_statfs,compat_sys_statfs) 110SYSCALL(sys_statfs,compat_sys_statfs)
111SYSCALL(sys_fstatfs,compat_sys_fstatfs) /* 100 */ 111SYSCALL(sys_fstatfs,compat_sys_fstatfs) /* 100 */
@@ -126,7 +126,7 @@ SYSCALL(sys_wait4,compat_sys_wait4)
126SYSCALL(sys_swapoff,compat_sys_swapoff) /* 115 */ 126SYSCALL(sys_swapoff,compat_sys_swapoff) /* 115 */
127SYSCALL(sys_sysinfo,compat_sys_sysinfo) 127SYSCALL(sys_sysinfo,compat_sys_sysinfo)
128SYSCALL(sys_s390_ipc,compat_sys_s390_ipc) 128SYSCALL(sys_s390_ipc,compat_sys_s390_ipc)
129SYSCALL(sys_fsync,compat_sys_fsync) 129SYSCALL(sys_fsync,sys_fsync)
130SYSCALL(sys_sigreturn,compat_sys_sigreturn) 130SYSCALL(sys_sigreturn,compat_sys_sigreturn)
131SYSCALL(sys_clone,compat_sys_clone) /* 120 */ 131SYSCALL(sys_clone,compat_sys_clone) /* 120 */
132SYSCALL(sys_setdomainname,compat_sys_setdomainname) 132SYSCALL(sys_setdomainname,compat_sys_setdomainname)
@@ -140,35 +140,35 @@ SYSCALL(sys_init_module,compat_sys_init_module)
140SYSCALL(sys_delete_module,compat_sys_delete_module) 140SYSCALL(sys_delete_module,compat_sys_delete_module)
141NI_SYSCALL /* 130: old get_kernel_syms */ 141NI_SYSCALL /* 130: old get_kernel_syms */
142SYSCALL(sys_quotactl,compat_sys_quotactl) 142SYSCALL(sys_quotactl,compat_sys_quotactl)
143SYSCALL(sys_getpgid,compat_sys_getpgid) 143SYSCALL(sys_getpgid,sys_getpgid)
144SYSCALL(sys_fchdir,compat_sys_fchdir) 144SYSCALL(sys_fchdir,sys_fchdir)
145SYSCALL(sys_bdflush,compat_sys_bdflush) 145SYSCALL(sys_bdflush,compat_sys_bdflush)
146SYSCALL(sys_sysfs,compat_sys_sysfs) /* 135 */ 146SYSCALL(sys_sysfs,compat_sys_sysfs) /* 135 */
147SYSCALL(sys_s390_personality,compat_sys_s390_personality) 147SYSCALL(sys_s390_personality,sys_s390_personality)
148NI_SYSCALL /* for afs_syscall */ 148NI_SYSCALL /* for afs_syscall */
149SYSCALL(sys_ni_syscall,compat_sys_s390_setfsuid16) /* old setfsuid16 syscall */ 149SYSCALL(sys_ni_syscall,compat_sys_s390_setfsuid16) /* old setfsuid16 syscall */
150SYSCALL(sys_ni_syscall,compat_sys_s390_setfsgid16) /* old setfsgid16 syscall */ 150SYSCALL(sys_ni_syscall,compat_sys_s390_setfsgid16) /* old setfsgid16 syscall */
151SYSCALL(sys_llseek,compat_sys_llseek) /* 140 */ 151SYSCALL(sys_llseek,compat_sys_llseek) /* 140 */
152SYSCALL(sys_getdents,compat_sys_getdents) 152SYSCALL(sys_getdents,compat_sys_getdents)
153SYSCALL(sys_select,compat_sys_select) 153SYSCALL(sys_select,compat_sys_select)
154SYSCALL(sys_flock,compat_sys_flock) 154SYSCALL(sys_flock,sys_flock)
155SYSCALL(sys_msync,compat_sys_msync) 155SYSCALL(sys_msync,compat_sys_msync)
156SYSCALL(sys_readv,compat_sys_readv) /* 145 */ 156SYSCALL(sys_readv,compat_sys_readv) /* 145 */
157SYSCALL(sys_writev,compat_sys_writev) 157SYSCALL(sys_writev,compat_sys_writev)
158SYSCALL(sys_getsid,compat_sys_getsid) 158SYSCALL(sys_getsid,sys_getsid)
159SYSCALL(sys_fdatasync,compat_sys_fdatasync) 159SYSCALL(sys_fdatasync,sys_fdatasync)
160SYSCALL(sys_sysctl,compat_sys_sysctl) 160SYSCALL(sys_sysctl,compat_sys_sysctl)
161SYSCALL(sys_mlock,compat_sys_mlock) /* 150 */ 161SYSCALL(sys_mlock,compat_sys_mlock) /* 150 */
162SYSCALL(sys_munlock,compat_sys_munlock) 162SYSCALL(sys_munlock,compat_sys_munlock)
163SYSCALL(sys_mlockall,compat_sys_mlockall) 163SYSCALL(sys_mlockall,sys_mlockall)
164SYSCALL(sys_munlockall,sys_munlockall) 164SYSCALL(sys_munlockall,sys_munlockall)
165SYSCALL(sys_sched_setparam,compat_sys_sched_setparam) 165SYSCALL(sys_sched_setparam,compat_sys_sched_setparam)
166SYSCALL(sys_sched_getparam,compat_sys_sched_getparam) /* 155 */ 166SYSCALL(sys_sched_getparam,compat_sys_sched_getparam) /* 155 */
167SYSCALL(sys_sched_setscheduler,compat_sys_sched_setscheduler) 167SYSCALL(sys_sched_setscheduler,compat_sys_sched_setscheduler)
168SYSCALL(sys_sched_getscheduler,compat_sys_sched_getscheduler) 168SYSCALL(sys_sched_getscheduler,sys_sched_getscheduler)
169SYSCALL(sys_sched_yield,sys_sched_yield) 169SYSCALL(sys_sched_yield,sys_sched_yield)
170SYSCALL(sys_sched_get_priority_max,compat_sys_sched_get_priority_max) 170SYSCALL(sys_sched_get_priority_max,sys_sched_get_priority_max)
171SYSCALL(sys_sched_get_priority_min,compat_sys_sched_get_priority_min) /* 160 */ 171SYSCALL(sys_sched_get_priority_min,sys_sched_get_priority_min) /* 160 */
172SYSCALL(sys_sched_rr_get_interval,compat_sys_sched_rr_get_interval) 172SYSCALL(sys_sched_rr_get_interval,compat_sys_sched_rr_get_interval)
173SYSCALL(sys_nanosleep,compat_sys_nanosleep) 173SYSCALL(sys_nanosleep,compat_sys_nanosleep)
174SYSCALL(sys_mremap,compat_sys_mremap) 174SYSCALL(sys_mremap,compat_sys_mremap)
@@ -211,20 +211,20 @@ SYSCALL(sys_getuid,sys_getuid)
211SYSCALL(sys_getgid,sys_getgid) /* 200 */ 211SYSCALL(sys_getgid,sys_getgid) /* 200 */
212SYSCALL(sys_geteuid,sys_geteuid) 212SYSCALL(sys_geteuid,sys_geteuid)
213SYSCALL(sys_getegid,sys_getegid) 213SYSCALL(sys_getegid,sys_getegid)
214SYSCALL(sys_setreuid,compat_sys_setreuid) 214SYSCALL(sys_setreuid,sys_setreuid)
215SYSCALL(sys_setregid,compat_sys_setregid) 215SYSCALL(sys_setregid,sys_setregid)
216SYSCALL(sys_getgroups,compat_sys_getgroups) /* 205 */ 216SYSCALL(sys_getgroups,compat_sys_getgroups) /* 205 */
217SYSCALL(sys_setgroups,compat_sys_setgroups) 217SYSCALL(sys_setgroups,compat_sys_setgroups)
218SYSCALL(sys_fchown,compat_sys_fchown) 218SYSCALL(sys_fchown,sys_fchown)
219SYSCALL(sys_setresuid,compat_sys_setresuid) 219SYSCALL(sys_setresuid,sys_setresuid)
220SYSCALL(sys_getresuid,compat_sys_getresuid) 220SYSCALL(sys_getresuid,compat_sys_getresuid)
221SYSCALL(sys_setresgid,compat_sys_setresgid) /* 210 */ 221SYSCALL(sys_setresgid,sys_setresgid) /* 210 */
222SYSCALL(sys_getresgid,compat_sys_getresgid) 222SYSCALL(sys_getresgid,compat_sys_getresgid)
223SYSCALL(sys_chown,compat_sys_chown) 223SYSCALL(sys_chown,compat_sys_chown)
224SYSCALL(sys_setuid,compat_sys_setuid) 224SYSCALL(sys_setuid,sys_setuid)
225SYSCALL(sys_setgid,compat_sys_setgid) 225SYSCALL(sys_setgid,sys_setgid)
226SYSCALL(sys_setfsuid,compat_sys_setfsuid) /* 215 */ 226SYSCALL(sys_setfsuid,sys_setfsuid) /* 215 */
227SYSCALL(sys_setfsgid,compat_sys_setfsgid) 227SYSCALL(sys_setfsgid,sys_setfsgid)
228SYSCALL(sys_pivot_root,compat_sys_pivot_root) 228SYSCALL(sys_pivot_root,compat_sys_pivot_root)
229SYSCALL(sys_mincore,compat_sys_mincore) 229SYSCALL(sys_mincore,compat_sys_mincore)
230SYSCALL(sys_madvise,compat_sys_madvise) 230SYSCALL(sys_madvise,compat_sys_madvise)
@@ -245,19 +245,19 @@ SYSCALL(sys_removexattr,compat_sys_removexattr)
245SYSCALL(sys_lremovexattr,compat_sys_lremovexattr) 245SYSCALL(sys_lremovexattr,compat_sys_lremovexattr)
246SYSCALL(sys_fremovexattr,compat_sys_fremovexattr) /* 235 */ 246SYSCALL(sys_fremovexattr,compat_sys_fremovexattr) /* 235 */
247SYSCALL(sys_gettid,sys_gettid) 247SYSCALL(sys_gettid,sys_gettid)
248SYSCALL(sys_tkill,compat_sys_tkill) 248SYSCALL(sys_tkill,sys_tkill)
249SYSCALL(sys_futex,compat_sys_futex) 249SYSCALL(sys_futex,compat_sys_futex)
250SYSCALL(sys_sched_setaffinity,compat_sys_sched_setaffinity) 250SYSCALL(sys_sched_setaffinity,compat_sys_sched_setaffinity)
251SYSCALL(sys_sched_getaffinity,compat_sys_sched_getaffinity) /* 240 */ 251SYSCALL(sys_sched_getaffinity,compat_sys_sched_getaffinity) /* 240 */
252SYSCALL(sys_tgkill,compat_sys_tgkill) 252SYSCALL(sys_tgkill,sys_tgkill)
253NI_SYSCALL /* reserved for TUX */ 253NI_SYSCALL /* reserved for TUX */
254SYSCALL(sys_io_setup,compat_sys_io_setup) 254SYSCALL(sys_io_setup,compat_sys_io_setup)
255SYSCALL(sys_io_destroy,compat_sys_io_destroy) 255SYSCALL(sys_io_destroy,compat_sys_io_destroy)
256SYSCALL(sys_io_getevents,compat_sys_io_getevents) /* 245 */ 256SYSCALL(sys_io_getevents,compat_sys_io_getevents) /* 245 */
257SYSCALL(sys_io_submit,compat_sys_io_submit) 257SYSCALL(sys_io_submit,compat_sys_io_submit)
258SYSCALL(sys_io_cancel,compat_sys_io_cancel) 258SYSCALL(sys_io_cancel,compat_sys_io_cancel)
259SYSCALL(sys_exit_group,compat_sys_exit_group) 259SYSCALL(sys_exit_group,sys_exit_group)
260SYSCALL(sys_epoll_create,compat_sys_epoll_create) 260SYSCALL(sys_epoll_create,sys_epoll_create)
261SYSCALL(sys_epoll_ctl,compat_sys_epoll_ctl) /* 250 */ 261SYSCALL(sys_epoll_ctl,compat_sys_epoll_ctl) /* 250 */
262SYSCALL(sys_epoll_wait,compat_sys_epoll_wait) 262SYSCALL(sys_epoll_wait,compat_sys_epoll_wait)
263SYSCALL(sys_set_tid_address,compat_sys_set_tid_address) 263SYSCALL(sys_set_tid_address,compat_sys_set_tid_address)
@@ -265,8 +265,8 @@ SYSCALL(sys_fadvise64_64,compat_sys_s390_fadvise64)
265SYSCALL(sys_timer_create,compat_sys_timer_create) 265SYSCALL(sys_timer_create,compat_sys_timer_create)
266SYSCALL(sys_timer_settime,compat_sys_timer_settime) /* 255 */ 266SYSCALL(sys_timer_settime,compat_sys_timer_settime) /* 255 */
267SYSCALL(sys_timer_gettime,compat_sys_timer_gettime) 267SYSCALL(sys_timer_gettime,compat_sys_timer_gettime)
268SYSCALL(sys_timer_getoverrun,compat_sys_timer_getoverrun) 268SYSCALL(sys_timer_getoverrun,sys_timer_getoverrun)
269SYSCALL(sys_timer_delete,compat_sys_timer_delete) 269SYSCALL(sys_timer_delete,sys_timer_delete)
270SYSCALL(sys_clock_settime,compat_sys_clock_settime) 270SYSCALL(sys_clock_settime,compat_sys_clock_settime)
271SYSCALL(sys_clock_gettime,compat_sys_clock_gettime) /* 260 */ 271SYSCALL(sys_clock_gettime,compat_sys_clock_gettime) /* 260 */
272SYSCALL(sys_clock_getres,compat_sys_clock_getres) 272SYSCALL(sys_clock_getres,compat_sys_clock_getres)
@@ -290,11 +290,11 @@ SYSCALL(sys_add_key,compat_sys_add_key)
290SYSCALL(sys_request_key,compat_sys_request_key) 290SYSCALL(sys_request_key,compat_sys_request_key)
291SYSCALL(sys_keyctl,compat_sys_keyctl) /* 280 */ 291SYSCALL(sys_keyctl,compat_sys_keyctl) /* 280 */
292SYSCALL(sys_waitid,compat_sys_waitid) 292SYSCALL(sys_waitid,compat_sys_waitid)
293SYSCALL(sys_ioprio_set,compat_sys_ioprio_set) 293SYSCALL(sys_ioprio_set,sys_ioprio_set)
294SYSCALL(sys_ioprio_get,compat_sys_ioprio_get) 294SYSCALL(sys_ioprio_get,sys_ioprio_get)
295SYSCALL(sys_inotify_init,sys_inotify_init) 295SYSCALL(sys_inotify_init,sys_inotify_init)
296SYSCALL(sys_inotify_add_watch,compat_sys_inotify_add_watch) /* 285 */ 296SYSCALL(sys_inotify_add_watch,compat_sys_inotify_add_watch) /* 285 */
297SYSCALL(sys_inotify_rm_watch,compat_sys_inotify_rm_watch) 297SYSCALL(sys_inotify_rm_watch,sys_inotify_rm_watch)
298SYSCALL(sys_migrate_pages,compat_sys_migrate_pages) 298SYSCALL(sys_migrate_pages,compat_sys_migrate_pages)
299SYSCALL(sys_openat,compat_sys_openat) 299SYSCALL(sys_openat,compat_sys_openat)
300SYSCALL(sys_mkdirat,compat_sys_mkdirat) 300SYSCALL(sys_mkdirat,compat_sys_mkdirat)
@@ -326,31 +326,31 @@ SYSCALL(sys_fallocate,compat_sys_s390_fallocate)
326SYSCALL(sys_utimensat,compat_sys_utimensat) /* 315 */ 326SYSCALL(sys_utimensat,compat_sys_utimensat) /* 315 */
327SYSCALL(sys_signalfd,compat_sys_signalfd) 327SYSCALL(sys_signalfd,compat_sys_signalfd)
328NI_SYSCALL /* 317 old sys_timer_fd */ 328NI_SYSCALL /* 317 old sys_timer_fd */
329SYSCALL(sys_eventfd,compat_sys_eventfd) 329SYSCALL(sys_eventfd,sys_eventfd)
330SYSCALL(sys_timerfd_create,compat_sys_timerfd_create) 330SYSCALL(sys_timerfd_create,sys_timerfd_create)
331SYSCALL(sys_timerfd_settime,compat_sys_timerfd_settime) /* 320 */ 331SYSCALL(sys_timerfd_settime,compat_sys_timerfd_settime) /* 320 */
332SYSCALL(sys_timerfd_gettime,compat_sys_timerfd_gettime) 332SYSCALL(sys_timerfd_gettime,compat_sys_timerfd_gettime)
333SYSCALL(sys_signalfd4,compat_sys_signalfd4) 333SYSCALL(sys_signalfd4,compat_sys_signalfd4)
334SYSCALL(sys_eventfd2,compat_sys_eventfd2) 334SYSCALL(sys_eventfd2,sys_eventfd2)
335SYSCALL(sys_inotify_init1,compat_sys_inotify_init1) 335SYSCALL(sys_inotify_init1,sys_inotify_init1)
336SYSCALL(sys_pipe2,compat_sys_pipe2) /* 325 */ 336SYSCALL(sys_pipe2,compat_sys_pipe2) /* 325 */
337SYSCALL(sys_dup3,compat_sys_dup3) 337SYSCALL(sys_dup3,sys_dup3)
338SYSCALL(sys_epoll_create1,compat_sys_epoll_create1) 338SYSCALL(sys_epoll_create1,sys_epoll_create1)
339SYSCALL(sys_preadv,compat_sys_preadv) 339SYSCALL(sys_preadv,compat_sys_preadv)
340SYSCALL(sys_pwritev,compat_sys_pwritev) 340SYSCALL(sys_pwritev,compat_sys_pwritev)
341SYSCALL(sys_rt_tgsigqueueinfo,compat_sys_rt_tgsigqueueinfo) /* 330 */ 341SYSCALL(sys_rt_tgsigqueueinfo,compat_sys_rt_tgsigqueueinfo) /* 330 */
342SYSCALL(sys_perf_event_open,compat_sys_perf_event_open) 342SYSCALL(sys_perf_event_open,compat_sys_perf_event_open)
343SYSCALL(sys_fanotify_init,compat_sys_fanotify_init) 343SYSCALL(sys_fanotify_init,sys_fanotify_init)
344SYSCALL(sys_fanotify_mark,compat_sys_fanotify_mark) 344SYSCALL(sys_fanotify_mark,compat_sys_fanotify_mark)
345SYSCALL(sys_prlimit64,compat_sys_prlimit64) 345SYSCALL(sys_prlimit64,compat_sys_prlimit64)
346SYSCALL(sys_name_to_handle_at,compat_sys_name_to_handle_at) /* 335 */ 346SYSCALL(sys_name_to_handle_at,compat_sys_name_to_handle_at) /* 335 */
347SYSCALL(sys_open_by_handle_at,compat_sys_open_by_handle_at) 347SYSCALL(sys_open_by_handle_at,compat_sys_open_by_handle_at)
348SYSCALL(sys_clock_adjtime,compat_sys_clock_adjtime) 348SYSCALL(sys_clock_adjtime,compat_sys_clock_adjtime)
349SYSCALL(sys_syncfs,compat_sys_syncfs) 349SYSCALL(sys_syncfs,sys_syncfs)
350SYSCALL(sys_setns,compat_sys_setns) 350SYSCALL(sys_setns,sys_setns)
351SYSCALL(sys_process_vm_readv,compat_sys_process_vm_readv) /* 340 */ 351SYSCALL(sys_process_vm_readv,compat_sys_process_vm_readv) /* 340 */
352SYSCALL(sys_process_vm_writev,compat_sys_process_vm_writev) 352SYSCALL(sys_process_vm_writev,compat_sys_process_vm_writev)
353SYSCALL(sys_s390_runtime_instr,compat_sys_s390_runtime_instr) 353SYSCALL(sys_s390_runtime_instr,sys_s390_runtime_instr)
354SYSCALL(sys_kcmp,compat_sys_kcmp) 354SYSCALL(sys_kcmp,compat_sys_kcmp)
355SYSCALL(sys_finit_module,compat_sys_finit_module) 355SYSCALL(sys_finit_module,compat_sys_finit_module)
356SYSCALL(sys_sched_setattr,compat_sys_sched_setattr) /* 345 */ 356SYSCALL(sys_sched_setattr,compat_sys_sched_setattr) /* 345 */
@@ -363,3 +363,22 @@ SYSCALL(sys_bpf,compat_sys_bpf)
363SYSCALL(sys_s390_pci_mmio_write,compat_sys_s390_pci_mmio_write) 363SYSCALL(sys_s390_pci_mmio_write,compat_sys_s390_pci_mmio_write)
364SYSCALL(sys_s390_pci_mmio_read,compat_sys_s390_pci_mmio_read) 364SYSCALL(sys_s390_pci_mmio_read,compat_sys_s390_pci_mmio_read)
365SYSCALL(sys_execveat,compat_sys_execveat) 365SYSCALL(sys_execveat,compat_sys_execveat)
366SYSCALL(sys_userfaultfd,sys_userfaultfd) /* 355 */
367SYSCALL(sys_membarrier,sys_membarrier)
368SYSCALL(sys_recvmmsg,compat_sys_recvmmsg)
369SYSCALL(sys_sendmmsg,compat_sys_sendmmsg)
370SYSCALL(sys_socket,sys_socket)
371SYSCALL(sys_socketpair,compat_sys_socketpair) /* 360 */
372SYSCALL(sys_bind,sys_bind)
373SYSCALL(sys_connect,sys_connect)
374SYSCALL(sys_listen,sys_listen)
375SYSCALL(sys_accept4,sys_accept4)
376SYSCALL(sys_getsockopt,compat_sys_getsockopt) /* 365 */
377SYSCALL(sys_setsockopt,compat_sys_setsockopt)
378SYSCALL(sys_getsockname,compat_sys_getsockname)
379SYSCALL(sys_getpeername,compat_sys_getpeername)
380SYSCALL(sys_sendto,compat_sys_sendto)
381SYSCALL(sys_sendmsg,compat_sys_sendmsg) /* 370 */
382SYSCALL(sys_recvfrom,compat_sys_recvfrom)
383SYSCALL(sys_recvmsg,compat_sys_recvmsg)
384SYSCALL(sys_shutdown,sys_shutdown)
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index b9ce650e9e99..c8653435c70d 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -89,17 +89,21 @@ static int do_account_vtime(struct task_struct *tsk, int hardirq_offset)
89 if (smp_cpu_mtid && 89 if (smp_cpu_mtid &&
90 time_after64(jiffies_64, __this_cpu_read(mt_scaling_jiffies))) { 90 time_after64(jiffies_64, __this_cpu_read(mt_scaling_jiffies))) {
91 u64 cycles_new[32], *cycles_old; 91 u64 cycles_new[32], *cycles_old;
92 u64 delta, mult, div; 92 u64 delta, fac, mult, div;
93 93
94 cycles_old = this_cpu_ptr(mt_cycles); 94 cycles_old = this_cpu_ptr(mt_cycles);
95 if (stcctm5(smp_cpu_mtid + 1, cycles_new) < 2) { 95 if (stcctm5(smp_cpu_mtid + 1, cycles_new) < 2) {
96 fac = 1;
96 mult = div = 0; 97 mult = div = 0;
97 for (i = 0; i <= smp_cpu_mtid; i++) { 98 for (i = 0; i <= smp_cpu_mtid; i++) {
98 delta = cycles_new[i] - cycles_old[i]; 99 delta = cycles_new[i] - cycles_old[i];
99 mult += delta; 100 div += delta;
100 div += (i + 1) * delta; 101 mult *= i + 1;
102 mult += delta * fac;
103 fac *= i + 1;
101 } 104 }
102 if (mult > 0) { 105 div *= fac;
106 if (div > 0) {
103 /* Update scaling factor */ 107 /* Update scaling factor */
104 __this_cpu_write(mt_scaling_mult, mult); 108 __this_cpu_write(mt_scaling_mult, mult);
105 __this_cpu_write(mt_scaling_div, div); 109 __this_cpu_write(mt_scaling_div, div);
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index c91eb941b444..0a67c40eece9 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -63,6 +63,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
63 { "exit_program_interruption", VCPU_STAT(exit_program_interruption) }, 63 { "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
64 { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) }, 64 { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
65 { "halt_successful_poll", VCPU_STAT(halt_successful_poll) }, 65 { "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
66 { "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
66 { "halt_wakeup", VCPU_STAT(halt_wakeup) }, 67 { "halt_wakeup", VCPU_STAT(halt_wakeup) },
67 { "instruction_lctlg", VCPU_STAT(instruction_lctlg) }, 68 { "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
68 { "instruction_lctl", VCPU_STAT(instruction_lctl) }, 69 { "instruction_lctl", VCPU_STAT(instruction_lctl) },
@@ -1574,7 +1575,7 @@ static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
1574 1575
1575static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu) 1576static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
1576{ 1577{
1577 atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20); 1578 atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
1578} 1579}
1579 1580
1580/* 1581/*
diff --git a/arch/sh/boards/mach-se/7343/irq.c b/arch/sh/boards/mach-se/7343/irq.c
index 6f97a8f0d0d6..6129aef6db76 100644
--- a/arch/sh/boards/mach-se/7343/irq.c
+++ b/arch/sh/boards/mach-se/7343/irq.c
@@ -29,7 +29,7 @@
29static void __iomem *se7343_irq_regs; 29static void __iomem *se7343_irq_regs;
30struct irq_domain *se7343_irq_domain; 30struct irq_domain *se7343_irq_domain;
31 31
32static void se7343_irq_demux(unsigned int irq, struct irq_desc *desc) 32static void se7343_irq_demux(struct irq_desc *desc)
33{ 33{
34 struct irq_data *data = irq_desc_get_irq_data(desc); 34 struct irq_data *data = irq_desc_get_irq_data(desc);
35 struct irq_chip *chip = irq_data_get_irq_chip(data); 35 struct irq_chip *chip = irq_data_get_irq_chip(data);
diff --git a/arch/sh/boards/mach-se/7722/irq.c b/arch/sh/boards/mach-se/7722/irq.c
index 60aebd14ccf8..24c74a88290c 100644
--- a/arch/sh/boards/mach-se/7722/irq.c
+++ b/arch/sh/boards/mach-se/7722/irq.c
@@ -28,7 +28,7 @@
28static void __iomem *se7722_irq_regs; 28static void __iomem *se7722_irq_regs;
29struct irq_domain *se7722_irq_domain; 29struct irq_domain *se7722_irq_domain;
30 30
31static void se7722_irq_demux(unsigned int irq, struct irq_desc *desc) 31static void se7722_irq_demux(struct irq_desc *desc)
32{ 32{
33 struct irq_data *data = irq_desc_get_irq_data(desc); 33 struct irq_data *data = irq_desc_get_irq_data(desc);
34 struct irq_chip *chip = irq_data_get_irq_chip(data); 34 struct irq_chip *chip = irq_data_get_irq_chip(data);
diff --git a/arch/sh/boards/mach-se/7724/irq.c b/arch/sh/boards/mach-se/7724/irq.c
index 9f2033898652..64e681e66c57 100644
--- a/arch/sh/boards/mach-se/7724/irq.c
+++ b/arch/sh/boards/mach-se/7724/irq.c
@@ -92,7 +92,7 @@ static struct irq_chip se7724_irq_chip __read_mostly = {
92 .irq_unmask = enable_se7724_irq, 92 .irq_unmask = enable_se7724_irq,
93}; 93};
94 94
95static void se7724_irq_demux(unsigned int __irq, struct irq_desc *desc) 95static void se7724_irq_demux(struct irq_desc *desc)
96{ 96{
97 unsigned int irq = irq_desc_get_irq(desc); 97 unsigned int irq = irq_desc_get_irq(desc);
98 struct fpga_irq set = get_fpga_irq(irq); 98 struct fpga_irq set = get_fpga_irq(irq);
diff --git a/arch/sh/boards/mach-x3proto/gpio.c b/arch/sh/boards/mach-x3proto/gpio.c
index 24555c364d5b..1fb2cbee25f2 100644
--- a/arch/sh/boards/mach-x3proto/gpio.c
+++ b/arch/sh/boards/mach-x3proto/gpio.c
@@ -60,7 +60,7 @@ static int x3proto_gpio_to_irq(struct gpio_chip *chip, unsigned gpio)
60 return virq; 60 return virq;
61} 61}
62 62
63static void x3proto_gpio_irq_handler(unsigned int irq, struct irq_desc *desc) 63static void x3proto_gpio_irq_handler(struct irq_desc *desc)
64{ 64{
65 struct irq_data *data = irq_desc_get_irq_data(desc); 65 struct irq_data *data = irq_desc_get_irq_data(desc);
66 struct irq_chip *chip = irq_data_get_irq_chip(data); 66 struct irq_chip *chip = irq_data_get_irq_chip(data);
diff --git a/arch/sh/cchips/hd6446x/hd64461.c b/arch/sh/cchips/hd6446x/hd64461.c
index e9735616bdc8..8180092502f7 100644
--- a/arch/sh/cchips/hd6446x/hd64461.c
+++ b/arch/sh/cchips/hd6446x/hd64461.c
@@ -56,7 +56,7 @@ static struct irq_chip hd64461_irq_chip = {
56 .irq_unmask = hd64461_unmask_irq, 56 .irq_unmask = hd64461_unmask_irq,
57}; 57};
58 58
59static void hd64461_irq_demux(unsigned int irq, struct irq_desc *desc) 59static void hd64461_irq_demux(struct irq_desc *desc)
60{ 60{
61 unsigned short intv = __raw_readw(HD64461_NIRR); 61 unsigned short intv = __raw_readw(HD64461_NIRR);
62 unsigned int ext_irq = HD64461_IRQBASE; 62 unsigned int ext_irq = HD64461_IRQBASE;
diff --git a/arch/sparc/kernel/leon_kernel.c b/arch/sparc/kernel/leon_kernel.c
index 0299f052a2ef..42efcf85f721 100644
--- a/arch/sparc/kernel/leon_kernel.c
+++ b/arch/sparc/kernel/leon_kernel.c
@@ -53,7 +53,7 @@ static inline unsigned int leon_eirq_get(int cpu)
53} 53}
54 54
55/* Handle one or multiple IRQs from the extended interrupt controller */ 55/* Handle one or multiple IRQs from the extended interrupt controller */
56static void leon_handle_ext_irq(unsigned int irq, struct irq_desc *desc) 56static void leon_handle_ext_irq(struct irq_desc *desc)
57{ 57{
58 unsigned int eirq; 58 unsigned int eirq;
59 struct irq_bucket *p; 59 struct irq_bucket *p;
diff --git a/arch/sparc/kernel/leon_pci_grpci1.c b/arch/sparc/kernel/leon_pci_grpci1.c
index 3382f7b3eeef..1e77128a8f88 100644
--- a/arch/sparc/kernel/leon_pci_grpci1.c
+++ b/arch/sparc/kernel/leon_pci_grpci1.c
@@ -357,7 +357,7 @@ static struct irq_chip grpci1_irq = {
357}; 357};
358 358
359/* Handle one or multiple IRQs from the PCI core */ 359/* Handle one or multiple IRQs from the PCI core */
360static void grpci1_pci_flow_irq(unsigned int irq, struct irq_desc *desc) 360static void grpci1_pci_flow_irq(struct irq_desc *desc)
361{ 361{
362 struct grpci1_priv *priv = grpci1priv; 362 struct grpci1_priv *priv = grpci1priv;
363 int i, ack = 0; 363 int i, ack = 0;
diff --git a/arch/sparc/kernel/leon_pci_grpci2.c b/arch/sparc/kernel/leon_pci_grpci2.c
index 814fb1729b12..f727c4de1316 100644
--- a/arch/sparc/kernel/leon_pci_grpci2.c
+++ b/arch/sparc/kernel/leon_pci_grpci2.c
@@ -498,7 +498,7 @@ static struct irq_chip grpci2_irq = {
498}; 498};
499 499
500/* Handle one or multiple IRQs from the PCI core */ 500/* Handle one or multiple IRQs from the PCI core */
501static void grpci2_pci_flow_irq(unsigned int irq, struct irq_desc *desc) 501static void grpci2_pci_flow_irq(struct irq_desc *desc)
502{ 502{
503 struct grpci2_priv *priv = grpci2priv; 503 struct grpci2_priv *priv = grpci2priv;
504 int i, ack = 0; 504 int i, ack = 0;
diff --git a/arch/tile/kernel/pci_gx.c b/arch/tile/kernel/pci_gx.c
index b3f73fd764a3..4c017d0d2de8 100644
--- a/arch/tile/kernel/pci_gx.c
+++ b/arch/tile/kernel/pci_gx.c
@@ -304,17 +304,16 @@ static struct irq_chip tilegx_legacy_irq_chip = {
304 * to Linux which just calls handle_level_irq() after clearing the 304 * to Linux which just calls handle_level_irq() after clearing the
305 * MAC INTx Assert status bit associated with this interrupt. 305 * MAC INTx Assert status bit associated with this interrupt.
306 */ 306 */
307static void trio_handle_level_irq(unsigned int __irq, struct irq_desc *desc) 307static void trio_handle_level_irq(struct irq_desc *desc)
308{ 308{
309 struct pci_controller *controller = irq_desc_get_handler_data(desc); 309 struct pci_controller *controller = irq_desc_get_handler_data(desc);
310 gxio_trio_context_t *trio_context = controller->trio; 310 gxio_trio_context_t *trio_context = controller->trio;
311 uint64_t intx = (uint64_t)irq_desc_get_chip_data(desc); 311 uint64_t intx = (uint64_t)irq_desc_get_chip_data(desc);
312 unsigned int irq = irq_desc_get_irq(desc);
313 int mac = controller->mac; 312 int mac = controller->mac;
314 unsigned int reg_offset; 313 unsigned int reg_offset;
315 uint64_t level_mask; 314 uint64_t level_mask;
316 315
317 handle_level_irq(irq, desc); 316 handle_level_irq(desc);
318 317
319 /* 318 /*
320 * Clear the INTx Level status, otherwise future interrupts are 319 * Clear the INTx Level status, otherwise future interrupts are
diff --git a/arch/unicore32/kernel/irq.c b/arch/unicore32/kernel/irq.c
index c53729d92e8d..eb1fd0030359 100644
--- a/arch/unicore32/kernel/irq.c
+++ b/arch/unicore32/kernel/irq.c
@@ -112,7 +112,7 @@ static struct irq_chip puv3_low_gpio_chip = {
112 * irq_controller_lock held, and IRQs disabled. Decode the IRQ 112 * irq_controller_lock held, and IRQs disabled. Decode the IRQ
113 * and call the handler. 113 * and call the handler.
114 */ 114 */
115static void puv3_gpio_handler(unsigned int __irq, struct irq_desc *desc) 115static void puv3_gpio_handler(struct irq_desc *desc)
116{ 116{
117 unsigned int mask, irq; 117 unsigned int mask, irq;
118 118
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 7aef2d52daa0..328c8352480c 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1006,7 +1006,7 @@ config X86_THERMAL_VECTOR
1006 depends on X86_MCE_INTEL 1006 depends on X86_MCE_INTEL
1007 1007
1008config X86_LEGACY_VM86 1008config X86_LEGACY_VM86
1009 bool "Legacy VM86 support (obsolete)" 1009 bool "Legacy VM86 support"
1010 default n 1010 default n
1011 depends on X86_32 1011 depends on X86_32
1012 ---help--- 1012 ---help---
@@ -1018,19 +1018,20 @@ config X86_LEGACY_VM86
1018 available to accelerate real mode DOS programs. However, any 1018 available to accelerate real mode DOS programs. However, any
1019 recent version of DOSEMU, X, or vbetool should be fully 1019 recent version of DOSEMU, X, or vbetool should be fully
1020 functional even without kernel VM86 support, as they will all 1020 functional even without kernel VM86 support, as they will all
1021 fall back to (pretty well performing) software emulation. 1021 fall back to software emulation. Nevertheless, if you are using
1022 a 16-bit DOS program where 16-bit performance matters, vm86
1023 mode might be faster than emulation and you might want to
1024 enable this option.
1022 1025
1023 Anything that works on a 64-bit kernel is unlikely to need 1026 Note that any app that works on a 64-bit kernel is unlikely to
1024 this option, as 64-bit kernels don't, and can't, support V8086 1027 need this option, as 64-bit kernels don't, and can't, support
1025 mode. This option is also unrelated to 16-bit protected mode 1028 V8086 mode. This option is also unrelated to 16-bit protected
1026 and is not needed to run most 16-bit programs under Wine. 1029 mode and is not needed to run most 16-bit programs under Wine.
1027 1030
1028 Enabling this option adds considerable attack surface to the 1031 Enabling this option increases the complexity of the kernel
1029 kernel and slows down system calls and exception handling. 1032 and slows down exception handling a tiny bit.
1030 1033
1031 Unless you use very old userspace or need the last drop of 1034 If unsure, say N here.
1032 performance in your real mode DOS games and can't use KVM,
1033 say N here.
1034 1035
1035config VM86 1036config VM86
1036 bool 1037 bool
diff --git a/arch/x86/entry/syscalls/syscall_32.tbl b/arch/x86/entry/syscalls/syscall_32.tbl
index 477bfa6db370..7663c455b9f6 100644
--- a/arch/x86/entry/syscalls/syscall_32.tbl
+++ b/arch/x86/entry/syscalls/syscall_32.tbl
@@ -381,3 +381,4 @@
381372 i386 recvmsg sys_recvmsg compat_sys_recvmsg 381372 i386 recvmsg sys_recvmsg compat_sys_recvmsg
382373 i386 shutdown sys_shutdown 382373 i386 shutdown sys_shutdown
383374 i386 userfaultfd sys_userfaultfd 383374 i386 userfaultfd sys_userfaultfd
384375 i386 membarrier sys_membarrier
diff --git a/arch/x86/entry/syscalls/syscall_64.tbl b/arch/x86/entry/syscalls/syscall_64.tbl
index 81c490634db9..278842fdf1f6 100644
--- a/arch/x86/entry/syscalls/syscall_64.tbl
+++ b/arch/x86/entry/syscalls/syscall_64.tbl
@@ -330,6 +330,7 @@
330321 common bpf sys_bpf 330321 common bpf sys_bpf
331322 64 execveat stub_execveat 331322 64 execveat stub_execveat
332323 common userfaultfd sys_userfaultfd 332323 common userfaultfd sys_userfaultfd
333324 common membarrier sys_membarrier
333 334
334# 335#
335# x32-specific system call numbers start at 512 to avoid cache impact 336# x32-specific system call numbers start at 512 to avoid cache impact
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 477fc28050e4..e6cf2ad350d1 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -241,6 +241,7 @@
241#define X86_FEATURE_AVX512PF ( 9*32+26) /* AVX-512 Prefetch */ 241#define X86_FEATURE_AVX512PF ( 9*32+26) /* AVX-512 Prefetch */
242#define X86_FEATURE_AVX512ER ( 9*32+27) /* AVX-512 Exponential and Reciprocal */ 242#define X86_FEATURE_AVX512ER ( 9*32+27) /* AVX-512 Exponential and Reciprocal */
243#define X86_FEATURE_AVX512CD ( 9*32+28) /* AVX-512 Conflict Detection */ 243#define X86_FEATURE_AVX512CD ( 9*32+28) /* AVX-512 Conflict Detection */
244#define X86_FEATURE_SHA_NI ( 9*32+29) /* SHA1/SHA256 Instruction Extensions */
244 245
245/* Extended state features, CPUID level 0x0000000d:1 (eax), word 10 */ 246/* Extended state features, CPUID level 0x0000000d:1 (eax), word 10 */
246#define X86_FEATURE_XSAVEOPT (10*32+ 0) /* XSAVEOPT */ 247#define X86_FEATURE_XSAVEOPT (10*32+ 0) /* XSAVEOPT */
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
index 155162ea0e00..ab5f1d447ef9 100644
--- a/arch/x86/include/asm/efi.h
+++ b/arch/x86/include/asm/efi.h
@@ -86,6 +86,16 @@ extern u64 asmlinkage efi_call(void *fp, ...);
86extern void __iomem *__init efi_ioremap(unsigned long addr, unsigned long size, 86extern void __iomem *__init efi_ioremap(unsigned long addr, unsigned long size,
87 u32 type, u64 attribute); 87 u32 type, u64 attribute);
88 88
89/*
90 * CONFIG_KASAN may redefine memset to __memset. __memset function is present
91 * only in kernel binary. Since the EFI stub linked into a separate binary it
92 * doesn't have __memset(). So we should use standard memset from
93 * arch/x86/boot/compressed/string.c. The same applies to memcpy and memmove.
94 */
95#undef memcpy
96#undef memset
97#undef memmove
98
89#endif /* CONFIG_X86_32 */ 99#endif /* CONFIG_X86_32 */
90 100
91extern struct efi_scratch efi_scratch; 101extern struct efi_scratch efi_scratch;
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index c12e845f59e6..2beee0382088 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -40,6 +40,7 @@
40 40
41#define KVM_PIO_PAGE_OFFSET 1 41#define KVM_PIO_PAGE_OFFSET 1
42#define KVM_COALESCED_MMIO_PAGE_OFFSET 2 42#define KVM_COALESCED_MMIO_PAGE_OFFSET 2
43#define KVM_HALT_POLL_NS_DEFAULT 500000
43 44
44#define KVM_IRQCHIP_NUM_PINS KVM_IOAPIC_NUM_PINS 45#define KVM_IRQCHIP_NUM_PINS KVM_IOAPIC_NUM_PINS
45 46
@@ -711,6 +712,7 @@ struct kvm_vcpu_stat {
711 u32 nmi_window_exits; 712 u32 nmi_window_exits;
712 u32 halt_exits; 713 u32 halt_exits;
713 u32 halt_successful_poll; 714 u32 halt_successful_poll;
715 u32 halt_attempted_poll;
714 u32 halt_wakeup; 716 u32 halt_wakeup;
715 u32 request_irq_exits; 717 u32 request_irq_exits;
716 u32 irq_exits; 718 u32 irq_exits;
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index c1c0a1c14344..b98b471a3b7e 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -331,6 +331,7 @@
331/* C1E active bits in int pending message */ 331/* C1E active bits in int pending message */
332#define K8_INTP_C1E_ACTIVE_MASK 0x18000000 332#define K8_INTP_C1E_ACTIVE_MASK 0x18000000
333#define MSR_K8_TSEG_ADDR 0xc0010112 333#define MSR_K8_TSEG_ADDR 0xc0010112
334#define MSR_K8_TSEG_MASK 0xc0010113
334#define K8_MTRRFIXRANGE_DRAM_ENABLE 0x00040000 /* MtrrFixDramEn bit */ 335#define K8_MTRRFIXRANGE_DRAM_ENABLE 0x00040000 /* MtrrFixDramEn bit */
335#define K8_MTRRFIXRANGE_DRAM_MODIFY 0x00080000 /* MtrrFixDramModEn bit */ 336#define K8_MTRRFIXRANGE_DRAM_MODIFY 0x00080000 /* MtrrFixDramModEn bit */
336#define K8_MTRR_RDMEM_WRMEM_MASK 0x18181818 /* Mask: RdMem|WrMem */ 337#define K8_MTRR_RDMEM_WRMEM_MASK 0x18181818 /* Mask: RdMem|WrMem */
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index ce029e4fa7c6..31247b5bff7c 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -97,7 +97,6 @@ struct pv_lazy_ops {
97struct pv_time_ops { 97struct pv_time_ops {
98 unsigned long long (*sched_clock)(void); 98 unsigned long long (*sched_clock)(void);
99 unsigned long long (*steal_clock)(int cpu); 99 unsigned long long (*steal_clock)(int cpu);
100 unsigned long (*get_tsc_khz)(void);
101}; 100};
102 101
103struct pv_cpu_ops { 102struct pv_cpu_ops {
diff --git a/arch/x86/include/asm/qspinlock.h b/arch/x86/include/asm/qspinlock.h
index 9d51fae1cba3..eaba08076030 100644
--- a/arch/x86/include/asm/qspinlock.h
+++ b/arch/x86/include/asm/qspinlock.h
@@ -39,18 +39,27 @@ static inline void queued_spin_unlock(struct qspinlock *lock)
39} 39}
40#endif 40#endif
41 41
42#define virt_queued_spin_lock virt_queued_spin_lock 42#ifdef CONFIG_PARAVIRT
43 43#define virt_spin_lock virt_spin_lock
44static inline bool virt_queued_spin_lock(struct qspinlock *lock) 44static inline bool virt_spin_lock(struct qspinlock *lock)
45{ 45{
46 if (!static_cpu_has(X86_FEATURE_HYPERVISOR)) 46 if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
47 return false; 47 return false;
48 48
49 while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0) 49 /*
50 cpu_relax(); 50 * On hypervisors without PARAVIRT_SPINLOCKS support we fall
51 * back to a Test-and-Set spinlock, because fair locks have
52 * horrible lock 'holder' preemption issues.
53 */
54
55 do {
56 while (atomic_read(&lock->val) != 0)
57 cpu_relax();
58 } while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0);
51 59
52 return true; 60 return true;
53} 61}
62#endif /* CONFIG_PARAVIRT */
54 63
55#include <asm-generic/qspinlock.h> 64#include <asm-generic/qspinlock.h>
56 65
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index c42827eb86cf..25f909362b7a 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -338,10 +338,15 @@ done:
338 338
339static void __init_or_module optimize_nops(struct alt_instr *a, u8 *instr) 339static void __init_or_module optimize_nops(struct alt_instr *a, u8 *instr)
340{ 340{
341 unsigned long flags;
342
341 if (instr[0] != 0x90) 343 if (instr[0] != 0x90)
342 return; 344 return;
343 345
346 local_irq_save(flags);
344 add_nops(instr + (a->instrlen - a->padlen), a->padlen); 347 add_nops(instr + (a->instrlen - a->padlen), a->padlen);
348 sync_core();
349 local_irq_restore(flags);
345 350
346 DUMP_BYTES(instr, a->instrlen, "%p: [%d:%d) optimized NOPs: ", 351 DUMP_BYTES(instr, a->instrlen, "%p: [%d:%d) optimized NOPs: ",
347 instr, a->instrlen - a->padlen, a->padlen); 352 instr, a->instrlen - a->padlen, a->padlen);
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 3ca3e46aa405..24e94ce454e2 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -336,6 +336,13 @@ static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen)
336 apic_write(APIC_LVTT, lvtt_value); 336 apic_write(APIC_LVTT, lvtt_value);
337 337
338 if (lvtt_value & APIC_LVT_TIMER_TSCDEADLINE) { 338 if (lvtt_value & APIC_LVT_TIMER_TSCDEADLINE) {
339 /*
340 * See Intel SDM: TSC-Deadline Mode chapter. In xAPIC mode,
341 * writing to the APIC LVTT and TSC_DEADLINE MSR isn't serialized.
342 * According to Intel, MFENCE can do the serialization here.
343 */
344 asm volatile("mfence" : : : "memory");
345
339 printk_once(KERN_DEBUG "TSC deadline timer enabled\n"); 346 printk_once(KERN_DEBUG "TSC deadline timer enabled\n");
340 return; 347 return;
341 } 348 }
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 38a76f826530..5c60bb162622 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -2522,6 +2522,7 @@ void __init setup_ioapic_dest(void)
2522 int pin, ioapic, irq, irq_entry; 2522 int pin, ioapic, irq, irq_entry;
2523 const struct cpumask *mask; 2523 const struct cpumask *mask;
2524 struct irq_data *idata; 2524 struct irq_data *idata;
2525 struct irq_chip *chip;
2525 2526
2526 if (skip_ioapic_setup == 1) 2527 if (skip_ioapic_setup == 1)
2527 return; 2528 return;
@@ -2545,9 +2546,9 @@ void __init setup_ioapic_dest(void)
2545 else 2546 else
2546 mask = apic->target_cpus(); 2547 mask = apic->target_cpus();
2547 2548
2548 irq_set_affinity(irq, mask); 2549 chip = irq_data_get_irq_chip(idata);
2550 chip->irq_set_affinity(idata, mask, false);
2549 } 2551 }
2550
2551} 2552}
2552#endif 2553#endif
2553 2554
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
index 1bbd0fe2c806..836d11b92811 100644
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
@@ -489,10 +489,8 @@ static int apic_set_affinity(struct irq_data *irq_data,
489 489
490 err = assign_irq_vector(irq, data, dest); 490 err = assign_irq_vector(irq, data, dest);
491 if (err) { 491 if (err) {
492 struct irq_data *top = irq_get_irq_data(irq);
493
494 if (assign_irq_vector(irq, data, 492 if (assign_irq_vector(irq, data,
495 irq_data_get_affinity_mask(top))) 493 irq_data_get_affinity_mask(irq_data)))
496 pr_err("Failed to recover vector for irq %d\n", irq); 494 pr_err("Failed to recover vector for irq %d\n", irq);
497 return err; 495 return err;
498 } 496 }
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 07ce52c22ec8..de22ea7ff82f 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1110,10 +1110,10 @@ void print_cpu_info(struct cpuinfo_x86 *c)
1110 else 1110 else
1111 printk(KERN_CONT "%d86", c->x86); 1111 printk(KERN_CONT "%d86", c->x86);
1112 1112
1113 printk(KERN_CONT " (fam: %02x, model: %02x", c->x86, c->x86_model); 1113 printk(KERN_CONT " (family: 0x%x, model: 0x%x", c->x86, c->x86_model);
1114 1114
1115 if (c->x86_mask || c->cpuid_level >= 0) 1115 if (c->x86_mask || c->cpuid_level >= 0)
1116 printk(KERN_CONT ", stepping: %02x)\n", c->x86_mask); 1116 printk(KERN_CONT ", stepping: 0x%x)\n", c->x86_mask);
1117 else 1117 else
1118 printk(KERN_CONT ")\n"); 1118 printk(KERN_CONT ")\n");
1119 1119
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index cd9b6d0b10bf..3fefebfbdf4b 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -2316,9 +2316,12 @@ static struct event_constraint *
2316intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx, 2316intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
2317 struct perf_event *event) 2317 struct perf_event *event)
2318{ 2318{
2319 struct event_constraint *c1 = cpuc->event_constraint[idx]; 2319 struct event_constraint *c1 = NULL;
2320 struct event_constraint *c2; 2320 struct event_constraint *c2;
2321 2321
2322 if (idx >= 0) /* fake does < 0 */
2323 c1 = cpuc->event_constraint[idx];
2324
2322 /* 2325 /*
2323 * first time only 2326 * first time only
2324 * - static constraint: no change across incremental scheduling calls 2327 * - static constraint: no change across incremental scheduling calls
diff --git a/arch/x86/kernel/cpu/perf_event_intel_bts.c b/arch/x86/kernel/cpu/perf_event_intel_bts.c
index 54690e885759..d1c0f254afbe 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_bts.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_bts.c
@@ -222,6 +222,7 @@ static void __bts_event_start(struct perf_event *event)
222 if (!buf || bts_buffer_is_full(buf, bts)) 222 if (!buf || bts_buffer_is_full(buf, bts))
223 return; 223 return;
224 224
225 event->hw.itrace_started = 1;
225 event->hw.state = 0; 226 event->hw.state = 0;
226 227
227 if (!buf->snapshot) 228 if (!buf->snapshot)
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index c80cf6699678..38da8f29a9c8 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -68,11 +68,10 @@ static inline void *current_stack(void)
68 return (void *)(current_stack_pointer() & ~(THREAD_SIZE - 1)); 68 return (void *)(current_stack_pointer() & ~(THREAD_SIZE - 1));
69} 69}
70 70
71static inline int 71static inline int execute_on_irq_stack(int overflow, struct irq_desc *desc)
72execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
73{ 72{
74 struct irq_stack *curstk, *irqstk; 73 struct irq_stack *curstk, *irqstk;
75 u32 *isp, *prev_esp, arg1, arg2; 74 u32 *isp, *prev_esp, arg1;
76 75
77 curstk = (struct irq_stack *) current_stack(); 76 curstk = (struct irq_stack *) current_stack();
78 irqstk = __this_cpu_read(hardirq_stack); 77 irqstk = __this_cpu_read(hardirq_stack);
@@ -98,8 +97,8 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
98 asm volatile("xchgl %%ebx,%%esp \n" 97 asm volatile("xchgl %%ebx,%%esp \n"
99 "call *%%edi \n" 98 "call *%%edi \n"
100 "movl %%ebx,%%esp \n" 99 "movl %%ebx,%%esp \n"
101 : "=a" (arg1), "=d" (arg2), "=b" (isp) 100 : "=a" (arg1), "=b" (isp)
102 : "0" (irq), "1" (desc), "2" (isp), 101 : "0" (desc), "1" (isp),
103 "D" (desc->handle_irq) 102 "D" (desc->handle_irq)
104 : "memory", "cc", "ecx"); 103 : "memory", "cc", "ecx");
105 return 1; 104 return 1;
@@ -150,19 +149,15 @@ void do_softirq_own_stack(void)
150 149
151bool handle_irq(struct irq_desc *desc, struct pt_regs *regs) 150bool handle_irq(struct irq_desc *desc, struct pt_regs *regs)
152{ 151{
153 unsigned int irq; 152 int overflow = check_stack_overflow();
154 int overflow;
155
156 overflow = check_stack_overflow();
157 153
158 if (IS_ERR_OR_NULL(desc)) 154 if (IS_ERR_OR_NULL(desc))
159 return false; 155 return false;
160 156
161 irq = irq_desc_get_irq(desc); 157 if (user_mode(regs) || !execute_on_irq_stack(overflow, desc)) {
162 if (user_mode(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
163 if (unlikely(overflow)) 158 if (unlikely(overflow))
164 print_stack_overflow(); 159 print_stack_overflow();
165 generic_handle_irq_desc(irq, desc); 160 generic_handle_irq_desc(desc);
166 } 161 }
167 162
168 return true; 163 return true;
diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
index ff16ccb918f2..c767cf2bc80a 100644
--- a/arch/x86/kernel/irq_64.c
+++ b/arch/x86/kernel/irq_64.c
@@ -75,6 +75,6 @@ bool handle_irq(struct irq_desc *desc, struct pt_regs *regs)
75 if (unlikely(IS_ERR_OR_NULL(desc))) 75 if (unlikely(IS_ERR_OR_NULL(desc)))
76 return false; 76 return false;
77 77
78 generic_handle_irq_desc(irq_desc_get_irq(desc), desc); 78 generic_handle_irq_desc(desc);
79 return true; 79 return true;
80} 80}
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
index 2bcc0525f1c1..6acc9dd91f36 100644
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
@@ -58,7 +58,7 @@ static struct ldt_struct *alloc_ldt_struct(int size)
58 if (alloc_size > PAGE_SIZE) 58 if (alloc_size > PAGE_SIZE)
59 new_ldt->entries = vzalloc(alloc_size); 59 new_ldt->entries = vzalloc(alloc_size);
60 else 60 else
61 new_ldt->entries = kzalloc(PAGE_SIZE, GFP_KERNEL); 61 new_ldt->entries = (void *)get_zeroed_page(GFP_KERNEL);
62 62
63 if (!new_ldt->entries) { 63 if (!new_ldt->entries) {
64 kfree(new_ldt); 64 kfree(new_ldt);
@@ -95,7 +95,7 @@ static void free_ldt_struct(struct ldt_struct *ldt)
95 if (ldt->size * LDT_ENTRY_SIZE > PAGE_SIZE) 95 if (ldt->size * LDT_ENTRY_SIZE > PAGE_SIZE)
96 vfree(ldt->entries); 96 vfree(ldt->entries);
97 else 97 else
98 kfree(ldt->entries); 98 free_page((unsigned long)ldt->entries);
99 kfree(ldt); 99 kfree(ldt);
100} 100}
101 101
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index 84b8ef82a159..1b55de1267cf 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -131,8 +131,8 @@ void dma_generic_free_coherent(struct device *dev, size_t size, void *vaddr,
131 131
132bool arch_dma_alloc_attrs(struct device **dev, gfp_t *gfp) 132bool arch_dma_alloc_attrs(struct device **dev, gfp_t *gfp)
133{ 133{
134 *gfp = dma_alloc_coherent_gfp_flags(*dev, *gfp);
135 *gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32); 134 *gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
135 *gfp = dma_alloc_coherent_gfp_flags(*dev, *gfp);
136 136
137 if (!*dev) 137 if (!*dev)
138 *dev = &x86_dma_fallback_dev; 138 *dev = &x86_dma_fallback_dev;
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index c8d52cb4cb6e..c3f7602cd038 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -21,6 +21,7 @@
21#include <asm/hypervisor.h> 21#include <asm/hypervisor.h>
22#include <asm/nmi.h> 22#include <asm/nmi.h>
23#include <asm/x86_init.h> 23#include <asm/x86_init.h>
24#include <asm/geode.h>
24 25
25unsigned int __read_mostly cpu_khz; /* TSC clocks / usec, not used here */ 26unsigned int __read_mostly cpu_khz; /* TSC clocks / usec, not used here */
26EXPORT_SYMBOL(cpu_khz); 27EXPORT_SYMBOL(cpu_khz);
@@ -1013,15 +1014,17 @@ EXPORT_SYMBOL_GPL(mark_tsc_unstable);
1013 1014
1014static void __init check_system_tsc_reliable(void) 1015static void __init check_system_tsc_reliable(void)
1015{ 1016{
1016#ifdef CONFIG_MGEODE_LX 1017#if defined(CONFIG_MGEODEGX1) || defined(CONFIG_MGEODE_LX) || defined(CONFIG_X86_GENERIC)
1017 /* RTSC counts during suspend */ 1018 if (is_geode_lx()) {
1019 /* RTSC counts during suspend */
1018#define RTSC_SUSP 0x100 1020#define RTSC_SUSP 0x100
1019 unsigned long res_low, res_high; 1021 unsigned long res_low, res_high;
1020 1022
1021 rdmsr_safe(MSR_GEODE_BUSCONT_CONF0, &res_low, &res_high); 1023 rdmsr_safe(MSR_GEODE_BUSCONT_CONF0, &res_low, &res_high);
1022 /* Geode_LX - the OLPC CPU has a very reliable TSC */ 1024 /* Geode_LX - the OLPC CPU has a very reliable TSC */
1023 if (res_low & RTSC_SUSP) 1025 if (res_low & RTSC_SUSP)
1024 tsc_clocksource_reliable = 1; 1026 tsc_clocksource_reliable = 1;
1027 }
1025#endif 1028#endif
1026 if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE)) 1029 if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE))
1027 tsc_clocksource_reliable = 1; 1030 tsc_clocksource_reliable = 1;
diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
index abd8b856bd2b..524619351961 100644
--- a/arch/x86/kernel/vm86_32.c
+++ b/arch/x86/kernel/vm86_32.c
@@ -45,6 +45,7 @@
45#include <linux/audit.h> 45#include <linux/audit.h>
46#include <linux/stddef.h> 46#include <linux/stddef.h>
47#include <linux/slab.h> 47#include <linux/slab.h>
48#include <linux/security.h>
48 49
49#include <asm/uaccess.h> 50#include <asm/uaccess.h>
50#include <asm/io.h> 51#include <asm/io.h>
@@ -232,6 +233,32 @@ static long do_sys_vm86(struct vm86plus_struct __user *user_vm86, bool plus)
232 struct pt_regs *regs = current_pt_regs(); 233 struct pt_regs *regs = current_pt_regs();
233 unsigned long err = 0; 234 unsigned long err = 0;
234 235
236 err = security_mmap_addr(0);
237 if (err) {
238 /*
239 * vm86 cannot virtualize the address space, so vm86 users
240 * need to manage the low 1MB themselves using mmap. Given
241 * that BIOS places important data in the first page, vm86
242 * is essentially useless if mmap_min_addr != 0. DOSEMU,
243 * for example, won't even bother trying to use vm86 if it
244 * can't map a page at virtual address 0.
245 *
246 * To reduce the available kernel attack surface, simply
247 * disallow vm86(old) for users who cannot mmap at va 0.
248 *
249 * The implementation of security_mmap_addr will allow
250 * suitably privileged users to map va 0 even if
251 * vm.mmap_min_addr is set above 0, and we want this
252 * behavior for vm86 as well, as it ensures that legacy
253 * tools like vbetool will not fail just because of
254 * vm.mmap_min_addr.
255 */
256 pr_info_once("Denied a call to vm86(old) from %s[%d] (uid: %d). Set the vm.mmap_min_addr sysctl to 0 and/or adjust LSM mmap_min_addr policy to enable vm86 if you are using a vm86-based DOS emulator.\n",
257 current->comm, task_pid_nr(current),
258 from_kuid_munged(&init_user_ns, current_uid()));
259 return -EPERM;
260 }
261
235 if (!vm86) { 262 if (!vm86) {
236 if (!(vm86 = kzalloc(sizeof(*vm86), GFP_KERNEL))) 263 if (!(vm86 = kzalloc(sizeof(*vm86), GFP_KERNEL)))
237 return -ENOMEM; 264 return -ENOMEM;
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 69088a1ba509..ff606f507913 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3322,7 +3322,7 @@ walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
3322 break; 3322 break;
3323 3323
3324 reserved |= is_shadow_zero_bits_set(&vcpu->arch.mmu, spte, 3324 reserved |= is_shadow_zero_bits_set(&vcpu->arch.mmu, spte,
3325 leaf); 3325 iterator.level);
3326 } 3326 }
3327 3327
3328 walk_shadow_page_lockless_end(vcpu); 3328 walk_shadow_page_lockless_end(vcpu);
@@ -3614,7 +3614,7 @@ static void
3614__reset_rsvds_bits_mask(struct kvm_vcpu *vcpu, 3614__reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
3615 struct rsvd_bits_validate *rsvd_check, 3615 struct rsvd_bits_validate *rsvd_check,
3616 int maxphyaddr, int level, bool nx, bool gbpages, 3616 int maxphyaddr, int level, bool nx, bool gbpages,
3617 bool pse) 3617 bool pse, bool amd)
3618{ 3618{
3619 u64 exb_bit_rsvd = 0; 3619 u64 exb_bit_rsvd = 0;
3620 u64 gbpages_bit_rsvd = 0; 3620 u64 gbpages_bit_rsvd = 0;
@@ -3631,7 +3631,7 @@ __reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
3631 * Non-leaf PML4Es and PDPEs reserve bit 8 (which would be the G bit for 3631 * Non-leaf PML4Es and PDPEs reserve bit 8 (which would be the G bit for
3632 * leaf entries) on AMD CPUs only. 3632 * leaf entries) on AMD CPUs only.
3633 */ 3633 */
3634 if (guest_cpuid_is_amd(vcpu)) 3634 if (amd)
3635 nonleaf_bit8_rsvd = rsvd_bits(8, 8); 3635 nonleaf_bit8_rsvd = rsvd_bits(8, 8);
3636 3636
3637 switch (level) { 3637 switch (level) {
@@ -3699,7 +3699,7 @@ static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
3699 __reset_rsvds_bits_mask(vcpu, &context->guest_rsvd_check, 3699 __reset_rsvds_bits_mask(vcpu, &context->guest_rsvd_check,
3700 cpuid_maxphyaddr(vcpu), context->root_level, 3700 cpuid_maxphyaddr(vcpu), context->root_level,
3701 context->nx, guest_cpuid_has_gbpages(vcpu), 3701 context->nx, guest_cpuid_has_gbpages(vcpu),
3702 is_pse(vcpu)); 3702 is_pse(vcpu), guest_cpuid_is_amd(vcpu));
3703} 3703}
3704 3704
3705static void 3705static void
@@ -3749,13 +3749,24 @@ static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu,
3749void 3749void
3750reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context) 3750reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
3751{ 3751{
3752 /*
3753 * Passing "true" to the last argument is okay; it adds a check
3754 * on bit 8 of the SPTEs which KVM doesn't use anyway.
3755 */
3752 __reset_rsvds_bits_mask(vcpu, &context->shadow_zero_check, 3756 __reset_rsvds_bits_mask(vcpu, &context->shadow_zero_check,
3753 boot_cpu_data.x86_phys_bits, 3757 boot_cpu_data.x86_phys_bits,
3754 context->shadow_root_level, context->nx, 3758 context->shadow_root_level, context->nx,
3755 guest_cpuid_has_gbpages(vcpu), is_pse(vcpu)); 3759 guest_cpuid_has_gbpages(vcpu), is_pse(vcpu),
3760 true);
3756} 3761}
3757EXPORT_SYMBOL_GPL(reset_shadow_zero_bits_mask); 3762EXPORT_SYMBOL_GPL(reset_shadow_zero_bits_mask);
3758 3763
3764static inline bool boot_cpu_is_amd(void)
3765{
3766 WARN_ON_ONCE(!tdp_enabled);
3767 return shadow_x_mask == 0;
3768}
3769
3759/* 3770/*
3760 * the direct page table on host, use as much mmu features as 3771 * the direct page table on host, use as much mmu features as
3761 * possible, however, kvm currently does not do execution-protection. 3772 * possible, however, kvm currently does not do execution-protection.
@@ -3764,11 +3775,11 @@ static void
3764reset_tdp_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, 3775reset_tdp_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
3765 struct kvm_mmu *context) 3776 struct kvm_mmu *context)
3766{ 3777{
3767 if (guest_cpuid_is_amd(vcpu)) 3778 if (boot_cpu_is_amd())
3768 __reset_rsvds_bits_mask(vcpu, &context->shadow_zero_check, 3779 __reset_rsvds_bits_mask(vcpu, &context->shadow_zero_check,
3769 boot_cpu_data.x86_phys_bits, 3780 boot_cpu_data.x86_phys_bits,
3770 context->shadow_root_level, false, 3781 context->shadow_root_level, false,
3771 cpu_has_gbpages, true); 3782 cpu_has_gbpages, true, true);
3772 else 3783 else
3773 __reset_rsvds_bits_mask_ept(&context->shadow_zero_check, 3784 __reset_rsvds_bits_mask_ept(&context->shadow_zero_check,
3774 boot_cpu_data.x86_phys_bits, 3785 boot_cpu_data.x86_phys_bits,
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index fdb8cb63a6c0..94b7d15db3fc 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -202,6 +202,7 @@ module_param(npt, int, S_IRUGO);
202static int nested = true; 202static int nested = true;
203module_param(nested, int, S_IRUGO); 203module_param(nested, int, S_IRUGO);
204 204
205static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
205static void svm_flush_tlb(struct kvm_vcpu *vcpu); 206static void svm_flush_tlb(struct kvm_vcpu *vcpu);
206static void svm_complete_interrupts(struct vcpu_svm *svm); 207static void svm_complete_interrupts(struct vcpu_svm *svm);
207 208
@@ -1263,7 +1264,8 @@ static void init_vmcb(struct vcpu_svm *svm, bool init_event)
1263 * svm_set_cr0() sets PG and WP and clears NW and CD on save->cr0. 1264 * svm_set_cr0() sets PG and WP and clears NW and CD on save->cr0.
1264 * It also updates the guest-visible cr0 value. 1265 * It also updates the guest-visible cr0 value.
1265 */ 1266 */
1266 (void)kvm_set_cr0(&svm->vcpu, X86_CR0_NW | X86_CR0_CD | X86_CR0_ET); 1267 svm_set_cr0(&svm->vcpu, X86_CR0_NW | X86_CR0_CD | X86_CR0_ET);
1268 kvm_mmu_reset_context(&svm->vcpu);
1267 1269
1268 save->cr4 = X86_CR4_PAE; 1270 save->cr4 = X86_CR4_PAE;
1269 /* rdx = ?? */ 1271 /* rdx = ?? */
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index d01986832afc..64076740251e 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -6064,6 +6064,8 @@ static __init int hardware_setup(void)
6064 memcpy(vmx_msr_bitmap_longmode_x2apic, 6064 memcpy(vmx_msr_bitmap_longmode_x2apic,
6065 vmx_msr_bitmap_longmode, PAGE_SIZE); 6065 vmx_msr_bitmap_longmode, PAGE_SIZE);
6066 6066
6067 set_bit(0, vmx_vpid_bitmap); /* 0 is reserved for host */
6068
6067 if (enable_apicv) { 6069 if (enable_apicv) {
6068 for (msr = 0x800; msr <= 0x8ff; msr++) 6070 for (msr = 0x800; msr <= 0x8ff; msr++)
6069 vmx_disable_intercept_msr_read_x2apic(msr); 6071 vmx_disable_intercept_msr_read_x2apic(msr);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index a60bdbccff51..991466bf8dee 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -149,6 +149,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
149 { "nmi_window", VCPU_STAT(nmi_window_exits) }, 149 { "nmi_window", VCPU_STAT(nmi_window_exits) },
150 { "halt_exits", VCPU_STAT(halt_exits) }, 150 { "halt_exits", VCPU_STAT(halt_exits) },
151 { "halt_successful_poll", VCPU_STAT(halt_successful_poll) }, 151 { "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
152 { "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
152 { "halt_wakeup", VCPU_STAT(halt_wakeup) }, 153 { "halt_wakeup", VCPU_STAT(halt_wakeup) },
153 { "hypercalls", VCPU_STAT(hypercalls) }, 154 { "hypercalls", VCPU_STAT(hypercalls) },
154 { "request_irq", VCPU_STAT(request_irq_exits) }, 155 { "request_irq", VCPU_STAT(request_irq_exits) },
@@ -2189,6 +2190,8 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
2189 case MSR_IA32_LASTINTFROMIP: 2190 case MSR_IA32_LASTINTFROMIP:
2190 case MSR_IA32_LASTINTTOIP: 2191 case MSR_IA32_LASTINTTOIP:
2191 case MSR_K8_SYSCFG: 2192 case MSR_K8_SYSCFG:
2193 case MSR_K8_TSEG_ADDR:
2194 case MSR_K8_TSEG_MASK:
2192 case MSR_K7_HWCR: 2195 case MSR_K7_HWCR:
2193 case MSR_VM_HSAVE_PA: 2196 case MSR_VM_HSAVE_PA:
2194 case MSR_K8_INT_PENDING_MSG: 2197 case MSR_K8_INT_PENDING_MSG:
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index 161804de124a..a0d09f6c6533 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -1015,7 +1015,7 @@ static struct clock_event_device lguest_clockevent = {
1015 * This is the Guest timer interrupt handler (hardware interrupt 0). We just 1015 * This is the Guest timer interrupt handler (hardware interrupt 0). We just
1016 * call the clockevent infrastructure and it does whatever needs doing. 1016 * call the clockevent infrastructure and it does whatever needs doing.
1017 */ 1017 */
1018static void lguest_time_irq(unsigned int irq, struct irq_desc *desc) 1018static void lguest_time_irq(struct irq_desc *desc)
1019{ 1019{
1020 unsigned long flags; 1020 unsigned long flags;
1021 1021
diff --git a/arch/x86/mm/srat.c b/arch/x86/mm/srat.c
index 66338a60aa6e..c2aea63bee20 100644
--- a/arch/x86/mm/srat.c
+++ b/arch/x86/mm/srat.c
@@ -192,10 +192,11 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
192 192
193 node_set(node, numa_nodes_parsed); 193 node_set(node, numa_nodes_parsed);
194 194
195 pr_info("SRAT: Node %u PXM %u [mem %#010Lx-%#010Lx]%s\n", 195 pr_info("SRAT: Node %u PXM %u [mem %#010Lx-%#010Lx]%s%s\n",
196 node, pxm, 196 node, pxm,
197 (unsigned long long) start, (unsigned long long) end - 1, 197 (unsigned long long) start, (unsigned long long) end - 1,
198 hotpluggable ? " hotplug" : ""); 198 hotpluggable ? " hotplug" : "",
199 ma->flags & ACPI_SRAT_MEM_NON_VOLATILE ? " non-volatile" : "");
199 200
200 /* Mark hotplug range in memblock. */ 201 /* Mark hotplug range in memblock. */
201 if (hotpluggable && memblock_mark_hotplug(start, ma->length)) 202 if (hotpluggable && memblock_mark_hotplug(start, ma->length))
diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
index 09d3afc0a181..dc78a4a9a466 100644
--- a/arch/x86/pci/common.c
+++ b/arch/x86/pci/common.c
@@ -166,6 +166,7 @@ void pcibios_fixup_bus(struct pci_bus *b)
166{ 166{
167 struct pci_dev *dev; 167 struct pci_dev *dev;
168 168
169 pci_read_bridge_bases(b);
169 list_for_each_entry(dev, &b->devices, bus_list) 170 list_for_each_entry(dev, &b->devices, bus_list)
170 pcibios_fixup_device_resources(dev); 171 pcibios_fixup_device_resources(dev);
171} 172}
diff --git a/arch/xtensa/kernel/pci.c b/arch/xtensa/kernel/pci.c
index d27b4dcf221f..b848cc3dc913 100644
--- a/arch/xtensa/kernel/pci.c
+++ b/arch/xtensa/kernel/pci.c
@@ -210,6 +210,10 @@ subsys_initcall(pcibios_init);
210 210
211void pcibios_fixup_bus(struct pci_bus *bus) 211void pcibios_fixup_bus(struct pci_bus *bus)
212{ 212{
213 if (bus->parent) {
214 /* This is a subordinate bridge */
215 pci_read_bridge_bases(bus);
216 }
213} 217}
214 218
215void pcibios_set_master(struct pci_dev *dev) 219void pcibios_set_master(struct pci_dev *dev)
diff --git a/block/bio-integrity.c b/block/bio-integrity.c
index 4aecca79374a..14b8faf8b09d 100644
--- a/block/bio-integrity.c
+++ b/block/bio-integrity.c
@@ -140,6 +140,11 @@ int bio_integrity_add_page(struct bio *bio, struct page *page,
140 140
141 iv = bip->bip_vec + bip->bip_vcnt; 141 iv = bip->bip_vec + bip->bip_vcnt;
142 142
143 if (bip->bip_vcnt &&
144 bvec_gap_to_prev(bdev_get_queue(bio->bi_bdev),
145 &bip->bip_vec[bip->bip_vcnt - 1], offset))
146 return 0;
147
143 iv->bv_page = page; 148 iv->bv_page = page;
144 iv->bv_len = len; 149 iv->bv_len = len;
145 iv->bv_offset = offset; 150 iv->bv_offset = offset;
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index ac8370cb2515..55512dd62633 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -370,6 +370,9 @@ static void blkg_destroy_all(struct request_queue *q)
370 blkg_destroy(blkg); 370 blkg_destroy(blkg);
371 spin_unlock(&blkcg->lock); 371 spin_unlock(&blkcg->lock);
372 } 372 }
373
374 q->root_blkg = NULL;
375 q->root_rl.blkg = NULL;
373} 376}
374 377
375/* 378/*
diff --git a/block/blk-integrity.c b/block/blk-integrity.c
index f548b64be092..75f29cf70188 100644
--- a/block/blk-integrity.c
+++ b/block/blk-integrity.c
@@ -204,6 +204,9 @@ bool blk_integrity_merge_rq(struct request_queue *q, struct request *req,
204 q->limits.max_integrity_segments) 204 q->limits.max_integrity_segments)
205 return false; 205 return false;
206 206
207 if (integrity_req_gap_back_merge(req, next->bio))
208 return false;
209
207 return true; 210 return true;
208} 211}
209EXPORT_SYMBOL(blk_integrity_merge_rq); 212EXPORT_SYMBOL(blk_integrity_merge_rq);
diff --git a/block/blk-map.c b/block/blk-map.c
index 233841644c9d..f565e11f465a 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -9,6 +9,24 @@
9 9
10#include "blk.h" 10#include "blk.h"
11 11
12static bool iovec_gap_to_prv(struct request_queue *q,
13 struct iovec *prv, struct iovec *cur)
14{
15 unsigned long prev_end;
16
17 if (!queue_virt_boundary(q))
18 return false;
19
20 if (prv->iov_base == NULL && prv->iov_len == 0)
21 /* prv is not set - don't check */
22 return false;
23
24 prev_end = (unsigned long)(prv->iov_base + prv->iov_len);
25
26 return (((unsigned long)cur->iov_base & queue_virt_boundary(q)) ||
27 prev_end & queue_virt_boundary(q));
28}
29
12int blk_rq_append_bio(struct request_queue *q, struct request *rq, 30int blk_rq_append_bio(struct request_queue *q, struct request *rq,
13 struct bio *bio) 31 struct bio *bio)
14{ 32{
@@ -67,7 +85,7 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
67 struct bio *bio; 85 struct bio *bio;
68 int unaligned = 0; 86 int unaligned = 0;
69 struct iov_iter i; 87 struct iov_iter i;
70 struct iovec iov; 88 struct iovec iov, prv = {.iov_base = NULL, .iov_len = 0};
71 89
72 if (!iter || !iter->count) 90 if (!iter || !iter->count)
73 return -EINVAL; 91 return -EINVAL;
@@ -81,8 +99,12 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
81 /* 99 /*
82 * Keep going so we check length of all segments 100 * Keep going so we check length of all segments
83 */ 101 */
84 if (uaddr & queue_dma_alignment(q)) 102 if ((uaddr & queue_dma_alignment(q)) ||
103 iovec_gap_to_prv(q, &prv, &iov))
85 unaligned = 1; 104 unaligned = 1;
105
106 prv.iov_base = iov.iov_base;
107 prv.iov_len = iov.iov_len;
86 } 108 }
87 109
88 if (unaligned || (q->dma_pad_mask & iter->count) || map_data) 110 if (unaligned || (q->dma_pad_mask & iter->count) || map_data)
diff --git a/block/blk-merge.c b/block/blk-merge.c
index d088cffb8105..c4e9c37f3e38 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -66,36 +66,33 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
66 struct bio *bio, 66 struct bio *bio,
67 struct bio_set *bs) 67 struct bio_set *bs)
68{ 68{
69 struct bio *split; 69 struct bio_vec bv, bvprv, *bvprvp = NULL;
70 struct bio_vec bv, bvprv;
71 struct bvec_iter iter; 70 struct bvec_iter iter;
72 unsigned seg_size = 0, nsegs = 0, sectors = 0; 71 unsigned seg_size = 0, nsegs = 0, sectors = 0;
73 int prev = 0;
74 72
75 bio_for_each_segment(bv, bio, iter) { 73 bio_for_each_segment(bv, bio, iter) {
76 sectors += bv.bv_len >> 9; 74 if (sectors + (bv.bv_len >> 9) > queue_max_sectors(q))
77
78 if (sectors > queue_max_sectors(q))
79 goto split; 75 goto split;
80 76
81 /* 77 /*
82 * If the queue doesn't support SG gaps and adding this 78 * If the queue doesn't support SG gaps and adding this
83 * offset would create a gap, disallow it. 79 * offset would create a gap, disallow it.
84 */ 80 */
85 if (prev && bvec_gap_to_prev(q, &bvprv, bv.bv_offset)) 81 if (bvprvp && bvec_gap_to_prev(q, bvprvp, bv.bv_offset))
86 goto split; 82 goto split;
87 83
88 if (prev && blk_queue_cluster(q)) { 84 if (bvprvp && blk_queue_cluster(q)) {
89 if (seg_size + bv.bv_len > queue_max_segment_size(q)) 85 if (seg_size + bv.bv_len > queue_max_segment_size(q))
90 goto new_segment; 86 goto new_segment;
91 if (!BIOVEC_PHYS_MERGEABLE(&bvprv, &bv)) 87 if (!BIOVEC_PHYS_MERGEABLE(bvprvp, &bv))
92 goto new_segment; 88 goto new_segment;
93 if (!BIOVEC_SEG_BOUNDARY(q, &bvprv, &bv)) 89 if (!BIOVEC_SEG_BOUNDARY(q, bvprvp, &bv))
94 goto new_segment; 90 goto new_segment;
95 91
96 seg_size += bv.bv_len; 92 seg_size += bv.bv_len;
97 bvprv = bv; 93 bvprv = bv;
98 prev = 1; 94 bvprvp = &bv;
95 sectors += bv.bv_len >> 9;
99 continue; 96 continue;
100 } 97 }
101new_segment: 98new_segment:
@@ -104,23 +101,14 @@ new_segment:
104 101
105 nsegs++; 102 nsegs++;
106 bvprv = bv; 103 bvprv = bv;
107 prev = 1; 104 bvprvp = &bv;
108 seg_size = bv.bv_len; 105 seg_size = bv.bv_len;
106 sectors += bv.bv_len >> 9;
109 } 107 }
110 108
111 return NULL; 109 return NULL;
112split: 110split:
113 split = bio_clone_bioset(bio, GFP_NOIO, bs); 111 return bio_split(bio, sectors, GFP_NOIO, bs);
114
115 split->bi_iter.bi_size -= iter.bi_size;
116 bio->bi_iter = iter;
117
118 if (bio_integrity(bio)) {
119 bio_integrity_advance(bio, split->bi_iter.bi_size);
120 bio_integrity_trim(split, 0, bio_sectors(split));
121 }
122
123 return split;
124} 112}
125 113
126void blk_queue_split(struct request_queue *q, struct bio **bio, 114void blk_queue_split(struct request_queue *q, struct bio **bio,
@@ -439,6 +427,11 @@ no_merge:
439int ll_back_merge_fn(struct request_queue *q, struct request *req, 427int ll_back_merge_fn(struct request_queue *q, struct request *req,
440 struct bio *bio) 428 struct bio *bio)
441{ 429{
430 if (req_gap_back_merge(req, bio))
431 return 0;
432 if (blk_integrity_rq(req) &&
433 integrity_req_gap_back_merge(req, bio))
434 return 0;
442 if (blk_rq_sectors(req) + bio_sectors(bio) > 435 if (blk_rq_sectors(req) + bio_sectors(bio) >
443 blk_rq_get_max_sectors(req)) { 436 blk_rq_get_max_sectors(req)) {
444 req->cmd_flags |= REQ_NOMERGE; 437 req->cmd_flags |= REQ_NOMERGE;
@@ -457,6 +450,12 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req,
457int ll_front_merge_fn(struct request_queue *q, struct request *req, 450int ll_front_merge_fn(struct request_queue *q, struct request *req,
458 struct bio *bio) 451 struct bio *bio)
459{ 452{
453
454 if (req_gap_front_merge(req, bio))
455 return 0;
456 if (blk_integrity_rq(req) &&
457 integrity_req_gap_front_merge(req, bio))
458 return 0;
460 if (blk_rq_sectors(req) + bio_sectors(bio) > 459 if (blk_rq_sectors(req) + bio_sectors(bio) >
461 blk_rq_get_max_sectors(req)) { 460 blk_rq_get_max_sectors(req)) {
462 req->cmd_flags |= REQ_NOMERGE; 461 req->cmd_flags |= REQ_NOMERGE;
@@ -483,14 +482,6 @@ static bool req_no_special_merge(struct request *req)
483 return !q->mq_ops && req->special; 482 return !q->mq_ops && req->special;
484} 483}
485 484
486static int req_gap_to_prev(struct request *req, struct bio *next)
487{
488 struct bio *prev = req->biotail;
489
490 return bvec_gap_to_prev(req->q, &prev->bi_io_vec[prev->bi_vcnt - 1],
491 next->bi_io_vec[0].bv_offset);
492}
493
494static int ll_merge_requests_fn(struct request_queue *q, struct request *req, 485static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
495 struct request *next) 486 struct request *next)
496{ 487{
@@ -505,7 +496,7 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
505 if (req_no_special_merge(req) || req_no_special_merge(next)) 496 if (req_no_special_merge(req) || req_no_special_merge(next))
506 return 0; 497 return 0;
507 498
508 if (req_gap_to_prev(req, next->bio)) 499 if (req_gap_back_merge(req, next->bio))
509 return 0; 500 return 0;
510 501
511 /* 502 /*
@@ -713,10 +704,6 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
713 !blk_write_same_mergeable(rq->bio, bio)) 704 !blk_write_same_mergeable(rq->bio, bio))
714 return false; 705 return false;
715 706
716 /* Only check gaps if the bio carries data */
717 if (bio_has_data(bio) && req_gap_to_prev(rq, bio))
718 return false;
719
720 return true; 707 return true;
721} 708}
722 709
diff --git a/block/bounce.c b/block/bounce.c
index 0611aea1cfe9..1cb5dd3a5da1 100644
--- a/block/bounce.c
+++ b/block/bounce.c
@@ -128,12 +128,14 @@ static void bounce_end_io(struct bio *bio, mempool_t *pool)
128 struct bio *bio_orig = bio->bi_private; 128 struct bio *bio_orig = bio->bi_private;
129 struct bio_vec *bvec, *org_vec; 129 struct bio_vec *bvec, *org_vec;
130 int i; 130 int i;
131 int start = bio_orig->bi_iter.bi_idx;
131 132
132 /* 133 /*
133 * free up bounce indirect pages used 134 * free up bounce indirect pages used
134 */ 135 */
135 bio_for_each_segment_all(bvec, bio, i) { 136 bio_for_each_segment_all(bvec, bio, i) {
136 org_vec = bio_orig->bi_io_vec + i; 137 org_vec = bio_orig->bi_io_vec + i + start;
138
137 if (bvec->bv_page == org_vec->bv_page) 139 if (bvec->bv_page == org_vec->bv_page)
138 continue; 140 continue;
139 141
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index 35c2de136971..fa18753f5c34 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -940,6 +940,7 @@ static int __test_skcipher(struct crypto_skcipher *tfm, int enc,
940 char *xbuf[XBUFSIZE]; 940 char *xbuf[XBUFSIZE];
941 char *xoutbuf[XBUFSIZE]; 941 char *xoutbuf[XBUFSIZE];
942 int ret = -ENOMEM; 942 int ret = -ENOMEM;
943 unsigned int ivsize = crypto_skcipher_ivsize(tfm);
943 944
944 if (testmgr_alloc_buf(xbuf)) 945 if (testmgr_alloc_buf(xbuf))
945 goto out_nobuf; 946 goto out_nobuf;
@@ -975,7 +976,7 @@ static int __test_skcipher(struct crypto_skcipher *tfm, int enc,
975 continue; 976 continue;
976 977
977 if (template[i].iv) 978 if (template[i].iv)
978 memcpy(iv, template[i].iv, MAX_IVLEN); 979 memcpy(iv, template[i].iv, ivsize);
979 else 980 else
980 memset(iv, 0, MAX_IVLEN); 981 memset(iv, 0, MAX_IVLEN);
981 982
@@ -1051,7 +1052,7 @@ static int __test_skcipher(struct crypto_skcipher *tfm, int enc,
1051 continue; 1052 continue;
1052 1053
1053 if (template[i].iv) 1054 if (template[i].iv)
1054 memcpy(iv, template[i].iv, MAX_IVLEN); 1055 memcpy(iv, template[i].iv, ivsize);
1055 else 1056 else
1056 memset(iv, 0, MAX_IVLEN); 1057 memset(iv, 0, MAX_IVLEN);
1057 1058
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 46506e7687cd..a212cefae524 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -315,14 +315,10 @@ static void acpi_bus_osc_support(void)
315 315
316 capbuf[OSC_QUERY_DWORD] = OSC_QUERY_ENABLE; 316 capbuf[OSC_QUERY_DWORD] = OSC_QUERY_ENABLE;
317 capbuf[OSC_SUPPORT_DWORD] = OSC_SB_PR3_SUPPORT; /* _PR3 is in use */ 317 capbuf[OSC_SUPPORT_DWORD] = OSC_SB_PR3_SUPPORT; /* _PR3 is in use */
318#if defined(CONFIG_ACPI_PROCESSOR_AGGREGATOR) ||\ 318 if (IS_ENABLED(CONFIG_ACPI_PROCESSOR_AGGREGATOR))
319 defined(CONFIG_ACPI_PROCESSOR_AGGREGATOR_MODULE) 319 capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_PAD_SUPPORT;
320 capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_PAD_SUPPORT; 320 if (IS_ENABLED(CONFIG_ACPI_PROCESSOR))
321#endif 321 capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_PPC_OST_SUPPORT;
322
323#if defined(CONFIG_ACPI_PROCESSOR) || defined(CONFIG_ACPI_PROCESSOR_MODULE)
324 capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_PPC_OST_SUPPORT;
325#endif
326 322
327 capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_HOTPLUG_OST_SUPPORT; 323 capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_HOTPLUG_OST_SUPPORT;
328 324
diff --git a/drivers/acpi/int340x_thermal.c b/drivers/acpi/int340x_thermal.c
index 9dcf83682e36..33505c651f62 100644
--- a/drivers/acpi/int340x_thermal.c
+++ b/drivers/acpi/int340x_thermal.c
@@ -33,13 +33,12 @@ static const struct acpi_device_id int340x_thermal_device_ids[] = {
33static int int340x_thermal_handler_attach(struct acpi_device *adev, 33static int int340x_thermal_handler_attach(struct acpi_device *adev,
34 const struct acpi_device_id *id) 34 const struct acpi_device_id *id)
35{ 35{
36#if defined(CONFIG_INT340X_THERMAL) || defined(CONFIG_INT340X_THERMAL_MODULE) 36 if (IS_ENABLED(CONFIG_INT340X_THERMAL))
37 acpi_create_platform_device(adev); 37 acpi_create_platform_device(adev);
38#elif defined(INTEL_SOC_DTS_THERMAL) || defined(INTEL_SOC_DTS_THERMAL_MODULE)
39 /* Intel SoC DTS thermal driver needs INT3401 to set IRQ descriptor */ 38 /* Intel SoC DTS thermal driver needs INT3401 to set IRQ descriptor */
40 if (id->driver_data == INT3401_DEVICE) 39 else if (IS_ENABLED(CONFIG_INTEL_SOC_DTS_THERMAL) &&
40 id->driver_data == INT3401_DEVICE)
41 acpi_create_platform_device(adev); 41 acpi_create_platform_device(adev);
42#endif
43 return 1; 42 return 1;
44} 43}
45 44
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index fc28b9f5aa84..30d8518b25fb 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -525,8 +525,7 @@ static void acpi_thermal_check(void *data)
525 525
526/* sys I/F for generic thermal sysfs support */ 526/* sys I/F for generic thermal sysfs support */
527 527
528static int thermal_get_temp(struct thermal_zone_device *thermal, 528static int thermal_get_temp(struct thermal_zone_device *thermal, int *temp)
529 unsigned long *temp)
530{ 529{
531 struct acpi_thermal *tz = thermal->devdata; 530 struct acpi_thermal *tz = thermal->devdata;
532 int result; 531 int result;
@@ -633,7 +632,7 @@ static int thermal_get_trip_type(struct thermal_zone_device *thermal,
633} 632}
634 633
635static int thermal_get_trip_temp(struct thermal_zone_device *thermal, 634static int thermal_get_trip_temp(struct thermal_zone_device *thermal,
636 int trip, unsigned long *temp) 635 int trip, int *temp)
637{ 636{
638 struct acpi_thermal *tz = thermal->devdata; 637 struct acpi_thermal *tz = thermal->devdata;
639 int i; 638 int i;
@@ -686,7 +685,8 @@ static int thermal_get_trip_temp(struct thermal_zone_device *thermal,
686} 685}
687 686
688static int thermal_get_crit_temp(struct thermal_zone_device *thermal, 687static int thermal_get_crit_temp(struct thermal_zone_device *thermal,
689 unsigned long *temperature) { 688 int *temperature)
689{
690 struct acpi_thermal *tz = thermal->devdata; 690 struct acpi_thermal *tz = thermal->devdata;
691 691
692 if (tz->trips.critical.flags.valid) { 692 if (tz->trips.critical.flags.valid) {
@@ -709,8 +709,8 @@ static int thermal_get_trend(struct thermal_zone_device *thermal,
709 return -EINVAL; 709 return -EINVAL;
710 710
711 if (type == THERMAL_TRIP_ACTIVE) { 711 if (type == THERMAL_TRIP_ACTIVE) {
712 unsigned long trip_temp; 712 int trip_temp;
713 unsigned long temp = DECI_KELVIN_TO_MILLICELSIUS_WITH_OFFSET( 713 int temp = DECI_KELVIN_TO_MILLICELSIUS_WITH_OFFSET(
714 tz->temperature, tz->kelvin_offset); 714 tz->temperature, tz->kelvin_offset);
715 if (thermal_get_trip_temp(thermal, trip, &trip_temp)) 715 if (thermal_get_trip_temp(thermal, trip, &trip_temp))
716 return -EINVAL; 716 return -EINVAL;
diff --git a/drivers/atm/he.c b/drivers/atm/he.c
index a8da3a50e374..0f5cb37636bc 100644
--- a/drivers/atm/he.c
+++ b/drivers/atm/he.c
@@ -1578,9 +1578,7 @@ he_stop(struct he_dev *he_dev)
1578 1578
1579 kfree(he_dev->rbpl_virt); 1579 kfree(he_dev->rbpl_virt);
1580 kfree(he_dev->rbpl_table); 1580 kfree(he_dev->rbpl_table);
1581 1581 dma_pool_destroy(he_dev->rbpl_pool);
1582 if (he_dev->rbpl_pool)
1583 dma_pool_destroy(he_dev->rbpl_pool);
1584 1582
1585 if (he_dev->rbrq_base) 1583 if (he_dev->rbrq_base)
1586 dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq), 1584 dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
@@ -1594,8 +1592,7 @@ he_stop(struct he_dev *he_dev)
1594 dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq), 1592 dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
1595 he_dev->tpdrq_base, he_dev->tpdrq_phys); 1593 he_dev->tpdrq_base, he_dev->tpdrq_phys);
1596 1594
1597 if (he_dev->tpd_pool) 1595 dma_pool_destroy(he_dev->tpd_pool);
1598 dma_pool_destroy(he_dev->tpd_pool);
1599 1596
1600 if (he_dev->pci_dev) { 1597 if (he_dev->pci_dev) {
1601 pci_read_config_word(he_dev->pci_dev, PCI_COMMAND, &command); 1598 pci_read_config_word(he_dev->pci_dev, PCI_COMMAND, &command);
diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
index 74e18b0a6d89..3d7fb6516f74 100644
--- a/drivers/atm/solos-pci.c
+++ b/drivers/atm/solos-pci.c
@@ -805,7 +805,12 @@ static void solos_bh(unsigned long card_arg)
805 continue; 805 continue;
806 } 806 }
807 807
808 skb = alloc_skb(size + 1, GFP_ATOMIC); 808 /* Use netdev_alloc_skb() because it adds NET_SKB_PAD of
809 * headroom, and ensures we can route packets back out an
810 * Ethernet interface (for example) without having to
811 * reallocate. Adding NET_IP_ALIGN also ensures that both
812 * PPPoATM and PPPoEoBR2684 packets end up aligned. */
813 skb = netdev_alloc_skb_ip_align(NULL, size + 1);
809 if (!skb) { 814 if (!skb) {
810 if (net_ratelimit()) 815 if (net_ratelimit())
811 dev_warn(&card->dev->dev, "Failed to allocate sk_buff for RX\n"); 816 dev_warn(&card->dev->dev, "Failed to allocate sk_buff for RX\n");
@@ -869,7 +874,10 @@ static void solos_bh(unsigned long card_arg)
869 /* Allocate RX skbs for any ports which need them */ 874 /* Allocate RX skbs for any ports which need them */
870 if (card->using_dma && card->atmdev[port] && 875 if (card->using_dma && card->atmdev[port] &&
871 !card->rx_skb[port]) { 876 !card->rx_skb[port]) {
872 struct sk_buff *skb = alloc_skb(RX_DMA_SIZE, GFP_ATOMIC); 877 /* Unlike the MMIO case (qv) we can't add NET_IP_ALIGN
878 * here; the FPGA can only DMA to addresses which are
879 * aligned to 4 bytes. */
880 struct sk_buff *skb = dev_alloc_skb(RX_DMA_SIZE);
873 if (skb) { 881 if (skb) {
874 SKB_CB(skb)->dma_addr = 882 SKB_CB(skb)->dma_addr =
875 dma_map_single(&card->dev->dev, skb->data, 883 dma_map_single(&card->dev->dev, skb->data,
diff --git a/drivers/base/platform-msi.c b/drivers/base/platform-msi.c
index 1857a5dd0816..134483daac25 100644
--- a/drivers/base/platform-msi.c
+++ b/drivers/base/platform-msi.c
@@ -63,20 +63,8 @@ static int platform_msi_init(struct irq_domain *domain,
63 unsigned int virq, irq_hw_number_t hwirq, 63 unsigned int virq, irq_hw_number_t hwirq,
64 msi_alloc_info_t *arg) 64 msi_alloc_info_t *arg)
65{ 65{
66 struct irq_data *data; 66 return irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
67 67 info->chip, info->chip_data);
68 irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
69 info->chip, info->chip_data);
70
71 /*
72 * Save the MSI descriptor in handler_data so that the
73 * irq_write_msi_msg callback can retrieve it (and the
74 * associated device).
75 */
76 data = irq_domain_get_irq_data(domain, virq);
77 data->handler_data = arg->desc;
78
79 return 0;
80} 68}
81#else 69#else
82#define platform_msi_set_desc NULL 70#define platform_msi_set_desc NULL
@@ -97,7 +85,7 @@ static void platform_msi_update_dom_ops(struct msi_domain_info *info)
97 85
98static void platform_msi_write_msg(struct irq_data *data, struct msi_msg *msg) 86static void platform_msi_write_msg(struct irq_data *data, struct msi_msg *msg)
99{ 87{
100 struct msi_desc *desc = irq_data_get_irq_handler_data(data); 88 struct msi_desc *desc = irq_data_get_msi_desc(data);
101 struct platform_msi_priv_data *priv_data; 89 struct platform_msi_priv_data *priv_data;
102 90
103 priv_data = desc->platform.msi_priv_data; 91 priv_data = desc->platform.msi_priv_data;
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 416720159e96..16550c63d611 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -213,6 +213,18 @@ static int genpd_power_off(struct generic_pm_domain *genpd, bool timed)
213} 213}
214 214
215/** 215/**
216 * genpd_queue_power_off_work - Queue up the execution of pm_genpd_poweroff().
217 * @genpd: PM domait to power off.
218 *
219 * Queue up the execution of pm_genpd_poweroff() unless it's already been done
220 * before.
221 */
222static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
223{
224 queue_work(pm_wq, &genpd->power_off_work);
225}
226
227/**
216 * __pm_genpd_poweron - Restore power to a given PM domain and its masters. 228 * __pm_genpd_poweron - Restore power to a given PM domain and its masters.
217 * @genpd: PM domain to power up. 229 * @genpd: PM domain to power up.
218 * 230 *
@@ -259,8 +271,12 @@ static int __pm_genpd_poweron(struct generic_pm_domain *genpd)
259 return 0; 271 return 0;
260 272
261 err: 273 err:
262 list_for_each_entry_continue_reverse(link, &genpd->slave_links, slave_node) 274 list_for_each_entry_continue_reverse(link,
275 &genpd->slave_links,
276 slave_node) {
263 genpd_sd_counter_dec(link->master); 277 genpd_sd_counter_dec(link->master);
278 genpd_queue_power_off_work(link->master);
279 }
264 280
265 return ret; 281 return ret;
266} 282}
@@ -349,18 +365,6 @@ static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
349} 365}
350 366
351/** 367/**
352 * genpd_queue_power_off_work - Queue up the execution of pm_genpd_poweroff().
353 * @genpd: PM domait to power off.
354 *
355 * Queue up the execution of pm_genpd_poweroff() unless it's already been done
356 * before.
357 */
358static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
359{
360 queue_work(pm_wq, &genpd->power_off_work);
361}
362
363/**
364 * pm_genpd_poweroff - Remove power from a given PM domain. 368 * pm_genpd_poweroff - Remove power from a given PM domain.
365 * @genpd: PM domain to power down. 369 * @genpd: PM domain to power down.
366 * 370 *
@@ -1469,6 +1473,13 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
1469 1473
1470 mutex_lock(&genpd->lock); 1474 mutex_lock(&genpd->lock);
1471 1475
1476 if (!list_empty(&subdomain->slave_links) || subdomain->device_count) {
1477 pr_warn("%s: unable to remove subdomain %s\n", genpd->name,
1478 subdomain->name);
1479 ret = -EBUSY;
1480 goto out;
1481 }
1482
1472 list_for_each_entry(link, &genpd->master_links, master_node) { 1483 list_for_each_entry(link, &genpd->master_links, master_node) {
1473 if (link->slave != subdomain) 1484 if (link->slave != subdomain)
1474 continue; 1485 continue;
@@ -1487,6 +1498,7 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
1487 break; 1498 break;
1488 } 1499 }
1489 1500
1501out:
1490 mutex_unlock(&genpd->lock); 1502 mutex_unlock(&genpd->lock);
1491 1503
1492 return ret; 1504 return ret;
diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c
index eb254497a494..28cd75c535b0 100644
--- a/drivers/base/power/opp.c
+++ b/drivers/base/power/opp.c
@@ -341,6 +341,34 @@ unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev)
341EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_clock_latency); 341EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_clock_latency);
342 342
343/** 343/**
344 * dev_pm_opp_get_suspend_opp() - Get suspend opp
345 * @dev: device for which we do this operation
346 *
347 * Return: This function returns pointer to the suspend opp if it is
348 * defined and available, otherwise it returns NULL.
349 *
350 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
351 * protected pointer. The reason for the same is that the opp pointer which is
352 * returned will remain valid for use with opp_get_{voltage, freq} only while
353 * under the locked area. The pointer returned must be used prior to unlocking
354 * with rcu_read_unlock() to maintain the integrity of the pointer.
355 */
356struct dev_pm_opp *dev_pm_opp_get_suspend_opp(struct device *dev)
357{
358 struct device_opp *dev_opp;
359
360 opp_rcu_lockdep_assert();
361
362 dev_opp = _find_device_opp(dev);
363 if (IS_ERR(dev_opp) || !dev_opp->suspend_opp ||
364 !dev_opp->suspend_opp->available)
365 return NULL;
366
367 return dev_opp->suspend_opp;
368}
369EXPORT_SYMBOL_GPL(dev_pm_opp_get_suspend_opp);
370
371/**
344 * dev_pm_opp_get_opp_count() - Get number of opps available in the opp list 372 * dev_pm_opp_get_opp_count() - Get number of opps available in the opp list
345 * @dev: device for which we do this operation 373 * @dev: device for which we do this operation
346 * 374 *
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index 17269a3b85f2..a295b98c6bae 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -406,6 +406,22 @@ static struct blk_mq_ops null_mq_ops = {
406 .complete = null_softirq_done_fn, 406 .complete = null_softirq_done_fn,
407}; 407};
408 408
409static void cleanup_queue(struct nullb_queue *nq)
410{
411 kfree(nq->tag_map);
412 kfree(nq->cmds);
413}
414
415static void cleanup_queues(struct nullb *nullb)
416{
417 int i;
418
419 for (i = 0; i < nullb->nr_queues; i++)
420 cleanup_queue(&nullb->queues[i]);
421
422 kfree(nullb->queues);
423}
424
409static void null_del_dev(struct nullb *nullb) 425static void null_del_dev(struct nullb *nullb)
410{ 426{
411 list_del_init(&nullb->list); 427 list_del_init(&nullb->list);
@@ -415,6 +431,7 @@ static void null_del_dev(struct nullb *nullb)
415 if (queue_mode == NULL_Q_MQ) 431 if (queue_mode == NULL_Q_MQ)
416 blk_mq_free_tag_set(&nullb->tag_set); 432 blk_mq_free_tag_set(&nullb->tag_set);
417 put_disk(nullb->disk); 433 put_disk(nullb->disk);
434 cleanup_queues(nullb);
418 kfree(nullb); 435 kfree(nullb);
419} 436}
420 437
@@ -459,22 +476,6 @@ static int setup_commands(struct nullb_queue *nq)
459 return 0; 476 return 0;
460} 477}
461 478
462static void cleanup_queue(struct nullb_queue *nq)
463{
464 kfree(nq->tag_map);
465 kfree(nq->cmds);
466}
467
468static void cleanup_queues(struct nullb *nullb)
469{
470 int i;
471
472 for (i = 0; i < nullb->nr_queues; i++)
473 cleanup_queue(&nullb->queues[i]);
474
475 kfree(nullb->queues);
476}
477
478static int setup_queues(struct nullb *nullb) 479static int setup_queues(struct nullb *nullb)
479{ 480{
480 nullb->queues = kzalloc(submit_queues * sizeof(struct nullb_queue), 481 nullb->queues = kzalloc(submit_queues * sizeof(struct nullb_queue),
@@ -588,8 +589,7 @@ static int null_add_dev(void)
588 blk_queue_physical_block_size(nullb->q, bs); 589 blk_queue_physical_block_size(nullb->q, bs);
589 590
590 size = gb * 1024 * 1024 * 1024ULL; 591 size = gb * 1024 * 1024 * 1024ULL;
591 sector_div(size, bs); 592 set_capacity(disk, size >> 9);
592 set_capacity(disk, size);
593 593
594 disk->flags |= GENHD_FL_EXT_DEVT | GENHD_FL_SUPPRESS_PARTITION_INFO; 594 disk->flags |= GENHD_FL_EXT_DEVT | GENHD_FL_SUPPRESS_PARTITION_INFO;
595 disk->major = null_major; 595 disk->major = null_major;
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 698f761037ce..d93a0372b37b 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -4673,7 +4673,10 @@ static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev)
4673 } 4673 }
4674 4674
4675 ret = rbd_dev_v2_snap_context(rbd_dev); 4675 ret = rbd_dev_v2_snap_context(rbd_dev);
4676 dout("rbd_dev_v2_snap_context returned %d\n", ret); 4676 if (ret && first_time) {
4677 kfree(rbd_dev->header.object_prefix);
4678 rbd_dev->header.object_prefix = NULL;
4679 }
4677 4680
4678 return ret; 4681 return ret;
4679} 4682}
@@ -5154,7 +5157,6 @@ static int rbd_dev_probe_parent(struct rbd_device *rbd_dev)
5154out_err: 5157out_err:
5155 if (parent) { 5158 if (parent) {
5156 rbd_dev_unparent(rbd_dev); 5159 rbd_dev_unparent(rbd_dev);
5157 kfree(rbd_dev->header_name);
5158 rbd_dev_destroy(parent); 5160 rbd_dev_destroy(parent);
5159 } else { 5161 } else {
5160 rbd_put_client(rbdc); 5162 rbd_put_client(rbdc);
diff --git a/drivers/block/zram/zcomp.c b/drivers/block/zram/zcomp.c
index 965d1afb0eaa..5cb13ca3a3ac 100644
--- a/drivers/block/zram/zcomp.c
+++ b/drivers/block/zram/zcomp.c
@@ -330,12 +330,14 @@ void zcomp_destroy(struct zcomp *comp)
330 * allocate new zcomp and initialize it. return compressing 330 * allocate new zcomp and initialize it. return compressing
331 * backend pointer or ERR_PTR if things went bad. ERR_PTR(-EINVAL) 331 * backend pointer or ERR_PTR if things went bad. ERR_PTR(-EINVAL)
332 * if requested algorithm is not supported, ERR_PTR(-ENOMEM) in 332 * if requested algorithm is not supported, ERR_PTR(-ENOMEM) in
333 * case of allocation error. 333 * case of allocation error, or any other error potentially
334 * returned by functions zcomp_strm_{multi,single}_create.
334 */ 335 */
335struct zcomp *zcomp_create(const char *compress, int max_strm) 336struct zcomp *zcomp_create(const char *compress, int max_strm)
336{ 337{
337 struct zcomp *comp; 338 struct zcomp *comp;
338 struct zcomp_backend *backend; 339 struct zcomp_backend *backend;
340 int error;
339 341
340 backend = find_backend(compress); 342 backend = find_backend(compress);
341 if (!backend) 343 if (!backend)
@@ -347,12 +349,12 @@ struct zcomp *zcomp_create(const char *compress, int max_strm)
347 349
348 comp->backend = backend; 350 comp->backend = backend;
349 if (max_strm > 1) 351 if (max_strm > 1)
350 zcomp_strm_multi_create(comp, max_strm); 352 error = zcomp_strm_multi_create(comp, max_strm);
351 else 353 else
352 zcomp_strm_single_create(comp); 354 error = zcomp_strm_single_create(comp);
353 if (!comp->stream) { 355 if (error) {
354 kfree(comp); 356 kfree(comp);
355 return ERR_PTR(-ENOMEM); 357 return ERR_PTR(error);
356 } 358 }
357 return comp; 359 return comp;
358} 360}
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 43e2c3ad6c31..0ebcf449778a 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -2437,7 +2437,8 @@ static int __clk_init(struct device *dev, struct clk *clk_user)
2437 hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) { 2437 hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) {
2438 if (orphan->num_parents && orphan->ops->get_parent) { 2438 if (orphan->num_parents && orphan->ops->get_parent) {
2439 i = orphan->ops->get_parent(orphan->hw); 2439 i = orphan->ops->get_parent(orphan->hw);
2440 if (!strcmp(core->name, orphan->parent_names[i])) 2440 if (i >= 0 && i < orphan->num_parents &&
2441 !strcmp(core->name, orphan->parent_names[i]))
2441 clk_core_reparent(orphan, core); 2442 clk_core_reparent(orphan, core);
2442 continue; 2443 continue;
2443 } 2444 }
diff --git a/drivers/clk/h8300/clk-h8s2678.c b/drivers/clk/h8300/clk-h8s2678.c
index 2a38eb4a2552..6cf38dc1c929 100644
--- a/drivers/clk/h8300/clk-h8s2678.c
+++ b/drivers/clk/h8300/clk-h8s2678.c
@@ -8,6 +8,7 @@
8#include <linux/err.h> 8#include <linux/err.h>
9#include <linux/device.h> 9#include <linux/device.h>
10#include <linux/of_address.h> 10#include <linux/of_address.h>
11#include <linux/slab.h>
11 12
12static DEFINE_SPINLOCK(clklock); 13static DEFINE_SPINLOCK(clklock);
13 14
diff --git a/drivers/clk/hisilicon/Kconfig b/drivers/clk/hisilicon/Kconfig
index 2c16807341dc..e43485448612 100644
--- a/drivers/clk/hisilicon/Kconfig
+++ b/drivers/clk/hisilicon/Kconfig
@@ -1,6 +1,12 @@
1config COMMON_CLK_HI6220 1config COMMON_CLK_HI6220
2 bool "Hi6220 Clock Driver" 2 bool "Hi6220 Clock Driver"
3 depends on (ARCH_HISI || COMPILE_TEST) && MAILBOX 3 depends on ARCH_HISI || COMPILE_TEST
4 default ARCH_HISI 4 default ARCH_HISI
5 help 5 help
6 Build the Hisilicon Hi6220 clock driver based on the common clock framework. 6 Build the Hisilicon Hi6220 clock driver based on the common clock framework.
7
8config STUB_CLK_HI6220
9 bool "Hi6220 Stub Clock Driver"
10 depends on COMMON_CLK_HI6220 && MAILBOX
11 help
12 Build the Hisilicon Hi6220 stub clock driver.
diff --git a/drivers/clk/hisilicon/Makefile b/drivers/clk/hisilicon/Makefile
index 4a1001a11f04..74dba31590f9 100644
--- a/drivers/clk/hisilicon/Makefile
+++ b/drivers/clk/hisilicon/Makefile
@@ -7,4 +7,5 @@ obj-y += clk.o clkgate-separated.o clkdivider-hi6220.o
7obj-$(CONFIG_ARCH_HI3xxx) += clk-hi3620.o 7obj-$(CONFIG_ARCH_HI3xxx) += clk-hi3620.o
8obj-$(CONFIG_ARCH_HIP04) += clk-hip04.o 8obj-$(CONFIG_ARCH_HIP04) += clk-hip04.o
9obj-$(CONFIG_ARCH_HIX5HD2) += clk-hix5hd2.o 9obj-$(CONFIG_ARCH_HIX5HD2) += clk-hix5hd2.o
10obj-$(CONFIG_COMMON_CLK_HI6220) += clk-hi6220.o clk-hi6220-stub.o 10obj-$(CONFIG_COMMON_CLK_HI6220) += clk-hi6220.o
11obj-$(CONFIG_STUB_CLK_HI6220) += clk-hi6220-stub.o
diff --git a/drivers/clk/rockchip/clk-rk3188.c b/drivers/clk/rockchip/clk-rk3188.c
index ed02bbc7b11f..abb47608713b 100644
--- a/drivers/clk/rockchip/clk-rk3188.c
+++ b/drivers/clk/rockchip/clk-rk3188.c
@@ -716,6 +716,8 @@ static const char *const rk3188_critical_clocks[] __initconst = {
716 "aclk_cpu", 716 "aclk_cpu",
717 "aclk_peri", 717 "aclk_peri",
718 "hclk_peri", 718 "hclk_peri",
719 "pclk_cpu",
720 "pclk_peri",
719}; 721};
720 722
721static void __init rk3188_common_clk_init(struct device_node *np) 723static void __init rk3188_common_clk_init(struct device_node *np)
@@ -744,8 +746,6 @@ static void __init rk3188_common_clk_init(struct device_node *np)
744 746
745 rockchip_clk_register_branches(common_clk_branches, 747 rockchip_clk_register_branches(common_clk_branches,
746 ARRAY_SIZE(common_clk_branches)); 748 ARRAY_SIZE(common_clk_branches));
747 rockchip_clk_protect_critical(rk3188_critical_clocks,
748 ARRAY_SIZE(rk3188_critical_clocks));
749 749
750 rockchip_register_softrst(np, 9, reg_base + RK2928_SOFTRST_CON(0), 750 rockchip_register_softrst(np, 9, reg_base + RK2928_SOFTRST_CON(0),
751 ROCKCHIP_SOFTRST_HIWORD_MASK); 751 ROCKCHIP_SOFTRST_HIWORD_MASK);
@@ -765,6 +765,8 @@ static void __init rk3066a_clk_init(struct device_node *np)
765 mux_armclk_p, ARRAY_SIZE(mux_armclk_p), 765 mux_armclk_p, ARRAY_SIZE(mux_armclk_p),
766 &rk3066_cpuclk_data, rk3066_cpuclk_rates, 766 &rk3066_cpuclk_data, rk3066_cpuclk_rates,
767 ARRAY_SIZE(rk3066_cpuclk_rates)); 767 ARRAY_SIZE(rk3066_cpuclk_rates));
768 rockchip_clk_protect_critical(rk3188_critical_clocks,
769 ARRAY_SIZE(rk3188_critical_clocks));
768} 770}
769CLK_OF_DECLARE(rk3066a_cru, "rockchip,rk3066a-cru", rk3066a_clk_init); 771CLK_OF_DECLARE(rk3066a_cru, "rockchip,rk3066a-cru", rk3066a_clk_init);
770 772
@@ -801,6 +803,9 @@ static void __init rk3188a_clk_init(struct device_node *np)
801 pr_warn("%s: missing clocks to reparent aclk_cpu_pre to gpll\n", 803 pr_warn("%s: missing clocks to reparent aclk_cpu_pre to gpll\n",
802 __func__); 804 __func__);
803 } 805 }
806
807 rockchip_clk_protect_critical(rk3188_critical_clocks,
808 ARRAY_SIZE(rk3188_critical_clocks));
804} 809}
805CLK_OF_DECLARE(rk3188a_cru, "rockchip,rk3188a-cru", rk3188a_clk_init); 810CLK_OF_DECLARE(rk3188a_cru, "rockchip,rk3188a-cru", rk3188a_clk_init);
806 811
diff --git a/drivers/clk/rockchip/clk-rk3368.c b/drivers/clk/rockchip/clk-rk3368.c
index 9c5d61e698ef..7e6b783e6eee 100644
--- a/drivers/clk/rockchip/clk-rk3368.c
+++ b/drivers/clk/rockchip/clk-rk3368.c
@@ -818,6 +818,10 @@ static struct rockchip_clk_branch rk3368_clk_branches[] __initdata = {
818 GATE(0, "sclk_timer00", "xin24m", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(24), 0, GFLAGS), 818 GATE(0, "sclk_timer00", "xin24m", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(24), 0, GFLAGS),
819}; 819};
820 820
821static const char *const rk3368_critical_clocks[] __initconst = {
822 "pclk_pd_pmu",
823};
824
821static void __init rk3368_clk_init(struct device_node *np) 825static void __init rk3368_clk_init(struct device_node *np)
822{ 826{
823 void __iomem *reg_base; 827 void __iomem *reg_base;
@@ -862,6 +866,8 @@ static void __init rk3368_clk_init(struct device_node *np)
862 RK3368_GRF_SOC_STATUS0); 866 RK3368_GRF_SOC_STATUS0);
863 rockchip_clk_register_branches(rk3368_clk_branches, 867 rockchip_clk_register_branches(rk3368_clk_branches,
864 ARRAY_SIZE(rk3368_clk_branches)); 868 ARRAY_SIZE(rk3368_clk_branches));
869 rockchip_clk_protect_critical(rk3368_critical_clocks,
870 ARRAY_SIZE(rk3368_critical_clocks));
865 871
866 rockchip_clk_register_armclk(ARMCLKB, "armclkb", 872 rockchip_clk_register_armclk(ARMCLKB, "armclkb",
867 mux_armclkb_p, ARRAY_SIZE(mux_armclkb_p), 873 mux_armclkb_p, ARRAY_SIZE(mux_armclkb_p),
diff --git a/drivers/clk/st/clkgen-fsyn.c b/drivers/clk/st/clkgen-fsyn.c
index 83ccf142ff2a..576cd0354d48 100644
--- a/drivers/clk/st/clkgen-fsyn.c
+++ b/drivers/clk/st/clkgen-fsyn.c
@@ -307,7 +307,7 @@ static const struct clkgen_quadfs_data st_fs660c32_F_416 = {
307 .get_rate = clk_fs660c32_dig_get_rate, 307 .get_rate = clk_fs660c32_dig_get_rate,
308}; 308};
309 309
310static const struct clkgen_quadfs_data st_fs660c32_C_407 = { 310static const struct clkgen_quadfs_data st_fs660c32_C = {
311 .nrst_present = true, 311 .nrst_present = true,
312 .nrst = { CLKGEN_FIELD(0x2f0, 0x1, 0), 312 .nrst = { CLKGEN_FIELD(0x2f0, 0x1, 0),
313 CLKGEN_FIELD(0x2f0, 0x1, 1), 313 CLKGEN_FIELD(0x2f0, 0x1, 1),
@@ -350,7 +350,7 @@ static const struct clkgen_quadfs_data st_fs660c32_C_407 = {
350 .get_rate = clk_fs660c32_dig_get_rate, 350 .get_rate = clk_fs660c32_dig_get_rate,
351}; 351};
352 352
353static const struct clkgen_quadfs_data st_fs660c32_D_407 = { 353static const struct clkgen_quadfs_data st_fs660c32_D = {
354 .nrst_present = true, 354 .nrst_present = true,
355 .nrst = { CLKGEN_FIELD(0x2a0, 0x1, 0), 355 .nrst = { CLKGEN_FIELD(0x2a0, 0x1, 0),
356 CLKGEN_FIELD(0x2a0, 0x1, 1), 356 CLKGEN_FIELD(0x2a0, 0x1, 1),
@@ -1077,11 +1077,11 @@ static const struct of_device_id quadfs_of_match[] = {
1077 }, 1077 },
1078 { 1078 {
1079 .compatible = "st,stih407-quadfs660-C", 1079 .compatible = "st,stih407-quadfs660-C",
1080 .data = &st_fs660c32_C_407 1080 .data = &st_fs660c32_C
1081 }, 1081 },
1082 { 1082 {
1083 .compatible = "st,stih407-quadfs660-D", 1083 .compatible = "st,stih407-quadfs660-D",
1084 .data = &st_fs660c32_D_407 1084 .data = &st_fs660c32_D
1085 }, 1085 },
1086 {} 1086 {}
1087}; 1087};
diff --git a/drivers/clk/st/clkgen-pll.c b/drivers/clk/st/clkgen-pll.c
index 47a38a994cac..b2a332cf8985 100644
--- a/drivers/clk/st/clkgen-pll.c
+++ b/drivers/clk/st/clkgen-pll.c
@@ -193,7 +193,7 @@ static const struct clkgen_pll_data st_pll3200c32_407_a0 = {
193 .ops = &stm_pll3200c32_ops, 193 .ops = &stm_pll3200c32_ops,
194}; 194};
195 195
196static const struct clkgen_pll_data st_pll3200c32_407_c0_0 = { 196static const struct clkgen_pll_data st_pll3200c32_cx_0 = {
197 /* 407 C0 PLL0 */ 197 /* 407 C0 PLL0 */
198 .pdn_status = CLKGEN_FIELD(0x2a0, 0x1, 8), 198 .pdn_status = CLKGEN_FIELD(0x2a0, 0x1, 8),
199 .locked_status = CLKGEN_FIELD(0x2a0, 0x1, 24), 199 .locked_status = CLKGEN_FIELD(0x2a0, 0x1, 24),
@@ -205,7 +205,7 @@ static const struct clkgen_pll_data st_pll3200c32_407_c0_0 = {
205 .ops = &stm_pll3200c32_ops, 205 .ops = &stm_pll3200c32_ops,
206}; 206};
207 207
208static const struct clkgen_pll_data st_pll3200c32_407_c0_1 = { 208static const struct clkgen_pll_data st_pll3200c32_cx_1 = {
209 /* 407 C0 PLL1 */ 209 /* 407 C0 PLL1 */
210 .pdn_status = CLKGEN_FIELD(0x2c8, 0x1, 8), 210 .pdn_status = CLKGEN_FIELD(0x2c8, 0x1, 8),
211 .locked_status = CLKGEN_FIELD(0x2c8, 0x1, 24), 211 .locked_status = CLKGEN_FIELD(0x2c8, 0x1, 24),
@@ -624,12 +624,12 @@ static const struct of_device_id c32_pll_of_match[] = {
624 .data = &st_pll3200c32_407_a0, 624 .data = &st_pll3200c32_407_a0,
625 }, 625 },
626 { 626 {
627 .compatible = "st,stih407-plls-c32-c0_0", 627 .compatible = "st,plls-c32-cx_0",
628 .data = &st_pll3200c32_407_c0_0, 628 .data = &st_pll3200c32_cx_0,
629 }, 629 },
630 { 630 {
631 .compatible = "st,stih407-plls-c32-c0_1", 631 .compatible = "st,plls-c32-cx_1",
632 .data = &st_pll3200c32_407_c0_1, 632 .data = &st_pll3200c32_cx_1,
633 }, 633 },
634 { 634 {
635 .compatible = "st,stih407-plls-c32-a9", 635 .compatible = "st,stih407-plls-c32-a9",
diff --git a/drivers/clk/tegra/clk-dfll.c b/drivers/clk/tegra/clk-dfll.c
index c2ff859ee0e8..c4e3a52e225b 100644
--- a/drivers/clk/tegra/clk-dfll.c
+++ b/drivers/clk/tegra/clk-dfll.c
@@ -682,11 +682,17 @@ static int find_lut_index_for_rate(struct tegra_dfll *td, unsigned long rate)
682 struct dev_pm_opp *opp; 682 struct dev_pm_opp *opp;
683 int i, uv; 683 int i, uv;
684 684
685 rcu_read_lock();
686
685 opp = dev_pm_opp_find_freq_ceil(td->soc->dev, &rate); 687 opp = dev_pm_opp_find_freq_ceil(td->soc->dev, &rate);
686 if (IS_ERR(opp)) 688 if (IS_ERR(opp)) {
689 rcu_read_unlock();
687 return PTR_ERR(opp); 690 return PTR_ERR(opp);
691 }
688 uv = dev_pm_opp_get_voltage(opp); 692 uv = dev_pm_opp_get_voltage(opp);
689 693
694 rcu_read_unlock();
695
690 for (i = 0; i < td->i2c_lut_size; i++) { 696 for (i = 0; i < td->i2c_lut_size; i++) {
691 if (regulator_list_voltage(td->vdd_reg, td->i2c_lut[i]) == uv) 697 if (regulator_list_voltage(td->vdd_reg, td->i2c_lut[i]) == uv)
692 return i; 698 return i;
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 5f498d9f1825..cd0391e46c6d 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -84,6 +84,7 @@ config ARM_KIRKWOOD_CPUFREQ
84config ARM_MT8173_CPUFREQ 84config ARM_MT8173_CPUFREQ
85 bool "Mediatek MT8173 CPUFreq support" 85 bool "Mediatek MT8173 CPUFreq support"
86 depends on ARCH_MEDIATEK && REGULATOR 86 depends on ARCH_MEDIATEK && REGULATOR
87 depends on !CPU_THERMAL || THERMAL=y
87 select PM_OPP 88 select PM_OPP
88 help 89 help
89 This adds the CPUFreq driver support for Mediatek MT8173 SoC. 90 This adds the CPUFreq driver support for Mediatek MT8173 SoC.
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index 15b921a9248c..798277227de7 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -375,12 +375,11 @@ static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
375 375
376 pr_debug("get_cur_freq_on_cpu (%d)\n", cpu); 376 pr_debug("get_cur_freq_on_cpu (%d)\n", cpu);
377 377
378 policy = cpufreq_cpu_get(cpu); 378 policy = cpufreq_cpu_get_raw(cpu);
379 if (unlikely(!policy)) 379 if (unlikely(!policy))
380 return 0; 380 return 0;
381 381
382 data = policy->driver_data; 382 data = policy->driver_data;
383 cpufreq_cpu_put(policy);
384 if (unlikely(!data || !data->freq_table)) 383 if (unlikely(!data || !data->freq_table))
385 return 0; 384 return 0;
386 385
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index c3583cdfadbd..7c0d70e2a861 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -196,6 +196,7 @@ static int cpufreq_init(struct cpufreq_policy *policy)
196 struct device *cpu_dev; 196 struct device *cpu_dev;
197 struct regulator *cpu_reg; 197 struct regulator *cpu_reg;
198 struct clk *cpu_clk; 198 struct clk *cpu_clk;
199 struct dev_pm_opp *suspend_opp;
199 unsigned long min_uV = ~0, max_uV = 0; 200 unsigned long min_uV = ~0, max_uV = 0;
200 unsigned int transition_latency; 201 unsigned int transition_latency;
201 bool need_update = false; 202 bool need_update = false;
@@ -239,6 +240,17 @@ static int cpufreq_init(struct cpufreq_policy *policy)
239 */ 240 */
240 of_cpumask_init_opp_table(policy->cpus); 241 of_cpumask_init_opp_table(policy->cpus);
241 242
243 /*
244 * But we need OPP table to function so if it is not there let's
245 * give platform code chance to provide it for us.
246 */
247 ret = dev_pm_opp_get_opp_count(cpu_dev);
248 if (ret <= 0) {
249 pr_debug("OPP table is not ready, deferring probe\n");
250 ret = -EPROBE_DEFER;
251 goto out_free_opp;
252 }
253
242 if (need_update) { 254 if (need_update) {
243 struct cpufreq_dt_platform_data *pd = cpufreq_get_driver_data(); 255 struct cpufreq_dt_platform_data *pd = cpufreq_get_driver_data();
244 256
@@ -249,24 +261,16 @@ static int cpufreq_init(struct cpufreq_policy *policy)
249 * OPP tables are initialized only for policy->cpu, do it for 261 * OPP tables are initialized only for policy->cpu, do it for
250 * others as well. 262 * others as well.
251 */ 263 */
252 set_cpus_sharing_opps(cpu_dev, policy->cpus); 264 ret = set_cpus_sharing_opps(cpu_dev, policy->cpus);
265 if (ret)
266 dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n",
267 __func__, ret);
253 268
254 of_property_read_u32(np, "clock-latency", &transition_latency); 269 of_property_read_u32(np, "clock-latency", &transition_latency);
255 } else { 270 } else {
256 transition_latency = dev_pm_opp_get_max_clock_latency(cpu_dev); 271 transition_latency = dev_pm_opp_get_max_clock_latency(cpu_dev);
257 } 272 }
258 273
259 /*
260 * But we need OPP table to function so if it is not there let's
261 * give platform code chance to provide it for us.
262 */
263 ret = dev_pm_opp_get_opp_count(cpu_dev);
264 if (ret <= 0) {
265 pr_debug("OPP table is not ready, deferring probe\n");
266 ret = -EPROBE_DEFER;
267 goto out_free_opp;
268 }
269
270 priv = kzalloc(sizeof(*priv), GFP_KERNEL); 274 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
271 if (!priv) { 275 if (!priv) {
272 ret = -ENOMEM; 276 ret = -ENOMEM;
@@ -300,7 +304,8 @@ static int cpufreq_init(struct cpufreq_policy *policy)
300 rcu_read_unlock(); 304 rcu_read_unlock();
301 305
302 tol_uV = opp_uV * priv->voltage_tolerance / 100; 306 tol_uV = opp_uV * priv->voltage_tolerance / 100;
303 if (regulator_is_supported_voltage(cpu_reg, opp_uV, 307 if (regulator_is_supported_voltage(cpu_reg,
308 opp_uV - tol_uV,
304 opp_uV + tol_uV)) { 309 opp_uV + tol_uV)) {
305 if (opp_uV < min_uV) 310 if (opp_uV < min_uV)
306 min_uV = opp_uV; 311 min_uV = opp_uV;
@@ -329,6 +334,13 @@ static int cpufreq_init(struct cpufreq_policy *policy)
329 policy->driver_data = priv; 334 policy->driver_data = priv;
330 335
331 policy->clk = cpu_clk; 336 policy->clk = cpu_clk;
337
338 rcu_read_lock();
339 suspend_opp = dev_pm_opp_get_suspend_opp(cpu_dev);
340 if (suspend_opp)
341 policy->suspend_freq = dev_pm_opp_get_freq(suspend_opp) / 1000;
342 rcu_read_unlock();
343
332 ret = cpufreq_table_validate_and_show(policy, freq_table); 344 ret = cpufreq_table_validate_and_show(policy, freq_table);
333 if (ret) { 345 if (ret) {
334 dev_err(cpu_dev, "%s: invalid frequency table: %d\n", __func__, 346 dev_err(cpu_dev, "%s: invalid frequency table: %d\n", __func__,
@@ -419,6 +431,7 @@ static struct cpufreq_driver dt_cpufreq_driver = {
419 .ready = cpufreq_ready, 431 .ready = cpufreq_ready,
420 .name = "cpufreq-dt", 432 .name = "cpufreq-dt",
421 .attr = cpufreq_dt_attr, 433 .attr = cpufreq_dt_attr,
434 .suspend = cpufreq_generic_suspend,
422}; 435};
423 436
424static int dt_cpufreq_probe(struct platform_device *pdev) 437static int dt_cpufreq_probe(struct platform_device *pdev)
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index b3d9368339af..ef5ed9470de9 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -238,13 +238,13 @@ int cpufreq_generic_init(struct cpufreq_policy *policy,
238} 238}
239EXPORT_SYMBOL_GPL(cpufreq_generic_init); 239EXPORT_SYMBOL_GPL(cpufreq_generic_init);
240 240
241/* Only for cpufreq core internal use */
242struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu) 241struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
243{ 242{
244 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu); 243 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
245 244
246 return policy && cpumask_test_cpu(cpu, policy->cpus) ? policy : NULL; 245 return policy && cpumask_test_cpu(cpu, policy->cpus) ? policy : NULL;
247} 246}
247EXPORT_SYMBOL_GPL(cpufreq_cpu_get_raw);
248 248
249unsigned int cpufreq_generic_get(unsigned int cpu) 249unsigned int cpufreq_generic_get(unsigned int cpu)
250{ 250{
@@ -1626,8 +1626,8 @@ int cpufreq_generic_suspend(struct cpufreq_policy *policy)
1626 int ret; 1626 int ret;
1627 1627
1628 if (!policy->suspend_freq) { 1628 if (!policy->suspend_freq) {
1629 pr_err("%s: suspend_freq can't be zero\n", __func__); 1629 pr_debug("%s: suspend_freq not defined\n", __func__);
1630 return -EINVAL; 1630 return 0;
1631 } 1631 }
1632 1632
1633 pr_debug("%s: Setting suspend-freq: %u\n", __func__, 1633 pr_debug("%s: Setting suspend-freq: %u\n", __func__,
@@ -2031,8 +2031,7 @@ static int __cpufreq_governor(struct cpufreq_policy *policy,
2031 if (!try_module_get(policy->governor->owner)) 2031 if (!try_module_get(policy->governor->owner))
2032 return -EINVAL; 2032 return -EINVAL;
2033 2033
2034 pr_debug("__cpufreq_governor for CPU %u, event %u\n", 2034 pr_debug("%s: for CPU %u, event %u\n", __func__, policy->cpu, event);
2035 policy->cpu, event);
2036 2035
2037 mutex_lock(&cpufreq_governor_lock); 2036 mutex_lock(&cpufreq_governor_lock);
2038 if ((policy->governor_enabled && event == CPUFREQ_GOV_START) 2037 if ((policy->governor_enabled && event == CPUFREQ_GOV_START)
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index cddc61939a86..3af9dd7332e6 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -260,24 +260,31 @@ static inline void update_turbo_state(void)
260 cpu->pstate.max_pstate == cpu->pstate.turbo_pstate); 260 cpu->pstate.max_pstate == cpu->pstate.turbo_pstate);
261} 261}
262 262
263#define PCT_TO_HWP(x) (x * 255 / 100)
264static void intel_pstate_hwp_set(void) 263static void intel_pstate_hwp_set(void)
265{ 264{
266 int min, max, cpu; 265 int min, hw_min, max, hw_max, cpu, range, adj_range;
267 u64 value, freq; 266 u64 value, cap;
267
268 rdmsrl(MSR_HWP_CAPABILITIES, cap);
269 hw_min = HWP_LOWEST_PERF(cap);
270 hw_max = HWP_HIGHEST_PERF(cap);
271 range = hw_max - hw_min;
268 272
269 get_online_cpus(); 273 get_online_cpus();
270 274
271 for_each_online_cpu(cpu) { 275 for_each_online_cpu(cpu) {
272 rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value); 276 rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
273 min = PCT_TO_HWP(limits.min_perf_pct); 277 adj_range = limits.min_perf_pct * range / 100;
278 min = hw_min + adj_range;
274 value &= ~HWP_MIN_PERF(~0L); 279 value &= ~HWP_MIN_PERF(~0L);
275 value |= HWP_MIN_PERF(min); 280 value |= HWP_MIN_PERF(min);
276 281
277 max = PCT_TO_HWP(limits.max_perf_pct); 282 adj_range = limits.max_perf_pct * range / 100;
283 max = hw_min + adj_range;
278 if (limits.no_turbo) { 284 if (limits.no_turbo) {
279 rdmsrl( MSR_HWP_CAPABILITIES, freq); 285 hw_max = HWP_GUARANTEED_PERF(cap);
280 max = HWP_GUARANTEED_PERF(freq); 286 if (hw_max < max)
287 max = hw_max;
281 } 288 }
282 289
283 value &= ~HWP_MAX_PERF(~0L); 290 value &= ~HWP_MAX_PERF(~0L);
@@ -423,6 +430,8 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
423 430
424 limits.max_sysfs_pct = clamp_t(int, input, 0 , 100); 431 limits.max_sysfs_pct = clamp_t(int, input, 0 , 100);
425 limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct); 432 limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
433 limits.max_perf_pct = max(limits.min_policy_pct, limits.max_perf_pct);
434 limits.max_perf_pct = max(limits.min_perf_pct, limits.max_perf_pct);
426 limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100)); 435 limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));
427 436
428 if (hwp_active) 437 if (hwp_active)
@@ -442,6 +451,8 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
442 451
443 limits.min_sysfs_pct = clamp_t(int, input, 0 , 100); 452 limits.min_sysfs_pct = clamp_t(int, input, 0 , 100);
444 limits.min_perf_pct = max(limits.min_policy_pct, limits.min_sysfs_pct); 453 limits.min_perf_pct = max(limits.min_policy_pct, limits.min_sysfs_pct);
454 limits.min_perf_pct = min(limits.max_policy_pct, limits.min_perf_pct);
455 limits.min_perf_pct = min(limits.max_perf_pct, limits.min_perf_pct);
445 limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100)); 456 limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));
446 457
447 if (hwp_active) 458 if (hwp_active)
@@ -989,12 +1000,19 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
989 1000
990 limits.min_policy_pct = (policy->min * 100) / policy->cpuinfo.max_freq; 1001 limits.min_policy_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
991 limits.min_policy_pct = clamp_t(int, limits.min_policy_pct, 0 , 100); 1002 limits.min_policy_pct = clamp_t(int, limits.min_policy_pct, 0 , 100);
992 limits.min_perf_pct = max(limits.min_policy_pct, limits.min_sysfs_pct);
993 limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));
994
995 limits.max_policy_pct = (policy->max * 100) / policy->cpuinfo.max_freq; 1003 limits.max_policy_pct = (policy->max * 100) / policy->cpuinfo.max_freq;
996 limits.max_policy_pct = clamp_t(int, limits.max_policy_pct, 0 , 100); 1004 limits.max_policy_pct = clamp_t(int, limits.max_policy_pct, 0 , 100);
1005
1006 /* Normalize user input to [min_policy_pct, max_policy_pct] */
1007 limits.min_perf_pct = max(limits.min_policy_pct, limits.min_sysfs_pct);
1008 limits.min_perf_pct = min(limits.max_policy_pct, limits.min_perf_pct);
997 limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct); 1009 limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
1010 limits.max_perf_pct = max(limits.min_policy_pct, limits.max_perf_pct);
1011
1012 /* Make sure min_perf_pct <= max_perf_pct */
1013 limits.min_perf_pct = min(limits.max_perf_pct, limits.min_perf_pct);
1014
1015 limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));
998 limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100)); 1016 limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));
999 1017
1000 if (hwp_active) 1018 if (hwp_active)
diff --git a/drivers/cpuidle/coupled.c b/drivers/cpuidle/coupled.c
index 1523e2d745eb..344058f8501a 100644
--- a/drivers/cpuidle/coupled.c
+++ b/drivers/cpuidle/coupled.c
@@ -187,6 +187,28 @@ bool cpuidle_state_is_coupled(struct cpuidle_driver *drv, int state)
187} 187}
188 188
189/** 189/**
190 * cpuidle_coupled_state_verify - check if the coupled states are correctly set.
191 * @drv: struct cpuidle_driver for the platform
192 *
193 * Returns 0 for valid state values, a negative error code otherwise:
194 * * -EINVAL if any coupled state(safe_state_index) is wrongly set.
195 */
196int cpuidle_coupled_state_verify(struct cpuidle_driver *drv)
197{
198 int i;
199
200 for (i = drv->state_count - 1; i >= 0; i--) {
201 if (cpuidle_state_is_coupled(drv, i) &&
202 (drv->safe_state_index == i ||
203 drv->safe_state_index < 0 ||
204 drv->safe_state_index >= drv->state_count))
205 return -EINVAL;
206 }
207
208 return 0;
209}
210
211/**
190 * cpuidle_coupled_set_ready - mark a cpu as ready 212 * cpuidle_coupled_set_ready - mark a cpu as ready
191 * @coupled: the struct coupled that contains the current cpu 213 * @coupled: the struct coupled that contains the current cpu
192 */ 214 */
diff --git a/drivers/cpuidle/cpuidle.h b/drivers/cpuidle/cpuidle.h
index 178c5ad3d568..f87f399b0540 100644
--- a/drivers/cpuidle/cpuidle.h
+++ b/drivers/cpuidle/cpuidle.h
@@ -35,6 +35,7 @@ extern void cpuidle_remove_sysfs(struct cpuidle_device *dev);
35 35
36#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED 36#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
37bool cpuidle_state_is_coupled(struct cpuidle_driver *drv, int state); 37bool cpuidle_state_is_coupled(struct cpuidle_driver *drv, int state);
38int cpuidle_coupled_state_verify(struct cpuidle_driver *drv);
38int cpuidle_enter_state_coupled(struct cpuidle_device *dev, 39int cpuidle_enter_state_coupled(struct cpuidle_device *dev,
39 struct cpuidle_driver *drv, int next_state); 40 struct cpuidle_driver *drv, int next_state);
40int cpuidle_coupled_register_device(struct cpuidle_device *dev); 41int cpuidle_coupled_register_device(struct cpuidle_device *dev);
@@ -46,6 +47,11 @@ bool cpuidle_state_is_coupled(struct cpuidle_driver *drv, int state)
46 return false; 47 return false;
47} 48}
48 49
50static inline int cpuidle_coupled_state_verify(struct cpuidle_driver *drv)
51{
52 return 0;
53}
54
49static inline int cpuidle_enter_state_coupled(struct cpuidle_device *dev, 55static inline int cpuidle_enter_state_coupled(struct cpuidle_device *dev,
50 struct cpuidle_driver *drv, int next_state) 56 struct cpuidle_driver *drv, int next_state)
51{ 57{
diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
index 5db147859b90..389ade4572be 100644
--- a/drivers/cpuidle/driver.c
+++ b/drivers/cpuidle/driver.c
@@ -227,6 +227,10 @@ static int __cpuidle_register_driver(struct cpuidle_driver *drv)
227 if (!drv || !drv->state_count) 227 if (!drv || !drv->state_count)
228 return -EINVAL; 228 return -EINVAL;
229 229
230 ret = cpuidle_coupled_state_verify(drv);
231 if (ret)
232 return ret;
233
230 if (cpuidle_disabled()) 234 if (cpuidle_disabled())
231 return -ENODEV; 235 return -ENODEV;
232 236
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 07bc7aa6b224..d234719065a5 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -461,7 +461,7 @@ config CRYPTO_DEV_QCE
461 461
462config CRYPTO_DEV_VMX 462config CRYPTO_DEV_VMX
463 bool "Support for VMX cryptographic acceleration instructions" 463 bool "Support for VMX cryptographic acceleration instructions"
464 depends on PPC64 464 depends on PPC64 && VSX
465 help 465 help
466 Support for VMX cryptographic acceleration instructions. 466 Support for VMX cryptographic acceleration instructions.
467 467
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
index e070c316e8b7..a19ee127edca 100644
--- a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
+++ b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
@@ -104,7 +104,7 @@ static int sun4i_ss_opti_poll(struct ablkcipher_request *areq)
104 sg_miter_next(&mo); 104 sg_miter_next(&mo);
105 oo = 0; 105 oo = 0;
106 } 106 }
107 } while (mo.length > 0); 107 } while (oleft > 0);
108 108
109 if (areq->info) { 109 if (areq->info) {
110 for (i = 0; i < 4 && i < ivsize / 4; i++) { 110 for (i = 0; i < 4 && i < ivsize / 4; i++) {
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index ca1b362d77e2..3927ed9fdbd5 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -53,7 +53,7 @@ static struct devfreq *find_device_devfreq(struct device *dev)
53{ 53{
54 struct devfreq *tmp_devfreq; 54 struct devfreq *tmp_devfreq;
55 55
56 if (unlikely(IS_ERR_OR_NULL(dev))) { 56 if (IS_ERR_OR_NULL(dev)) {
57 pr_err("DEVFREQ: %s: Invalid parameters\n", __func__); 57 pr_err("DEVFREQ: %s: Invalid parameters\n", __func__);
58 return ERR_PTR(-EINVAL); 58 return ERR_PTR(-EINVAL);
59 } 59 }
@@ -133,7 +133,7 @@ static struct devfreq_governor *find_devfreq_governor(const char *name)
133{ 133{
134 struct devfreq_governor *tmp_governor; 134 struct devfreq_governor *tmp_governor;
135 135
136 if (unlikely(IS_ERR_OR_NULL(name))) { 136 if (IS_ERR_OR_NULL(name)) {
137 pr_err("DEVFREQ: %s: Invalid parameters\n", __func__); 137 pr_err("DEVFREQ: %s: Invalid parameters\n", __func__);
138 return ERR_PTR(-EINVAL); 138 return ERR_PTR(-EINVAL);
139 } 139 }
@@ -177,10 +177,10 @@ int update_devfreq(struct devfreq *devfreq)
177 return err; 177 return err;
178 178
179 /* 179 /*
180 * Adjust the freuqency with user freq and QoS. 180 * Adjust the frequency with user freq and QoS.
181 * 181 *
182 * List from the highest proiority 182 * List from the highest priority
183 * max_freq (probably called by thermal when it's too hot) 183 * max_freq
184 * min_freq 184 * min_freq
185 */ 185 */
186 186
@@ -482,7 +482,7 @@ struct devfreq *devfreq_add_device(struct device *dev,
482 devfreq->profile->max_state * 482 devfreq->profile->max_state *
483 devfreq->profile->max_state, 483 devfreq->profile->max_state,
484 GFP_KERNEL); 484 GFP_KERNEL);
485 devfreq->time_in_state = devm_kzalloc(dev, sizeof(unsigned int) * 485 devfreq->time_in_state = devm_kzalloc(dev, sizeof(unsigned long) *
486 devfreq->profile->max_state, 486 devfreq->profile->max_state,
487 GFP_KERNEL); 487 GFP_KERNEL);
488 devfreq->last_stat_updated = jiffies; 488 devfreq->last_stat_updated = jiffies;
diff --git a/drivers/devfreq/event/exynos-ppmu.c b/drivers/devfreq/event/exynos-ppmu.c
index f9901f52a225..f312485f1451 100644
--- a/drivers/devfreq/event/exynos-ppmu.c
+++ b/drivers/devfreq/event/exynos-ppmu.c
@@ -319,7 +319,8 @@ static int exynos_ppmu_v2_get_event(struct devfreq_event_dev *edev,
319 case PPMU_PMNCNT3: 319 case PPMU_PMNCNT3:
320 pmcnt_high = __raw_readl(info->ppmu.base + PPMU_V2_PMCNT3_HIGH); 320 pmcnt_high = __raw_readl(info->ppmu.base + PPMU_V2_PMCNT3_HIGH);
321 pmcnt_low = __raw_readl(info->ppmu.base + PPMU_V2_PMCNT3_LOW); 321 pmcnt_low = __raw_readl(info->ppmu.base + PPMU_V2_PMCNT3_LOW);
322 load_count = (u64)((pmcnt_high & 0xff) << 32) + (u64)pmcnt_low; 322 load_count = ((u64)((pmcnt_high & 0xff)) << 32)
323 + (u64)pmcnt_low;
323 break; 324 break;
324 } 325 }
325 edata->load_count = load_count; 326 edata->load_count = load_count;
diff --git a/drivers/devfreq/governor_simpleondemand.c b/drivers/devfreq/governor_simpleondemand.c
index 0720ba84ca92..ae72ba5e78df 100644
--- a/drivers/devfreq/governor_simpleondemand.c
+++ b/drivers/devfreq/governor_simpleondemand.c
@@ -21,17 +21,20 @@
21static int devfreq_simple_ondemand_func(struct devfreq *df, 21static int devfreq_simple_ondemand_func(struct devfreq *df,
22 unsigned long *freq) 22 unsigned long *freq)
23{ 23{
24 struct devfreq_dev_status stat; 24 int err;
25 int err = df->profile->get_dev_status(df->dev.parent, &stat); 25 struct devfreq_dev_status *stat;
26 unsigned long long a, b; 26 unsigned long long a, b;
27 unsigned int dfso_upthreshold = DFSO_UPTHRESHOLD; 27 unsigned int dfso_upthreshold = DFSO_UPTHRESHOLD;
28 unsigned int dfso_downdifferential = DFSO_DOWNDIFFERENCTIAL; 28 unsigned int dfso_downdifferential = DFSO_DOWNDIFFERENCTIAL;
29 struct devfreq_simple_ondemand_data *data = df->data; 29 struct devfreq_simple_ondemand_data *data = df->data;
30 unsigned long max = (df->max_freq) ? df->max_freq : UINT_MAX; 30 unsigned long max = (df->max_freq) ? df->max_freq : UINT_MAX;
31 31
32 err = devfreq_update_stats(df);
32 if (err) 33 if (err)
33 return err; 34 return err;
34 35
36 stat = &df->last_status;
37
35 if (data) { 38 if (data) {
36 if (data->upthreshold) 39 if (data->upthreshold)
37 dfso_upthreshold = data->upthreshold; 40 dfso_upthreshold = data->upthreshold;
@@ -43,41 +46,41 @@ static int devfreq_simple_ondemand_func(struct devfreq *df,
43 return -EINVAL; 46 return -EINVAL;
44 47
45 /* Assume MAX if it is going to be divided by zero */ 48 /* Assume MAX if it is going to be divided by zero */
46 if (stat.total_time == 0) { 49 if (stat->total_time == 0) {
47 *freq = max; 50 *freq = max;
48 return 0; 51 return 0;
49 } 52 }
50 53
51 /* Prevent overflow */ 54 /* Prevent overflow */
52 if (stat.busy_time >= (1 << 24) || stat.total_time >= (1 << 24)) { 55 if (stat->busy_time >= (1 << 24) || stat->total_time >= (1 << 24)) {
53 stat.busy_time >>= 7; 56 stat->busy_time >>= 7;
54 stat.total_time >>= 7; 57 stat->total_time >>= 7;
55 } 58 }
56 59
57 /* Set MAX if it's busy enough */ 60 /* Set MAX if it's busy enough */
58 if (stat.busy_time * 100 > 61 if (stat->busy_time * 100 >
59 stat.total_time * dfso_upthreshold) { 62 stat->total_time * dfso_upthreshold) {
60 *freq = max; 63 *freq = max;
61 return 0; 64 return 0;
62 } 65 }
63 66
64 /* Set MAX if we do not know the initial frequency */ 67 /* Set MAX if we do not know the initial frequency */
65 if (stat.current_frequency == 0) { 68 if (stat->current_frequency == 0) {
66 *freq = max; 69 *freq = max;
67 return 0; 70 return 0;
68 } 71 }
69 72
70 /* Keep the current frequency */ 73 /* Keep the current frequency */
71 if (stat.busy_time * 100 > 74 if (stat->busy_time * 100 >
72 stat.total_time * (dfso_upthreshold - dfso_downdifferential)) { 75 stat->total_time * (dfso_upthreshold - dfso_downdifferential)) {
73 *freq = stat.current_frequency; 76 *freq = stat->current_frequency;
74 return 0; 77 return 0;
75 } 78 }
76 79
77 /* Set the desired frequency based on the load */ 80 /* Set the desired frequency based on the load */
78 a = stat.busy_time; 81 a = stat->busy_time;
79 a *= stat.current_frequency; 82 a *= stat->current_frequency;
80 b = div_u64(a, stat.total_time); 83 b = div_u64(a, stat->total_time);
81 b *= 100; 84 b *= 100;
82 b = div_u64(b, (dfso_upthreshold - dfso_downdifferential / 2)); 85 b = div_u64(b, (dfso_upthreshold - dfso_downdifferential / 2));
83 *freq = (unsigned long) b; 86 *freq = (unsigned long) b;
diff --git a/drivers/devfreq/tegra-devfreq.c b/drivers/devfreq/tegra-devfreq.c
index 13a1a6e8108c..848b93ee930f 100644
--- a/drivers/devfreq/tegra-devfreq.c
+++ b/drivers/devfreq/tegra-devfreq.c
@@ -541,18 +541,20 @@ static struct devfreq_dev_profile tegra_devfreq_profile = {
541static int tegra_governor_get_target(struct devfreq *devfreq, 541static int tegra_governor_get_target(struct devfreq *devfreq,
542 unsigned long *freq) 542 unsigned long *freq)
543{ 543{
544 struct devfreq_dev_status stat; 544 struct devfreq_dev_status *stat;
545 struct tegra_devfreq *tegra; 545 struct tegra_devfreq *tegra;
546 struct tegra_devfreq_device *dev; 546 struct tegra_devfreq_device *dev;
547 unsigned long target_freq = 0; 547 unsigned long target_freq = 0;
548 unsigned int i; 548 unsigned int i;
549 int err; 549 int err;
550 550
551 err = devfreq->profile->get_dev_status(devfreq->dev.parent, &stat); 551 err = devfreq_update_stats(devfreq);
552 if (err) 552 if (err)
553 return err; 553 return err;
554 554
555 tegra = stat.private_data; 555 stat = &devfreq->last_status;
556
557 tegra = stat->private_data;
556 558
557 for (i = 0; i < ARRAY_SIZE(tegra->devices); i++) { 559 for (i = 0; i < ARRAY_SIZE(tegra->devices); i++) {
558 dev = &tegra->devices[i]; 560 dev = &tegra->devices[i];
diff --git a/drivers/dma/ipu/ipu_irq.c b/drivers/dma/ipu/ipu_irq.c
index 4768a829253a..2bf37e68ad0f 100644
--- a/drivers/dma/ipu/ipu_irq.c
+++ b/drivers/dma/ipu/ipu_irq.c
@@ -266,7 +266,7 @@ int ipu_irq_unmap(unsigned int source)
266} 266}
267 267
268/* Chained IRQ handler for IPU function and error interrupt */ 268/* Chained IRQ handler for IPU function and error interrupt */
269static void ipu_irq_handler(unsigned int __irq, struct irq_desc *desc) 269static void ipu_irq_handler(struct irq_desc *desc)
270{ 270{
271 struct ipu *ipu = irq_desc_get_handler_data(desc); 271 struct ipu *ipu = irq_desc_get_handler_data(desc);
272 u32 status; 272 u32 status;
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index ca7831168298..cf1268ddef0c 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -280,6 +280,7 @@ struct sbridge_info {
280 u8 max_interleave; 280 u8 max_interleave;
281 u8 (*get_node_id)(struct sbridge_pvt *pvt); 281 u8 (*get_node_id)(struct sbridge_pvt *pvt);
282 enum mem_type (*get_memory_type)(struct sbridge_pvt *pvt); 282 enum mem_type (*get_memory_type)(struct sbridge_pvt *pvt);
283 enum dev_type (*get_width)(struct sbridge_pvt *pvt, u32 mtr);
283 struct pci_dev *pci_vtd; 284 struct pci_dev *pci_vtd;
284}; 285};
285 286
@@ -471,6 +472,9 @@ static const struct pci_id_table pci_dev_descr_ibridge_table[] = {
471#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD2 0x2f6c 472#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD2 0x2f6c
472#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD3 0x2f6d 473#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD3 0x2f6d
473#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO0 0x2fbd 474#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO0 0x2fbd
475#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO1 0x2fbf
476#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO2 0x2fb9
477#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO3 0x2fbb
474static const struct pci_id_descr pci_dev_descr_haswell[] = { 478static const struct pci_id_descr pci_dev_descr_haswell[] = {
475 /* first item must be the HA */ 479 /* first item must be the HA */
476 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0, 0) }, 480 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0, 0) },
@@ -488,6 +492,9 @@ static const struct pci_id_descr pci_dev_descr_haswell[] = {
488 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD3, 1) }, 492 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD3, 1) },
489 493
490 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO0, 1) }, 494 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO0, 1) },
495 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO1, 1) },
496 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO2, 1) },
497 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO3, 1) },
491 498
492 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TA, 1) }, 499 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TA, 1) },
493 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_THERMAL, 1) }, 500 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_THERMAL, 1) },
@@ -762,6 +769,49 @@ out:
762 return mtype; 769 return mtype;
763} 770}
764 771
772static enum dev_type sbridge_get_width(struct sbridge_pvt *pvt, u32 mtr)
773{
774 /* there's no way to figure out */
775 return DEV_UNKNOWN;
776}
777
778static enum dev_type __ibridge_get_width(u32 mtr)
779{
780 enum dev_type type;
781
782 switch (mtr) {
783 case 3:
784 type = DEV_UNKNOWN;
785 break;
786 case 2:
787 type = DEV_X16;
788 break;
789 case 1:
790 type = DEV_X8;
791 break;
792 case 0:
793 type = DEV_X4;
794 break;
795 }
796
797 return type;
798}
799
800static enum dev_type ibridge_get_width(struct sbridge_pvt *pvt, u32 mtr)
801{
802 /*
803 * ddr3_width on the documentation but also valid for DDR4 on
804 * Haswell
805 */
806 return __ibridge_get_width(GET_BITFIELD(mtr, 7, 8));
807}
808
809static enum dev_type broadwell_get_width(struct sbridge_pvt *pvt, u32 mtr)
810{
811 /* ddr3_width on the documentation but also valid for DDR4 */
812 return __ibridge_get_width(GET_BITFIELD(mtr, 8, 9));
813}
814
765static u8 get_node_id(struct sbridge_pvt *pvt) 815static u8 get_node_id(struct sbridge_pvt *pvt)
766{ 816{
767 u32 reg; 817 u32 reg;
@@ -966,17 +1016,7 @@ static int get_dimm_config(struct mem_ctl_info *mci)
966 1016
967 dimm->nr_pages = npages; 1017 dimm->nr_pages = npages;
968 dimm->grain = 32; 1018 dimm->grain = 32;
969 switch (banks) { 1019 dimm->dtype = pvt->info.get_width(pvt, mtr);
970 case 16:
971 dimm->dtype = DEV_X16;
972 break;
973 case 8:
974 dimm->dtype = DEV_X8;
975 break;
976 case 4:
977 dimm->dtype = DEV_X4;
978 break;
979 }
980 dimm->mtype = mtype; 1020 dimm->mtype = mtype;
981 dimm->edac_mode = mode; 1021 dimm->edac_mode = mode;
982 snprintf(dimm->label, sizeof(dimm->label), 1022 snprintf(dimm->label, sizeof(dimm->label),
@@ -1869,7 +1909,11 @@ static int haswell_mci_bind_devs(struct mem_ctl_info *mci,
1869 } 1909 }
1870 break; 1910 break;
1871 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO0: 1911 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO0:
1872 pvt->pci_ddrio = pdev; 1912 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO1:
1913 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO2:
1914 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO3:
1915 if (!pvt->pci_ddrio)
1916 pvt->pci_ddrio = pdev;
1873 break; 1917 break;
1874 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1: 1918 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1:
1875 pvt->pci_ha1 = pdev; 1919 pvt->pci_ha1 = pdev;
@@ -2361,6 +2405,7 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type)
2361 pvt->info.interleave_list = ibridge_interleave_list; 2405 pvt->info.interleave_list = ibridge_interleave_list;
2362 pvt->info.max_interleave = ARRAY_SIZE(ibridge_interleave_list); 2406 pvt->info.max_interleave = ARRAY_SIZE(ibridge_interleave_list);
2363 pvt->info.interleave_pkg = ibridge_interleave_pkg; 2407 pvt->info.interleave_pkg = ibridge_interleave_pkg;
2408 pvt->info.get_width = ibridge_get_width;
2364 mci->ctl_name = kasprintf(GFP_KERNEL, "Ivy Bridge Socket#%d", mci->mc_idx); 2409 mci->ctl_name = kasprintf(GFP_KERNEL, "Ivy Bridge Socket#%d", mci->mc_idx);
2365 2410
2366 /* Store pci devices at mci for faster access */ 2411 /* Store pci devices at mci for faster access */
@@ -2380,6 +2425,7 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type)
2380 pvt->info.interleave_list = sbridge_interleave_list; 2425 pvt->info.interleave_list = sbridge_interleave_list;
2381 pvt->info.max_interleave = ARRAY_SIZE(sbridge_interleave_list); 2426 pvt->info.max_interleave = ARRAY_SIZE(sbridge_interleave_list);
2382 pvt->info.interleave_pkg = sbridge_interleave_pkg; 2427 pvt->info.interleave_pkg = sbridge_interleave_pkg;
2428 pvt->info.get_width = sbridge_get_width;
2383 mci->ctl_name = kasprintf(GFP_KERNEL, "Sandy Bridge Socket#%d", mci->mc_idx); 2429 mci->ctl_name = kasprintf(GFP_KERNEL, "Sandy Bridge Socket#%d", mci->mc_idx);
2384 2430
2385 /* Store pci devices at mci for faster access */ 2431 /* Store pci devices at mci for faster access */
@@ -2399,6 +2445,7 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type)
2399 pvt->info.interleave_list = ibridge_interleave_list; 2445 pvt->info.interleave_list = ibridge_interleave_list;
2400 pvt->info.max_interleave = ARRAY_SIZE(ibridge_interleave_list); 2446 pvt->info.max_interleave = ARRAY_SIZE(ibridge_interleave_list);
2401 pvt->info.interleave_pkg = ibridge_interleave_pkg; 2447 pvt->info.interleave_pkg = ibridge_interleave_pkg;
2448 pvt->info.get_width = ibridge_get_width;
2402 mci->ctl_name = kasprintf(GFP_KERNEL, "Haswell Socket#%d", mci->mc_idx); 2449 mci->ctl_name = kasprintf(GFP_KERNEL, "Haswell Socket#%d", mci->mc_idx);
2403 2450
2404 /* Store pci devices at mci for faster access */ 2451 /* Store pci devices at mci for faster access */
@@ -2418,6 +2465,7 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type)
2418 pvt->info.interleave_list = ibridge_interleave_list; 2465 pvt->info.interleave_list = ibridge_interleave_list;
2419 pvt->info.max_interleave = ARRAY_SIZE(ibridge_interleave_list); 2466 pvt->info.max_interleave = ARRAY_SIZE(ibridge_interleave_list);
2420 pvt->info.interleave_pkg = ibridge_interleave_pkg; 2467 pvt->info.interleave_pkg = ibridge_interleave_pkg;
2468 pvt->info.get_width = broadwell_get_width;
2421 mci->ctl_name = kasprintf(GFP_KERNEL, "Broadwell Socket#%d", mci->mc_idx); 2469 mci->ctl_name = kasprintf(GFP_KERNEL, "Broadwell Socket#%d", mci->mc_idx);
2422 2470
2423 /* Store pci devices at mci for faster access */ 2471 /* Store pci devices at mci for faster access */
diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h
index e334a01cf92f..6b6548fda089 100644
--- a/drivers/firmware/efi/libstub/efistub.h
+++ b/drivers/firmware/efi/libstub/efistub.h
@@ -5,10 +5,6 @@
5/* error code which can't be mistaken for valid address */ 5/* error code which can't be mistaken for valid address */
6#define EFI_ERROR (~0UL) 6#define EFI_ERROR (~0UL)
7 7
8#undef memcpy
9#undef memset
10#undef memmove
11
12void efi_char16_printk(efi_system_table_t *, efi_char16_t *); 8void efi_char16_printk(efi_system_table_t *, efi_char16_t *);
13 9
14efi_status_t efi_open_volume(efi_system_table_t *sys_table_arg, void *__image, 10efi_status_t efi_open_volume(efi_system_table_t *sys_table_arg, void *__image,
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index b4fc9e4d24c6..8949b3f6f74d 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -356,7 +356,7 @@ config GPIO_PXA
356 356
357config GPIO_RCAR 357config GPIO_RCAR
358 tristate "Renesas R-Car GPIO" 358 tristate "Renesas R-Car GPIO"
359 depends on ARM && (ARCH_SHMOBILE || COMPILE_TEST) 359 depends on ARCH_SHMOBILE || COMPILE_TEST
360 select GPIOLIB_IRQCHIP 360 select GPIOLIB_IRQCHIP
361 help 361 help
362 Say yes here to support GPIO on Renesas R-Car SoCs. 362 Say yes here to support GPIO on Renesas R-Car SoCs.
diff --git a/drivers/gpio/gpio-altera.c b/drivers/gpio/gpio-altera.c
index 9b7e0b3db387..1b44941574fa 100644
--- a/drivers/gpio/gpio-altera.c
+++ b/drivers/gpio/gpio-altera.c
@@ -201,8 +201,7 @@ static int altera_gpio_direction_output(struct gpio_chip *gc,
201 return 0; 201 return 0;
202} 202}
203 203
204static void altera_gpio_irq_edge_handler(unsigned int irq, 204static void altera_gpio_irq_edge_handler(struct irq_desc *desc)
205 struct irq_desc *desc)
206{ 205{
207 struct altera_gpio_chip *altera_gc; 206 struct altera_gpio_chip *altera_gc;
208 struct irq_chip *chip; 207 struct irq_chip *chip;
@@ -231,8 +230,7 @@ static void altera_gpio_irq_edge_handler(unsigned int irq,
231} 230}
232 231
233 232
234static void altera_gpio_irq_leveL_high_handler(unsigned int irq, 233static void altera_gpio_irq_leveL_high_handler(struct irq_desc *desc)
235 struct irq_desc *desc)
236{ 234{
237 struct altera_gpio_chip *altera_gc; 235 struct altera_gpio_chip *altera_gc;
238 struct irq_chip *chip; 236 struct irq_chip *chip;
diff --git a/drivers/gpio/gpio-bcm-kona.c b/drivers/gpio/gpio-bcm-kona.c
index 31b90ac15204..33a1f9779b86 100644
--- a/drivers/gpio/gpio-bcm-kona.c
+++ b/drivers/gpio/gpio-bcm-kona.c
@@ -433,7 +433,7 @@ static int bcm_kona_gpio_irq_set_type(struct irq_data *d, unsigned int type)
433 return 0; 433 return 0;
434} 434}
435 435
436static void bcm_kona_gpio_irq_handler(unsigned int irq, struct irq_desc *desc) 436static void bcm_kona_gpio_irq_handler(struct irq_desc *desc)
437{ 437{
438 void __iomem *reg_base; 438 void __iomem *reg_base;
439 int bit, bank_id; 439 int bit, bank_id;
diff --git a/drivers/gpio/gpio-brcmstb.c b/drivers/gpio/gpio-brcmstb.c
index 9ea86d2ac054..4c64627c6bb5 100644
--- a/drivers/gpio/gpio-brcmstb.c
+++ b/drivers/gpio/gpio-brcmstb.c
@@ -236,7 +236,7 @@ static void brcmstb_gpio_irq_bank_handler(struct brcmstb_gpio_bank *bank)
236} 236}
237 237
238/* Each UPG GIO block has one IRQ for all banks */ 238/* Each UPG GIO block has one IRQ for all banks */
239static void brcmstb_gpio_irq_handler(unsigned int irq, struct irq_desc *desc) 239static void brcmstb_gpio_irq_handler(struct irq_desc *desc)
240{ 240{
241 struct gpio_chip *gc = irq_desc_get_handler_data(desc); 241 struct gpio_chip *gc = irq_desc_get_handler_data(desc);
242 struct brcmstb_gpio_priv *priv = brcmstb_gpio_gc_to_priv(gc); 242 struct brcmstb_gpio_priv *priv = brcmstb_gpio_gc_to_priv(gc);
diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c
index 94b0ab709721..5e715388803d 100644
--- a/drivers/gpio/gpio-davinci.c
+++ b/drivers/gpio/gpio-davinci.c
@@ -326,8 +326,7 @@ static struct irq_chip gpio_irqchip = {
326 .flags = IRQCHIP_SET_TYPE_MASKED, 326 .flags = IRQCHIP_SET_TYPE_MASKED,
327}; 327};
328 328
329static void 329static void gpio_irq_handler(struct irq_desc *desc)
330gpio_irq_handler(unsigned __irq, struct irq_desc *desc)
331{ 330{
332 unsigned int irq = irq_desc_get_irq(desc); 331 unsigned int irq = irq_desc_get_irq(desc);
333 struct davinci_gpio_regs __iomem *g; 332 struct davinci_gpio_regs __iomem *g;
diff --git a/drivers/gpio/gpio-dwapb.c b/drivers/gpio/gpio-dwapb.c
index c5be4b9b8baf..fcd5b0acfc72 100644
--- a/drivers/gpio/gpio-dwapb.c
+++ b/drivers/gpio/gpio-dwapb.c
@@ -147,7 +147,7 @@ static u32 dwapb_do_irq(struct dwapb_gpio *gpio)
147 return ret; 147 return ret;
148} 148}
149 149
150static void dwapb_irq_handler(u32 irq, struct irq_desc *desc) 150static void dwapb_irq_handler(struct irq_desc *desc)
151{ 151{
152 struct dwapb_gpio *gpio = irq_desc_get_handler_data(desc); 152 struct dwapb_gpio *gpio = irq_desc_get_handler_data(desc);
153 struct irq_chip *chip = irq_desc_get_chip(desc); 153 struct irq_chip *chip = irq_desc_get_chip(desc);
diff --git a/drivers/gpio/gpio-ep93xx.c b/drivers/gpio/gpio-ep93xx.c
index 9d90366ea259..3e3947b35c83 100644
--- a/drivers/gpio/gpio-ep93xx.c
+++ b/drivers/gpio/gpio-ep93xx.c
@@ -78,7 +78,7 @@ static void ep93xx_gpio_int_debounce(unsigned int irq, bool enable)
78 EP93XX_GPIO_REG(int_debounce_register_offset[port])); 78 EP93XX_GPIO_REG(int_debounce_register_offset[port]));
79} 79}
80 80
81static void ep93xx_gpio_ab_irq_handler(unsigned int irq, struct irq_desc *desc) 81static void ep93xx_gpio_ab_irq_handler(struct irq_desc *desc)
82{ 82{
83 unsigned char status; 83 unsigned char status;
84 int i; 84 int i;
@@ -100,8 +100,7 @@ static void ep93xx_gpio_ab_irq_handler(unsigned int irq, struct irq_desc *desc)
100 } 100 }
101} 101}
102 102
103static void ep93xx_gpio_f_irq_handler(unsigned int __irq, 103static void ep93xx_gpio_f_irq_handler(struct irq_desc *desc)
104 struct irq_desc *desc)
105{ 104{
106 /* 105 /*
107 * map discontiguous hw irq range to continuous sw irq range: 106 * map discontiguous hw irq range to continuous sw irq range:
diff --git a/drivers/gpio/gpio-intel-mid.c b/drivers/gpio/gpio-intel-mid.c
index aa28c65eb6b4..70097472b02c 100644
--- a/drivers/gpio/gpio-intel-mid.c
+++ b/drivers/gpio/gpio-intel-mid.c
@@ -301,7 +301,7 @@ static const struct pci_device_id intel_gpio_ids[] = {
301}; 301};
302MODULE_DEVICE_TABLE(pci, intel_gpio_ids); 302MODULE_DEVICE_TABLE(pci, intel_gpio_ids);
303 303
304static void intel_mid_irq_handler(unsigned irq, struct irq_desc *desc) 304static void intel_mid_irq_handler(struct irq_desc *desc)
305{ 305{
306 struct gpio_chip *gc = irq_desc_get_handler_data(desc); 306 struct gpio_chip *gc = irq_desc_get_handler_data(desc);
307 struct intel_mid_gpio *priv = to_intel_gpio_priv(gc); 307 struct intel_mid_gpio *priv = to_intel_gpio_priv(gc);
diff --git a/drivers/gpio/gpio-lynxpoint.c b/drivers/gpio/gpio-lynxpoint.c
index 153af464c7a7..127c37b380ae 100644
--- a/drivers/gpio/gpio-lynxpoint.c
+++ b/drivers/gpio/gpio-lynxpoint.c
@@ -234,7 +234,7 @@ static int lp_gpio_direction_output(struct gpio_chip *chip,
234 return 0; 234 return 0;
235} 235}
236 236
237static void lp_gpio_irq_handler(unsigned hwirq, struct irq_desc *desc) 237static void lp_gpio_irq_handler(struct irq_desc *desc)
238{ 238{
239 struct irq_data *data = irq_desc_get_irq_data(desc); 239 struct irq_data *data = irq_desc_get_irq_data(desc);
240 struct gpio_chip *gc = irq_desc_get_handler_data(desc); 240 struct gpio_chip *gc = irq_desc_get_handler_data(desc);
diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c
index 8ef7a12de983..48ef368347ab 100644
--- a/drivers/gpio/gpio-mpc8xxx.c
+++ b/drivers/gpio/gpio-mpc8xxx.c
@@ -194,7 +194,7 @@ static int mpc8xxx_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
194 return -ENXIO; 194 return -ENXIO;
195} 195}
196 196
197static void mpc8xxx_gpio_irq_cascade(unsigned int irq, struct irq_desc *desc) 197static void mpc8xxx_gpio_irq_cascade(struct irq_desc *desc)
198{ 198{
199 struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_desc_get_handler_data(desc); 199 struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_desc_get_handler_data(desc);
200 struct irq_chip *chip = irq_desc_get_chip(desc); 200 struct irq_chip *chip = irq_desc_get_chip(desc);
diff --git a/drivers/gpio/gpio-msic.c b/drivers/gpio/gpio-msic.c
index 7bcfb87a5fa6..22523aae8abe 100644
--- a/drivers/gpio/gpio-msic.c
+++ b/drivers/gpio/gpio-msic.c
@@ -232,7 +232,7 @@ static struct irq_chip msic_irqchip = {
232 .irq_bus_sync_unlock = msic_bus_sync_unlock, 232 .irq_bus_sync_unlock = msic_bus_sync_unlock,
233}; 233};
234 234
235static void msic_gpio_irq_handler(unsigned irq, struct irq_desc *desc) 235static void msic_gpio_irq_handler(struct irq_desc *desc)
236{ 236{
237 struct irq_data *data = irq_desc_get_irq_data(desc); 237 struct irq_data *data = irq_desc_get_irq_data(desc);
238 struct msic_gpio *mg = irq_data_get_irq_handler_data(data); 238 struct msic_gpio *mg = irq_data_get_irq_handler_data(data);
diff --git a/drivers/gpio/gpio-msm-v2.c b/drivers/gpio/gpio-msm-v2.c
index d2012cfb5571..4b4222145f10 100644
--- a/drivers/gpio/gpio-msm-v2.c
+++ b/drivers/gpio/gpio-msm-v2.c
@@ -305,7 +305,7 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int flow_type)
305 * which have been set as summary IRQ lines and which are triggered, 305 * which have been set as summary IRQ lines and which are triggered,
306 * and to call their interrupt handlers. 306 * and to call their interrupt handlers.
307 */ 307 */
308static void msm_summary_irq_handler(unsigned int irq, struct irq_desc *desc) 308static void msm_summary_irq_handler(struct irq_desc *desc)
309{ 309{
310 unsigned long i; 310 unsigned long i;
311 struct irq_chip *chip = irq_desc_get_chip(desc); 311 struct irq_chip *chip = irq_desc_get_chip(desc);
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
index b396bf3bf294..df418b81456d 100644
--- a/drivers/gpio/gpio-mvebu.c
+++ b/drivers/gpio/gpio-mvebu.c
@@ -458,7 +458,7 @@ static int mvebu_gpio_irq_set_type(struct irq_data *d, unsigned int type)
458 return 0; 458 return 0;
459} 459}
460 460
461static void mvebu_gpio_irq_handler(unsigned int __irq, struct irq_desc *desc) 461static void mvebu_gpio_irq_handler(struct irq_desc *desc)
462{ 462{
463 struct mvebu_gpio_chip *mvchip = irq_desc_get_handler_data(desc); 463 struct mvebu_gpio_chip *mvchip = irq_desc_get_handler_data(desc);
464 struct irq_chip *chip = irq_desc_get_chip(desc); 464 struct irq_chip *chip = irq_desc_get_chip(desc);
diff --git a/drivers/gpio/gpio-mxc.c b/drivers/gpio/gpio-mxc.c
index b752b560126e..b8dd847443c5 100644
--- a/drivers/gpio/gpio-mxc.c
+++ b/drivers/gpio/gpio-mxc.c
@@ -272,7 +272,7 @@ static void mxc_gpio_irq_handler(struct mxc_gpio_port *port, u32 irq_stat)
272} 272}
273 273
274/* MX1 and MX3 has one interrupt *per* gpio port */ 274/* MX1 and MX3 has one interrupt *per* gpio port */
275static void mx3_gpio_irq_handler(u32 irq, struct irq_desc *desc) 275static void mx3_gpio_irq_handler(struct irq_desc *desc)
276{ 276{
277 u32 irq_stat; 277 u32 irq_stat;
278 struct mxc_gpio_port *port = irq_desc_get_handler_data(desc); 278 struct mxc_gpio_port *port = irq_desc_get_handler_data(desc);
@@ -288,7 +288,7 @@ static void mx3_gpio_irq_handler(u32 irq, struct irq_desc *desc)
288} 288}
289 289
290/* MX2 has one interrupt *for all* gpio ports */ 290/* MX2 has one interrupt *for all* gpio ports */
291static void mx2_gpio_irq_handler(u32 irq, struct irq_desc *desc) 291static void mx2_gpio_irq_handler(struct irq_desc *desc)
292{ 292{
293 u32 irq_msk, irq_stat; 293 u32 irq_msk, irq_stat;
294 struct mxc_gpio_port *port; 294 struct mxc_gpio_port *port;
@@ -339,13 +339,15 @@ static int gpio_set_wake_irq(struct irq_data *d, u32 enable)
339 return 0; 339 return 0;
340} 340}
341 341
342static void mxc_gpio_init_gc(struct mxc_gpio_port *port, int irq_base) 342static int mxc_gpio_init_gc(struct mxc_gpio_port *port, int irq_base)
343{ 343{
344 struct irq_chip_generic *gc; 344 struct irq_chip_generic *gc;
345 struct irq_chip_type *ct; 345 struct irq_chip_type *ct;
346 346
347 gc = irq_alloc_generic_chip("gpio-mxc", 1, irq_base, 347 gc = irq_alloc_generic_chip("gpio-mxc", 1, irq_base,
348 port->base, handle_level_irq); 348 port->base, handle_level_irq);
349 if (!gc)
350 return -ENOMEM;
349 gc->private = port; 351 gc->private = port;
350 352
351 ct = gc->chip_types; 353 ct = gc->chip_types;
@@ -360,6 +362,8 @@ static void mxc_gpio_init_gc(struct mxc_gpio_port *port, int irq_base)
360 362
361 irq_setup_generic_chip(gc, IRQ_MSK(32), IRQ_GC_INIT_NESTED_LOCK, 363 irq_setup_generic_chip(gc, IRQ_MSK(32), IRQ_GC_INIT_NESTED_LOCK,
362 IRQ_NOREQUEST, 0); 364 IRQ_NOREQUEST, 0);
365
366 return 0;
363} 367}
364 368
365static void mxc_gpio_get_hw(struct platform_device *pdev) 369static void mxc_gpio_get_hw(struct platform_device *pdev)
@@ -477,12 +481,16 @@ static int mxc_gpio_probe(struct platform_device *pdev)
477 } 481 }
478 482
479 /* gpio-mxc can be a generic irq chip */ 483 /* gpio-mxc can be a generic irq chip */
480 mxc_gpio_init_gc(port, irq_base); 484 err = mxc_gpio_init_gc(port, irq_base);
485 if (err < 0)
486 goto out_irqdomain_remove;
481 487
482 list_add_tail(&port->node, &mxc_gpio_ports); 488 list_add_tail(&port->node, &mxc_gpio_ports);
483 489
484 return 0; 490 return 0;
485 491
492out_irqdomain_remove:
493 irq_domain_remove(port->domain);
486out_irqdesc_free: 494out_irqdesc_free:
487 irq_free_descs(irq_base, 32); 495 irq_free_descs(irq_base, 32);
488out_gpiochip_remove: 496out_gpiochip_remove:
diff --git a/drivers/gpio/gpio-mxs.c b/drivers/gpio/gpio-mxs.c
index b7f383eb18d9..a4288f428819 100644
--- a/drivers/gpio/gpio-mxs.c
+++ b/drivers/gpio/gpio-mxs.c
@@ -154,7 +154,7 @@ static void mxs_flip_edge(struct mxs_gpio_port *port, u32 gpio)
154} 154}
155 155
156/* MXS has one interrupt *per* gpio port */ 156/* MXS has one interrupt *per* gpio port */
157static void mxs_gpio_irq_handler(u32 irq, struct irq_desc *desc) 157static void mxs_gpio_irq_handler(struct irq_desc *desc)
158{ 158{
159 u32 irq_stat; 159 u32 irq_stat;
160 struct mxs_gpio_port *port = irq_desc_get_handler_data(desc); 160 struct mxs_gpio_port *port = irq_desc_get_handler_data(desc);
@@ -196,13 +196,16 @@ static int mxs_gpio_set_wake_irq(struct irq_data *d, unsigned int enable)
196 return 0; 196 return 0;
197} 197}
198 198
199static void __init mxs_gpio_init_gc(struct mxs_gpio_port *port, int irq_base) 199static int __init mxs_gpio_init_gc(struct mxs_gpio_port *port, int irq_base)
200{ 200{
201 struct irq_chip_generic *gc; 201 struct irq_chip_generic *gc;
202 struct irq_chip_type *ct; 202 struct irq_chip_type *ct;
203 203
204 gc = irq_alloc_generic_chip("gpio-mxs", 1, irq_base, 204 gc = irq_alloc_generic_chip("gpio-mxs", 1, irq_base,
205 port->base, handle_level_irq); 205 port->base, handle_level_irq);
206 if (!gc)
207 return -ENOMEM;
208
206 gc->private = port; 209 gc->private = port;
207 210
208 ct = gc->chip_types; 211 ct = gc->chip_types;
@@ -216,6 +219,8 @@ static void __init mxs_gpio_init_gc(struct mxs_gpio_port *port, int irq_base)
216 219
217 irq_setup_generic_chip(gc, IRQ_MSK(32), IRQ_GC_INIT_NESTED_LOCK, 220 irq_setup_generic_chip(gc, IRQ_MSK(32), IRQ_GC_INIT_NESTED_LOCK,
218 IRQ_NOREQUEST, 0); 221 IRQ_NOREQUEST, 0);
222
223 return 0;
219} 224}
220 225
221static int mxs_gpio_to_irq(struct gpio_chip *gc, unsigned offset) 226static int mxs_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
@@ -317,7 +322,9 @@ static int mxs_gpio_probe(struct platform_device *pdev)
317 } 322 }
318 323
319 /* gpio-mxs can be a generic irq chip */ 324 /* gpio-mxs can be a generic irq chip */
320 mxs_gpio_init_gc(port, irq_base); 325 err = mxs_gpio_init_gc(port, irq_base);
326 if (err < 0)
327 goto out_irqdomain_remove;
321 328
322 /* setup one handler for each entry */ 329 /* setup one handler for each entry */
323 irq_set_chained_handler_and_data(port->irq, mxs_gpio_irq_handler, 330 irq_set_chained_handler_and_data(port->irq, mxs_gpio_irq_handler,
@@ -343,6 +350,8 @@ static int mxs_gpio_probe(struct platform_device *pdev)
343 350
344out_bgpio_remove: 351out_bgpio_remove:
345 bgpio_remove(&port->bgc); 352 bgpio_remove(&port->bgc);
353out_irqdomain_remove:
354 irq_domain_remove(port->domain);
346out_irqdesc_free: 355out_irqdesc_free:
347 irq_free_descs(irq_base, 32); 356 irq_free_descs(irq_base, 32);
348 return err; 357 return err;
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index 2ae0d47e9554..5236db161e76 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -709,7 +709,7 @@ static void omap_gpio_free(struct gpio_chip *chip, unsigned offset)
709 * line's interrupt handler has been run, we may miss some nested 709 * line's interrupt handler has been run, we may miss some nested
710 * interrupts. 710 * interrupts.
711 */ 711 */
712static void omap_gpio_irq_handler(unsigned int irq, struct irq_desc *desc) 712static void omap_gpio_irq_handler(struct irq_desc *desc)
713{ 713{
714 void __iomem *isr_reg = NULL; 714 void __iomem *isr_reg = NULL;
715 u32 isr; 715 u32 isr;
@@ -1098,7 +1098,6 @@ static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc)
1098 } else { 1098 } else {
1099 bank->chip.label = "gpio"; 1099 bank->chip.label = "gpio";
1100 bank->chip.base = gpio; 1100 bank->chip.base = gpio;
1101 gpio += bank->width;
1102 } 1101 }
1103 bank->chip.ngpio = bank->width; 1102 bank->chip.ngpio = bank->width;
1104 1103
@@ -1108,6 +1107,9 @@ static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc)
1108 return ret; 1107 return ret;
1109 } 1108 }
1110 1109
1110 if (!bank->is_mpuio)
1111 gpio += bank->width;
1112
1111#ifdef CONFIG_ARCH_OMAP1 1113#ifdef CONFIG_ARCH_OMAP1
1112 /* 1114 /*
1113 * REVISIT: Once we have OMAP1 supporting SPARSE_IRQ, we can drop 1115 * REVISIT: Once we have OMAP1 supporting SPARSE_IRQ, we can drop
@@ -1253,8 +1255,11 @@ static int omap_gpio_probe(struct platform_device *pdev)
1253 omap_gpio_mod_init(bank); 1255 omap_gpio_mod_init(bank);
1254 1256
1255 ret = omap_gpio_chip_init(bank, irqc); 1257 ret = omap_gpio_chip_init(bank, irqc);
1256 if (ret) 1258 if (ret) {
1259 pm_runtime_put_sync(bank->dev);
1260 pm_runtime_disable(bank->dev);
1257 return ret; 1261 return ret;
1262 }
1258 1263
1259 omap_gpio_show_rev(bank); 1264 omap_gpio_show_rev(bank);
1260 1265
diff --git a/drivers/gpio/gpio-pl061.c b/drivers/gpio/gpio-pl061.c
index 04756130437f..229ef653e0f8 100644
--- a/drivers/gpio/gpio-pl061.c
+++ b/drivers/gpio/gpio-pl061.c
@@ -187,7 +187,7 @@ static int pl061_irq_type(struct irq_data *d, unsigned trigger)
187 return 0; 187 return 0;
188} 188}
189 189
190static void pl061_irq_handler(unsigned irq, struct irq_desc *desc) 190static void pl061_irq_handler(struct irq_desc *desc)
191{ 191{
192 unsigned long pending; 192 unsigned long pending;
193 int offset; 193 int offset;
diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c
index 55a11de3d5b7..df2ce550f309 100644
--- a/drivers/gpio/gpio-pxa.c
+++ b/drivers/gpio/gpio-pxa.c
@@ -401,7 +401,7 @@ static int pxa_gpio_irq_type(struct irq_data *d, unsigned int type)
401 return 0; 401 return 0;
402} 402}
403 403
404static void pxa_gpio_demux_handler(unsigned int irq, struct irq_desc *desc) 404static void pxa_gpio_demux_handler(struct irq_desc *desc)
405{ 405{
406 struct pxa_gpio_chip *c; 406 struct pxa_gpio_chip *c;
407 int loop, gpio, gpio_base, n; 407 int loop, gpio, gpio_base, n;
diff --git a/drivers/gpio/gpio-sa1100.c b/drivers/gpio/gpio-sa1100.c
index 67bd2f5d89e8..990fa9023e22 100644
--- a/drivers/gpio/gpio-sa1100.c
+++ b/drivers/gpio/gpio-sa1100.c
@@ -172,8 +172,7 @@ static struct irq_domain *sa1100_gpio_irqdomain;
172 * irq_controller_lock held, and IRQs disabled. Decode the IRQ 172 * irq_controller_lock held, and IRQs disabled. Decode the IRQ
173 * and call the handler. 173 * and call the handler.
174 */ 174 */
175static void 175static void sa1100_gpio_handler(struct irq_desc *desc)
176sa1100_gpio_handler(unsigned int __irq, struct irq_desc *desc)
177{ 176{
178 unsigned int irq, mask; 177 unsigned int irq, mask;
179 178
diff --git a/drivers/gpio/gpio-sx150x.c b/drivers/gpio/gpio-sx150x.c
index 458d9d7952b8..9c6b96707c9f 100644
--- a/drivers/gpio/gpio-sx150x.c
+++ b/drivers/gpio/gpio-sx150x.c
@@ -706,4 +706,3 @@ module_exit(sx150x_exit);
706MODULE_AUTHOR("Gregory Bean <gbean@codeaurora.org>"); 706MODULE_AUTHOR("Gregory Bean <gbean@codeaurora.org>");
707MODULE_DESCRIPTION("Driver for Semtech SX150X I2C GPIO Expanders"); 707MODULE_DESCRIPTION("Driver for Semtech SX150X I2C GPIO Expanders");
708MODULE_LICENSE("GPL v2"); 708MODULE_LICENSE("GPL v2");
709MODULE_ALIAS("i2c:sx150x");
diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c
index 9b14aafb576d..027e5f47dd28 100644
--- a/drivers/gpio/gpio-tegra.c
+++ b/drivers/gpio/gpio-tegra.c
@@ -266,7 +266,7 @@ static void tegra_gpio_irq_shutdown(struct irq_data *d)
266 gpiochip_unlock_as_irq(&tegra_gpio_chip, gpio); 266 gpiochip_unlock_as_irq(&tegra_gpio_chip, gpio);
267} 267}
268 268
269static void tegra_gpio_irq_handler(unsigned int irq, struct irq_desc *desc) 269static void tegra_gpio_irq_handler(struct irq_desc *desc)
270{ 270{
271 int port; 271 int port;
272 int pin; 272 int pin;
diff --git a/drivers/gpio/gpio-timberdale.c b/drivers/gpio/gpio-timberdale.c
index 5a492054589f..30653e6319e9 100644
--- a/drivers/gpio/gpio-timberdale.c
+++ b/drivers/gpio/gpio-timberdale.c
@@ -192,7 +192,7 @@ out:
192 return ret; 192 return ret;
193} 193}
194 194
195static void timbgpio_irq(unsigned int irq, struct irq_desc *desc) 195static void timbgpio_irq(struct irq_desc *desc)
196{ 196{
197 struct timbgpio *tgpio = irq_desc_get_handler_data(desc); 197 struct timbgpio *tgpio = irq_desc_get_handler_data(desc);
198 struct irq_data *data = irq_desc_get_irq_data(desc); 198 struct irq_data *data = irq_desc_get_irq_data(desc);
diff --git a/drivers/gpio/gpio-tz1090.c b/drivers/gpio/gpio-tz1090.c
index bbac92ae4c32..87bb1b1eee8d 100644
--- a/drivers/gpio/gpio-tz1090.c
+++ b/drivers/gpio/gpio-tz1090.c
@@ -375,7 +375,7 @@ static int gpio_set_irq_wake(struct irq_data *data, unsigned int on)
375#define gpio_set_irq_wake NULL 375#define gpio_set_irq_wake NULL
376#endif 376#endif
377 377
378static void tz1090_gpio_irq_handler(unsigned int irq, struct irq_desc *desc) 378static void tz1090_gpio_irq_handler(struct irq_desc *desc)
379{ 379{
380 irq_hw_number_t hw; 380 irq_hw_number_t hw;
381 unsigned int irq_stat, irq_no; 381 unsigned int irq_stat, irq_no;
@@ -400,7 +400,7 @@ static void tz1090_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
400 == IRQ_TYPE_EDGE_BOTH) 400 == IRQ_TYPE_EDGE_BOTH)
401 tz1090_gpio_irq_next_edge(bank, hw); 401 tz1090_gpio_irq_next_edge(bank, hw);
402 402
403 generic_handle_irq_desc(irq_no, child_desc); 403 generic_handle_irq_desc(child_desc);
404 } 404 }
405} 405}
406 406
diff --git a/drivers/gpio/gpio-vf610.c b/drivers/gpio/gpio-vf610.c
index 3d5714d4f405..069f9e4b7daa 100644
--- a/drivers/gpio/gpio-vf610.c
+++ b/drivers/gpio/gpio-vf610.c
@@ -120,7 +120,7 @@ static int vf610_gpio_direction_output(struct gpio_chip *chip, unsigned gpio,
120 return pinctrl_gpio_direction_output(chip->base + gpio); 120 return pinctrl_gpio_direction_output(chip->base + gpio);
121} 121}
122 122
123static void vf610_gpio_irq_handler(u32 irq, struct irq_desc *desc) 123static void vf610_gpio_irq_handler(struct irq_desc *desc)
124{ 124{
125 struct vf610_gpio_port *port = irq_desc_get_handler_data(desc); 125 struct vf610_gpio_port *port = irq_desc_get_handler_data(desc);
126 struct irq_chip *chip = irq_desc_get_chip(desc); 126 struct irq_chip *chip = irq_desc_get_chip(desc);
@@ -176,9 +176,9 @@ static int vf610_gpio_irq_set_type(struct irq_data *d, u32 type)
176 port->irqc[d->hwirq] = irqc; 176 port->irqc[d->hwirq] = irqc;
177 177
178 if (type & IRQ_TYPE_LEVEL_MASK) 178 if (type & IRQ_TYPE_LEVEL_MASK)
179 __irq_set_handler_locked(d->irq, handle_level_irq); 179 irq_set_handler_locked(d, handle_level_irq);
180 else 180 else
181 __irq_set_handler_locked(d->irq, handle_edge_irq); 181 irq_set_handler_locked(d, handle_edge_irq);
182 182
183 return 0; 183 return 0;
184} 184}
diff --git a/drivers/gpio/gpio-zx.c b/drivers/gpio/gpio-zx.c
index 12ee1969298c..4b8a26910705 100644
--- a/drivers/gpio/gpio-zx.c
+++ b/drivers/gpio/gpio-zx.c
@@ -177,7 +177,7 @@ static int zx_irq_type(struct irq_data *d, unsigned trigger)
177 return 0; 177 return 0;
178} 178}
179 179
180static void zx_irq_handler(unsigned irq, struct irq_desc *desc) 180static void zx_irq_handler(struct irq_desc *desc)
181{ 181{
182 unsigned long pending; 182 unsigned long pending;
183 int offset; 183 int offset;
diff --git a/drivers/gpio/gpio-zynq.c b/drivers/gpio/gpio-zynq.c
index 27348e7cb705..1d1a5865ede9 100644
--- a/drivers/gpio/gpio-zynq.c
+++ b/drivers/gpio/gpio-zynq.c
@@ -514,7 +514,7 @@ static void zynq_gpio_handle_bank_irq(struct zynq_gpio *gpio,
514 * application for that pin. 514 * application for that pin.
515 * Note: A bug is reported if no handler is set for the gpio pin. 515 * Note: A bug is reported if no handler is set for the gpio pin.
516 */ 516 */
517static void zynq_gpio_irqhandler(unsigned int irq, struct irq_desc *desc) 517static void zynq_gpio_irqhandler(struct irq_desc *desc)
518{ 518{
519 u32 int_sts, int_enb; 519 u32 int_sts, int_enb;
520 unsigned int bank_num; 520 unsigned int bank_num;
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 980c1f87866a..5db3445552b1 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -1174,15 +1174,16 @@ EXPORT_SYMBOL_GPL(gpiod_is_active_low);
1174 * that the GPIO was actually requested. 1174 * that the GPIO was actually requested.
1175 */ 1175 */
1176 1176
1177static bool _gpiod_get_raw_value(const struct gpio_desc *desc) 1177static int _gpiod_get_raw_value(const struct gpio_desc *desc)
1178{ 1178{
1179 struct gpio_chip *chip; 1179 struct gpio_chip *chip;
1180 bool value;
1181 int offset; 1180 int offset;
1181 int value;
1182 1182
1183 chip = desc->chip; 1183 chip = desc->chip;
1184 offset = gpio_chip_hwgpio(desc); 1184 offset = gpio_chip_hwgpio(desc);
1185 value = chip->get ? chip->get(chip, offset) : false; 1185 value = chip->get ? chip->get(chip, offset) : -EIO;
1186 value = value < 0 ? value : !!value;
1186 trace_gpio_value(desc_to_gpio(desc), 1, value); 1187 trace_gpio_value(desc_to_gpio(desc), 1, value);
1187 return value; 1188 return value;
1188} 1189}
@@ -1192,7 +1193,7 @@ static bool _gpiod_get_raw_value(const struct gpio_desc *desc)
1192 * @desc: gpio whose value will be returned 1193 * @desc: gpio whose value will be returned
1193 * 1194 *
1194 * Return the GPIO's raw value, i.e. the value of the physical line disregarding 1195 * Return the GPIO's raw value, i.e. the value of the physical line disregarding
1195 * its ACTIVE_LOW status. 1196 * its ACTIVE_LOW status, or negative errno on failure.
1196 * 1197 *
1197 * This function should be called from contexts where we cannot sleep, and will 1198 * This function should be called from contexts where we cannot sleep, and will
1198 * complain if the GPIO chip functions potentially sleep. 1199 * complain if the GPIO chip functions potentially sleep.
@@ -1212,7 +1213,7 @@ EXPORT_SYMBOL_GPL(gpiod_get_raw_value);
1212 * @desc: gpio whose value will be returned 1213 * @desc: gpio whose value will be returned
1213 * 1214 *
1214 * Return the GPIO's logical value, i.e. taking the ACTIVE_LOW status into 1215 * Return the GPIO's logical value, i.e. taking the ACTIVE_LOW status into
1215 * account. 1216 * account, or negative errno on failure.
1216 * 1217 *
1217 * This function should be called from contexts where we cannot sleep, and will 1218 * This function should be called from contexts where we cannot sleep, and will
1218 * complain if the GPIO chip functions potentially sleep. 1219 * complain if the GPIO chip functions potentially sleep.
@@ -1226,6 +1227,9 @@ int gpiod_get_value(const struct gpio_desc *desc)
1226 WARN_ON(desc->chip->can_sleep); 1227 WARN_ON(desc->chip->can_sleep);
1227 1228
1228 value = _gpiod_get_raw_value(desc); 1229 value = _gpiod_get_raw_value(desc);
1230 if (value < 0)
1231 return value;
1232
1229 if (test_bit(FLAG_ACTIVE_LOW, &desc->flags)) 1233 if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
1230 value = !value; 1234 value = !value;
1231 1235
@@ -1548,7 +1552,7 @@ EXPORT_SYMBOL_GPL(gpiochip_unlock_as_irq);
1548 * @desc: gpio whose value will be returned 1552 * @desc: gpio whose value will be returned
1549 * 1553 *
1550 * Return the GPIO's raw value, i.e. the value of the physical line disregarding 1554 * Return the GPIO's raw value, i.e. the value of the physical line disregarding
1551 * its ACTIVE_LOW status. 1555 * its ACTIVE_LOW status, or negative errno on failure.
1552 * 1556 *
1553 * This function is to be called from contexts that can sleep. 1557 * This function is to be called from contexts that can sleep.
1554 */ 1558 */
@@ -1566,7 +1570,7 @@ EXPORT_SYMBOL_GPL(gpiod_get_raw_value_cansleep);
1566 * @desc: gpio whose value will be returned 1570 * @desc: gpio whose value will be returned
1567 * 1571 *
1568 * Return the GPIO's logical value, i.e. taking the ACTIVE_LOW status into 1572 * Return the GPIO's logical value, i.e. taking the ACTIVE_LOW status into
1569 * account. 1573 * account, or negative errno on failure.
1570 * 1574 *
1571 * This function is to be called from contexts that can sleep. 1575 * This function is to be called from contexts that can sleep.
1572 */ 1576 */
@@ -1579,6 +1583,9 @@ int gpiod_get_value_cansleep(const struct gpio_desc *desc)
1579 return 0; 1583 return 0;
1580 1584
1581 value = _gpiod_get_raw_value(desc); 1585 value = _gpiod_get_raw_value(desc);
1586 if (value < 0)
1587 return value;
1588
1582 if (test_bit(FLAG_ACTIVE_LOW, &desc->flags)) 1589 if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
1583 value = !value; 1590 value = !value;
1584 1591
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 668939a14206..6647fb26ef25 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -82,6 +82,7 @@ extern int amdgpu_vm_block_size;
82extern int amdgpu_enable_scheduler; 82extern int amdgpu_enable_scheduler;
83extern int amdgpu_sched_jobs; 83extern int amdgpu_sched_jobs;
84extern int amdgpu_sched_hw_submission; 84extern int amdgpu_sched_hw_submission;
85extern int amdgpu_enable_semaphores;
85 86
86#define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000 87#define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000
87#define AMDGPU_MAX_USEC_TIMEOUT 100000 /* 100 ms */ 88#define AMDGPU_MAX_USEC_TIMEOUT 100000 /* 100 ms */
@@ -432,7 +433,7 @@ int amdgpu_fence_driver_init(struct amdgpu_device *adev);
432void amdgpu_fence_driver_fini(struct amdgpu_device *adev); 433void amdgpu_fence_driver_fini(struct amdgpu_device *adev);
433void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev); 434void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev);
434 435
435void amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring); 436int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring);
436int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring, 437int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
437 struct amdgpu_irq_src *irq_src, 438 struct amdgpu_irq_src *irq_src,
438 unsigned irq_type); 439 unsigned irq_type);
@@ -890,7 +891,7 @@ struct amdgpu_ring {
890 struct amdgpu_device *adev; 891 struct amdgpu_device *adev;
891 const struct amdgpu_ring_funcs *funcs; 892 const struct amdgpu_ring_funcs *funcs;
892 struct amdgpu_fence_driver fence_drv; 893 struct amdgpu_fence_driver fence_drv;
893 struct amd_gpu_scheduler *scheduler; 894 struct amd_gpu_scheduler sched;
894 895
895 spinlock_t fence_lock; 896 spinlock_t fence_lock;
896 struct mutex *ring_lock; 897 struct mutex *ring_lock;
@@ -1201,8 +1202,6 @@ struct amdgpu_gfx {
1201 struct amdgpu_irq_src priv_inst_irq; 1202 struct amdgpu_irq_src priv_inst_irq;
1202 /* gfx status */ 1203 /* gfx status */
1203 uint32_t gfx_current_status; 1204 uint32_t gfx_current_status;
1204 /* sync signal for const engine */
1205 unsigned ce_sync_offs;
1206 /* ce ram size*/ 1205 /* ce ram size*/
1207 unsigned ce_ram_size; 1206 unsigned ce_ram_size;
1208}; 1207};
@@ -1274,8 +1273,10 @@ struct amdgpu_job {
1274 uint32_t num_ibs; 1273 uint32_t num_ibs;
1275 struct mutex job_lock; 1274 struct mutex job_lock;
1276 struct amdgpu_user_fence uf; 1275 struct amdgpu_user_fence uf;
1277 int (*free_job)(struct amdgpu_job *sched_job); 1276 int (*free_job)(struct amdgpu_job *job);
1278}; 1277};
1278#define to_amdgpu_job(sched_job) \
1279 container_of((sched_job), struct amdgpu_job, base)
1279 1280
1280static inline u32 amdgpu_get_ib_value(struct amdgpu_cs_parser *p, uint32_t ib_idx, int idx) 1281static inline u32 amdgpu_get_ib_value(struct amdgpu_cs_parser *p, uint32_t ib_idx, int idx)
1281{ 1282{
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 496ed2192eba..84d68d658f8a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -183,7 +183,7 @@ int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
183 return -ENOMEM; 183 return -ENOMEM;
184 184
185 r = amdgpu_bo_create(rdev, size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_GTT, 185 r = amdgpu_bo_create(rdev, size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_GTT,
186 AMDGPU_GEM_CREATE_CPU_GTT_USWC, NULL, &(*mem)->bo); 186 AMDGPU_GEM_CREATE_CPU_GTT_USWC, NULL, NULL, &(*mem)->bo);
187 if (r) { 187 if (r) {
188 dev_err(rdev->dev, 188 dev_err(rdev->dev,
189 "failed to allocate BO for amdkfd (%d)\n", r); 189 "failed to allocate BO for amdkfd (%d)\n", r);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
index 98d59ee640ce..cd639c362df3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
@@ -79,7 +79,8 @@ static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size,
79 int time; 79 int time;
80 80
81 n = AMDGPU_BENCHMARK_ITERATIONS; 81 n = AMDGPU_BENCHMARK_ITERATIONS;
82 r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, sdomain, 0, NULL, &sobj); 82 r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, sdomain, 0, NULL,
83 NULL, &sobj);
83 if (r) { 84 if (r) {
84 goto out_cleanup; 85 goto out_cleanup;
85 } 86 }
@@ -91,7 +92,8 @@ static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size,
91 if (r) { 92 if (r) {
92 goto out_cleanup; 93 goto out_cleanup;
93 } 94 }
94 r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, ddomain, 0, NULL, &dobj); 95 r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, ddomain, 0, NULL,
96 NULL, &dobj);
95 if (r) { 97 if (r) {
96 goto out_cleanup; 98 goto out_cleanup;
97 } 99 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index 6b1243f9f86d..1c3fc99c5465 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -86,7 +86,7 @@ static int amdgpu_cgs_gmap_kmem(void *cgs_device, void *kmem,
86 86
87 struct sg_table *sg = drm_prime_pages_to_sg(&kmem_page, npages); 87 struct sg_table *sg = drm_prime_pages_to_sg(&kmem_page, npages);
88 ret = amdgpu_bo_create(adev, size, PAGE_SIZE, false, 88 ret = amdgpu_bo_create(adev, size, PAGE_SIZE, false,
89 AMDGPU_GEM_DOMAIN_GTT, 0, sg, &bo); 89 AMDGPU_GEM_DOMAIN_GTT, 0, sg, NULL, &bo);
90 if (ret) 90 if (ret)
91 return ret; 91 return ret;
92 ret = amdgpu_bo_reserve(bo, false); 92 ret = amdgpu_bo_reserve(bo, false);
@@ -197,7 +197,8 @@ static int amdgpu_cgs_alloc_gpu_mem(void *cgs_device,
197 197
198 ret = amdgpu_bo_create_restricted(adev, size, PAGE_SIZE, 198 ret = amdgpu_bo_create_restricted(adev, size, PAGE_SIZE,
199 true, domain, flags, 199 true, domain, flags,
200 NULL, &placement, &obj); 200 NULL, &placement, NULL,
201 &obj);
201 if (ret) { 202 if (ret) {
202 DRM_ERROR("(%d) bo create failed\n", ret); 203 DRM_ERROR("(%d) bo create failed\n", ret);
203 return ret; 204 return ret;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 3b355aeb62fd..749420f1ea6f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -154,42 +154,41 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
154{ 154{
155 union drm_amdgpu_cs *cs = data; 155 union drm_amdgpu_cs *cs = data;
156 uint64_t *chunk_array_user; 156 uint64_t *chunk_array_user;
157 uint64_t *chunk_array = NULL; 157 uint64_t *chunk_array;
158 struct amdgpu_fpriv *fpriv = p->filp->driver_priv; 158 struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
159 unsigned size, i; 159 unsigned size, i;
160 int r = 0; 160 int ret;
161 161
162 if (!cs->in.num_chunks) 162 if (cs->in.num_chunks == 0)
163 goto out; 163 return 0;
164
165 chunk_array = kmalloc_array(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL);
166 if (!chunk_array)
167 return -ENOMEM;
164 168
165 p->ctx = amdgpu_ctx_get(fpriv, cs->in.ctx_id); 169 p->ctx = amdgpu_ctx_get(fpriv, cs->in.ctx_id);
166 if (!p->ctx) { 170 if (!p->ctx) {
167 r = -EINVAL; 171 ret = -EINVAL;
168 goto out; 172 goto free_chunk;
169 } 173 }
174
170 p->bo_list = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle); 175 p->bo_list = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle);
171 176
172 /* get chunks */ 177 /* get chunks */
173 INIT_LIST_HEAD(&p->validated); 178 INIT_LIST_HEAD(&p->validated);
174 chunk_array = kmalloc_array(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL);
175 if (chunk_array == NULL) {
176 r = -ENOMEM;
177 goto out;
178 }
179
180 chunk_array_user = (uint64_t __user *)(cs->in.chunks); 179 chunk_array_user = (uint64_t __user *)(cs->in.chunks);
181 if (copy_from_user(chunk_array, chunk_array_user, 180 if (copy_from_user(chunk_array, chunk_array_user,
182 sizeof(uint64_t)*cs->in.num_chunks)) { 181 sizeof(uint64_t)*cs->in.num_chunks)) {
183 r = -EFAULT; 182 ret = -EFAULT;
184 goto out; 183 goto put_bo_list;
185 } 184 }
186 185
187 p->nchunks = cs->in.num_chunks; 186 p->nchunks = cs->in.num_chunks;
188 p->chunks = kmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk), 187 p->chunks = kmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk),
189 GFP_KERNEL); 188 GFP_KERNEL);
190 if (p->chunks == NULL) { 189 if (!p->chunks) {
191 r = -ENOMEM; 190 ret = -ENOMEM;
192 goto out; 191 goto put_bo_list;
193 } 192 }
194 193
195 for (i = 0; i < p->nchunks; i++) { 194 for (i = 0; i < p->nchunks; i++) {
@@ -200,8 +199,9 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
200 chunk_ptr = (void __user *)chunk_array[i]; 199 chunk_ptr = (void __user *)chunk_array[i];
201 if (copy_from_user(&user_chunk, chunk_ptr, 200 if (copy_from_user(&user_chunk, chunk_ptr,
202 sizeof(struct drm_amdgpu_cs_chunk))) { 201 sizeof(struct drm_amdgpu_cs_chunk))) {
203 r = -EFAULT; 202 ret = -EFAULT;
204 goto out; 203 i--;
204 goto free_partial_kdata;
205 } 205 }
206 p->chunks[i].chunk_id = user_chunk.chunk_id; 206 p->chunks[i].chunk_id = user_chunk.chunk_id;
207 p->chunks[i].length_dw = user_chunk.length_dw; 207 p->chunks[i].length_dw = user_chunk.length_dw;
@@ -212,13 +212,14 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
212 212
213 p->chunks[i].kdata = drm_malloc_ab(size, sizeof(uint32_t)); 213 p->chunks[i].kdata = drm_malloc_ab(size, sizeof(uint32_t));
214 if (p->chunks[i].kdata == NULL) { 214 if (p->chunks[i].kdata == NULL) {
215 r = -ENOMEM; 215 ret = -ENOMEM;
216 goto out; 216 i--;
217 goto free_partial_kdata;
217 } 218 }
218 size *= sizeof(uint32_t); 219 size *= sizeof(uint32_t);
219 if (copy_from_user(p->chunks[i].kdata, cdata, size)) { 220 if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
220 r = -EFAULT; 221 ret = -EFAULT;
221 goto out; 222 goto free_partial_kdata;
222 } 223 }
223 224
224 switch (p->chunks[i].chunk_id) { 225 switch (p->chunks[i].chunk_id) {
@@ -238,15 +239,15 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
238 gobj = drm_gem_object_lookup(p->adev->ddev, 239 gobj = drm_gem_object_lookup(p->adev->ddev,
239 p->filp, handle); 240 p->filp, handle);
240 if (gobj == NULL) { 241 if (gobj == NULL) {
241 r = -EINVAL; 242 ret = -EINVAL;
242 goto out; 243 goto free_partial_kdata;
243 } 244 }
244 245
245 p->uf.bo = gem_to_amdgpu_bo(gobj); 246 p->uf.bo = gem_to_amdgpu_bo(gobj);
246 p->uf.offset = fence_data->offset; 247 p->uf.offset = fence_data->offset;
247 } else { 248 } else {
248 r = -EINVAL; 249 ret = -EINVAL;
249 goto out; 250 goto free_partial_kdata;
250 } 251 }
251 break; 252 break;
252 253
@@ -254,19 +255,35 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
254 break; 255 break;
255 256
256 default: 257 default:
257 r = -EINVAL; 258 ret = -EINVAL;
258 goto out; 259 goto free_partial_kdata;
259 } 260 }
260 } 261 }
261 262
262 263
263 p->ibs = kcalloc(p->num_ibs, sizeof(struct amdgpu_ib), GFP_KERNEL); 264 p->ibs = kcalloc(p->num_ibs, sizeof(struct amdgpu_ib), GFP_KERNEL);
264 if (!p->ibs) 265 if (!p->ibs) {
265 r = -ENOMEM; 266 ret = -ENOMEM;
267 goto free_all_kdata;
268 }
266 269
267out:
268 kfree(chunk_array); 270 kfree(chunk_array);
269 return r; 271 return 0;
272
273free_all_kdata:
274 i = p->nchunks - 1;
275free_partial_kdata:
276 for (; i >= 0; i--)
277 drm_free_large(p->chunks[i].kdata);
278 kfree(p->chunks);
279put_bo_list:
280 if (p->bo_list)
281 amdgpu_bo_list_put(p->bo_list);
282 amdgpu_ctx_put(p->ctx);
283free_chunk:
284 kfree(chunk_array);
285
286 return ret;
270} 287}
271 288
272/* Returns how many bytes TTM can move per IB. 289/* Returns how many bytes TTM can move per IB.
@@ -321,25 +338,17 @@ static u64 amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev)
321 return max(bytes_moved_threshold, 1024*1024ull); 338 return max(bytes_moved_threshold, 1024*1024ull);
322} 339}
323 340
324int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p) 341int amdgpu_cs_list_validate(struct amdgpu_device *adev,
342 struct amdgpu_vm *vm,
343 struct list_head *validated)
325{ 344{
326 struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
327 struct amdgpu_vm *vm = &fpriv->vm;
328 struct amdgpu_device *adev = p->adev;
329 struct amdgpu_bo_list_entry *lobj; 345 struct amdgpu_bo_list_entry *lobj;
330 struct list_head duplicates;
331 struct amdgpu_bo *bo; 346 struct amdgpu_bo *bo;
332 u64 bytes_moved = 0, initial_bytes_moved; 347 u64 bytes_moved = 0, initial_bytes_moved;
333 u64 bytes_moved_threshold = amdgpu_cs_get_threshold_for_moves(adev); 348 u64 bytes_moved_threshold = amdgpu_cs_get_threshold_for_moves(adev);
334 int r; 349 int r;
335 350
336 INIT_LIST_HEAD(&duplicates); 351 list_for_each_entry(lobj, validated, tv.head) {
337 r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true, &duplicates);
338 if (unlikely(r != 0)) {
339 return r;
340 }
341
342 list_for_each_entry(lobj, &p->validated, tv.head) {
343 bo = lobj->robj; 352 bo = lobj->robj;
344 if (!bo->pin_count) { 353 if (!bo->pin_count) {
345 u32 domain = lobj->prefered_domains; 354 u32 domain = lobj->prefered_domains;
@@ -373,7 +382,6 @@ int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p)
373 domain = lobj->allowed_domains; 382 domain = lobj->allowed_domains;
374 goto retry; 383 goto retry;
375 } 384 }
376 ttm_eu_backoff_reservation(&p->ticket, &p->validated);
377 return r; 385 return r;
378 } 386 }
379 } 387 }
@@ -386,6 +394,7 @@ static int amdgpu_cs_parser_relocs(struct amdgpu_cs_parser *p)
386{ 394{
387 struct amdgpu_fpriv *fpriv = p->filp->driver_priv; 395 struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
388 struct amdgpu_cs_buckets buckets; 396 struct amdgpu_cs_buckets buckets;
397 struct list_head duplicates;
389 bool need_mmap_lock = false; 398 bool need_mmap_lock = false;
390 int i, r; 399 int i, r;
391 400
@@ -405,8 +414,22 @@ static int amdgpu_cs_parser_relocs(struct amdgpu_cs_parser *p)
405 if (need_mmap_lock) 414 if (need_mmap_lock)
406 down_read(&current->mm->mmap_sem); 415 down_read(&current->mm->mmap_sem);
407 416
408 r = amdgpu_cs_list_validate(p); 417 INIT_LIST_HEAD(&duplicates);
418 r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true, &duplicates);
419 if (unlikely(r != 0))
420 goto error_reserve;
421
422 r = amdgpu_cs_list_validate(p->adev, &fpriv->vm, &p->validated);
423 if (r)
424 goto error_validate;
425
426 r = amdgpu_cs_list_validate(p->adev, &fpriv->vm, &duplicates);
427
428error_validate:
429 if (r)
430 ttm_eu_backoff_reservation(&p->ticket, &p->validated);
409 431
432error_reserve:
410 if (need_mmap_lock) 433 if (need_mmap_lock)
411 up_read(&current->mm->mmap_sem); 434 up_read(&current->mm->mmap_sem);
412 435
@@ -772,15 +795,15 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
772 return 0; 795 return 0;
773} 796}
774 797
775static int amdgpu_cs_free_job(struct amdgpu_job *sched_job) 798static int amdgpu_cs_free_job(struct amdgpu_job *job)
776{ 799{
777 int i; 800 int i;
778 if (sched_job->ibs) 801 if (job->ibs)
779 for (i = 0; i < sched_job->num_ibs; i++) 802 for (i = 0; i < job->num_ibs; i++)
780 amdgpu_ib_free(sched_job->adev, &sched_job->ibs[i]); 803 amdgpu_ib_free(job->adev, &job->ibs[i]);
781 kfree(sched_job->ibs); 804 kfree(job->ibs);
782 if (sched_job->uf.bo) 805 if (job->uf.bo)
783 drm_gem_object_unreference_unlocked(&sched_job->uf.bo->gem_base); 806 drm_gem_object_unreference_unlocked(&job->uf.bo->gem_base);
784 return 0; 807 return 0;
785} 808}
786 809
@@ -804,7 +827,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
804 r = amdgpu_cs_parser_init(parser, data); 827 r = amdgpu_cs_parser_init(parser, data);
805 if (r) { 828 if (r) {
806 DRM_ERROR("Failed to initialize parser !\n"); 829 DRM_ERROR("Failed to initialize parser !\n");
807 amdgpu_cs_parser_fini(parser, r, false); 830 kfree(parser);
808 up_read(&adev->exclusive_lock); 831 up_read(&adev->exclusive_lock);
809 r = amdgpu_cs_handle_lockup(adev, r); 832 r = amdgpu_cs_handle_lockup(adev, r);
810 return r; 833 return r;
@@ -842,7 +865,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
842 job = kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL); 865 job = kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL);
843 if (!job) 866 if (!job)
844 return -ENOMEM; 867 return -ENOMEM;
845 job->base.sched = ring->scheduler; 868 job->base.sched = &ring->sched;
846 job->base.s_entity = &parser->ctx->rings[ring->idx].entity; 869 job->base.s_entity = &parser->ctx->rings[ring->idx].entity;
847 job->adev = parser->adev; 870 job->adev = parser->adev;
848 job->ibs = parser->ibs; 871 job->ibs = parser->ibs;
@@ -857,7 +880,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
857 880
858 job->free_job = amdgpu_cs_free_job; 881 job->free_job = amdgpu_cs_free_job;
859 mutex_lock(&job->job_lock); 882 mutex_lock(&job->job_lock);
860 r = amd_sched_entity_push_job((struct amd_sched_job *)job); 883 r = amd_sched_entity_push_job(&job->base);
861 if (r) { 884 if (r) {
862 mutex_unlock(&job->job_lock); 885 mutex_unlock(&job->job_lock);
863 amdgpu_cs_free_job(job); 886 amdgpu_cs_free_job(job);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 20cbc4eb5a6f..e0b80ccdfe8a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -43,10 +43,10 @@ int amdgpu_ctx_init(struct amdgpu_device *adev, bool kernel,
43 for (i = 0; i < adev->num_rings; i++) { 43 for (i = 0; i < adev->num_rings; i++) {
44 struct amd_sched_rq *rq; 44 struct amd_sched_rq *rq;
45 if (kernel) 45 if (kernel)
46 rq = &adev->rings[i]->scheduler->kernel_rq; 46 rq = &adev->rings[i]->sched.kernel_rq;
47 else 47 else
48 rq = &adev->rings[i]->scheduler->sched_rq; 48 rq = &adev->rings[i]->sched.sched_rq;
49 r = amd_sched_entity_init(adev->rings[i]->scheduler, 49 r = amd_sched_entity_init(&adev->rings[i]->sched,
50 &ctx->rings[i].entity, 50 &ctx->rings[i].entity,
51 rq, amdgpu_sched_jobs); 51 rq, amdgpu_sched_jobs);
52 if (r) 52 if (r)
@@ -55,7 +55,7 @@ int amdgpu_ctx_init(struct amdgpu_device *adev, bool kernel,
55 55
56 if (i < adev->num_rings) { 56 if (i < adev->num_rings) {
57 for (j = 0; j < i; j++) 57 for (j = 0; j < i; j++)
58 amd_sched_entity_fini(adev->rings[j]->scheduler, 58 amd_sched_entity_fini(&adev->rings[j]->sched,
59 &ctx->rings[j].entity); 59 &ctx->rings[j].entity);
60 kfree(ctx); 60 kfree(ctx);
61 return r; 61 return r;
@@ -75,7 +75,7 @@ void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
75 75
76 if (amdgpu_enable_scheduler) { 76 if (amdgpu_enable_scheduler) {
77 for (i = 0; i < adev->num_rings; i++) 77 for (i = 0; i < adev->num_rings; i++)
78 amd_sched_entity_fini(adev->rings[i]->scheduler, 78 amd_sched_entity_fini(&adev->rings[i]->sched,
79 &ctx->rings[i].entity); 79 &ctx->rings[i].entity);
80 } 80 }
81} 81}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 6ff6ae945794..6068d8207d10 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -246,7 +246,7 @@ static int amdgpu_vram_scratch_init(struct amdgpu_device *adev)
246 r = amdgpu_bo_create(adev, AMDGPU_GPU_PAGE_SIZE, 246 r = amdgpu_bo_create(adev, AMDGPU_GPU_PAGE_SIZE,
247 PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, 247 PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
248 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, 248 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
249 NULL, &adev->vram_scratch.robj); 249 NULL, NULL, &adev->vram_scratch.robj);
250 if (r) { 250 if (r) {
251 return r; 251 return r;
252 } 252 }
@@ -449,7 +449,8 @@ static int amdgpu_wb_init(struct amdgpu_device *adev)
449 449
450 if (adev->wb.wb_obj == NULL) { 450 if (adev->wb.wb_obj == NULL) {
451 r = amdgpu_bo_create(adev, AMDGPU_MAX_WB * 4, PAGE_SIZE, true, 451 r = amdgpu_bo_create(adev, AMDGPU_MAX_WB * 4, PAGE_SIZE, true,
452 AMDGPU_GEM_DOMAIN_GTT, 0, NULL, &adev->wb.wb_obj); 452 AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
453 &adev->wb.wb_obj);
453 if (r) { 454 if (r) {
454 dev_warn(adev->dev, "(%d) create WB bo failed\n", r); 455 dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
455 return r; 456 return r;
@@ -1650,9 +1651,11 @@ int amdgpu_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
1650 drm_kms_helper_poll_disable(dev); 1651 drm_kms_helper_poll_disable(dev);
1651 1652
1652 /* turn off display hw */ 1653 /* turn off display hw */
1654 drm_modeset_lock_all(dev);
1653 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 1655 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
1654 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); 1656 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
1655 } 1657 }
1658 drm_modeset_unlock_all(dev);
1656 1659
1657 /* unpin the front buffers */ 1660 /* unpin the front buffers */
1658 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 1661 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
@@ -1747,9 +1750,11 @@ int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
1747 if (fbcon) { 1750 if (fbcon) {
1748 drm_helper_resume_force_mode(dev); 1751 drm_helper_resume_force_mode(dev);
1749 /* turn on display hw */ 1752 /* turn on display hw */
1753 drm_modeset_lock_all(dev);
1750 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 1754 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
1751 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); 1755 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
1752 } 1756 }
1757 drm_modeset_unlock_all(dev);
1753 } 1758 }
1754 1759
1755 drm_kms_helper_poll_enable(dev); 1760 drm_kms_helper_poll_enable(dev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 0fcc0bd1622c..adb48353f2e1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -79,6 +79,7 @@ int amdgpu_exp_hw_support = 0;
79int amdgpu_enable_scheduler = 0; 79int amdgpu_enable_scheduler = 0;
80int amdgpu_sched_jobs = 16; 80int amdgpu_sched_jobs = 16;
81int amdgpu_sched_hw_submission = 2; 81int amdgpu_sched_hw_submission = 2;
82int amdgpu_enable_semaphores = 1;
82 83
83MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes"); 84MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes");
84module_param_named(vramlimit, amdgpu_vram_limit, int, 0600); 85module_param_named(vramlimit, amdgpu_vram_limit, int, 0600);
@@ -152,6 +153,9 @@ module_param_named(sched_jobs, amdgpu_sched_jobs, int, 0444);
152MODULE_PARM_DESC(sched_hw_submission, "the max number of HW submissions (default 2)"); 153MODULE_PARM_DESC(sched_hw_submission, "the max number of HW submissions (default 2)");
153module_param_named(sched_hw_submission, amdgpu_sched_hw_submission, int, 0444); 154module_param_named(sched_hw_submission, amdgpu_sched_hw_submission, int, 0444);
154 155
156MODULE_PARM_DESC(enable_semaphores, "Enable semaphores (1 = enable (default), 0 = disable)");
157module_param_named(enable_semaphores, amdgpu_enable_semaphores, int, 0644);
158
155static struct pci_device_id pciidlist[] = { 159static struct pci_device_id pciidlist[] = {
156#ifdef CONFIG_DRM_AMDGPU_CIK 160#ifdef CONFIG_DRM_AMDGPU_CIK
157 /* Kaveri */ 161 /* Kaveri */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 1be2bd6d07ea..b3fc26c59787 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -609,9 +609,9 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
609 * Init the fence driver for the requested ring (all asics). 609 * Init the fence driver for the requested ring (all asics).
610 * Helper function for amdgpu_fence_driver_init(). 610 * Helper function for amdgpu_fence_driver_init().
611 */ 611 */
612void amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring) 612int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
613{ 613{
614 int i; 614 int i, r;
615 615
616 ring->fence_drv.cpu_addr = NULL; 616 ring->fence_drv.cpu_addr = NULL;
617 ring->fence_drv.gpu_addr = 0; 617 ring->fence_drv.gpu_addr = 0;
@@ -625,15 +625,19 @@ void amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
625 amdgpu_fence_check_lockup); 625 amdgpu_fence_check_lockup);
626 ring->fence_drv.ring = ring; 626 ring->fence_drv.ring = ring;
627 627
628 init_waitqueue_head(&ring->fence_drv.fence_queue);
629
628 if (amdgpu_enable_scheduler) { 630 if (amdgpu_enable_scheduler) {
629 ring->scheduler = amd_sched_create(&amdgpu_sched_ops, 631 r = amd_sched_init(&ring->sched, &amdgpu_sched_ops,
630 ring->idx, 632 amdgpu_sched_hw_submission, ring->name);
631 amdgpu_sched_hw_submission, 633 if (r) {
632 (void *)ring->adev); 634 DRM_ERROR("Failed to create scheduler on ring %s.\n",
633 if (!ring->scheduler) 635 ring->name);
634 DRM_ERROR("Failed to create scheduler on ring %d.\n", 636 return r;
635 ring->idx); 637 }
636 } 638 }
639
640 return 0;
637} 641}
638 642
639/** 643/**
@@ -681,8 +685,7 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
681 wake_up_all(&ring->fence_drv.fence_queue); 685 wake_up_all(&ring->fence_drv.fence_queue);
682 amdgpu_irq_put(adev, ring->fence_drv.irq_src, 686 amdgpu_irq_put(adev, ring->fence_drv.irq_src,
683 ring->fence_drv.irq_type); 687 ring->fence_drv.irq_type);
684 if (ring->scheduler) 688 amd_sched_fini(&ring->sched);
685 amd_sched_destroy(ring->scheduler);
686 ring->fence_drv.initialized = false; 689 ring->fence_drv.initialized = false;
687 } 690 }
688 mutex_unlock(&adev->ring_lock); 691 mutex_unlock(&adev->ring_lock);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index cbd3a486c5c2..7312d729d300 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -127,7 +127,7 @@ int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev)
127 r = amdgpu_bo_create(adev, adev->gart.table_size, 127 r = amdgpu_bo_create(adev, adev->gart.table_size,
128 PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, 128 PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
129 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, 129 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
130 NULL, &adev->gart.robj); 130 NULL, NULL, &adev->gart.robj);
131 if (r) { 131 if (r) {
132 return r; 132 return r;
133 } 133 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 5839fab374bf..7297ca3a0ba7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -69,7 +69,8 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
69 } 69 }
70 } 70 }
71retry: 71retry:
72 r = amdgpu_bo_create(adev, size, alignment, kernel, initial_domain, flags, NULL, &robj); 72 r = amdgpu_bo_create(adev, size, alignment, kernel, initial_domain,
73 flags, NULL, NULL, &robj);
73 if (r) { 74 if (r) {
74 if (r != -ERESTARTSYS) { 75 if (r != -ERESTARTSYS) {
75 if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) { 76 if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
@@ -426,6 +427,10 @@ int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
426 &args->data.data_size_bytes, 427 &args->data.data_size_bytes,
427 &args->data.flags); 428 &args->data.flags);
428 } else if (args->op == AMDGPU_GEM_METADATA_OP_SET_METADATA) { 429 } else if (args->op == AMDGPU_GEM_METADATA_OP_SET_METADATA) {
430 if (args->data.data_size_bytes > sizeof(args->data.data)) {
431 r = -EINVAL;
432 goto unreserve;
433 }
429 r = amdgpu_bo_set_tiling_flags(robj, args->data.tiling_info); 434 r = amdgpu_bo_set_tiling_flags(robj, args->data.tiling_info);
430 if (!r) 435 if (!r)
431 r = amdgpu_bo_set_metadata(robj, args->data.data, 436 r = amdgpu_bo_set_metadata(robj, args->data.data,
@@ -433,6 +438,7 @@ int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
433 args->data.flags); 438 args->data.flags);
434 } 439 }
435 440
441unreserve:
436 amdgpu_bo_unreserve(robj); 442 amdgpu_bo_unreserve(robj);
437out: 443out:
438 drm_gem_object_unreference_unlocked(gobj); 444 drm_gem_object_unreference_unlocked(gobj);
@@ -454,11 +460,12 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
454 struct ttm_validate_buffer tv, *entry; 460 struct ttm_validate_buffer tv, *entry;
455 struct amdgpu_bo_list_entry *vm_bos; 461 struct amdgpu_bo_list_entry *vm_bos;
456 struct ww_acquire_ctx ticket; 462 struct ww_acquire_ctx ticket;
457 struct list_head list; 463 struct list_head list, duplicates;
458 unsigned domain; 464 unsigned domain;
459 int r; 465 int r;
460 466
461 INIT_LIST_HEAD(&list); 467 INIT_LIST_HEAD(&list);
468 INIT_LIST_HEAD(&duplicates);
462 469
463 tv.bo = &bo_va->bo->tbo; 470 tv.bo = &bo_va->bo->tbo;
464 tv.shared = true; 471 tv.shared = true;
@@ -468,7 +475,8 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
468 if (!vm_bos) 475 if (!vm_bos)
469 return; 476 return;
470 477
471 r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL); 478 /* Provide duplicates to avoid -EALREADY */
479 r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
472 if (r) 480 if (r)
473 goto error_free; 481 goto error_free;
474 482
@@ -651,7 +659,7 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv,
651 int r; 659 int r;
652 660
653 args->pitch = amdgpu_align_pitch(adev, args->width, args->bpp, 0) * ((args->bpp + 1) / 8); 661 args->pitch = amdgpu_align_pitch(adev, args->width, args->bpp, 0) * ((args->bpp + 1) / 8);
654 args->size = args->pitch * args->height; 662 args->size = (u64)args->pitch * args->height;
655 args->size = ALIGN(args->size, PAGE_SIZE); 663 args->size = ALIGN(args->size, PAGE_SIZE);
656 664
657 r = amdgpu_gem_object_create(adev, args->size, 0, 665 r = amdgpu_gem_object_create(adev, args->size, 0,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
index 5c8a803acedc..534fc04e80fd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
@@ -43,7 +43,7 @@ static int amdgpu_ih_ring_alloc(struct amdgpu_device *adev)
43 r = amdgpu_bo_create(adev, adev->irq.ih.ring_size, 43 r = amdgpu_bo_create(adev, adev->irq.ih.ring_size,
44 PAGE_SIZE, true, 44 PAGE_SIZE, true,
45 AMDGPU_GEM_DOMAIN_GTT, 0, 45 AMDGPU_GEM_DOMAIN_GTT, 0,
46 NULL, &adev->irq.ih.ring_obj); 46 NULL, NULL, &adev->irq.ih.ring_obj);
47 if (r) { 47 if (r) {
48 DRM_ERROR("amdgpu: failed to create ih ring buffer (%d).\n", r); 48 DRM_ERROR("amdgpu: failed to create ih ring buffer (%d).\n", r);
49 return r; 49 return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index 0aba8e9bc8a0..7c42ff670080 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -140,7 +140,7 @@ void amdgpu_irq_preinstall(struct drm_device *dev)
140 */ 140 */
141int amdgpu_irq_postinstall(struct drm_device *dev) 141int amdgpu_irq_postinstall(struct drm_device *dev)
142{ 142{
143 dev->max_vblank_count = 0x001fffff; 143 dev->max_vblank_count = 0x00ffffff;
144 return 0; 144 return 0;
145} 145}
146 146
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 22367939ebf1..8c735f544b66 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -390,7 +390,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
390 min((size_t)size, sizeof(vram_gtt))) ? -EFAULT : 0; 390 min((size_t)size, sizeof(vram_gtt))) ? -EFAULT : 0;
391 } 391 }
392 case AMDGPU_INFO_READ_MMR_REG: { 392 case AMDGPU_INFO_READ_MMR_REG: {
393 unsigned n, alloc_size = info->read_mmr_reg.count * 4; 393 unsigned n, alloc_size;
394 uint32_t *regs; 394 uint32_t *regs;
395 unsigned se_num = (info->read_mmr_reg.instance >> 395 unsigned se_num = (info->read_mmr_reg.instance >>
396 AMDGPU_INFO_MMR_SE_INDEX_SHIFT) & 396 AMDGPU_INFO_MMR_SE_INDEX_SHIFT) &
@@ -406,9 +406,10 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
406 if (sh_num == AMDGPU_INFO_MMR_SH_INDEX_MASK) 406 if (sh_num == AMDGPU_INFO_MMR_SH_INDEX_MASK)
407 sh_num = 0xffffffff; 407 sh_num = 0xffffffff;
408 408
409 regs = kmalloc(alloc_size, GFP_KERNEL); 409 regs = kmalloc_array(info->read_mmr_reg.count, sizeof(*regs), GFP_KERNEL);
410 if (!regs) 410 if (!regs)
411 return -ENOMEM; 411 return -ENOMEM;
412 alloc_size = info->read_mmr_reg.count * sizeof(*regs);
412 413
413 for (i = 0; i < info->read_mmr_reg.count; i++) 414 for (i = 0; i < info->read_mmr_reg.count; i++)
414 if (amdgpu_asic_read_register(adev, se_num, sh_num, 415 if (amdgpu_asic_read_register(adev, se_num, sh_num,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 08b09d55b96f..1a7708f365f3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -215,6 +215,7 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
215 bool kernel, u32 domain, u64 flags, 215 bool kernel, u32 domain, u64 flags,
216 struct sg_table *sg, 216 struct sg_table *sg,
217 struct ttm_placement *placement, 217 struct ttm_placement *placement,
218 struct reservation_object *resv,
218 struct amdgpu_bo **bo_ptr) 219 struct amdgpu_bo **bo_ptr)
219{ 220{
220 struct amdgpu_bo *bo; 221 struct amdgpu_bo *bo;
@@ -261,7 +262,7 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
261 /* Kernel allocation are uninterruptible */ 262 /* Kernel allocation are uninterruptible */
262 r = ttm_bo_init(&adev->mman.bdev, &bo->tbo, size, type, 263 r = ttm_bo_init(&adev->mman.bdev, &bo->tbo, size, type,
263 &bo->placement, page_align, !kernel, NULL, 264 &bo->placement, page_align, !kernel, NULL,
264 acc_size, sg, NULL, &amdgpu_ttm_bo_destroy); 265 acc_size, sg, resv, &amdgpu_ttm_bo_destroy);
265 if (unlikely(r != 0)) { 266 if (unlikely(r != 0)) {
266 return r; 267 return r;
267 } 268 }
@@ -275,7 +276,9 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
275int amdgpu_bo_create(struct amdgpu_device *adev, 276int amdgpu_bo_create(struct amdgpu_device *adev,
276 unsigned long size, int byte_align, 277 unsigned long size, int byte_align,
277 bool kernel, u32 domain, u64 flags, 278 bool kernel, u32 domain, u64 flags,
278 struct sg_table *sg, struct amdgpu_bo **bo_ptr) 279 struct sg_table *sg,
280 struct reservation_object *resv,
281 struct amdgpu_bo **bo_ptr)
279{ 282{
280 struct ttm_placement placement = {0}; 283 struct ttm_placement placement = {0};
281 struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1]; 284 struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1];
@@ -286,11 +289,9 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
286 amdgpu_ttm_placement_init(adev, &placement, 289 amdgpu_ttm_placement_init(adev, &placement,
287 placements, domain, flags); 290 placements, domain, flags);
288 291
289 return amdgpu_bo_create_restricted(adev, size, byte_align, 292 return amdgpu_bo_create_restricted(adev, size, byte_align, kernel,
290 kernel, domain, flags, 293 domain, flags, sg, &placement,
291 sg, 294 resv, bo_ptr);
292 &placement,
293 bo_ptr);
294} 295}
295 296
296int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr) 297int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
@@ -535,12 +536,10 @@ int amdgpu_bo_set_metadata (struct amdgpu_bo *bo, void *metadata,
535 if (metadata == NULL) 536 if (metadata == NULL)
536 return -EINVAL; 537 return -EINVAL;
537 538
538 buffer = kzalloc(metadata_size, GFP_KERNEL); 539 buffer = kmemdup(metadata, metadata_size, GFP_KERNEL);
539 if (buffer == NULL) 540 if (buffer == NULL)
540 return -ENOMEM; 541 return -ENOMEM;
541 542
542 memcpy(buffer, metadata, metadata_size);
543
544 kfree(bo->metadata); 543 kfree(bo->metadata);
545 bo->metadata_flags = flags; 544 bo->metadata_flags = flags;
546 bo->metadata = buffer; 545 bo->metadata = buffer;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 6ea18dcec561..3c2ff4567798 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -129,12 +129,14 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
129 unsigned long size, int byte_align, 129 unsigned long size, int byte_align,
130 bool kernel, u32 domain, u64 flags, 130 bool kernel, u32 domain, u64 flags,
131 struct sg_table *sg, 131 struct sg_table *sg,
132 struct reservation_object *resv,
132 struct amdgpu_bo **bo_ptr); 133 struct amdgpu_bo **bo_ptr);
133int amdgpu_bo_create_restricted(struct amdgpu_device *adev, 134int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
134 unsigned long size, int byte_align, 135 unsigned long size, int byte_align,
135 bool kernel, u32 domain, u64 flags, 136 bool kernel, u32 domain, u64 flags,
136 struct sg_table *sg, 137 struct sg_table *sg,
137 struct ttm_placement *placement, 138 struct ttm_placement *placement,
139 struct reservation_object *resv,
138 struct amdgpu_bo **bo_ptr); 140 struct amdgpu_bo **bo_ptr);
139int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr); 141int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr);
140void amdgpu_bo_kunmap(struct amdgpu_bo *bo); 142void amdgpu_bo_kunmap(struct amdgpu_bo *bo);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
index d9652fe32d6a..59f735a933a9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
@@ -61,12 +61,15 @@ struct drm_gem_object *amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
61 struct dma_buf_attachment *attach, 61 struct dma_buf_attachment *attach,
62 struct sg_table *sg) 62 struct sg_table *sg)
63{ 63{
64 struct reservation_object *resv = attach->dmabuf->resv;
64 struct amdgpu_device *adev = dev->dev_private; 65 struct amdgpu_device *adev = dev->dev_private;
65 struct amdgpu_bo *bo; 66 struct amdgpu_bo *bo;
66 int ret; 67 int ret;
67 68
69 ww_mutex_lock(&resv->lock, NULL);
68 ret = amdgpu_bo_create(adev, attach->dmabuf->size, PAGE_SIZE, false, 70 ret = amdgpu_bo_create(adev, attach->dmabuf->size, PAGE_SIZE, false,
69 AMDGPU_GEM_DOMAIN_GTT, 0, sg, &bo); 71 AMDGPU_GEM_DOMAIN_GTT, 0, sg, resv, &bo);
72 ww_mutex_unlock(&resv->lock);
70 if (ret) 73 if (ret)
71 return ERR_PTR(ret); 74 return ERR_PTR(ret);
72 75
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 9bec91484c24..30dce235ddeb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -357,11 +357,11 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
357 ring->adev = adev; 357 ring->adev = adev;
358 ring->idx = adev->num_rings++; 358 ring->idx = adev->num_rings++;
359 adev->rings[ring->idx] = ring; 359 adev->rings[ring->idx] = ring;
360 amdgpu_fence_driver_init_ring(ring); 360 r = amdgpu_fence_driver_init_ring(ring);
361 if (r)
362 return r;
361 } 363 }
362 364
363 init_waitqueue_head(&ring->fence_drv.fence_queue);
364
365 r = amdgpu_wb_get(adev, &ring->rptr_offs); 365 r = amdgpu_wb_get(adev, &ring->rptr_offs);
366 if (r) { 366 if (r) {
367 dev_err(adev->dev, "(%d) ring rptr_offs wb alloc failed\n", r); 367 dev_err(adev->dev, "(%d) ring rptr_offs wb alloc failed\n", r);
@@ -407,7 +407,7 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
407 if (ring->ring_obj == NULL) { 407 if (ring->ring_obj == NULL) {
408 r = amdgpu_bo_create(adev, ring->ring_size, PAGE_SIZE, true, 408 r = amdgpu_bo_create(adev, ring->ring_size, PAGE_SIZE, true,
409 AMDGPU_GEM_DOMAIN_GTT, 0, 409 AMDGPU_GEM_DOMAIN_GTT, 0,
410 NULL, &ring->ring_obj); 410 NULL, NULL, &ring->ring_obj);
411 if (r) { 411 if (r) {
412 dev_err(adev->dev, "(%d) ring create failed\n", r); 412 dev_err(adev->dev, "(%d) ring create failed\n", r);
413 return r; 413 return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
index 74dad270362c..e90712443fe9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
@@ -64,8 +64,8 @@ int amdgpu_sa_bo_manager_init(struct amdgpu_device *adev,
64 INIT_LIST_HEAD(&sa_manager->flist[i]); 64 INIT_LIST_HEAD(&sa_manager->flist[i]);
65 } 65 }
66 66
67 r = amdgpu_bo_create(adev, size, align, true, 67 r = amdgpu_bo_create(adev, size, align, true, domain,
68 domain, 0, NULL, &sa_manager->bo); 68 0, NULL, NULL, &sa_manager->bo);
69 if (r) { 69 if (r) {
70 dev_err(adev->dev, "(%d) failed to allocate bo for manager\n", r); 70 dev_err(adev->dev, "(%d) failed to allocate bo for manager\n", r);
71 return r; 71 return r;
@@ -145,8 +145,13 @@ static uint32_t amdgpu_sa_get_ring_from_fence(struct fence *f)
145 struct amd_sched_fence *s_fence; 145 struct amd_sched_fence *s_fence;
146 146
147 s_fence = to_amd_sched_fence(f); 147 s_fence = to_amd_sched_fence(f);
148 if (s_fence) 148 if (s_fence) {
149 return s_fence->scheduler->ring_id; 149 struct amdgpu_ring *ring;
150
151 ring = container_of(s_fence->sched, struct amdgpu_ring, sched);
152 return ring->idx;
153 }
154
150 a_fence = to_amdgpu_fence(f); 155 a_fence = to_amdgpu_fence(f);
151 if (a_fence) 156 if (a_fence)
152 return a_fence->ring->idx; 157 return a_fence->ring->idx;
@@ -412,6 +417,26 @@ void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo,
412} 417}
413 418
414#if defined(CONFIG_DEBUG_FS) 419#if defined(CONFIG_DEBUG_FS)
420
421static void amdgpu_sa_bo_dump_fence(struct fence *fence, struct seq_file *m)
422{
423 struct amdgpu_fence *a_fence = to_amdgpu_fence(fence);
424 struct amd_sched_fence *s_fence = to_amd_sched_fence(fence);
425
426 if (a_fence)
427 seq_printf(m, " protected by 0x%016llx on ring %d",
428 a_fence->seq, a_fence->ring->idx);
429
430 if (s_fence) {
431 struct amdgpu_ring *ring;
432
433
434 ring = container_of(s_fence->sched, struct amdgpu_ring, sched);
435 seq_printf(m, " protected by 0x%016x on ring %d",
436 s_fence->base.seqno, ring->idx);
437 }
438}
439
415void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager, 440void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
416 struct seq_file *m) 441 struct seq_file *m)
417{ 442{
@@ -428,18 +453,8 @@ void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
428 } 453 }
429 seq_printf(m, "[0x%010llx 0x%010llx] size %8lld", 454 seq_printf(m, "[0x%010llx 0x%010llx] size %8lld",
430 soffset, eoffset, eoffset - soffset); 455 soffset, eoffset, eoffset - soffset);
431 if (i->fence) { 456 if (i->fence)
432 struct amdgpu_fence *a_fence = to_amdgpu_fence(i->fence); 457 amdgpu_sa_bo_dump_fence(i->fence, m);
433 struct amd_sched_fence *s_fence = to_amd_sched_fence(i->fence);
434 if (a_fence)
435 seq_printf(m, " protected by 0x%016llx on ring %d",
436 a_fence->seq, a_fence->ring->idx);
437 if (s_fence)
438 seq_printf(m, " protected by 0x%016x on ring %d",
439 s_fence->base.seqno,
440 s_fence->scheduler->ring_id);
441
442 }
443 seq_printf(m, "\n"); 458 seq_printf(m, "\n");
444 } 459 }
445 spin_unlock(&sa_manager->wq.lock); 460 spin_unlock(&sa_manager->wq.lock);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
index de98fbd2971e..2e946b2cad88 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
@@ -27,63 +27,48 @@
27#include <drm/drmP.h> 27#include <drm/drmP.h>
28#include "amdgpu.h" 28#include "amdgpu.h"
29 29
30static struct fence *amdgpu_sched_dependency(struct amd_sched_job *job) 30static struct fence *amdgpu_sched_dependency(struct amd_sched_job *sched_job)
31{ 31{
32 struct amdgpu_job *sched_job = (struct amdgpu_job *)job; 32 struct amdgpu_job *job = to_amdgpu_job(sched_job);
33 return amdgpu_sync_get_fence(&sched_job->ibs->sync); 33 return amdgpu_sync_get_fence(&job->ibs->sync);
34} 34}
35 35
36static struct fence *amdgpu_sched_run_job(struct amd_sched_job *job) 36static struct fence *amdgpu_sched_run_job(struct amd_sched_job *sched_job)
37{ 37{
38 struct amdgpu_job *sched_job; 38 struct amdgpu_fence *fence = NULL;
39 struct amdgpu_fence *fence; 39 struct amdgpu_job *job;
40 int r; 40 int r;
41 41
42 if (!job) { 42 if (!sched_job) {
43 DRM_ERROR("job is null\n"); 43 DRM_ERROR("job is null\n");
44 return NULL; 44 return NULL;
45 } 45 }
46 sched_job = (struct amdgpu_job *)job; 46 job = to_amdgpu_job(sched_job);
47 mutex_lock(&sched_job->job_lock); 47 mutex_lock(&job->job_lock);
48 r = amdgpu_ib_schedule(sched_job->adev, 48 r = amdgpu_ib_schedule(job->adev,
49 sched_job->num_ibs, 49 job->num_ibs,
50 sched_job->ibs, 50 job->ibs,
51 sched_job->base.owner); 51 job->base.owner);
52 if (r) 52 if (r) {
53 DRM_ERROR("Error scheduling IBs (%d)\n", r);
53 goto err; 54 goto err;
54 fence = amdgpu_fence_ref(sched_job->ibs[sched_job->num_ibs - 1].fence); 55 }
55
56 if (sched_job->free_job)
57 sched_job->free_job(sched_job);
58 56
59 mutex_unlock(&sched_job->job_lock); 57 fence = amdgpu_fence_ref(job->ibs[job->num_ibs - 1].fence);
60 return &fence->base;
61 58
62err: 59err:
63 DRM_ERROR("Run job error\n"); 60 if (job->free_job)
64 mutex_unlock(&sched_job->job_lock); 61 job->free_job(job);
65 job->sched->ops->process_job(job);
66 return NULL;
67}
68 62
69static void amdgpu_sched_process_job(struct amd_sched_job *job) 63 mutex_unlock(&job->job_lock);
70{ 64 fence_put(&job->base.s_fence->base);
71 struct amdgpu_job *sched_job; 65 kfree(job);
72 66 return fence ? &fence->base : NULL;
73 if (!job) {
74 DRM_ERROR("job is null\n");
75 return;
76 }
77 sched_job = (struct amdgpu_job *)job;
78 /* after processing job, free memory */
79 fence_put(&sched_job->base.s_fence->base);
80 kfree(sched_job);
81} 67}
82 68
83struct amd_sched_backend_ops amdgpu_sched_ops = { 69struct amd_sched_backend_ops amdgpu_sched_ops = {
84 .dependency = amdgpu_sched_dependency, 70 .dependency = amdgpu_sched_dependency,
85 .run_job = amdgpu_sched_run_job, 71 .run_job = amdgpu_sched_run_job,
86 .process_job = amdgpu_sched_process_job
87}; 72};
88 73
89int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev, 74int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
@@ -100,7 +85,7 @@ int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
100 kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL); 85 kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL);
101 if (!job) 86 if (!job)
102 return -ENOMEM; 87 return -ENOMEM;
103 job->base.sched = ring->scheduler; 88 job->base.sched = &ring->sched;
104 job->base.s_entity = &adev->kernel_ctx.rings[ring->idx].entity; 89 job->base.s_entity = &adev->kernel_ctx.rings[ring->idx].entity;
105 job->adev = adev; 90 job->adev = adev;
106 job->ibs = ibs; 91 job->ibs = ibs;
@@ -109,7 +94,7 @@ int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
109 mutex_init(&job->job_lock); 94 mutex_init(&job->job_lock);
110 job->free_job = free_job; 95 job->free_job = free_job;
111 mutex_lock(&job->job_lock); 96 mutex_lock(&job->job_lock);
112 r = amd_sched_entity_push_job((struct amd_sched_job *)job); 97 r = amd_sched_entity_push_job(&job->base);
113 if (r) { 98 if (r) {
114 mutex_unlock(&job->job_lock); 99 mutex_unlock(&job->job_lock);
115 kfree(job); 100 kfree(job);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index 068aeaff7183..4921de15b451 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -65,8 +65,14 @@ static bool amdgpu_sync_same_dev(struct amdgpu_device *adev, struct fence *f)
65 65
66 if (a_fence) 66 if (a_fence)
67 return a_fence->ring->adev == adev; 67 return a_fence->ring->adev == adev;
68 if (s_fence) 68
69 return (struct amdgpu_device *)s_fence->scheduler->priv == adev; 69 if (s_fence) {
70 struct amdgpu_ring *ring;
71
72 ring = container_of(s_fence->sched, struct amdgpu_ring, sched);
73 return ring->adev == adev;
74 }
75
70 return false; 76 return false;
71} 77}
72 78
@@ -251,6 +257,20 @@ int amdgpu_sync_wait(struct amdgpu_sync *sync)
251 fence_put(e->fence); 257 fence_put(e->fence);
252 kfree(e); 258 kfree(e);
253 } 259 }
260
261 if (amdgpu_enable_semaphores)
262 return 0;
263
264 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
265 struct amdgpu_fence *fence = sync->sync_to[i];
266 if (!fence)
267 continue;
268
269 r = fence_wait(&fence->base, false);
270 if (r)
271 return r;
272 }
273
254 return 0; 274 return 0;
255} 275}
256 276
@@ -285,7 +305,8 @@ int amdgpu_sync_rings(struct amdgpu_sync *sync,
285 return -EINVAL; 305 return -EINVAL;
286 } 306 }
287 307
288 if (amdgpu_enable_scheduler || (count >= AMDGPU_NUM_SYNCS)) { 308 if (amdgpu_enable_scheduler || !amdgpu_enable_semaphores ||
309 (count >= AMDGPU_NUM_SYNCS)) {
289 /* not enough room, wait manually */ 310 /* not enough room, wait manually */
290 r = fence_wait(&fence->base, false); 311 r = fence_wait(&fence->base, false);
291 if (r) 312 if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
index f80b1a43be8a..4865615e9c06 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
@@ -59,8 +59,9 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
59 goto out_cleanup; 59 goto out_cleanup;
60 } 60 }
61 61
62 r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, 0, 62 r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
63 NULL, &vram_obj); 63 AMDGPU_GEM_DOMAIN_VRAM, 0,
64 NULL, NULL, &vram_obj);
64 if (r) { 65 if (r) {
65 DRM_ERROR("Failed to create VRAM object\n"); 66 DRM_ERROR("Failed to create VRAM object\n");
66 goto out_cleanup; 67 goto out_cleanup;
@@ -80,7 +81,8 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
80 struct fence *fence = NULL; 81 struct fence *fence = NULL;
81 82
82 r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, 83 r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
83 AMDGPU_GEM_DOMAIN_GTT, 0, NULL, gtt_obj + i); 84 AMDGPU_GEM_DOMAIN_GTT, 0, NULL,
85 NULL, gtt_obj + i);
84 if (r) { 86 if (r) {
85 DRM_ERROR("Failed to create GTT object %d\n", i); 87 DRM_ERROR("Failed to create GTT object %d\n", i);
86 goto out_lclean; 88 goto out_lclean;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index b5abd5cde413..364cbe975332 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -861,7 +861,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
861 r = amdgpu_bo_create(adev, 256 * 1024, PAGE_SIZE, true, 861 r = amdgpu_bo_create(adev, 256 * 1024, PAGE_SIZE, true,
862 AMDGPU_GEM_DOMAIN_VRAM, 862 AMDGPU_GEM_DOMAIN_VRAM,
863 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, 863 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
864 NULL, &adev->stollen_vga_memory); 864 NULL, NULL, &adev->stollen_vga_memory);
865 if (r) { 865 if (r) {
866 return r; 866 return r;
867 } 867 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
index 482e66797ae6..5cc95f1a7dab 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
@@ -247,7 +247,7 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
247 const struct common_firmware_header *header = NULL; 247 const struct common_firmware_header *header = NULL;
248 248
249 err = amdgpu_bo_create(adev, adev->firmware.fw_size, PAGE_SIZE, true, 249 err = amdgpu_bo_create(adev, adev->firmware.fw_size, PAGE_SIZE, true,
250 AMDGPU_GEM_DOMAIN_GTT, 0, NULL, bo); 250 AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL, bo);
251 if (err) { 251 if (err) {
252 dev_err(adev->dev, "(%d) Firmware buffer allocate failed\n", err); 252 dev_err(adev->dev, "(%d) Firmware buffer allocate failed\n", err);
253 err = -ENOMEM; 253 err = -ENOMEM;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 2cf6c6b06e3b..d0312364d950 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -156,7 +156,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
156 r = amdgpu_bo_create(adev, bo_size, PAGE_SIZE, true, 156 r = amdgpu_bo_create(adev, bo_size, PAGE_SIZE, true,
157 AMDGPU_GEM_DOMAIN_VRAM, 157 AMDGPU_GEM_DOMAIN_VRAM,
158 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, 158 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
159 NULL, &adev->uvd.vcpu_bo); 159 NULL, NULL, &adev->uvd.vcpu_bo);
160 if (r) { 160 if (r) {
161 dev_err(adev->dev, "(%d) failed to allocate UVD bo\n", r); 161 dev_err(adev->dev, "(%d) failed to allocate UVD bo\n", r);
162 return r; 162 return r;
@@ -543,46 +543,60 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
543 return -EINVAL; 543 return -EINVAL;
544 } 544 }
545 545
546 if (msg_type == 1) { 546 switch (msg_type) {
547 case 0:
548 /* it's a create msg, calc image size (width * height) */
549 amdgpu_bo_kunmap(bo);
550
551 /* try to alloc a new handle */
552 for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
553 if (atomic_read(&adev->uvd.handles[i]) == handle) {
554 DRM_ERROR("Handle 0x%x already in use!\n", handle);
555 return -EINVAL;
556 }
557
558 if (!atomic_cmpxchg(&adev->uvd.handles[i], 0, handle)) {
559 adev->uvd.filp[i] = ctx->parser->filp;
560 return 0;
561 }
562 }
563
564 DRM_ERROR("No more free UVD handles!\n");
565 return -EINVAL;
566
567 case 1:
547 /* it's a decode msg, calc buffer sizes */ 568 /* it's a decode msg, calc buffer sizes */
548 r = amdgpu_uvd_cs_msg_decode(msg, ctx->buf_sizes); 569 r = amdgpu_uvd_cs_msg_decode(msg, ctx->buf_sizes);
549 amdgpu_bo_kunmap(bo); 570 amdgpu_bo_kunmap(bo);
550 if (r) 571 if (r)
551 return r; 572 return r;
552 573
553 } else if (msg_type == 2) { 574 /* validate the handle */
575 for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
576 if (atomic_read(&adev->uvd.handles[i]) == handle) {
577 if (adev->uvd.filp[i] != ctx->parser->filp) {
578 DRM_ERROR("UVD handle collision detected!\n");
579 return -EINVAL;
580 }
581 return 0;
582 }
583 }
584
585 DRM_ERROR("Invalid UVD handle 0x%x!\n", handle);
586 return -ENOENT;
587
588 case 2:
554 /* it's a destroy msg, free the handle */ 589 /* it's a destroy msg, free the handle */
555 for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) 590 for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i)
556 atomic_cmpxchg(&adev->uvd.handles[i], handle, 0); 591 atomic_cmpxchg(&adev->uvd.handles[i], handle, 0);
557 amdgpu_bo_kunmap(bo); 592 amdgpu_bo_kunmap(bo);
558 return 0; 593 return 0;
559 } else {
560 /* it's a create msg */
561 amdgpu_bo_kunmap(bo);
562
563 if (msg_type != 0) {
564 DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type);
565 return -EINVAL;
566 }
567
568 /* it's a create msg, no special handling needed */
569 }
570
571 /* create or decode, validate the handle */
572 for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
573 if (atomic_read(&adev->uvd.handles[i]) == handle)
574 return 0;
575 }
576 594
577 /* handle not found try to alloc a new one */ 595 default:
578 for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) { 596 DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type);
579 if (!atomic_cmpxchg(&adev->uvd.handles[i], 0, handle)) { 597 return -EINVAL;
580 adev->uvd.filp[i] = ctx->parser->filp;
581 return 0;
582 }
583 } 598 }
584 599 BUG();
585 DRM_ERROR("No more free UVD handles!\n");
586 return -EINVAL; 600 return -EINVAL;
587} 601}
588 602
@@ -805,10 +819,10 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
805} 819}
806 820
807static int amdgpu_uvd_free_job( 821static int amdgpu_uvd_free_job(
808 struct amdgpu_job *sched_job) 822 struct amdgpu_job *job)
809{ 823{
810 amdgpu_ib_free(sched_job->adev, sched_job->ibs); 824 amdgpu_ib_free(job->adev, job->ibs);
811 kfree(sched_job->ibs); 825 kfree(job->ibs);
812 return 0; 826 return 0;
813} 827}
814 828
@@ -905,7 +919,7 @@ int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
905 r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true, 919 r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
906 AMDGPU_GEM_DOMAIN_VRAM, 920 AMDGPU_GEM_DOMAIN_VRAM,
907 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, 921 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
908 NULL, &bo); 922 NULL, NULL, &bo);
909 if (r) 923 if (r)
910 return r; 924 return r;
911 925
@@ -954,7 +968,7 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
954 r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true, 968 r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
955 AMDGPU_GEM_DOMAIN_VRAM, 969 AMDGPU_GEM_DOMAIN_VRAM,
956 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, 970 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
957 NULL, &bo); 971 NULL, NULL, &bo);
958 if (r) 972 if (r)
959 return r; 973 return r;
960 974
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 3cab96c42aa8..74f2038ac747 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -143,7 +143,7 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
143 r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, 143 r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
144 AMDGPU_GEM_DOMAIN_VRAM, 144 AMDGPU_GEM_DOMAIN_VRAM,
145 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, 145 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
146 NULL, &adev->vce.vcpu_bo); 146 NULL, NULL, &adev->vce.vcpu_bo);
147 if (r) { 147 if (r) {
148 dev_err(adev->dev, "(%d) failed to allocate VCE bo\n", r); 148 dev_err(adev->dev, "(%d) failed to allocate VCE bo\n", r);
149 return r; 149 return r;
@@ -342,10 +342,10 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
342} 342}
343 343
344static int amdgpu_vce_free_job( 344static int amdgpu_vce_free_job(
345 struct amdgpu_job *sched_job) 345 struct amdgpu_job *job)
346{ 346{
347 amdgpu_ib_free(sched_job->adev, sched_job->ibs); 347 amdgpu_ib_free(job->adev, job->ibs);
348 kfree(sched_job->ibs); 348 kfree(job->ibs);
349 return 0; 349 return 0;
350} 350}
351 351
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index f68b7cdc370a..1e14531353e0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -316,12 +316,12 @@ static void amdgpu_vm_update_pages(struct amdgpu_device *adev,
316 } 316 }
317} 317}
318 318
319int amdgpu_vm_free_job(struct amdgpu_job *sched_job) 319int amdgpu_vm_free_job(struct amdgpu_job *job)
320{ 320{
321 int i; 321 int i;
322 for (i = 0; i < sched_job->num_ibs; i++) 322 for (i = 0; i < job->num_ibs; i++)
323 amdgpu_ib_free(sched_job->adev, &sched_job->ibs[i]); 323 amdgpu_ib_free(job->adev, &job->ibs[i]);
324 kfree(sched_job->ibs); 324 kfree(job->ibs);
325 return 0; 325 return 0;
326} 326}
327 327
@@ -686,31 +686,6 @@ static int amdgpu_vm_update_ptes(struct amdgpu_device *adev,
686} 686}
687 687
688/** 688/**
689 * amdgpu_vm_fence_pts - fence page tables after an update
690 *
691 * @vm: requested vm
692 * @start: start of GPU address range
693 * @end: end of GPU address range
694 * @fence: fence to use
695 *
696 * Fence the page tables in the range @start - @end (cayman+).
697 *
698 * Global and local mutex must be locked!
699 */
700static void amdgpu_vm_fence_pts(struct amdgpu_vm *vm,
701 uint64_t start, uint64_t end,
702 struct fence *fence)
703{
704 unsigned i;
705
706 start >>= amdgpu_vm_block_size;
707 end >>= amdgpu_vm_block_size;
708
709 for (i = start; i <= end; ++i)
710 amdgpu_bo_fence(vm->page_tables[i].bo, fence, true);
711}
712
713/**
714 * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table 689 * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
715 * 690 *
716 * @adev: amdgpu_device pointer 691 * @adev: amdgpu_device pointer
@@ -813,8 +788,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
813 if (r) 788 if (r)
814 goto error_free; 789 goto error_free;
815 790
816 amdgpu_vm_fence_pts(vm, mapping->it.start, 791 amdgpu_bo_fence(vm->page_directory, f, true);
817 mapping->it.last + 1, f);
818 if (fence) { 792 if (fence) {
819 fence_put(*fence); 793 fence_put(*fence);
820 *fence = fence_get(f); 794 *fence = fence_get(f);
@@ -855,7 +829,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
855 int r; 829 int r;
856 830
857 if (mem) { 831 if (mem) {
858 addr = mem->start << PAGE_SHIFT; 832 addr = (u64)mem->start << PAGE_SHIFT;
859 if (mem->mem_type != TTM_PL_TT) 833 if (mem->mem_type != TTM_PL_TT)
860 addr += adev->vm_manager.vram_base_offset; 834 addr += adev->vm_manager.vram_base_offset;
861 } else { 835 } else {
@@ -1089,6 +1063,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
1089 1063
1090 /* walk over the address space and allocate the page tables */ 1064 /* walk over the address space and allocate the page tables */
1091 for (pt_idx = saddr; pt_idx <= eaddr; ++pt_idx) { 1065 for (pt_idx = saddr; pt_idx <= eaddr; ++pt_idx) {
1066 struct reservation_object *resv = vm->page_directory->tbo.resv;
1092 struct amdgpu_bo *pt; 1067 struct amdgpu_bo *pt;
1093 1068
1094 if (vm->page_tables[pt_idx].bo) 1069 if (vm->page_tables[pt_idx].bo)
@@ -1097,11 +1072,13 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
1097 /* drop mutex to allocate and clear page table */ 1072 /* drop mutex to allocate and clear page table */
1098 mutex_unlock(&vm->mutex); 1073 mutex_unlock(&vm->mutex);
1099 1074
1075 ww_mutex_lock(&resv->lock, NULL);
1100 r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8, 1076 r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8,
1101 AMDGPU_GPU_PAGE_SIZE, true, 1077 AMDGPU_GPU_PAGE_SIZE, true,
1102 AMDGPU_GEM_DOMAIN_VRAM, 1078 AMDGPU_GEM_DOMAIN_VRAM,
1103 AMDGPU_GEM_CREATE_NO_CPU_ACCESS, 1079 AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
1104 NULL, &pt); 1080 NULL, resv, &pt);
1081 ww_mutex_unlock(&resv->lock);
1105 if (r) 1082 if (r)
1106 goto error_free; 1083 goto error_free;
1107 1084
@@ -1303,7 +1280,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
1303 r = amdgpu_bo_create(adev, pd_size, align, true, 1280 r = amdgpu_bo_create(adev, pd_size, align, true,
1304 AMDGPU_GEM_DOMAIN_VRAM, 1281 AMDGPU_GEM_DOMAIN_VRAM,
1305 AMDGPU_GEM_CREATE_NO_CPU_ACCESS, 1282 AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
1306 NULL, &vm->page_directory); 1283 NULL, NULL, &vm->page_directory);
1307 if (r) 1284 if (r)
1308 return r; 1285 return r;
1309 1286
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_smc.c b/drivers/gpu/drm/amd/amdgpu/cz_smc.c
index a72ffc7d6c26..e33180d3314a 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_smc.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_smc.c
@@ -814,7 +814,8 @@ int cz_smu_init(struct amdgpu_device *adev)
814 * 3. map kernel virtual address 814 * 3. map kernel virtual address
815 */ 815 */
816 ret = amdgpu_bo_create(adev, priv->toc_buffer.data_size, PAGE_SIZE, 816 ret = amdgpu_bo_create(adev, priv->toc_buffer.data_size, PAGE_SIZE,
817 true, AMDGPU_GEM_DOMAIN_GTT, 0, NULL, toc_buf); 817 true, AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
818 toc_buf);
818 819
819 if (ret) { 820 if (ret) {
820 dev_err(adev->dev, "(%d) SMC TOC buffer allocation failed\n", ret); 821 dev_err(adev->dev, "(%d) SMC TOC buffer allocation failed\n", ret);
@@ -822,7 +823,8 @@ int cz_smu_init(struct amdgpu_device *adev)
822 } 823 }
823 824
824 ret = amdgpu_bo_create(adev, priv->smu_buffer.data_size, PAGE_SIZE, 825 ret = amdgpu_bo_create(adev, priv->smu_buffer.data_size, PAGE_SIZE,
825 true, AMDGPU_GEM_DOMAIN_GTT, 0, NULL, smu_buf); 826 true, AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
827 smu_buf);
826 828
827 if (ret) { 829 if (ret) {
828 dev_err(adev->dev, "(%d) SMC Internal buffer allocation failed\n", ret); 830 dev_err(adev->dev, "(%d) SMC Internal buffer allocation failed\n", ret);
diff --git a/drivers/gpu/drm/amd/amdgpu/fiji_smc.c b/drivers/gpu/drm/amd/amdgpu/fiji_smc.c
index 322edea65857..bda1249eb871 100644
--- a/drivers/gpu/drm/amd/amdgpu/fiji_smc.c
+++ b/drivers/gpu/drm/amd/amdgpu/fiji_smc.c
@@ -764,7 +764,7 @@ int fiji_smu_init(struct amdgpu_device *adev)
764 ret = amdgpu_bo_create(adev, image_size, PAGE_SIZE, 764 ret = amdgpu_bo_create(adev, image_size, PAGE_SIZE,
765 true, AMDGPU_GEM_DOMAIN_VRAM, 765 true, AMDGPU_GEM_DOMAIN_VRAM,
766 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, 766 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
767 NULL, toc_buf); 767 NULL, NULL, toc_buf);
768 if (ret) { 768 if (ret) {
769 DRM_ERROR("Failed to allocate memory for TOC buffer\n"); 769 DRM_ERROR("Failed to allocate memory for TOC buffer\n");
770 return -ENOMEM; 770 return -ENOMEM;
@@ -774,7 +774,7 @@ int fiji_smu_init(struct amdgpu_device *adev)
774 ret = amdgpu_bo_create(adev, smu_internal_buffer_size, PAGE_SIZE, 774 ret = amdgpu_bo_create(adev, smu_internal_buffer_size, PAGE_SIZE,
775 true, AMDGPU_GEM_DOMAIN_VRAM, 775 true, AMDGPU_GEM_DOMAIN_VRAM,
776 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, 776 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
777 NULL, smu_buf); 777 NULL, NULL, smu_buf);
778 if (ret) { 778 if (ret) {
779 DRM_ERROR("Failed to allocate memory for SMU internal buffer\n"); 779 DRM_ERROR("Failed to allocate memory for SMU internal buffer\n");
780 return -ENOMEM; 780 return -ENOMEM;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 4bd1e5cf65ca..e992bf2ff66c 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -3206,7 +3206,7 @@ static int gfx_v7_0_mec_init(struct amdgpu_device *adev)
3206 r = amdgpu_bo_create(adev, 3206 r = amdgpu_bo_create(adev,
3207 adev->gfx.mec.num_mec *adev->gfx.mec.num_pipe * MEC_HPD_SIZE * 2, 3207 adev->gfx.mec.num_mec *adev->gfx.mec.num_pipe * MEC_HPD_SIZE * 2,
3208 PAGE_SIZE, true, 3208 PAGE_SIZE, true,
3209 AMDGPU_GEM_DOMAIN_GTT, 0, NULL, 3209 AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
3210 &adev->gfx.mec.hpd_eop_obj); 3210 &adev->gfx.mec.hpd_eop_obj);
3211 if (r) { 3211 if (r) {
3212 dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r); 3212 dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r);
@@ -3373,7 +3373,7 @@ static int gfx_v7_0_cp_compute_resume(struct amdgpu_device *adev)
3373 r = amdgpu_bo_create(adev, 3373 r = amdgpu_bo_create(adev,
3374 sizeof(struct bonaire_mqd), 3374 sizeof(struct bonaire_mqd),
3375 PAGE_SIZE, true, 3375 PAGE_SIZE, true,
3376 AMDGPU_GEM_DOMAIN_GTT, 0, NULL, 3376 AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
3377 &ring->mqd_obj); 3377 &ring->mqd_obj);
3378 if (r) { 3378 if (r) {
3379 dev_warn(adev->dev, "(%d) create MQD bo failed\n", r); 3379 dev_warn(adev->dev, "(%d) create MQD bo failed\n", r);
@@ -3610,41 +3610,6 @@ static int gfx_v7_0_cp_resume(struct amdgpu_device *adev)
3610 return 0; 3610 return 0;
3611} 3611}
3612 3612
3613static void gfx_v7_0_ce_sync_me(struct amdgpu_ring *ring)
3614{
3615 struct amdgpu_device *adev = ring->adev;
3616 u64 gpu_addr = adev->wb.gpu_addr + adev->gfx.ce_sync_offs * 4;
3617
3618 /* instruct DE to set a magic number */
3619 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
3620 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
3621 WRITE_DATA_DST_SEL(5)));
3622 amdgpu_ring_write(ring, gpu_addr & 0xfffffffc);
3623 amdgpu_ring_write(ring, upper_32_bits(gpu_addr) & 0xffffffff);
3624 amdgpu_ring_write(ring, 1);
3625
3626 /* let CE wait till condition satisfied */
3627 amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
3628 amdgpu_ring_write(ring, (WAIT_REG_MEM_OPERATION(0) | /* wait */
3629 WAIT_REG_MEM_MEM_SPACE(1) | /* memory */
3630 WAIT_REG_MEM_FUNCTION(3) | /* == */
3631 WAIT_REG_MEM_ENGINE(2))); /* ce */
3632 amdgpu_ring_write(ring, gpu_addr & 0xfffffffc);
3633 amdgpu_ring_write(ring, upper_32_bits(gpu_addr) & 0xffffffff);
3634 amdgpu_ring_write(ring, 1);
3635 amdgpu_ring_write(ring, 0xffffffff);
3636 amdgpu_ring_write(ring, 4); /* poll interval */
3637
3638 /* instruct CE to reset wb of ce_sync to zero */
3639 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
3640 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(2) |
3641 WRITE_DATA_DST_SEL(5) |
3642 WR_CONFIRM));
3643 amdgpu_ring_write(ring, gpu_addr & 0xfffffffc);
3644 amdgpu_ring_write(ring, upper_32_bits(gpu_addr) & 0xffffffff);
3645 amdgpu_ring_write(ring, 0);
3646}
3647
3648/* 3613/*
3649 * vm 3614 * vm
3650 * VMID 0 is the physical GPU addresses as used by the kernel. 3615 * VMID 0 is the physical GPU addresses as used by the kernel.
@@ -3663,6 +3628,13 @@ static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
3663 unsigned vm_id, uint64_t pd_addr) 3628 unsigned vm_id, uint64_t pd_addr)
3664{ 3629{
3665 int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX); 3630 int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
3631 if (usepfp) {
3632 /* synce CE with ME to prevent CE fetch CEIB before context switch done */
3633 amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
3634 amdgpu_ring_write(ring, 0);
3635 amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
3636 amdgpu_ring_write(ring, 0);
3637 }
3666 3638
3667 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); 3639 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
3668 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) | 3640 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
@@ -3703,7 +3675,10 @@ static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
3703 amdgpu_ring_write(ring, 0x0); 3675 amdgpu_ring_write(ring, 0x0);
3704 3676
3705 /* synce CE with ME to prevent CE fetch CEIB before context switch done */ 3677 /* synce CE with ME to prevent CE fetch CEIB before context switch done */
3706 gfx_v7_0_ce_sync_me(ring); 3678 amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
3679 amdgpu_ring_write(ring, 0);
3680 amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
3681 amdgpu_ring_write(ring, 0);
3707 } 3682 }
3708} 3683}
3709 3684
@@ -3788,7 +3763,8 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
3788 r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true, 3763 r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
3789 AMDGPU_GEM_DOMAIN_VRAM, 3764 AMDGPU_GEM_DOMAIN_VRAM,
3790 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, 3765 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
3791 NULL, &adev->gfx.rlc.save_restore_obj); 3766 NULL, NULL,
3767 &adev->gfx.rlc.save_restore_obj);
3792 if (r) { 3768 if (r) {
3793 dev_warn(adev->dev, "(%d) create RLC sr bo failed\n", r); 3769 dev_warn(adev->dev, "(%d) create RLC sr bo failed\n", r);
3794 return r; 3770 return r;
@@ -3831,7 +3807,8 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
3831 r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true, 3807 r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
3832 AMDGPU_GEM_DOMAIN_VRAM, 3808 AMDGPU_GEM_DOMAIN_VRAM,
3833 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, 3809 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
3834 NULL, &adev->gfx.rlc.clear_state_obj); 3810 NULL, NULL,
3811 &adev->gfx.rlc.clear_state_obj);
3835 if (r) { 3812 if (r) {
3836 dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r); 3813 dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r);
3837 gfx_v7_0_rlc_fini(adev); 3814 gfx_v7_0_rlc_fini(adev);
@@ -3870,7 +3847,8 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
3870 r = amdgpu_bo_create(adev, adev->gfx.rlc.cp_table_size, PAGE_SIZE, true, 3847 r = amdgpu_bo_create(adev, adev->gfx.rlc.cp_table_size, PAGE_SIZE, true,
3871 AMDGPU_GEM_DOMAIN_VRAM, 3848 AMDGPU_GEM_DOMAIN_VRAM,
3872 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, 3849 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
3873 NULL, &adev->gfx.rlc.cp_table_obj); 3850 NULL, NULL,
3851 &adev->gfx.rlc.cp_table_obj);
3874 if (r) { 3852 if (r) {
3875 dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r); 3853 dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r);
3876 gfx_v7_0_rlc_fini(adev); 3854 gfx_v7_0_rlc_fini(adev);
@@ -4802,12 +4780,6 @@ static int gfx_v7_0_sw_init(void *handle)
4802 return r; 4780 return r;
4803 } 4781 }
4804 4782
4805 r = amdgpu_wb_get(adev, &adev->gfx.ce_sync_offs);
4806 if (r) {
4807 DRM_ERROR("(%d) gfx.ce_sync_offs wb alloc failed\n", r);
4808 return r;
4809 }
4810
4811 for (i = 0; i < adev->gfx.num_gfx_rings; i++) { 4783 for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
4812 ring = &adev->gfx.gfx_ring[i]; 4784 ring = &adev->gfx.gfx_ring[i];
4813 ring->ring_obj = NULL; 4785 ring->ring_obj = NULL;
@@ -4851,21 +4823,21 @@ static int gfx_v7_0_sw_init(void *handle)
4851 r = amdgpu_bo_create(adev, adev->gds.mem.gfx_partition_size, 4823 r = amdgpu_bo_create(adev, adev->gds.mem.gfx_partition_size,
4852 PAGE_SIZE, true, 4824 PAGE_SIZE, true,
4853 AMDGPU_GEM_DOMAIN_GDS, 0, 4825 AMDGPU_GEM_DOMAIN_GDS, 0,
4854 NULL, &adev->gds.gds_gfx_bo); 4826 NULL, NULL, &adev->gds.gds_gfx_bo);
4855 if (r) 4827 if (r)
4856 return r; 4828 return r;
4857 4829
4858 r = amdgpu_bo_create(adev, adev->gds.gws.gfx_partition_size, 4830 r = amdgpu_bo_create(adev, adev->gds.gws.gfx_partition_size,
4859 PAGE_SIZE, true, 4831 PAGE_SIZE, true,
4860 AMDGPU_GEM_DOMAIN_GWS, 0, 4832 AMDGPU_GEM_DOMAIN_GWS, 0,
4861 NULL, &adev->gds.gws_gfx_bo); 4833 NULL, NULL, &adev->gds.gws_gfx_bo);
4862 if (r) 4834 if (r)
4863 return r; 4835 return r;
4864 4836
4865 r = amdgpu_bo_create(adev, adev->gds.oa.gfx_partition_size, 4837 r = amdgpu_bo_create(adev, adev->gds.oa.gfx_partition_size,
4866 PAGE_SIZE, true, 4838 PAGE_SIZE, true,
4867 AMDGPU_GEM_DOMAIN_OA, 0, 4839 AMDGPU_GEM_DOMAIN_OA, 0,
4868 NULL, &adev->gds.oa_gfx_bo); 4840 NULL, NULL, &adev->gds.oa_gfx_bo);
4869 if (r) 4841 if (r)
4870 return r; 4842 return r;
4871 4843
@@ -4886,8 +4858,6 @@ static int gfx_v7_0_sw_fini(void *handle)
4886 for (i = 0; i < adev->gfx.num_compute_rings; i++) 4858 for (i = 0; i < adev->gfx.num_compute_rings; i++)
4887 amdgpu_ring_fini(&adev->gfx.compute_ring[i]); 4859 amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
4888 4860
4889 amdgpu_wb_free(adev, adev->gfx.ce_sync_offs);
4890
4891 gfx_v7_0_cp_compute_fini(adev); 4861 gfx_v7_0_cp_compute_fini(adev);
4892 gfx_v7_0_rlc_fini(adev); 4862 gfx_v7_0_rlc_fini(adev);
4893 gfx_v7_0_mec_fini(adev); 4863 gfx_v7_0_mec_fini(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 53f07439a512..cb4f68f53f24 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -868,7 +868,7 @@ static int gfx_v8_0_mec_init(struct amdgpu_device *adev)
868 r = amdgpu_bo_create(adev, 868 r = amdgpu_bo_create(adev,
869 adev->gfx.mec.num_mec *adev->gfx.mec.num_pipe * MEC_HPD_SIZE * 2, 869 adev->gfx.mec.num_mec *adev->gfx.mec.num_pipe * MEC_HPD_SIZE * 2,
870 PAGE_SIZE, true, 870 PAGE_SIZE, true,
871 AMDGPU_GEM_DOMAIN_GTT, 0, NULL, 871 AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
872 &adev->gfx.mec.hpd_eop_obj); 872 &adev->gfx.mec.hpd_eop_obj);
873 if (r) { 873 if (r) {
874 dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r); 874 dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r);
@@ -940,12 +940,6 @@ static int gfx_v8_0_sw_init(void *handle)
940 return r; 940 return r;
941 } 941 }
942 942
943 r = amdgpu_wb_get(adev, &adev->gfx.ce_sync_offs);
944 if (r) {
945 DRM_ERROR("(%d) gfx.ce_sync_offs wb alloc failed\n", r);
946 return r;
947 }
948
949 /* set up the gfx ring */ 943 /* set up the gfx ring */
950 for (i = 0; i < adev->gfx.num_gfx_rings; i++) { 944 for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
951 ring = &adev->gfx.gfx_ring[i]; 945 ring = &adev->gfx.gfx_ring[i];
@@ -995,21 +989,21 @@ static int gfx_v8_0_sw_init(void *handle)
995 /* reserve GDS, GWS and OA resource for gfx */ 989 /* reserve GDS, GWS and OA resource for gfx */
996 r = amdgpu_bo_create(adev, adev->gds.mem.gfx_partition_size, 990 r = amdgpu_bo_create(adev, adev->gds.mem.gfx_partition_size,
997 PAGE_SIZE, true, 991 PAGE_SIZE, true,
998 AMDGPU_GEM_DOMAIN_GDS, 0, 992 AMDGPU_GEM_DOMAIN_GDS, 0, NULL,
999 NULL, &adev->gds.gds_gfx_bo); 993 NULL, &adev->gds.gds_gfx_bo);
1000 if (r) 994 if (r)
1001 return r; 995 return r;
1002 996
1003 r = amdgpu_bo_create(adev, adev->gds.gws.gfx_partition_size, 997 r = amdgpu_bo_create(adev, adev->gds.gws.gfx_partition_size,
1004 PAGE_SIZE, true, 998 PAGE_SIZE, true,
1005 AMDGPU_GEM_DOMAIN_GWS, 0, 999 AMDGPU_GEM_DOMAIN_GWS, 0, NULL,
1006 NULL, &adev->gds.gws_gfx_bo); 1000 NULL, &adev->gds.gws_gfx_bo);
1007 if (r) 1001 if (r)
1008 return r; 1002 return r;
1009 1003
1010 r = amdgpu_bo_create(adev, adev->gds.oa.gfx_partition_size, 1004 r = amdgpu_bo_create(adev, adev->gds.oa.gfx_partition_size,
1011 PAGE_SIZE, true, 1005 PAGE_SIZE, true,
1012 AMDGPU_GEM_DOMAIN_OA, 0, 1006 AMDGPU_GEM_DOMAIN_OA, 0, NULL,
1013 NULL, &adev->gds.oa_gfx_bo); 1007 NULL, &adev->gds.oa_gfx_bo);
1014 if (r) 1008 if (r)
1015 return r; 1009 return r;
@@ -1033,8 +1027,6 @@ static int gfx_v8_0_sw_fini(void *handle)
1033 for (i = 0; i < adev->gfx.num_compute_rings; i++) 1027 for (i = 0; i < adev->gfx.num_compute_rings; i++)
1034 amdgpu_ring_fini(&adev->gfx.compute_ring[i]); 1028 amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
1035 1029
1036 amdgpu_wb_free(adev, adev->gfx.ce_sync_offs);
1037
1038 gfx_v8_0_mec_fini(adev); 1030 gfx_v8_0_mec_fini(adev);
1039 1031
1040 return 0; 1032 return 0;
@@ -3106,7 +3098,7 @@ static int gfx_v8_0_cp_compute_resume(struct amdgpu_device *adev)
3106 sizeof(struct vi_mqd), 3098 sizeof(struct vi_mqd),
3107 PAGE_SIZE, true, 3099 PAGE_SIZE, true,
3108 AMDGPU_GEM_DOMAIN_GTT, 0, NULL, 3100 AMDGPU_GEM_DOMAIN_GTT, 0, NULL,
3109 &ring->mqd_obj); 3101 NULL, &ring->mqd_obj);
3110 if (r) { 3102 if (r) {
3111 dev_warn(adev->dev, "(%d) create MQD bo failed\n", r); 3103 dev_warn(adev->dev, "(%d) create MQD bo failed\n", r);
3112 return r; 3104 return r;
@@ -3965,6 +3957,7 @@ static void gfx_v8_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,
3965 DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0)); 3957 DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
3966 amdgpu_ring_write(ring, lower_32_bits(seq)); 3958 amdgpu_ring_write(ring, lower_32_bits(seq));
3967 amdgpu_ring_write(ring, upper_32_bits(seq)); 3959 amdgpu_ring_write(ring, upper_32_bits(seq));
3960
3968} 3961}
3969 3962
3970/** 3963/**
@@ -4005,49 +3998,34 @@ static bool gfx_v8_0_ring_emit_semaphore(struct amdgpu_ring *ring,
4005 return true; 3998 return true;
4006} 3999}
4007 4000
4008static void gfx_v8_0_ce_sync_me(struct amdgpu_ring *ring) 4001static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
4002 unsigned vm_id, uint64_t pd_addr)
4009{ 4003{
4010 struct amdgpu_device *adev = ring->adev; 4004 int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
4011 u64 gpu_addr = adev->wb.gpu_addr + adev->gfx.ce_sync_offs * 4; 4005 uint32_t seq = ring->fence_drv.sync_seq[ring->idx];
4012 4006 uint64_t addr = ring->fence_drv.gpu_addr;
4013 /* instruct DE to set a magic number */
4014 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
4015 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
4016 WRITE_DATA_DST_SEL(5)));
4017 amdgpu_ring_write(ring, gpu_addr & 0xfffffffc);
4018 amdgpu_ring_write(ring, upper_32_bits(gpu_addr) & 0xffffffff);
4019 amdgpu_ring_write(ring, 1);
4020 4007
4021 /* let CE wait till condition satisfied */
4022 amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5)); 4008 amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
4023 amdgpu_ring_write(ring, (WAIT_REG_MEM_OPERATION(0) | /* wait */ 4009 amdgpu_ring_write(ring, (WAIT_REG_MEM_MEM_SPACE(1) | /* memory */
4024 WAIT_REG_MEM_MEM_SPACE(1) | /* memory */ 4010 WAIT_REG_MEM_FUNCTION(3))); /* equal */
4025 WAIT_REG_MEM_FUNCTION(3) | /* == */ 4011 amdgpu_ring_write(ring, addr & 0xfffffffc);
4026 WAIT_REG_MEM_ENGINE(2))); /* ce */ 4012 amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
4027 amdgpu_ring_write(ring, gpu_addr & 0xfffffffc); 4013 amdgpu_ring_write(ring, seq);
4028 amdgpu_ring_write(ring, upper_32_bits(gpu_addr) & 0xffffffff);
4029 amdgpu_ring_write(ring, 1);
4030 amdgpu_ring_write(ring, 0xffffffff); 4014 amdgpu_ring_write(ring, 0xffffffff);
4031 amdgpu_ring_write(ring, 4); /* poll interval */ 4015 amdgpu_ring_write(ring, 4); /* poll interval */
4032 4016
4033 /* instruct CE to reset wb of ce_sync to zero */ 4017 if (usepfp) {
4034 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); 4018 /* synce CE with ME to prevent CE fetch CEIB before context switch done */
4035 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(2) | 4019 amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
4036 WRITE_DATA_DST_SEL(5) | 4020 amdgpu_ring_write(ring, 0);
4037 WR_CONFIRM)); 4021 amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
4038 amdgpu_ring_write(ring, gpu_addr & 0xfffffffc); 4022 amdgpu_ring_write(ring, 0);
4039 amdgpu_ring_write(ring, upper_32_bits(gpu_addr) & 0xffffffff); 4023 }
4040 amdgpu_ring_write(ring, 0);
4041}
4042
4043static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
4044 unsigned vm_id, uint64_t pd_addr)
4045{
4046 int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
4047 4024
4048 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); 4025 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
4049 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) | 4026 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
4050 WRITE_DATA_DST_SEL(0))); 4027 WRITE_DATA_DST_SEL(0)) |
4028 WR_CONFIRM);
4051 if (vm_id < 8) { 4029 if (vm_id < 8) {
4052 amdgpu_ring_write(ring, 4030 amdgpu_ring_write(ring,
4053 (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id)); 4031 (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id));
@@ -4083,9 +4061,10 @@ static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
4083 /* sync PFP to ME, otherwise we might get invalid PFP reads */ 4061 /* sync PFP to ME, otherwise we might get invalid PFP reads */
4084 amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0)); 4062 amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
4085 amdgpu_ring_write(ring, 0x0); 4063 amdgpu_ring_write(ring, 0x0);
4086 4064 amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
4087 /* synce CE with ME to prevent CE fetch CEIB before context switch done */ 4065 amdgpu_ring_write(ring, 0);
4088 gfx_v8_0_ce_sync_me(ring); 4066 amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
4067 amdgpu_ring_write(ring, 0);
4089 } 4068 }
4090} 4069}
4091 4070
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_smc.c b/drivers/gpu/drm/amd/amdgpu/iceland_smc.c
index c900aa942ade..966d4b2ed9da 100644
--- a/drivers/gpu/drm/amd/amdgpu/iceland_smc.c
+++ b/drivers/gpu/drm/amd/amdgpu/iceland_smc.c
@@ -625,7 +625,7 @@ int iceland_smu_init(struct amdgpu_device *adev)
625 ret = amdgpu_bo_create(adev, image_size, PAGE_SIZE, 625 ret = amdgpu_bo_create(adev, image_size, PAGE_SIZE,
626 true, AMDGPU_GEM_DOMAIN_VRAM, 626 true, AMDGPU_GEM_DOMAIN_VRAM,
627 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, 627 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
628 NULL, toc_buf); 628 NULL, NULL, toc_buf);
629 if (ret) { 629 if (ret) {
630 DRM_ERROR("Failed to allocate memory for TOC buffer\n"); 630 DRM_ERROR("Failed to allocate memory for TOC buffer\n");
631 return -ENOMEM; 631 return -ENOMEM;
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_smc.c b/drivers/gpu/drm/amd/amdgpu/tonga_smc.c
index 1f5ac941a610..5421309c1862 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_smc.c
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_smc.c
@@ -763,7 +763,7 @@ int tonga_smu_init(struct amdgpu_device *adev)
763 ret = amdgpu_bo_create(adev, image_size, PAGE_SIZE, 763 ret = amdgpu_bo_create(adev, image_size, PAGE_SIZE,
764 true, AMDGPU_GEM_DOMAIN_VRAM, 764 true, AMDGPU_GEM_DOMAIN_VRAM,
765 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, 765 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
766 NULL, toc_buf); 766 NULL, NULL, toc_buf);
767 if (ret) { 767 if (ret) {
768 DRM_ERROR("Failed to allocate memory for TOC buffer\n"); 768 DRM_ERROR("Failed to allocate memory for TOC buffer\n");
769 return -ENOMEM; 769 return -ENOMEM;
@@ -773,7 +773,7 @@ int tonga_smu_init(struct amdgpu_device *adev)
773 ret = amdgpu_bo_create(adev, smu_internal_buffer_size, PAGE_SIZE, 773 ret = amdgpu_bo_create(adev, smu_internal_buffer_size, PAGE_SIZE,
774 true, AMDGPU_GEM_DOMAIN_VRAM, 774 true, AMDGPU_GEM_DOMAIN_VRAM,
775 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, 775 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
776 NULL, smu_buf); 776 NULL, NULL, smu_buf);
777 if (ret) { 777 if (ret) {
778 DRM_ERROR("Failed to allocate memory for SMU internal buffer\n"); 778 DRM_ERROR("Failed to allocate memory for SMU internal buffer\n");
779 return -ENOMEM; 779 return -ENOMEM;
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
index 5fac5da694f0..ed50dd725788 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -224,11 +224,11 @@ static int uvd_v4_2_suspend(void *handle)
224 int r; 224 int r;
225 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 225 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
226 226
227 r = uvd_v4_2_hw_fini(adev); 227 r = amdgpu_uvd_suspend(adev);
228 if (r) 228 if (r)
229 return r; 229 return r;
230 230
231 r = amdgpu_uvd_suspend(adev); 231 r = uvd_v4_2_hw_fini(adev);
232 if (r) 232 if (r)
233 return r; 233 return r;
234 234
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
index 2d5c59c318af..9ad8b9906c0b 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -220,11 +220,11 @@ static int uvd_v5_0_suspend(void *handle)
220 int r; 220 int r;
221 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 221 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
222 222
223 r = uvd_v5_0_hw_fini(adev); 223 r = amdgpu_uvd_suspend(adev);
224 if (r) 224 if (r)
225 return r; 225 return r;
226 226
227 r = amdgpu_uvd_suspend(adev); 227 r = uvd_v5_0_hw_fini(adev);
228 if (r) 228 if (r)
229 return r; 229 return r;
230 230
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index d9f553fce531..7e9934fa4193 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -214,14 +214,16 @@ static int uvd_v6_0_suspend(void *handle)
214 int r; 214 int r;
215 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 215 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
216 216
217 /* Skip this for APU for now */
218 if (!(adev->flags & AMD_IS_APU)) {
219 r = amdgpu_uvd_suspend(adev);
220 if (r)
221 return r;
222 }
217 r = uvd_v6_0_hw_fini(adev); 223 r = uvd_v6_0_hw_fini(adev);
218 if (r) 224 if (r)
219 return r; 225 return r;
220 226
221 r = amdgpu_uvd_suspend(adev);
222 if (r)
223 return r;
224
225 return r; 227 return r;
226} 228}
227 229
@@ -230,10 +232,12 @@ static int uvd_v6_0_resume(void *handle)
230 int r; 232 int r;
231 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 233 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
232 234
233 r = amdgpu_uvd_resume(adev); 235 /* Skip this for APU for now */
234 if (r) 236 if (!(adev->flags & AMD_IS_APU)) {
235 return r; 237 r = amdgpu_uvd_resume(adev);
236 238 if (r)
239 return r;
240 }
237 r = uvd_v6_0_hw_init(adev); 241 r = uvd_v6_0_hw_init(adev);
238 if (r) 242 if (r)
239 return r; 243 return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 552d9e75ad1b..b55ceb14fdcd 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -1400,7 +1400,8 @@ static int vi_common_early_init(void *handle)
1400 case CHIP_CARRIZO: 1400 case CHIP_CARRIZO:
1401 adev->has_uvd = true; 1401 adev->has_uvd = true;
1402 adev->cg_flags = 0; 1402 adev->cg_flags = 0;
1403 adev->pg_flags = AMDGPU_PG_SUPPORT_UVD | AMDGPU_PG_SUPPORT_VCE; 1403 /* Disable UVD pg */
1404 adev->pg_flags = /* AMDGPU_PG_SUPPORT_UVD | */AMDGPU_PG_SUPPORT_VCE;
1404 adev->external_rev_id = adev->rev_id + 0x1; 1405 adev->external_rev_id = adev->rev_id + 0x1;
1405 if (amdgpu_smc_load_fw && smc_enabled) 1406 if (amdgpu_smc_load_fw && smc_enabled)
1406 adev->firmware.smu_load = true; 1407 adev->firmware.smu_load = true;
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
new file mode 100644
index 000000000000..144f50acc971
--- /dev/null
+++ b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
@@ -0,0 +1,41 @@
1#if !defined(_GPU_SCHED_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
2#define _GPU_SCHED_TRACE_H_
3
4#include <linux/stringify.h>
5#include <linux/types.h>
6#include <linux/tracepoint.h>
7
8#include <drm/drmP.h>
9
10#undef TRACE_SYSTEM
11#define TRACE_SYSTEM gpu_sched
12#define TRACE_INCLUDE_FILE gpu_sched_trace
13
14TRACE_EVENT(amd_sched_job,
15 TP_PROTO(struct amd_sched_job *sched_job),
16 TP_ARGS(sched_job),
17 TP_STRUCT__entry(
18 __field(struct amd_sched_entity *, entity)
19 __field(const char *, name)
20 __field(u32, job_count)
21 __field(int, hw_job_count)
22 ),
23
24 TP_fast_assign(
25 __entry->entity = sched_job->s_entity;
26 __entry->name = sched_job->sched->name;
27 __entry->job_count = kfifo_len(
28 &sched_job->s_entity->job_queue) / sizeof(sched_job);
29 __entry->hw_job_count = atomic_read(
30 &sched_job->sched->hw_rq_count);
31 ),
32 TP_printk("entity=%p, ring=%s, job count:%u, hw job count:%d",
33 __entry->entity, __entry->name, __entry->job_count,
34 __entry->hw_job_count)
35);
36#endif
37
38/* This part must be outside protection */
39#undef TRACE_INCLUDE_PATH
40#define TRACE_INCLUDE_PATH .
41#include <trace/define_trace.h>
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index 9259f1b6664c..3697eeeecf82 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -27,6 +27,9 @@
27#include <drm/drmP.h> 27#include <drm/drmP.h>
28#include "gpu_scheduler.h" 28#include "gpu_scheduler.h"
29 29
30#define CREATE_TRACE_POINTS
31#include "gpu_sched_trace.h"
32
30static struct amd_sched_job * 33static struct amd_sched_job *
31amd_sched_entity_pop_job(struct amd_sched_entity *entity); 34amd_sched_entity_pop_job(struct amd_sched_entity *entity);
32static void amd_sched_wakeup(struct amd_gpu_scheduler *sched); 35static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);
@@ -65,29 +68,29 @@ static struct amd_sched_job *
65amd_sched_rq_select_job(struct amd_sched_rq *rq) 68amd_sched_rq_select_job(struct amd_sched_rq *rq)
66{ 69{
67 struct amd_sched_entity *entity; 70 struct amd_sched_entity *entity;
68 struct amd_sched_job *job; 71 struct amd_sched_job *sched_job;
69 72
70 spin_lock(&rq->lock); 73 spin_lock(&rq->lock);
71 74
72 entity = rq->current_entity; 75 entity = rq->current_entity;
73 if (entity) { 76 if (entity) {
74 list_for_each_entry_continue(entity, &rq->entities, list) { 77 list_for_each_entry_continue(entity, &rq->entities, list) {
75 job = amd_sched_entity_pop_job(entity); 78 sched_job = amd_sched_entity_pop_job(entity);
76 if (job) { 79 if (sched_job) {
77 rq->current_entity = entity; 80 rq->current_entity = entity;
78 spin_unlock(&rq->lock); 81 spin_unlock(&rq->lock);
79 return job; 82 return sched_job;
80 } 83 }
81 } 84 }
82 } 85 }
83 86
84 list_for_each_entry(entity, &rq->entities, list) { 87 list_for_each_entry(entity, &rq->entities, list) {
85 88
86 job = amd_sched_entity_pop_job(entity); 89 sched_job = amd_sched_entity_pop_job(entity);
87 if (job) { 90 if (sched_job) {
88 rq->current_entity = entity; 91 rq->current_entity = entity;
89 spin_unlock(&rq->lock); 92 spin_unlock(&rq->lock);
90 return job; 93 return sched_job;
91 } 94 }
92 95
93 if (entity == rq->current_entity) 96 if (entity == rq->current_entity)
@@ -115,23 +118,27 @@ int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
115 struct amd_sched_rq *rq, 118 struct amd_sched_rq *rq,
116 uint32_t jobs) 119 uint32_t jobs)
117{ 120{
121 int r;
122
118 if (!(sched && entity && rq)) 123 if (!(sched && entity && rq))
119 return -EINVAL; 124 return -EINVAL;
120 125
121 memset(entity, 0, sizeof(struct amd_sched_entity)); 126 memset(entity, 0, sizeof(struct amd_sched_entity));
122 entity->belongto_rq = rq; 127 INIT_LIST_HEAD(&entity->list);
123 entity->scheduler = sched; 128 entity->rq = rq;
124 entity->fence_context = fence_context_alloc(1); 129 entity->sched = sched;
125 if(kfifo_alloc(&entity->job_queue,
126 jobs * sizeof(void *),
127 GFP_KERNEL))
128 return -EINVAL;
129 130
130 spin_lock_init(&entity->queue_lock); 131 spin_lock_init(&entity->queue_lock);
132 r = kfifo_alloc(&entity->job_queue, jobs * sizeof(void *), GFP_KERNEL);
133 if (r)
134 return r;
135
131 atomic_set(&entity->fence_seq, 0); 136 atomic_set(&entity->fence_seq, 0);
137 entity->fence_context = fence_context_alloc(1);
132 138
133 /* Add the entity to the run queue */ 139 /* Add the entity to the run queue */
134 amd_sched_rq_add_entity(rq, entity); 140 amd_sched_rq_add_entity(rq, entity);
141
135 return 0; 142 return 0;
136} 143}
137 144
@@ -146,8 +153,8 @@ int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
146static bool amd_sched_entity_is_initialized(struct amd_gpu_scheduler *sched, 153static bool amd_sched_entity_is_initialized(struct amd_gpu_scheduler *sched,
147 struct amd_sched_entity *entity) 154 struct amd_sched_entity *entity)
148{ 155{
149 return entity->scheduler == sched && 156 return entity->sched == sched &&
150 entity->belongto_rq != NULL; 157 entity->rq != NULL;
151} 158}
152 159
153/** 160/**
@@ -177,7 +184,7 @@ static bool amd_sched_entity_is_idle(struct amd_sched_entity *entity)
177void amd_sched_entity_fini(struct amd_gpu_scheduler *sched, 184void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
178 struct amd_sched_entity *entity) 185 struct amd_sched_entity *entity)
179{ 186{
180 struct amd_sched_rq *rq = entity->belongto_rq; 187 struct amd_sched_rq *rq = entity->rq;
181 188
182 if (!amd_sched_entity_is_initialized(sched, entity)) 189 if (!amd_sched_entity_is_initialized(sched, entity))
183 return; 190 return;
@@ -198,22 +205,22 @@ static void amd_sched_entity_wakeup(struct fence *f, struct fence_cb *cb)
198 container_of(cb, struct amd_sched_entity, cb); 205 container_of(cb, struct amd_sched_entity, cb);
199 entity->dependency = NULL; 206 entity->dependency = NULL;
200 fence_put(f); 207 fence_put(f);
201 amd_sched_wakeup(entity->scheduler); 208 amd_sched_wakeup(entity->sched);
202} 209}
203 210
204static struct amd_sched_job * 211static struct amd_sched_job *
205amd_sched_entity_pop_job(struct amd_sched_entity *entity) 212amd_sched_entity_pop_job(struct amd_sched_entity *entity)
206{ 213{
207 struct amd_gpu_scheduler *sched = entity->scheduler; 214 struct amd_gpu_scheduler *sched = entity->sched;
208 struct amd_sched_job *job; 215 struct amd_sched_job *sched_job;
209 216
210 if (ACCESS_ONCE(entity->dependency)) 217 if (ACCESS_ONCE(entity->dependency))
211 return NULL; 218 return NULL;
212 219
213 if (!kfifo_out_peek(&entity->job_queue, &job, sizeof(job))) 220 if (!kfifo_out_peek(&entity->job_queue, &sched_job, sizeof(sched_job)))
214 return NULL; 221 return NULL;
215 222
216 while ((entity->dependency = sched->ops->dependency(job))) { 223 while ((entity->dependency = sched->ops->dependency(sched_job))) {
217 224
218 if (fence_add_callback(entity->dependency, &entity->cb, 225 if (fence_add_callback(entity->dependency, &entity->cb,
219 amd_sched_entity_wakeup)) 226 amd_sched_entity_wakeup))
@@ -222,32 +229,33 @@ amd_sched_entity_pop_job(struct amd_sched_entity *entity)
222 return NULL; 229 return NULL;
223 } 230 }
224 231
225 return job; 232 return sched_job;
226} 233}
227 234
228/** 235/**
229 * Helper to submit a job to the job queue 236 * Helper to submit a job to the job queue
230 * 237 *
231 * @job The pointer to job required to submit 238 * @sched_job The pointer to job required to submit
232 * 239 *
233 * Returns true if we could submit the job. 240 * Returns true if we could submit the job.
234 */ 241 */
235static bool amd_sched_entity_in(struct amd_sched_job *job) 242static bool amd_sched_entity_in(struct amd_sched_job *sched_job)
236{ 243{
237 struct amd_sched_entity *entity = job->s_entity; 244 struct amd_sched_entity *entity = sched_job->s_entity;
238 bool added, first = false; 245 bool added, first = false;
239 246
240 spin_lock(&entity->queue_lock); 247 spin_lock(&entity->queue_lock);
241 added = kfifo_in(&entity->job_queue, &job, sizeof(job)) == sizeof(job); 248 added = kfifo_in(&entity->job_queue, &sched_job,
249 sizeof(sched_job)) == sizeof(sched_job);
242 250
243 if (added && kfifo_len(&entity->job_queue) == sizeof(job)) 251 if (added && kfifo_len(&entity->job_queue) == sizeof(sched_job))
244 first = true; 252 first = true;
245 253
246 spin_unlock(&entity->queue_lock); 254 spin_unlock(&entity->queue_lock);
247 255
248 /* first job wakes up scheduler */ 256 /* first job wakes up scheduler */
249 if (first) 257 if (first)
250 amd_sched_wakeup(job->sched); 258 amd_sched_wakeup(sched_job->sched);
251 259
252 return added; 260 return added;
253} 261}
@@ -255,7 +263,7 @@ static bool amd_sched_entity_in(struct amd_sched_job *job)
255/** 263/**
256 * Submit a job to the job queue 264 * Submit a job to the job queue
257 * 265 *
258 * @job The pointer to job required to submit 266 * @sched_job The pointer to job required to submit
259 * 267 *
260 * Returns 0 for success, negative error code otherwise. 268 * Returns 0 for success, negative error code otherwise.
261 */ 269 */
@@ -271,9 +279,9 @@ int amd_sched_entity_push_job(struct amd_sched_job *sched_job)
271 fence_get(&fence->base); 279 fence_get(&fence->base);
272 sched_job->s_fence = fence; 280 sched_job->s_fence = fence;
273 281
274 wait_event(entity->scheduler->job_scheduled, 282 wait_event(entity->sched->job_scheduled,
275 amd_sched_entity_in(sched_job)); 283 amd_sched_entity_in(sched_job));
276 284 trace_amd_sched_job(sched_job);
277 return 0; 285 return 0;
278} 286}
279 287
@@ -301,30 +309,28 @@ static void amd_sched_wakeup(struct amd_gpu_scheduler *sched)
301static struct amd_sched_job * 309static struct amd_sched_job *
302amd_sched_select_job(struct amd_gpu_scheduler *sched) 310amd_sched_select_job(struct amd_gpu_scheduler *sched)
303{ 311{
304 struct amd_sched_job *job; 312 struct amd_sched_job *sched_job;
305 313
306 if (!amd_sched_ready(sched)) 314 if (!amd_sched_ready(sched))
307 return NULL; 315 return NULL;
308 316
309 /* Kernel run queue has higher priority than normal run queue*/ 317 /* Kernel run queue has higher priority than normal run queue*/
310 job = amd_sched_rq_select_job(&sched->kernel_rq); 318 sched_job = amd_sched_rq_select_job(&sched->kernel_rq);
311 if (job == NULL) 319 if (sched_job == NULL)
312 job = amd_sched_rq_select_job(&sched->sched_rq); 320 sched_job = amd_sched_rq_select_job(&sched->sched_rq);
313 321
314 return job; 322 return sched_job;
315} 323}
316 324
317static void amd_sched_process_job(struct fence *f, struct fence_cb *cb) 325static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
318{ 326{
319 struct amd_sched_job *sched_job = 327 struct amd_sched_fence *s_fence =
320 container_of(cb, struct amd_sched_job, cb); 328 container_of(cb, struct amd_sched_fence, cb);
321 struct amd_gpu_scheduler *sched; 329 struct amd_gpu_scheduler *sched = s_fence->sched;
322 330
323 sched = sched_job->sched;
324 amd_sched_fence_signal(sched_job->s_fence);
325 atomic_dec(&sched->hw_rq_count); 331 atomic_dec(&sched->hw_rq_count);
326 fence_put(&sched_job->s_fence->base); 332 amd_sched_fence_signal(s_fence);
327 sched->ops->process_job(sched_job); 333 fence_put(&s_fence->base);
328 wake_up_interruptible(&sched->wake_up_worker); 334 wake_up_interruptible(&sched->wake_up_worker);
329} 335}
330 336
@@ -338,87 +344,82 @@ static int amd_sched_main(void *param)
338 344
339 while (!kthread_should_stop()) { 345 while (!kthread_should_stop()) {
340 struct amd_sched_entity *entity; 346 struct amd_sched_entity *entity;
341 struct amd_sched_job *job; 347 struct amd_sched_fence *s_fence;
348 struct amd_sched_job *sched_job;
342 struct fence *fence; 349 struct fence *fence;
343 350
344 wait_event_interruptible(sched->wake_up_worker, 351 wait_event_interruptible(sched->wake_up_worker,
345 kthread_should_stop() || 352 kthread_should_stop() ||
346 (job = amd_sched_select_job(sched))); 353 (sched_job = amd_sched_select_job(sched)));
347 354
348 if (!job) 355 if (!sched_job)
349 continue; 356 continue;
350 357
351 entity = job->s_entity; 358 entity = sched_job->s_entity;
359 s_fence = sched_job->s_fence;
352 atomic_inc(&sched->hw_rq_count); 360 atomic_inc(&sched->hw_rq_count);
353 fence = sched->ops->run_job(job); 361 fence = sched->ops->run_job(sched_job);
354 if (fence) { 362 if (fence) {
355 r = fence_add_callback(fence, &job->cb, 363 r = fence_add_callback(fence, &s_fence->cb,
356 amd_sched_process_job); 364 amd_sched_process_job);
357 if (r == -ENOENT) 365 if (r == -ENOENT)
358 amd_sched_process_job(fence, &job->cb); 366 amd_sched_process_job(fence, &s_fence->cb);
359 else if (r) 367 else if (r)
360 DRM_ERROR("fence add callback failed (%d)\n", r); 368 DRM_ERROR("fence add callback failed (%d)\n", r);
361 fence_put(fence); 369 fence_put(fence);
370 } else {
371 DRM_ERROR("Failed to run job!\n");
372 amd_sched_process_job(NULL, &s_fence->cb);
362 } 373 }
363 374
364 count = kfifo_out(&entity->job_queue, &job, sizeof(job)); 375 count = kfifo_out(&entity->job_queue, &sched_job,
365 WARN_ON(count != sizeof(job)); 376 sizeof(sched_job));
377 WARN_ON(count != sizeof(sched_job));
366 wake_up(&sched->job_scheduled); 378 wake_up(&sched->job_scheduled);
367 } 379 }
368 return 0; 380 return 0;
369} 381}
370 382
371/** 383/**
372 * Create a gpu scheduler 384 * Init a gpu scheduler instance
373 * 385 *
386 * @sched The pointer to the scheduler
374 * @ops The backend operations for this scheduler. 387 * @ops The backend operations for this scheduler.
375 * @ring The the ring id for the scheduler.
376 * @hw_submissions Number of hw submissions to do. 388 * @hw_submissions Number of hw submissions to do.
389 * @name Name used for debugging
377 * 390 *
378 * Return the pointer to scheduler for success, otherwise return NULL 391 * Return 0 on success, otherwise error code.
379*/ 392*/
380struct amd_gpu_scheduler *amd_sched_create(struct amd_sched_backend_ops *ops, 393int amd_sched_init(struct amd_gpu_scheduler *sched,
381 unsigned ring, unsigned hw_submission, 394 struct amd_sched_backend_ops *ops,
382 void *priv) 395 unsigned hw_submission, const char *name)
383{ 396{
384 struct amd_gpu_scheduler *sched;
385
386 sched = kzalloc(sizeof(struct amd_gpu_scheduler), GFP_KERNEL);
387 if (!sched)
388 return NULL;
389
390 sched->ops = ops; 397 sched->ops = ops;
391 sched->ring_id = ring;
392 sched->hw_submission_limit = hw_submission; 398 sched->hw_submission_limit = hw_submission;
393 sched->priv = priv; 399 sched->name = name;
394 snprintf(sched->name, sizeof(sched->name), "amdgpu[%d]", ring);
395 amd_sched_rq_init(&sched->sched_rq); 400 amd_sched_rq_init(&sched->sched_rq);
396 amd_sched_rq_init(&sched->kernel_rq); 401 amd_sched_rq_init(&sched->kernel_rq);
397 402
398 init_waitqueue_head(&sched->wake_up_worker); 403 init_waitqueue_head(&sched->wake_up_worker);
399 init_waitqueue_head(&sched->job_scheduled); 404 init_waitqueue_head(&sched->job_scheduled);
400 atomic_set(&sched->hw_rq_count, 0); 405 atomic_set(&sched->hw_rq_count, 0);
406
401 /* Each scheduler will run on a seperate kernel thread */ 407 /* Each scheduler will run on a seperate kernel thread */
402 sched->thread = kthread_run(amd_sched_main, sched, sched->name); 408 sched->thread = kthread_run(amd_sched_main, sched, sched->name);
403 if (IS_ERR(sched->thread)) { 409 if (IS_ERR(sched->thread)) {
404 DRM_ERROR("Failed to create scheduler for id %d.\n", ring); 410 DRM_ERROR("Failed to create scheduler for %s.\n", name);
405 kfree(sched); 411 return PTR_ERR(sched->thread);
406 return NULL;
407 } 412 }
408 413
409 return sched; 414 return 0;
410} 415}
411 416
412/** 417/**
413 * Destroy a gpu scheduler 418 * Destroy a gpu scheduler
414 * 419 *
415 * @sched The pointer to the scheduler 420 * @sched The pointer to the scheduler
416 *
417 * return 0 if succeed. -1 if failed.
418 */ 421 */
419int amd_sched_destroy(struct amd_gpu_scheduler *sched) 422void amd_sched_fini(struct amd_gpu_scheduler *sched)
420{ 423{
421 kthread_stop(sched->thread); 424 kthread_stop(sched->thread);
422 kfree(sched);
423 return 0;
424} 425}
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
index 2af0e4d4d817..80b64dc22214 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
@@ -38,13 +38,15 @@ struct amd_sched_rq;
38*/ 38*/
39struct amd_sched_entity { 39struct amd_sched_entity {
40 struct list_head list; 40 struct list_head list;
41 struct amd_sched_rq *belongto_rq; 41 struct amd_sched_rq *rq;
42 atomic_t fence_seq; 42 struct amd_gpu_scheduler *sched;
43 /* the job_queue maintains the jobs submitted by clients */ 43
44 struct kfifo job_queue;
45 spinlock_t queue_lock; 44 spinlock_t queue_lock;
46 struct amd_gpu_scheduler *scheduler; 45 struct kfifo job_queue;
46
47 atomic_t fence_seq;
47 uint64_t fence_context; 48 uint64_t fence_context;
49
48 struct fence *dependency; 50 struct fence *dependency;
49 struct fence_cb cb; 51 struct fence_cb cb;
50}; 52};
@@ -62,13 +64,13 @@ struct amd_sched_rq {
62 64
63struct amd_sched_fence { 65struct amd_sched_fence {
64 struct fence base; 66 struct fence base;
65 struct amd_gpu_scheduler *scheduler; 67 struct fence_cb cb;
68 struct amd_gpu_scheduler *sched;
66 spinlock_t lock; 69 spinlock_t lock;
67 void *owner; 70 void *owner;
68}; 71};
69 72
70struct amd_sched_job { 73struct amd_sched_job {
71 struct fence_cb cb;
72 struct amd_gpu_scheduler *sched; 74 struct amd_gpu_scheduler *sched;
73 struct amd_sched_entity *s_entity; 75 struct amd_sched_entity *s_entity;
74 struct amd_sched_fence *s_fence; 76 struct amd_sched_fence *s_fence;
@@ -91,32 +93,29 @@ static inline struct amd_sched_fence *to_amd_sched_fence(struct fence *f)
91 * these functions should be implemented in driver side 93 * these functions should be implemented in driver side
92*/ 94*/
93struct amd_sched_backend_ops { 95struct amd_sched_backend_ops {
94 struct fence *(*dependency)(struct amd_sched_job *job); 96 struct fence *(*dependency)(struct amd_sched_job *sched_job);
95 struct fence *(*run_job)(struct amd_sched_job *job); 97 struct fence *(*run_job)(struct amd_sched_job *sched_job);
96 void (*process_job)(struct amd_sched_job *job);
97}; 98};
98 99
99/** 100/**
100 * One scheduler is implemented for each hardware ring 101 * One scheduler is implemented for each hardware ring
101*/ 102*/
102struct amd_gpu_scheduler { 103struct amd_gpu_scheduler {
103 struct task_struct *thread; 104 struct amd_sched_backend_ops *ops;
105 uint32_t hw_submission_limit;
106 const char *name;
104 struct amd_sched_rq sched_rq; 107 struct amd_sched_rq sched_rq;
105 struct amd_sched_rq kernel_rq; 108 struct amd_sched_rq kernel_rq;
106 atomic_t hw_rq_count;
107 struct amd_sched_backend_ops *ops;
108 uint32_t ring_id;
109 wait_queue_head_t wake_up_worker; 109 wait_queue_head_t wake_up_worker;
110 wait_queue_head_t job_scheduled; 110 wait_queue_head_t job_scheduled;
111 uint32_t hw_submission_limit; 111 atomic_t hw_rq_count;
112 char name[20]; 112 struct task_struct *thread;
113 void *priv;
114}; 113};
115 114
116struct amd_gpu_scheduler * 115int amd_sched_init(struct amd_gpu_scheduler *sched,
117amd_sched_create(struct amd_sched_backend_ops *ops, 116 struct amd_sched_backend_ops *ops,
118 uint32_t ring, uint32_t hw_submission, void *priv); 117 uint32_t hw_submission, const char *name);
119int amd_sched_destroy(struct amd_gpu_scheduler *sched); 118void amd_sched_fini(struct amd_gpu_scheduler *sched);
120 119
121int amd_sched_entity_init(struct amd_gpu_scheduler *sched, 120int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
122 struct amd_sched_entity *entity, 121 struct amd_sched_entity *entity,
diff --git a/drivers/gpu/drm/amd/scheduler/sched_fence.c b/drivers/gpu/drm/amd/scheduler/sched_fence.c
index e62c37920e11..d802638094f4 100644
--- a/drivers/gpu/drm/amd/scheduler/sched_fence.c
+++ b/drivers/gpu/drm/amd/scheduler/sched_fence.c
@@ -36,7 +36,7 @@ struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *s_entity
36 if (fence == NULL) 36 if (fence == NULL)
37 return NULL; 37 return NULL;
38 fence->owner = owner; 38 fence->owner = owner;
39 fence->scheduler = s_entity->scheduler; 39 fence->sched = s_entity->sched;
40 spin_lock_init(&fence->lock); 40 spin_lock_init(&fence->lock);
41 41
42 seq = atomic_inc_return(&s_entity->fence_seq); 42 seq = atomic_inc_return(&s_entity->fence_seq);
@@ -63,7 +63,7 @@ static const char *amd_sched_fence_get_driver_name(struct fence *fence)
63static const char *amd_sched_fence_get_timeline_name(struct fence *f) 63static const char *amd_sched_fence_get_timeline_name(struct fence *f)
64{ 64{
65 struct amd_sched_fence *fence = to_amd_sched_fence(f); 65 struct amd_sched_fence *fence = to_amd_sched_fence(f);
66 return (const char *)fence->scheduler->name; 66 return (const char *)fence->sched->name;
67} 67}
68 68
69static bool amd_sched_fence_enable_signaling(struct fence *f) 69static bool amd_sched_fence_enable_signaling(struct fence *f)
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 9a860ca1e9d7..d93e7378c077 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -520,7 +520,8 @@ EXPORT_SYMBOL(drm_ioctl_permit);
520 520
521/** Ioctl table */ 521/** Ioctl table */
522static const struct drm_ioctl_desc drm_ioctls[] = { 522static const struct drm_ioctl_desc drm_ioctls[] = {
523 DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, DRM_UNLOCKED|DRM_RENDER_ALLOW), 523 DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version,
524 DRM_UNLOCKED|DRM_RENDER_ALLOW|DRM_CONTROL_ALLOW),
524 DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0), 525 DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0),
525 DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0), 526 DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0),
526 DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY), 527 DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY),
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index df0b61a60501..bd1a4156f647 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -77,6 +77,7 @@ config DRM_EXYNOS_VIDI
77config DRM_EXYNOS_G2D 77config DRM_EXYNOS_G2D
78 bool "Exynos DRM G2D" 78 bool "Exynos DRM G2D"
79 depends on DRM_EXYNOS && !VIDEO_SAMSUNG_S5P_G2D 79 depends on DRM_EXYNOS && !VIDEO_SAMSUNG_S5P_G2D
80 select FRAME_VECTOR
80 help 81 help
81 Choose this option if you want to use Exynos G2D for DRM. 82 Choose this option if you want to use Exynos G2D for DRM.
82 83
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index 535b4ad6c4b1..3734c34aed16 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -194,10 +194,8 @@ struct g2d_cmdlist_userptr {
194 dma_addr_t dma_addr; 194 dma_addr_t dma_addr;
195 unsigned long userptr; 195 unsigned long userptr;
196 unsigned long size; 196 unsigned long size;
197 struct page **pages; 197 struct frame_vector *vec;
198 unsigned int npages;
199 struct sg_table *sgt; 198 struct sg_table *sgt;
200 struct vm_area_struct *vma;
201 atomic_t refcount; 199 atomic_t refcount;
202 bool in_pool; 200 bool in_pool;
203 bool out_of_list; 201 bool out_of_list;
@@ -367,6 +365,7 @@ static void g2d_userptr_put_dma_addr(struct drm_device *drm_dev,
367{ 365{
368 struct g2d_cmdlist_userptr *g2d_userptr = 366 struct g2d_cmdlist_userptr *g2d_userptr =
369 (struct g2d_cmdlist_userptr *)obj; 367 (struct g2d_cmdlist_userptr *)obj;
368 struct page **pages;
370 369
371 if (!obj) 370 if (!obj)
372 return; 371 return;
@@ -386,19 +385,21 @@ out:
386 exynos_gem_unmap_sgt_from_dma(drm_dev, g2d_userptr->sgt, 385 exynos_gem_unmap_sgt_from_dma(drm_dev, g2d_userptr->sgt,
387 DMA_BIDIRECTIONAL); 386 DMA_BIDIRECTIONAL);
388 387
389 exynos_gem_put_pages_to_userptr(g2d_userptr->pages, 388 pages = frame_vector_pages(g2d_userptr->vec);
390 g2d_userptr->npages, 389 if (!IS_ERR(pages)) {
391 g2d_userptr->vma); 390 int i;
392 391
393 exynos_gem_put_vma(g2d_userptr->vma); 392 for (i = 0; i < frame_vector_count(g2d_userptr->vec); i++)
393 set_page_dirty_lock(pages[i]);
394 }
395 put_vaddr_frames(g2d_userptr->vec);
396 frame_vector_destroy(g2d_userptr->vec);
394 397
395 if (!g2d_userptr->out_of_list) 398 if (!g2d_userptr->out_of_list)
396 list_del_init(&g2d_userptr->list); 399 list_del_init(&g2d_userptr->list);
397 400
398 sg_free_table(g2d_userptr->sgt); 401 sg_free_table(g2d_userptr->sgt);
399 kfree(g2d_userptr->sgt); 402 kfree(g2d_userptr->sgt);
400
401 drm_free_large(g2d_userptr->pages);
402 kfree(g2d_userptr); 403 kfree(g2d_userptr);
403} 404}
404 405
@@ -412,9 +413,7 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
412 struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv; 413 struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
413 struct g2d_cmdlist_userptr *g2d_userptr; 414 struct g2d_cmdlist_userptr *g2d_userptr;
414 struct g2d_data *g2d; 415 struct g2d_data *g2d;
415 struct page **pages;
416 struct sg_table *sgt; 416 struct sg_table *sgt;
417 struct vm_area_struct *vma;
418 unsigned long start, end; 417 unsigned long start, end;
419 unsigned int npages, offset; 418 unsigned int npages, offset;
420 int ret; 419 int ret;
@@ -460,65 +459,40 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
460 return ERR_PTR(-ENOMEM); 459 return ERR_PTR(-ENOMEM);
461 460
462 atomic_set(&g2d_userptr->refcount, 1); 461 atomic_set(&g2d_userptr->refcount, 1);
462 g2d_userptr->size = size;
463 463
464 start = userptr & PAGE_MASK; 464 start = userptr & PAGE_MASK;
465 offset = userptr & ~PAGE_MASK; 465 offset = userptr & ~PAGE_MASK;
466 end = PAGE_ALIGN(userptr + size); 466 end = PAGE_ALIGN(userptr + size);
467 npages = (end - start) >> PAGE_SHIFT; 467 npages = (end - start) >> PAGE_SHIFT;
468 g2d_userptr->npages = npages; 468 g2d_userptr->vec = frame_vector_create(npages);
469 469 if (!g2d_userptr->vec) {
470 pages = drm_calloc_large(npages, sizeof(struct page *));
471 if (!pages) {
472 DRM_ERROR("failed to allocate pages.\n");
473 ret = -ENOMEM; 470 ret = -ENOMEM;
474 goto err_free; 471 goto err_free;
475 } 472 }
476 473
477 down_read(&current->mm->mmap_sem); 474 ret = get_vaddr_frames(start, npages, true, true, g2d_userptr->vec);
478 vma = find_vma(current->mm, userptr); 475 if (ret != npages) {
479 if (!vma) { 476 DRM_ERROR("failed to get user pages from userptr.\n");
480 up_read(&current->mm->mmap_sem); 477 if (ret < 0)
481 DRM_ERROR("failed to get vm region.\n"); 478 goto err_destroy_framevec;
482 ret = -EFAULT; 479 ret = -EFAULT;
483 goto err_free_pages; 480 goto err_put_framevec;
484 } 481 }
485 482 if (frame_vector_to_pages(g2d_userptr->vec) < 0) {
486 if (vma->vm_end < userptr + size) {
487 up_read(&current->mm->mmap_sem);
488 DRM_ERROR("vma is too small.\n");
489 ret = -EFAULT; 483 ret = -EFAULT;
490 goto err_free_pages; 484 goto err_put_framevec;
491 }
492
493 g2d_userptr->vma = exynos_gem_get_vma(vma);
494 if (!g2d_userptr->vma) {
495 up_read(&current->mm->mmap_sem);
496 DRM_ERROR("failed to copy vma.\n");
497 ret = -ENOMEM;
498 goto err_free_pages;
499 }
500
501 g2d_userptr->size = size;
502
503 ret = exynos_gem_get_pages_from_userptr(start & PAGE_MASK,
504 npages, pages, vma);
505 if (ret < 0) {
506 up_read(&current->mm->mmap_sem);
507 DRM_ERROR("failed to get user pages from userptr.\n");
508 goto err_put_vma;
509 } 485 }
510 486
511 up_read(&current->mm->mmap_sem);
512 g2d_userptr->pages = pages;
513
514 sgt = kzalloc(sizeof(*sgt), GFP_KERNEL); 487 sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
515 if (!sgt) { 488 if (!sgt) {
516 ret = -ENOMEM; 489 ret = -ENOMEM;
517 goto err_free_userptr; 490 goto err_put_framevec;
518 } 491 }
519 492
520 ret = sg_alloc_table_from_pages(sgt, pages, npages, offset, 493 ret = sg_alloc_table_from_pages(sgt,
521 size, GFP_KERNEL); 494 frame_vector_pages(g2d_userptr->vec),
495 npages, offset, size, GFP_KERNEL);
522 if (ret < 0) { 496 if (ret < 0) {
523 DRM_ERROR("failed to get sgt from pages.\n"); 497 DRM_ERROR("failed to get sgt from pages.\n");
524 goto err_free_sgt; 498 goto err_free_sgt;
@@ -553,16 +527,11 @@ err_sg_free_table:
553err_free_sgt: 527err_free_sgt:
554 kfree(sgt); 528 kfree(sgt);
555 529
556err_free_userptr: 530err_put_framevec:
557 exynos_gem_put_pages_to_userptr(g2d_userptr->pages, 531 put_vaddr_frames(g2d_userptr->vec);
558 g2d_userptr->npages,
559 g2d_userptr->vma);
560
561err_put_vma:
562 exynos_gem_put_vma(g2d_userptr->vma);
563 532
564err_free_pages: 533err_destroy_framevec:
565 drm_free_large(pages); 534 frame_vector_destroy(g2d_userptr->vec);
566 535
567err_free: 536err_free:
568 kfree(g2d_userptr); 537 kfree(g2d_userptr);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 62b9ea1b07fb..f12fbc36b120 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -366,103 +366,6 @@ int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
366 return 0; 366 return 0;
367} 367}
368 368
369struct vm_area_struct *exynos_gem_get_vma(struct vm_area_struct *vma)
370{
371 struct vm_area_struct *vma_copy;
372
373 vma_copy = kmalloc(sizeof(*vma_copy), GFP_KERNEL);
374 if (!vma_copy)
375 return NULL;
376
377 if (vma->vm_ops && vma->vm_ops->open)
378 vma->vm_ops->open(vma);
379
380 if (vma->vm_file)
381 get_file(vma->vm_file);
382
383 memcpy(vma_copy, vma, sizeof(*vma));
384
385 vma_copy->vm_mm = NULL;
386 vma_copy->vm_next = NULL;
387 vma_copy->vm_prev = NULL;
388
389 return vma_copy;
390}
391
392void exynos_gem_put_vma(struct vm_area_struct *vma)
393{
394 if (!vma)
395 return;
396
397 if (vma->vm_ops && vma->vm_ops->close)
398 vma->vm_ops->close(vma);
399
400 if (vma->vm_file)
401 fput(vma->vm_file);
402
403 kfree(vma);
404}
405
406int exynos_gem_get_pages_from_userptr(unsigned long start,
407 unsigned int npages,
408 struct page **pages,
409 struct vm_area_struct *vma)
410{
411 int get_npages;
412
413 /* the memory region mmaped with VM_PFNMAP. */
414 if (vma_is_io(vma)) {
415 unsigned int i;
416
417 for (i = 0; i < npages; ++i, start += PAGE_SIZE) {
418 unsigned long pfn;
419 int ret = follow_pfn(vma, start, &pfn);
420 if (ret)
421 return ret;
422
423 pages[i] = pfn_to_page(pfn);
424 }
425
426 if (i != npages) {
427 DRM_ERROR("failed to get user_pages.\n");
428 return -EINVAL;
429 }
430
431 return 0;
432 }
433
434 get_npages = get_user_pages(current, current->mm, start,
435 npages, 1, 1, pages, NULL);
436 get_npages = max(get_npages, 0);
437 if (get_npages != npages) {
438 DRM_ERROR("failed to get user_pages.\n");
439 while (get_npages)
440 put_page(pages[--get_npages]);
441 return -EFAULT;
442 }
443
444 return 0;
445}
446
447void exynos_gem_put_pages_to_userptr(struct page **pages,
448 unsigned int npages,
449 struct vm_area_struct *vma)
450{
451 if (!vma_is_io(vma)) {
452 unsigned int i;
453
454 for (i = 0; i < npages; i++) {
455 set_page_dirty_lock(pages[i]);
456
457 /*
458 * undo the reference we took when populating
459 * the table.
460 */
461 put_page(pages[i]);
462 }
463 }
464}
465
466int exynos_gem_map_sgt_with_dma(struct drm_device *drm_dev, 369int exynos_gem_map_sgt_with_dma(struct drm_device *drm_dev,
467 struct sg_table *sgt, 370 struct sg_table *sgt,
468 enum dma_data_direction dir) 371 enum dma_data_direction dir)
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
index 82be6b86a168..d1e300dcd544 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
@@ -58,7 +58,8 @@ static void fsl_dcu_drm_plane_atomic_disable(struct drm_plane *plane,
58 struct drm_plane_state *old_state) 58 struct drm_plane_state *old_state)
59{ 59{
60 struct fsl_dcu_drm_device *fsl_dev = plane->dev->dev_private; 60 struct fsl_dcu_drm_device *fsl_dev = plane->dev->dev_private;
61 unsigned int index, value, ret; 61 unsigned int value;
62 int index, ret;
62 63
63 index = fsl_dcu_drm_plane_index(plane); 64 index = fsl_dcu_drm_plane_index(plane);
64 if (index < 0) 65 if (index < 0)
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 5a244ab9395b..39d73dbc1c47 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -640,6 +640,32 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
640 position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3; 640 position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
641 641
642 /* 642 /*
643 * On HSW, the DSL reg (0x70000) appears to return 0 if we
644 * read it just before the start of vblank. So try it again
645 * so we don't accidentally end up spanning a vblank frame
646 * increment, causing the pipe_update_end() code to squak at us.
647 *
648 * The nature of this problem means we can't simply check the ISR
649 * bit and return the vblank start value; nor can we use the scanline
650 * debug register in the transcoder as it appears to have the same
651 * problem. We may need to extend this to include other platforms,
652 * but so far testing only shows the problem on HSW.
653 */
654 if (IS_HASWELL(dev) && !position) {
655 int i, temp;
656
657 for (i = 0; i < 100; i++) {
658 udelay(1);
659 temp = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) &
660 DSL_LINEMASK_GEN3;
661 if (temp != position) {
662 position = temp;
663 break;
664 }
665 }
666 }
667
668 /*
643 * See update_scanline_offset() for the details on the 669 * See update_scanline_offset() for the details on the
644 * scanline_offset adjustment. 670 * scanline_offset adjustment.
645 */ 671 */
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index 89c1a8ce1f98..2a5c76faf9f8 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -430,7 +430,7 @@ void intel_audio_codec_enable(struct intel_encoder *intel_encoder)
430 430
431/** 431/**
432 * intel_audio_codec_disable - Disable the audio codec for HD audio 432 * intel_audio_codec_disable - Disable the audio codec for HD audio
433 * @encoder: encoder on which to disable audio 433 * @intel_encoder: encoder on which to disable audio
434 * 434 *
435 * The disable sequences must be performed before disabling the transcoder or 435 * The disable sequences must be performed before disabling the transcoder or
436 * port. 436 * port.
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index b3e437b3bb54..c19e669ffe50 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -42,7 +42,7 @@ find_section(const void *_bdb, int section_id)
42 const struct bdb_header *bdb = _bdb; 42 const struct bdb_header *bdb = _bdb;
43 const u8 *base = _bdb; 43 const u8 *base = _bdb;
44 int index = 0; 44 int index = 0;
45 u16 total, current_size; 45 u32 total, current_size;
46 u8 current_id; 46 u8 current_id;
47 47
48 /* skip to first section */ 48 /* skip to first section */
@@ -57,6 +57,10 @@ find_section(const void *_bdb, int section_id)
57 current_size = *((const u16 *)(base + index)); 57 current_size = *((const u16 *)(base + index));
58 index += 2; 58 index += 2;
59 59
60 /* The MIPI Sequence Block v3+ has a separate size field. */
61 if (current_id == BDB_MIPI_SEQUENCE && *(base + index) >= 3)
62 current_size = *((const u32 *)(base + index + 1));
63
60 if (index + current_size > total) 64 if (index + current_size > total)
61 return NULL; 65 return NULL;
62 66
@@ -799,6 +803,12 @@ parse_mipi(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
799 return; 803 return;
800 } 804 }
801 805
806 /* Fail gracefully for forward incompatible sequence block. */
807 if (sequence->version >= 3) {
808 DRM_ERROR("Unable to parse MIPI Sequence Block v3+\n");
809 return;
810 }
811
802 DRM_DEBUG_DRIVER("Found MIPI sequence block\n"); 812 DRM_DEBUG_DRIVER("Found MIPI sequence block\n");
803 813
804 block_size = get_blocksize(sequence); 814 block_size = get_blocksize(sequence);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 8cc9264f7809..cf418be7d30a 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -15087,9 +15087,12 @@ static void readout_plane_state(struct intel_crtc *crtc,
15087 15087
15088 plane_state = to_intel_plane_state(p->base.state); 15088 plane_state = to_intel_plane_state(p->base.state);
15089 15089
15090 if (p->base.type == DRM_PLANE_TYPE_PRIMARY) 15090 if (p->base.type == DRM_PLANE_TYPE_PRIMARY) {
15091 plane_state->visible = primary_get_hw_state(crtc); 15091 plane_state->visible = primary_get_hw_state(crtc);
15092 else { 15092 if (plane_state->visible)
15093 crtc->base.state->plane_mask |=
15094 1 << drm_plane_index(&p->base);
15095 } else {
15093 if (active) 15096 if (active)
15094 p->disable_plane(&p->base, &crtc->base); 15097 p->disable_plane(&p->base, &crtc->base);
15095 15098
diff --git a/drivers/gpu/drm/mgag200/mgag200_fb.c b/drivers/gpu/drm/mgag200/mgag200_fb.c
index 87de15ea1f93..b35b5b2db4ec 100644
--- a/drivers/gpu/drm/mgag200/mgag200_fb.c
+++ b/drivers/gpu/drm/mgag200/mgag200_fb.c
@@ -186,17 +186,19 @@ static int mgag200fb_create(struct drm_fb_helper *helper,
186 186
187 sysram = vmalloc(size); 187 sysram = vmalloc(size);
188 if (!sysram) 188 if (!sysram)
189 return -ENOMEM; 189 goto err_sysram;
190 190
191 info = drm_fb_helper_alloc_fbi(helper); 191 info = drm_fb_helper_alloc_fbi(helper);
192 if (IS_ERR(info)) 192 if (IS_ERR(info)) {
193 return PTR_ERR(info); 193 ret = PTR_ERR(info);
194 goto err_alloc_fbi;
195 }
194 196
195 info->par = mfbdev; 197 info->par = mfbdev;
196 198
197 ret = mgag200_framebuffer_init(dev, &mfbdev->mfb, &mode_cmd, gobj); 199 ret = mgag200_framebuffer_init(dev, &mfbdev->mfb, &mode_cmd, gobj);
198 if (ret) 200 if (ret)
199 return ret; 201 goto err_framebuffer_init;
200 202
201 mfbdev->sysram = sysram; 203 mfbdev->sysram = sysram;
202 mfbdev->size = size; 204 mfbdev->size = size;
@@ -225,7 +227,17 @@ static int mgag200fb_create(struct drm_fb_helper *helper,
225 227
226 DRM_DEBUG_KMS("allocated %dx%d\n", 228 DRM_DEBUG_KMS("allocated %dx%d\n",
227 fb->width, fb->height); 229 fb->width, fb->height);
230
228 return 0; 231 return 0;
232
233err_framebuffer_init:
234 drm_fb_helper_release_fbi(helper);
235err_alloc_fbi:
236 vfree(sysram);
237err_sysram:
238 drm_gem_object_unreference_unlocked(gobj);
239
240 return ret;
229} 241}
230 242
231static int mga_fbdev_destroy(struct drm_device *dev, 243static int mga_fbdev_destroy(struct drm_device *dev,
@@ -276,23 +288,26 @@ int mgag200_fbdev_init(struct mga_device *mdev)
276 ret = drm_fb_helper_init(mdev->dev, &mfbdev->helper, 288 ret = drm_fb_helper_init(mdev->dev, &mfbdev->helper,
277 mdev->num_crtc, MGAG200FB_CONN_LIMIT); 289 mdev->num_crtc, MGAG200FB_CONN_LIMIT);
278 if (ret) 290 if (ret)
279 return ret; 291 goto err_fb_helper;
280 292
281 ret = drm_fb_helper_single_add_all_connectors(&mfbdev->helper); 293 ret = drm_fb_helper_single_add_all_connectors(&mfbdev->helper);
282 if (ret) 294 if (ret)
283 goto fini; 295 goto err_fb_setup;
284 296
285 /* disable all the possible outputs/crtcs before entering KMS mode */ 297 /* disable all the possible outputs/crtcs before entering KMS mode */
286 drm_helper_disable_unused_functions(mdev->dev); 298 drm_helper_disable_unused_functions(mdev->dev);
287 299
288 ret = drm_fb_helper_initial_config(&mfbdev->helper, bpp_sel); 300 ret = drm_fb_helper_initial_config(&mfbdev->helper, bpp_sel);
289 if (ret) 301 if (ret)
290 goto fini; 302 goto err_fb_setup;
291 303
292 return 0; 304 return 0;
293 305
294fini: 306err_fb_setup:
295 drm_fb_helper_fini(&mfbdev->helper); 307 drm_fb_helper_fini(&mfbdev->helper);
308err_fb_helper:
309 mdev->mfbdev = NULL;
310
296 return ret; 311 return ret;
297} 312}
298 313
diff --git a/drivers/gpu/drm/mgag200/mgag200_main.c b/drivers/gpu/drm/mgag200/mgag200_main.c
index de06388069e7..b1a0f5656175 100644
--- a/drivers/gpu/drm/mgag200/mgag200_main.c
+++ b/drivers/gpu/drm/mgag200/mgag200_main.c
@@ -220,7 +220,7 @@ int mgag200_driver_load(struct drm_device *dev, unsigned long flags)
220 } 220 }
221 r = mgag200_mm_init(mdev); 221 r = mgag200_mm_init(mdev);
222 if (r) 222 if (r)
223 goto out; 223 goto err_mm;
224 224
225 drm_mode_config_init(dev); 225 drm_mode_config_init(dev);
226 dev->mode_config.funcs = (void *)&mga_mode_funcs; 226 dev->mode_config.funcs = (void *)&mga_mode_funcs;
@@ -233,7 +233,7 @@ int mgag200_driver_load(struct drm_device *dev, unsigned long flags)
233 r = mgag200_modeset_init(mdev); 233 r = mgag200_modeset_init(mdev);
234 if (r) { 234 if (r) {
235 dev_err(&dev->pdev->dev, "Fatal error during modeset init: %d\n", r); 235 dev_err(&dev->pdev->dev, "Fatal error during modeset init: %d\n", r);
236 goto out; 236 goto err_modeset;
237 } 237 }
238 238
239 /* Make small buffers to store a hardware cursor (double buffered icon updates) */ 239 /* Make small buffers to store a hardware cursor (double buffered icon updates) */
@@ -241,20 +241,24 @@ int mgag200_driver_load(struct drm_device *dev, unsigned long flags)
241 &mdev->cursor.pixels_1); 241 &mdev->cursor.pixels_1);
242 mgag200_bo_create(dev, roundup(48*64, PAGE_SIZE), 0, 0, 242 mgag200_bo_create(dev, roundup(48*64, PAGE_SIZE), 0, 0,
243 &mdev->cursor.pixels_2); 243 &mdev->cursor.pixels_2);
244 if (!mdev->cursor.pixels_2 || !mdev->cursor.pixels_1) 244 if (!mdev->cursor.pixels_2 || !mdev->cursor.pixels_1) {
245 goto cursor_nospace; 245 mdev->cursor.pixels_1 = NULL;
246 mdev->cursor.pixels_current = mdev->cursor.pixels_1; 246 mdev->cursor.pixels_2 = NULL;
247 mdev->cursor.pixels_prev = mdev->cursor.pixels_2; 247 dev_warn(&dev->pdev->dev,
248 goto cursor_done; 248 "Could not allocate space for cursors. Not doing hardware cursors.\n");
249 cursor_nospace: 249 } else {
250 mdev->cursor.pixels_1 = NULL; 250 mdev->cursor.pixels_current = mdev->cursor.pixels_1;
251 mdev->cursor.pixels_2 = NULL; 251 mdev->cursor.pixels_prev = mdev->cursor.pixels_2;
252 dev_warn(&dev->pdev->dev, "Could not allocate space for cursors. Not doing hardware cursors.\n"); 252 }
253 cursor_done: 253
254 254 return 0;
255out: 255
256 if (r) 256err_modeset:
257 mgag200_driver_unload(dev); 257 drm_mode_config_cleanup(dev);
258 mgag200_mm_fini(mdev);
259err_mm:
260 dev->dev_private = NULL;
261
258 return r; 262 return r;
259} 263}
260 264
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
index b1f73bee1368..b0d4b53b97f4 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
@@ -178,7 +178,6 @@ static int mdp5_hw_irqdomain_map(struct irq_domain *d,
178 178
179 irq_set_chip_and_handler(irq, &mdp5_hw_irq_chip, handle_level_irq); 179 irq_set_chip_and_handler(irq, &mdp5_hw_irq_chip, handle_level_irq);
180 irq_set_chip_data(irq, mdp5_kms); 180 irq_set_chip_data(irq, mdp5_kms);
181 set_irq_flags(irq, IRQF_VALID);
182 181
183 return 0; 182 return 0;
184} 183}
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 7c6225c84ba6..dd845f82cc24 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -886,13 +886,15 @@ static enum drm_connector_status qxl_conn_detect(
886 drm_connector_to_qxl_output(connector); 886 drm_connector_to_qxl_output(connector);
887 struct drm_device *ddev = connector->dev; 887 struct drm_device *ddev = connector->dev;
888 struct qxl_device *qdev = ddev->dev_private; 888 struct qxl_device *qdev = ddev->dev_private;
889 int connected; 889 bool connected = false;
890 890
891 /* The first monitor is always connected */ 891 /* The first monitor is always connected */
892 connected = (output->index == 0) || 892 if (!qdev->client_monitors_config) {
893 (qdev->client_monitors_config && 893 if (output->index == 0)
894 qdev->client_monitors_config->count > output->index && 894 connected = true;
895 qxl_head_enabled(&qdev->client_monitors_config->heads[output->index])); 895 } else
896 connected = qdev->client_monitors_config->count > output->index &&
897 qxl_head_enabled(&qdev->client_monitors_config->heads[output->index]);
896 898
897 DRM_DEBUG("#%d connected: %d\n", output->index, connected); 899 DRM_DEBUG("#%d connected: %d\n", output->index, connected);
898 if (!connected) 900 if (!connected)
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index d8319dae8358..f3f562f6d848 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1573,10 +1573,12 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
1573 1573
1574 drm_kms_helper_poll_disable(dev); 1574 drm_kms_helper_poll_disable(dev);
1575 1575
1576 drm_modeset_lock_all(dev);
1576 /* turn off display hw */ 1577 /* turn off display hw */
1577 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 1578 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
1578 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); 1579 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
1579 } 1580 }
1581 drm_modeset_unlock_all(dev);
1580 1582
1581 /* unpin the front buffers and cursors */ 1583 /* unpin the front buffers and cursors */
1582 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 1584 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
@@ -1734,9 +1736,11 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
1734 if (fbcon) { 1736 if (fbcon) {
1735 drm_helper_resume_force_mode(dev); 1737 drm_helper_resume_force_mode(dev);
1736 /* turn on display hw */ 1738 /* turn on display hw */
1739 drm_modeset_lock_all(dev);
1737 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 1740 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
1738 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); 1741 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
1739 } 1742 }
1743 drm_modeset_unlock_all(dev);
1740 } 1744 }
1741 1745
1742 drm_kms_helper_poll_enable(dev); 1746 drm_kms_helper_poll_enable(dev);
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 787cd8fd897f..e9115d3f67b0 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -2927,6 +2927,7 @@ static struct si_dpm_quirk si_dpm_quirk_list[] = {
2927 { PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 }, 2927 { PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 },
2928 { PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0xe271, 0, 120000 }, 2928 { PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0xe271, 0, 120000 },
2929 { PCI_VENDOR_ID_ATI, 0x6810, 0x174b, 0xe271, 85000, 90000 }, 2929 { PCI_VENDOR_ID_ATI, 0x6810, 0x174b, 0xe271, 85000, 90000 },
2930 { PCI_VENDOR_ID_ATI, 0x6811, 0x1762, 0x2015, 0, 120000 },
2930 { 0, 0, 0, 0 }, 2931 { 0, 0, 0, 0 },
2931}; 2932};
2932 2933
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 8d9b7de25613..745e996d2dbc 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -882,6 +882,8 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
882 if (ret) 882 if (ret)
883 return ret; 883 return ret;
884 man = &bdev->man[mem_type]; 884 man = &bdev->man[mem_type];
885 if (!man->has_type || !man->use_type)
886 continue;
885 887
886 type_ok = ttm_bo_mt_compatible(man, mem_type, place, 888 type_ok = ttm_bo_mt_compatible(man, mem_type, place,
887 &cur_flags); 889 &cur_flags);
@@ -889,6 +891,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
889 if (!type_ok) 891 if (!type_ok)
890 continue; 892 continue;
891 893
894 type_found = true;
892 cur_flags = ttm_bo_select_caching(man, bo->mem.placement, 895 cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
893 cur_flags); 896 cur_flags);
894 /* 897 /*
@@ -901,12 +904,10 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
901 if (mem_type == TTM_PL_SYSTEM) 904 if (mem_type == TTM_PL_SYSTEM)
902 break; 905 break;
903 906
904 if (man->has_type && man->use_type) { 907 ret = (*man->func->get_node)(man, bo, place, mem);
905 type_found = true; 908 if (unlikely(ret))
906 ret = (*man->func->get_node)(man, bo, place, mem); 909 return ret;
907 if (unlikely(ret)) 910
908 return ret;
909 }
910 if (mem->mm_node) 911 if (mem->mm_node)
911 break; 912 break;
912 } 913 }
@@ -917,9 +918,6 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
917 return 0; 918 return 0;
918 } 919 }
919 920
920 if (!type_found)
921 return -EINVAL;
922
923 for (i = 0; i < placement->num_busy_placement; ++i) { 921 for (i = 0; i < placement->num_busy_placement; ++i) {
924 const struct ttm_place *place = &placement->busy_placement[i]; 922 const struct ttm_place *place = &placement->busy_placement[i];
925 923
@@ -927,11 +925,12 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
927 if (ret) 925 if (ret)
928 return ret; 926 return ret;
929 man = &bdev->man[mem_type]; 927 man = &bdev->man[mem_type];
930 if (!man->has_type) 928 if (!man->has_type || !man->use_type)
931 continue; 929 continue;
932 if (!ttm_bo_mt_compatible(man, mem_type, place, &cur_flags)) 930 if (!ttm_bo_mt_compatible(man, mem_type, place, &cur_flags))
933 continue; 931 continue;
934 932
933 type_found = true;
935 cur_flags = ttm_bo_select_caching(man, bo->mem.placement, 934 cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
936 cur_flags); 935 cur_flags);
937 /* 936 /*
@@ -957,8 +956,13 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
957 if (ret == -ERESTARTSYS) 956 if (ret == -ERESTARTSYS)
958 has_erestartsys = true; 957 has_erestartsys = true;
959 } 958 }
960 ret = (has_erestartsys) ? -ERESTARTSYS : -ENOMEM; 959
961 return ret; 960 if (!type_found) {
961 printk(KERN_ERR TTM_PFX "No compatible memory type found.\n");
962 return -EINVAL;
963 }
964
965 return (has_erestartsys) ? -ERESTARTSYS : -ENOMEM;
962} 966}
963EXPORT_SYMBOL(ttm_bo_mem_space); 967EXPORT_SYMBOL(ttm_bo_mem_space);
964 968
diff --git a/drivers/gpu/drm/vmwgfx/Kconfig b/drivers/gpu/drm/vmwgfx/Kconfig
index 67720f70fe29..b49445df8a7e 100644
--- a/drivers/gpu/drm/vmwgfx/Kconfig
+++ b/drivers/gpu/drm/vmwgfx/Kconfig
@@ -1,6 +1,6 @@
1config DRM_VMWGFX 1config DRM_VMWGFX
2 tristate "DRM driver for VMware Virtual GPU" 2 tristate "DRM driver for VMware Virtual GPU"
3 depends on DRM && PCI 3 depends on DRM && PCI && X86
4 select FB_DEFERRED_IO 4 select FB_DEFERRED_IO
5 select FB_CFB_FILLRECT 5 select FB_CFB_FILLRECT
6 select FB_CFB_COPYAREA 6 select FB_CFB_COPYAREA
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
index ce659a125f2b..092ea81eeff7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
@@ -311,7 +311,6 @@ static int vmw_cotable_unbind(struct vmw_resource *res,
311 struct vmw_private *dev_priv = res->dev_priv; 311 struct vmw_private *dev_priv = res->dev_priv;
312 struct ttm_buffer_object *bo = val_buf->bo; 312 struct ttm_buffer_object *bo = val_buf->bo;
313 struct vmw_fence_obj *fence; 313 struct vmw_fence_obj *fence;
314 int ret;
315 314
316 if (list_empty(&res->mob_head)) 315 if (list_empty(&res->mob_head))
317 return 0; 316 return 0;
@@ -328,7 +327,7 @@ static int vmw_cotable_unbind(struct vmw_resource *res,
328 if (likely(fence != NULL)) 327 if (likely(fence != NULL))
329 vmw_fence_obj_unreference(&fence); 328 vmw_fence_obj_unreference(&fence);
330 329
331 return ret; 330 return 0;
332} 331}
333 332
334/** 333/**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index e13b20bd9908..2c7a25c71af2 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -752,12 +752,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
752 ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM); 752 ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
753 dev_priv->active_master = &dev_priv->fbdev_master; 753 dev_priv->active_master = &dev_priv->fbdev_master;
754 754
755 755 dev_priv->mmio_virt = ioremap_cache(dev_priv->mmio_start,
756 dev_priv->mmio_mtrr = arch_phys_wc_add(dev_priv->mmio_start, 756 dev_priv->mmio_size);
757 dev_priv->mmio_size);
758
759 dev_priv->mmio_virt = ioremap_wc(dev_priv->mmio_start,
760 dev_priv->mmio_size);
761 757
762 if (unlikely(dev_priv->mmio_virt == NULL)) { 758 if (unlikely(dev_priv->mmio_virt == NULL)) {
763 ret = -ENOMEM; 759 ret = -ENOMEM;
@@ -913,7 +909,6 @@ out_no_device:
913out_err4: 909out_err4:
914 iounmap(dev_priv->mmio_virt); 910 iounmap(dev_priv->mmio_virt);
915out_err3: 911out_err3:
916 arch_phys_wc_del(dev_priv->mmio_mtrr);
917 vmw_ttm_global_release(dev_priv); 912 vmw_ttm_global_release(dev_priv);
918out_err0: 913out_err0:
919 for (i = vmw_res_context; i < vmw_res_max; ++i) 914 for (i = vmw_res_context; i < vmw_res_max; ++i)
@@ -964,7 +959,6 @@ static int vmw_driver_unload(struct drm_device *dev)
964 959
965 ttm_object_device_release(&dev_priv->tdev); 960 ttm_object_device_release(&dev_priv->tdev);
966 iounmap(dev_priv->mmio_virt); 961 iounmap(dev_priv->mmio_virt);
967 arch_phys_wc_del(dev_priv->mmio_mtrr);
968 if (dev_priv->ctx.staged_bindings) 962 if (dev_priv->ctx.staged_bindings)
969 vmw_binding_state_free(dev_priv->ctx.staged_bindings); 963 vmw_binding_state_free(dev_priv->ctx.staged_bindings);
970 vmw_ttm_global_release(dev_priv); 964 vmw_ttm_global_release(dev_priv);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 6d02de6dc36c..f19fd39b43e1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -376,7 +376,6 @@ struct vmw_private {
376 uint32_t initial_width; 376 uint32_t initial_width;
377 uint32_t initial_height; 377 uint32_t initial_height;
378 u32 __iomem *mmio_virt; 378 u32 __iomem *mmio_virt;
379 int mmio_mtrr;
380 uint32_t capabilities; 379 uint32_t capabilities;
381 uint32_t max_gmr_ids; 380 uint32_t max_gmr_ids;
382 uint32_t max_gmr_pages; 381 uint32_t max_gmr_pages;
@@ -631,7 +630,8 @@ extern int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
631 uint32_t size, 630 uint32_t size,
632 bool shareable, 631 bool shareable,
633 uint32_t *handle, 632 uint32_t *handle,
634 struct vmw_dma_buffer **p_dma_buf); 633 struct vmw_dma_buffer **p_dma_buf,
634 struct ttm_base_object **p_base);
635extern int vmw_user_dmabuf_reference(struct ttm_object_file *tfile, 635extern int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
636 struct vmw_dma_buffer *dma_buf, 636 struct vmw_dma_buffer *dma_buf,
637 uint32_t *handle); 637 uint32_t *handle);
@@ -645,7 +645,8 @@ extern uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
645 uint32_t cur_validate_node); 645 uint32_t cur_validate_node);
646extern void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo); 646extern void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo);
647extern int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile, 647extern int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
648 uint32_t id, struct vmw_dma_buffer **out); 648 uint32_t id, struct vmw_dma_buffer **out,
649 struct ttm_base_object **base);
649extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data, 650extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
650 struct drm_file *file_priv); 651 struct drm_file *file_priv);
651extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data, 652extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index b56565457c96..5da5de0cb522 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -1236,7 +1236,8 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
1236 struct vmw_relocation *reloc; 1236 struct vmw_relocation *reloc;
1237 int ret; 1237 int ret;
1238 1238
1239 ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo); 1239 ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo,
1240 NULL);
1240 if (unlikely(ret != 0)) { 1241 if (unlikely(ret != 0)) {
1241 DRM_ERROR("Could not find or use MOB buffer.\n"); 1242 DRM_ERROR("Could not find or use MOB buffer.\n");
1242 ret = -EINVAL; 1243 ret = -EINVAL;
@@ -1296,7 +1297,8 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
1296 struct vmw_relocation *reloc; 1297 struct vmw_relocation *reloc;
1297 int ret; 1298 int ret;
1298 1299
1299 ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo); 1300 ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo,
1301 NULL);
1300 if (unlikely(ret != 0)) { 1302 if (unlikely(ret != 0)) {
1301 DRM_ERROR("Could not find or use GMR region.\n"); 1303 DRM_ERROR("Could not find or use GMR region.\n");
1302 ret = -EINVAL; 1304 ret = -EINVAL;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 61fb7f3de311..15a6c01cd016 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -1685,7 +1685,6 @@ int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
1685 struct drm_crtc *crtc; 1685 struct drm_crtc *crtc;
1686 u32 num_units = 0; 1686 u32 num_units = 0;
1687 u32 i, k; 1687 u32 i, k;
1688 int ret;
1689 1688
1690 dirty->dev_priv = dev_priv; 1689 dirty->dev_priv = dev_priv;
1691 1690
@@ -1711,7 +1710,7 @@ int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
1711 if (!dirty->cmd) { 1710 if (!dirty->cmd) {
1712 DRM_ERROR("Couldn't reserve fifo space " 1711 DRM_ERROR("Couldn't reserve fifo space "
1713 "for dirty blits.\n"); 1712 "for dirty blits.\n");
1714 return ret; 1713 return -ENOMEM;
1715 } 1714 }
1716 memset(dirty->cmd, 0, dirty->fifo_reserve_size); 1715 memset(dirty->cmd, 0, dirty->fifo_reserve_size);
1717 } 1716 }
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
index 76069f093ccf..222c9c2123a1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
@@ -484,7 +484,7 @@ int vmw_overlay_ioctl(struct drm_device *dev, void *data,
484 goto out_unlock; 484 goto out_unlock;
485 } 485 }
486 486
487 ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &buf); 487 ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &buf, NULL);
488 if (ret) 488 if (ret)
489 goto out_unlock; 489 goto out_unlock;
490 490
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index c1912f852b42..e57667ca7557 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -354,7 +354,7 @@ int vmw_user_lookup_handle(struct vmw_private *dev_priv,
354 } 354 }
355 355
356 *out_surf = NULL; 356 *out_surf = NULL;
357 ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf); 357 ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf, NULL);
358 return ret; 358 return ret;
359} 359}
360 360
@@ -481,7 +481,8 @@ int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
481 uint32_t size, 481 uint32_t size,
482 bool shareable, 482 bool shareable,
483 uint32_t *handle, 483 uint32_t *handle,
484 struct vmw_dma_buffer **p_dma_buf) 484 struct vmw_dma_buffer **p_dma_buf,
485 struct ttm_base_object **p_base)
485{ 486{
486 struct vmw_user_dma_buffer *user_bo; 487 struct vmw_user_dma_buffer *user_bo;
487 struct ttm_buffer_object *tmp; 488 struct ttm_buffer_object *tmp;
@@ -515,6 +516,10 @@ int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
515 } 516 }
516 517
517 *p_dma_buf = &user_bo->dma; 518 *p_dma_buf = &user_bo->dma;
519 if (p_base) {
520 *p_base = &user_bo->prime.base;
521 kref_get(&(*p_base)->refcount);
522 }
518 *handle = user_bo->prime.base.hash.key; 523 *handle = user_bo->prime.base.hash.key;
519 524
520out_no_base_object: 525out_no_base_object:
@@ -631,6 +636,7 @@ int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
631 struct vmw_dma_buffer *dma_buf; 636 struct vmw_dma_buffer *dma_buf;
632 struct vmw_user_dma_buffer *user_bo; 637 struct vmw_user_dma_buffer *user_bo;
633 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; 638 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
639 struct ttm_base_object *buffer_base;
634 int ret; 640 int ret;
635 641
636 if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0 642 if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
@@ -643,7 +649,8 @@ int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
643 649
644 switch (arg->op) { 650 switch (arg->op) {
645 case drm_vmw_synccpu_grab: 651 case drm_vmw_synccpu_grab:
646 ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &dma_buf); 652 ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &dma_buf,
653 &buffer_base);
647 if (unlikely(ret != 0)) 654 if (unlikely(ret != 0))
648 return ret; 655 return ret;
649 656
@@ -651,6 +658,7 @@ int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
651 dma); 658 dma);
652 ret = vmw_user_dmabuf_synccpu_grab(user_bo, tfile, arg->flags); 659 ret = vmw_user_dmabuf_synccpu_grab(user_bo, tfile, arg->flags);
653 vmw_dmabuf_unreference(&dma_buf); 660 vmw_dmabuf_unreference(&dma_buf);
661 ttm_base_object_unref(&buffer_base);
654 if (unlikely(ret != 0 && ret != -ERESTARTSYS && 662 if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
655 ret != -EBUSY)) { 663 ret != -EBUSY)) {
656 DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n", 664 DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
@@ -692,7 +700,8 @@ int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
692 return ret; 700 return ret;
693 701
694 ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile, 702 ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
695 req->size, false, &handle, &dma_buf); 703 req->size, false, &handle, &dma_buf,
704 NULL);
696 if (unlikely(ret != 0)) 705 if (unlikely(ret != 0))
697 goto out_no_dmabuf; 706 goto out_no_dmabuf;
698 707
@@ -721,7 +730,8 @@ int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
721} 730}
722 731
723int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile, 732int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
724 uint32_t handle, struct vmw_dma_buffer **out) 733 uint32_t handle, struct vmw_dma_buffer **out,
734 struct ttm_base_object **p_base)
725{ 735{
726 struct vmw_user_dma_buffer *vmw_user_bo; 736 struct vmw_user_dma_buffer *vmw_user_bo;
727 struct ttm_base_object *base; 737 struct ttm_base_object *base;
@@ -743,7 +753,10 @@ int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
743 vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, 753 vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
744 prime.base); 754 prime.base);
745 (void)ttm_bo_reference(&vmw_user_bo->dma.base); 755 (void)ttm_bo_reference(&vmw_user_bo->dma.base);
746 ttm_base_object_unref(&base); 756 if (p_base)
757 *p_base = base;
758 else
759 ttm_base_object_unref(&base);
747 *out = &vmw_user_bo->dma; 760 *out = &vmw_user_bo->dma;
748 761
749 return 0; 762 return 0;
@@ -1004,7 +1017,7 @@ int vmw_dumb_create(struct drm_file *file_priv,
1004 1017
1005 ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile, 1018 ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
1006 args->size, false, &args->handle, 1019 args->size, false, &args->handle,
1007 &dma_buf); 1020 &dma_buf, NULL);
1008 if (unlikely(ret != 0)) 1021 if (unlikely(ret != 0))
1009 goto out_no_dmabuf; 1022 goto out_no_dmabuf;
1010 1023
@@ -1032,7 +1045,7 @@ int vmw_dumb_map_offset(struct drm_file *file_priv,
1032 struct vmw_dma_buffer *out_buf; 1045 struct vmw_dma_buffer *out_buf;
1033 int ret; 1046 int ret;
1034 1047
1035 ret = vmw_user_dmabuf_lookup(tfile, handle, &out_buf); 1048 ret = vmw_user_dmabuf_lookup(tfile, handle, &out_buf, NULL);
1036 if (ret != 0) 1049 if (ret != 0)
1037 return -EINVAL; 1050 return -EINVAL;
1038 1051
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
index bba1ee395478..fd47547b0234 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -855,7 +855,7 @@ static int vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv,
855 855
856 if (buffer_handle != SVGA3D_INVALID_ID) { 856 if (buffer_handle != SVGA3D_INVALID_ID) {
857 ret = vmw_user_dmabuf_lookup(tfile, buffer_handle, 857 ret = vmw_user_dmabuf_lookup(tfile, buffer_handle,
858 &buffer); 858 &buffer, NULL);
859 if (unlikely(ret != 0)) { 859 if (unlikely(ret != 0)) {
860 DRM_ERROR("Could not find buffer for shader " 860 DRM_ERROR("Could not find buffer for shader "
861 "creation.\n"); 861 "creation.\n");
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 3361769842f4..64b50409fa07 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -46,6 +46,7 @@ struct vmw_user_surface {
46 struct vmw_surface srf; 46 struct vmw_surface srf;
47 uint32_t size; 47 uint32_t size;
48 struct drm_master *master; 48 struct drm_master *master;
49 struct ttm_base_object *backup_base;
49}; 50};
50 51
51/** 52/**
@@ -656,6 +657,7 @@ static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
656 struct vmw_resource *res = &user_srf->srf.res; 657 struct vmw_resource *res = &user_srf->srf.res;
657 658
658 *p_base = NULL; 659 *p_base = NULL;
660 ttm_base_object_unref(&user_srf->backup_base);
659 vmw_resource_unreference(&res); 661 vmw_resource_unreference(&res);
660} 662}
661 663
@@ -851,7 +853,8 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
851 res->backup_size, 853 res->backup_size,
852 true, 854 true,
853 &backup_handle, 855 &backup_handle,
854 &res->backup); 856 &res->backup,
857 &user_srf->backup_base);
855 if (unlikely(ret != 0)) { 858 if (unlikely(ret != 0)) {
856 vmw_resource_unreference(&res); 859 vmw_resource_unreference(&res);
857 goto out_unlock; 860 goto out_unlock;
@@ -1321,7 +1324,8 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
1321 1324
1322 if (req->buffer_handle != SVGA3D_INVALID_ID) { 1325 if (req->buffer_handle != SVGA3D_INVALID_ID) {
1323 ret = vmw_user_dmabuf_lookup(tfile, req->buffer_handle, 1326 ret = vmw_user_dmabuf_lookup(tfile, req->buffer_handle,
1324 &res->backup); 1327 &res->backup,
1328 &user_srf->backup_base);
1325 if (ret == 0 && res->backup->base.num_pages * PAGE_SIZE < 1329 if (ret == 0 && res->backup->base.num_pages * PAGE_SIZE <
1326 res->backup_size) { 1330 res->backup_size) {
1327 DRM_ERROR("Surface backup buffer is too small.\n"); 1331 DRM_ERROR("Surface backup buffer is too small.\n");
@@ -1335,7 +1339,8 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
1335 req->drm_surface_flags & 1339 req->drm_surface_flags &
1336 drm_vmw_surface_flag_shareable, 1340 drm_vmw_surface_flag_shareable,
1337 &backup_handle, 1341 &backup_handle,
1338 &res->backup); 1342 &res->backup,
1343 &user_srf->backup_base);
1339 1344
1340 if (unlikely(ret != 0)) { 1345 if (unlikely(ret != 0)) {
1341 vmw_resource_unreference(&res); 1346 vmw_resource_unreference(&res);
diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c
index 243f99a80253..e5a38d202a21 100644
--- a/drivers/gpu/ipu-v3/ipu-common.c
+++ b/drivers/gpu/ipu-v3/ipu-common.c
@@ -912,7 +912,7 @@ static void ipu_irq_handle(struct ipu_soc *ipu, const int *regs, int num_regs)
912 } 912 }
913} 913}
914 914
915static void ipu_irq_handler(unsigned int irq, struct irq_desc *desc) 915static void ipu_irq_handler(struct irq_desc *desc)
916{ 916{
917 struct ipu_soc *ipu = irq_desc_get_handler_data(desc); 917 struct ipu_soc *ipu = irq_desc_get_handler_data(desc);
918 struct irq_chip *chip = irq_desc_get_chip(desc); 918 struct irq_chip *chip = irq_desc_get_chip(desc);
@@ -925,7 +925,7 @@ static void ipu_irq_handler(unsigned int irq, struct irq_desc *desc)
925 chained_irq_exit(chip, desc); 925 chained_irq_exit(chip, desc);
926} 926}
927 927
928static void ipu_err_irq_handler(unsigned int irq, struct irq_desc *desc) 928static void ipu_err_irq_handler(struct irq_desc *desc)
929{ 929{
930 struct ipu_soc *ipu = irq_desc_get_handler_data(desc); 930 struct ipu_soc *ipu = irq_desc_get_handler_data(desc);
931 struct irq_chip *chip = irq_desc_get_chip(desc); 931 struct irq_chip *chip = irq_desc_get_chip(desc);
@@ -1099,8 +1099,7 @@ static int ipu_irq_init(struct ipu_soc *ipu)
1099 } 1099 }
1100 1100
1101 ret = irq_alloc_domain_generic_chips(ipu->domain, 32, 1, "IPU", 1101 ret = irq_alloc_domain_generic_chips(ipu->domain, 32, 1, "IPU",
1102 handle_level_irq, 0, 1102 handle_level_irq, 0, 0, 0);
1103 IRQF_VALID, 0);
1104 if (ret < 0) { 1103 if (ret < 0) {
1105 dev_err(ipu->dev, "failed to alloc generic irq chips\n"); 1104 dev_err(ipu->dev, "failed to alloc generic irq chips\n");
1106 irq_domain_remove(ipu->domain); 1105 irq_domain_remove(ipu->domain);
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 500b262b89bb..e13c902e8966 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -1140,8 +1140,8 @@ config SENSORS_NCT6775
1140 help 1140 help
1141 If you say yes here you get support for the hardware monitoring 1141 If you say yes here you get support for the hardware monitoring
1142 functionality of the Nuvoton NCT6106D, NCT6775F, NCT6776F, NCT6779D, 1142 functionality of the Nuvoton NCT6106D, NCT6775F, NCT6776F, NCT6779D,
1143 NCT6791D, NCT6792D and compatible Super-I/O chips. This driver 1143 NCT6791D, NCT6792D, NCT6793D, and compatible Super-I/O chips. This
1144 replaces the w83627ehf driver for NCT6775F and NCT6776F. 1144 driver replaces the w83627ehf driver for NCT6775F and NCT6776F.
1145 1145
1146 This driver can also be built as a module. If so, the module 1146 This driver can also be built as a module. If so, the module
1147 will be called nct6775. 1147 will be called nct6775.
diff --git a/drivers/hwmon/lm75.c b/drivers/hwmon/lm75.c
index fe41d5ae7cb2..e4e57bbafb10 100644
--- a/drivers/hwmon/lm75.c
+++ b/drivers/hwmon/lm75.c
@@ -104,7 +104,7 @@ static inline long lm75_reg_to_mc(s16 temp, u8 resolution)
104 104
105/* sysfs attributes for hwmon */ 105/* sysfs attributes for hwmon */
106 106
107static int lm75_read_temp(void *dev, long *temp) 107static int lm75_read_temp(void *dev, int *temp)
108{ 108{
109 struct lm75_data *data = lm75_update_device(dev); 109 struct lm75_data *data = lm75_update_device(dev);
110 110
diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
index bd1c99deac71..8b4fa55e46c6 100644
--- a/drivers/hwmon/nct6775.c
+++ b/drivers/hwmon/nct6775.c
@@ -39,6 +39,7 @@
39 * nct6779d 15 5 5 2+6 0xc560 0xc1 0x5ca3 39 * nct6779d 15 5 5 2+6 0xc560 0xc1 0x5ca3
40 * nct6791d 15 6 6 2+6 0xc800 0xc1 0x5ca3 40 * nct6791d 15 6 6 2+6 0xc800 0xc1 0x5ca3
41 * nct6792d 15 6 6 2+6 0xc910 0xc1 0x5ca3 41 * nct6792d 15 6 6 2+6 0xc910 0xc1 0x5ca3
42 * nct6793d 15 6 6 2+6 0xd120 0xc1 0x5ca3
42 * 43 *
43 * #temp lists the number of monitored temperature sources (first value) plus 44 * #temp lists the number of monitored temperature sources (first value) plus
44 * the number of directly connectable temperature sensors (second value). 45 * the number of directly connectable temperature sensors (second value).
@@ -63,7 +64,7 @@
63 64
64#define USE_ALTERNATE 65#define USE_ALTERNATE
65 66
66enum kinds { nct6106, nct6775, nct6776, nct6779, nct6791, nct6792 }; 67enum kinds { nct6106, nct6775, nct6776, nct6779, nct6791, nct6792, nct6793 };
67 68
68/* used to set data->name = nct6775_device_names[data->sio_kind] */ 69/* used to set data->name = nct6775_device_names[data->sio_kind] */
69static const char * const nct6775_device_names[] = { 70static const char * const nct6775_device_names[] = {
@@ -73,6 +74,17 @@ static const char * const nct6775_device_names[] = {
73 "nct6779", 74 "nct6779",
74 "nct6791", 75 "nct6791",
75 "nct6792", 76 "nct6792",
77 "nct6793",
78};
79
80static const char * const nct6775_sio_names[] __initconst = {
81 "NCT6106D",
82 "NCT6775F",
83 "NCT6776D/F",
84 "NCT6779D",
85 "NCT6791D",
86 "NCT6792D",
87 "NCT6793D",
76}; 88};
77 89
78static unsigned short force_id; 90static unsigned short force_id;
@@ -104,6 +116,7 @@ MODULE_PARM_DESC(fan_debounce, "Enable debouncing for fan RPM signal");
104#define SIO_NCT6779_ID 0xc560 116#define SIO_NCT6779_ID 0xc560
105#define SIO_NCT6791_ID 0xc800 117#define SIO_NCT6791_ID 0xc800
106#define SIO_NCT6792_ID 0xc910 118#define SIO_NCT6792_ID 0xc910
119#define SIO_NCT6793_ID 0xd120
107#define SIO_ID_MASK 0xFFF0 120#define SIO_ID_MASK 0xFFF0
108 121
109enum pwm_enable { off, manual, thermal_cruise, speed_cruise, sf3, sf4 }; 122enum pwm_enable { off, manual, thermal_cruise, speed_cruise, sf3, sf4 };
@@ -354,6 +367,10 @@ static const u16 NCT6775_REG_TEMP_CRIT[ARRAY_SIZE(nct6775_temp_label) - 1]
354 367
355/* NCT6776 specific data */ 368/* NCT6776 specific data */
356 369
370/* STEP_UP_TIME and STEP_DOWN_TIME regs are swapped for all chips but NCT6775 */
371#define NCT6776_REG_FAN_STEP_UP_TIME NCT6775_REG_FAN_STEP_DOWN_TIME
372#define NCT6776_REG_FAN_STEP_DOWN_TIME NCT6775_REG_FAN_STEP_UP_TIME
373
357static const s8 NCT6776_ALARM_BITS[] = { 374static const s8 NCT6776_ALARM_BITS[] = {
358 0, 1, 2, 3, 8, 21, 20, 16, /* in0.. in7 */ 375 0, 1, 2, 3, 8, 21, 20, 16, /* in0.. in7 */
359 17, -1, -1, -1, -1, -1, -1, /* in8..in14 */ 376 17, -1, -1, -1, -1, -1, -1, /* in8..in14 */
@@ -533,7 +550,7 @@ static const s8 NCT6791_ALARM_BITS[] = {
533 4, 5, 13, -1, -1, -1, /* temp1..temp6 */ 550 4, 5, 13, -1, -1, -1, /* temp1..temp6 */
534 12, 9 }; /* intrusion0, intrusion1 */ 551 12, 9 }; /* intrusion0, intrusion1 */
535 552
536/* NCT6792 specific data */ 553/* NCT6792/NCT6793 specific data */
537 554
538static const u16 NCT6792_REG_TEMP_MON[] = { 555static const u16 NCT6792_REG_TEMP_MON[] = {
539 0x73, 0x75, 0x77, 0x79, 0x7b, 0x7d }; 556 0x73, 0x75, 0x77, 0x79, 0x7b, 0x7d };
@@ -1056,6 +1073,7 @@ static bool is_word_sized(struct nct6775_data *data, u16 reg)
1056 case nct6779: 1073 case nct6779:
1057 case nct6791: 1074 case nct6791:
1058 case nct6792: 1075 case nct6792:
1076 case nct6793:
1059 return reg == 0x150 || reg == 0x153 || reg == 0x155 || 1077 return reg == 0x150 || reg == 0x153 || reg == 0x155 ||
1060 ((reg & 0xfff0) == 0x4b0 && (reg & 0x000f) < 0x0b) || 1078 ((reg & 0xfff0) == 0x4b0 && (reg & 0x000f) < 0x0b) ||
1061 reg == 0x402 || 1079 reg == 0x402 ||
@@ -1407,6 +1425,7 @@ static void nct6775_update_pwm_limits(struct device *dev)
1407 case nct6779: 1425 case nct6779:
1408 case nct6791: 1426 case nct6791:
1409 case nct6792: 1427 case nct6792:
1428 case nct6793:
1410 reg = nct6775_read_value(data, 1429 reg = nct6775_read_value(data,
1411 data->REG_CRITICAL_PWM_ENABLE[i]); 1430 data->REG_CRITICAL_PWM_ENABLE[i]);
1412 if (reg & data->CRITICAL_PWM_ENABLE_MASK) 1431 if (reg & data->CRITICAL_PWM_ENABLE_MASK)
@@ -2822,6 +2841,7 @@ store_auto_pwm(struct device *dev, struct device_attribute *attr,
2822 case nct6779: 2841 case nct6779:
2823 case nct6791: 2842 case nct6791:
2824 case nct6792: 2843 case nct6792:
2844 case nct6793:
2825 nct6775_write_value(data, data->REG_CRITICAL_PWM[nr], 2845 nct6775_write_value(data, data->REG_CRITICAL_PWM[nr],
2826 val); 2846 val);
2827 reg = nct6775_read_value(data, 2847 reg = nct6775_read_value(data,
@@ -3256,7 +3276,7 @@ nct6775_check_fan_inputs(struct nct6775_data *data)
3256 pwm4pin = false; 3276 pwm4pin = false;
3257 pwm5pin = false; 3277 pwm5pin = false;
3258 pwm6pin = false; 3278 pwm6pin = false;
3259 } else { /* NCT6779D, NCT6791D, or NCT6792D */ 3279 } else { /* NCT6779D, NCT6791D, NCT6792D, or NCT6793D */
3260 regval = superio_inb(sioreg, 0x1c); 3280 regval = superio_inb(sioreg, 0x1c);
3261 3281
3262 fan3pin = !(regval & (1 << 5)); 3282 fan3pin = !(regval & (1 << 5));
@@ -3269,7 +3289,8 @@ nct6775_check_fan_inputs(struct nct6775_data *data)
3269 3289
3270 fan4min = fan4pin; 3290 fan4min = fan4pin;
3271 3291
3272 if (data->kind == nct6791 || data->kind == nct6792) { 3292 if (data->kind == nct6791 || data->kind == nct6792 ||
3293 data->kind == nct6793) {
3273 regval = superio_inb(sioreg, 0x2d); 3294 regval = superio_inb(sioreg, 0x2d);
3274 fan6pin = (regval & (1 << 1)); 3295 fan6pin = (regval & (1 << 1));
3275 pwm6pin = (regval & (1 << 0)); 3296 pwm6pin = (regval & (1 << 0));
@@ -3528,8 +3549,8 @@ static int nct6775_probe(struct platform_device *pdev)
3528 data->REG_FAN_PULSES = NCT6776_REG_FAN_PULSES; 3549 data->REG_FAN_PULSES = NCT6776_REG_FAN_PULSES;
3529 data->FAN_PULSE_SHIFT = NCT6775_FAN_PULSE_SHIFT; 3550 data->FAN_PULSE_SHIFT = NCT6775_FAN_PULSE_SHIFT;
3530 data->REG_FAN_TIME[0] = NCT6775_REG_FAN_STOP_TIME; 3551 data->REG_FAN_TIME[0] = NCT6775_REG_FAN_STOP_TIME;
3531 data->REG_FAN_TIME[1] = NCT6775_REG_FAN_STEP_UP_TIME; 3552 data->REG_FAN_TIME[1] = NCT6776_REG_FAN_STEP_UP_TIME;
3532 data->REG_FAN_TIME[2] = NCT6775_REG_FAN_STEP_DOWN_TIME; 3553 data->REG_FAN_TIME[2] = NCT6776_REG_FAN_STEP_DOWN_TIME;
3533 data->REG_TOLERANCE_H = NCT6776_REG_TOLERANCE_H; 3554 data->REG_TOLERANCE_H = NCT6776_REG_TOLERANCE_H;
3534 data->REG_PWM[0] = NCT6775_REG_PWM; 3555 data->REG_PWM[0] = NCT6775_REG_PWM;
3535 data->REG_PWM[1] = NCT6775_REG_FAN_START_OUTPUT; 3556 data->REG_PWM[1] = NCT6775_REG_FAN_START_OUTPUT;
@@ -3600,8 +3621,8 @@ static int nct6775_probe(struct platform_device *pdev)
3600 data->REG_FAN_PULSES = NCT6779_REG_FAN_PULSES; 3621 data->REG_FAN_PULSES = NCT6779_REG_FAN_PULSES;
3601 data->FAN_PULSE_SHIFT = NCT6775_FAN_PULSE_SHIFT; 3622 data->FAN_PULSE_SHIFT = NCT6775_FAN_PULSE_SHIFT;
3602 data->REG_FAN_TIME[0] = NCT6775_REG_FAN_STOP_TIME; 3623 data->REG_FAN_TIME[0] = NCT6775_REG_FAN_STOP_TIME;
3603 data->REG_FAN_TIME[1] = NCT6775_REG_FAN_STEP_UP_TIME; 3624 data->REG_FAN_TIME[1] = NCT6776_REG_FAN_STEP_UP_TIME;
3604 data->REG_FAN_TIME[2] = NCT6775_REG_FAN_STEP_DOWN_TIME; 3625 data->REG_FAN_TIME[2] = NCT6776_REG_FAN_STEP_DOWN_TIME;
3605 data->REG_TOLERANCE_H = NCT6776_REG_TOLERANCE_H; 3626 data->REG_TOLERANCE_H = NCT6776_REG_TOLERANCE_H;
3606 data->REG_PWM[0] = NCT6775_REG_PWM; 3627 data->REG_PWM[0] = NCT6775_REG_PWM;
3607 data->REG_PWM[1] = NCT6775_REG_FAN_START_OUTPUT; 3628 data->REG_PWM[1] = NCT6775_REG_FAN_START_OUTPUT;
@@ -3643,6 +3664,7 @@ static int nct6775_probe(struct platform_device *pdev)
3643 break; 3664 break;
3644 case nct6791: 3665 case nct6791:
3645 case nct6792: 3666 case nct6792:
3667 case nct6793:
3646 data->in_num = 15; 3668 data->in_num = 15;
3647 data->pwm_num = 6; 3669 data->pwm_num = 6;
3648 data->auto_pwm_num = 4; 3670 data->auto_pwm_num = 4;
@@ -3677,8 +3699,8 @@ static int nct6775_probe(struct platform_device *pdev)
3677 data->REG_FAN_PULSES = NCT6779_REG_FAN_PULSES; 3699 data->REG_FAN_PULSES = NCT6779_REG_FAN_PULSES;
3678 data->FAN_PULSE_SHIFT = NCT6775_FAN_PULSE_SHIFT; 3700 data->FAN_PULSE_SHIFT = NCT6775_FAN_PULSE_SHIFT;
3679 data->REG_FAN_TIME[0] = NCT6775_REG_FAN_STOP_TIME; 3701 data->REG_FAN_TIME[0] = NCT6775_REG_FAN_STOP_TIME;
3680 data->REG_FAN_TIME[1] = NCT6775_REG_FAN_STEP_UP_TIME; 3702 data->REG_FAN_TIME[1] = NCT6776_REG_FAN_STEP_UP_TIME;
3681 data->REG_FAN_TIME[2] = NCT6775_REG_FAN_STEP_DOWN_TIME; 3703 data->REG_FAN_TIME[2] = NCT6776_REG_FAN_STEP_DOWN_TIME;
3682 data->REG_TOLERANCE_H = NCT6776_REG_TOLERANCE_H; 3704 data->REG_TOLERANCE_H = NCT6776_REG_TOLERANCE_H;
3683 data->REG_PWM[0] = NCT6775_REG_PWM; 3705 data->REG_PWM[0] = NCT6775_REG_PWM;
3684 data->REG_PWM[1] = NCT6775_REG_FAN_START_OUTPUT; 3706 data->REG_PWM[1] = NCT6775_REG_FAN_START_OUTPUT;
@@ -3918,6 +3940,7 @@ static int nct6775_probe(struct platform_device *pdev)
3918 case nct6779: 3940 case nct6779:
3919 case nct6791: 3941 case nct6791:
3920 case nct6792: 3942 case nct6792:
3943 case nct6793:
3921 break; 3944 break;
3922 } 3945 }
3923 3946
@@ -3950,6 +3973,7 @@ static int nct6775_probe(struct platform_device *pdev)
3950 break; 3973 break;
3951 case nct6791: 3974 case nct6791:
3952 case nct6792: 3975 case nct6792:
3976 case nct6793:
3953 tmp |= 0x7e; 3977 tmp |= 0x7e;
3954 break; 3978 break;
3955 } 3979 }
@@ -4047,7 +4071,8 @@ static int __maybe_unused nct6775_resume(struct device *dev)
4047 if (reg != data->sio_reg_enable) 4071 if (reg != data->sio_reg_enable)
4048 superio_outb(sioreg, SIO_REG_ENABLE, data->sio_reg_enable); 4072 superio_outb(sioreg, SIO_REG_ENABLE, data->sio_reg_enable);
4049 4073
4050 if (data->kind == nct6791 || data->kind == nct6792) 4074 if (data->kind == nct6791 || data->kind == nct6792 ||
4075 data->kind == nct6793)
4051 nct6791_enable_io_mapping(sioreg); 4076 nct6791_enable_io_mapping(sioreg);
4052 4077
4053 superio_exit(sioreg); 4078 superio_exit(sioreg);
@@ -4106,15 +4131,6 @@ static struct platform_driver nct6775_driver = {
4106 .probe = nct6775_probe, 4131 .probe = nct6775_probe,
4107}; 4132};
4108 4133
4109static const char * const nct6775_sio_names[] __initconst = {
4110 "NCT6106D",
4111 "NCT6775F",
4112 "NCT6776D/F",
4113 "NCT6779D",
4114 "NCT6791D",
4115 "NCT6792D",
4116};
4117
4118/* nct6775_find() looks for a '627 in the Super-I/O config space */ 4134/* nct6775_find() looks for a '627 in the Super-I/O config space */
4119static int __init nct6775_find(int sioaddr, struct nct6775_sio_data *sio_data) 4135static int __init nct6775_find(int sioaddr, struct nct6775_sio_data *sio_data)
4120{ 4136{
@@ -4150,6 +4166,9 @@ static int __init nct6775_find(int sioaddr, struct nct6775_sio_data *sio_data)
4150 case SIO_NCT6792_ID: 4166 case SIO_NCT6792_ID:
4151 sio_data->kind = nct6792; 4167 sio_data->kind = nct6792;
4152 break; 4168 break;
4169 case SIO_NCT6793_ID:
4170 sio_data->kind = nct6793;
4171 break;
4153 default: 4172 default:
4154 if (val != 0xffff) 4173 if (val != 0xffff)
4155 pr_debug("unsupported chip ID: 0x%04x\n", val); 4174 pr_debug("unsupported chip ID: 0x%04x\n", val);
@@ -4175,7 +4194,8 @@ static int __init nct6775_find(int sioaddr, struct nct6775_sio_data *sio_data)
4175 superio_outb(sioaddr, SIO_REG_ENABLE, val | 0x01); 4194 superio_outb(sioaddr, SIO_REG_ENABLE, val | 0x01);
4176 } 4195 }
4177 4196
4178 if (sio_data->kind == nct6791 || sio_data->kind == nct6792) 4197 if (sio_data->kind == nct6791 || sio_data->kind == nct6792 ||
4198 sio_data->kind == nct6793)
4179 nct6791_enable_io_mapping(sioaddr); 4199 nct6791_enable_io_mapping(sioaddr);
4180 4200
4181 superio_exit(sioaddr); 4201 superio_exit(sioaddr);
@@ -4285,7 +4305,7 @@ static void __exit sensors_nct6775_exit(void)
4285} 4305}
4286 4306
4287MODULE_AUTHOR("Guenter Roeck <linux@roeck-us.net>"); 4307MODULE_AUTHOR("Guenter Roeck <linux@roeck-us.net>");
4288MODULE_DESCRIPTION("NCT6106D/NCT6775F/NCT6776F/NCT6779D/NCT6791D/NCT6792D driver"); 4308MODULE_DESCRIPTION("Driver for NCT6775F and compatible chips");
4289MODULE_LICENSE("GPL"); 4309MODULE_LICENSE("GPL");
4290 4310
4291module_init(sensors_nct6775_init); 4311module_init(sensors_nct6775_init);
diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c
index dc0b76c5e302..feed30646d91 100644
--- a/drivers/hwmon/ntc_thermistor.c
+++ b/drivers/hwmon/ntc_thermistor.c
@@ -477,7 +477,7 @@ static int ntc_thermistor_get_ohm(struct ntc_data *data)
477 return -EINVAL; 477 return -EINVAL;
478} 478}
479 479
480static int ntc_read_temp(void *dev, long *temp) 480static int ntc_read_temp(void *dev, int *temp)
481{ 481{
482 struct ntc_data *data = dev_get_drvdata(dev); 482 struct ntc_data *data = dev_get_drvdata(dev);
483 int ohm; 483 int ohm;
diff --git a/drivers/hwmon/tmp102.c b/drivers/hwmon/tmp102.c
index 9da2735f1424..65482624ea2c 100644
--- a/drivers/hwmon/tmp102.c
+++ b/drivers/hwmon/tmp102.c
@@ -98,7 +98,7 @@ static struct tmp102 *tmp102_update_device(struct device *dev)
98 return tmp102; 98 return tmp102;
99} 99}
100 100
101static int tmp102_read_temp(void *dev, long *temp) 101static int tmp102_read_temp(void *dev, int *temp)
102{ 102{
103 struct tmp102 *tmp102 = tmp102_update_device(dev); 103 struct tmp102 *tmp102 = tmp102_update_device(dev);
104 104
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index da4c6979fbb8..aa26f3c3416b 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -56,7 +56,6 @@ config INFINIBAND_ADDR_TRANS
56 56
57source "drivers/infiniband/hw/mthca/Kconfig" 57source "drivers/infiniband/hw/mthca/Kconfig"
58source "drivers/infiniband/hw/qib/Kconfig" 58source "drivers/infiniband/hw/qib/Kconfig"
59source "drivers/infiniband/hw/ehca/Kconfig"
60source "drivers/infiniband/hw/cxgb3/Kconfig" 59source "drivers/infiniband/hw/cxgb3/Kconfig"
61source "drivers/infiniband/hw/cxgb4/Kconfig" 60source "drivers/infiniband/hw/cxgb4/Kconfig"
62source "drivers/infiniband/hw/mlx4/Kconfig" 61source "drivers/infiniband/hw/mlx4/Kconfig"
diff --git a/drivers/infiniband/hw/Makefile b/drivers/infiniband/hw/Makefile
index 1bdb9996d371..aded2a5cc2d5 100644
--- a/drivers/infiniband/hw/Makefile
+++ b/drivers/infiniband/hw/Makefile
@@ -1,6 +1,5 @@
1obj-$(CONFIG_INFINIBAND_MTHCA) += mthca/ 1obj-$(CONFIG_INFINIBAND_MTHCA) += mthca/
2obj-$(CONFIG_INFINIBAND_QIB) += qib/ 2obj-$(CONFIG_INFINIBAND_QIB) += qib/
3obj-$(CONFIG_INFINIBAND_EHCA) += ehca/
4obj-$(CONFIG_INFINIBAND_CXGB3) += cxgb3/ 3obj-$(CONFIG_INFINIBAND_CXGB3) += cxgb3/
5obj-$(CONFIG_INFINIBAND_CXGB4) += cxgb4/ 4obj-$(CONFIG_INFINIBAND_CXGB4) += cxgb4/
6obj-$(CONFIG_MLX4_INFINIBAND) += mlx4/ 5obj-$(CONFIG_MLX4_INFINIBAND) += mlx4/
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index dc439a40da3f..403bd29443b8 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -3095,7 +3095,7 @@ out:
3095 3095
3096static int 3096static int
3097isert_setup_np(struct iscsi_np *np, 3097isert_setup_np(struct iscsi_np *np,
3098 struct __kernel_sockaddr_storage *ksockaddr) 3098 struct sockaddr_storage *ksockaddr)
3099{ 3099{
3100 struct isert_np *isert_np; 3100 struct isert_np *isert_np;
3101 struct rdma_cm_id *isert_lid; 3101 struct rdma_cm_id *isert_lid;
@@ -3117,7 +3117,7 @@ isert_setup_np(struct iscsi_np *np,
3117 * in iscsi_target_configfs.c code.. 3117 * in iscsi_target_configfs.c code..
3118 */ 3118 */
3119 memcpy(&np->np_sockaddr, ksockaddr, 3119 memcpy(&np->np_sockaddr, ksockaddr,
3120 sizeof(struct __kernel_sockaddr_storage)); 3120 sizeof(struct sockaddr_storage));
3121 3121
3122 isert_lid = isert_setup_id(isert_np); 3122 isert_lid = isert_setup_id(isert_np);
3123 if (IS_ERR(isert_lid)) { 3123 if (IS_ERR(isert_lid)) {
@@ -3199,32 +3199,11 @@ isert_set_conn_info(struct iscsi_np *np, struct iscsi_conn *conn,
3199{ 3199{
3200 struct rdma_cm_id *cm_id = isert_conn->cm_id; 3200 struct rdma_cm_id *cm_id = isert_conn->cm_id;
3201 struct rdma_route *cm_route = &cm_id->route; 3201 struct rdma_route *cm_route = &cm_id->route;
3202 struct sockaddr_in *sock_in;
3203 struct sockaddr_in6 *sock_in6;
3204 3202
3205 conn->login_family = np->np_sockaddr.ss_family; 3203 conn->login_family = np->np_sockaddr.ss_family;
3206 3204
3207 if (np->np_sockaddr.ss_family == AF_INET6) { 3205 conn->login_sockaddr = cm_route->addr.dst_addr;
3208 sock_in6 = (struct sockaddr_in6 *)&cm_route->addr.dst_addr; 3206 conn->local_sockaddr = cm_route->addr.src_addr;
3209 snprintf(conn->login_ip, sizeof(conn->login_ip), "%pI6c",
3210 &sock_in6->sin6_addr.in6_u);
3211 conn->login_port = ntohs(sock_in6->sin6_port);
3212
3213 sock_in6 = (struct sockaddr_in6 *)&cm_route->addr.src_addr;
3214 snprintf(conn->local_ip, sizeof(conn->local_ip), "%pI6c",
3215 &sock_in6->sin6_addr.in6_u);
3216 conn->local_port = ntohs(sock_in6->sin6_port);
3217 } else {
3218 sock_in = (struct sockaddr_in *)&cm_route->addr.dst_addr;
3219 sprintf(conn->login_ip, "%pI4",
3220 &sock_in->sin_addr.s_addr);
3221 conn->login_port = ntohs(sock_in->sin_port);
3222
3223 sock_in = (struct sockaddr_in *)&cm_route->addr.src_addr;
3224 sprintf(conn->local_ip, "%pI4",
3225 &sock_in->sin_addr.s_addr);
3226 conn->local_port = ntohs(sock_in->sin_port);
3227 }
3228} 3207}
3229 3208
3230static int 3209static int
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index 9d35499faca4..08d496411f75 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -290,19 +290,14 @@ static int evdev_flush(struct file *file, fl_owner_t id)
290{ 290{
291 struct evdev_client *client = file->private_data; 291 struct evdev_client *client = file->private_data;
292 struct evdev *evdev = client->evdev; 292 struct evdev *evdev = client->evdev;
293 int retval;
294 293
295 retval = mutex_lock_interruptible(&evdev->mutex); 294 mutex_lock(&evdev->mutex);
296 if (retval)
297 return retval;
298 295
299 if (!evdev->exist || client->revoked) 296 if (evdev->exist && !client->revoked)
300 retval = -ENODEV; 297 input_flush_device(&evdev->handle, file);
301 else
302 retval = input_flush_device(&evdev->handle, file);
303 298
304 mutex_unlock(&evdev->mutex); 299 mutex_unlock(&evdev->mutex);
305 return retval; 300 return 0;
306} 301}
307 302
308static void evdev_free(struct device *dev) 303static void evdev_free(struct device *dev)
diff --git a/drivers/input/keyboard/imx_keypad.c b/drivers/input/keyboard/imx_keypad.c
index d2ea863d6a45..2165f3dd328b 100644
--- a/drivers/input/keyboard/imx_keypad.c
+++ b/drivers/input/keyboard/imx_keypad.c
@@ -5,8 +5,6 @@
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as 6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation. 7 * published by the Free Software Foundation.
8 *
9 * <<Power management needs to be implemented>>.
10 */ 8 */
11 9
12#include <linux/clk.h> 10#include <linux/clk.h>
diff --git a/drivers/input/misc/ab8500-ponkey.c b/drivers/input/misc/ab8500-ponkey.c
index 1f7e15ca5fbe..4f5ef5bb535b 100644
--- a/drivers/input/misc/ab8500-ponkey.c
+++ b/drivers/input/misc/ab8500-ponkey.c
@@ -118,6 +118,7 @@ static const struct of_device_id ab8500_ponkey_match[] = {
118 { .compatible = "stericsson,ab8500-ponkey", }, 118 { .compatible = "stericsson,ab8500-ponkey", },
119 {} 119 {}
120}; 120};
121MODULE_DEVICE_TABLE(of, ab8500_ponkey_match);
121#endif 122#endif
122 123
123static struct platform_driver ab8500_ponkey_driver = { 124static struct platform_driver ab8500_ponkey_driver = {
diff --git a/drivers/input/misc/pwm-beeper.c b/drivers/input/misc/pwm-beeper.c
index e82edf810d1f..f2261ab54701 100644
--- a/drivers/input/misc/pwm-beeper.c
+++ b/drivers/input/misc/pwm-beeper.c
@@ -173,6 +173,7 @@ static const struct of_device_id pwm_beeper_match[] = {
173 { .compatible = "pwm-beeper", }, 173 { .compatible = "pwm-beeper", },
174 { }, 174 { },
175}; 175};
176MODULE_DEVICE_TABLE(of, pwm_beeper_match);
176#endif 177#endif
177 178
178static struct platform_driver pwm_beeper_driver = { 179static struct platform_driver pwm_beeper_driver = {
diff --git a/drivers/input/misc/regulator-haptic.c b/drivers/input/misc/regulator-haptic.c
index 6bf3f1082f71..a804705eb04a 100644
--- a/drivers/input/misc/regulator-haptic.c
+++ b/drivers/input/misc/regulator-haptic.c
@@ -249,6 +249,7 @@ static const struct of_device_id regulator_haptic_dt_match[] = {
249 { .compatible = "regulator-haptic" }, 249 { .compatible = "regulator-haptic" },
250 { /* sentinel */ }, 250 { /* sentinel */ },
251}; 251};
252MODULE_DEVICE_TABLE(of, regulator_haptic_dt_match);
252 253
253static struct platform_driver regulator_haptic_driver = { 254static struct platform_driver regulator_haptic_driver = {
254 .probe = regulator_haptic_probe, 255 .probe = regulator_haptic_probe,
diff --git a/drivers/input/misc/sparcspkr.c b/drivers/input/misc/sparcspkr.c
index 54116e544c96..6f997aa49183 100644
--- a/drivers/input/misc/sparcspkr.c
+++ b/drivers/input/misc/sparcspkr.c
@@ -253,6 +253,7 @@ static const struct of_device_id bbc_beep_match[] = {
253 }, 253 },
254 {}, 254 {},
255}; 255};
256MODULE_DEVICE_TABLE(of, bbc_beep_match);
256 257
257static struct platform_driver bbc_beep_driver = { 258static struct platform_driver bbc_beep_driver = {
258 .driver = { 259 .driver = {
@@ -332,6 +333,7 @@ static const struct of_device_id grover_beep_match[] = {
332 }, 333 },
333 {}, 334 {},
334}; 335};
336MODULE_DEVICE_TABLE(of, grover_beep_match);
335 337
336static struct platform_driver grover_beep_driver = { 338static struct platform_driver grover_beep_driver = {
337 .driver = { 339 .driver = {
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index e2b7420eed97..fa945304b9a5 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -1170,6 +1170,7 @@ static const struct acpi_device_id elan_acpi_id[] = {
1170 { "ELAN0000", 0 }, 1170 { "ELAN0000", 0 },
1171 { "ELAN0100", 0 }, 1171 { "ELAN0100", 0 },
1172 { "ELAN0600", 0 }, 1172 { "ELAN0600", 0 },
1173 { "ELAN1000", 0 },
1173 { } 1174 { }
1174}; 1175};
1175MODULE_DEVICE_TABLE(acpi, elan_acpi_id); 1176MODULE_DEVICE_TABLE(acpi, elan_acpi_id);
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index c9c98f0ab284..db91de539ee3 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -877,7 +877,7 @@ static int __init i8042_check_aux(void)
877static int i8042_controller_check(void) 877static int i8042_controller_check(void)
878{ 878{
879 if (i8042_flush()) { 879 if (i8042_flush()) {
880 pr_err("No controller found\n"); 880 pr_info("No controller found\n");
881 return -ENODEV; 881 return -ENODEV;
882 } 882 }
883 883
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index 059edeb7f04a..600dcceff542 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -479,6 +479,18 @@ config TOUCHSCREEN_MTOUCH
479 To compile this driver as a module, choose M here: the 479 To compile this driver as a module, choose M here: the
480 module will be called mtouch. 480 module will be called mtouch.
481 481
482config TOUCHSCREEN_IMX6UL_TSC
483 tristate "Freescale i.MX6UL touchscreen controller"
484 depends on (OF && GPIOLIB) || COMPILE_TEST
485 help
486 Say Y here if you have a Freescale i.MX6UL, and want to
487 use the internal touchscreen controller.
488
489 If unsure, say N.
490
491 To compile this driver as a module, choose M here: the
492 module will be called imx6ul_tsc.
493
482config TOUCHSCREEN_INEXIO 494config TOUCHSCREEN_INEXIO
483 tristate "iNexio serial touchscreens" 495 tristate "iNexio serial touchscreens"
484 select SERIO 496 select SERIO
@@ -1040,4 +1052,16 @@ config TOUCHSCREEN_ZFORCE
1040 To compile this driver as a module, choose M here: the 1052 To compile this driver as a module, choose M here: the
1041 module will be called zforce_ts. 1053 module will be called zforce_ts.
1042 1054
1055config TOUCHSCREEN_COLIBRI_VF50
1056 tristate "Toradex Colibri on board touchscreen driver"
1057 depends on GPIOLIB && IIO && VF610_ADC
1058 help
1059 Say Y here if you have a Colibri VF50 and plan to use
1060 the on-board provided 4-wire touchscreen driver.
1061
1062 If unsure, say N.
1063
1064 To compile this driver as a module, choose M here: the
1065 module will be called colibri_vf50_ts.
1066
1043endif 1067endif
diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile
index c85aae23e7f8..1b79cc09744a 100644
--- a/drivers/input/touchscreen/Makefile
+++ b/drivers/input/touchscreen/Makefile
@@ -38,6 +38,7 @@ obj-$(CONFIG_TOUCHSCREEN_EGALAX) += egalax_ts.o
38obj-$(CONFIG_TOUCHSCREEN_FUJITSU) += fujitsu_ts.o 38obj-$(CONFIG_TOUCHSCREEN_FUJITSU) += fujitsu_ts.o
39obj-$(CONFIG_TOUCHSCREEN_GOODIX) += goodix.o 39obj-$(CONFIG_TOUCHSCREEN_GOODIX) += goodix.o
40obj-$(CONFIG_TOUCHSCREEN_ILI210X) += ili210x.o 40obj-$(CONFIG_TOUCHSCREEN_ILI210X) += ili210x.o
41obj-$(CONFIG_TOUCHSCREEN_IMX6UL_TSC) += imx6ul_tsc.o
41obj-$(CONFIG_TOUCHSCREEN_INEXIO) += inexio.o 42obj-$(CONFIG_TOUCHSCREEN_INEXIO) += inexio.o
42obj-$(CONFIG_TOUCHSCREEN_INTEL_MID) += intel-mid-touch.o 43obj-$(CONFIG_TOUCHSCREEN_INTEL_MID) += intel-mid-touch.o
43obj-$(CONFIG_TOUCHSCREEN_IPROC) += bcm_iproc_tsc.o 44obj-$(CONFIG_TOUCHSCREEN_IPROC) += bcm_iproc_tsc.o
@@ -85,3 +86,4 @@ obj-$(CONFIG_TOUCHSCREEN_W90X900) += w90p910_ts.o
85obj-$(CONFIG_TOUCHSCREEN_SX8654) += sx8654.o 86obj-$(CONFIG_TOUCHSCREEN_SX8654) += sx8654.o
86obj-$(CONFIG_TOUCHSCREEN_TPS6507X) += tps6507x-ts.o 87obj-$(CONFIG_TOUCHSCREEN_TPS6507X) += tps6507x-ts.o
87obj-$(CONFIG_TOUCHSCREEN_ZFORCE) += zforce_ts.o 88obj-$(CONFIG_TOUCHSCREEN_ZFORCE) += zforce_ts.o
89obj-$(CONFIG_TOUCHSCREEN_COLIBRI_VF50) += colibri-vf50-ts.o
diff --git a/drivers/input/touchscreen/colibri-vf50-ts.c b/drivers/input/touchscreen/colibri-vf50-ts.c
new file mode 100644
index 000000000000..5d4903a402cc
--- /dev/null
+++ b/drivers/input/touchscreen/colibri-vf50-ts.c
@@ -0,0 +1,386 @@
1/*
2 * Toradex Colibri VF50 Touchscreen driver
3 *
4 * Copyright 2015 Toradex AG
5 *
6 * Originally authored by Stefan Agner for 3.0 kernel
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */
13
14#include <linux/delay.h>
15#include <linux/err.h>
16#include <linux/gpio.h>
17#include <linux/gpio/consumer.h>
18#include <linux/iio/consumer.h>
19#include <linux/iio/types.h>
20#include <linux/input.h>
21#include <linux/interrupt.h>
22#include <linux/kernel.h>
23#include <linux/module.h>
24#include <linux/pinctrl/consumer.h>
25#include <linux/platform_device.h>
26#include <linux/slab.h>
27#include <linux/types.h>
28
29#define DRIVER_NAME "colibri-vf50-ts"
30#define DRV_VERSION "1.0"
31
32#define VF_ADC_MAX ((1 << 12) - 1)
33
34#define COLI_TOUCH_MIN_DELAY_US 1000
35#define COLI_TOUCH_MAX_DELAY_US 2000
36#define COLI_PULLUP_MIN_DELAY_US 10000
37#define COLI_PULLUP_MAX_DELAY_US 11000
38#define COLI_TOUCH_NO_OF_AVGS 5
39#define COLI_TOUCH_REQ_ADC_CHAN 4
40
41struct vf50_touch_device {
42 struct platform_device *pdev;
43 struct input_dev *ts_input;
44 struct iio_channel *channels;
45 struct gpio_desc *gpio_xp;
46 struct gpio_desc *gpio_xm;
47 struct gpio_desc *gpio_yp;
48 struct gpio_desc *gpio_ym;
49 int pen_irq;
50 int min_pressure;
51 bool stop_touchscreen;
52};
53
54/*
55 * Enables given plates and measures touch parameters using ADC
56 */
57static int adc_ts_measure(struct iio_channel *channel,
58 struct gpio_desc *plate_p, struct gpio_desc *plate_m)
59{
60 int i, value = 0, val = 0;
61 int error;
62
63 gpiod_set_value(plate_p, 1);
64 gpiod_set_value(plate_m, 1);
65
66 usleep_range(COLI_TOUCH_MIN_DELAY_US, COLI_TOUCH_MAX_DELAY_US);
67
68 for (i = 0; i < COLI_TOUCH_NO_OF_AVGS; i++) {
69 error = iio_read_channel_raw(channel, &val);
70 if (error < 0) {
71 value = error;
72 goto error_iio_read;
73 }
74
75 value += val;
76 }
77
78 value /= COLI_TOUCH_NO_OF_AVGS;
79
80error_iio_read:
81 gpiod_set_value(plate_p, 0);
82 gpiod_set_value(plate_m, 0);
83
84 return value;
85}
86
87/*
88 * Enable touch detection using falling edge detection on XM
89 */
90static void vf50_ts_enable_touch_detection(struct vf50_touch_device *vf50_ts)
91{
92 /* Enable plate YM (needs to be strong GND, high active) */
93 gpiod_set_value(vf50_ts->gpio_ym, 1);
94
95 /*
96 * Let the platform mux to idle state in order to enable
97 * Pull-Up on GPIO
98 */
99 pinctrl_pm_select_idle_state(&vf50_ts->pdev->dev);
100
101 /* Wait for the pull-up to be stable on high */
102 usleep_range(COLI_PULLUP_MIN_DELAY_US, COLI_PULLUP_MAX_DELAY_US);
103}
104
105/*
106 * ADC touch screen sampling bottom half irq handler
107 */
108static irqreturn_t vf50_ts_irq_bh(int irq, void *private)
109{
110 struct vf50_touch_device *vf50_ts = private;
111 struct device *dev = &vf50_ts->pdev->dev;
112 int val_x, val_y, val_z1, val_z2, val_p = 0;
113 bool discard_val_on_start = true;
114
115 /* Disable the touch detection plates */
116 gpiod_set_value(vf50_ts->gpio_ym, 0);
117
118 /* Let the platform mux to default state in order to mux as ADC */
119 pinctrl_pm_select_default_state(dev);
120
121 while (!vf50_ts->stop_touchscreen) {
122 /* X-Direction */
123 val_x = adc_ts_measure(&vf50_ts->channels[0],
124 vf50_ts->gpio_xp, vf50_ts->gpio_xm);
125 if (val_x < 0)
126 break;
127
128 /* Y-Direction */
129 val_y = adc_ts_measure(&vf50_ts->channels[1],
130 vf50_ts->gpio_yp, vf50_ts->gpio_ym);
131 if (val_y < 0)
132 break;
133
134 /*
135 * Touch pressure
136 * Measure on XP/YM
137 */
138 val_z1 = adc_ts_measure(&vf50_ts->channels[2],
139 vf50_ts->gpio_yp, vf50_ts->gpio_xm);
140 if (val_z1 < 0)
141 break;
142 val_z2 = adc_ts_measure(&vf50_ts->channels[3],
143 vf50_ts->gpio_yp, vf50_ts->gpio_xm);
144 if (val_z2 < 0)
145 break;
146
147 /* Validate signal (avoid calculation using noise) */
148 if (val_z1 > 64 && val_x > 64) {
149 /*
150 * Calculate resistance between the plates
151 * lower resistance means higher pressure
152 */
153 int r_x = (1000 * val_x) / VF_ADC_MAX;
154
155 val_p = (r_x * val_z2) / val_z1 - r_x;
156
157 } else {
158 val_p = 2000;
159 }
160
161 val_p = 2000 - val_p;
162 dev_dbg(dev,
163 "Measured values: x: %d, y: %d, z1: %d, z2: %d, p: %d\n",
164 val_x, val_y, val_z1, val_z2, val_p);
165
166 /*
167 * If touch pressure is too low, stop measuring and reenable
168 * touch detection
169 */
170 if (val_p < vf50_ts->min_pressure || val_p > 2000)
171 break;
172
173 /*
174 * The pressure may not be enough for the first x and the
175 * second y measurement, but, the pressure is ok when the
176 * driver is doing the third and fourth measurement. To
177 * take care of this, we drop the first measurement always.
178 */
179 if (discard_val_on_start) {
180 discard_val_on_start = false;
181 } else {
182 /*
183 * Report touch position and sleep for
184 * the next measurement.
185 */
186 input_report_abs(vf50_ts->ts_input,
187 ABS_X, VF_ADC_MAX - val_x);
188 input_report_abs(vf50_ts->ts_input,
189 ABS_Y, VF_ADC_MAX - val_y);
190 input_report_abs(vf50_ts->ts_input,
191 ABS_PRESSURE, val_p);
192 input_report_key(vf50_ts->ts_input, BTN_TOUCH, 1);
193 input_sync(vf50_ts->ts_input);
194 }
195
196 usleep_range(COLI_PULLUP_MIN_DELAY_US,
197 COLI_PULLUP_MAX_DELAY_US);
198 }
199
200 /* Report no more touch, re-enable touch detection */
201 input_report_abs(vf50_ts->ts_input, ABS_PRESSURE, 0);
202 input_report_key(vf50_ts->ts_input, BTN_TOUCH, 0);
203 input_sync(vf50_ts->ts_input);
204
205 vf50_ts_enable_touch_detection(vf50_ts);
206
207 return IRQ_HANDLED;
208}
209
210static int vf50_ts_open(struct input_dev *dev_input)
211{
212 struct vf50_touch_device *touchdev = input_get_drvdata(dev_input);
213 struct device *dev = &touchdev->pdev->dev;
214
215 dev_dbg(dev, "Input device %s opened, starting touch detection\n",
216 dev_input->name);
217
218 touchdev->stop_touchscreen = false;
219
220 /* Mux detection before request IRQ, wait for pull-up to settle */
221 vf50_ts_enable_touch_detection(touchdev);
222
223 return 0;
224}
225
226static void vf50_ts_close(struct input_dev *dev_input)
227{
228 struct vf50_touch_device *touchdev = input_get_drvdata(dev_input);
229 struct device *dev = &touchdev->pdev->dev;
230
231 touchdev->stop_touchscreen = true;
232
233 /* Make sure IRQ is not running past close */
234 mb();
235 synchronize_irq(touchdev->pen_irq);
236
237 gpiod_set_value(touchdev->gpio_ym, 0);
238 pinctrl_pm_select_default_state(dev);
239
240 dev_dbg(dev, "Input device %s closed, disable touch detection\n",
241 dev_input->name);
242}
243
244static int vf50_ts_get_gpiod(struct device *dev, struct gpio_desc **gpio_d,
245 const char *con_id, enum gpiod_flags flags)
246{
247 int error;
248
249 *gpio_d = devm_gpiod_get(dev, con_id, flags);
250 if (IS_ERR(*gpio_d)) {
251 error = PTR_ERR(*gpio_d);
252 dev_err(dev, "Could not get gpio_%s %d\n", con_id, error);
253 return error;
254 }
255
256 return 0;
257}
258
259static void vf50_ts_channel_release(void *data)
260{
261 struct iio_channel *channels = data;
262
263 iio_channel_release_all(channels);
264}
265
266static int vf50_ts_probe(struct platform_device *pdev)
267{
268 struct input_dev *input;
269 struct iio_channel *channels;
270 struct device *dev = &pdev->dev;
271 struct vf50_touch_device *touchdev;
272 int num_adc_channels;
273 int error;
274
275 channels = iio_channel_get_all(dev);
276 if (IS_ERR(channels))
277 return PTR_ERR(channels);
278
279 error = devm_add_action(dev, vf50_ts_channel_release, channels);
280 if (error) {
281 iio_channel_release_all(channels);
282 dev_err(dev, "Failed to register iio channel release action");
283 return error;
284 }
285
286 num_adc_channels = 0;
287 while (channels[num_adc_channels].indio_dev)
288 num_adc_channels++;
289
290 if (num_adc_channels != COLI_TOUCH_REQ_ADC_CHAN) {
291 dev_err(dev, "Inadequate ADC channels specified\n");
292 return -EINVAL;
293 }
294
295 touchdev = devm_kzalloc(dev, sizeof(*touchdev), GFP_KERNEL);
296 if (!touchdev)
297 return -ENOMEM;
298
299 touchdev->pdev = pdev;
300 touchdev->channels = channels;
301
302 error = of_property_read_u32(dev->of_node, "vf50-ts-min-pressure",
303 &touchdev->min_pressure);
304 if (error)
305 return error;
306
307 input = devm_input_allocate_device(dev);
308 if (!input) {
309 dev_err(dev, "Failed to allocate TS input device\n");
310 return -ENOMEM;
311 }
312
313 platform_set_drvdata(pdev, touchdev);
314
315 input->name = DRIVER_NAME;
316 input->id.bustype = BUS_HOST;
317 input->dev.parent = dev;
318 input->open = vf50_ts_open;
319 input->close = vf50_ts_close;
320
321 input_set_capability(input, EV_KEY, BTN_TOUCH);
322 input_set_abs_params(input, ABS_X, 0, VF_ADC_MAX, 0, 0);
323 input_set_abs_params(input, ABS_Y, 0, VF_ADC_MAX, 0, 0);
324 input_set_abs_params(input, ABS_PRESSURE, 0, VF_ADC_MAX, 0, 0);
325
326 touchdev->ts_input = input;
327 input_set_drvdata(input, touchdev);
328
329 error = input_register_device(input);
330 if (error) {
331 dev_err(dev, "Failed to register input device\n");
332 return error;
333 }
334
335 error = vf50_ts_get_gpiod(dev, &touchdev->gpio_xp, "xp", GPIOD_OUT_LOW);
336 if (error)
337 return error;
338
339 error = vf50_ts_get_gpiod(dev, &touchdev->gpio_xm,
340 "xm", GPIOD_OUT_LOW);
341 if (error)
342 return error;
343
344 error = vf50_ts_get_gpiod(dev, &touchdev->gpio_yp, "yp", GPIOD_OUT_LOW);
345 if (error)
346 return error;
347
348 error = vf50_ts_get_gpiod(dev, &touchdev->gpio_ym, "ym", GPIOD_OUT_LOW);
349 if (error)
350 return error;
351
352 touchdev->pen_irq = platform_get_irq(pdev, 0);
353 if (touchdev->pen_irq < 0)
354 return touchdev->pen_irq;
355
356 error = devm_request_threaded_irq(dev, touchdev->pen_irq,
357 NULL, vf50_ts_irq_bh, IRQF_ONESHOT,
358 "vf50 touch", touchdev);
359 if (error) {
360 dev_err(dev, "Failed to request IRQ %d: %d\n",
361 touchdev->pen_irq, error);
362 return error;
363 }
364
365 return 0;
366}
367
368static const struct of_device_id vf50_touch_of_match[] = {
369 { .compatible = "toradex,vf50-touchscreen", },
370 { }
371};
372MODULE_DEVICE_TABLE(of, vf50_touch_of_match);
373
374static struct platform_driver vf50_touch_driver = {
375 .driver = {
376 .name = "toradex,vf50_touchctrl",
377 .of_match_table = vf50_touch_of_match,
378 },
379 .probe = vf50_ts_probe,
380};
381module_platform_driver(vf50_touch_driver);
382
383MODULE_AUTHOR("Sanchayan Maity");
384MODULE_DESCRIPTION("Colibri VF50 Touchscreen driver");
385MODULE_LICENSE("GPL");
386MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/input/touchscreen/cyttsp4_i2c.c b/drivers/input/touchscreen/cyttsp4_i2c.c
index 9a323dd915de..a9f95c7d3c00 100644
--- a/drivers/input/touchscreen/cyttsp4_i2c.c
+++ b/drivers/input/touchscreen/cyttsp4_i2c.c
@@ -86,4 +86,3 @@ module_i2c_driver(cyttsp4_i2c_driver);
86MODULE_LICENSE("GPL"); 86MODULE_LICENSE("GPL");
87MODULE_DESCRIPTION("Cypress TrueTouch(R) Standard Product (TTSP) I2C driver"); 87MODULE_DESCRIPTION("Cypress TrueTouch(R) Standard Product (TTSP) I2C driver");
88MODULE_AUTHOR("Cypress"); 88MODULE_AUTHOR("Cypress");
89MODULE_ALIAS("i2c:cyttsp4");
diff --git a/drivers/input/touchscreen/cyttsp_i2c.c b/drivers/input/touchscreen/cyttsp_i2c.c
index 519e2de2f8df..eee51b3f2e3f 100644
--- a/drivers/input/touchscreen/cyttsp_i2c.c
+++ b/drivers/input/touchscreen/cyttsp_i2c.c
@@ -86,4 +86,3 @@ module_i2c_driver(cyttsp_i2c_driver);
86MODULE_LICENSE("GPL"); 86MODULE_LICENSE("GPL");
87MODULE_DESCRIPTION("Cypress TrueTouch(R) Standard Product (TTSP) I2C driver"); 87MODULE_DESCRIPTION("Cypress TrueTouch(R) Standard Product (TTSP) I2C driver");
88MODULE_AUTHOR("Cypress"); 88MODULE_AUTHOR("Cypress");
89MODULE_ALIAS("i2c:cyttsp");
diff --git a/drivers/input/touchscreen/elants_i2c.c b/drivers/input/touchscreen/elants_i2c.c
index ddac134b25b1..17cc20ef4923 100644
--- a/drivers/input/touchscreen/elants_i2c.c
+++ b/drivers/input/touchscreen/elants_i2c.c
@@ -102,7 +102,7 @@
102#define ELAN_FW_PAGESIZE 132 102#define ELAN_FW_PAGESIZE 132
103 103
104/* calibration timeout definition */ 104/* calibration timeout definition */
105#define ELAN_CALI_TIMEOUT_MSEC 10000 105#define ELAN_CALI_TIMEOUT_MSEC 12000
106 106
107#define ELAN_POWERON_DELAY_USEC 500 107#define ELAN_POWERON_DELAY_USEC 500
108#define ELAN_RESET_DELAY_MSEC 20 108#define ELAN_RESET_DELAY_MSEC 20
diff --git a/drivers/input/touchscreen/imx6ul_tsc.c b/drivers/input/touchscreen/imx6ul_tsc.c
new file mode 100644
index 000000000000..ff0b75813daa
--- /dev/null
+++ b/drivers/input/touchscreen/imx6ul_tsc.c
@@ -0,0 +1,523 @@
1/*
2 * Freescale i.MX6UL touchscreen controller driver
3 *
4 * Copyright (C) 2015 Freescale Semiconductor, Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#include <linux/errno.h>
12#include <linux/kernel.h>
13#include <linux/module.h>
14#include <linux/gpio/consumer.h>
15#include <linux/input.h>
16#include <linux/slab.h>
17#include <linux/completion.h>
18#include <linux/delay.h>
19#include <linux/of.h>
20#include <linux/interrupt.h>
21#include <linux/platform_device.h>
22#include <linux/clk.h>
23#include <linux/io.h>
24
25/* ADC configuration registers field define */
26#define ADC_AIEN (0x1 << 7)
27#define ADC_CONV_DISABLE 0x1F
28#define ADC_CAL (0x1 << 7)
29#define ADC_CALF 0x2
30#define ADC_12BIT_MODE (0x2 << 2)
31#define ADC_IPG_CLK 0x00
32#define ADC_CLK_DIV_8 (0x03 << 5)
33#define ADC_SHORT_SAMPLE_MODE (0x0 << 4)
34#define ADC_HARDWARE_TRIGGER (0x1 << 13)
35#define SELECT_CHANNEL_4 0x04
36#define SELECT_CHANNEL_1 0x01
37#define DISABLE_CONVERSION_INT (0x0 << 7)
38
39/* ADC registers */
40#define REG_ADC_HC0 0x00
41#define REG_ADC_HC1 0x04
42#define REG_ADC_HC2 0x08
43#define REG_ADC_HC3 0x0C
44#define REG_ADC_HC4 0x10
45#define REG_ADC_HS 0x14
46#define REG_ADC_R0 0x18
47#define REG_ADC_CFG 0x2C
48#define REG_ADC_GC 0x30
49#define REG_ADC_GS 0x34
50
51#define ADC_TIMEOUT msecs_to_jiffies(100)
52
53/* TSC registers */
54#define REG_TSC_BASIC_SETING 0x00
55#define REG_TSC_PRE_CHARGE_TIME 0x10
56#define REG_TSC_FLOW_CONTROL 0x20
57#define REG_TSC_MEASURE_VALUE 0x30
58#define REG_TSC_INT_EN 0x40
59#define REG_TSC_INT_SIG_EN 0x50
60#define REG_TSC_INT_STATUS 0x60
61#define REG_TSC_DEBUG_MODE 0x70
62#define REG_TSC_DEBUG_MODE2 0x80
63
64/* TSC configuration registers field define */
65#define DETECT_4_WIRE_MODE (0x0 << 4)
66#define AUTO_MEASURE 0x1
67#define MEASURE_SIGNAL 0x1
68#define DETECT_SIGNAL (0x1 << 4)
69#define VALID_SIGNAL (0x1 << 8)
70#define MEASURE_INT_EN 0x1
71#define MEASURE_SIG_EN 0x1
72#define VALID_SIG_EN (0x1 << 8)
73#define DE_GLITCH_2 (0x2 << 29)
74#define START_SENSE (0x1 << 12)
75#define TSC_DISABLE (0x1 << 16)
76#define DETECT_MODE 0x2
77
78struct imx6ul_tsc {
79 struct device *dev;
80 struct input_dev *input;
81 void __iomem *tsc_regs;
82 void __iomem *adc_regs;
83 struct clk *tsc_clk;
84 struct clk *adc_clk;
85 struct gpio_desc *xnur_gpio;
86
87 int measure_delay_time;
88 int pre_charge_time;
89
90 struct completion completion;
91};
92
93/*
94 * TSC module need ADC to get the measure value. So
95 * before config TSC, we should initialize ADC module.
96 */
97static void imx6ul_adc_init(struct imx6ul_tsc *tsc)
98{
99 int adc_hc = 0;
100 int adc_gc;
101 int adc_gs;
102 int adc_cfg;
103 int timeout;
104
105 reinit_completion(&tsc->completion);
106
107 adc_cfg = readl(tsc->adc_regs + REG_ADC_CFG);
108 adc_cfg |= ADC_12BIT_MODE | ADC_IPG_CLK;
109 adc_cfg |= ADC_CLK_DIV_8 | ADC_SHORT_SAMPLE_MODE;
110 adc_cfg &= ~ADC_HARDWARE_TRIGGER;
111 writel(adc_cfg, tsc->adc_regs + REG_ADC_CFG);
112
113 /* enable calibration interrupt */
114 adc_hc |= ADC_AIEN;
115 adc_hc |= ADC_CONV_DISABLE;
116 writel(adc_hc, tsc->adc_regs + REG_ADC_HC0);
117
118 /* start ADC calibration */
119 adc_gc = readl(tsc->adc_regs + REG_ADC_GC);
120 adc_gc |= ADC_CAL;
121 writel(adc_gc, tsc->adc_regs + REG_ADC_GC);
122
123 timeout = wait_for_completion_timeout
124 (&tsc->completion, ADC_TIMEOUT);
125 if (timeout == 0)
126 dev_err(tsc->dev, "Timeout for adc calibration\n");
127
128 adc_gs = readl(tsc->adc_regs + REG_ADC_GS);
129 if (adc_gs & ADC_CALF)
130 dev_err(tsc->dev, "ADC calibration failed\n");
131
132 /* TSC need the ADC work in hardware trigger */
133 adc_cfg = readl(tsc->adc_regs + REG_ADC_CFG);
134 adc_cfg |= ADC_HARDWARE_TRIGGER;
135 writel(adc_cfg, tsc->adc_regs + REG_ADC_CFG);
136}
137
138/*
139 * This is a TSC workaround. Currently TSC misconnect two
140 * ADC channels, this function remap channel configure for
141 * hardware trigger.
142 */
143static void imx6ul_tsc_channel_config(struct imx6ul_tsc *tsc)
144{
145 int adc_hc0, adc_hc1, adc_hc2, adc_hc3, adc_hc4;
146
147 adc_hc0 = DISABLE_CONVERSION_INT;
148 writel(adc_hc0, tsc->adc_regs + REG_ADC_HC0);
149
150 adc_hc1 = DISABLE_CONVERSION_INT | SELECT_CHANNEL_4;
151 writel(adc_hc1, tsc->adc_regs + REG_ADC_HC1);
152
153 adc_hc2 = DISABLE_CONVERSION_INT;
154 writel(adc_hc2, tsc->adc_regs + REG_ADC_HC2);
155
156 adc_hc3 = DISABLE_CONVERSION_INT | SELECT_CHANNEL_1;
157 writel(adc_hc3, tsc->adc_regs + REG_ADC_HC3);
158
159 adc_hc4 = DISABLE_CONVERSION_INT;
160 writel(adc_hc4, tsc->adc_regs + REG_ADC_HC4);
161}
162
163/*
164 * TSC setting, confige the pre-charge time and measure delay time.
165 * different touch screen may need different pre-charge time and
166 * measure delay time.
167 */
168static void imx6ul_tsc_set(struct imx6ul_tsc *tsc)
169{
170 int basic_setting = 0;
171 int start;
172
173 basic_setting |= tsc->measure_delay_time << 8;
174 basic_setting |= DETECT_4_WIRE_MODE | AUTO_MEASURE;
175 writel(basic_setting, tsc->tsc_regs + REG_TSC_BASIC_SETING);
176
177 writel(DE_GLITCH_2, tsc->tsc_regs + REG_TSC_DEBUG_MODE2);
178
179 writel(tsc->pre_charge_time, tsc->tsc_regs + REG_TSC_PRE_CHARGE_TIME);
180 writel(MEASURE_INT_EN, tsc->tsc_regs + REG_TSC_INT_EN);
181 writel(MEASURE_SIG_EN | VALID_SIG_EN,
182 tsc->tsc_regs + REG_TSC_INT_SIG_EN);
183
184 /* start sense detection */
185 start = readl(tsc->tsc_regs + REG_TSC_FLOW_CONTROL);
186 start |= START_SENSE;
187 start &= ~TSC_DISABLE;
188 writel(start, tsc->tsc_regs + REG_TSC_FLOW_CONTROL);
189}
190
191static void imx6ul_tsc_init(struct imx6ul_tsc *tsc)
192{
193 imx6ul_adc_init(tsc);
194 imx6ul_tsc_channel_config(tsc);
195 imx6ul_tsc_set(tsc);
196}
197
198static void imx6ul_tsc_disable(struct imx6ul_tsc *tsc)
199{
200 int tsc_flow;
201 int adc_cfg;
202
203 /* TSC controller enters to idle status */
204 tsc_flow = readl(tsc->tsc_regs + REG_TSC_FLOW_CONTROL);
205 tsc_flow |= TSC_DISABLE;
206 writel(tsc_flow, tsc->tsc_regs + REG_TSC_FLOW_CONTROL);
207
208 /* ADC controller enters to stop mode */
209 adc_cfg = readl(tsc->adc_regs + REG_ADC_HC0);
210 adc_cfg |= ADC_CONV_DISABLE;
211 writel(adc_cfg, tsc->adc_regs + REG_ADC_HC0);
212}
213
214/* Delay some time (max 2ms), wait the pre-charge done. */
215static bool tsc_wait_detect_mode(struct imx6ul_tsc *tsc)
216{
217 unsigned long timeout = jiffies + msecs_to_jiffies(2);
218 int state_machine;
219 int debug_mode2;
220
221 do {
222 if (time_after(jiffies, timeout))
223 return false;
224
225 usleep_range(200, 400);
226 debug_mode2 = readl(tsc->tsc_regs + REG_TSC_DEBUG_MODE2);
227 state_machine = (debug_mode2 >> 20) & 0x7;
228 } while (state_machine != DETECT_MODE);
229
230 usleep_range(200, 400);
231 return true;
232}
233
234static irqreturn_t tsc_irq_fn(int irq, void *dev_id)
235{
236 struct imx6ul_tsc *tsc = dev_id;
237 int status;
238 int value;
239 int x, y;
240 int start;
241
242 status = readl(tsc->tsc_regs + REG_TSC_INT_STATUS);
243
244 /* write 1 to clear the bit measure-signal */
245 writel(MEASURE_SIGNAL | DETECT_SIGNAL,
246 tsc->tsc_regs + REG_TSC_INT_STATUS);
247
248 /* It's a HW self-clean bit. Set this bit and start sense detection */
249 start = readl(tsc->tsc_regs + REG_TSC_FLOW_CONTROL);
250 start |= START_SENSE;
251 writel(start, tsc->tsc_regs + REG_TSC_FLOW_CONTROL);
252
253 if (status & MEASURE_SIGNAL) {
254 value = readl(tsc->tsc_regs + REG_TSC_MEASURE_VALUE);
255 x = (value >> 16) & 0x0fff;
256 y = value & 0x0fff;
257
258 /*
259 * In detect mode, we can get the xnur gpio value,
260 * otherwise assume contact is stiull active.
261 */
262 if (!tsc_wait_detect_mode(tsc) ||
263 gpiod_get_value_cansleep(tsc->xnur_gpio)) {
264 input_report_key(tsc->input, BTN_TOUCH, 1);
265 input_report_abs(tsc->input, ABS_X, x);
266 input_report_abs(tsc->input, ABS_Y, y);
267 } else {
268 input_report_key(tsc->input, BTN_TOUCH, 0);
269 }
270
271 input_sync(tsc->input);
272 }
273
274 return IRQ_HANDLED;
275}
276
277static irqreturn_t adc_irq_fn(int irq, void *dev_id)
278{
279 struct imx6ul_tsc *tsc = dev_id;
280 int coco;
281 int value;
282
283 coco = readl(tsc->adc_regs + REG_ADC_HS);
284 if (coco & 0x01) {
285 value = readl(tsc->adc_regs + REG_ADC_R0);
286 complete(&tsc->completion);
287 }
288
289 return IRQ_HANDLED;
290}
291
292static int imx6ul_tsc_open(struct input_dev *input_dev)
293{
294 struct imx6ul_tsc *tsc = input_get_drvdata(input_dev);
295 int err;
296
297 err = clk_prepare_enable(tsc->adc_clk);
298 if (err) {
299 dev_err(tsc->dev,
300 "Could not prepare or enable the adc clock: %d\n",
301 err);
302 return err;
303 }
304
305 err = clk_prepare_enable(tsc->tsc_clk);
306 if (err) {
307 dev_err(tsc->dev,
308 "Could not prepare or enable the tsc clock: %d\n",
309 err);
310 clk_disable_unprepare(tsc->adc_clk);
311 return err;
312 }
313
314 imx6ul_tsc_init(tsc);
315
316 return 0;
317}
318
319static void imx6ul_tsc_close(struct input_dev *input_dev)
320{
321 struct imx6ul_tsc *tsc = input_get_drvdata(input_dev);
322
323 imx6ul_tsc_disable(tsc);
324
325 clk_disable_unprepare(tsc->tsc_clk);
326 clk_disable_unprepare(tsc->adc_clk);
327}
328
329static int imx6ul_tsc_probe(struct platform_device *pdev)
330{
331 struct device_node *np = pdev->dev.of_node;
332 struct imx6ul_tsc *tsc;
333 struct input_dev *input_dev;
334 struct resource *tsc_mem;
335 struct resource *adc_mem;
336 int err;
337 int tsc_irq;
338 int adc_irq;
339
340 tsc = devm_kzalloc(&pdev->dev, sizeof(struct imx6ul_tsc), GFP_KERNEL);
341 if (!tsc)
342 return -ENOMEM;
343
344 input_dev = devm_input_allocate_device(&pdev->dev);
345 if (!input_dev)
346 return -ENOMEM;
347
348 input_dev->name = "iMX6UL TouchScreen Controller";
349 input_dev->id.bustype = BUS_HOST;
350
351 input_dev->open = imx6ul_tsc_open;
352 input_dev->close = imx6ul_tsc_close;
353
354 input_set_capability(input_dev, EV_KEY, BTN_TOUCH);
355 input_set_abs_params(input_dev, ABS_X, 0, 0xFFF, 0, 0);
356 input_set_abs_params(input_dev, ABS_Y, 0, 0xFFF, 0, 0);
357
358 input_set_drvdata(input_dev, tsc);
359
360 tsc->dev = &pdev->dev;
361 tsc->input = input_dev;
362 init_completion(&tsc->completion);
363
364 tsc->xnur_gpio = devm_gpiod_get(&pdev->dev, "xnur", GPIOD_IN);
365 if (IS_ERR(tsc->xnur_gpio)) {
366 err = PTR_ERR(tsc->xnur_gpio);
367 dev_err(&pdev->dev,
368 "failed to request GPIO tsc_X- (xnur): %d\n", err);
369 return err;
370 }
371
372 tsc_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
373 tsc->tsc_regs = devm_ioremap_resource(&pdev->dev, tsc_mem);
374 if (IS_ERR(tsc->tsc_regs)) {
375 err = PTR_ERR(tsc->tsc_regs);
376 dev_err(&pdev->dev, "failed to remap tsc memory: %d\n", err);
377 return err;
378 }
379
380 adc_mem = platform_get_resource(pdev, IORESOURCE_MEM, 1);
381 tsc->adc_regs = devm_ioremap_resource(&pdev->dev, adc_mem);
382 if (IS_ERR(tsc->adc_regs)) {
383 err = PTR_ERR(tsc->adc_regs);
384 dev_err(&pdev->dev, "failed to remap adc memory: %d\n", err);
385 return err;
386 }
387
388 tsc->tsc_clk = devm_clk_get(&pdev->dev, "tsc");
389 if (IS_ERR(tsc->tsc_clk)) {
390 err = PTR_ERR(tsc->tsc_clk);
391 dev_err(&pdev->dev, "failed getting tsc clock: %d\n", err);
392 return err;
393 }
394
395 tsc->adc_clk = devm_clk_get(&pdev->dev, "adc");
396 if (IS_ERR(tsc->adc_clk)) {
397 err = PTR_ERR(tsc->adc_clk);
398 dev_err(&pdev->dev, "failed getting adc clock: %d\n", err);
399 return err;
400 }
401
402 tsc_irq = platform_get_irq(pdev, 0);
403 if (tsc_irq < 0) {
404 dev_err(&pdev->dev, "no tsc irq resource?\n");
405 return tsc_irq;
406 }
407
408 adc_irq = platform_get_irq(pdev, 1);
409 if (adc_irq <= 0) {
410 dev_err(&pdev->dev, "no adc irq resource?\n");
411 return adc_irq;
412 }
413
414 err = devm_request_threaded_irq(tsc->dev, tsc_irq,
415 NULL, tsc_irq_fn, IRQF_ONESHOT,
416 dev_name(&pdev->dev), tsc);
417 if (err) {
418 dev_err(&pdev->dev,
419 "failed requesting tsc irq %d: %d\n",
420 tsc_irq, err);
421 return err;
422 }
423
424 err = devm_request_irq(tsc->dev, adc_irq, adc_irq_fn, 0,
425 dev_name(&pdev->dev), tsc);
426 if (err) {
427 dev_err(&pdev->dev,
428 "failed requesting adc irq %d: %d\n",
429 adc_irq, err);
430 return err;
431 }
432
433 err = of_property_read_u32(np, "measure-delay-time",
434 &tsc->measure_delay_time);
435 if (err)
436 tsc->measure_delay_time = 0xffff;
437
438 err = of_property_read_u32(np, "pre-charge-time",
439 &tsc->pre_charge_time);
440 if (err)
441 tsc->pre_charge_time = 0xfff;
442
443 err = input_register_device(tsc->input);
444 if (err) {
445 dev_err(&pdev->dev,
446 "failed to register input device: %d\n", err);
447 return err;
448 }
449
450 platform_set_drvdata(pdev, tsc);
451 return 0;
452}
453
454static int __maybe_unused imx6ul_tsc_suspend(struct device *dev)
455{
456 struct platform_device *pdev = to_platform_device(dev);
457 struct imx6ul_tsc *tsc = platform_get_drvdata(pdev);
458 struct input_dev *input_dev = tsc->input;
459
460 mutex_lock(&input_dev->mutex);
461
462 if (input_dev->users) {
463 imx6ul_tsc_disable(tsc);
464
465 clk_disable_unprepare(tsc->tsc_clk);
466 clk_disable_unprepare(tsc->adc_clk);
467 }
468
469 mutex_unlock(&input_dev->mutex);
470
471 return 0;
472}
473
474static int __maybe_unused imx6ul_tsc_resume(struct device *dev)
475{
476 struct platform_device *pdev = to_platform_device(dev);
477 struct imx6ul_tsc *tsc = platform_get_drvdata(pdev);
478 struct input_dev *input_dev = tsc->input;
479 int retval = 0;
480
481 mutex_lock(&input_dev->mutex);
482
483 if (input_dev->users) {
484 retval = clk_prepare_enable(tsc->adc_clk);
485 if (retval)
486 goto out;
487
488 retval = clk_prepare_enable(tsc->tsc_clk);
489 if (retval) {
490 clk_disable_unprepare(tsc->adc_clk);
491 goto out;
492 }
493
494 imx6ul_tsc_init(tsc);
495 }
496
497out:
498 mutex_unlock(&input_dev->mutex);
499 return retval;
500}
501
502static SIMPLE_DEV_PM_OPS(imx6ul_tsc_pm_ops,
503 imx6ul_tsc_suspend, imx6ul_tsc_resume);
504
505static const struct of_device_id imx6ul_tsc_match[] = {
506 { .compatible = "fsl,imx6ul-tsc", },
507 { /* sentinel */ }
508};
509MODULE_DEVICE_TABLE(of, imx6ul_tsc_match);
510
511static struct platform_driver imx6ul_tsc_driver = {
512 .driver = {
513 .name = "imx6ul-tsc",
514 .of_match_table = imx6ul_tsc_match,
515 .pm = &imx6ul_tsc_pm_ops,
516 },
517 .probe = imx6ul_tsc_probe,
518};
519module_platform_driver(imx6ul_tsc_driver);
520
521MODULE_AUTHOR("Haibo Chen <haibo.chen@freescale.com>");
522MODULE_DESCRIPTION("Freescale i.MX6UL Touchscreen controller driver");
523MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/touchscreen/sun4i-ts.c b/drivers/input/touchscreen/sun4i-ts.c
index c0116994067d..485794376ee5 100644
--- a/drivers/input/touchscreen/sun4i-ts.c
+++ b/drivers/input/touchscreen/sun4i-ts.c
@@ -191,7 +191,7 @@ static void sun4i_ts_close(struct input_dev *dev)
191 writel(TEMP_IRQ_EN(1), ts->base + TP_INT_FIFOC); 191 writel(TEMP_IRQ_EN(1), ts->base + TP_INT_FIFOC);
192} 192}
193 193
194static int sun4i_get_temp(const struct sun4i_ts_data *ts, long *temp) 194static int sun4i_get_temp(const struct sun4i_ts_data *ts, int *temp)
195{ 195{
196 /* No temp_data until the first irq */ 196 /* No temp_data until the first irq */
197 if (ts->temp_data == -1) 197 if (ts->temp_data == -1)
@@ -202,7 +202,7 @@ static int sun4i_get_temp(const struct sun4i_ts_data *ts, long *temp)
202 return 0; 202 return 0;
203} 203}
204 204
205static int sun4i_get_tz_temp(void *data, long *temp) 205static int sun4i_get_tz_temp(void *data, int *temp)
206{ 206{
207 return sun4i_get_temp(data, temp); 207 return sun4i_get_temp(data, temp);
208} 208}
@@ -215,14 +215,14 @@ static ssize_t show_temp(struct device *dev, struct device_attribute *devattr,
215 char *buf) 215 char *buf)
216{ 216{
217 struct sun4i_ts_data *ts = dev_get_drvdata(dev); 217 struct sun4i_ts_data *ts = dev_get_drvdata(dev);
218 long temp; 218 int temp;
219 int error; 219 int error;
220 220
221 error = sun4i_get_temp(ts, &temp); 221 error = sun4i_get_temp(ts, &temp);
222 if (error) 222 if (error)
223 return error; 223 return error;
224 224
225 return sprintf(buf, "%ld\n", temp); 225 return sprintf(buf, "%d\n", temp);
226} 226}
227 227
228static ssize_t show_temp_label(struct device *dev, 228static ssize_t show_temp_label(struct device *dev,
diff --git a/drivers/iommu/omap-iommu-debug.c b/drivers/iommu/omap-iommu-debug.c
index 0717aa96ce39..9bc20e2119a3 100644
--- a/drivers/iommu/omap-iommu-debug.c
+++ b/drivers/iommu/omap-iommu-debug.c
@@ -135,8 +135,9 @@ __dump_tlb_entries(struct omap_iommu *obj, struct cr_regs *crs, int num)
135static ssize_t iotlb_dump_cr(struct omap_iommu *obj, struct cr_regs *cr, 135static ssize_t iotlb_dump_cr(struct omap_iommu *obj, struct cr_regs *cr,
136 struct seq_file *s) 136 struct seq_file *s)
137{ 137{
138 return seq_printf(s, "%08x %08x %01x\n", cr->cam, cr->ram, 138 seq_printf(s, "%08x %08x %01x\n", cr->cam, cr->ram,
139 (cr->cam & MMU_CAM_P) ? 1 : 0); 139 (cr->cam & MMU_CAM_P) ? 1 : 0);
140 return 0;
140} 141}
141 142
142static size_t omap_dump_tlb_entries(struct omap_iommu *obj, struct seq_file *s) 143static size_t omap_dump_tlb_entries(struct omap_iommu *obj, struct seq_file *s)
diff --git a/drivers/irqchip/exynos-combiner.c b/drivers/irqchip/exynos-combiner.c
index e9c6f2a5b52d..cd7d3bc78e34 100644
--- a/drivers/irqchip/exynos-combiner.c
+++ b/drivers/irqchip/exynos-combiner.c
@@ -65,12 +65,10 @@ static void combiner_unmask_irq(struct irq_data *data)
65 __raw_writel(mask, combiner_base(data) + COMBINER_ENABLE_SET); 65 __raw_writel(mask, combiner_base(data) + COMBINER_ENABLE_SET);
66} 66}
67 67
68static void combiner_handle_cascade_irq(unsigned int __irq, 68static void combiner_handle_cascade_irq(struct irq_desc *desc)
69 struct irq_desc *desc)
70{ 69{
71 struct combiner_chip_data *chip_data = irq_desc_get_handler_data(desc); 70 struct combiner_chip_data *chip_data = irq_desc_get_handler_data(desc);
72 struct irq_chip *chip = irq_desc_get_chip(desc); 71 struct irq_chip *chip = irq_desc_get_chip(desc);
73 unsigned int irq = irq_desc_get_irq(desc);
74 unsigned int cascade_irq, combiner_irq; 72 unsigned int cascade_irq, combiner_irq;
75 unsigned long status; 73 unsigned long status;
76 74
@@ -88,7 +86,7 @@ static void combiner_handle_cascade_irq(unsigned int __irq,
88 cascade_irq = irq_find_mapping(combiner_irq_domain, combiner_irq); 86 cascade_irq = irq_find_mapping(combiner_irq_domain, combiner_irq);
89 87
90 if (unlikely(!cascade_irq)) 88 if (unlikely(!cascade_irq))
91 handle_bad_irq(irq, desc); 89 handle_bad_irq(desc);
92 else 90 else
93 generic_handle_irq(cascade_irq); 91 generic_handle_irq(cascade_irq);
94 92
@@ -165,7 +163,7 @@ static int combiner_irq_domain_map(struct irq_domain *d, unsigned int irq,
165 163
166 irq_set_chip_and_handler(irq, &combiner_chip, handle_level_irq); 164 irq_set_chip_and_handler(irq, &combiner_chip, handle_level_irq);
167 irq_set_chip_data(irq, &combiner_data[hw >> 3]); 165 irq_set_chip_data(irq, &combiner_data[hw >> 3]);
168 set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); 166 irq_set_probe(irq);
169 167
170 return 0; 168 return 0;
171} 169}
diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c
index 39b72da0c143..655cb967a1f2 100644
--- a/drivers/irqchip/irq-armada-370-xp.c
+++ b/drivers/irqchip/irq-armada-370-xp.c
@@ -200,7 +200,6 @@ static int armada_370_xp_msi_map(struct irq_domain *domain, unsigned int virq,
200{ 200{
201 irq_set_chip_and_handler(virq, &armada_370_xp_msi_irq_chip, 201 irq_set_chip_and_handler(virq, &armada_370_xp_msi_irq_chip,
202 handle_simple_irq); 202 handle_simple_irq);
203 set_irq_flags(virq, IRQF_VALID);
204 203
205 return 0; 204 return 0;
206} 205}
@@ -317,7 +316,7 @@ static int armada_370_xp_mpic_irq_map(struct irq_domain *h,
317 irq_set_chip_and_handler(virq, &armada_370_xp_irq_chip, 316 irq_set_chip_and_handler(virq, &armada_370_xp_irq_chip,
318 handle_level_irq); 317 handle_level_irq);
319 } 318 }
320 set_irq_flags(virq, IRQF_VALID | IRQF_PROBE); 319 irq_set_probe(virq);
321 320
322 return 0; 321 return 0;
323} 322}
@@ -447,8 +446,7 @@ static void armada_370_xp_handle_msi_irq(struct pt_regs *regs, bool is_chained)
447static void armada_370_xp_handle_msi_irq(struct pt_regs *r, bool b) {} 446static void armada_370_xp_handle_msi_irq(struct pt_regs *r, bool b) {}
448#endif 447#endif
449 448
450static void armada_370_xp_mpic_handle_cascade_irq(unsigned int irq, 449static void armada_370_xp_mpic_handle_cascade_irq(struct irq_desc *desc)
451 struct irq_desc *desc)
452{ 450{
453 struct irq_chip *chip = irq_desc_get_chip(desc); 451 struct irq_chip *chip = irq_desc_get_chip(desc);
454 unsigned long irqmap, irqn, irqsrc, cpuid; 452 unsigned long irqmap, irqn, irqsrc, cpuid;
diff --git a/drivers/irqchip/irq-bcm2835.c b/drivers/irqchip/irq-bcm2835.c
index ed4ca9deca70..bf9cc5f2e839 100644
--- a/drivers/irqchip/irq-bcm2835.c
+++ b/drivers/irqchip/irq-bcm2835.c
@@ -96,7 +96,7 @@ struct armctrl_ic {
96static struct armctrl_ic intc __read_mostly; 96static struct armctrl_ic intc __read_mostly;
97static void __exception_irq_entry bcm2835_handle_irq( 97static void __exception_irq_entry bcm2835_handle_irq(
98 struct pt_regs *regs); 98 struct pt_regs *regs);
99static void bcm2836_chained_handle_irq(unsigned int irq, struct irq_desc *desc); 99static void bcm2836_chained_handle_irq(struct irq_desc *desc);
100 100
101static void armctrl_mask_irq(struct irq_data *d) 101static void armctrl_mask_irq(struct irq_data *d)
102{ 102{
@@ -166,7 +166,7 @@ static int __init armctrl_of_init(struct device_node *node,
166 BUG_ON(irq <= 0); 166 BUG_ON(irq <= 0);
167 irq_set_chip_and_handler(irq, &armctrl_chip, 167 irq_set_chip_and_handler(irq, &armctrl_chip,
168 handle_level_irq); 168 handle_level_irq);
169 set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); 169 irq_set_probe(irq);
170 } 170 }
171 } 171 }
172 172
@@ -245,7 +245,7 @@ static void __exception_irq_entry bcm2835_handle_irq(
245 handle_IRQ(irq_linear_revmap(intc.domain, hwirq), regs); 245 handle_IRQ(irq_linear_revmap(intc.domain, hwirq), regs);
246} 246}
247 247
248static void bcm2836_chained_handle_irq(unsigned int irq, struct irq_desc *desc) 248static void bcm2836_chained_handle_irq(struct irq_desc *desc)
249{ 249{
250 u32 hwirq; 250 u32 hwirq;
251 251
diff --git a/drivers/irqchip/irq-bcm7038-l1.c b/drivers/irqchip/irq-bcm7038-l1.c
index 409bdc6366c2..0fea985ef1dc 100644
--- a/drivers/irqchip/irq-bcm7038-l1.c
+++ b/drivers/irqchip/irq-bcm7038-l1.c
@@ -115,7 +115,7 @@ static inline void l1_writel(u32 val, void __iomem *reg)
115 writel(val, reg); 115 writel(val, reg);
116} 116}
117 117
118static void bcm7038_l1_irq_handle(unsigned int irq, struct irq_desc *desc) 118static void bcm7038_l1_irq_handle(struct irq_desc *desc)
119{ 119{
120 struct bcm7038_l1_chip *intc = irq_desc_get_handler_data(desc); 120 struct bcm7038_l1_chip *intc = irq_desc_get_handler_data(desc);
121 struct bcm7038_l1_cpu *cpu; 121 struct bcm7038_l1_cpu *cpu;
diff --git a/drivers/irqchip/irq-bcm7120-l2.c b/drivers/irqchip/irq-bcm7120-l2.c
index d3f976913a6f..61b18ab33ad9 100644
--- a/drivers/irqchip/irq-bcm7120-l2.c
+++ b/drivers/irqchip/irq-bcm7120-l2.c
@@ -56,7 +56,7 @@ struct bcm7120_l2_intc_data {
56 const __be32 *map_mask_prop; 56 const __be32 *map_mask_prop;
57}; 57};
58 58
59static void bcm7120_l2_intc_irq_handle(unsigned int irq, struct irq_desc *desc) 59static void bcm7120_l2_intc_irq_handle(struct irq_desc *desc)
60{ 60{
61 struct bcm7120_l1_intc_data *data = irq_desc_get_handler_data(desc); 61 struct bcm7120_l1_intc_data *data = irq_desc_get_handler_data(desc);
62 struct bcm7120_l2_intc_data *b = data->b; 62 struct bcm7120_l2_intc_data *b = data->b;
diff --git a/drivers/irqchip/irq-brcmstb-l2.c b/drivers/irqchip/irq-brcmstb-l2.c
index aedda06191eb..65cd341f331a 100644
--- a/drivers/irqchip/irq-brcmstb-l2.c
+++ b/drivers/irqchip/irq-brcmstb-l2.c
@@ -49,13 +49,12 @@ struct brcmstb_l2_intc_data {
49 u32 saved_mask; /* for suspend/resume */ 49 u32 saved_mask; /* for suspend/resume */
50}; 50};
51 51
52static void brcmstb_l2_intc_irq_handle(unsigned int __irq, 52static void brcmstb_l2_intc_irq_handle(struct irq_desc *desc)
53 struct irq_desc *desc)
54{ 53{
55 struct brcmstb_l2_intc_data *b = irq_desc_get_handler_data(desc); 54 struct brcmstb_l2_intc_data *b = irq_desc_get_handler_data(desc);
56 struct irq_chip_generic *gc = irq_get_domain_generic_chip(b->domain, 0); 55 struct irq_chip_generic *gc = irq_get_domain_generic_chip(b->domain, 0);
57 struct irq_chip *chip = irq_desc_get_chip(desc); 56 struct irq_chip *chip = irq_desc_get_chip(desc);
58 unsigned int irq = irq_desc_get_irq(desc); 57 unsigned int irq;
59 u32 status; 58 u32 status;
60 59
61 chained_irq_enter(chip, desc); 60 chained_irq_enter(chip, desc);
@@ -65,7 +64,7 @@ static void brcmstb_l2_intc_irq_handle(unsigned int __irq,
65 64
66 if (status == 0) { 65 if (status == 0) {
67 raw_spin_lock(&desc->lock); 66 raw_spin_lock(&desc->lock);
68 handle_bad_irq(irq, desc); 67 handle_bad_irq(desc);
69 raw_spin_unlock(&desc->lock); 68 raw_spin_unlock(&desc->lock);
70 goto out; 69 goto out;
71 } 70 }
diff --git a/drivers/irqchip/irq-clps711x.c b/drivers/irqchip/irq-clps711x.c
index 2dd929eed9e0..eb5eb0cd414d 100644
--- a/drivers/irqchip/irq-clps711x.c
+++ b/drivers/irqchip/irq-clps711x.c
@@ -132,14 +132,14 @@ static int __init clps711x_intc_irq_map(struct irq_domain *h, unsigned int virq,
132 irq_hw_number_t hw) 132 irq_hw_number_t hw)
133{ 133{
134 irq_flow_handler_t handler = handle_level_irq; 134 irq_flow_handler_t handler = handle_level_irq;
135 unsigned int flags = IRQF_VALID | IRQF_PROBE; 135 unsigned int flags = 0;
136 136
137 if (!clps711x_irqs[hw].flags) 137 if (!clps711x_irqs[hw].flags)
138 return 0; 138 return 0;
139 139
140 if (clps711x_irqs[hw].flags & CLPS711X_FLAG_FIQ) { 140 if (clps711x_irqs[hw].flags & CLPS711X_FLAG_FIQ) {
141 handler = handle_bad_irq; 141 handler = handle_bad_irq;
142 flags |= IRQF_NOAUTOEN; 142 flags |= IRQ_NOAUTOEN;
143 } else if (clps711x_irqs[hw].eoi) { 143 } else if (clps711x_irqs[hw].eoi) {
144 handler = handle_fasteoi_irq; 144 handler = handle_fasteoi_irq;
145 } 145 }
@@ -149,7 +149,7 @@ static int __init clps711x_intc_irq_map(struct irq_domain *h, unsigned int virq,
149 writel_relaxed(0, clps711x_intc->base + clps711x_irqs[hw].eoi); 149 writel_relaxed(0, clps711x_intc->base + clps711x_irqs[hw].eoi);
150 150
151 irq_set_chip_and_handler(virq, &clps711x_intc_chip, handler); 151 irq_set_chip_and_handler(virq, &clps711x_intc_chip, handler);
152 set_irq_flags(virq, flags); 152 irq_modify_status(virq, IRQ_NOPROBE, flags);
153 153
154 return 0; 154 return 0;
155} 155}
diff --git a/drivers/irqchip/irq-dw-apb-ictl.c b/drivers/irqchip/irq-dw-apb-ictl.c
index efd95d9955e7..052f266364c0 100644
--- a/drivers/irqchip/irq-dw-apb-ictl.c
+++ b/drivers/irqchip/irq-dw-apb-ictl.c
@@ -26,7 +26,7 @@
26#define APB_INT_FINALSTATUS_H 0x34 26#define APB_INT_FINALSTATUS_H 0x34
27#define APB_INT_BASE_OFFSET 0x04 27#define APB_INT_BASE_OFFSET 0x04
28 28
29static void dw_apb_ictl_handler(unsigned int irq, struct irq_desc *desc) 29static void dw_apb_ictl_handler(struct irq_desc *desc)
30{ 30{
31 struct irq_domain *d = irq_desc_get_handler_data(desc); 31 struct irq_domain *d = irq_desc_get_handler_data(desc);
32 struct irq_chip *chip = irq_desc_get_chip(desc); 32 struct irq_chip *chip = irq_desc_get_chip(desc);
diff --git a/drivers/irqchip/irq-gic-v2m.c b/drivers/irqchip/irq-gic-v2m.c
index db04fc1f56b2..12985daa66ab 100644
--- a/drivers/irqchip/irq-gic-v2m.c
+++ b/drivers/irqchip/irq-gic-v2m.c
@@ -95,8 +95,8 @@ static void gicv2m_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
95 struct v2m_data *v2m = irq_data_get_irq_chip_data(data); 95 struct v2m_data *v2m = irq_data_get_irq_chip_data(data);
96 phys_addr_t addr = v2m->res.start + V2M_MSI_SETSPI_NS; 96 phys_addr_t addr = v2m->res.start + V2M_MSI_SETSPI_NS;
97 97
98 msg->address_hi = (u32) (addr >> 32); 98 msg->address_hi = upper_32_bits(addr);
99 msg->address_lo = (u32) (addr); 99 msg->address_lo = lower_32_bits(addr);
100 msg->data = data->hwirq; 100 msg->data = data->hwirq;
101} 101}
102 102
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 26b55c53755f..ac7ae2b3cb83 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -898,8 +898,10 @@ retry_baser:
898 * non-cacheable as well. 898 * non-cacheable as well.
899 */ 899 */
900 shr = tmp & GITS_BASER_SHAREABILITY_MASK; 900 shr = tmp & GITS_BASER_SHAREABILITY_MASK;
901 if (!shr) 901 if (!shr) {
902 cache = GITS_BASER_nC; 902 cache = GITS_BASER_nC;
903 __flush_dcache_area(base, alloc_size);
904 }
903 goto retry_baser; 905 goto retry_baser;
904 } 906 }
905 907
@@ -1140,6 +1142,8 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
1140 return NULL; 1142 return NULL;
1141 } 1143 }
1142 1144
1145 __flush_dcache_area(itt, sz);
1146
1143 dev->its = its; 1147 dev->its = its;
1144 dev->itt = itt; 1148 dev->itt = itt;
1145 dev->nr_ites = nr_ites; 1149 dev->nr_ites = nr_ites;
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 7deed6ef54c2..36ecfc870e5a 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -70,11 +70,6 @@ static inline int gic_irq_in_rdist(struct irq_data *d)
70 return gic_irq(d) < 32; 70 return gic_irq(d) < 32;
71} 71}
72 72
73static inline bool forwarded_irq(struct irq_data *d)
74{
75 return d->handler_data != NULL;
76}
77
78static inline void __iomem *gic_dist_base(struct irq_data *d) 73static inline void __iomem *gic_dist_base(struct irq_data *d)
79{ 74{
80 if (gic_irq_in_rdist(d)) /* SGI+PPI -> SGI_base for this CPU */ 75 if (gic_irq_in_rdist(d)) /* SGI+PPI -> SGI_base for this CPU */
@@ -249,7 +244,7 @@ static void gic_eoimode1_mask_irq(struct irq_data *d)
249 * disabled/masked will not get "stuck", because there is 244 * disabled/masked will not get "stuck", because there is
250 * noone to deactivate it (guest is being terminated). 245 * noone to deactivate it (guest is being terminated).
251 */ 246 */
252 if (forwarded_irq(d)) 247 if (irqd_is_forwarded_to_vcpu(d))
253 gic_poke_irq(d, GICD_ICACTIVER); 248 gic_poke_irq(d, GICD_ICACTIVER);
254} 249}
255 250
@@ -324,7 +319,7 @@ static void gic_eoimode1_eoi_irq(struct irq_data *d)
324 * No need to deactivate an LPI, or an interrupt that 319 * No need to deactivate an LPI, or an interrupt that
325 * is is getting forwarded to a vcpu. 320 * is is getting forwarded to a vcpu.
326 */ 321 */
327 if (gic_irq(d) >= 8192 || forwarded_irq(d)) 322 if (gic_irq(d) >= 8192 || irqd_is_forwarded_to_vcpu(d))
328 return; 323 return;
329 gic_write_dir(gic_irq(d)); 324 gic_write_dir(gic_irq(d));
330} 325}
@@ -357,7 +352,10 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
357 352
358static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu) 353static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
359{ 354{
360 d->handler_data = vcpu; 355 if (vcpu)
356 irqd_set_forwarded_to_vcpu(d);
357 else
358 irqd_clr_forwarded_to_vcpu(d);
361 return 0; 359 return 0;
362} 360}
363 361
@@ -754,13 +752,13 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
754 irq_set_percpu_devid(irq); 752 irq_set_percpu_devid(irq);
755 irq_domain_set_info(d, irq, hw, chip, d->host_data, 753 irq_domain_set_info(d, irq, hw, chip, d->host_data,
756 handle_percpu_devid_irq, NULL, NULL); 754 handle_percpu_devid_irq, NULL, NULL);
757 set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN); 755 irq_set_status_flags(irq, IRQ_NOAUTOEN);
758 } 756 }
759 /* SPIs */ 757 /* SPIs */
760 if (hw >= 32 && hw < gic_data.irq_nr) { 758 if (hw >= 32 && hw < gic_data.irq_nr) {
761 irq_domain_set_info(d, irq, hw, chip, d->host_data, 759 irq_domain_set_info(d, irq, hw, chip, d->host_data,
762 handle_fasteoi_irq, NULL, NULL); 760 handle_fasteoi_irq, NULL, NULL);
763 set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); 761 irq_set_probe(irq);
764 } 762 }
765 /* LPIs */ 763 /* LPIs */
766 if (hw >= 8192 && hw < GIC_ID_NR) { 764 if (hw >= 8192 && hw < GIC_ID_NR) {
@@ -768,7 +766,6 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
768 return -EPERM; 766 return -EPERM;
769 irq_domain_set_info(d, irq, hw, chip, d->host_data, 767 irq_domain_set_info(d, irq, hw, chip, d->host_data,
770 handle_fasteoi_irq, NULL, NULL); 768 handle_fasteoi_irq, NULL, NULL);
771 set_irq_flags(irq, IRQF_VALID);
772 } 769 }
773 770
774 return 0; 771 return 0;
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index e6b7ed537952..982c09c2d791 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -145,29 +145,10 @@ static inline bool cascading_gic_irq(struct irq_data *d)
145 void *data = irq_data_get_irq_handler_data(d); 145 void *data = irq_data_get_irq_handler_data(d);
146 146
147 /* 147 /*
148 * If handler_data pointing to one of the secondary GICs, then 148 * If handler_data is set, this is a cascading interrupt, and
149 * this is a cascading interrupt, and it cannot possibly be 149 * it cannot possibly be forwarded.
150 * forwarded.
151 */ 150 */
152 if (data >= (void *)(gic_data + 1) && 151 return data != NULL;
153 data < (void *)(gic_data + MAX_GIC_NR))
154 return true;
155
156 return false;
157}
158
159static inline bool forwarded_irq(struct irq_data *d)
160{
161 /*
162 * A forwarded interrupt:
163 * - is on the primary GIC
164 * - has its handler_data set to a value
165 * - that isn't a secondary GIC
166 */
167 if (d->handler_data && !cascading_gic_irq(d))
168 return true;
169
170 return false;
171} 152}
172 153
173/* 154/*
@@ -201,7 +182,7 @@ static void gic_eoimode1_mask_irq(struct irq_data *d)
201 * disabled/masked will not get "stuck", because there is 182 * disabled/masked will not get "stuck", because there is
202 * noone to deactivate it (guest is being terminated). 183 * noone to deactivate it (guest is being terminated).
203 */ 184 */
204 if (forwarded_irq(d)) 185 if (irqd_is_forwarded_to_vcpu(d))
205 gic_poke_irq(d, GIC_DIST_ACTIVE_CLEAR); 186 gic_poke_irq(d, GIC_DIST_ACTIVE_CLEAR);
206} 187}
207 188
@@ -218,7 +199,7 @@ static void gic_eoi_irq(struct irq_data *d)
218static void gic_eoimode1_eoi_irq(struct irq_data *d) 199static void gic_eoimode1_eoi_irq(struct irq_data *d)
219{ 200{
220 /* Do not deactivate an IRQ forwarded to a vcpu. */ 201 /* Do not deactivate an IRQ forwarded to a vcpu. */
221 if (forwarded_irq(d)) 202 if (irqd_is_forwarded_to_vcpu(d))
222 return; 203 return;
223 204
224 writel_relaxed(gic_irq(d), gic_cpu_base(d) + GIC_CPU_DEACTIVATE); 205 writel_relaxed(gic_irq(d), gic_cpu_base(d) + GIC_CPU_DEACTIVATE);
@@ -296,7 +277,10 @@ static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
296 if (cascading_gic_irq(d)) 277 if (cascading_gic_irq(d))
297 return -EINVAL; 278 return -EINVAL;
298 279
299 d->handler_data = vcpu; 280 if (vcpu)
281 irqd_set_forwarded_to_vcpu(d);
282 else
283 irqd_clr_forwarded_to_vcpu(d);
300 return 0; 284 return 0;
301} 285}
302 286
@@ -357,7 +341,7 @@ static void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
357 } while (1); 341 } while (1);
358} 342}
359 343
360static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc) 344static void gic_handle_cascade_irq(struct irq_desc *desc)
361{ 345{
362 struct gic_chip_data *chip_data = irq_desc_get_handler_data(desc); 346 struct gic_chip_data *chip_data = irq_desc_get_handler_data(desc);
363 struct irq_chip *chip = irq_desc_get_chip(desc); 347 struct irq_chip *chip = irq_desc_get_chip(desc);
@@ -376,7 +360,7 @@ static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
376 360
377 cascade_irq = irq_find_mapping(chip_data->domain, gic_irq); 361 cascade_irq = irq_find_mapping(chip_data->domain, gic_irq);
378 if (unlikely(gic_irq < 32 || gic_irq > 1020)) 362 if (unlikely(gic_irq < 32 || gic_irq > 1020))
379 handle_bad_irq(cascade_irq, desc); 363 handle_bad_irq(desc);
380 else 364 else
381 generic_handle_irq(cascade_irq); 365 generic_handle_irq(cascade_irq);
382 366
@@ -906,11 +890,11 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
906 irq_set_percpu_devid(irq); 890 irq_set_percpu_devid(irq);
907 irq_domain_set_info(d, irq, hw, chip, d->host_data, 891 irq_domain_set_info(d, irq, hw, chip, d->host_data,
908 handle_percpu_devid_irq, NULL, NULL); 892 handle_percpu_devid_irq, NULL, NULL);
909 set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN); 893 irq_set_status_flags(irq, IRQ_NOAUTOEN);
910 } else { 894 } else {
911 irq_domain_set_info(d, irq, hw, chip, d->host_data, 895 irq_domain_set_info(d, irq, hw, chip, d->host_data,
912 handle_fasteoi_irq, NULL, NULL); 896 handle_fasteoi_irq, NULL, NULL);
913 set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); 897 irq_set_probe(irq);
914 } 898 }
915 return 0; 899 return 0;
916} 900}
@@ -1119,12 +1103,49 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start,
1119#ifdef CONFIG_OF 1103#ifdef CONFIG_OF
1120static int gic_cnt __initdata; 1104static int gic_cnt __initdata;
1121 1105
1106static bool gic_check_eoimode(struct device_node *node, void __iomem **base)
1107{
1108 struct resource cpuif_res;
1109
1110 of_address_to_resource(node, 1, &cpuif_res);
1111
1112 if (!is_hyp_mode_available())
1113 return false;
1114 if (resource_size(&cpuif_res) < SZ_8K)
1115 return false;
1116 if (resource_size(&cpuif_res) == SZ_128K) {
1117 u32 val_low, val_high;
1118
1119 /*
1120 * Verify that we have the first 4kB of a GIC400
1121 * aliased over the first 64kB by checking the
1122 * GICC_IIDR register on both ends.
1123 */
1124 val_low = readl_relaxed(*base + GIC_CPU_IDENT);
1125 val_high = readl_relaxed(*base + GIC_CPU_IDENT + 0xf000);
1126 if ((val_low & 0xffff0fff) != 0x0202043B ||
1127 val_low != val_high)
1128 return false;
1129
1130 /*
1131 * Move the base up by 60kB, so that we have a 8kB
1132 * contiguous region, which allows us to use GICC_DIR
1133 * at its normal offset. Please pass me that bucket.
1134 */
1135 *base += 0xf000;
1136 cpuif_res.start += 0xf000;
1137 pr_warn("GIC: Adjusting CPU interface base to %pa",
1138 &cpuif_res.start);
1139 }
1140
1141 return true;
1142}
1143
1122static int __init 1144static int __init
1123gic_of_init(struct device_node *node, struct device_node *parent) 1145gic_of_init(struct device_node *node, struct device_node *parent)
1124{ 1146{
1125 void __iomem *cpu_base; 1147 void __iomem *cpu_base;
1126 void __iomem *dist_base; 1148 void __iomem *dist_base;
1127 struct resource cpu_res;
1128 u32 percpu_offset; 1149 u32 percpu_offset;
1129 int irq; 1150 int irq;
1130 1151
@@ -1137,14 +1158,11 @@ gic_of_init(struct device_node *node, struct device_node *parent)
1137 cpu_base = of_iomap(node, 1); 1158 cpu_base = of_iomap(node, 1);
1138 WARN(!cpu_base, "unable to map gic cpu registers\n"); 1159 WARN(!cpu_base, "unable to map gic cpu registers\n");
1139 1160
1140 of_address_to_resource(node, 1, &cpu_res);
1141
1142 /* 1161 /*
1143 * Disable split EOI/Deactivate if either HYP is not available 1162 * Disable split EOI/Deactivate if either HYP is not available
1144 * or the CPU interface is too small. 1163 * or the CPU interface is too small.
1145 */ 1164 */
1146 if (gic_cnt == 0 && (!is_hyp_mode_available() || 1165 if (gic_cnt == 0 && !gic_check_eoimode(node, &cpu_base))
1147 resource_size(&cpu_res) < SZ_8K))
1148 static_key_slow_dec(&supports_deactivate); 1166 static_key_slow_dec(&supports_deactivate);
1149 1167
1150 if (of_property_read_u32(node, "cpu-offset", &percpu_offset)) 1168 if (of_property_read_u32(node, "cpu-offset", &percpu_offset))
diff --git a/drivers/irqchip/irq-hip04.c b/drivers/irqchip/irq-hip04.c
index a0128c7c98dd..8f3ca8f3a62b 100644
--- a/drivers/irqchip/irq-hip04.c
+++ b/drivers/irqchip/irq-hip04.c
@@ -307,11 +307,11 @@ static int hip04_irq_domain_map(struct irq_domain *d, unsigned int irq,
307 irq_set_percpu_devid(irq); 307 irq_set_percpu_devid(irq);
308 irq_set_chip_and_handler(irq, &hip04_irq_chip, 308 irq_set_chip_and_handler(irq, &hip04_irq_chip,
309 handle_percpu_devid_irq); 309 handle_percpu_devid_irq);
310 set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN); 310 irq_set_status_flags(irq, IRQ_NOAUTOEN);
311 } else { 311 } else {
312 irq_set_chip_and_handler(irq, &hip04_irq_chip, 312 irq_set_chip_and_handler(irq, &hip04_irq_chip,
313 handle_fasteoi_irq); 313 handle_fasteoi_irq);
314 set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); 314 irq_set_probe(irq);
315 } 315 }
316 irq_set_chip_data(irq, d->host_data); 316 irq_set_chip_data(irq, d->host_data);
317 return 0; 317 return 0;
diff --git a/drivers/irqchip/irq-i8259.c b/drivers/irqchip/irq-i8259.c
index 4836102ba312..e484fd255321 100644
--- a/drivers/irqchip/irq-i8259.c
+++ b/drivers/irqchip/irq-i8259.c
@@ -352,7 +352,7 @@ void __init init_i8259_irqs(void)
352 __init_i8259_irqs(NULL); 352 __init_i8259_irqs(NULL);
353} 353}
354 354
355static void i8259_irq_dispatch(unsigned int __irq, struct irq_desc *desc) 355static void i8259_irq_dispatch(struct irq_desc *desc)
356{ 356{
357 struct irq_domain *domain = irq_desc_get_handler_data(desc); 357 struct irq_domain *domain = irq_desc_get_handler_data(desc);
358 int hwirq = i8259_irq(); 358 int hwirq = i8259_irq();
diff --git a/drivers/irqchip/irq-imgpdc.c b/drivers/irqchip/irq-imgpdc.c
index 841604b81004..c02d29c9dc05 100644
--- a/drivers/irqchip/irq-imgpdc.c
+++ b/drivers/irqchip/irq-imgpdc.c
@@ -218,7 +218,7 @@ static int pdc_irq_set_wake(struct irq_data *data, unsigned int on)
218 return 0; 218 return 0;
219} 219}
220 220
221static void pdc_intc_perip_isr(unsigned int __irq, struct irq_desc *desc) 221static void pdc_intc_perip_isr(struct irq_desc *desc)
222{ 222{
223 unsigned int irq = irq_desc_get_irq(desc); 223 unsigned int irq = irq_desc_get_irq(desc);
224 struct pdc_intc_priv *priv; 224 struct pdc_intc_priv *priv;
@@ -240,7 +240,7 @@ found:
240 generic_handle_irq(irq_no); 240 generic_handle_irq(irq_no);
241} 241}
242 242
243static void pdc_intc_syswake_isr(unsigned int irq, struct irq_desc *desc) 243static void pdc_intc_syswake_isr(struct irq_desc *desc)
244{ 244{
245 struct pdc_intc_priv *priv; 245 struct pdc_intc_priv *priv;
246 unsigned int syswake, irq_no; 246 unsigned int syswake, irq_no;
diff --git a/drivers/irqchip/irq-keystone.c b/drivers/irqchip/irq-keystone.c
index c1517267b5db..deb89d63a728 100644
--- a/drivers/irqchip/irq-keystone.c
+++ b/drivers/irqchip/irq-keystone.c
@@ -83,7 +83,7 @@ static void keystone_irq_ack(struct irq_data *d)
83 /* nothing to do here */ 83 /* nothing to do here */
84} 84}
85 85
86static void keystone_irq_handler(unsigned __irq, struct irq_desc *desc) 86static void keystone_irq_handler(struct irq_desc *desc)
87{ 87{
88 unsigned int irq = irq_desc_get_irq(desc); 88 unsigned int irq = irq_desc_get_irq(desc);
89 struct keystone_irq_device *kirq = irq_desc_get_handler_data(desc); 89 struct keystone_irq_device *kirq = irq_desc_get_handler_data(desc);
@@ -127,7 +127,7 @@ static int keystone_irq_map(struct irq_domain *h, unsigned int virq,
127 127
128 irq_set_chip_data(virq, kirq); 128 irq_set_chip_data(virq, kirq);
129 irq_set_chip_and_handler(virq, &kirq->chip, handle_level_irq); 129 irq_set_chip_and_handler(virq, &kirq->chip, handle_level_irq);
130 set_irq_flags(virq, IRQF_VALID | IRQF_PROBE); 130 irq_set_probe(virq);
131 return 0; 131 return 0;
132} 132}
133 133
diff --git a/drivers/irqchip/irq-metag-ext.c b/drivers/irqchip/irq-metag-ext.c
index 5f4c52928d16..8c38b3d92e1c 100644
--- a/drivers/irqchip/irq-metag-ext.c
+++ b/drivers/irqchip/irq-metag-ext.c
@@ -446,7 +446,7 @@ static int meta_intc_irq_set_type(struct irq_data *data, unsigned int flow_type)
446 * Whilst using TR2 to detect external interrupts is a software convention it is 446 * Whilst using TR2 to detect external interrupts is a software convention it is
447 * (hopefully) unlikely to change. 447 * (hopefully) unlikely to change.
448 */ 448 */
449static void meta_intc_irq_demux(unsigned int irq, struct irq_desc *desc) 449static void meta_intc_irq_demux(struct irq_desc *desc)
450{ 450{
451 struct meta_intc_priv *priv = &meta_intc_priv; 451 struct meta_intc_priv *priv = &meta_intc_priv;
452 irq_hw_number_t hw; 452 irq_hw_number_t hw;
diff --git a/drivers/irqchip/irq-metag.c b/drivers/irqchip/irq-metag.c
index 3d23ce3edb5c..a5f053bd2f44 100644
--- a/drivers/irqchip/irq-metag.c
+++ b/drivers/irqchip/irq-metag.c
@@ -220,7 +220,7 @@ static int metag_internal_irq_set_affinity(struct irq_data *data,
220 * occurred. It is this function's job to demux this irq and 220 * occurred. It is this function's job to demux this irq and
221 * figure out exactly which trigger needs servicing. 221 * figure out exactly which trigger needs servicing.
222 */ 222 */
223static void metag_internal_irq_demux(unsigned int irq, struct irq_desc *desc) 223static void metag_internal_irq_demux(struct irq_desc *desc)
224{ 224{
225 struct metag_internal_irq_priv *priv = irq_desc_get_handler_data(desc); 225 struct metag_internal_irq_priv *priv = irq_desc_get_handler_data(desc);
226 irq_hw_number_t hw; 226 irq_hw_number_t hw;
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index 1764bcf8ee6b..af2f16bb8a94 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -546,7 +546,7 @@ static void __gic_irq_dispatch(void)
546 gic_handle_shared_int(false); 546 gic_handle_shared_int(false);
547} 547}
548 548
549static void gic_irq_dispatch(unsigned int irq, struct irq_desc *desc) 549static void gic_irq_dispatch(struct irq_desc *desc)
550{ 550{
551 gic_handle_local_int(true); 551 gic_handle_local_int(true);
552 gic_handle_shared_int(true); 552 gic_handle_shared_int(true);
diff --git a/drivers/irqchip/irq-mmp.c b/drivers/irqchip/irq-mmp.c
index 781ed6e71dbb..013fc9659a84 100644
--- a/drivers/irqchip/irq-mmp.c
+++ b/drivers/irqchip/irq-mmp.c
@@ -129,7 +129,7 @@ struct irq_chip icu_irq_chip = {
129 .irq_unmask = icu_unmask_irq, 129 .irq_unmask = icu_unmask_irq,
130}; 130};
131 131
132static void icu_mux_irq_demux(unsigned int __irq, struct irq_desc *desc) 132static void icu_mux_irq_demux(struct irq_desc *desc)
133{ 133{
134 unsigned int irq = irq_desc_get_irq(desc); 134 unsigned int irq = irq_desc_get_irq(desc);
135 struct irq_domain *domain; 135 struct irq_domain *domain;
@@ -164,7 +164,6 @@ static int mmp_irq_domain_map(struct irq_domain *d, unsigned int irq,
164 irq_hw_number_t hw) 164 irq_hw_number_t hw)
165{ 165{
166 irq_set_chip_and_handler(irq, &icu_irq_chip, handle_level_irq); 166 irq_set_chip_and_handler(irq, &icu_irq_chip, handle_level_irq);
167 set_irq_flags(irq, IRQF_VALID);
168 return 0; 167 return 0;
169} 168}
170 169
@@ -234,7 +233,6 @@ void __init icu_init_irq(void)
234 for (irq = 0; irq < 64; irq++) { 233 for (irq = 0; irq < 64; irq++) {
235 icu_mask_irq(irq_get_irq_data(irq)); 234 icu_mask_irq(irq_get_irq_data(irq));
236 irq_set_chip_and_handler(irq, &icu_irq_chip, handle_level_irq); 235 irq_set_chip_and_handler(irq, &icu_irq_chip, handle_level_irq);
237 set_irq_flags(irq, IRQF_VALID);
238 } 236 }
239 irq_set_default_host(icu_data[0].domain); 237 irq_set_default_host(icu_data[0].domain);
240 set_handle_irq(mmp_handle_irq); 238 set_handle_irq(mmp_handle_irq);
@@ -337,7 +335,6 @@ void __init mmp2_init_icu(void)
337 irq_set_chip_and_handler(irq, &icu_irq_chip, 335 irq_set_chip_and_handler(irq, &icu_irq_chip,
338 handle_level_irq); 336 handle_level_irq);
339 } 337 }
340 set_irq_flags(irq, IRQF_VALID);
341 } 338 }
342 irq_set_default_host(icu_data[0].domain); 339 irq_set_default_host(icu_data[0].domain);
343 set_handle_irq(mmp2_handle_irq); 340 set_handle_irq(mmp2_handle_irq);
diff --git a/drivers/irqchip/irq-mxs.c b/drivers/irqchip/irq-mxs.c
index 1faf812f3dc8..604df63e2edf 100644
--- a/drivers/irqchip/irq-mxs.c
+++ b/drivers/irqchip/irq-mxs.c
@@ -84,7 +84,6 @@ static int icoll_irq_domain_map(struct irq_domain *d, unsigned int virq,
84 irq_hw_number_t hw) 84 irq_hw_number_t hw)
85{ 85{
86 irq_set_chip_and_handler(virq, &mxs_icoll_chip, handle_level_irq); 86 irq_set_chip_and_handler(virq, &mxs_icoll_chip, handle_level_irq);
87 set_irq_flags(virq, IRQF_VALID);
88 87
89 return 0; 88 return 0;
90} 89}
diff --git a/drivers/irqchip/irq-orion.c b/drivers/irqchip/irq-orion.c
index 5ea999a724b5..be4c5a8c9659 100644
--- a/drivers/irqchip/irq-orion.c
+++ b/drivers/irqchip/irq-orion.c
@@ -106,7 +106,7 @@ IRQCHIP_DECLARE(orion_intc, "marvell,orion-intc", orion_irq_init);
106#define ORION_BRIDGE_IRQ_CAUSE 0x00 106#define ORION_BRIDGE_IRQ_CAUSE 0x00
107#define ORION_BRIDGE_IRQ_MASK 0x04 107#define ORION_BRIDGE_IRQ_MASK 0x04
108 108
109static void orion_bridge_irq_handler(unsigned int irq, struct irq_desc *desc) 109static void orion_bridge_irq_handler(struct irq_desc *desc)
110{ 110{
111 struct irq_domain *d = irq_desc_get_handler_data(desc); 111 struct irq_domain *d = irq_desc_get_handler_data(desc);
112 112
diff --git a/drivers/irqchip/irq-renesas-intc-irqpin.c b/drivers/irqchip/irq-renesas-intc-irqpin.c
index 0670ab4e3897..9525335723f6 100644
--- a/drivers/irqchip/irq-renesas-intc-irqpin.c
+++ b/drivers/irqchip/irq-renesas-intc-irqpin.c
@@ -283,6 +283,9 @@ static int intc_irqpin_irq_set_type(struct irq_data *d, unsigned int type)
283static int intc_irqpin_irq_set_wake(struct irq_data *d, unsigned int on) 283static int intc_irqpin_irq_set_wake(struct irq_data *d, unsigned int on)
284{ 284{
285 struct intc_irqpin_priv *p = irq_data_get_irq_chip_data(d); 285 struct intc_irqpin_priv *p = irq_data_get_irq_chip_data(d);
286 int hw_irq = irqd_to_hwirq(d);
287
288 irq_set_irq_wake(p->irq[hw_irq].requested_irq, on);
286 289
287 if (!p->clk) 290 if (!p->clk)
288 return 0; 291 return 0;
@@ -332,6 +335,12 @@ static irqreturn_t intc_irqpin_shared_irq_handler(int irq, void *dev_id)
332 return status; 335 return status;
333} 336}
334 337
338/*
339 * This lock class tells lockdep that INTC External IRQ Pin irqs are in a
340 * different category than their parents, so it won't report false recursion.
341 */
342static struct lock_class_key intc_irqpin_irq_lock_class;
343
335static int intc_irqpin_irq_domain_map(struct irq_domain *h, unsigned int virq, 344static int intc_irqpin_irq_domain_map(struct irq_domain *h, unsigned int virq,
336 irq_hw_number_t hw) 345 irq_hw_number_t hw)
337{ 346{
@@ -342,8 +351,8 @@ static int intc_irqpin_irq_domain_map(struct irq_domain *h, unsigned int virq,
342 351
343 intc_irqpin_dbg(&p->irq[hw], "map"); 352 intc_irqpin_dbg(&p->irq[hw], "map");
344 irq_set_chip_data(virq, h->host_data); 353 irq_set_chip_data(virq, h->host_data);
354 irq_set_lockdep_class(virq, &intc_irqpin_irq_lock_class);
345 irq_set_chip_and_handler(virq, &p->irq_chip, handle_level_irq); 355 irq_set_chip_and_handler(virq, &p->irq_chip, handle_level_irq);
346 set_irq_flags(virq, IRQF_VALID); /* kill me now */
347 return 0; 356 return 0;
348} 357}
349 358
diff --git a/drivers/irqchip/irq-renesas-irqc.c b/drivers/irqchip/irq-renesas-irqc.c
index 2aa3add711a6..35bf97ba4a3d 100644
--- a/drivers/irqchip/irq-renesas-irqc.c
+++ b/drivers/irqchip/irq-renesas-irqc.c
@@ -121,6 +121,9 @@ static int irqc_irq_set_type(struct irq_data *d, unsigned int type)
121static int irqc_irq_set_wake(struct irq_data *d, unsigned int on) 121static int irqc_irq_set_wake(struct irq_data *d, unsigned int on)
122{ 122{
123 struct irqc_priv *p = irq_data_get_irq_chip_data(d); 123 struct irqc_priv *p = irq_data_get_irq_chip_data(d);
124 int hw_irq = irqd_to_hwirq(d);
125
126 irq_set_irq_wake(p->irq[hw_irq].requested_irq, on);
124 127
125 if (!p->clk) 128 if (!p->clk)
126 return 0; 129 return 0;
@@ -150,6 +153,12 @@ static irqreturn_t irqc_irq_handler(int irq, void *dev_id)
150 return IRQ_NONE; 153 return IRQ_NONE;
151} 154}
152 155
156/*
157 * This lock class tells lockdep that IRQC irqs are in a different
158 * category than their parents, so it won't report false recursion.
159 */
160static struct lock_class_key irqc_irq_lock_class;
161
153static int irqc_irq_domain_map(struct irq_domain *h, unsigned int virq, 162static int irqc_irq_domain_map(struct irq_domain *h, unsigned int virq,
154 irq_hw_number_t hw) 163 irq_hw_number_t hw)
155{ 164{
@@ -157,6 +166,7 @@ static int irqc_irq_domain_map(struct irq_domain *h, unsigned int virq,
157 166
158 irqc_dbg(&p->irq[hw], "map"); 167 irqc_dbg(&p->irq[hw], "map");
159 irq_set_chip_data(virq, h->host_data); 168 irq_set_chip_data(virq, h->host_data);
169 irq_set_lockdep_class(virq, &irqc_irq_lock_class);
160 irq_set_chip_and_handler(virq, &p->irq_chip, handle_level_irq); 170 irq_set_chip_and_handler(virq, &p->irq_chip, handle_level_irq);
161 return 0; 171 return 0;
162} 172}
diff --git a/drivers/irqchip/irq-s3c24xx.c b/drivers/irqchip/irq-s3c24xx.c
index 506d9f20ca51..7154b011ddd2 100644
--- a/drivers/irqchip/irq-s3c24xx.c
+++ b/drivers/irqchip/irq-s3c24xx.c
@@ -298,7 +298,7 @@ static struct irq_chip s3c_irq_eint0t4 = {
298 .irq_set_type = s3c_irqext0_type, 298 .irq_set_type = s3c_irqext0_type,
299}; 299};
300 300
301static void s3c_irq_demux(unsigned int __irq, struct irq_desc *desc) 301static void s3c_irq_demux(struct irq_desc *desc)
302{ 302{
303 struct irq_chip *chip = irq_desc_get_chip(desc); 303 struct irq_chip *chip = irq_desc_get_chip(desc);
304 struct s3c_irq_data *irq_data = irq_desc_get_chip_data(desc); 304 struct s3c_irq_data *irq_data = irq_desc_get_chip_data(desc);
@@ -466,13 +466,11 @@ static int s3c24xx_irq_map(struct irq_domain *h, unsigned int virq,
466 466
467 irq_set_chip_data(virq, irq_data); 467 irq_set_chip_data(virq, irq_data);
468 468
469 set_irq_flags(virq, IRQF_VALID);
470
471 if (parent_intc && irq_data->type != S3C_IRQTYPE_NONE) { 469 if (parent_intc && irq_data->type != S3C_IRQTYPE_NONE) {
472 if (irq_data->parent_irq > 31) { 470 if (irq_data->parent_irq > 31) {
473 pr_err("irq-s3c24xx: parent irq %lu is out of range\n", 471 pr_err("irq-s3c24xx: parent irq %lu is out of range\n",
474 irq_data->parent_irq); 472 irq_data->parent_irq);
475 goto err; 473 return -EINVAL;
476 } 474 }
477 475
478 parent_irq_data = &parent_intc->irqs[irq_data->parent_irq]; 476 parent_irq_data = &parent_intc->irqs[irq_data->parent_irq];
@@ -485,18 +483,12 @@ static int s3c24xx_irq_map(struct irq_domain *h, unsigned int virq,
485 if (!irqno) { 483 if (!irqno) {
486 pr_err("irq-s3c24xx: could not find mapping for parent irq %lu\n", 484 pr_err("irq-s3c24xx: could not find mapping for parent irq %lu\n",
487 irq_data->parent_irq); 485 irq_data->parent_irq);
488 goto err; 486 return -EINVAL;
489 } 487 }
490 irq_set_chained_handler(irqno, s3c_irq_demux); 488 irq_set_chained_handler(irqno, s3c_irq_demux);
491 } 489 }
492 490
493 return 0; 491 return 0;
494
495err:
496 set_irq_flags(virq, 0);
497
498 /* the only error can result from bad mapping data*/
499 return -EINVAL;
500} 492}
501 493
502static const struct irq_domain_ops s3c24xx_irq_ops = { 494static const struct irq_domain_ops s3c24xx_irq_ops = {
@@ -1174,8 +1166,6 @@ static int s3c24xx_irq_map_of(struct irq_domain *h, unsigned int virq,
1174 1166
1175 irq_set_chip_data(virq, irq_data); 1167 irq_set_chip_data(virq, irq_data);
1176 1168
1177 set_irq_flags(virq, IRQF_VALID);
1178
1179 return 0; 1169 return 0;
1180} 1170}
1181 1171
diff --git a/drivers/irqchip/irq-sun4i.c b/drivers/irqchip/irq-sun4i.c
index 4ad3e7c69aa7..0704362f4c82 100644
--- a/drivers/irqchip/irq-sun4i.c
+++ b/drivers/irqchip/irq-sun4i.c
@@ -83,7 +83,7 @@ static int sun4i_irq_map(struct irq_domain *d, unsigned int virq,
83 irq_hw_number_t hw) 83 irq_hw_number_t hw)
84{ 84{
85 irq_set_chip_and_handler(virq, &sun4i_irq_chip, handle_fasteoi_irq); 85 irq_set_chip_and_handler(virq, &sun4i_irq_chip, handle_fasteoi_irq);
86 set_irq_flags(virq, IRQF_VALID | IRQF_PROBE); 86 irq_set_probe(virq);
87 87
88 return 0; 88 return 0;
89} 89}
diff --git a/drivers/irqchip/irq-sunxi-nmi.c b/drivers/irqchip/irq-sunxi-nmi.c
index 772a82cacbf7..c143dd58410c 100644
--- a/drivers/irqchip/irq-sunxi-nmi.c
+++ b/drivers/irqchip/irq-sunxi-nmi.c
@@ -58,7 +58,7 @@ static inline u32 sunxi_sc_nmi_read(struct irq_chip_generic *gc, u32 off)
58 return irq_reg_readl(gc, off); 58 return irq_reg_readl(gc, off);
59} 59}
60 60
61static void sunxi_sc_nmi_handle_irq(unsigned int irq, struct irq_desc *desc) 61static void sunxi_sc_nmi_handle_irq(struct irq_desc *desc)
62{ 62{
63 struct irq_domain *domain = irq_desc_get_handler_data(desc); 63 struct irq_domain *domain = irq_desc_get_handler_data(desc);
64 struct irq_chip *chip = irq_desc_get_chip(desc); 64 struct irq_chip *chip = irq_desc_get_chip(desc);
diff --git a/drivers/irqchip/irq-tb10x.c b/drivers/irqchip/irq-tb10x.c
index 331829661366..848d782a2a3b 100644
--- a/drivers/irqchip/irq-tb10x.c
+++ b/drivers/irqchip/irq-tb10x.c
@@ -97,7 +97,7 @@ static int tb10x_irq_set_type(struct irq_data *data, unsigned int flow_type)
97 return IRQ_SET_MASK_OK; 97 return IRQ_SET_MASK_OK;
98} 98}
99 99
100static void tb10x_irq_cascade(unsigned int __irq, struct irq_desc *desc) 100static void tb10x_irq_cascade(struct irq_desc *desc)
101{ 101{
102 struct irq_domain *domain = irq_desc_get_handler_data(desc); 102 struct irq_domain *domain = irq_desc_get_handler_data(desc);
103 unsigned int irq = irq_desc_get_irq(desc); 103 unsigned int irq = irq_desc_get_irq(desc);
diff --git a/drivers/irqchip/irq-versatile-fpga.c b/drivers/irqchip/irq-versatile-fpga.c
index 16123f688768..598ab3f0e0ac 100644
--- a/drivers/irqchip/irq-versatile-fpga.c
+++ b/drivers/irqchip/irq-versatile-fpga.c
@@ -65,19 +65,19 @@ static void fpga_irq_unmask(struct irq_data *d)
65 writel(mask, f->base + IRQ_ENABLE_SET); 65 writel(mask, f->base + IRQ_ENABLE_SET);
66} 66}
67 67
68static void fpga_irq_handle(unsigned int __irq, struct irq_desc *desc) 68static void fpga_irq_handle(struct irq_desc *desc)
69{ 69{
70 struct fpga_irq_data *f = irq_desc_get_handler_data(desc); 70 struct fpga_irq_data *f = irq_desc_get_handler_data(desc);
71 unsigned int irq = irq_desc_get_irq(desc);
72 u32 status = readl(f->base + IRQ_STATUS); 71 u32 status = readl(f->base + IRQ_STATUS);
73 72
74 if (status == 0) { 73 if (status == 0) {
75 do_bad_IRQ(irq, desc); 74 do_bad_IRQ(desc);
76 return; 75 return;
77 } 76 }
78 77
79 do { 78 do {
80 irq = ffs(status) - 1; 79 unsigned int irq = ffs(status) - 1;
80
81 status &= ~(1 << irq); 81 status &= ~(1 << irq);
82 generic_handle_irq(irq_find_mapping(f->domain, irq)); 82 generic_handle_irq(irq_find_mapping(f->domain, irq));
83 } while (status); 83 } while (status);
@@ -128,7 +128,7 @@ static int fpga_irqdomain_map(struct irq_domain *d, unsigned int irq,
128 irq_set_chip_data(irq, f); 128 irq_set_chip_data(irq, f);
129 irq_set_chip_and_handler(irq, &f->chip, 129 irq_set_chip_and_handler(irq, &f->chip,
130 handle_level_irq); 130 handle_level_irq);
131 set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); 131 irq_set_probe(irq);
132 return 0; 132 return 0;
133} 133}
134 134
diff --git a/drivers/irqchip/irq-vic.c b/drivers/irqchip/irq-vic.c
index 03846dff4212..b956dfffe78c 100644
--- a/drivers/irqchip/irq-vic.c
+++ b/drivers/irqchip/irq-vic.c
@@ -201,7 +201,7 @@ static int vic_irqdomain_map(struct irq_domain *d, unsigned int irq,
201 return -EPERM; 201 return -EPERM;
202 irq_set_chip_and_handler(irq, &vic_chip, handle_level_irq); 202 irq_set_chip_and_handler(irq, &vic_chip, handle_level_irq);
203 irq_set_chip_data(irq, v->base); 203 irq_set_chip_data(irq, v->base);
204 set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); 204 irq_set_probe(irq);
205 return 0; 205 return 0;
206} 206}
207 207
@@ -225,7 +225,7 @@ static int handle_one_vic(struct vic_device *vic, struct pt_regs *regs)
225 return handled; 225 return handled;
226} 226}
227 227
228static void vic_handle_irq_cascaded(unsigned int irq, struct irq_desc *desc) 228static void vic_handle_irq_cascaded(struct irq_desc *desc)
229{ 229{
230 u32 stat, hwirq; 230 u32 stat, hwirq;
231 struct irq_chip *host_chip = irq_desc_get_chip(desc); 231 struct irq_chip *host_chip = irq_desc_get_chip(desc);
diff --git a/drivers/irqchip/irq-vt8500.c b/drivers/irqchip/irq-vt8500.c
index 8371d9978d31..f9af0af21751 100644
--- a/drivers/irqchip/irq-vt8500.c
+++ b/drivers/irqchip/irq-vt8500.c
@@ -167,7 +167,6 @@ static int vt8500_irq_map(struct irq_domain *h, unsigned int virq,
167 irq_hw_number_t hw) 167 irq_hw_number_t hw)
168{ 168{
169 irq_set_chip_and_handler(virq, &vt8500_irq_chip, handle_level_irq); 169 irq_set_chip_and_handler(virq, &vt8500_irq_chip, handle_level_irq);
170 set_irq_flags(virq, IRQF_VALID);
171 170
172 return 0; 171 return 0;
173} 172}
diff --git a/drivers/irqchip/spear-shirq.c b/drivers/irqchip/spear-shirq.c
index 4cbd9c5dc1e6..1ccd2abed65f 100644
--- a/drivers/irqchip/spear-shirq.c
+++ b/drivers/irqchip/spear-shirq.c
@@ -182,7 +182,7 @@ static struct spear_shirq *spear320_shirq_blocks[] = {
182 &spear320_shirq_intrcomm_ras, 182 &spear320_shirq_intrcomm_ras,
183}; 183};
184 184
185static void shirq_handler(unsigned __irq, struct irq_desc *desc) 185static void shirq_handler(struct irq_desc *desc)
186{ 186{
187 struct spear_shirq *shirq = irq_desc_get_handler_data(desc); 187 struct spear_shirq *shirq = irq_desc_get_handler_data(desc);
188 u32 pend; 188 u32 pend;
@@ -211,7 +211,6 @@ static void __init spear_shirq_register(struct spear_shirq *shirq,
211 for (i = 0; i < shirq->nr_irqs; i++) { 211 for (i = 0; i < shirq->nr_irqs; i++) {
212 irq_set_chip_and_handler(shirq->virq_base + i, 212 irq_set_chip_and_handler(shirq->virq_base + i,
213 shirq->irq_chip, handle_simple_irq); 213 shirq->irq_chip, handle_simple_irq);
214 set_irq_flags(shirq->virq_base + i, IRQF_VALID);
215 irq_set_chip_data(shirq->virq_base + i, shirq); 214 irq_set_chip_data(shirq->virq_base + i, shirq);
216 } 215 }
217} 216}
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 70f4255ff291..42990f2d0317 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -170,6 +170,7 @@ config LEDS_SUNFIRE
170 170
171config LEDS_IPAQ_MICRO 171config LEDS_IPAQ_MICRO
172 tristate "LED Support for the Compaq iPAQ h3xxx" 172 tristate "LED Support for the Compaq iPAQ h3xxx"
173 depends on LEDS_CLASS
173 depends on MFD_IPAQ_MICRO 174 depends on MFD_IPAQ_MICRO
174 help 175 help
175 Choose this option if you want to use the notification LED on 176 Choose this option if you want to use the notification LED on
@@ -229,7 +230,7 @@ config LEDS_LP55XX_COMMON
229 tristate "Common Driver for TI/National LP5521/5523/55231/5562/8501" 230 tristate "Common Driver for TI/National LP5521/5523/55231/5562/8501"
230 depends on LEDS_LP5521 || LEDS_LP5523 || LEDS_LP5562 || LEDS_LP8501 231 depends on LEDS_LP5521 || LEDS_LP5523 || LEDS_LP5562 || LEDS_LP8501
231 select FW_LOADER 232 select FW_LOADER
232 select FW_LOADER_USER_HELPER_FALLBACK 233 select FW_LOADER_USER_HELPER
233 help 234 help
234 This option supports common operations for LP5521/5523/55231/5562/8501 235 This option supports common operations for LP5521/5523/55231/5562/8501
235 devices. 236 devices.
diff --git a/drivers/leds/leds-aat1290.c b/drivers/leds/leds-aat1290.c
index fd7c25fd29c1..ac77d36b630c 100644
--- a/drivers/leds/leds-aat1290.c
+++ b/drivers/leds/leds-aat1290.c
@@ -331,7 +331,7 @@ static void aat1290_led_validate_mm_current(struct aat1290_led *led,
331 cfg->max_brightness = b + 1; 331 cfg->max_brightness = b + 1;
332} 332}
333 333
334int init_mm_current_scale(struct aat1290_led *led, 334static int init_mm_current_scale(struct aat1290_led *led,
335 struct aat1290_led_config_data *cfg) 335 struct aat1290_led_config_data *cfg)
336{ 336{
337 int max_mm_current_percent[] = { 20, 22, 25, 28, 32, 36, 40, 45, 50, 56, 337 int max_mm_current_percent[] = { 20, 22, 25, 28, 32, 36, 40, 45, 50, 56,
@@ -559,6 +559,7 @@ static const struct of_device_id aat1290_led_dt_match[] = {
559 { .compatible = "skyworks,aat1290" }, 559 { .compatible = "skyworks,aat1290" },
560 {}, 560 {},
561}; 561};
562MODULE_DEVICE_TABLE(of, aat1290_led_dt_match);
562 563
563static struct platform_driver aat1290_led_driver = { 564static struct platform_driver aat1290_led_driver = {
564 .probe = aat1290_led_probe, 565 .probe = aat1290_led_probe,
diff --git a/drivers/leds/leds-bcm6328.c b/drivers/leds/leds-bcm6328.c
index 986fe1e28f84..1793727bc9ae 100644
--- a/drivers/leds/leds-bcm6328.c
+++ b/drivers/leds/leds-bcm6328.c
@@ -395,6 +395,7 @@ static const struct of_device_id bcm6328_leds_of_match[] = {
395 { .compatible = "brcm,bcm6328-leds", }, 395 { .compatible = "brcm,bcm6328-leds", },
396 { }, 396 { },
397}; 397};
398MODULE_DEVICE_TABLE(of, bcm6328_leds_of_match);
398 399
399static struct platform_driver bcm6328_leds_driver = { 400static struct platform_driver bcm6328_leds_driver = {
400 .probe = bcm6328_leds_probe, 401 .probe = bcm6328_leds_probe,
diff --git a/drivers/leds/leds-bcm6358.c b/drivers/leds/leds-bcm6358.c
index 21f96930b3be..7ea3526702e0 100644
--- a/drivers/leds/leds-bcm6358.c
+++ b/drivers/leds/leds-bcm6358.c
@@ -226,6 +226,7 @@ static const struct of_device_id bcm6358_leds_of_match[] = {
226 { .compatible = "brcm,bcm6358-leds", }, 226 { .compatible = "brcm,bcm6358-leds", },
227 { }, 227 { },
228}; 228};
229MODULE_DEVICE_TABLE(of, bcm6358_leds_of_match);
229 230
230static struct platform_driver bcm6358_leds_driver = { 231static struct platform_driver bcm6358_leds_driver = {
231 .probe = bcm6358_leds_probe, 232 .probe = bcm6358_leds_probe,
diff --git a/drivers/leds/leds-ktd2692.c b/drivers/leds/leds-ktd2692.c
index 2ae8c4d17ff8..feca07be85f5 100644
--- a/drivers/leds/leds-ktd2692.c
+++ b/drivers/leds/leds-ktd2692.c
@@ -426,6 +426,7 @@ static const struct of_device_id ktd2692_match[] = {
426 { .compatible = "kinetic,ktd2692", }, 426 { .compatible = "kinetic,ktd2692", },
427 { /* sentinel */ }, 427 { /* sentinel */ },
428}; 428};
429MODULE_DEVICE_TABLE(of, ktd2692_match);
429 430
430static struct platform_driver ktd2692_driver = { 431static struct platform_driver ktd2692_driver = {
431 .driver = { 432 .driver = {
diff --git a/drivers/leds/leds-max77693.c b/drivers/leds/leds-max77693.c
index df348a06d8c7..afbb1409b2e2 100644
--- a/drivers/leds/leds-max77693.c
+++ b/drivers/leds/leds-max77693.c
@@ -1080,6 +1080,7 @@ static const struct of_device_id max77693_led_dt_match[] = {
1080 { .compatible = "maxim,max77693-led" }, 1080 { .compatible = "maxim,max77693-led" },
1081 {}, 1081 {},
1082}; 1082};
1083MODULE_DEVICE_TABLE(of, max77693_led_dt_match);
1083 1084
1084static struct platform_driver max77693_led_driver = { 1085static struct platform_driver max77693_led_driver = {
1085 .probe = max77693_led_probe, 1086 .probe = max77693_led_probe,
diff --git a/drivers/leds/leds-ns2.c b/drivers/leds/leds-ns2.c
index b33514d9f427..a95a61220169 100644
--- a/drivers/leds/leds-ns2.c
+++ b/drivers/leds/leds-ns2.c
@@ -337,6 +337,7 @@ static const struct of_device_id of_ns2_leds_match[] = {
337 { .compatible = "lacie,ns2-leds", }, 337 { .compatible = "lacie,ns2-leds", },
338 {}, 338 {},
339}; 339};
340MODULE_DEVICE_TABLE(of, of_ns2_leds_match);
340#endif /* CONFIG_OF_GPIO */ 341#endif /* CONFIG_OF_GPIO */
341 342
342struct ns2_led_priv { 343struct ns2_led_priv {
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index d5415eedba86..3e01e6fb3424 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -393,7 +393,7 @@ config DM_MULTIPATH
393 # of SCSI_DH if the latter isn't defined but if 393 # of SCSI_DH if the latter isn't defined but if
394 # it is, DM_MULTIPATH must depend on it. We get a build 394 # it is, DM_MULTIPATH must depend on it. We get a build
395 # error if SCSI_DH=m and DM_MULTIPATH=y 395 # error if SCSI_DH=m and DM_MULTIPATH=y
396 depends on SCSI_DH || !SCSI_DH 396 depends on !SCSI_DH || SCSI
397 ---help--- 397 ---help---
398 Allow volume managers to support multipath hardware. 398 Allow volume managers to support multipath hardware.
399 399
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index d60c88df5234..4b3b6f8aff0c 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -968,7 +968,8 @@ static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone);
968 968
969/* 969/*
970 * Generate a new unfragmented bio with the given size 970 * Generate a new unfragmented bio with the given size
971 * This should never violate the device limitations 971 * This should never violate the device limitations (but only because
972 * max_segment_size is being constrained to PAGE_SIZE).
972 * 973 *
973 * This function may be called concurrently. If we allocate from the mempool 974 * This function may be called concurrently. If we allocate from the mempool
974 * concurrently, there is a possibility of deadlock. For example, if we have 975 * concurrently, there is a possibility of deadlock. For example, if we have
@@ -2045,9 +2046,20 @@ static int crypt_iterate_devices(struct dm_target *ti,
2045 return fn(ti, cc->dev, cc->start, ti->len, data); 2046 return fn(ti, cc->dev, cc->start, ti->len, data);
2046} 2047}
2047 2048
2049static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
2050{
2051 /*
2052 * Unfortunate constraint that is required to avoid the potential
2053 * for exceeding underlying device's max_segments limits -- due to
2054 * crypt_alloc_buffer() possibly allocating pages for the encryption
2055 * bio that are not as physically contiguous as the original bio.
2056 */
2057 limits->max_segment_size = PAGE_SIZE;
2058}
2059
2048static struct target_type crypt_target = { 2060static struct target_type crypt_target = {
2049 .name = "crypt", 2061 .name = "crypt",
2050 .version = {1, 14, 0}, 2062 .version = {1, 14, 1},
2051 .module = THIS_MODULE, 2063 .module = THIS_MODULE,
2052 .ctr = crypt_ctr, 2064 .ctr = crypt_ctr,
2053 .dtr = crypt_dtr, 2065 .dtr = crypt_dtr,
@@ -2058,6 +2070,7 @@ static struct target_type crypt_target = {
2058 .resume = crypt_resume, 2070 .resume = crypt_resume,
2059 .message = crypt_message, 2071 .message = crypt_message,
2060 .iterate_devices = crypt_iterate_devices, 2072 .iterate_devices = crypt_iterate_devices,
2073 .io_hints = crypt_io_hints,
2061}; 2074};
2062 2075
2063static int __init dm_crypt_init(void) 2076static int __init dm_crypt_init(void)
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index eff7bdd7731d..5a67671a3973 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -159,12 +159,9 @@ static struct priority_group *alloc_priority_group(void)
159static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti) 159static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti)
160{ 160{
161 struct pgpath *pgpath, *tmp; 161 struct pgpath *pgpath, *tmp;
162 struct multipath *m = ti->private;
163 162
164 list_for_each_entry_safe(pgpath, tmp, pgpaths, list) { 163 list_for_each_entry_safe(pgpath, tmp, pgpaths, list) {
165 list_del(&pgpath->list); 164 list_del(&pgpath->list);
166 if (m->hw_handler_name)
167 scsi_dh_detach(bdev_get_queue(pgpath->path.dev->bdev));
168 dm_put_device(ti, pgpath->path.dev); 165 dm_put_device(ti, pgpath->path.dev);
169 free_pgpath(pgpath); 166 free_pgpath(pgpath);
170 } 167 }
@@ -580,6 +577,7 @@ static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps
580 q = bdev_get_queue(p->path.dev->bdev); 577 q = bdev_get_queue(p->path.dev->bdev);
581 578
582 if (m->retain_attached_hw_handler) { 579 if (m->retain_attached_hw_handler) {
580retain:
583 attached_handler_name = scsi_dh_attached_handler_name(q, GFP_KERNEL); 581 attached_handler_name = scsi_dh_attached_handler_name(q, GFP_KERNEL);
584 if (attached_handler_name) { 582 if (attached_handler_name) {
585 /* 583 /*
@@ -599,20 +597,14 @@ static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps
599 } 597 }
600 598
601 if (m->hw_handler_name) { 599 if (m->hw_handler_name) {
602 /*
603 * Increments scsi_dh reference, even when using an
604 * already-attached handler.
605 */
606 r = scsi_dh_attach(q, m->hw_handler_name); 600 r = scsi_dh_attach(q, m->hw_handler_name);
607 if (r == -EBUSY) { 601 if (r == -EBUSY) {
608 /* 602 char b[BDEVNAME_SIZE];
609 * Already attached to different hw_handler:
610 * try to reattach with correct one.
611 */
612 scsi_dh_detach(q);
613 r = scsi_dh_attach(q, m->hw_handler_name);
614 }
615 603
604 printk(KERN_INFO "dm-mpath: retaining handler on device %s\n",
605 bdevname(p->path.dev->bdev, b));
606 goto retain;
607 }
616 if (r < 0) { 608 if (r < 0) {
617 ti->error = "error attaching hardware handler"; 609 ti->error = "error attaching hardware handler";
618 dm_put_device(ti, p->path.dev); 610 dm_put_device(ti, p->path.dev);
@@ -624,7 +616,6 @@ static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps
624 if (r < 0) { 616 if (r < 0) {
625 ti->error = "unable to set hardware " 617 ti->error = "unable to set hardware "
626 "handler parameters"; 618 "handler parameters";
627 scsi_dh_detach(q);
628 dm_put_device(ti, p->path.dev); 619 dm_put_device(ti, p->path.dev);
629 goto bad; 620 goto bad;
630 } 621 }
@@ -734,12 +725,6 @@ static int parse_hw_handler(struct dm_arg_set *as, struct multipath *m)
734 return 0; 725 return 0;
735 726
736 m->hw_handler_name = kstrdup(dm_shift_arg(as), GFP_KERNEL); 727 m->hw_handler_name = kstrdup(dm_shift_arg(as), GFP_KERNEL);
737 if (!try_then_request_module(scsi_dh_handler_exist(m->hw_handler_name),
738 "scsi_dh_%s", m->hw_handler_name)) {
739 ti->error = "unknown hardware handler type";
740 ret = -EINVAL;
741 goto fail;
742 }
743 728
744 if (hw_argc > 1) { 729 if (hw_argc > 1) {
745 char *p; 730 char *p;
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 6578b7bc1fbb..6fcbfb063366 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -4249,6 +4249,10 @@ static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits)
4249{ 4249{
4250 struct thin_c *tc = ti->private; 4250 struct thin_c *tc = ti->private;
4251 struct pool *pool = tc->pool; 4251 struct pool *pool = tc->pool;
4252 struct queue_limits *pool_limits = dm_get_queue_limits(pool->pool_md);
4253
4254 if (!pool_limits->discard_granularity)
4255 return; /* pool's discard support is disabled */
4252 4256
4253 limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT; 4257 limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT;
4254 limits->max_discard_sectors = 2048 * 1024 * 16; /* 16G */ 4258 limits->max_discard_sectors = 2048 * 1024 * 16; /* 16G */
diff --git a/drivers/media/platform/omap/Kconfig b/drivers/media/platform/omap/Kconfig
index dc2aaab54aef..217d613b0fe7 100644
--- a/drivers/media/platform/omap/Kconfig
+++ b/drivers/media/platform/omap/Kconfig
@@ -10,6 +10,7 @@ config VIDEO_OMAP2_VOUT
10 select OMAP2_DSS if HAS_IOMEM && ARCH_OMAP2PLUS 10 select OMAP2_DSS if HAS_IOMEM && ARCH_OMAP2PLUS
11 select OMAP2_VRFB if ARCH_OMAP2 || ARCH_OMAP3 11 select OMAP2_VRFB if ARCH_OMAP2 || ARCH_OMAP3
12 select VIDEO_OMAP2_VOUT_VRFB if VIDEO_OMAP2_VOUT && OMAP2_VRFB 12 select VIDEO_OMAP2_VOUT_VRFB if VIDEO_OMAP2_VOUT && OMAP2_VRFB
13 select FRAME_VECTOR
13 default n 14 default n
14 ---help--- 15 ---help---
15 V4L2 Display driver support for OMAP2/3 based boards. 16 V4L2 Display driver support for OMAP2/3 based boards.
diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
index de2474e1132d..70c28d19ea04 100644
--- a/drivers/media/platform/omap/omap_vout.c
+++ b/drivers/media/platform/omap/omap_vout.c
@@ -195,46 +195,34 @@ static int omap_vout_try_format(struct v4l2_pix_format *pix)
195} 195}
196 196
197/* 197/*
198 * omap_vout_uservirt_to_phys: This inline function is used to convert user 198 * omap_vout_get_userptr: Convert user space virtual address to physical
199 * space virtual address to physical address. 199 * address.
200 */ 200 */
201static unsigned long omap_vout_uservirt_to_phys(unsigned long virtp) 201static int omap_vout_get_userptr(struct videobuf_buffer *vb, u32 virtp,
202 u32 *physp)
202{ 203{
203 unsigned long physp = 0; 204 struct frame_vector *vec;
204 struct vm_area_struct *vma; 205 int ret;
205 struct mm_struct *mm = current->mm;
206 206
207 /* For kernel direct-mapped memory, take the easy way */ 207 /* For kernel direct-mapped memory, take the easy way */
208 if (virtp >= PAGE_OFFSET) 208 if (virtp >= PAGE_OFFSET) {
209 return virt_to_phys((void *) virtp); 209 *physp = virt_to_phys((void *)virtp);
210 210 return 0;
211 down_read(&current->mm->mmap_sem); 211 }
212 vma = find_vma(mm, virtp);
213 if (vma && (vma->vm_flags & VM_IO) && vma->vm_pgoff) {
214 /* this will catch, kernel-allocated, mmaped-to-usermode
215 addresses */
216 physp = (vma->vm_pgoff << PAGE_SHIFT) + (virtp - vma->vm_start);
217 up_read(&current->mm->mmap_sem);
218 } else {
219 /* otherwise, use get_user_pages() for general userland pages */
220 int res, nr_pages = 1;
221 struct page *pages;
222 212
223 res = get_user_pages(current, current->mm, virtp, nr_pages, 1, 213 vec = frame_vector_create(1);
224 0, &pages, NULL); 214 if (!vec)
225 up_read(&current->mm->mmap_sem); 215 return -ENOMEM;
226 216
227 if (res == nr_pages) { 217 ret = get_vaddr_frames(virtp, 1, true, false, vec);
228 physp = __pa(page_address(&pages[0]) + 218 if (ret != 1) {
229 (virtp & ~PAGE_MASK)); 219 frame_vector_destroy(vec);
230 } else { 220 return -EINVAL;
231 printk(KERN_WARNING VOUT_NAME
232 "get_user_pages failed\n");
233 return 0;
234 }
235 } 221 }
222 *physp = __pfn_to_phys(frame_vector_pfns(vec)[0]);
223 vb->priv = vec;
236 224
237 return physp; 225 return 0;
238} 226}
239 227
240/* 228/*
@@ -784,11 +772,15 @@ static int omap_vout_buffer_prepare(struct videobuf_queue *q,
784 * address of the buffer 772 * address of the buffer
785 */ 773 */
786 if (V4L2_MEMORY_USERPTR == vb->memory) { 774 if (V4L2_MEMORY_USERPTR == vb->memory) {
775 int ret;
776
787 if (0 == vb->baddr) 777 if (0 == vb->baddr)
788 return -EINVAL; 778 return -EINVAL;
789 /* Physical address */ 779 /* Physical address */
790 vout->queued_buf_addr[vb->i] = (u8 *) 780 ret = omap_vout_get_userptr(vb, vb->baddr,
791 omap_vout_uservirt_to_phys(vb->baddr); 781 (u32 *)&vout->queued_buf_addr[vb->i]);
782 if (ret < 0)
783 return ret;
792 } else { 784 } else {
793 unsigned long addr, dma_addr; 785 unsigned long addr, dma_addr;
794 unsigned long size; 786 unsigned long size;
@@ -834,12 +826,13 @@ static void omap_vout_buffer_queue(struct videobuf_queue *q,
834static void omap_vout_buffer_release(struct videobuf_queue *q, 826static void omap_vout_buffer_release(struct videobuf_queue *q,
835 struct videobuf_buffer *vb) 827 struct videobuf_buffer *vb)
836{ 828{
837 struct omap_vout_device *vout = q->priv_data;
838
839 vb->state = VIDEOBUF_NEEDS_INIT; 829 vb->state = VIDEOBUF_NEEDS_INIT;
830 if (vb->memory == V4L2_MEMORY_USERPTR && vb->priv) {
831 struct frame_vector *vec = vb->priv;
840 832
841 if (V4L2_MEMORY_MMAP != vout->memory) 833 put_vaddr_frames(vec);
842 return; 834 frame_vector_destroy(vec);
835 }
843} 836}
844 837
845/* 838/*
diff --git a/drivers/media/v4l2-core/Kconfig b/drivers/media/v4l2-core/Kconfig
index b4b022933e29..82876a67f144 100644
--- a/drivers/media/v4l2-core/Kconfig
+++ b/drivers/media/v4l2-core/Kconfig
@@ -84,6 +84,7 @@ config VIDEOBUF2_CORE
84 84
85config VIDEOBUF2_MEMOPS 85config VIDEOBUF2_MEMOPS
86 tristate 86 tristate
87 select FRAME_VECTOR
87 88
88config VIDEOBUF2_DMA_CONTIG 89config VIDEOBUF2_DMA_CONTIG
89 tristate 90 tristate
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
index f1022d810d22..4f59b7ec05d0 100644
--- a/drivers/media/v4l2-core/videobuf2-core.c
+++ b/drivers/media/v4l2-core/videobuf2-core.c
@@ -1691,9 +1691,7 @@ static int __buf_prepare(struct vb2_buffer *vb, const struct v4l2_buffer *b)
1691 ret = __qbuf_mmap(vb, b); 1691 ret = __qbuf_mmap(vb, b);
1692 break; 1692 break;
1693 case V4L2_MEMORY_USERPTR: 1693 case V4L2_MEMORY_USERPTR:
1694 down_read(&current->mm->mmap_sem);
1695 ret = __qbuf_userptr(vb, b); 1694 ret = __qbuf_userptr(vb, b);
1696 up_read(&current->mm->mmap_sem);
1697 break; 1695 break;
1698 case V4L2_MEMORY_DMABUF: 1696 case V4L2_MEMORY_DMABUF:
1699 ret = __qbuf_dmabuf(vb, b); 1697 ret = __qbuf_dmabuf(vb, b);
diff --git a/drivers/media/v4l2-core/videobuf2-dma-contig.c b/drivers/media/v4l2-core/videobuf2-dma-contig.c
index 94c1e6455d36..2397ceb1dc6b 100644
--- a/drivers/media/v4l2-core/videobuf2-dma-contig.c
+++ b/drivers/media/v4l2-core/videobuf2-dma-contig.c
@@ -32,15 +32,13 @@ struct vb2_dc_buf {
32 dma_addr_t dma_addr; 32 dma_addr_t dma_addr;
33 enum dma_data_direction dma_dir; 33 enum dma_data_direction dma_dir;
34 struct sg_table *dma_sgt; 34 struct sg_table *dma_sgt;
35 struct frame_vector *vec;
35 36
36 /* MMAP related */ 37 /* MMAP related */
37 struct vb2_vmarea_handler handler; 38 struct vb2_vmarea_handler handler;
38 atomic_t refcount; 39 atomic_t refcount;
39 struct sg_table *sgt_base; 40 struct sg_table *sgt_base;
40 41
41 /* USERPTR related */
42 struct vm_area_struct *vma;
43
44 /* DMABUF related */ 42 /* DMABUF related */
45 struct dma_buf_attachment *db_attach; 43 struct dma_buf_attachment *db_attach;
46}; 44};
@@ -49,24 +47,6 @@ struct vb2_dc_buf {
49/* scatterlist table functions */ 47/* scatterlist table functions */
50/*********************************************/ 48/*********************************************/
51 49
52
53static void vb2_dc_sgt_foreach_page(struct sg_table *sgt,
54 void (*cb)(struct page *pg))
55{
56 struct scatterlist *s;
57 unsigned int i;
58
59 for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
60 struct page *page = sg_page(s);
61 unsigned int n_pages = PAGE_ALIGN(s->offset + s->length)
62 >> PAGE_SHIFT;
63 unsigned int j;
64
65 for (j = 0; j < n_pages; ++j, ++page)
66 cb(page);
67 }
68}
69
70static unsigned long vb2_dc_get_contiguous_size(struct sg_table *sgt) 50static unsigned long vb2_dc_get_contiguous_size(struct sg_table *sgt)
71{ 51{
72 struct scatterlist *s; 52 struct scatterlist *s;
@@ -429,92 +409,12 @@ static struct dma_buf *vb2_dc_get_dmabuf(void *buf_priv, unsigned long flags)
429/* callbacks for USERPTR buffers */ 409/* callbacks for USERPTR buffers */
430/*********************************************/ 410/*********************************************/
431 411
432static inline int vma_is_io(struct vm_area_struct *vma)
433{
434 return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
435}
436
437static int vb2_dc_get_user_pfn(unsigned long start, int n_pages,
438 struct vm_area_struct *vma, unsigned long *res)
439{
440 unsigned long pfn, start_pfn, prev_pfn;
441 unsigned int i;
442 int ret;
443
444 if (!vma_is_io(vma))
445 return -EFAULT;
446
447 ret = follow_pfn(vma, start, &pfn);
448 if (ret)
449 return ret;
450
451 start_pfn = pfn;
452 start += PAGE_SIZE;
453
454 for (i = 1; i < n_pages; ++i, start += PAGE_SIZE) {
455 prev_pfn = pfn;
456 ret = follow_pfn(vma, start, &pfn);
457
458 if (ret) {
459 pr_err("no page for address %lu\n", start);
460 return ret;
461 }
462 if (pfn != prev_pfn + 1)
463 return -EINVAL;
464 }
465
466 *res = start_pfn;
467 return 0;
468}
469
470static int vb2_dc_get_user_pages(unsigned long start, struct page **pages,
471 int n_pages, struct vm_area_struct *vma,
472 enum dma_data_direction dma_dir)
473{
474 if (vma_is_io(vma)) {
475 unsigned int i;
476
477 for (i = 0; i < n_pages; ++i, start += PAGE_SIZE) {
478 unsigned long pfn;
479 int ret = follow_pfn(vma, start, &pfn);
480
481 if (!pfn_valid(pfn))
482 return -EINVAL;
483
484 if (ret) {
485 pr_err("no page for address %lu\n", start);
486 return ret;
487 }
488 pages[i] = pfn_to_page(pfn);
489 }
490 } else {
491 int n;
492
493 n = get_user_pages(current, current->mm, start & PAGE_MASK,
494 n_pages, dma_dir == DMA_FROM_DEVICE, 1, pages, NULL);
495 /* negative error means that no page was pinned */
496 n = max(n, 0);
497 if (n != n_pages) {
498 pr_err("got only %d of %d user pages\n", n, n_pages);
499 while (n)
500 put_page(pages[--n]);
501 return -EFAULT;
502 }
503 }
504
505 return 0;
506}
507
508static void vb2_dc_put_dirty_page(struct page *page)
509{
510 set_page_dirty_lock(page);
511 put_page(page);
512}
513
514static void vb2_dc_put_userptr(void *buf_priv) 412static void vb2_dc_put_userptr(void *buf_priv)
515{ 413{
516 struct vb2_dc_buf *buf = buf_priv; 414 struct vb2_dc_buf *buf = buf_priv;
517 struct sg_table *sgt = buf->dma_sgt; 415 struct sg_table *sgt = buf->dma_sgt;
416 int i;
417 struct page **pages;
518 418
519 if (sgt) { 419 if (sgt) {
520 DEFINE_DMA_ATTRS(attrs); 420 DEFINE_DMA_ATTRS(attrs);
@@ -526,13 +426,15 @@ static void vb2_dc_put_userptr(void *buf_priv)
526 */ 426 */
527 dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents, 427 dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
528 buf->dma_dir, &attrs); 428 buf->dma_dir, &attrs);
529 if (!vma_is_io(buf->vma)) 429 pages = frame_vector_pages(buf->vec);
530 vb2_dc_sgt_foreach_page(sgt, vb2_dc_put_dirty_page); 430 /* sgt should exist only if vector contains pages... */
531 431 BUG_ON(IS_ERR(pages));
432 for (i = 0; i < frame_vector_count(buf->vec); i++)
433 set_page_dirty_lock(pages[i]);
532 sg_free_table(sgt); 434 sg_free_table(sgt);
533 kfree(sgt); 435 kfree(sgt);
534 } 436 }
535 vb2_put_vma(buf->vma); 437 vb2_destroy_framevec(buf->vec);
536 kfree(buf); 438 kfree(buf);
537} 439}
538 440
@@ -572,13 +474,10 @@ static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
572{ 474{
573 struct vb2_dc_conf *conf = alloc_ctx; 475 struct vb2_dc_conf *conf = alloc_ctx;
574 struct vb2_dc_buf *buf; 476 struct vb2_dc_buf *buf;
575 unsigned long start; 477 struct frame_vector *vec;
576 unsigned long end;
577 unsigned long offset; 478 unsigned long offset;
578 struct page **pages; 479 int n_pages, i;
579 int n_pages;
580 int ret = 0; 480 int ret = 0;
581 struct vm_area_struct *vma;
582 struct sg_table *sgt; 481 struct sg_table *sgt;
583 unsigned long contig_size; 482 unsigned long contig_size;
584 unsigned long dma_align = dma_get_cache_alignment(); 483 unsigned long dma_align = dma_get_cache_alignment();
@@ -604,72 +503,43 @@ static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
604 buf->dev = conf->dev; 503 buf->dev = conf->dev;
605 buf->dma_dir = dma_dir; 504 buf->dma_dir = dma_dir;
606 505
607 start = vaddr & PAGE_MASK;
608 offset = vaddr & ~PAGE_MASK; 506 offset = vaddr & ~PAGE_MASK;
609 end = PAGE_ALIGN(vaddr + size); 507 vec = vb2_create_framevec(vaddr, size, dma_dir == DMA_FROM_DEVICE);
610 n_pages = (end - start) >> PAGE_SHIFT; 508 if (IS_ERR(vec)) {
611 509 ret = PTR_ERR(vec);
612 pages = kmalloc(n_pages * sizeof(pages[0]), GFP_KERNEL);
613 if (!pages) {
614 ret = -ENOMEM;
615 pr_err("failed to allocate pages table\n");
616 goto fail_buf; 510 goto fail_buf;
617 } 511 }
512 buf->vec = vec;
513 n_pages = frame_vector_count(vec);
514 ret = frame_vector_to_pages(vec);
515 if (ret < 0) {
516 unsigned long *nums = frame_vector_pfns(vec);
618 517
619 /* current->mm->mmap_sem is taken by videobuf2 core */ 518 /*
620 vma = find_vma(current->mm, vaddr); 519 * Failed to convert to pages... Check the memory is physically
621 if (!vma) { 520 * contiguous and use direct mapping
622 pr_err("no vma for address %lu\n", vaddr); 521 */
623 ret = -EFAULT; 522 for (i = 1; i < n_pages; i++)
624 goto fail_pages; 523 if (nums[i-1] + 1 != nums[i])
625 } 524 goto fail_pfnvec;
626 525 buf->dma_addr = vb2_dc_pfn_to_dma(buf->dev, nums[0]);
627 if (vma->vm_end < vaddr + size) { 526 goto out;
628 pr_err("vma at %lu is too small for %lu bytes\n", vaddr, size);
629 ret = -EFAULT;
630 goto fail_pages;
631 }
632
633 buf->vma = vb2_get_vma(vma);
634 if (!buf->vma) {
635 pr_err("failed to copy vma\n");
636 ret = -ENOMEM;
637 goto fail_pages;
638 }
639
640 /* extract page list from userspace mapping */
641 ret = vb2_dc_get_user_pages(start, pages, n_pages, vma, dma_dir);
642 if (ret) {
643 unsigned long pfn;
644 if (vb2_dc_get_user_pfn(start, n_pages, vma, &pfn) == 0) {
645 buf->dma_addr = vb2_dc_pfn_to_dma(buf->dev, pfn);
646 buf->size = size;
647 kfree(pages);
648 return buf;
649 }
650
651 pr_err("failed to get user pages\n");
652 goto fail_vma;
653 } 527 }
654 528
655 sgt = kzalloc(sizeof(*sgt), GFP_KERNEL); 529 sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
656 if (!sgt) { 530 if (!sgt) {
657 pr_err("failed to allocate sg table\n"); 531 pr_err("failed to allocate sg table\n");
658 ret = -ENOMEM; 532 ret = -ENOMEM;
659 goto fail_get_user_pages; 533 goto fail_pfnvec;
660 } 534 }
661 535
662 ret = sg_alloc_table_from_pages(sgt, pages, n_pages, 536 ret = sg_alloc_table_from_pages(sgt, frame_vector_pages(vec), n_pages,
663 offset, size, GFP_KERNEL); 537 offset, size, GFP_KERNEL);
664 if (ret) { 538 if (ret) {
665 pr_err("failed to initialize sg table\n"); 539 pr_err("failed to initialize sg table\n");
666 goto fail_sgt; 540 goto fail_sgt;
667 } 541 }
668 542
669 /* pages are no longer needed */
670 kfree(pages);
671 pages = NULL;
672
673 /* 543 /*
674 * No need to sync to the device, this will happen later when the 544 * No need to sync to the device, this will happen later when the
675 * prepare() memop is called. 545 * prepare() memop is called.
@@ -691,8 +561,9 @@ static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
691 } 561 }
692 562
693 buf->dma_addr = sg_dma_address(sgt->sgl); 563 buf->dma_addr = sg_dma_address(sgt->sgl);
694 buf->size = size;
695 buf->dma_sgt = sgt; 564 buf->dma_sgt = sgt;
565out:
566 buf->size = size;
696 567
697 return buf; 568 return buf;
698 569
@@ -701,23 +572,13 @@ fail_map_sg:
701 buf->dma_dir, &attrs); 572 buf->dma_dir, &attrs);
702 573
703fail_sgt_init: 574fail_sgt_init:
704 if (!vma_is_io(buf->vma))
705 vb2_dc_sgt_foreach_page(sgt, put_page);
706 sg_free_table(sgt); 575 sg_free_table(sgt);
707 576
708fail_sgt: 577fail_sgt:
709 kfree(sgt); 578 kfree(sgt);
710 579
711fail_get_user_pages: 580fail_pfnvec:
712 if (pages && !vma_is_io(buf->vma)) 581 vb2_destroy_framevec(vec);
713 while (n_pages)
714 put_page(pages[--n_pages]);
715
716fail_vma:
717 vb2_put_vma(buf->vma);
718
719fail_pages:
720 kfree(pages); /* kfree is NULL-proof */
721 582
722fail_buf: 583fail_buf:
723 kfree(buf); 584 kfree(buf);
diff --git a/drivers/media/v4l2-core/videobuf2-dma-sg.c b/drivers/media/v4l2-core/videobuf2-dma-sg.c
index 7289b81bd7b7..be7bd6535c9d 100644
--- a/drivers/media/v4l2-core/videobuf2-dma-sg.c
+++ b/drivers/media/v4l2-core/videobuf2-dma-sg.c
@@ -38,6 +38,7 @@ struct vb2_dma_sg_buf {
38 struct device *dev; 38 struct device *dev;
39 void *vaddr; 39 void *vaddr;
40 struct page **pages; 40 struct page **pages;
41 struct frame_vector *vec;
41 int offset; 42 int offset;
42 enum dma_data_direction dma_dir; 43 enum dma_data_direction dma_dir;
43 struct sg_table sg_table; 44 struct sg_table sg_table;
@@ -51,7 +52,6 @@ struct vb2_dma_sg_buf {
51 unsigned int num_pages; 52 unsigned int num_pages;
52 atomic_t refcount; 53 atomic_t refcount;
53 struct vb2_vmarea_handler handler; 54 struct vb2_vmarea_handler handler;
54 struct vm_area_struct *vma;
55 55
56 struct dma_buf_attachment *db_attach; 56 struct dma_buf_attachment *db_attach;
57}; 57};
@@ -225,25 +225,17 @@ static void vb2_dma_sg_finish(void *buf_priv)
225 dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir); 225 dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
226} 226}
227 227
228static inline int vma_is_io(struct vm_area_struct *vma)
229{
230 return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
231}
232
233static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr, 228static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
234 unsigned long size, 229 unsigned long size,
235 enum dma_data_direction dma_dir) 230 enum dma_data_direction dma_dir)
236{ 231{
237 struct vb2_dma_sg_conf *conf = alloc_ctx; 232 struct vb2_dma_sg_conf *conf = alloc_ctx;
238 struct vb2_dma_sg_buf *buf; 233 struct vb2_dma_sg_buf *buf;
239 unsigned long first, last;
240 int num_pages_from_user;
241 struct vm_area_struct *vma;
242 struct sg_table *sgt; 234 struct sg_table *sgt;
243 DEFINE_DMA_ATTRS(attrs); 235 DEFINE_DMA_ATTRS(attrs);
236 struct frame_vector *vec;
244 237
245 dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs); 238 dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
246
247 buf = kzalloc(sizeof *buf, GFP_KERNEL); 239 buf = kzalloc(sizeof *buf, GFP_KERNEL);
248 if (!buf) 240 if (!buf)
249 return NULL; 241 return NULL;
@@ -254,61 +246,19 @@ static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
254 buf->offset = vaddr & ~PAGE_MASK; 246 buf->offset = vaddr & ~PAGE_MASK;
255 buf->size = size; 247 buf->size = size;
256 buf->dma_sgt = &buf->sg_table; 248 buf->dma_sgt = &buf->sg_table;
249 vec = vb2_create_framevec(vaddr, size, buf->dma_dir == DMA_FROM_DEVICE);
250 if (IS_ERR(vec))
251 goto userptr_fail_pfnvec;
252 buf->vec = vec;
257 253
258 first = (vaddr & PAGE_MASK) >> PAGE_SHIFT; 254 buf->pages = frame_vector_pages(vec);
259 last = ((vaddr + size - 1) & PAGE_MASK) >> PAGE_SHIFT; 255 if (IS_ERR(buf->pages))
260 buf->num_pages = last - first + 1; 256 goto userptr_fail_sgtable;
261 257 buf->num_pages = frame_vector_count(vec);
262 buf->pages = kzalloc(buf->num_pages * sizeof(struct page *),
263 GFP_KERNEL);
264 if (!buf->pages)
265 goto userptr_fail_alloc_pages;
266
267 vma = find_vma(current->mm, vaddr);
268 if (!vma) {
269 dprintk(1, "no vma for address %lu\n", vaddr);
270 goto userptr_fail_find_vma;
271 }
272
273 if (vma->vm_end < vaddr + size) {
274 dprintk(1, "vma at %lu is too small for %lu bytes\n",
275 vaddr, size);
276 goto userptr_fail_find_vma;
277 }
278
279 buf->vma = vb2_get_vma(vma);
280 if (!buf->vma) {
281 dprintk(1, "failed to copy vma\n");
282 goto userptr_fail_find_vma;
283 }
284
285 if (vma_is_io(buf->vma)) {
286 for (num_pages_from_user = 0;
287 num_pages_from_user < buf->num_pages;
288 ++num_pages_from_user, vaddr += PAGE_SIZE) {
289 unsigned long pfn;
290
291 if (follow_pfn(vma, vaddr, &pfn)) {
292 dprintk(1, "no page for address %lu\n", vaddr);
293 break;
294 }
295 buf->pages[num_pages_from_user] = pfn_to_page(pfn);
296 }
297 } else
298 num_pages_from_user = get_user_pages(current, current->mm,
299 vaddr & PAGE_MASK,
300 buf->num_pages,
301 buf->dma_dir == DMA_FROM_DEVICE,
302 1, /* force */
303 buf->pages,
304 NULL);
305
306 if (num_pages_from_user != buf->num_pages)
307 goto userptr_fail_get_user_pages;
308 258
309 if (sg_alloc_table_from_pages(buf->dma_sgt, buf->pages, 259 if (sg_alloc_table_from_pages(buf->dma_sgt, buf->pages,
310 buf->num_pages, buf->offset, size, 0)) 260 buf->num_pages, buf->offset, size, 0))
311 goto userptr_fail_alloc_table_from_pages; 261 goto userptr_fail_sgtable;
312 262
313 sgt = &buf->sg_table; 263 sgt = &buf->sg_table;
314 /* 264 /*
@@ -324,17 +274,9 @@ static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
324 274
325userptr_fail_map: 275userptr_fail_map:
326 sg_free_table(&buf->sg_table); 276 sg_free_table(&buf->sg_table);
327userptr_fail_alloc_table_from_pages: 277userptr_fail_sgtable:
328userptr_fail_get_user_pages: 278 vb2_destroy_framevec(vec);
329 dprintk(1, "get_user_pages requested/got: %d/%d]\n", 279userptr_fail_pfnvec:
330 buf->num_pages, num_pages_from_user);
331 if (!vma_is_io(buf->vma))
332 while (--num_pages_from_user >= 0)
333 put_page(buf->pages[num_pages_from_user]);
334 vb2_put_vma(buf->vma);
335userptr_fail_find_vma:
336 kfree(buf->pages);
337userptr_fail_alloc_pages:
338 kfree(buf); 280 kfree(buf);
339 return NULL; 281 return NULL;
340} 282}
@@ -362,11 +304,8 @@ static void vb2_dma_sg_put_userptr(void *buf_priv)
362 while (--i >= 0) { 304 while (--i >= 0) {
363 if (buf->dma_dir == DMA_FROM_DEVICE) 305 if (buf->dma_dir == DMA_FROM_DEVICE)
364 set_page_dirty_lock(buf->pages[i]); 306 set_page_dirty_lock(buf->pages[i]);
365 if (!vma_is_io(buf->vma))
366 put_page(buf->pages[i]);
367 } 307 }
368 kfree(buf->pages); 308 vb2_destroy_framevec(buf->vec);
369 vb2_put_vma(buf->vma);
370 kfree(buf); 309 kfree(buf);
371} 310}
372 311
diff --git a/drivers/media/v4l2-core/videobuf2-memops.c b/drivers/media/v4l2-core/videobuf2-memops.c
index 0d49b7951f84..48c6a49c4928 100644
--- a/drivers/media/v4l2-core/videobuf2-memops.c
+++ b/drivers/media/v4l2-core/videobuf2-memops.c
@@ -23,118 +23,62 @@
23#include <media/videobuf2-memops.h> 23#include <media/videobuf2-memops.h>
24 24
25/** 25/**
26 * vb2_get_vma() - acquire and lock the virtual memory area 26 * vb2_create_framevec() - map virtual addresses to pfns
27 * @vma: given virtual memory area 27 * @start: Virtual user address where we start mapping
28 * @length: Length of a range to map
29 * @write: Should we map for writing into the area
28 * 30 *
29 * This function attempts to acquire an area mapped in the userspace for 31 * This function allocates and fills in a vector with pfns corresponding to
30 * the duration of a hardware operation. The area is "locked" by performing 32 * virtual address range passed in arguments. If pfns have corresponding pages,
31 * the same set of operation that are done when process calls fork() and 33 * page references are also grabbed to pin pages in memory. The function
32 * memory areas are duplicated. 34 * returns pointer to the vector on success and error pointer in case of
33 * 35 * failure. Returned vector needs to be freed via vb2_destroy_pfnvec().
34 * Returns a copy of a virtual memory region on success or NULL.
35 */
36struct vm_area_struct *vb2_get_vma(struct vm_area_struct *vma)
37{
38 struct vm_area_struct *vma_copy;
39
40 vma_copy = kmalloc(sizeof(*vma_copy), GFP_KERNEL);
41 if (vma_copy == NULL)
42 return NULL;
43
44 if (vma->vm_ops && vma->vm_ops->open)
45 vma->vm_ops->open(vma);
46
47 if (vma->vm_file)
48 get_file(vma->vm_file);
49
50 memcpy(vma_copy, vma, sizeof(*vma));
51
52 vma_copy->vm_mm = NULL;
53 vma_copy->vm_next = NULL;
54 vma_copy->vm_prev = NULL;
55
56 return vma_copy;
57}
58EXPORT_SYMBOL_GPL(vb2_get_vma);
59
60/**
61 * vb2_put_userptr() - release a userspace virtual memory area
62 * @vma: virtual memory region associated with the area to be released
63 *
64 * This function releases the previously acquired memory area after a hardware
65 * operation.
66 */ 36 */
67void vb2_put_vma(struct vm_area_struct *vma) 37struct frame_vector *vb2_create_framevec(unsigned long start,
38 unsigned long length,
39 bool write)
68{ 40{
69 if (!vma) 41 int ret;
70 return; 42 unsigned long first, last;
71 43 unsigned long nr;
72 if (vma->vm_ops && vma->vm_ops->close) 44 struct frame_vector *vec;
73 vma->vm_ops->close(vma); 45
74 46 first = start >> PAGE_SHIFT;
75 if (vma->vm_file) 47 last = (start + length - 1) >> PAGE_SHIFT;
76 fput(vma->vm_file); 48 nr = last - first + 1;
77 49 vec = frame_vector_create(nr);
78 kfree(vma); 50 if (!vec)
51 return ERR_PTR(-ENOMEM);
52 ret = get_vaddr_frames(start, nr, write, 1, vec);
53 if (ret < 0)
54 goto out_destroy;
55 /* We accept only complete set of PFNs */
56 if (ret != nr) {
57 ret = -EFAULT;
58 goto out_release;
59 }
60 return vec;
61out_release:
62 put_vaddr_frames(vec);
63out_destroy:
64 frame_vector_destroy(vec);
65 return ERR_PTR(ret);
79} 66}
80EXPORT_SYMBOL_GPL(vb2_put_vma); 67EXPORT_SYMBOL(vb2_create_framevec);
81 68
82/** 69/**
83 * vb2_get_contig_userptr() - lock physically contiguous userspace mapped memory 70 * vb2_destroy_framevec() - release vector of mapped pfns
84 * @vaddr: starting virtual address of the area to be verified 71 * @vec: vector of pfns / pages to release
85 * @size: size of the area
86 * @res_paddr: will return physical address for the given vaddr
87 * @res_vma: will return locked copy of struct vm_area for the given area
88 *
89 * This function will go through memory area of size @size mapped at @vaddr and
90 * verify that the underlying physical pages are contiguous. If they are
91 * contiguous the virtual memory area is locked and a @res_vma is filled with
92 * the copy and @res_pa set to the physical address of the buffer.
93 * 72 *
94 * Returns 0 on success. 73 * This releases references to all pages in the vector @vec (if corresponding
74 * pfns are backed by pages) and frees the passed vector.
95 */ 75 */
96int vb2_get_contig_userptr(unsigned long vaddr, unsigned long size, 76void vb2_destroy_framevec(struct frame_vector *vec)
97 struct vm_area_struct **res_vma, dma_addr_t *res_pa)
98{ 77{
99 struct mm_struct *mm = current->mm; 78 put_vaddr_frames(vec);
100 struct vm_area_struct *vma; 79 frame_vector_destroy(vec);
101 unsigned long offset, start, end;
102 unsigned long this_pfn, prev_pfn;
103 dma_addr_t pa = 0;
104
105 start = vaddr;
106 offset = start & ~PAGE_MASK;
107 end = start + size;
108
109 vma = find_vma(mm, start);
110
111 if (vma == NULL || vma->vm_end < end)
112 return -EFAULT;
113
114 for (prev_pfn = 0; start < end; start += PAGE_SIZE) {
115 int ret = follow_pfn(vma, start, &this_pfn);
116 if (ret)
117 return ret;
118
119 if (prev_pfn == 0)
120 pa = this_pfn << PAGE_SHIFT;
121 else if (this_pfn != prev_pfn + 1)
122 return -EFAULT;
123
124 prev_pfn = this_pfn;
125 }
126
127 /*
128 * Memory is contiguous, lock vma and return to the caller
129 */
130 *res_vma = vb2_get_vma(vma);
131 if (*res_vma == NULL)
132 return -ENOMEM;
133
134 *res_pa = pa + offset;
135 return 0;
136} 80}
137EXPORT_SYMBOL_GPL(vb2_get_contig_userptr); 81EXPORT_SYMBOL(vb2_destroy_framevec);
138 82
139/** 83/**
140 * vb2_common_vm_open() - increase refcount of the vma 84 * vb2_common_vm_open() - increase refcount of the vma
diff --git a/drivers/media/v4l2-core/videobuf2-vmalloc.c b/drivers/media/v4l2-core/videobuf2-vmalloc.c
index 2fe4c27f524a..ecb8f0c7f025 100644
--- a/drivers/media/v4l2-core/videobuf2-vmalloc.c
+++ b/drivers/media/v4l2-core/videobuf2-vmalloc.c
@@ -23,11 +23,9 @@
23 23
24struct vb2_vmalloc_buf { 24struct vb2_vmalloc_buf {
25 void *vaddr; 25 void *vaddr;
26 struct page **pages; 26 struct frame_vector *vec;
27 struct vm_area_struct *vma;
28 enum dma_data_direction dma_dir; 27 enum dma_data_direction dma_dir;
29 unsigned long size; 28 unsigned long size;
30 unsigned int n_pages;
31 atomic_t refcount; 29 atomic_t refcount;
32 struct vb2_vmarea_handler handler; 30 struct vb2_vmarea_handler handler;
33 struct dma_buf *dbuf; 31 struct dma_buf *dbuf;
@@ -76,10 +74,8 @@ static void *vb2_vmalloc_get_userptr(void *alloc_ctx, unsigned long vaddr,
76 enum dma_data_direction dma_dir) 74 enum dma_data_direction dma_dir)
77{ 75{
78 struct vb2_vmalloc_buf *buf; 76 struct vb2_vmalloc_buf *buf;
79 unsigned long first, last; 77 struct frame_vector *vec;
80 int n_pages, offset; 78 int n_pages, offset, i;
81 struct vm_area_struct *vma;
82 dma_addr_t physp;
83 79
84 buf = kzalloc(sizeof(*buf), GFP_KERNEL); 80 buf = kzalloc(sizeof(*buf), GFP_KERNEL);
85 if (!buf) 81 if (!buf)
@@ -88,51 +84,36 @@ static void *vb2_vmalloc_get_userptr(void *alloc_ctx, unsigned long vaddr,
88 buf->dma_dir = dma_dir; 84 buf->dma_dir = dma_dir;
89 offset = vaddr & ~PAGE_MASK; 85 offset = vaddr & ~PAGE_MASK;
90 buf->size = size; 86 buf->size = size;
91 87 vec = vb2_create_framevec(vaddr, size, dma_dir == DMA_FROM_DEVICE);
92 88 if (IS_ERR(vec))
93 vma = find_vma(current->mm, vaddr); 89 goto fail_pfnvec_create;
94 if (vma && (vma->vm_flags & VM_PFNMAP) && (vma->vm_pgoff)) { 90 buf->vec = vec;
95 if (vb2_get_contig_userptr(vaddr, size, &vma, &physp)) 91 n_pages = frame_vector_count(vec);
96 goto fail_pages_array_alloc; 92 if (frame_vector_to_pages(vec) < 0) {
97 buf->vma = vma; 93 unsigned long *nums = frame_vector_pfns(vec);
98 buf->vaddr = (__force void *)ioremap_nocache(physp, size); 94
99 if (!buf->vaddr) 95 /*
100 goto fail_pages_array_alloc; 96 * We cannot get page pointers for these pfns. Check memory is
97 * physically contiguous and use direct mapping.
98 */
99 for (i = 1; i < n_pages; i++)
100 if (nums[i-1] + 1 != nums[i])
101 goto fail_map;
102 buf->vaddr = (__force void *)
103 ioremap_nocache(nums[0] << PAGE_SHIFT, size);
101 } else { 104 } else {
102 first = vaddr >> PAGE_SHIFT; 105 buf->vaddr = vm_map_ram(frame_vector_pages(vec), n_pages, -1,
103 last = (vaddr + size - 1) >> PAGE_SHIFT;
104 buf->n_pages = last - first + 1;
105 buf->pages = kzalloc(buf->n_pages * sizeof(struct page *),
106 GFP_KERNEL);
107 if (!buf->pages)
108 goto fail_pages_array_alloc;
109
110 /* current->mm->mmap_sem is taken by videobuf2 core */
111 n_pages = get_user_pages(current, current->mm,
112 vaddr & PAGE_MASK, buf->n_pages,
113 dma_dir == DMA_FROM_DEVICE,
114 1, /* force */
115 buf->pages, NULL);
116 if (n_pages != buf->n_pages)
117 goto fail_get_user_pages;
118
119 buf->vaddr = vm_map_ram(buf->pages, buf->n_pages, -1,
120 PAGE_KERNEL); 106 PAGE_KERNEL);
121 if (!buf->vaddr)
122 goto fail_get_user_pages;
123 } 107 }
124 108
109 if (!buf->vaddr)
110 goto fail_map;
125 buf->vaddr += offset; 111 buf->vaddr += offset;
126 return buf; 112 return buf;
127 113
128fail_get_user_pages: 114fail_map:
129 pr_debug("get_user_pages requested/got: %d/%d]\n", n_pages, 115 vb2_destroy_framevec(vec);
130 buf->n_pages); 116fail_pfnvec_create:
131 while (--n_pages >= 0)
132 put_page(buf->pages[n_pages]);
133 kfree(buf->pages);
134
135fail_pages_array_alloc:
136 kfree(buf); 117 kfree(buf);
137 118
138 return NULL; 119 return NULL;
@@ -143,20 +124,21 @@ static void vb2_vmalloc_put_userptr(void *buf_priv)
143 struct vb2_vmalloc_buf *buf = buf_priv; 124 struct vb2_vmalloc_buf *buf = buf_priv;
144 unsigned long vaddr = (unsigned long)buf->vaddr & PAGE_MASK; 125 unsigned long vaddr = (unsigned long)buf->vaddr & PAGE_MASK;
145 unsigned int i; 126 unsigned int i;
127 struct page **pages;
128 unsigned int n_pages;
146 129
147 if (buf->pages) { 130 if (!buf->vec->is_pfns) {
131 n_pages = frame_vector_count(buf->vec);
132 pages = frame_vector_pages(buf->vec);
148 if (vaddr) 133 if (vaddr)
149 vm_unmap_ram((void *)vaddr, buf->n_pages); 134 vm_unmap_ram((void *)vaddr, n_pages);
150 for (i = 0; i < buf->n_pages; ++i) { 135 if (buf->dma_dir == DMA_FROM_DEVICE)
151 if (buf->dma_dir == DMA_FROM_DEVICE) 136 for (i = 0; i < n_pages; i++)
152 set_page_dirty_lock(buf->pages[i]); 137 set_page_dirty_lock(pages[i]);
153 put_page(buf->pages[i]);
154 }
155 kfree(buf->pages);
156 } else { 138 } else {
157 vb2_put_vma(buf->vma);
158 iounmap((__force void __iomem *)buf->vaddr); 139 iounmap((__force void __iomem *)buf->vaddr);
159 } 140 }
141 vb2_destroy_framevec(buf->vec);
160 kfree(buf); 142 kfree(buf);
161} 143}
162 144
diff --git a/drivers/mfd/asic3.c b/drivers/mfd/asic3.c
index 4b54128bc78e..a726f01e3b02 100644
--- a/drivers/mfd/asic3.c
+++ b/drivers/mfd/asic3.c
@@ -138,7 +138,7 @@ static void asic3_irq_flip_edge(struct asic3 *asic,
138 spin_unlock_irqrestore(&asic->lock, flags); 138 spin_unlock_irqrestore(&asic->lock, flags);
139} 139}
140 140
141static void asic3_irq_demux(unsigned int irq, struct irq_desc *desc) 141static void asic3_irq_demux(struct irq_desc *desc)
142{ 142{
143 struct asic3 *asic = irq_desc_get_handler_data(desc); 143 struct asic3 *asic = irq_desc_get_handler_data(desc);
144 struct irq_data *data = irq_desc_get_irq_data(desc); 144 struct irq_data *data = irq_desc_get_irq_data(desc);
diff --git a/drivers/mfd/ezx-pcap.c b/drivers/mfd/ezx-pcap.c
index a76eb6ef47a0..b279205659a4 100644
--- a/drivers/mfd/ezx-pcap.c
+++ b/drivers/mfd/ezx-pcap.c
@@ -205,7 +205,7 @@ static void pcap_isr_work(struct work_struct *work)
205 } while (gpio_get_value(pdata->gpio)); 205 } while (gpio_get_value(pdata->gpio));
206} 206}
207 207
208static void pcap_irq_handler(unsigned int irq, struct irq_desc *desc) 208static void pcap_irq_handler(struct irq_desc *desc)
209{ 209{
210 struct pcap_chip *pcap = irq_desc_get_handler_data(desc); 210 struct pcap_chip *pcap = irq_desc_get_handler_data(desc);
211 211
diff --git a/drivers/mfd/htc-egpio.c b/drivers/mfd/htc-egpio.c
index 9131cdcdc64a..6ccaf90d98fd 100644
--- a/drivers/mfd/htc-egpio.c
+++ b/drivers/mfd/htc-egpio.c
@@ -98,7 +98,7 @@ static struct irq_chip egpio_muxed_chip = {
98 .irq_unmask = egpio_unmask, 98 .irq_unmask = egpio_unmask,
99}; 99};
100 100
101static void egpio_handler(unsigned int irq, struct irq_desc *desc) 101static void egpio_handler(struct irq_desc *desc)
102{ 102{
103 struct egpio_info *ei = irq_desc_get_handler_data(desc); 103 struct egpio_info *ei = irq_desc_get_handler_data(desc);
104 int irqpin; 104 int irqpin;
diff --git a/drivers/mfd/jz4740-adc.c b/drivers/mfd/jz4740-adc.c
index 5bb49f08955d..798e44306382 100644
--- a/drivers/mfd/jz4740-adc.c
+++ b/drivers/mfd/jz4740-adc.c
@@ -65,7 +65,7 @@ struct jz4740_adc {
65 spinlock_t lock; 65 spinlock_t lock;
66}; 66};
67 67
68static void jz4740_adc_irq_demux(unsigned int irq, struct irq_desc *desc) 68static void jz4740_adc_irq_demux(struct irq_desc *desc)
69{ 69{
70 struct irq_chip_generic *gc = irq_desc_get_handler_data(desc); 70 struct irq_chip_generic *gc = irq_desc_get_handler_data(desc);
71 uint8_t status; 71 uint8_t status;
diff --git a/drivers/mfd/pm8921-core.c b/drivers/mfd/pm8921-core.c
index 59502d02cd15..1b7ec0870c2a 100644
--- a/drivers/mfd/pm8921-core.c
+++ b/drivers/mfd/pm8921-core.c
@@ -156,7 +156,7 @@ static int pm8xxx_irq_master_handler(struct pm_irq_chip *chip, int master)
156 return ret; 156 return ret;
157} 157}
158 158
159static void pm8xxx_irq_handler(unsigned int irq, struct irq_desc *desc) 159static void pm8xxx_irq_handler(struct irq_desc *desc)
160{ 160{
161 struct pm_irq_chip *chip = irq_desc_get_handler_data(desc); 161 struct pm_irq_chip *chip = irq_desc_get_handler_data(desc);
162 struct irq_chip *irq_chip = irq_desc_get_chip(desc); 162 struct irq_chip *irq_chip = irq_desc_get_chip(desc);
diff --git a/drivers/mfd/t7l66xb.c b/drivers/mfd/t7l66xb.c
index 16fc1adc4fa3..94bd89cb1f06 100644
--- a/drivers/mfd/t7l66xb.c
+++ b/drivers/mfd/t7l66xb.c
@@ -185,7 +185,7 @@ static struct mfd_cell t7l66xb_cells[] = {
185/*--------------------------------------------------------------------------*/ 185/*--------------------------------------------------------------------------*/
186 186
187/* Handle the T7L66XB interrupt mux */ 187/* Handle the T7L66XB interrupt mux */
188static void t7l66xb_irq(unsigned int irq, struct irq_desc *desc) 188static void t7l66xb_irq(struct irq_desc *desc)
189{ 189{
190 struct t7l66xb *t7l66xb = irq_desc_get_handler_data(desc); 190 struct t7l66xb *t7l66xb = irq_desc_get_handler_data(desc);
191 unsigned int isr; 191 unsigned int isr;
diff --git a/drivers/mfd/tc6393xb.c b/drivers/mfd/tc6393xb.c
index 775b9aca871a..8c84a513016b 100644
--- a/drivers/mfd/tc6393xb.c
+++ b/drivers/mfd/tc6393xb.c
@@ -522,8 +522,7 @@ static int tc6393xb_register_gpio(struct tc6393xb *tc6393xb, int gpio_base)
522 522
523/*--------------------------------------------------------------------------*/ 523/*--------------------------------------------------------------------------*/
524 524
525static void 525static void tc6393xb_irq(struct irq_desc *desc)
526tc6393xb_irq(unsigned int irq, struct irq_desc *desc)
527{ 526{
528 struct tc6393xb *tc6393xb = irq_desc_get_handler_data(desc); 527 struct tc6393xb *tc6393xb = irq_desc_get_handler_data(desc);
529 unsigned int isr; 528 unsigned int isr;
diff --git a/drivers/mfd/ucb1x00-core.c b/drivers/mfd/ucb1x00-core.c
index 9a2302129711..f691d7ecad52 100644
--- a/drivers/mfd/ucb1x00-core.c
+++ b/drivers/mfd/ucb1x00-core.c
@@ -282,7 +282,7 @@ void ucb1x00_adc_disable(struct ucb1x00 *ucb)
282 * SIBCLK to talk to the chip. We leave the clock running until 282 * SIBCLK to talk to the chip. We leave the clock running until
283 * we have finished processing all interrupts from the chip. 283 * we have finished processing all interrupts from the chip.
284 */ 284 */
285static void ucb1x00_irq(unsigned int __irq, struct irq_desc *desc) 285static void ucb1x00_irq(struct irq_desc *desc)
286{ 286{
287 struct ucb1x00 *ucb = irq_desc_get_handler_data(desc); 287 struct ucb1x00 *ucb = irq_desc_get_handler_data(desc);
288 unsigned int isr, i; 288 unsigned int isr, i;
diff --git a/drivers/misc/cxl/Makefile b/drivers/misc/cxl/Makefile
index 6f484dfe78f9..6982f603fadc 100644
--- a/drivers/misc/cxl/Makefile
+++ b/drivers/misc/cxl/Makefile
@@ -1,4 +1,4 @@
1ccflags-y := -Werror 1ccflags-y := -Werror -Wno-unused-const-variable
2 2
3cxl-y += main.o file.o irq.o fault.o native.o 3cxl-y += main.o file.o irq.o fault.o native.o
4cxl-y += context.o sysfs.o debugfs.o pci.o trace.o 4cxl-y += context.o sysfs.o debugfs.o pci.o trace.o
diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
index 02c85160bfe9..a5e977192b61 100644
--- a/drivers/misc/cxl/pci.c
+++ b/drivers/misc/cxl/pci.c
@@ -1249,8 +1249,6 @@ static int cxl_probe(struct pci_dev *dev, const struct pci_device_id *id)
1249 int slice; 1249 int slice;
1250 int rc; 1250 int rc;
1251 1251
1252 pci_dev_get(dev);
1253
1254 if (cxl_verbose) 1252 if (cxl_verbose)
1255 dump_cxl_config_space(dev); 1253 dump_cxl_config_space(dev);
1256 1254
diff --git a/drivers/misc/cxl/sysfs.c b/drivers/misc/cxl/sysfs.c
index 25868c2ec03e..02006f7109a8 100644
--- a/drivers/misc/cxl/sysfs.c
+++ b/drivers/misc/cxl/sysfs.c
@@ -592,6 +592,8 @@ int cxl_sysfs_afu_add(struct cxl_afu *afu)
592 592
593 /* conditionally create the add the binary file for error info buffer */ 593 /* conditionally create the add the binary file for error info buffer */
594 if (afu->eb_len) { 594 if (afu->eb_len) {
595 sysfs_attr_init(&afu->attr_eb.attr);
596
595 afu->attr_eb.attr.name = "afu_err_buff"; 597 afu->attr_eb.attr.name = "afu_err_buff";
596 afu->attr_eb.attr.mode = S_IRUGO; 598 afu->attr_eb.attr.mode = S_IRUGO;
597 afu->attr_eb.size = afu->eb_len; 599 afu->attr_eb.size = afu->eb_len;
diff --git a/drivers/misc/cxl/vphb.c b/drivers/misc/cxl/vphb.c
index 6dd16a6d153f..94b520896b18 100644
--- a/drivers/misc/cxl/vphb.c
+++ b/drivers/misc/cxl/vphb.c
@@ -48,6 +48,12 @@ static bool cxl_pci_enable_device_hook(struct pci_dev *dev)
48 48
49 phb = pci_bus_to_host(dev->bus); 49 phb = pci_bus_to_host(dev->bus);
50 afu = (struct cxl_afu *)phb->private_data; 50 afu = (struct cxl_afu *)phb->private_data;
51
52 if (!cxl_adapter_link_ok(afu->adapter)) {
53 dev_warn(&dev->dev, "%s: Device link is down, refusing to enable AFU\n", __func__);
54 return false;
55 }
56
51 set_dma_ops(&dev->dev, &dma_direct_ops); 57 set_dma_ops(&dev->dev, &dma_direct_ops);
52 set_dma_offset(&dev->dev, PAGE_OFFSET); 58 set_dma_offset(&dev->dev, PAGE_OFFSET);
53 59
diff --git a/drivers/misc/mei/wd.c b/drivers/misc/mei/wd.c
index 2bc0f5089f82..b346638833b0 100644
--- a/drivers/misc/mei/wd.c
+++ b/drivers/misc/mei/wd.c
@@ -364,6 +364,7 @@ int mei_watchdog_register(struct mei_device *dev)
364 364
365 int ret; 365 int ret;
366 366
367 amt_wd_dev.parent = dev->dev;
367 /* unlock to perserve correct locking order */ 368 /* unlock to perserve correct locking order */
368 mutex_unlock(&dev->device_lock); 369 mutex_unlock(&dev->device_lock);
369 ret = watchdog_register_device(&amt_wd_dev); 370 ret = watchdog_register_device(&amt_wd_dev);
diff --git a/drivers/net/arcnet/arcnet.c b/drivers/net/arcnet/arcnet.c
index 2158e985a0e7..e41dd36fe832 100644
--- a/drivers/net/arcnet/arcnet.c
+++ b/drivers/net/arcnet/arcnet.c
@@ -302,7 +302,7 @@ static void arcdev_setup(struct net_device *dev)
302 dev->type = ARPHRD_ARCNET; 302 dev->type = ARPHRD_ARCNET;
303 dev->netdev_ops = &arcnet_netdev_ops; 303 dev->netdev_ops = &arcnet_netdev_ops;
304 dev->header_ops = &arcnet_header_ops; 304 dev->header_ops = &arcnet_header_ops;
305 dev->hard_header_len = sizeof(struct archdr); 305 dev->hard_header_len = sizeof(struct arc_hardware);
306 dev->mtu = choose_mtu(); 306 dev->mtu = choose_mtu();
307 307
308 dev->addr_len = ARCNET_ALEN; 308 dev->addr_len = ARCNET_ALEN;
diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c
index 30227ca2d5db..8e9d172543a0 100644
--- a/drivers/net/dsa/mv88e6xxx.c
+++ b/drivers/net/dsa/mv88e6xxx.c
@@ -2074,6 +2074,7 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
2074 */ 2074 */
2075 reg = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_PCS_CTRL); 2075 reg = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_PCS_CTRL);
2076 if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) { 2076 if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) {
2077 reg &= ~PORT_PCS_CTRL_UNFORCED;
2077 reg |= PORT_PCS_CTRL_FORCE_LINK | 2078 reg |= PORT_PCS_CTRL_FORCE_LINK |
2078 PORT_PCS_CTRL_LINK_UP | 2079 PORT_PCS_CTRL_LINK_UP |
2079 PORT_PCS_CTRL_DUPLEX_FULL | 2080 PORT_PCS_CTRL_DUPLEX_FULL |
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
index 21749f01827d..652f21889a48 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
@@ -690,16 +690,24 @@ static int xgene_enet_phy_connect(struct net_device *ndev)
690 netdev_dbg(ndev, "No phy-handle found in DT\n"); 690 netdev_dbg(ndev, "No phy-handle found in DT\n");
691 return -ENODEV; 691 return -ENODEV;
692 } 692 }
693 pdata->phy_dev = of_phy_find_device(phy_np);
694 }
695 693
696 phy_dev = pdata->phy_dev; 694 phy_dev = of_phy_connect(ndev, phy_np, &xgene_enet_adjust_link,
695 0, pdata->phy_mode);
696 if (!phy_dev) {
697 netdev_err(ndev, "Could not connect to PHY\n");
698 return -ENODEV;
699 }
700
701 pdata->phy_dev = phy_dev;
702 } else {
703 phy_dev = pdata->phy_dev;
697 704
698 if (!phy_dev || 705 if (!phy_dev ||
699 phy_connect_direct(ndev, phy_dev, &xgene_enet_adjust_link, 706 phy_connect_direct(ndev, phy_dev, &xgene_enet_adjust_link,
700 pdata->phy_mode)) { 707 pdata->phy_mode)) {
701 netdev_err(ndev, "Could not connect to PHY\n"); 708 netdev_err(ndev, "Could not connect to PHY\n");
702 return -ENODEV; 709 return -ENODEV;
710 }
703 } 711 }
704 712
705 pdata->phy_speed = SPEED_UNKNOWN; 713 pdata->phy_speed = SPEED_UNKNOWN;
diff --git a/drivers/net/ethernet/arc/emac_arc.c b/drivers/net/ethernet/arc/emac_arc.c
index f9cb99bfb511..ffd180570920 100644
--- a/drivers/net/ethernet/arc/emac_arc.c
+++ b/drivers/net/ethernet/arc/emac_arc.c
@@ -78,6 +78,7 @@ static const struct of_device_id emac_arc_dt_ids[] = {
78 { .compatible = "snps,arc-emac" }, 78 { .compatible = "snps,arc-emac" },
79 { /* Sentinel */ } 79 { /* Sentinel */ }
80}; 80};
81MODULE_DEVICE_TABLE(of, emac_arc_dt_ids);
81 82
82static struct platform_driver emac_arc_driver = { 83static struct platform_driver emac_arc_driver = {
83 .probe = emac_arc_probe, 84 .probe = emac_arc_probe,
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index b9a5a97ed4dd..f1b5364f3521 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -2079,6 +2079,7 @@ static const struct of_device_id bcm_sysport_of_match[] = {
2079 { .compatible = "brcm,systemport" }, 2079 { .compatible = "brcm,systemport" },
2080 { /* sentinel */ } 2080 { /* sentinel */ }
2081}; 2081};
2082MODULE_DEVICE_TABLE(of, bcm_sysport_of_match);
2082 2083
2083static struct platform_driver bcm_sysport_driver = { 2084static struct platform_driver bcm_sysport_driver = {
2084 .probe = bcm_sysport_probe, 2085 .probe = bcm_sysport_probe,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index ba936635322a..b5e64b02200c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -1946,6 +1946,7 @@ struct bnx2x {
1946 u16 vlan_cnt; 1946 u16 vlan_cnt;
1947 u16 vlan_credit; 1947 u16 vlan_credit;
1948 u16 vxlan_dst_port; 1948 u16 vxlan_dst_port;
1949 u8 vxlan_dst_port_count;
1949 bool accept_any_vlan; 1950 bool accept_any_vlan;
1950}; 1951};
1951 1952
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index e3da2bddf143..f1d62d5dbaff 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -3705,16 +3705,14 @@ out:
3705 3705
3706void bnx2x_update_mfw_dump(struct bnx2x *bp) 3706void bnx2x_update_mfw_dump(struct bnx2x *bp)
3707{ 3707{
3708 struct timeval epoc;
3709 u32 drv_ver; 3708 u32 drv_ver;
3710 u32 valid_dump; 3709 u32 valid_dump;
3711 3710
3712 if (!SHMEM2_HAS(bp, drv_info)) 3711 if (!SHMEM2_HAS(bp, drv_info))
3713 return; 3712 return;
3714 3713
3715 /* Update Driver load time */ 3714 /* Update Driver load time, possibly broken in y2038 */
3716 do_gettimeofday(&epoc); 3715 SHMEM2_WR(bp, drv_info.epoc, (u32)ktime_get_real_seconds());
3717 SHMEM2_WR(bp, drv_info.epoc, epoc.tv_sec);
3718 3716
3719 drv_ver = bnx2x_update_mng_version_utility(DRV_MODULE_VERSION, true); 3717 drv_ver = bnx2x_update_mng_version_utility(DRV_MODULE_VERSION, true);
3720 SHMEM2_WR(bp, drv_info.drv_ver, drv_ver); 3718 SHMEM2_WR(bp, drv_info.drv_ver, drv_ver);
@@ -10110,12 +10108,18 @@ static void __bnx2x_add_vxlan_port(struct bnx2x *bp, u16 port)
10110 if (!netif_running(bp->dev)) 10108 if (!netif_running(bp->dev))
10111 return; 10109 return;
10112 10110
10113 if (bp->vxlan_dst_port || !IS_PF(bp)) { 10111 if (bp->vxlan_dst_port_count && bp->vxlan_dst_port == port) {
10112 bp->vxlan_dst_port_count++;
10113 return;
10114 }
10115
10116 if (bp->vxlan_dst_port_count || !IS_PF(bp)) {
10114 DP(BNX2X_MSG_SP, "Vxlan destination port limit reached\n"); 10117 DP(BNX2X_MSG_SP, "Vxlan destination port limit reached\n");
10115 return; 10118 return;
10116 } 10119 }
10117 10120
10118 bp->vxlan_dst_port = port; 10121 bp->vxlan_dst_port = port;
10122 bp->vxlan_dst_port_count = 1;
10119 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_ADD_VXLAN_PORT, 0); 10123 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_ADD_VXLAN_PORT, 0);
10120} 10124}
10121 10125
@@ -10130,10 +10134,14 @@ static void bnx2x_add_vxlan_port(struct net_device *netdev,
10130 10134
10131static void __bnx2x_del_vxlan_port(struct bnx2x *bp, u16 port) 10135static void __bnx2x_del_vxlan_port(struct bnx2x *bp, u16 port)
10132{ 10136{
10133 if (!bp->vxlan_dst_port || bp->vxlan_dst_port != port || !IS_PF(bp)) { 10137 if (!bp->vxlan_dst_port_count || bp->vxlan_dst_port != port ||
10138 !IS_PF(bp)) {
10134 DP(BNX2X_MSG_SP, "Invalid vxlan port\n"); 10139 DP(BNX2X_MSG_SP, "Invalid vxlan port\n");
10135 return; 10140 return;
10136 } 10141 }
10142 bp->vxlan_dst_port--;
10143 if (bp->vxlan_dst_port)
10144 return;
10137 10145
10138 if (netif_running(bp->dev)) { 10146 if (netif_running(bp->dev)) {
10139 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_DEL_VXLAN_PORT, 0); 10147 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_DEL_VXLAN_PORT, 0);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index c9bd7f16018e..ff702a707a91 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -4319,8 +4319,16 @@ static int bnx2x_setup_rss(struct bnx2x *bp,
4319 4319
4320 /* RSS keys */ 4320 /* RSS keys */
4321 if (test_bit(BNX2X_RSS_SET_SRCH, &p->rss_flags)) { 4321 if (test_bit(BNX2X_RSS_SET_SRCH, &p->rss_flags)) {
4322 memcpy(&data->rss_key[0], &p->rss_key[0], 4322 u8 *dst = (u8 *)(data->rss_key) + sizeof(data->rss_key);
4323 sizeof(data->rss_key)); 4323 const u8 *src = (const u8 *)p->rss_key;
4324 int i;
4325
4326 /* Apparently, bnx2x reads this array in reverse order
4327 * We need to byte swap rss_key to comply with Toeplitz specs.
4328 */
4329 for (i = 0; i < sizeof(data->rss_key); i++)
4330 *--dst = *src++;
4331
4324 caps |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY; 4332 caps |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY;
4325 } 4333 }
4326 4334
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index efcb1119076f..1a3988f51305 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -3304,6 +3304,7 @@ static const struct of_device_id bcmgenet_match[] = {
3304 { .compatible = "brcm,genet-v4", .data = (void *)GENET_V4 }, 3304 { .compatible = "brcm,genet-v4", .data = (void *)GENET_V4 },
3305 { }, 3305 { },
3306}; 3306};
3307MODULE_DEVICE_TABLE(of, bcmgenet_match);
3307 3308
3308static int bcmgenet_probe(struct platform_device *pdev) 3309static int bcmgenet_probe(struct platform_device *pdev)
3309{ 3310{
diff --git a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
index 5d0753cc7e73..04b0d16b210e 100644
--- a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
+++ b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
@@ -2400,6 +2400,7 @@ bna_rx_create(struct bna *bna, struct bnad *bnad,
2400 q0->rcb->id = 0; 2400 q0->rcb->id = 0;
2401 q0->rx_packets = q0->rx_bytes = 0; 2401 q0->rx_packets = q0->rx_bytes = 0;
2402 q0->rx_packets_with_error = q0->rxbuf_alloc_failed = 0; 2402 q0->rx_packets_with_error = q0->rxbuf_alloc_failed = 0;
2403 q0->rxbuf_map_failed = 0;
2403 2404
2404 bna_rxq_qpt_setup(q0, rxp, dpage_count, PAGE_SIZE, 2405 bna_rxq_qpt_setup(q0, rxp, dpage_count, PAGE_SIZE,
2405 &dqpt_mem[i], &dsqpt_mem[i], &dpage_mem[i]); 2406 &dqpt_mem[i], &dsqpt_mem[i], &dpage_mem[i]);
@@ -2428,6 +2429,7 @@ bna_rx_create(struct bna *bna, struct bnad *bnad,
2428 : rx_cfg->q1_buf_size; 2429 : rx_cfg->q1_buf_size;
2429 q1->rx_packets = q1->rx_bytes = 0; 2430 q1->rx_packets = q1->rx_bytes = 0;
2430 q1->rx_packets_with_error = q1->rxbuf_alloc_failed = 0; 2431 q1->rx_packets_with_error = q1->rxbuf_alloc_failed = 0;
2432 q1->rxbuf_map_failed = 0;
2431 2433
2432 bna_rxq_qpt_setup(q1, rxp, hpage_count, PAGE_SIZE, 2434 bna_rxq_qpt_setup(q1, rxp, hpage_count, PAGE_SIZE,
2433 &hqpt_mem[i], &hsqpt_mem[i], 2435 &hqpt_mem[i], &hsqpt_mem[i],
diff --git a/drivers/net/ethernet/brocade/bna/bna_types.h b/drivers/net/ethernet/brocade/bna/bna_types.h
index e0e797f2ea14..c438d032e8bf 100644
--- a/drivers/net/ethernet/brocade/bna/bna_types.h
+++ b/drivers/net/ethernet/brocade/bna/bna_types.h
@@ -587,6 +587,7 @@ struct bna_rxq {
587 u64 rx_bytes; 587 u64 rx_bytes;
588 u64 rx_packets_with_error; 588 u64 rx_packets_with_error;
589 u64 rxbuf_alloc_failed; 589 u64 rxbuf_alloc_failed;
590 u64 rxbuf_map_failed;
590}; 591};
591 592
592/* RxQ pair */ 593/* RxQ pair */
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index 506047c38607..21a0cfc3e7ec 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -399,7 +399,13 @@ bnad_rxq_refill_page(struct bnad *bnad, struct bna_rcb *rcb, u32 nalloc)
399 } 399 }
400 400
401 dma_addr = dma_map_page(&bnad->pcidev->dev, page, page_offset, 401 dma_addr = dma_map_page(&bnad->pcidev->dev, page, page_offset,
402 unmap_q->map_size, DMA_FROM_DEVICE); 402 unmap_q->map_size, DMA_FROM_DEVICE);
403 if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
404 put_page(page);
405 BNAD_UPDATE_CTR(bnad, rxbuf_map_failed);
406 rcb->rxq->rxbuf_map_failed++;
407 goto finishing;
408 }
403 409
404 unmap->page = page; 410 unmap->page = page;
405 unmap->page_offset = page_offset; 411 unmap->page_offset = page_offset;
@@ -454,8 +460,15 @@ bnad_rxq_refill_skb(struct bnad *bnad, struct bna_rcb *rcb, u32 nalloc)
454 rcb->rxq->rxbuf_alloc_failed++; 460 rcb->rxq->rxbuf_alloc_failed++;
455 goto finishing; 461 goto finishing;
456 } 462 }
463
457 dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data, 464 dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
458 buff_sz, DMA_FROM_DEVICE); 465 buff_sz, DMA_FROM_DEVICE);
466 if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
467 dev_kfree_skb_any(skb);
468 BNAD_UPDATE_CTR(bnad, rxbuf_map_failed);
469 rcb->rxq->rxbuf_map_failed++;
470 goto finishing;
471 }
459 472
460 unmap->skb = skb; 473 unmap->skb = skb;
461 dma_unmap_addr_set(&unmap->vector, dma_addr, dma_addr); 474 dma_unmap_addr_set(&unmap->vector, dma_addr, dma_addr);
@@ -3025,6 +3038,11 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
3025 unmap = head_unmap; 3038 unmap = head_unmap;
3026 dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data, 3039 dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
3027 len, DMA_TO_DEVICE); 3040 len, DMA_TO_DEVICE);
3041 if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
3042 dev_kfree_skb_any(skb);
3043 BNAD_UPDATE_CTR(bnad, tx_skb_map_failed);
3044 return NETDEV_TX_OK;
3045 }
3028 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[0].host_addr); 3046 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[0].host_addr);
3029 txqent->vector[0].length = htons(len); 3047 txqent->vector[0].length = htons(len);
3030 dma_unmap_addr_set(&unmap->vectors[0], dma_addr, dma_addr); 3048 dma_unmap_addr_set(&unmap->vectors[0], dma_addr, dma_addr);
@@ -3056,6 +3074,15 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
3056 3074
3057 dma_addr = skb_frag_dma_map(&bnad->pcidev->dev, frag, 3075 dma_addr = skb_frag_dma_map(&bnad->pcidev->dev, frag,
3058 0, size, DMA_TO_DEVICE); 3076 0, size, DMA_TO_DEVICE);
3077 if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
3078 /* Undo the changes starting at tcb->producer_index */
3079 bnad_tx_buff_unmap(bnad, unmap_q, q_depth,
3080 tcb->producer_index);
3081 dev_kfree_skb_any(skb);
3082 BNAD_UPDATE_CTR(bnad, tx_skb_map_failed);
3083 return NETDEV_TX_OK;
3084 }
3085
3059 dma_unmap_len_set(&unmap->vectors[vect_id], dma_len, size); 3086 dma_unmap_len_set(&unmap->vectors[vect_id], dma_len, size);
3060 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr); 3087 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
3061 txqent->vector[vect_id].length = htons(size); 3088 txqent->vector[vect_id].length = htons(size);
diff --git a/drivers/net/ethernet/brocade/bna/bnad.h b/drivers/net/ethernet/brocade/bna/bnad.h
index faedbf24777e..f4ed816b93ee 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.h
+++ b/drivers/net/ethernet/brocade/bna/bnad.h
@@ -175,6 +175,7 @@ struct bnad_drv_stats {
175 u64 tx_skb_headlen_zero; 175 u64 tx_skb_headlen_zero;
176 u64 tx_skb_frag_zero; 176 u64 tx_skb_frag_zero;
177 u64 tx_skb_len_mismatch; 177 u64 tx_skb_len_mismatch;
178 u64 tx_skb_map_failed;
178 179
179 u64 hw_stats_updates; 180 u64 hw_stats_updates;
180 u64 netif_rx_dropped; 181 u64 netif_rx_dropped;
@@ -189,6 +190,7 @@ struct bnad_drv_stats {
189 u64 rx_unmap_q_alloc_failed; 190 u64 rx_unmap_q_alloc_failed;
190 191
191 u64 rxbuf_alloc_failed; 192 u64 rxbuf_alloc_failed;
193 u64 rxbuf_map_failed;
192}; 194};
193 195
194/* Complete driver stats */ 196/* Complete driver stats */
diff --git a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
index 2bdfc5dff4b1..0e4fdc3dd729 100644
--- a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
+++ b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
@@ -90,6 +90,7 @@ static const char *bnad_net_stats_strings[BNAD_ETHTOOL_STATS_NUM] = {
90 "tx_skb_headlen_zero", 90 "tx_skb_headlen_zero",
91 "tx_skb_frag_zero", 91 "tx_skb_frag_zero",
92 "tx_skb_len_mismatch", 92 "tx_skb_len_mismatch",
93 "tx_skb_map_failed",
93 "hw_stats_updates", 94 "hw_stats_updates",
94 "netif_rx_dropped", 95 "netif_rx_dropped",
95 96
@@ -102,6 +103,7 @@ static const char *bnad_net_stats_strings[BNAD_ETHTOOL_STATS_NUM] = {
102 "tx_unmap_q_alloc_failed", 103 "tx_unmap_q_alloc_failed",
103 "rx_unmap_q_alloc_failed", 104 "rx_unmap_q_alloc_failed",
104 "rxbuf_alloc_failed", 105 "rxbuf_alloc_failed",
106 "rxbuf_map_failed",
105 107
106 "mac_stats_clr_cnt", 108 "mac_stats_clr_cnt",
107 "mac_frame_64", 109 "mac_frame_64",
@@ -807,6 +809,7 @@ bnad_per_q_stats_fill(struct bnad *bnad, u64 *buf, int bi)
807 rx_packets_with_error; 809 rx_packets_with_error;
808 buf[bi++] = rcb->rxq-> 810 buf[bi++] = rcb->rxq->
809 rxbuf_alloc_failed; 811 rxbuf_alloc_failed;
812 buf[bi++] = rcb->rxq->rxbuf_map_failed;
810 buf[bi++] = rcb->producer_index; 813 buf[bi++] = rcb->producer_index;
811 buf[bi++] = rcb->consumer_index; 814 buf[bi++] = rcb->consumer_index;
812 } 815 }
@@ -821,6 +824,7 @@ bnad_per_q_stats_fill(struct bnad *bnad, u64 *buf, int bi)
821 rx_packets_with_error; 824 rx_packets_with_error;
822 buf[bi++] = rcb->rxq-> 825 buf[bi++] = rcb->rxq->
823 rxbuf_alloc_failed; 826 rxbuf_alloc_failed;
827 buf[bi++] = rcb->rxq->rxbuf_map_failed;
824 buf[bi++] = rcb->producer_index; 828 buf[bi++] = rcb->producer_index;
825 buf[bi++] = rcb->consumer_index; 829 buf[bi++] = rcb->consumer_index;
826 } 830 }
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
index 8353a6cbfcc2..03ed00c49823 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
@@ -157,6 +157,11 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
157 CH_PCI_ID_TABLE_FENTRY(0x5090), /* Custom T540-CR */ 157 CH_PCI_ID_TABLE_FENTRY(0x5090), /* Custom T540-CR */
158 CH_PCI_ID_TABLE_FENTRY(0x5091), /* Custom T522-CR */ 158 CH_PCI_ID_TABLE_FENTRY(0x5091), /* Custom T522-CR */
159 CH_PCI_ID_TABLE_FENTRY(0x5092), /* Custom T520-CR */ 159 CH_PCI_ID_TABLE_FENTRY(0x5092), /* Custom T520-CR */
160 CH_PCI_ID_TABLE_FENTRY(0x5093), /* Custom T580-LP-CR */
161 CH_PCI_ID_TABLE_FENTRY(0x5094), /* Custom T540-CR */
162 CH_PCI_ID_TABLE_FENTRY(0x5095), /* Custom T540-CR-SO */
163 CH_PCI_ID_TABLE_FENTRY(0x5096), /* Custom T580-CR */
164 CH_PCI_ID_TABLE_FENTRY(0x5097), /* Custom T520-KR */
160 165
161 /* T6 adapters: 166 /* T6 adapters:
162 */ 167 */
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 0a27805cbbbd..821540913343 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -582,6 +582,7 @@ struct be_adapter {
582 u16 pvid; 582 u16 pvid;
583 __be16 vxlan_port; 583 __be16 vxlan_port;
584 int vxlan_port_count; 584 int vxlan_port_count;
585 int vxlan_port_aliases;
585 struct phy_info phy; 586 struct phy_info phy;
586 u8 wol_cap; 587 u8 wol_cap;
587 bool wol_en; 588 bool wol_en;
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 12687bf52b95..7bf51a1a0a77 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -5176,6 +5176,11 @@ static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
5176 if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter)) 5176 if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter))
5177 return; 5177 return;
5178 5178
5179 if (adapter->vxlan_port == port && adapter->vxlan_port_count) {
5180 adapter->vxlan_port_aliases++;
5181 return;
5182 }
5183
5179 if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) { 5184 if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
5180 dev_info(dev, 5185 dev_info(dev,
5181 "Only one UDP port supported for VxLAN offloads\n"); 5186 "Only one UDP port supported for VxLAN offloads\n");
@@ -5226,6 +5231,11 @@ static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
5226 if (adapter->vxlan_port != port) 5231 if (adapter->vxlan_port != port)
5227 goto done; 5232 goto done;
5228 5233
5234 if (adapter->vxlan_port_aliases) {
5235 adapter->vxlan_port_aliases--;
5236 return;
5237 }
5238
5229 be_disable_vxlan_offloads(adapter); 5239 be_disable_vxlan_offloads(adapter);
5230 5240
5231 dev_info(&adapter->pdev->dev, 5241 dev_info(&adapter->pdev->dev,
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 4b69d061d90f..710715fcb23d 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -1710,8 +1710,10 @@ static void gfar_configure_serdes(struct net_device *dev)
1710 * everything for us? Resetting it takes the link down and requires 1710 * everything for us? Resetting it takes the link down and requires
1711 * several seconds for it to come back. 1711 * several seconds for it to come back.
1712 */ 1712 */
1713 if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS) 1713 if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS) {
1714 put_device(&tbiphy->dev);
1714 return; 1715 return;
1716 }
1715 1717
1716 /* Single clk mode, mii mode off(for serdes communication) */ 1718 /* Single clk mode, mii mode off(for serdes communication) */
1717 phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT); 1719 phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT);
@@ -1723,6 +1725,8 @@ static void gfar_configure_serdes(struct net_device *dev)
1723 phy_write(tbiphy, MII_BMCR, 1725 phy_write(tbiphy, MII_BMCR,
1724 BMCR_ANENABLE | BMCR_ANRESTART | BMCR_FULLDPLX | 1726 BMCR_ANENABLE | BMCR_ANRESTART | BMCR_FULLDPLX |
1725 BMCR_SPEED1000); 1727 BMCR_SPEED1000);
1728
1729 put_device(&tbiphy->dev);
1726} 1730}
1727 1731
1728static int __gfar_is_rx_idle(struct gfar_private *priv) 1732static int __gfar_is_rx_idle(struct gfar_private *priv)
@@ -1970,8 +1974,7 @@ static int register_grp_irqs(struct gfar_priv_grp *grp)
1970 /* Install our interrupt handlers for Error, 1974 /* Install our interrupt handlers for Error,
1971 * Transmit, and Receive 1975 * Transmit, and Receive
1972 */ 1976 */
1973 err = request_irq(gfar_irq(grp, ER)->irq, gfar_error, 1977 err = request_irq(gfar_irq(grp, ER)->irq, gfar_error, 0,
1974 IRQF_NO_SUSPEND,
1975 gfar_irq(grp, ER)->name, grp); 1978 gfar_irq(grp, ER)->name, grp);
1976 if (err < 0) { 1979 if (err < 0) {
1977 netif_err(priv, intr, dev, "Can't get IRQ %d\n", 1980 netif_err(priv, intr, dev, "Can't get IRQ %d\n",
@@ -1979,6 +1982,8 @@ static int register_grp_irqs(struct gfar_priv_grp *grp)
1979 1982
1980 goto err_irq_fail; 1983 goto err_irq_fail;
1981 } 1984 }
1985 enable_irq_wake(gfar_irq(grp, ER)->irq);
1986
1982 err = request_irq(gfar_irq(grp, TX)->irq, gfar_transmit, 0, 1987 err = request_irq(gfar_irq(grp, TX)->irq, gfar_transmit, 0,
1983 gfar_irq(grp, TX)->name, grp); 1988 gfar_irq(grp, TX)->name, grp);
1984 if (err < 0) { 1989 if (err < 0) {
@@ -1994,14 +1999,14 @@ static int register_grp_irqs(struct gfar_priv_grp *grp)
1994 goto rx_irq_fail; 1999 goto rx_irq_fail;
1995 } 2000 }
1996 } else { 2001 } else {
1997 err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt, 2002 err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt, 0,
1998 IRQF_NO_SUSPEND,
1999 gfar_irq(grp, TX)->name, grp); 2003 gfar_irq(grp, TX)->name, grp);
2000 if (err < 0) { 2004 if (err < 0) {
2001 netif_err(priv, intr, dev, "Can't get IRQ %d\n", 2005 netif_err(priv, intr, dev, "Can't get IRQ %d\n",
2002 gfar_irq(grp, TX)->irq); 2006 gfar_irq(grp, TX)->irq);
2003 goto err_irq_fail; 2007 goto err_irq_fail;
2004 } 2008 }
2009 enable_irq_wake(gfar_irq(grp, TX)->irq);
2005 } 2010 }
2006 2011
2007 return 0; 2012 return 0;
diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c
index 8e3cd77aa347..664d0c261269 100644
--- a/drivers/net/ethernet/freescale/gianfar_ptp.c
+++ b/drivers/net/ethernet/freescale/gianfar_ptp.c
@@ -557,6 +557,7 @@ static const struct of_device_id match_table[] = {
557 { .compatible = "fsl,etsec-ptp" }, 557 { .compatible = "fsl,etsec-ptp" },
558 {}, 558 {},
559}; 559};
560MODULE_DEVICE_TABLE(of, match_table);
560 561
561static struct platform_driver gianfar_ptp_driver = { 562static struct platform_driver gianfar_ptp_driver = {
562 .driver = { 563 .driver = {
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index 4dd40e057f40..650f7888e32b 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -1384,6 +1384,8 @@ static int adjust_enet_interface(struct ucc_geth_private *ugeth)
1384 value = phy_read(tbiphy, ENET_TBI_MII_CR); 1384 value = phy_read(tbiphy, ENET_TBI_MII_CR);
1385 value &= ~0x1000; /* Turn off autonegotiation */ 1385 value &= ~0x1000; /* Turn off autonegotiation */
1386 phy_write(tbiphy, ENET_TBI_MII_CR, value); 1386 phy_write(tbiphy, ENET_TBI_MII_CR, value);
1387
1388 put_device(&tbiphy->dev);
1387 } 1389 }
1388 1390
1389 init_check_frame_length_mode(ug_info->lengthCheckRx, &ug_regs->maccfg2); 1391 init_check_frame_length_mode(ug_info->lengthCheckRx, &ug_regs->maccfg2);
@@ -1702,8 +1704,10 @@ static void uec_configure_serdes(struct net_device *dev)
1702 * everything for us? Resetting it takes the link down and requires 1704 * everything for us? Resetting it takes the link down and requires
1703 * several seconds for it to come back. 1705 * several seconds for it to come back.
1704 */ 1706 */
1705 if (phy_read(tbiphy, ENET_TBI_MII_SR) & TBISR_LSTATUS) 1707 if (phy_read(tbiphy, ENET_TBI_MII_SR) & TBISR_LSTATUS) {
1708 put_device(&tbiphy->dev);
1706 return; 1709 return;
1710 }
1707 1711
1708 /* Single clk mode, mii mode off(for serdes communication) */ 1712 /* Single clk mode, mii mode off(for serdes communication) */
1709 phy_write(tbiphy, ENET_TBI_MII_ANA, TBIANA_SETTINGS); 1713 phy_write(tbiphy, ENET_TBI_MII_ANA, TBIANA_SETTINGS);
@@ -1711,6 +1715,8 @@ static void uec_configure_serdes(struct net_device *dev)
1711 phy_write(tbiphy, ENET_TBI_MII_TBICON, TBICON_CLK_SELECT); 1715 phy_write(tbiphy, ENET_TBI_MII_TBICON, TBICON_CLK_SELECT);
1712 1716
1713 phy_write(tbiphy, ENET_TBI_MII_CR, TBICR_SETTINGS); 1717 phy_write(tbiphy, ENET_TBI_MII_CR, TBICR_SETTINGS);
1718
1719 put_device(&tbiphy->dev);
1714} 1720}
1715 1721
1716/* Configure the PHY for dev. 1722/* Configure the PHY for dev.
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index fe2299ac4f5c..514df76fc70f 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -1479,6 +1479,7 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
1479 struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq); 1479 struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
1480 struct sk_buff *skb; 1480 struct sk_buff *skb;
1481 unsigned char *data; 1481 unsigned char *data;
1482 dma_addr_t phys_addr;
1482 u32 rx_status; 1483 u32 rx_status;
1483 int rx_bytes, err; 1484 int rx_bytes, err;
1484 1485
@@ -1486,6 +1487,7 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
1486 rx_status = rx_desc->status; 1487 rx_status = rx_desc->status;
1487 rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE); 1488 rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE);
1488 data = (unsigned char *)rx_desc->buf_cookie; 1489 data = (unsigned char *)rx_desc->buf_cookie;
1490 phys_addr = rx_desc->buf_phys_addr;
1489 1491
1490 if (!mvneta_rxq_desc_is_first_last(rx_status) || 1492 if (!mvneta_rxq_desc_is_first_last(rx_status) ||
1491 (rx_status & MVNETA_RXD_ERR_SUMMARY)) { 1493 (rx_status & MVNETA_RXD_ERR_SUMMARY)) {
@@ -1534,7 +1536,7 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
1534 if (!skb) 1536 if (!skb)
1535 goto err_drop_frame; 1537 goto err_drop_frame;
1536 1538
1537 dma_unmap_single(dev->dev.parent, rx_desc->buf_phys_addr, 1539 dma_unmap_single(dev->dev.parent, phys_addr,
1538 MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE); 1540 MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE);
1539 1541
1540 rcvd_pkts++; 1542 rcvd_pkts++;
@@ -3173,6 +3175,8 @@ static int mvneta_probe(struct platform_device *pdev)
3173 struct phy_device *phy = of_phy_find_device(dn); 3175 struct phy_device *phy = of_phy_find_device(dn);
3174 3176
3175 mvneta_fixed_link_update(pp, phy); 3177 mvneta_fixed_link_update(pp, phy);
3178
3179 put_device(&phy->dev);
3176 } 3180 }
3177 3181
3178 return 0; 3182 return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 4402a1e48c9b..e7a5000aa12c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -1047,13 +1047,15 @@ int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
1047 1047
1048 /* If we used up all the quota - we're probably not done yet... */ 1048 /* If we used up all the quota - we're probably not done yet... */
1049 if (done == budget) { 1049 if (done == budget) {
1050 int cpu_curr;
1051 const struct cpumask *aff; 1050 const struct cpumask *aff;
1051 struct irq_data *idata;
1052 int cpu_curr;
1052 1053
1053 INC_PERF_COUNTER(priv->pstats.napi_quota); 1054 INC_PERF_COUNTER(priv->pstats.napi_quota);
1054 1055
1055 cpu_curr = smp_processor_id(); 1056 cpu_curr = smp_processor_id();
1056 aff = irq_desc_get_irq_data(cq->irq_desc)->affinity; 1057 idata = irq_desc_get_irq_data(cq->irq_desc);
1058 aff = irq_data_get_affinity_mask(idata);
1057 1059
1058 if (likely(cpumask_test_cpu(cpu_curr, aff))) 1060 if (likely(cpumask_test_cpu(cpu_curr, aff)))
1059 return budget; 1061 return budget;
@@ -1268,8 +1270,6 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
1268 rss_context->hash_fn = MLX4_RSS_HASH_TOP; 1270 rss_context->hash_fn = MLX4_RSS_HASH_TOP;
1269 memcpy(rss_context->rss_key, priv->rss_key, 1271 memcpy(rss_context->rss_key, priv->rss_key,
1270 MLX4_EN_RSS_KEY_SIZE); 1272 MLX4_EN_RSS_KEY_SIZE);
1271 netdev_rss_key_fill(rss_context->rss_key,
1272 MLX4_EN_RSS_KEY_SIZE);
1273 } else { 1273 } else {
1274 en_err(priv, "Unknown RSS hash function requested\n"); 1274 en_err(priv, "Unknown RSS hash function requested\n");
1275 err = -EINVAL; 1275 err = -EINVAL;
diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c
index 66d4ab703f45..60f43ec22175 100644
--- a/drivers/net/ethernet/micrel/ks8851.c
+++ b/drivers/net/ethernet/micrel/ks8851.c
@@ -1601,6 +1601,7 @@ static const struct of_device_id ks8851_match_table[] = {
1601 { .compatible = "micrel,ks8851" }, 1601 { .compatible = "micrel,ks8851" },
1602 { } 1602 { }
1603}; 1603};
1604MODULE_DEVICE_TABLE(of, ks8851_match_table);
1604 1605
1605static struct spi_driver ks8851_driver = { 1606static struct spi_driver ks8851_driver = {
1606 .driver = { 1607 .driver = {
diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c
index becbb5f1f5a7..a10c928bbd6b 100644
--- a/drivers/net/ethernet/moxa/moxart_ether.c
+++ b/drivers/net/ethernet/moxa/moxart_ether.c
@@ -552,6 +552,7 @@ static const struct of_device_id moxart_mac_match[] = {
552 { .compatible = "moxa,moxart-mac" }, 552 { .compatible = "moxa,moxart-mac" },
553 { } 553 { }
554}; 554};
555MODULE_DEVICE_TABLE(of, moxart_mac_match);
555 556
556static struct platform_driver moxart_mac_driver = { 557static struct platform_driver moxart_mac_driver = {
557 .probe = moxart_mac_probe, 558 .probe = moxart_mac_probe,
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index 06bcc734fe8d..d6696cfa11d2 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -536,6 +536,7 @@ struct qlcnic_hardware_context {
536 u8 extend_lb_time; 536 u8 extend_lb_time;
537 u8 phys_port_id[ETH_ALEN]; 537 u8 phys_port_id[ETH_ALEN];
538 u8 lb_mode; 538 u8 lb_mode;
539 u8 vxlan_port_count;
539 u16 vxlan_port; 540 u16 vxlan_port;
540 struct device *hwmon_dev; 541 struct device *hwmon_dev;
541 u32 post_mode; 542 u32 post_mode;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 8b08b20e8b30..d4481454b5f8 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -483,11 +483,17 @@ static void qlcnic_add_vxlan_port(struct net_device *netdev,
483 /* Adapter supports only one VXLAN port. Use very first port 483 /* Adapter supports only one VXLAN port. Use very first port
484 * for enabling offload 484 * for enabling offload
485 */ 485 */
486 if (!qlcnic_encap_rx_offload(adapter) || ahw->vxlan_port) 486 if (!qlcnic_encap_rx_offload(adapter))
487 return; 487 return;
488 if (!ahw->vxlan_port_count) {
489 ahw->vxlan_port_count = 1;
490 ahw->vxlan_port = ntohs(port);
491 adapter->flags |= QLCNIC_ADD_VXLAN_PORT;
492 return;
493 }
494 if (ahw->vxlan_port == ntohs(port))
495 ahw->vxlan_port_count++;
488 496
489 ahw->vxlan_port = ntohs(port);
490 adapter->flags |= QLCNIC_ADD_VXLAN_PORT;
491} 497}
492 498
493static void qlcnic_del_vxlan_port(struct net_device *netdev, 499static void qlcnic_del_vxlan_port(struct net_device *netdev,
@@ -496,11 +502,13 @@ static void qlcnic_del_vxlan_port(struct net_device *netdev,
496 struct qlcnic_adapter *adapter = netdev_priv(netdev); 502 struct qlcnic_adapter *adapter = netdev_priv(netdev);
497 struct qlcnic_hardware_context *ahw = adapter->ahw; 503 struct qlcnic_hardware_context *ahw = adapter->ahw;
498 504
499 if (!qlcnic_encap_rx_offload(adapter) || !ahw->vxlan_port || 505 if (!qlcnic_encap_rx_offload(adapter) || !ahw->vxlan_port_count ||
500 (ahw->vxlan_port != ntohs(port))) 506 (ahw->vxlan_port != ntohs(port)))
501 return; 507 return;
502 508
503 adapter->flags |= QLCNIC_DEL_VXLAN_PORT; 509 ahw->vxlan_port_count--;
510 if (!ahw->vxlan_port_count)
511 adapter->flags |= QLCNIC_DEL_VXLAN_PORT;
504} 512}
505 513
506static netdev_features_t qlcnic_features_check(struct sk_buff *skb, 514static netdev_features_t qlcnic_features_check(struct sk_buff *skb,
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index d79e33b3c191..686334f4588d 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -157,6 +157,7 @@ enum {
157 NWayAdvert = 0x66, /* MII ADVERTISE */ 157 NWayAdvert = 0x66, /* MII ADVERTISE */
158 NWayLPAR = 0x68, /* MII LPA */ 158 NWayLPAR = 0x68, /* MII LPA */
159 NWayExpansion = 0x6A, /* MII Expansion */ 159 NWayExpansion = 0x6A, /* MII Expansion */
160 TxDmaOkLowDesc = 0x82, /* Low 16 bit address of a Tx descriptor. */
160 Config5 = 0xD8, /* Config5 */ 161 Config5 = 0xD8, /* Config5 */
161 TxPoll = 0xD9, /* Tell chip to check Tx descriptors for work */ 162 TxPoll = 0xD9, /* Tell chip to check Tx descriptors for work */
162 RxMaxSize = 0xDA, /* Max size of an Rx packet (8169 only) */ 163 RxMaxSize = 0xDA, /* Max size of an Rx packet (8169 only) */
@@ -341,6 +342,7 @@ struct cp_private {
341 unsigned tx_tail; 342 unsigned tx_tail;
342 struct cp_desc *tx_ring; 343 struct cp_desc *tx_ring;
343 struct sk_buff *tx_skb[CP_TX_RING_SIZE]; 344 struct sk_buff *tx_skb[CP_TX_RING_SIZE];
345 u32 tx_opts[CP_TX_RING_SIZE];
344 346
345 unsigned rx_buf_sz; 347 unsigned rx_buf_sz;
346 unsigned wol_enabled : 1; /* Is Wake-on-LAN enabled? */ 348 unsigned wol_enabled : 1; /* Is Wake-on-LAN enabled? */
@@ -665,7 +667,7 @@ static void cp_tx (struct cp_private *cp)
665 BUG_ON(!skb); 667 BUG_ON(!skb);
666 668
667 dma_unmap_single(&cp->pdev->dev, le64_to_cpu(txd->addr), 669 dma_unmap_single(&cp->pdev->dev, le64_to_cpu(txd->addr),
668 le32_to_cpu(txd->opts1) & 0xffff, 670 cp->tx_opts[tx_tail] & 0xffff,
669 PCI_DMA_TODEVICE); 671 PCI_DMA_TODEVICE);
670 672
671 if (status & LastFrag) { 673 if (status & LastFrag) {
@@ -733,7 +735,7 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
733{ 735{
734 struct cp_private *cp = netdev_priv(dev); 736 struct cp_private *cp = netdev_priv(dev);
735 unsigned entry; 737 unsigned entry;
736 u32 eor, flags; 738 u32 eor, opts1;
737 unsigned long intr_flags; 739 unsigned long intr_flags;
738 __le32 opts2; 740 __le32 opts2;
739 int mss = 0; 741 int mss = 0;
@@ -753,6 +755,21 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
753 mss = skb_shinfo(skb)->gso_size; 755 mss = skb_shinfo(skb)->gso_size;
754 756
755 opts2 = cpu_to_le32(cp_tx_vlan_tag(skb)); 757 opts2 = cpu_to_le32(cp_tx_vlan_tag(skb));
758 opts1 = DescOwn;
759 if (mss)
760 opts1 |= LargeSend | ((mss & MSSMask) << MSSShift);
761 else if (skb->ip_summed == CHECKSUM_PARTIAL) {
762 const struct iphdr *ip = ip_hdr(skb);
763 if (ip->protocol == IPPROTO_TCP)
764 opts1 |= IPCS | TCPCS;
765 else if (ip->protocol == IPPROTO_UDP)
766 opts1 |= IPCS | UDPCS;
767 else {
768 WARN_ONCE(1,
769 "Net bug: asked to checksum invalid Legacy IP packet\n");
770 goto out_dma_error;
771 }
772 }
756 773
757 if (skb_shinfo(skb)->nr_frags == 0) { 774 if (skb_shinfo(skb)->nr_frags == 0) {
758 struct cp_desc *txd = &cp->tx_ring[entry]; 775 struct cp_desc *txd = &cp->tx_ring[entry];
@@ -768,31 +785,20 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
768 txd->addr = cpu_to_le64(mapping); 785 txd->addr = cpu_to_le64(mapping);
769 wmb(); 786 wmb();
770 787
771 flags = eor | len | DescOwn | FirstFrag | LastFrag; 788 opts1 |= eor | len | FirstFrag | LastFrag;
772
773 if (mss)
774 flags |= LargeSend | ((mss & MSSMask) << MSSShift);
775 else if (skb->ip_summed == CHECKSUM_PARTIAL) {
776 const struct iphdr *ip = ip_hdr(skb);
777 if (ip->protocol == IPPROTO_TCP)
778 flags |= IPCS | TCPCS;
779 else if (ip->protocol == IPPROTO_UDP)
780 flags |= IPCS | UDPCS;
781 else
782 WARN_ON(1); /* we need a WARN() */
783 }
784 789
785 txd->opts1 = cpu_to_le32(flags); 790 txd->opts1 = cpu_to_le32(opts1);
786 wmb(); 791 wmb();
787 792
788 cp->tx_skb[entry] = skb; 793 cp->tx_skb[entry] = skb;
789 entry = NEXT_TX(entry); 794 cp->tx_opts[entry] = opts1;
795 netif_dbg(cp, tx_queued, cp->dev, "tx queued, slot %d, skblen %d\n",
796 entry, skb->len);
790 } else { 797 } else {
791 struct cp_desc *txd; 798 struct cp_desc *txd;
792 u32 first_len, first_eor; 799 u32 first_len, first_eor, ctrl;
793 dma_addr_t first_mapping; 800 dma_addr_t first_mapping;
794 int frag, first_entry = entry; 801 int frag, first_entry = entry;
795 const struct iphdr *ip = ip_hdr(skb);
796 802
797 /* We must give this initial chunk to the device last. 803 /* We must give this initial chunk to the device last.
798 * Otherwise we could race with the device. 804 * Otherwise we could race with the device.
@@ -805,14 +811,14 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
805 goto out_dma_error; 811 goto out_dma_error;
806 812
807 cp->tx_skb[entry] = skb; 813 cp->tx_skb[entry] = skb;
808 entry = NEXT_TX(entry);
809 814
810 for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) { 815 for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
811 const skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag]; 816 const skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
812 u32 len; 817 u32 len;
813 u32 ctrl;
814 dma_addr_t mapping; 818 dma_addr_t mapping;
815 819
820 entry = NEXT_TX(entry);
821
816 len = skb_frag_size(this_frag); 822 len = skb_frag_size(this_frag);
817 mapping = dma_map_single(&cp->pdev->dev, 823 mapping = dma_map_single(&cp->pdev->dev,
818 skb_frag_address(this_frag), 824 skb_frag_address(this_frag),
@@ -824,19 +830,7 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
824 830
825 eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0; 831 eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
826 832
827 ctrl = eor | len | DescOwn; 833 ctrl = opts1 | eor | len;
828
829 if (mss)
830 ctrl |= LargeSend |
831 ((mss & MSSMask) << MSSShift);
832 else if (skb->ip_summed == CHECKSUM_PARTIAL) {
833 if (ip->protocol == IPPROTO_TCP)
834 ctrl |= IPCS | TCPCS;
835 else if (ip->protocol == IPPROTO_UDP)
836 ctrl |= IPCS | UDPCS;
837 else
838 BUG();
839 }
840 834
841 if (frag == skb_shinfo(skb)->nr_frags - 1) 835 if (frag == skb_shinfo(skb)->nr_frags - 1)
842 ctrl |= LastFrag; 836 ctrl |= LastFrag;
@@ -849,8 +843,8 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
849 txd->opts1 = cpu_to_le32(ctrl); 843 txd->opts1 = cpu_to_le32(ctrl);
850 wmb(); 844 wmb();
851 845
846 cp->tx_opts[entry] = ctrl;
852 cp->tx_skb[entry] = skb; 847 cp->tx_skb[entry] = skb;
853 entry = NEXT_TX(entry);
854 } 848 }
855 849
856 txd = &cp->tx_ring[first_entry]; 850 txd = &cp->tx_ring[first_entry];
@@ -858,27 +852,17 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
858 txd->addr = cpu_to_le64(first_mapping); 852 txd->addr = cpu_to_le64(first_mapping);
859 wmb(); 853 wmb();
860 854
861 if (skb->ip_summed == CHECKSUM_PARTIAL) { 855 ctrl = opts1 | first_eor | first_len | FirstFrag;
862 if (ip->protocol == IPPROTO_TCP) 856 txd->opts1 = cpu_to_le32(ctrl);
863 txd->opts1 = cpu_to_le32(first_eor | first_len |
864 FirstFrag | DescOwn |
865 IPCS | TCPCS);
866 else if (ip->protocol == IPPROTO_UDP)
867 txd->opts1 = cpu_to_le32(first_eor | first_len |
868 FirstFrag | DescOwn |
869 IPCS | UDPCS);
870 else
871 BUG();
872 } else
873 txd->opts1 = cpu_to_le32(first_eor | first_len |
874 FirstFrag | DescOwn);
875 wmb(); 857 wmb();
858
859 cp->tx_opts[first_entry] = ctrl;
860 netif_dbg(cp, tx_queued, cp->dev, "tx queued, slots %d-%d, skblen %d\n",
861 first_entry, entry, skb->len);
876 } 862 }
877 cp->tx_head = entry; 863 cp->tx_head = NEXT_TX(entry);
878 864
879 netdev_sent_queue(dev, skb->len); 865 netdev_sent_queue(dev, skb->len);
880 netif_dbg(cp, tx_queued, cp->dev, "tx queued, slot %d, skblen %d\n",
881 entry, skb->len);
882 if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1)) 866 if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1))
883 netif_stop_queue(dev); 867 netif_stop_queue(dev);
884 868
@@ -1115,6 +1099,7 @@ static int cp_init_rings (struct cp_private *cp)
1115{ 1099{
1116 memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE); 1100 memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
1117 cp->tx_ring[CP_TX_RING_SIZE - 1].opts1 = cpu_to_le32(RingEnd); 1101 cp->tx_ring[CP_TX_RING_SIZE - 1].opts1 = cpu_to_le32(RingEnd);
1102 memset(cp->tx_opts, 0, sizeof(cp->tx_opts));
1118 1103
1119 cp_init_rings_index(cp); 1104 cp_init_rings_index(cp);
1120 1105
@@ -1151,7 +1136,7 @@ static void cp_clean_rings (struct cp_private *cp)
1151 desc = cp->rx_ring + i; 1136 desc = cp->rx_ring + i;
1152 dma_unmap_single(&cp->pdev->dev,le64_to_cpu(desc->addr), 1137 dma_unmap_single(&cp->pdev->dev,le64_to_cpu(desc->addr),
1153 cp->rx_buf_sz, PCI_DMA_FROMDEVICE); 1138 cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1154 dev_kfree_skb(cp->rx_skb[i]); 1139 dev_kfree_skb_any(cp->rx_skb[i]);
1155 } 1140 }
1156 } 1141 }
1157 1142
@@ -1164,7 +1149,7 @@ static void cp_clean_rings (struct cp_private *cp)
1164 le32_to_cpu(desc->opts1) & 0xffff, 1149 le32_to_cpu(desc->opts1) & 0xffff,
1165 PCI_DMA_TODEVICE); 1150 PCI_DMA_TODEVICE);
1166 if (le32_to_cpu(desc->opts1) & LastFrag) 1151 if (le32_to_cpu(desc->opts1) & LastFrag)
1167 dev_kfree_skb(skb); 1152 dev_kfree_skb_any(skb);
1168 cp->dev->stats.tx_dropped++; 1153 cp->dev->stats.tx_dropped++;
1169 } 1154 }
1170 } 1155 }
@@ -1172,6 +1157,7 @@ static void cp_clean_rings (struct cp_private *cp)
1172 1157
1173 memset(cp->rx_ring, 0, sizeof(struct cp_desc) * CP_RX_RING_SIZE); 1158 memset(cp->rx_ring, 0, sizeof(struct cp_desc) * CP_RX_RING_SIZE);
1174 memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE); 1159 memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
1160 memset(cp->tx_opts, 0, sizeof(cp->tx_opts));
1175 1161
1176 memset(cp->rx_skb, 0, sizeof(struct sk_buff *) * CP_RX_RING_SIZE); 1162 memset(cp->rx_skb, 0, sizeof(struct sk_buff *) * CP_RX_RING_SIZE);
1177 memset(cp->tx_skb, 0, sizeof(struct sk_buff *) * CP_TX_RING_SIZE); 1163 memset(cp->tx_skb, 0, sizeof(struct sk_buff *) * CP_TX_RING_SIZE);
@@ -1249,7 +1235,7 @@ static void cp_tx_timeout(struct net_device *dev)
1249{ 1235{
1250 struct cp_private *cp = netdev_priv(dev); 1236 struct cp_private *cp = netdev_priv(dev);
1251 unsigned long flags; 1237 unsigned long flags;
1252 int rc; 1238 int rc, i;
1253 1239
1254 netdev_warn(dev, "Transmit timeout, status %2x %4x %4x %4x\n", 1240 netdev_warn(dev, "Transmit timeout, status %2x %4x %4x %4x\n",
1255 cpr8(Cmd), cpr16(CpCmd), 1241 cpr8(Cmd), cpr16(CpCmd),
@@ -1257,13 +1243,26 @@ static void cp_tx_timeout(struct net_device *dev)
1257 1243
1258 spin_lock_irqsave(&cp->lock, flags); 1244 spin_lock_irqsave(&cp->lock, flags);
1259 1245
1246 netif_dbg(cp, tx_err, cp->dev, "TX ring head %d tail %d desc %x\n",
1247 cp->tx_head, cp->tx_tail, cpr16(TxDmaOkLowDesc));
1248 for (i = 0; i < CP_TX_RING_SIZE; i++) {
1249 netif_dbg(cp, tx_err, cp->dev,
1250 "TX slot %d @%p: %08x (%08x) %08x %llx %p\n",
1251 i, &cp->tx_ring[i], le32_to_cpu(cp->tx_ring[i].opts1),
1252 cp->tx_opts[i], le32_to_cpu(cp->tx_ring[i].opts2),
1253 le64_to_cpu(cp->tx_ring[i].addr),
1254 cp->tx_skb[i]);
1255 }
1256
1260 cp_stop_hw(cp); 1257 cp_stop_hw(cp);
1261 cp_clean_rings(cp); 1258 cp_clean_rings(cp);
1262 rc = cp_init_rings(cp); 1259 rc = cp_init_rings(cp);
1263 cp_start_hw(cp); 1260 cp_start_hw(cp);
1264 cp_enable_irq(cp); 1261 __cp_set_rx_mode(dev);
1262 cpw16_f(IntrMask, cp_norx_intr_mask);
1265 1263
1266 netif_wake_queue(dev); 1264 netif_wake_queue(dev);
1265 napi_schedule_irqoff(&cp->napi);
1267 1266
1268 spin_unlock_irqrestore(&cp->lock, flags); 1267 spin_unlock_irqrestore(&cp->lock, flags);
1269} 1268}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index b735fa22ac95..ebf6abc4853f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -161,11 +161,16 @@ int stmmac_mdio_reset(struct mii_bus *bus)
161 161
162 if (!gpio_request(reset_gpio, "mdio-reset")) { 162 if (!gpio_request(reset_gpio, "mdio-reset")) {
163 gpio_direction_output(reset_gpio, active_low ? 1 : 0); 163 gpio_direction_output(reset_gpio, active_low ? 1 : 0);
164 udelay(data->delays[0]); 164 if (data->delays[0])
165 msleep(DIV_ROUND_UP(data->delays[0], 1000));
166
165 gpio_set_value(reset_gpio, active_low ? 0 : 1); 167 gpio_set_value(reset_gpio, active_low ? 0 : 1);
166 udelay(data->delays[1]); 168 if (data->delays[1])
169 msleep(DIV_ROUND_UP(data->delays[1], 1000));
170
167 gpio_set_value(reset_gpio, active_low ? 1 : 0); 171 gpio_set_value(reset_gpio, active_low ? 1 : 0);
168 udelay(data->delays[2]); 172 if (data->delays[2])
173 msleep(DIV_ROUND_UP(data->delays[2], 1000));
169 } 174 }
170 } 175 }
171#endif 176#endif
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index 53fe200e0b79..cc106d892e29 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -1756,7 +1756,8 @@ static const struct net_device_ops vnet_ops = {
1756#endif 1756#endif
1757}; 1757};
1758 1758
1759static struct vnet *vnet_new(const u64 *local_mac) 1759static struct vnet *vnet_new(const u64 *local_mac,
1760 struct vio_dev *vdev)
1760{ 1761{
1761 struct net_device *dev; 1762 struct net_device *dev;
1762 struct vnet *vp; 1763 struct vnet *vp;
@@ -1790,6 +1791,8 @@ static struct vnet *vnet_new(const u64 *local_mac)
1790 NETIF_F_HW_CSUM | NETIF_F_SG; 1791 NETIF_F_HW_CSUM | NETIF_F_SG;
1791 dev->features = dev->hw_features; 1792 dev->features = dev->hw_features;
1792 1793
1794 SET_NETDEV_DEV(dev, &vdev->dev);
1795
1793 err = register_netdev(dev); 1796 err = register_netdev(dev);
1794 if (err) { 1797 if (err) {
1795 pr_err("Cannot register net device, aborting\n"); 1798 pr_err("Cannot register net device, aborting\n");
@@ -1808,7 +1811,8 @@ err_out_free_dev:
1808 return ERR_PTR(err); 1811 return ERR_PTR(err);
1809} 1812}
1810 1813
1811static struct vnet *vnet_find_or_create(const u64 *local_mac) 1814static struct vnet *vnet_find_or_create(const u64 *local_mac,
1815 struct vio_dev *vdev)
1812{ 1816{
1813 struct vnet *iter, *vp; 1817 struct vnet *iter, *vp;
1814 1818
@@ -1821,7 +1825,7 @@ static struct vnet *vnet_find_or_create(const u64 *local_mac)
1821 } 1825 }
1822 } 1826 }
1823 if (!vp) 1827 if (!vp)
1824 vp = vnet_new(local_mac); 1828 vp = vnet_new(local_mac, vdev);
1825 mutex_unlock(&vnet_list_mutex); 1829 mutex_unlock(&vnet_list_mutex);
1826 1830
1827 return vp; 1831 return vp;
@@ -1848,7 +1852,8 @@ static void vnet_cleanup(void)
1848static const char *local_mac_prop = "local-mac-address"; 1852static const char *local_mac_prop = "local-mac-address";
1849 1853
1850static struct vnet *vnet_find_parent(struct mdesc_handle *hp, 1854static struct vnet *vnet_find_parent(struct mdesc_handle *hp,
1851 u64 port_node) 1855 u64 port_node,
1856 struct vio_dev *vdev)
1852{ 1857{
1853 const u64 *local_mac = NULL; 1858 const u64 *local_mac = NULL;
1854 u64 a; 1859 u64 a;
@@ -1869,7 +1874,7 @@ static struct vnet *vnet_find_parent(struct mdesc_handle *hp,
1869 if (!local_mac) 1874 if (!local_mac)
1870 return ERR_PTR(-ENODEV); 1875 return ERR_PTR(-ENODEV);
1871 1876
1872 return vnet_find_or_create(local_mac); 1877 return vnet_find_or_create(local_mac, vdev);
1873} 1878}
1874 1879
1875static struct ldc_channel_config vnet_ldc_cfg = { 1880static struct ldc_channel_config vnet_ldc_cfg = {
@@ -1923,7 +1928,7 @@ static int vnet_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
1923 1928
1924 hp = mdesc_grab(); 1929 hp = mdesc_grab();
1925 1930
1926 vp = vnet_find_parent(hp, vdev->mp); 1931 vp = vnet_find_parent(hp, vdev->mp, vdev);
1927 if (IS_ERR(vp)) { 1932 if (IS_ERR(vp)) {
1928 pr_err("Cannot find port parent vnet\n"); 1933 pr_err("Cannot find port parent vnet\n");
1929 err = PTR_ERR(vp); 1934 err = PTR_ERR(vp);
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
index 1a5aca55ea9f..9f9832f0dea9 100644
--- a/drivers/net/ethernet/ti/netcp_core.c
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -291,13 +291,6 @@ static int netcp_module_probe(struct netcp_device *netcp_device,
291 interface_list) { 291 interface_list) {
292 struct netcp_intf_modpriv *intf_modpriv; 292 struct netcp_intf_modpriv *intf_modpriv;
293 293
294 /* If interface not registered then register now */
295 if (!netcp_intf->netdev_registered)
296 ret = netcp_register_interface(netcp_intf);
297
298 if (ret)
299 return -ENODEV;
300
301 intf_modpriv = devm_kzalloc(dev, sizeof(*intf_modpriv), 294 intf_modpriv = devm_kzalloc(dev, sizeof(*intf_modpriv),
302 GFP_KERNEL); 295 GFP_KERNEL);
303 if (!intf_modpriv) 296 if (!intf_modpriv)
@@ -306,6 +299,11 @@ static int netcp_module_probe(struct netcp_device *netcp_device,
306 interface = of_parse_phandle(netcp_intf->node_interface, 299 interface = of_parse_phandle(netcp_intf->node_interface,
307 module->name, 0); 300 module->name, 0);
308 301
302 if (!interface) {
303 devm_kfree(dev, intf_modpriv);
304 continue;
305 }
306
309 intf_modpriv->netcp_priv = netcp_intf; 307 intf_modpriv->netcp_priv = netcp_intf;
310 intf_modpriv->netcp_module = module; 308 intf_modpriv->netcp_module = module;
311 list_add_tail(&intf_modpriv->intf_list, 309 list_add_tail(&intf_modpriv->intf_list,
@@ -323,6 +321,18 @@ static int netcp_module_probe(struct netcp_device *netcp_device,
323 continue; 321 continue;
324 } 322 }
325 } 323 }
324
325 /* Now register the interface with netdev */
326 list_for_each_entry(netcp_intf,
327 &netcp_device->interface_head,
328 interface_list) {
329 /* If interface not registered then register now */
330 if (!netcp_intf->netdev_registered) {
331 ret = netcp_register_interface(netcp_intf);
332 if (ret)
333 return -ENODEV;
334 }
335 }
326 return 0; 336 return 0;
327} 337}
328 338
@@ -357,7 +367,6 @@ int netcp_register_module(struct netcp_module *module)
357 if (ret < 0) 367 if (ret < 0)
358 goto fail; 368 goto fail;
359 } 369 }
360
361 mutex_unlock(&netcp_modules_lock); 370 mutex_unlock(&netcp_modules_lock);
362 return 0; 371 return 0;
363 372
@@ -796,7 +805,7 @@ static void netcp_rxpool_free(struct netcp_intf *netcp)
796 netcp->rx_pool = NULL; 805 netcp->rx_pool = NULL;
797} 806}
798 807
799static void netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq) 808static int netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
800{ 809{
801 struct knav_dma_desc *hwdesc; 810 struct knav_dma_desc *hwdesc;
802 unsigned int buf_len, dma_sz; 811 unsigned int buf_len, dma_sz;
@@ -810,7 +819,7 @@ static void netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
810 hwdesc = knav_pool_desc_get(netcp->rx_pool); 819 hwdesc = knav_pool_desc_get(netcp->rx_pool);
811 if (IS_ERR_OR_NULL(hwdesc)) { 820 if (IS_ERR_OR_NULL(hwdesc)) {
812 dev_dbg(netcp->ndev_dev, "out of rx pool desc\n"); 821 dev_dbg(netcp->ndev_dev, "out of rx pool desc\n");
813 return; 822 return -ENOMEM;
814 } 823 }
815 824
816 if (likely(fdq == 0)) { 825 if (likely(fdq == 0)) {
@@ -862,25 +871,26 @@ static void netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
862 knav_pool_desc_map(netcp->rx_pool, hwdesc, sizeof(*hwdesc), &dma, 871 knav_pool_desc_map(netcp->rx_pool, hwdesc, sizeof(*hwdesc), &dma,
863 &dma_sz); 872 &dma_sz);
864 knav_queue_push(netcp->rx_fdq[fdq], dma, sizeof(*hwdesc), 0); 873 knav_queue_push(netcp->rx_fdq[fdq], dma, sizeof(*hwdesc), 0);
865 return; 874 return 0;
866 875
867fail: 876fail:
868 knav_pool_desc_put(netcp->rx_pool, hwdesc); 877 knav_pool_desc_put(netcp->rx_pool, hwdesc);
878 return -ENOMEM;
869} 879}
870 880
871/* Refill Rx FDQ with descriptors & attached buffers */ 881/* Refill Rx FDQ with descriptors & attached buffers */
872static void netcp_rxpool_refill(struct netcp_intf *netcp) 882static void netcp_rxpool_refill(struct netcp_intf *netcp)
873{ 883{
874 u32 fdq_deficit[KNAV_DMA_FDQ_PER_CHAN] = {0}; 884 u32 fdq_deficit[KNAV_DMA_FDQ_PER_CHAN] = {0};
875 int i; 885 int i, ret = 0;
876 886
877 /* Calculate the FDQ deficit and refill */ 887 /* Calculate the FDQ deficit and refill */
878 for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN && netcp->rx_fdq[i]; i++) { 888 for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN && netcp->rx_fdq[i]; i++) {
879 fdq_deficit[i] = netcp->rx_queue_depths[i] - 889 fdq_deficit[i] = netcp->rx_queue_depths[i] -
880 knav_queue_get_count(netcp->rx_fdq[i]); 890 knav_queue_get_count(netcp->rx_fdq[i]);
881 891
882 while (fdq_deficit[i]--) 892 while (fdq_deficit[i]-- && !ret)
883 netcp_allocate_rx_buf(netcp, i); 893 ret = netcp_allocate_rx_buf(netcp, i);
884 } /* end for fdqs */ 894 } /* end for fdqs */
885} 895}
886 896
@@ -893,12 +903,12 @@ static int netcp_rx_poll(struct napi_struct *napi, int budget)
893 903
894 packets = netcp_process_rx_packets(netcp, budget); 904 packets = netcp_process_rx_packets(netcp, budget);
895 905
906 netcp_rxpool_refill(netcp);
896 if (packets < budget) { 907 if (packets < budget) {
897 napi_complete(&netcp->rx_napi); 908 napi_complete(&netcp->rx_napi);
898 knav_queue_enable_notify(netcp->rx_queue); 909 knav_queue_enable_notify(netcp->rx_queue);
899 } 910 }
900 911
901 netcp_rxpool_refill(netcp);
902 return packets; 912 return packets;
903} 913}
904 914
@@ -1384,7 +1394,6 @@ static void netcp_addr_sweep_del(struct netcp_intf *netcp)
1384 continue; 1394 continue;
1385 dev_dbg(netcp->ndev_dev, "deleting address %pM, type %x\n", 1395 dev_dbg(netcp->ndev_dev, "deleting address %pM, type %x\n",
1386 naddr->addr, naddr->type); 1396 naddr->addr, naddr->type);
1387 mutex_lock(&netcp_modules_lock);
1388 for_each_module(netcp, priv) { 1397 for_each_module(netcp, priv) {
1389 module = priv->netcp_module; 1398 module = priv->netcp_module;
1390 if (!module->del_addr) 1399 if (!module->del_addr)
@@ -1393,7 +1402,6 @@ static void netcp_addr_sweep_del(struct netcp_intf *netcp)
1393 naddr); 1402 naddr);
1394 WARN_ON(error); 1403 WARN_ON(error);
1395 } 1404 }
1396 mutex_unlock(&netcp_modules_lock);
1397 netcp_addr_del(netcp, naddr); 1405 netcp_addr_del(netcp, naddr);
1398 } 1406 }
1399} 1407}
@@ -1410,7 +1418,7 @@ static void netcp_addr_sweep_add(struct netcp_intf *netcp)
1410 continue; 1418 continue;
1411 dev_dbg(netcp->ndev_dev, "adding address %pM, type %x\n", 1419 dev_dbg(netcp->ndev_dev, "adding address %pM, type %x\n",
1412 naddr->addr, naddr->type); 1420 naddr->addr, naddr->type);
1413 mutex_lock(&netcp_modules_lock); 1421
1414 for_each_module(netcp, priv) { 1422 for_each_module(netcp, priv) {
1415 module = priv->netcp_module; 1423 module = priv->netcp_module;
1416 if (!module->add_addr) 1424 if (!module->add_addr)
@@ -1418,7 +1426,6 @@ static void netcp_addr_sweep_add(struct netcp_intf *netcp)
1418 error = module->add_addr(priv->module_priv, naddr); 1426 error = module->add_addr(priv->module_priv, naddr);
1419 WARN_ON(error); 1427 WARN_ON(error);
1420 } 1428 }
1421 mutex_unlock(&netcp_modules_lock);
1422 } 1429 }
1423} 1430}
1424 1431
@@ -1432,6 +1439,7 @@ static void netcp_set_rx_mode(struct net_device *ndev)
1432 ndev->flags & IFF_ALLMULTI || 1439 ndev->flags & IFF_ALLMULTI ||
1433 netdev_mc_count(ndev) > NETCP_MAX_MCAST_ADDR); 1440 netdev_mc_count(ndev) > NETCP_MAX_MCAST_ADDR);
1434 1441
1442 spin_lock(&netcp->lock);
1435 /* first clear all marks */ 1443 /* first clear all marks */
1436 netcp_addr_clear_mark(netcp); 1444 netcp_addr_clear_mark(netcp);
1437 1445
@@ -1450,6 +1458,7 @@ static void netcp_set_rx_mode(struct net_device *ndev)
1450 /* finally sweep and callout into modules */ 1458 /* finally sweep and callout into modules */
1451 netcp_addr_sweep_del(netcp); 1459 netcp_addr_sweep_del(netcp);
1452 netcp_addr_sweep_add(netcp); 1460 netcp_addr_sweep_add(netcp);
1461 spin_unlock(&netcp->lock);
1453} 1462}
1454 1463
1455static void netcp_free_navigator_resources(struct netcp_intf *netcp) 1464static void netcp_free_navigator_resources(struct netcp_intf *netcp)
@@ -1614,7 +1623,6 @@ static int netcp_ndo_open(struct net_device *ndev)
1614 goto fail; 1623 goto fail;
1615 } 1624 }
1616 1625
1617 mutex_lock(&netcp_modules_lock);
1618 for_each_module(netcp, intf_modpriv) { 1626 for_each_module(netcp, intf_modpriv) {
1619 module = intf_modpriv->netcp_module; 1627 module = intf_modpriv->netcp_module;
1620 if (module->open) { 1628 if (module->open) {
@@ -1625,7 +1633,6 @@ static int netcp_ndo_open(struct net_device *ndev)
1625 } 1633 }
1626 } 1634 }
1627 } 1635 }
1628 mutex_unlock(&netcp_modules_lock);
1629 1636
1630 napi_enable(&netcp->rx_napi); 1637 napi_enable(&netcp->rx_napi);
1631 napi_enable(&netcp->tx_napi); 1638 napi_enable(&netcp->tx_napi);
@@ -1642,7 +1649,6 @@ fail_open:
1642 if (module->close) 1649 if (module->close)
1643 module->close(intf_modpriv->module_priv, ndev); 1650 module->close(intf_modpriv->module_priv, ndev);
1644 } 1651 }
1645 mutex_unlock(&netcp_modules_lock);
1646 1652
1647fail: 1653fail:
1648 netcp_free_navigator_resources(netcp); 1654 netcp_free_navigator_resources(netcp);
@@ -1666,7 +1672,6 @@ static int netcp_ndo_stop(struct net_device *ndev)
1666 napi_disable(&netcp->rx_napi); 1672 napi_disable(&netcp->rx_napi);
1667 napi_disable(&netcp->tx_napi); 1673 napi_disable(&netcp->tx_napi);
1668 1674
1669 mutex_lock(&netcp_modules_lock);
1670 for_each_module(netcp, intf_modpriv) { 1675 for_each_module(netcp, intf_modpriv) {
1671 module = intf_modpriv->netcp_module; 1676 module = intf_modpriv->netcp_module;
1672 if (module->close) { 1677 if (module->close) {
@@ -1675,7 +1680,6 @@ static int netcp_ndo_stop(struct net_device *ndev)
1675 dev_err(netcp->ndev_dev, "Close failed\n"); 1680 dev_err(netcp->ndev_dev, "Close failed\n");
1676 } 1681 }
1677 } 1682 }
1678 mutex_unlock(&netcp_modules_lock);
1679 1683
1680 /* Recycle Rx descriptors from completion queue */ 1684 /* Recycle Rx descriptors from completion queue */
1681 netcp_empty_rx_queue(netcp); 1685 netcp_empty_rx_queue(netcp);
@@ -1703,7 +1707,6 @@ static int netcp_ndo_ioctl(struct net_device *ndev,
1703 if (!netif_running(ndev)) 1707 if (!netif_running(ndev))
1704 return -EINVAL; 1708 return -EINVAL;
1705 1709
1706 mutex_lock(&netcp_modules_lock);
1707 for_each_module(netcp, intf_modpriv) { 1710 for_each_module(netcp, intf_modpriv) {
1708 module = intf_modpriv->netcp_module; 1711 module = intf_modpriv->netcp_module;
1709 if (!module->ioctl) 1712 if (!module->ioctl)
@@ -1719,7 +1722,6 @@ static int netcp_ndo_ioctl(struct net_device *ndev,
1719 } 1722 }
1720 1723
1721out: 1724out:
1722 mutex_unlock(&netcp_modules_lock);
1723 return (ret == 0) ? 0 : err; 1725 return (ret == 0) ? 0 : err;
1724} 1726}
1725 1727
@@ -1754,11 +1756,12 @@ static int netcp_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
1754 struct netcp_intf *netcp = netdev_priv(ndev); 1756 struct netcp_intf *netcp = netdev_priv(ndev);
1755 struct netcp_intf_modpriv *intf_modpriv; 1757 struct netcp_intf_modpriv *intf_modpriv;
1756 struct netcp_module *module; 1758 struct netcp_module *module;
1759 unsigned long flags;
1757 int err = 0; 1760 int err = 0;
1758 1761
1759 dev_dbg(netcp->ndev_dev, "adding rx vlan id: %d\n", vid); 1762 dev_dbg(netcp->ndev_dev, "adding rx vlan id: %d\n", vid);
1760 1763
1761 mutex_lock(&netcp_modules_lock); 1764 spin_lock_irqsave(&netcp->lock, flags);
1762 for_each_module(netcp, intf_modpriv) { 1765 for_each_module(netcp, intf_modpriv) {
1763 module = intf_modpriv->netcp_module; 1766 module = intf_modpriv->netcp_module;
1764 if ((module->add_vid) && (vid != 0)) { 1767 if ((module->add_vid) && (vid != 0)) {
@@ -1770,7 +1773,8 @@ static int netcp_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
1770 } 1773 }
1771 } 1774 }
1772 } 1775 }
1773 mutex_unlock(&netcp_modules_lock); 1776 spin_unlock_irqrestore(&netcp->lock, flags);
1777
1774 return err; 1778 return err;
1775} 1779}
1776 1780
@@ -1779,11 +1783,12 @@ static int netcp_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
1779 struct netcp_intf *netcp = netdev_priv(ndev); 1783 struct netcp_intf *netcp = netdev_priv(ndev);
1780 struct netcp_intf_modpriv *intf_modpriv; 1784 struct netcp_intf_modpriv *intf_modpriv;
1781 struct netcp_module *module; 1785 struct netcp_module *module;
1786 unsigned long flags;
1782 int err = 0; 1787 int err = 0;
1783 1788
1784 dev_dbg(netcp->ndev_dev, "removing rx vlan id: %d\n", vid); 1789 dev_dbg(netcp->ndev_dev, "removing rx vlan id: %d\n", vid);
1785 1790
1786 mutex_lock(&netcp_modules_lock); 1791 spin_lock_irqsave(&netcp->lock, flags);
1787 for_each_module(netcp, intf_modpriv) { 1792 for_each_module(netcp, intf_modpriv) {
1788 module = intf_modpriv->netcp_module; 1793 module = intf_modpriv->netcp_module;
1789 if (module->del_vid) { 1794 if (module->del_vid) {
@@ -1795,7 +1800,7 @@ static int netcp_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
1795 } 1800 }
1796 } 1801 }
1797 } 1802 }
1798 mutex_unlock(&netcp_modules_lock); 1803 spin_unlock_irqrestore(&netcp->lock, flags);
1799 return err; 1804 return err;
1800} 1805}
1801 1806
@@ -2040,7 +2045,6 @@ static int netcp_probe(struct platform_device *pdev)
2040 struct device_node *child, *interfaces; 2045 struct device_node *child, *interfaces;
2041 struct netcp_device *netcp_device; 2046 struct netcp_device *netcp_device;
2042 struct device *dev = &pdev->dev; 2047 struct device *dev = &pdev->dev;
2043 struct netcp_module *module;
2044 int ret; 2048 int ret;
2045 2049
2046 if (!node) { 2050 if (!node) {
@@ -2087,14 +2091,6 @@ static int netcp_probe(struct platform_device *pdev)
2087 /* Add the device instance to the list */ 2091 /* Add the device instance to the list */
2088 list_add_tail(&netcp_device->device_list, &netcp_devices); 2092 list_add_tail(&netcp_device->device_list, &netcp_devices);
2089 2093
2090 /* Probe & attach any modules already registered */
2091 mutex_lock(&netcp_modules_lock);
2092 for_each_netcp_module(module) {
2093 ret = netcp_module_probe(netcp_device, module);
2094 if (ret < 0)
2095 dev_err(dev, "module(%s) probe failed\n", module->name);
2096 }
2097 mutex_unlock(&netcp_modules_lock);
2098 return 0; 2094 return 0;
2099 2095
2100probe_quit_interface: 2096probe_quit_interface:
diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c
index 6f16d6aaf7b7..6bff8d82ceab 100644
--- a/drivers/net/ethernet/ti/netcp_ethss.c
+++ b/drivers/net/ethernet/ti/netcp_ethss.c
@@ -77,6 +77,7 @@
77#define GBENU_ALE_OFFSET 0x1e000 77#define GBENU_ALE_OFFSET 0x1e000
78#define GBENU_HOST_PORT_NUM 0 78#define GBENU_HOST_PORT_NUM 0
79#define GBENU_NUM_ALE_ENTRIES 1024 79#define GBENU_NUM_ALE_ENTRIES 1024
80#define GBENU_SGMII_MODULE_SIZE 0x100
80 81
81/* 10G Ethernet SS defines */ 82/* 10G Ethernet SS defines */
82#define XGBE_MODULE_NAME "netcp-xgbe" 83#define XGBE_MODULE_NAME "netcp-xgbe"
@@ -149,8 +150,8 @@
149#define XGBE_STATS2_MODULE 2 150#define XGBE_STATS2_MODULE 2
150 151
151/* s: 0-based slave_port */ 152/* s: 0-based slave_port */
152#define SGMII_BASE(s) \ 153#define SGMII_BASE(d, s) \
153 (((s) < 2) ? gbe_dev->sgmii_port_regs : gbe_dev->sgmii_port34_regs) 154 (((s) < 2) ? (d)->sgmii_port_regs : (d)->sgmii_port34_regs)
154 155
155#define GBE_TX_QUEUE 648 156#define GBE_TX_QUEUE 648
156#define GBE_TXHOOK_ORDER 0 157#define GBE_TXHOOK_ORDER 0
@@ -1997,13 +1998,8 @@ static void netcp_ethss_update_link_state(struct gbe_priv *gbe_dev,
1997 return; 1998 return;
1998 1999
1999 if (!SLAVE_LINK_IS_XGMII(slave)) { 2000 if (!SLAVE_LINK_IS_XGMII(slave)) {
2000 if (gbe_dev->ss_version == GBE_SS_VERSION_14) 2001 sgmii_link_state =
2001 sgmii_link_state = 2002 netcp_sgmii_get_port_link(SGMII_BASE(gbe_dev, sp), sp);
2002 netcp_sgmii_get_port_link(SGMII_BASE(sp), sp);
2003 else
2004 sgmii_link_state =
2005 netcp_sgmii_get_port_link(
2006 gbe_dev->sgmii_port_regs, sp);
2007 } 2003 }
2008 2004
2009 phy_link_state = gbe_phy_link_status(slave); 2005 phy_link_state = gbe_phy_link_status(slave);
@@ -2100,17 +2096,11 @@ static void gbe_port_config(struct gbe_priv *gbe_dev, struct gbe_slave *slave,
2100static void gbe_sgmii_rtreset(struct gbe_priv *priv, 2096static void gbe_sgmii_rtreset(struct gbe_priv *priv,
2101 struct gbe_slave *slave, bool set) 2097 struct gbe_slave *slave, bool set)
2102{ 2098{
2103 void __iomem *sgmii_port_regs;
2104
2105 if (SLAVE_LINK_IS_XGMII(slave)) 2099 if (SLAVE_LINK_IS_XGMII(slave))
2106 return; 2100 return;
2107 2101
2108 if ((priv->ss_version == GBE_SS_VERSION_14) && (slave->slave_num >= 2)) 2102 netcp_sgmii_rtreset(SGMII_BASE(priv, slave->slave_num),
2109 sgmii_port_regs = priv->sgmii_port34_regs; 2103 slave->slave_num, set);
2110 else
2111 sgmii_port_regs = priv->sgmii_port_regs;
2112
2113 netcp_sgmii_rtreset(sgmii_port_regs, slave->slave_num, set);
2114} 2104}
2115 2105
2116static void gbe_slave_stop(struct gbe_intf *intf) 2106static void gbe_slave_stop(struct gbe_intf *intf)
@@ -2136,17 +2126,12 @@ static void gbe_slave_stop(struct gbe_intf *intf)
2136 2126
2137static void gbe_sgmii_config(struct gbe_priv *priv, struct gbe_slave *slave) 2127static void gbe_sgmii_config(struct gbe_priv *priv, struct gbe_slave *slave)
2138{ 2128{
2139 void __iomem *sgmii_port_regs; 2129 if (SLAVE_LINK_IS_XGMII(slave))
2140 2130 return;
2141 sgmii_port_regs = priv->sgmii_port_regs;
2142 if ((priv->ss_version == GBE_SS_VERSION_14) && (slave->slave_num >= 2))
2143 sgmii_port_regs = priv->sgmii_port34_regs;
2144 2131
2145 if (!SLAVE_LINK_IS_XGMII(slave)) { 2132 netcp_sgmii_reset(SGMII_BASE(priv, slave->slave_num), slave->slave_num);
2146 netcp_sgmii_reset(sgmii_port_regs, slave->slave_num); 2133 netcp_sgmii_config(SGMII_BASE(priv, slave->slave_num), slave->slave_num,
2147 netcp_sgmii_config(sgmii_port_regs, slave->slave_num, 2134 slave->link_interface);
2148 slave->link_interface);
2149 }
2150} 2135}
2151 2136
2152static int gbe_slave_open(struct gbe_intf *gbe_intf) 2137static int gbe_slave_open(struct gbe_intf *gbe_intf)
@@ -2997,6 +2982,14 @@ static int set_gbenu_ethss_priv(struct gbe_priv *gbe_dev,
2997 gbe_dev->switch_regs = regs; 2982 gbe_dev->switch_regs = regs;
2998 2983
2999 gbe_dev->sgmii_port_regs = gbe_dev->ss_regs + GBENU_SGMII_MODULE_OFFSET; 2984 gbe_dev->sgmii_port_regs = gbe_dev->ss_regs + GBENU_SGMII_MODULE_OFFSET;
2985
2986 /* Although sgmii modules are mem mapped to one contiguous
2987 * region on GBENU devices, setting sgmii_port34_regs allows
2988 * consistent code when accessing sgmii api
2989 */
2990 gbe_dev->sgmii_port34_regs = gbe_dev->sgmii_port_regs +
2991 (2 * GBENU_SGMII_MODULE_SIZE);
2992
3000 gbe_dev->host_port_regs = gbe_dev->switch_regs + GBENU_HOST_PORT_OFFSET; 2993 gbe_dev->host_port_regs = gbe_dev->switch_regs + GBENU_HOST_PORT_OFFSET;
3001 2994
3002 for (i = 0; i < (gbe_dev->max_num_ports); i++) 2995 for (i = 0; i < (gbe_dev->max_num_ports); i++)
diff --git a/drivers/net/ethernet/via/Kconfig b/drivers/net/ethernet/via/Kconfig
index 2f1264b882b9..d3d094742a7e 100644
--- a/drivers/net/ethernet/via/Kconfig
+++ b/drivers/net/ethernet/via/Kconfig
@@ -17,7 +17,7 @@ if NET_VENDOR_VIA
17 17
18config VIA_RHINE 18config VIA_RHINE
19 tristate "VIA Rhine support" 19 tristate "VIA Rhine support"
20 depends on (PCI || OF_IRQ) 20 depends on PCI || (OF_IRQ && GENERIC_PCI_IOMAP)
21 depends on HAS_DMA 21 depends on HAS_DMA
22 select CRC32 22 select CRC32
23 select MII 23 select MII
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index 6008eee01a33..cf468c87ce57 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -828,6 +828,8 @@ static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev)
828 if (!phydev) 828 if (!phydev)
829 dev_info(dev, 829 dev_info(dev,
830 "MDIO of the phy is not registered yet\n"); 830 "MDIO of the phy is not registered yet\n");
831 else
832 put_device(&phydev->dev);
831 return 0; 833 return 0;
832 } 834 }
833 835
diff --git a/drivers/net/fjes/fjes_hw.c b/drivers/net/fjes/fjes_hw.c
index b5f4a78da828..2d3848c9dc35 100644
--- a/drivers/net/fjes/fjes_hw.c
+++ b/drivers/net/fjes/fjes_hw.c
@@ -1011,11 +1011,11 @@ static void fjes_hw_update_zone_task(struct work_struct *work)
1011 set_bit(epidx, &irq_bit); 1011 set_bit(epidx, &irq_bit);
1012 break; 1012 break;
1013 } 1013 }
1014 }
1015
1016 hw->ep_shm_info[epidx].es_status = info[epidx].es_status;
1017 hw->ep_shm_info[epidx].zone = info[epidx].zone;
1018 1014
1015 hw->ep_shm_info[epidx].es_status =
1016 info[epidx].es_status;
1017 hw->ep_shm_info[epidx].zone = info[epidx].zone;
1018 }
1019 break; 1019 break;
1020 } 1020 }
1021 1021
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index da3259ce7c8d..8f5c02eed47d 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -126,6 +126,8 @@ static void geneve_rx(struct geneve_sock *gs, struct sk_buff *skb)
126 __be32 addr; 126 __be32 addr;
127 int err; 127 int err;
128 128
129 iph = ip_hdr(skb); /* outer IP header... */
130
129 if (gs->collect_md) { 131 if (gs->collect_md) {
130 static u8 zero_vni[3]; 132 static u8 zero_vni[3];
131 133
@@ -133,7 +135,6 @@ static void geneve_rx(struct geneve_sock *gs, struct sk_buff *skb)
133 addr = 0; 135 addr = 0;
134 } else { 136 } else {
135 vni = gnvh->vni; 137 vni = gnvh->vni;
136 iph = ip_hdr(skb); /* Still outer IP header... */
137 addr = iph->saddr; 138 addr = iph->saddr;
138 } 139 }
139 140
@@ -178,7 +179,6 @@ static void geneve_rx(struct geneve_sock *gs, struct sk_buff *skb)
178 179
179 skb_reset_network_header(skb); 180 skb_reset_network_header(skb);
180 181
181 iph = ip_hdr(skb); /* Now inner IP header... */
182 err = IP_ECN_decapsulate(iph, skb); 182 err = IP_ECN_decapsulate(iph, skb);
183 183
184 if (unlikely(err)) { 184 if (unlikely(err)) {
@@ -626,6 +626,7 @@ static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev)
626 struct geneve_sock *gs = geneve->sock; 626 struct geneve_sock *gs = geneve->sock;
627 struct ip_tunnel_info *info = NULL; 627 struct ip_tunnel_info *info = NULL;
628 struct rtable *rt = NULL; 628 struct rtable *rt = NULL;
629 const struct iphdr *iip; /* interior IP header */
629 struct flowi4 fl4; 630 struct flowi4 fl4;
630 __u8 tos, ttl; 631 __u8 tos, ttl;
631 __be16 sport; 632 __be16 sport;
@@ -653,6 +654,8 @@ static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev)
653 sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true); 654 sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
654 skb_reset_mac_header(skb); 655 skb_reset_mac_header(skb);
655 656
657 iip = ip_hdr(skb);
658
656 if (info) { 659 if (info) {
657 const struct ip_tunnel_key *key = &info->key; 660 const struct ip_tunnel_key *key = &info->key;
658 u8 *opts = NULL; 661 u8 *opts = NULL;
@@ -668,19 +671,16 @@ static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev)
668 if (unlikely(err)) 671 if (unlikely(err))
669 goto err; 672 goto err;
670 673
671 tos = key->tos; 674 tos = ip_tunnel_ecn_encap(key->tos, iip, skb);
672 ttl = key->ttl; 675 ttl = key->ttl;
673 df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0; 676 df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
674 } else { 677 } else {
675 const struct iphdr *iip; /* interior IP header */
676
677 udp_csum = false; 678 udp_csum = false;
678 err = geneve_build_skb(rt, skb, 0, geneve->vni, 679 err = geneve_build_skb(rt, skb, 0, geneve->vni,
679 0, NULL, udp_csum); 680 0, NULL, udp_csum);
680 if (unlikely(err)) 681 if (unlikely(err))
681 goto err; 682 goto err;
682 683
683 iip = ip_hdr(skb);
684 tos = ip_tunnel_ecn_encap(fl4.flowi4_tos, iip, skb); 684 tos = ip_tunnel_ecn_encap(fl4.flowi4_tos, iip, skb);
685 ttl = geneve->ttl; 685 ttl = geneve->ttl;
686 if (!ttl && IN_MULTICAST(ntohl(fl4.daddr))) 686 if (!ttl && IN_MULTICAST(ntohl(fl4.daddr)))
@@ -748,12 +748,8 @@ static void geneve_setup(struct net_device *dev)
748 dev->features |= NETIF_F_RXCSUM; 748 dev->features |= NETIF_F_RXCSUM;
749 dev->features |= NETIF_F_GSO_SOFTWARE; 749 dev->features |= NETIF_F_GSO_SOFTWARE;
750 750
751 dev->vlan_features = dev->features;
752 dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
753
754 dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM; 751 dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
755 dev->hw_features |= NETIF_F_GSO_SOFTWARE; 752 dev->hw_features |= NETIF_F_GSO_SOFTWARE;
756 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
757 753
758 netif_keep_dst(dev); 754 netif_keep_dst(dev);
759 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE; 755 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE;
@@ -819,7 +815,7 @@ static struct geneve_dev *geneve_find_dev(struct geneve_net *gn,
819 815
820static int geneve_configure(struct net *net, struct net_device *dev, 816static int geneve_configure(struct net *net, struct net_device *dev,
821 __be32 rem_addr, __u32 vni, __u8 ttl, __u8 tos, 817 __be32 rem_addr, __u32 vni, __u8 ttl, __u8 tos,
822 __u16 dst_port, bool metadata) 818 __be16 dst_port, bool metadata)
823{ 819{
824 struct geneve_net *gn = net_generic(net, geneve_net_id); 820 struct geneve_net *gn = net_generic(net, geneve_net_id);
825 struct geneve_dev *t, *geneve = netdev_priv(dev); 821 struct geneve_dev *t, *geneve = netdev_priv(dev);
@@ -844,10 +840,10 @@ static int geneve_configure(struct net *net, struct net_device *dev,
844 840
845 geneve->ttl = ttl; 841 geneve->ttl = ttl;
846 geneve->tos = tos; 842 geneve->tos = tos;
847 geneve->dst_port = htons(dst_port); 843 geneve->dst_port = dst_port;
848 geneve->collect_md = metadata; 844 geneve->collect_md = metadata;
849 845
850 t = geneve_find_dev(gn, htons(dst_port), rem_addr, geneve->vni, 846 t = geneve_find_dev(gn, dst_port, rem_addr, geneve->vni,
851 &tun_on_same_port, &tun_collect_md); 847 &tun_on_same_port, &tun_collect_md);
852 if (t) 848 if (t)
853 return -EBUSY; 849 return -EBUSY;
@@ -871,7 +867,7 @@ static int geneve_configure(struct net *net, struct net_device *dev,
871static int geneve_newlink(struct net *net, struct net_device *dev, 867static int geneve_newlink(struct net *net, struct net_device *dev,
872 struct nlattr *tb[], struct nlattr *data[]) 868 struct nlattr *tb[], struct nlattr *data[])
873{ 869{
874 __u16 dst_port = GENEVE_UDP_PORT; 870 __be16 dst_port = htons(GENEVE_UDP_PORT);
875 __u8 ttl = 0, tos = 0; 871 __u8 ttl = 0, tos = 0;
876 bool metadata = false; 872 bool metadata = false;
877 __be32 rem_addr; 873 __be32 rem_addr;
@@ -890,7 +886,7 @@ static int geneve_newlink(struct net *net, struct net_device *dev,
890 tos = nla_get_u8(data[IFLA_GENEVE_TOS]); 886 tos = nla_get_u8(data[IFLA_GENEVE_TOS]);
891 887
892 if (data[IFLA_GENEVE_PORT]) 888 if (data[IFLA_GENEVE_PORT])
893 dst_port = nla_get_u16(data[IFLA_GENEVE_PORT]); 889 dst_port = nla_get_be16(data[IFLA_GENEVE_PORT]);
894 890
895 if (data[IFLA_GENEVE_COLLECT_METADATA]) 891 if (data[IFLA_GENEVE_COLLECT_METADATA])
896 metadata = true; 892 metadata = true;
@@ -913,7 +909,7 @@ static size_t geneve_get_size(const struct net_device *dev)
913 nla_total_size(sizeof(struct in_addr)) + /* IFLA_GENEVE_REMOTE */ 909 nla_total_size(sizeof(struct in_addr)) + /* IFLA_GENEVE_REMOTE */
914 nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_TTL */ 910 nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_TTL */
915 nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_TOS */ 911 nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_TOS */
916 nla_total_size(sizeof(__u16)) + /* IFLA_GENEVE_PORT */ 912 nla_total_size(sizeof(__be16)) + /* IFLA_GENEVE_PORT */
917 nla_total_size(0) + /* IFLA_GENEVE_COLLECT_METADATA */ 913 nla_total_size(0) + /* IFLA_GENEVE_COLLECT_METADATA */
918 0; 914 0;
919} 915}
@@ -935,7 +931,7 @@ static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev)
935 nla_put_u8(skb, IFLA_GENEVE_TOS, geneve->tos)) 931 nla_put_u8(skb, IFLA_GENEVE_TOS, geneve->tos))
936 goto nla_put_failure; 932 goto nla_put_failure;
937 933
938 if (nla_put_u16(skb, IFLA_GENEVE_PORT, ntohs(geneve->dst_port))) 934 if (nla_put_be16(skb, IFLA_GENEVE_PORT, geneve->dst_port))
939 goto nla_put_failure; 935 goto nla_put_failure;
940 936
941 if (geneve->collect_md) { 937 if (geneve->collect_md) {
@@ -975,7 +971,7 @@ struct net_device *geneve_dev_create_fb(struct net *net, const char *name,
975 if (IS_ERR(dev)) 971 if (IS_ERR(dev))
976 return dev; 972 return dev;
977 973
978 err = geneve_configure(net, dev, 0, 0, 0, 0, dst_port, true); 974 err = geneve_configure(net, dev, 0, 0, 0, 0, htons(dst_port), true);
979 if (err) { 975 if (err) {
980 free_netdev(dev); 976 free_netdev(dev);
981 return ERR_PTR(err); 977 return ERR_PTR(err);
diff --git a/drivers/net/irda/ali-ircc.c b/drivers/net/irda/ali-ircc.c
index 58ae11a14bb6..64bb44d5d867 100644
--- a/drivers/net/irda/ali-ircc.c
+++ b/drivers/net/irda/ali-ircc.c
@@ -1031,7 +1031,6 @@ static void ali_ircc_fir_change_speed(struct ali_ircc_cb *priv, __u32 baud)
1031static void ali_ircc_sir_change_speed(struct ali_ircc_cb *priv, __u32 speed) 1031static void ali_ircc_sir_change_speed(struct ali_ircc_cb *priv, __u32 speed)
1032{ 1032{
1033 struct ali_ircc_cb *self = priv; 1033 struct ali_ircc_cb *self = priv;
1034 unsigned long flags;
1035 int iobase; 1034 int iobase;
1036 int fcr; /* FIFO control reg */ 1035 int fcr; /* FIFO control reg */
1037 int lcr; /* Line control reg */ 1036 int lcr; /* Line control reg */
@@ -1061,8 +1060,6 @@ static void ali_ircc_sir_change_speed(struct ali_ircc_cb *priv, __u32 speed)
1061 /* Update accounting for new speed */ 1060 /* Update accounting for new speed */
1062 self->io.speed = speed; 1061 self->io.speed = speed;
1063 1062
1064 spin_lock_irqsave(&self->lock, flags);
1065
1066 divisor = 115200/speed; 1063 divisor = 115200/speed;
1067 1064
1068 fcr = UART_FCR_ENABLE_FIFO; 1065 fcr = UART_FCR_ENABLE_FIFO;
@@ -1089,9 +1086,6 @@ static void ali_ircc_sir_change_speed(struct ali_ircc_cb *priv, __u32 speed)
1089 /* without this, the connection will be broken after come back from FIR speed, 1086 /* without this, the connection will be broken after come back from FIR speed,
1090 but with this, the SIR connection is harder to established */ 1087 but with this, the SIR connection is harder to established */
1091 outb((UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2), iobase+UART_MCR); 1088 outb((UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2), iobase+UART_MCR);
1092
1093 spin_unlock_irqrestore(&self->lock, flags);
1094
1095} 1089}
1096 1090
1097static void ali_ircc_change_dongle_speed(struct ali_ircc_cb *priv, int speed) 1091static void ali_ircc_change_dongle_speed(struct ali_ircc_cb *priv, int speed)
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index edd77342773a..248478c6f6e4 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -1111,10 +1111,10 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
1111 return 0; 1111 return 0;
1112 1112
1113 case TUNSETSNDBUF: 1113 case TUNSETSNDBUF:
1114 if (get_user(u, up)) 1114 if (get_user(s, sp))
1115 return -EFAULT; 1115 return -EFAULT;
1116 1116
1117 q->sk.sk_sndbuf = u; 1117 q->sk.sk_sndbuf = s;
1118 return 0; 1118 return 0;
1119 1119
1120 case TUNGETVNETHDRSZ: 1120 case TUNGETVNETHDRSZ:
diff --git a/drivers/net/ntb_netdev.c b/drivers/net/ntb_netdev.c
index d8757bf9ad75..a9acf7156855 100644
--- a/drivers/net/ntb_netdev.c
+++ b/drivers/net/ntb_netdev.c
@@ -61,11 +61,21 @@ MODULE_VERSION(NTB_NETDEV_VER);
61MODULE_LICENSE("Dual BSD/GPL"); 61MODULE_LICENSE("Dual BSD/GPL");
62MODULE_AUTHOR("Intel Corporation"); 62MODULE_AUTHOR("Intel Corporation");
63 63
64/* Time in usecs for tx resource reaper */
65static unsigned int tx_time = 1;
66
67/* Number of descriptors to free before resuming tx */
68static unsigned int tx_start = 10;
69
70/* Number of descriptors still available before stop upper layer tx */
71static unsigned int tx_stop = 5;
72
64struct ntb_netdev { 73struct ntb_netdev {
65 struct list_head list; 74 struct list_head list;
66 struct pci_dev *pdev; 75 struct pci_dev *pdev;
67 struct net_device *ndev; 76 struct net_device *ndev;
68 struct ntb_transport_qp *qp; 77 struct ntb_transport_qp *qp;
78 struct timer_list tx_timer;
69}; 79};
70 80
71#define NTB_TX_TIMEOUT_MS 1000 81#define NTB_TX_TIMEOUT_MS 1000
@@ -136,11 +146,42 @@ enqueue_again:
136 } 146 }
137} 147}
138 148
149static int __ntb_netdev_maybe_stop_tx(struct net_device *netdev,
150 struct ntb_transport_qp *qp, int size)
151{
152 struct ntb_netdev *dev = netdev_priv(netdev);
153
154 netif_stop_queue(netdev);
155 /* Make sure to see the latest value of ntb_transport_tx_free_entry()
156 * since the queue was last started.
157 */
158 smp_mb();
159
160 if (likely(ntb_transport_tx_free_entry(qp) < size)) {
161 mod_timer(&dev->tx_timer, jiffies + usecs_to_jiffies(tx_time));
162 return -EBUSY;
163 }
164
165 netif_start_queue(netdev);
166 return 0;
167}
168
169static int ntb_netdev_maybe_stop_tx(struct net_device *ndev,
170 struct ntb_transport_qp *qp, int size)
171{
172 if (netif_queue_stopped(ndev) ||
173 (ntb_transport_tx_free_entry(qp) >= size))
174 return 0;
175
176 return __ntb_netdev_maybe_stop_tx(ndev, qp, size);
177}
178
139static void ntb_netdev_tx_handler(struct ntb_transport_qp *qp, void *qp_data, 179static void ntb_netdev_tx_handler(struct ntb_transport_qp *qp, void *qp_data,
140 void *data, int len) 180 void *data, int len)
141{ 181{
142 struct net_device *ndev = qp_data; 182 struct net_device *ndev = qp_data;
143 struct sk_buff *skb; 183 struct sk_buff *skb;
184 struct ntb_netdev *dev = netdev_priv(ndev);
144 185
145 skb = data; 186 skb = data;
146 if (!skb || !ndev) 187 if (!skb || !ndev)
@@ -155,6 +196,15 @@ static void ntb_netdev_tx_handler(struct ntb_transport_qp *qp, void *qp_data,
155 } 196 }
156 197
157 dev_kfree_skb(skb); 198 dev_kfree_skb(skb);
199
200 if (ntb_transport_tx_free_entry(dev->qp) >= tx_start) {
201 /* Make sure anybody stopping the queue after this sees the new
202 * value of ntb_transport_tx_free_entry()
203 */
204 smp_mb();
205 if (netif_queue_stopped(ndev))
206 netif_wake_queue(ndev);
207 }
158} 208}
159 209
160static netdev_tx_t ntb_netdev_start_xmit(struct sk_buff *skb, 210static netdev_tx_t ntb_netdev_start_xmit(struct sk_buff *skb,
@@ -163,10 +213,15 @@ static netdev_tx_t ntb_netdev_start_xmit(struct sk_buff *skb,
163 struct ntb_netdev *dev = netdev_priv(ndev); 213 struct ntb_netdev *dev = netdev_priv(ndev);
164 int rc; 214 int rc;
165 215
216 ntb_netdev_maybe_stop_tx(ndev, dev->qp, tx_stop);
217
166 rc = ntb_transport_tx_enqueue(dev->qp, skb, skb->data, skb->len); 218 rc = ntb_transport_tx_enqueue(dev->qp, skb, skb->data, skb->len);
167 if (rc) 219 if (rc)
168 goto err; 220 goto err;
169 221
222 /* check for next submit */
223 ntb_netdev_maybe_stop_tx(ndev, dev->qp, tx_stop);
224
170 return NETDEV_TX_OK; 225 return NETDEV_TX_OK;
171 226
172err: 227err:
@@ -175,6 +230,23 @@ err:
175 return NETDEV_TX_BUSY; 230 return NETDEV_TX_BUSY;
176} 231}
177 232
233static void ntb_netdev_tx_timer(unsigned long data)
234{
235 struct net_device *ndev = (struct net_device *)data;
236 struct ntb_netdev *dev = netdev_priv(ndev);
237
238 if (ntb_transport_tx_free_entry(dev->qp) < tx_stop) {
239 mod_timer(&dev->tx_timer, jiffies + msecs_to_jiffies(tx_time));
240 } else {
241 /* Make sure anybody stopping the queue after this sees the new
242 * value of ntb_transport_tx_free_entry()
243 */
244 smp_mb();
245 if (netif_queue_stopped(ndev))
246 netif_wake_queue(ndev);
247 }
248}
249
178static int ntb_netdev_open(struct net_device *ndev) 250static int ntb_netdev_open(struct net_device *ndev)
179{ 251{
180 struct ntb_netdev *dev = netdev_priv(ndev); 252 struct ntb_netdev *dev = netdev_priv(ndev);
@@ -197,8 +269,11 @@ static int ntb_netdev_open(struct net_device *ndev)
197 } 269 }
198 } 270 }
199 271
272 setup_timer(&dev->tx_timer, ntb_netdev_tx_timer, (unsigned long)ndev);
273
200 netif_carrier_off(ndev); 274 netif_carrier_off(ndev);
201 ntb_transport_link_up(dev->qp); 275 ntb_transport_link_up(dev->qp);
276 netif_start_queue(ndev);
202 277
203 return 0; 278 return 0;
204 279
@@ -219,6 +294,8 @@ static int ntb_netdev_close(struct net_device *ndev)
219 while ((skb = ntb_transport_rx_remove(dev->qp, &len))) 294 while ((skb = ntb_transport_rx_remove(dev->qp, &len)))
220 dev_kfree_skb(skb); 295 dev_kfree_skb(skb);
221 296
297 del_timer_sync(&dev->tx_timer);
298
222 return 0; 299 return 0;
223} 300}
224 301
diff --git a/drivers/net/phy/fixed_phy.c b/drivers/net/phy/fixed_phy.c
index fb1299c6326e..e23bf5b90e17 100644
--- a/drivers/net/phy/fixed_phy.c
+++ b/drivers/net/phy/fixed_phy.c
@@ -220,7 +220,7 @@ int fixed_phy_update_state(struct phy_device *phydev,
220 struct fixed_mdio_bus *fmb = &platform_fmb; 220 struct fixed_mdio_bus *fmb = &platform_fmb;
221 struct fixed_phy *fp; 221 struct fixed_phy *fp;
222 222
223 if (!phydev || !phydev->bus) 223 if (!phydev || phydev->bus != fmb->mii_bus)
224 return -EINVAL; 224 return -EINVAL;
225 225
226 list_for_each_entry(fp, &fmb->phys, node) { 226 list_for_each_entry(fp, &fmb->phys, node) {
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index e6897b6a8a53..5de8d5827536 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -785,6 +785,7 @@ static int marvell_read_status(struct phy_device *phydev)
785 int adv; 785 int adv;
786 int err; 786 int err;
787 int lpa; 787 int lpa;
788 int lpagb;
788 int status = 0; 789 int status = 0;
789 790
790 /* Update the link, but return if there 791 /* Update the link, but return if there
@@ -802,10 +803,17 @@ static int marvell_read_status(struct phy_device *phydev)
802 if (lpa < 0) 803 if (lpa < 0)
803 return lpa; 804 return lpa;
804 805
806 lpagb = phy_read(phydev, MII_STAT1000);
807 if (lpagb < 0)
808 return lpagb;
809
805 adv = phy_read(phydev, MII_ADVERTISE); 810 adv = phy_read(phydev, MII_ADVERTISE);
806 if (adv < 0) 811 if (adv < 0)
807 return adv; 812 return adv;
808 813
814 phydev->lp_advertising = mii_stat1000_to_ethtool_lpa_t(lpagb) |
815 mii_lpa_to_ethtool_lpa_t(lpa);
816
809 lpa &= adv; 817 lpa &= adv;
810 818
811 if (status & MII_M1011_PHY_STATUS_FULLDUPLEX) 819 if (status & MII_M1011_PHY_STATUS_FULLDUPLEX)
@@ -853,6 +861,7 @@ static int marvell_read_status(struct phy_device *phydev)
853 phydev->speed = SPEED_10; 861 phydev->speed = SPEED_10;
854 862
855 phydev->pause = phydev->asym_pause = 0; 863 phydev->pause = phydev->asym_pause = 0;
864 phydev->lp_advertising = 0;
856 } 865 }
857 866
858 return 0; 867 return 0;
diff --git a/drivers/net/phy/mdio-bcm-unimac.c b/drivers/net/phy/mdio-bcm-unimac.c
index 6a52a7f0fa0d..4bde5e728fe0 100644
--- a/drivers/net/phy/mdio-bcm-unimac.c
+++ b/drivers/net/phy/mdio-bcm-unimac.c
@@ -244,6 +244,7 @@ static const struct of_device_id unimac_mdio_ids[] = {
244 { .compatible = "brcm,unimac-mdio", }, 244 { .compatible = "brcm,unimac-mdio", },
245 { /* sentinel */ }, 245 { /* sentinel */ },
246}; 246};
247MODULE_DEVICE_TABLE(of, unimac_mdio_ids);
247 248
248static struct platform_driver unimac_mdio_driver = { 249static struct platform_driver unimac_mdio_driver = {
249 .driver = { 250 .driver = {
diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c
index 7dc21e56a7aa..3bc9f03349f3 100644
--- a/drivers/net/phy/mdio-gpio.c
+++ b/drivers/net/phy/mdio-gpio.c
@@ -261,6 +261,7 @@ static const struct of_device_id mdio_gpio_of_match[] = {
261 { .compatible = "virtual,mdio-gpio", }, 261 { .compatible = "virtual,mdio-gpio", },
262 { /* sentinel */ } 262 { /* sentinel */ }
263}; 263};
264MODULE_DEVICE_TABLE(of, mdio_gpio_of_match);
264 265
265static struct platform_driver mdio_gpio_driver = { 266static struct platform_driver mdio_gpio_driver = {
266 .probe = mdio_gpio_probe, 267 .probe = mdio_gpio_probe,
diff --git a/drivers/net/phy/mdio-mux.c b/drivers/net/phy/mdio-mux.c
index 4d4d25efc1e1..280c7c311f72 100644
--- a/drivers/net/phy/mdio-mux.c
+++ b/drivers/net/phy/mdio-mux.c
@@ -113,18 +113,18 @@ int mdio_mux_init(struct device *dev,
113 if (!parent_bus_node) 113 if (!parent_bus_node)
114 return -ENODEV; 114 return -ENODEV;
115 115
116 parent_bus = of_mdio_find_bus(parent_bus_node);
117 if (parent_bus == NULL) {
118 ret_val = -EPROBE_DEFER;
119 goto err_parent_bus;
120 }
121
122 pb = devm_kzalloc(dev, sizeof(*pb), GFP_KERNEL); 116 pb = devm_kzalloc(dev, sizeof(*pb), GFP_KERNEL);
123 if (pb == NULL) { 117 if (pb == NULL) {
124 ret_val = -ENOMEM; 118 ret_val = -ENOMEM;
125 goto err_parent_bus; 119 goto err_parent_bus;
126 } 120 }
127 121
122 parent_bus = of_mdio_find_bus(parent_bus_node);
123 if (parent_bus == NULL) {
124 ret_val = -EPROBE_DEFER;
125 goto err_parent_bus;
126 }
127
128 pb->switch_data = data; 128 pb->switch_data = data;
129 pb->switch_fn = switch_fn; 129 pb->switch_fn = switch_fn;
130 pb->current_child = -1; 130 pb->current_child = -1;
@@ -173,6 +173,10 @@ int mdio_mux_init(struct device *dev,
173 dev_info(dev, "Version " DRV_VERSION "\n"); 173 dev_info(dev, "Version " DRV_VERSION "\n");
174 return 0; 174 return 0;
175 } 175 }
176
177 /* balance the reference of_mdio_find_bus() took */
178 put_device(&pb->mii_bus->dev);
179
176err_parent_bus: 180err_parent_bus:
177 of_node_put(parent_bus_node); 181 of_node_put(parent_bus_node);
178 return ret_val; 182 return ret_val;
@@ -189,6 +193,9 @@ void mdio_mux_uninit(void *mux_handle)
189 mdiobus_free(cb->mii_bus); 193 mdiobus_free(cb->mii_bus);
190 cb = cb->next; 194 cb = cb->next;
191 } 195 }
196
197 /* balance the reference of_mdio_find_bus() in mdio_mux_init() took */
198 put_device(&pb->mii_bus->dev);
192} 199}
193EXPORT_SYMBOL_GPL(mdio_mux_uninit); 200EXPORT_SYMBOL_GPL(mdio_mux_uninit);
194 201
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 02a4615b65f8..12f44c53cc8e 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -167,7 +167,9 @@ static int of_mdio_bus_match(struct device *dev, const void *mdio_bus_np)
167 * of_mdio_find_bus - Given an mii_bus node, find the mii_bus. 167 * of_mdio_find_bus - Given an mii_bus node, find the mii_bus.
168 * @mdio_bus_np: Pointer to the mii_bus. 168 * @mdio_bus_np: Pointer to the mii_bus.
169 * 169 *
170 * Returns a pointer to the mii_bus, or NULL if none found. 170 * Returns a reference to the mii_bus, or NULL if none found. The
171 * embedded struct device will have its reference count incremented,
172 * and this must be put once the bus is finished with.
171 * 173 *
172 * Because the association of a device_node and mii_bus is made via 174 * Because the association of a device_node and mii_bus is made via
173 * of_mdiobus_register(), the mii_bus cannot be found before it is 175 * of_mdiobus_register(), the mii_bus cannot be found before it is
@@ -234,15 +236,18 @@ static inline void of_mdiobus_link_phydev(struct mii_bus *mdio,
234#endif 236#endif
235 237
236/** 238/**
237 * mdiobus_register - bring up all the PHYs on a given bus and attach them to bus 239 * __mdiobus_register - bring up all the PHYs on a given bus and attach them to bus
238 * @bus: target mii_bus 240 * @bus: target mii_bus
241 * @owner: module containing bus accessor functions
239 * 242 *
240 * Description: Called by a bus driver to bring up all the PHYs 243 * Description: Called by a bus driver to bring up all the PHYs
241 * on a given bus, and attach them to the bus. 244 * on a given bus, and attach them to the bus. Drivers should use
245 * mdiobus_register() rather than __mdiobus_register() unless they
246 * need to pass a specific owner module.
242 * 247 *
243 * Returns 0 on success or < 0 on error. 248 * Returns 0 on success or < 0 on error.
244 */ 249 */
245int mdiobus_register(struct mii_bus *bus) 250int __mdiobus_register(struct mii_bus *bus, struct module *owner)
246{ 251{
247 int i, err; 252 int i, err;
248 253
@@ -253,6 +258,7 @@ int mdiobus_register(struct mii_bus *bus)
253 BUG_ON(bus->state != MDIOBUS_ALLOCATED && 258 BUG_ON(bus->state != MDIOBUS_ALLOCATED &&
254 bus->state != MDIOBUS_UNREGISTERED); 259 bus->state != MDIOBUS_UNREGISTERED);
255 260
261 bus->owner = owner;
256 bus->dev.parent = bus->parent; 262 bus->dev.parent = bus->parent;
257 bus->dev.class = &mdio_bus_class; 263 bus->dev.class = &mdio_bus_class;
258 bus->dev.groups = NULL; 264 bus->dev.groups = NULL;
@@ -288,13 +294,16 @@ int mdiobus_register(struct mii_bus *bus)
288 294
289error: 295error:
290 while (--i >= 0) { 296 while (--i >= 0) {
291 if (bus->phy_map[i]) 297 struct phy_device *phydev = bus->phy_map[i];
292 device_unregister(&bus->phy_map[i]->dev); 298 if (phydev) {
299 phy_device_remove(phydev);
300 phy_device_free(phydev);
301 }
293 } 302 }
294 device_del(&bus->dev); 303 device_del(&bus->dev);
295 return err; 304 return err;
296} 305}
297EXPORT_SYMBOL(mdiobus_register); 306EXPORT_SYMBOL(__mdiobus_register);
298 307
299void mdiobus_unregister(struct mii_bus *bus) 308void mdiobus_unregister(struct mii_bus *bus)
300{ 309{
@@ -304,9 +313,11 @@ void mdiobus_unregister(struct mii_bus *bus)
304 bus->state = MDIOBUS_UNREGISTERED; 313 bus->state = MDIOBUS_UNREGISTERED;
305 314
306 for (i = 0; i < PHY_MAX_ADDR; i++) { 315 for (i = 0; i < PHY_MAX_ADDR; i++) {
307 if (bus->phy_map[i]) 316 struct phy_device *phydev = bus->phy_map[i];
308 device_unregister(&bus->phy_map[i]->dev); 317 if (phydev) {
309 bus->phy_map[i] = NULL; 318 phy_device_remove(phydev);
319 phy_device_free(phydev);
320 }
310 } 321 }
311 device_del(&bus->dev); 322 device_del(&bus->dev);
312} 323}
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index c0f211127274..f761288abe66 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -384,6 +384,24 @@ int phy_device_register(struct phy_device *phydev)
384EXPORT_SYMBOL(phy_device_register); 384EXPORT_SYMBOL(phy_device_register);
385 385
386/** 386/**
387 * phy_device_remove - Remove a previously registered phy device from the MDIO bus
388 * @phydev: phy_device structure to remove
389 *
390 * This doesn't free the phy_device itself, it merely reverses the effects
391 * of phy_device_register(). Use phy_device_free() to free the device
392 * after calling this function.
393 */
394void phy_device_remove(struct phy_device *phydev)
395{
396 struct mii_bus *bus = phydev->bus;
397 int addr = phydev->addr;
398
399 device_del(&phydev->dev);
400 bus->phy_map[addr] = NULL;
401}
402EXPORT_SYMBOL(phy_device_remove);
403
404/**
387 * phy_find_first - finds the first PHY device on the bus 405 * phy_find_first - finds the first PHY device on the bus
388 * @bus: the target MII bus 406 * @bus: the target MII bus
389 */ 407 */
@@ -578,14 +596,22 @@ EXPORT_SYMBOL(phy_init_hw);
578 * generic driver is used. The phy_device is given a ptr to 596 * generic driver is used. The phy_device is given a ptr to
579 * the attaching device, and given a callback for link status 597 * the attaching device, and given a callback for link status
580 * change. The phy_device is returned to the attaching driver. 598 * change. The phy_device is returned to the attaching driver.
599 * This function takes a reference on the phy device.
581 */ 600 */
582int phy_attach_direct(struct net_device *dev, struct phy_device *phydev, 601int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
583 u32 flags, phy_interface_t interface) 602 u32 flags, phy_interface_t interface)
584{ 603{
604 struct mii_bus *bus = phydev->bus;
585 struct device *d = &phydev->dev; 605 struct device *d = &phydev->dev;
586 struct module *bus_module;
587 int err; 606 int err;
588 607
608 if (!try_module_get(bus->owner)) {
609 dev_err(&dev->dev, "failed to get the bus module\n");
610 return -EIO;
611 }
612
613 get_device(d);
614
589 /* Assume that if there is no driver, that it doesn't 615 /* Assume that if there is no driver, that it doesn't
590 * exist, and we should use the genphy driver. 616 * exist, and we should use the genphy driver.
591 */ 617 */
@@ -600,20 +626,13 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
600 err = device_bind_driver(d); 626 err = device_bind_driver(d);
601 627
602 if (err) 628 if (err)
603 return err; 629 goto error;
604 } 630 }
605 631
606 if (phydev->attached_dev) { 632 if (phydev->attached_dev) {
607 dev_err(&dev->dev, "PHY already attached\n"); 633 dev_err(&dev->dev, "PHY already attached\n");
608 return -EBUSY; 634 err = -EBUSY;
609 } 635 goto error;
610
611 /* Increment the bus module reference count */
612 bus_module = phydev->bus->dev.driver ?
613 phydev->bus->dev.driver->owner : NULL;
614 if (!try_module_get(bus_module)) {
615 dev_err(&dev->dev, "failed to get the bus module\n");
616 return -EIO;
617 } 636 }
618 637
619 phydev->attached_dev = dev; 638 phydev->attached_dev = dev;
@@ -636,6 +655,11 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
636 phy_resume(phydev); 655 phy_resume(phydev);
637 656
638 return err; 657 return err;
658
659error:
660 put_device(d);
661 module_put(bus->owner);
662 return err;
639} 663}
640EXPORT_SYMBOL(phy_attach_direct); 664EXPORT_SYMBOL(phy_attach_direct);
641 665
@@ -677,14 +701,15 @@ EXPORT_SYMBOL(phy_attach);
677/** 701/**
678 * phy_detach - detach a PHY device from its network device 702 * phy_detach - detach a PHY device from its network device
679 * @phydev: target phy_device struct 703 * @phydev: target phy_device struct
704 *
705 * This detaches the phy device from its network device and the phy
706 * driver, and drops the reference count taken in phy_attach_direct().
680 */ 707 */
681void phy_detach(struct phy_device *phydev) 708void phy_detach(struct phy_device *phydev)
682{ 709{
710 struct mii_bus *bus;
683 int i; 711 int i;
684 712
685 if (phydev->bus->dev.driver)
686 module_put(phydev->bus->dev.driver->owner);
687
688 phydev->attached_dev->phydev = NULL; 713 phydev->attached_dev->phydev = NULL;
689 phydev->attached_dev = NULL; 714 phydev->attached_dev = NULL;
690 phy_suspend(phydev); 715 phy_suspend(phydev);
@@ -700,6 +725,15 @@ void phy_detach(struct phy_device *phydev)
700 break; 725 break;
701 } 726 }
702 } 727 }
728
729 /*
730 * The phydev might go away on the put_device() below, so avoid
731 * a use-after-free bug by reading the underlying bus first.
732 */
733 bus = phydev->bus;
734
735 put_device(&phydev->dev);
736 module_put(bus->owner);
703} 737}
704EXPORT_SYMBOL(phy_detach); 738EXPORT_SYMBOL(phy_detach);
705 739
diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c
index 17cad185169d..76cad712ddb2 100644
--- a/drivers/net/phy/vitesse.c
+++ b/drivers/net/phy/vitesse.c
@@ -66,7 +66,6 @@
66#define PHY_ID_VSC8244 0x000fc6c0 66#define PHY_ID_VSC8244 0x000fc6c0
67#define PHY_ID_VSC8514 0x00070670 67#define PHY_ID_VSC8514 0x00070670
68#define PHY_ID_VSC8574 0x000704a0 68#define PHY_ID_VSC8574 0x000704a0
69#define PHY_ID_VSC8641 0x00070431
70#define PHY_ID_VSC8662 0x00070660 69#define PHY_ID_VSC8662 0x00070660
71#define PHY_ID_VSC8221 0x000fc550 70#define PHY_ID_VSC8221 0x000fc550
72#define PHY_ID_VSC8211 0x000fc4b0 71#define PHY_ID_VSC8211 0x000fc4b0
@@ -273,18 +272,6 @@ static struct phy_driver vsc82xx_driver[] = {
273 .config_intr = &vsc82xx_config_intr, 272 .config_intr = &vsc82xx_config_intr,
274 .driver = { .owner = THIS_MODULE,}, 273 .driver = { .owner = THIS_MODULE,},
275}, { 274}, {
276 .phy_id = PHY_ID_VSC8641,
277 .name = "Vitesse VSC8641",
278 .phy_id_mask = 0x000ffff0,
279 .features = PHY_GBIT_FEATURES,
280 .flags = PHY_HAS_INTERRUPT,
281 .config_init = &vsc824x_config_init,
282 .config_aneg = &vsc82x4_config_aneg,
283 .read_status = &genphy_read_status,
284 .ack_interrupt = &vsc824x_ack_interrupt,
285 .config_intr = &vsc82xx_config_intr,
286 .driver = { .owner = THIS_MODULE,},
287}, {
288 .phy_id = PHY_ID_VSC8662, 275 .phy_id = PHY_ID_VSC8662,
289 .name = "Vitesse VSC8662", 276 .name = "Vitesse VSC8662",
290 .phy_id_mask = 0x000ffff0, 277 .phy_id_mask = 0x000ffff0,
@@ -331,7 +318,6 @@ static struct mdio_device_id __maybe_unused vitesse_tbl[] = {
331 { PHY_ID_VSC8244, 0x000fffc0 }, 318 { PHY_ID_VSC8244, 0x000fffc0 },
332 { PHY_ID_VSC8514, 0x000ffff0 }, 319 { PHY_ID_VSC8514, 0x000ffff0 },
333 { PHY_ID_VSC8574, 0x000ffff0 }, 320 { PHY_ID_VSC8574, 0x000ffff0 },
334 { PHY_ID_VSC8641, 0x000ffff0 },
335 { PHY_ID_VSC8662, 0x000ffff0 }, 321 { PHY_ID_VSC8662, 0x000ffff0 },
336 { PHY_ID_VSC8221, 0x000ffff0 }, 322 { PHY_ID_VSC8221, 0x000ffff0 },
337 { PHY_ID_VSC8211, 0x000ffff0 }, 323 { PHY_ID_VSC8211, 0x000ffff0 },
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 0481daf9201a..ed00446759b2 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -2755,6 +2755,7 @@ static struct ppp *ppp_create_interface(struct net *net, int unit,
2755 */ 2755 */
2756 dev_net_set(dev, net); 2756 dev_net_set(dev, net);
2757 2757
2758 rtnl_lock();
2758 mutex_lock(&pn->all_ppp_mutex); 2759 mutex_lock(&pn->all_ppp_mutex);
2759 2760
2760 if (unit < 0) { 2761 if (unit < 0) {
@@ -2785,7 +2786,7 @@ static struct ppp *ppp_create_interface(struct net *net, int unit,
2785 ppp->file.index = unit; 2786 ppp->file.index = unit;
2786 sprintf(dev->name, "ppp%d", unit); 2787 sprintf(dev->name, "ppp%d", unit);
2787 2788
2788 ret = register_netdev(dev); 2789 ret = register_netdevice(dev);
2789 if (ret != 0) { 2790 if (ret != 0) {
2790 unit_put(&pn->units_idr, unit); 2791 unit_put(&pn->units_idr, unit);
2791 netdev_err(ppp->dev, "PPP: couldn't register device %s (%d)\n", 2792 netdev_err(ppp->dev, "PPP: couldn't register device %s (%d)\n",
@@ -2797,6 +2798,7 @@ static struct ppp *ppp_create_interface(struct net *net, int unit,
2797 2798
2798 atomic_inc(&ppp_unit_count); 2799 atomic_inc(&ppp_unit_count);
2799 mutex_unlock(&pn->all_ppp_mutex); 2800 mutex_unlock(&pn->all_ppp_mutex);
2801 rtnl_unlock();
2800 2802
2801 *retp = 0; 2803 *retp = 0;
2802 return ppp; 2804 return ppp;
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 167cfc503a78..3a8a36c8ded1 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -585,4 +585,15 @@ config USB_VL600
585 585
586 http://ubuntuforums.org/showpost.php?p=10589647&postcount=17 586 http://ubuntuforums.org/showpost.php?p=10589647&postcount=17
587 587
588config USB_NET_CH9200
589 tristate "QingHeng CH9200 USB ethernet support"
590 depends on USB_USBNET
591 select MII
592 help
593 Choose this option if you have a USB ethernet adapter with a QinHeng
594 CH9200 chipset.
595
596 To compile this driver as a module, choose M here: the
597 module will be called ch9200.
598
588endif # USB_NET_DRIVERS 599endif # USB_NET_DRIVERS
diff --git a/drivers/net/usb/Makefile b/drivers/net/usb/Makefile
index cf6a0e610a7f..b5f04068dbe4 100644
--- a/drivers/net/usb/Makefile
+++ b/drivers/net/usb/Makefile
@@ -38,4 +38,4 @@ obj-$(CONFIG_USB_NET_HUAWEI_CDC_NCM) += huawei_cdc_ncm.o
38obj-$(CONFIG_USB_VL600) += lg-vl600.o 38obj-$(CONFIG_USB_VL600) += lg-vl600.o
39obj-$(CONFIG_USB_NET_QMI_WWAN) += qmi_wwan.o 39obj-$(CONFIG_USB_NET_QMI_WWAN) += qmi_wwan.o
40obj-$(CONFIG_USB_NET_CDC_MBIM) += cdc_mbim.o 40obj-$(CONFIG_USB_NET_CDC_MBIM) += cdc_mbim.o
41 41obj-$(CONFIG_USB_NET_CH9200) += ch9200.o
diff --git a/drivers/net/usb/ch9200.c b/drivers/net/usb/ch9200.c
new file mode 100644
index 000000000000..5e151e6a3e09
--- /dev/null
+++ b/drivers/net/usb/ch9200.c
@@ -0,0 +1,432 @@
1/*
2 * USB 10M/100M ethernet adapter
3 *
4 * This file is licensed under the terms of the GNU General Public License
5 * version 2. This program is licensed "as is" without any warranty of any
6 * kind, whether express or implied
7 *
8 */
9
10#include <linux/kernel.h>
11#include <linux/module.h>
12#include <linux/sched.h>
13#include <linux/stddef.h>
14#include <linux/init.h>
15#include <linux/netdevice.h>
16#include <linux/etherdevice.h>
17#include <linux/ethtool.h>
18#include <linux/mii.h>
19#include <linux/usb.h>
20#include <linux/crc32.h>
21#include <linux/usb/usbnet.h>
22#include <linux/slab.h>
23
24#define CH9200_VID 0x1A86
25#define CH9200_PID_E092 0xE092
26
27#define CTRL_TIMEOUT_MS 1000
28
29#define CONTROL_TIMEOUT_MS 1000
30
31#define REQUEST_READ 0x0E
32#define REQUEST_WRITE 0x0F
33
34/* Address space:
35 * 00-63 : MII
36 * 64-128: MAC
37 *
38 * Note: all accesses must be 16-bit
39 */
40
41#define MAC_REG_CTRL 64
42#define MAC_REG_STATUS 66
43#define MAC_REG_INTERRUPT_MASK 68
44#define MAC_REG_PHY_COMMAND 70
45#define MAC_REG_PHY_DATA 72
46#define MAC_REG_STATION_L 74
47#define MAC_REG_STATION_M 76
48#define MAC_REG_STATION_H 78
49#define MAC_REG_HASH_L 80
50#define MAC_REG_HASH_M1 82
51#define MAC_REG_HASH_M2 84
52#define MAC_REG_HASH_H 86
53#define MAC_REG_THRESHOLD 88
54#define MAC_REG_FIFO_DEPTH 90
55#define MAC_REG_PAUSE 92
56#define MAC_REG_FLOW_CONTROL 94
57
58/* Control register bits
59 *
60 * Note: bits 13 and 15 are reserved
61 */
62#define LOOPBACK (0x01 << 14)
63#define BASE100X (0x01 << 12)
64#define MBPS_10 (0x01 << 11)
65#define DUPLEX_MODE (0x01 << 10)
66#define PAUSE_FRAME (0x01 << 9)
67#define PROMISCUOUS (0x01 << 8)
68#define MULTICAST (0x01 << 7)
69#define BROADCAST (0x01 << 6)
70#define HASH (0x01 << 5)
71#define APPEND_PAD (0x01 << 4)
72#define APPEND_CRC (0x01 << 3)
73#define TRANSMITTER_ACTION (0x01 << 2)
74#define RECEIVER_ACTION (0x01 << 1)
75#define DMA_ACTION (0x01 << 0)
76
77/* Status register bits
78 *
79 * Note: bits 7-15 are reserved
80 */
81#define ALIGNMENT (0x01 << 6)
82#define FIFO_OVER_RUN (0x01 << 5)
83#define FIFO_UNDER_RUN (0x01 << 4)
84#define RX_ERROR (0x01 << 3)
85#define RX_COMPLETE (0x01 << 2)
86#define TX_ERROR (0x01 << 1)
87#define TX_COMPLETE (0x01 << 0)
88
89/* FIFO depth register bits
90 *
91 * Note: bits 6 and 14 are reserved
92 */
93
94#define ETH_TXBD (0x01 << 15)
95#define ETN_TX_FIFO_DEPTH (0x01 << 8)
96#define ETH_RXBD (0x01 << 7)
97#define ETH_RX_FIFO_DEPTH (0x01 << 0)
98
99static int control_read(struct usbnet *dev,
100 unsigned char request, unsigned short value,
101 unsigned short index, void *data, unsigned short size,
102 int timeout)
103{
104 unsigned char *buf = NULL;
105 unsigned char request_type;
106 int err = 0;
107
108 if (request == REQUEST_READ)
109 request_type = (USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_OTHER);
110 else
111 request_type = (USB_DIR_IN | USB_TYPE_VENDOR |
112 USB_RECIP_DEVICE);
113
114 netdev_dbg(dev->net, "Control_read() index=0x%02x size=%d\n",
115 index, size);
116
117 buf = kmalloc(size, GFP_KERNEL);
118 if (!buf) {
119 err = -ENOMEM;
120 goto err_out;
121 }
122
123 err = usb_control_msg(dev->udev,
124 usb_rcvctrlpipe(dev->udev, 0),
125 request, request_type, value, index, buf, size,
126 timeout);
127 if (err == size)
128 memcpy(data, buf, size);
129 else if (err >= 0)
130 err = -EINVAL;
131 kfree(buf);
132
133 return err;
134
135err_out:
136 return err;
137}
138
139static int control_write(struct usbnet *dev, unsigned char request,
140 unsigned short value, unsigned short index,
141 void *data, unsigned short size, int timeout)
142{
143 unsigned char *buf = NULL;
144 unsigned char request_type;
145 int err = 0;
146
147 if (request == REQUEST_WRITE)
148 request_type = (USB_DIR_OUT | USB_TYPE_VENDOR |
149 USB_RECIP_OTHER);
150 else
151 request_type = (USB_DIR_OUT | USB_TYPE_VENDOR |
152 USB_RECIP_DEVICE);
153
154 netdev_dbg(dev->net, "Control_write() index=0x%02x size=%d\n",
155 index, size);
156
157 if (data) {
158 buf = kmalloc(size, GFP_KERNEL);
159 if (!buf) {
160 err = -ENOMEM;
161 goto err_out;
162 }
163 memcpy(buf, data, size);
164 }
165
166 err = usb_control_msg(dev->udev,
167 usb_sndctrlpipe(dev->udev, 0),
168 request, request_type, value, index, buf, size,
169 timeout);
170 if (err >= 0 && err < size)
171 err = -EINVAL;
172 kfree(buf);
173
174 return 0;
175
176err_out:
177 return err;
178}
179
180static int ch9200_mdio_read(struct net_device *netdev, int phy_id, int loc)
181{
182 struct usbnet *dev = netdev_priv(netdev);
183 unsigned char buff[2];
184
185 netdev_dbg(netdev, "ch9200_mdio_read phy_id:%02x loc:%02x\n",
186 phy_id, loc);
187
188 if (phy_id != 0)
189 return -ENODEV;
190
191 control_read(dev, REQUEST_READ, 0, loc * 2, buff, 0x02,
192 CONTROL_TIMEOUT_MS);
193
194 return (buff[0] | buff[1] << 8);
195}
196
197static void ch9200_mdio_write(struct net_device *netdev,
198 int phy_id, int loc, int val)
199{
200 struct usbnet *dev = netdev_priv(netdev);
201 unsigned char buff[2];
202
203 netdev_dbg(netdev, "ch9200_mdio_write() phy_id=%02x loc:%02x\n",
204 phy_id, loc);
205
206 if (phy_id != 0)
207 return;
208
209 buff[0] = (unsigned char)val;
210 buff[1] = (unsigned char)(val >> 8);
211
212 control_write(dev, REQUEST_WRITE, 0, loc * 2, buff, 0x02,
213 CONTROL_TIMEOUT_MS);
214}
215
216static int ch9200_link_reset(struct usbnet *dev)
217{
218 struct ethtool_cmd ecmd;
219
220 mii_check_media(&dev->mii, 1, 1);
221 mii_ethtool_gset(&dev->mii, &ecmd);
222
223 netdev_dbg(dev->net, "link_reset() speed:%d duplex:%d\n",
224 ecmd.speed, ecmd.duplex);
225
226 return 0;
227}
228
229static void ch9200_status(struct usbnet *dev, struct urb *urb)
230{
231 int link;
232 unsigned char *buf;
233
234 if (urb->actual_length < 16)
235 return;
236
237 buf = urb->transfer_buffer;
238 link = !!(buf[0] & 0x01);
239
240 if (link) {
241 netif_carrier_on(dev->net);
242 usbnet_defer_kevent(dev, EVENT_LINK_RESET);
243 } else {
244 netif_carrier_off(dev->net);
245 }
246}
247
248static struct sk_buff *ch9200_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
249 gfp_t flags)
250{
251 int i = 0;
252 int len = 0;
253 int tx_overhead = 0;
254
255 tx_overhead = 0x40;
256
257 len = skb->len;
258 if (skb_headroom(skb) < tx_overhead) {
259 struct sk_buff *skb2;
260
261 skb2 = skb_copy_expand(skb, tx_overhead, 0, flags);
262 dev_kfree_skb_any(skb);
263 skb = skb2;
264 if (!skb)
265 return NULL;
266 }
267
268 __skb_push(skb, tx_overhead);
269 /* usbnet adds padding if length is a multiple of packet size
270 * if so, adjust length value in header
271 */
272 if ((skb->len % dev->maxpacket) == 0)
273 len++;
274
275 skb->data[0] = len;
276 skb->data[1] = len >> 8;
277 skb->data[2] = 0x00;
278 skb->data[3] = 0x80;
279
280 for (i = 4; i < 48; i++)
281 skb->data[i] = 0x00;
282
283 skb->data[48] = len;
284 skb->data[49] = len >> 8;
285 skb->data[50] = 0x00;
286 skb->data[51] = 0x80;
287
288 for (i = 52; i < 64; i++)
289 skb->data[i] = 0x00;
290
291 return skb;
292}
293
294static int ch9200_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
295{
296 int len = 0;
297 int rx_overhead = 0;
298
299 rx_overhead = 64;
300
301 if (unlikely(skb->len < rx_overhead)) {
302 dev_err(&dev->udev->dev, "unexpected tiny rx frame\n");
303 return 0;
304 }
305
306 len = (skb->data[skb->len - 16] | skb->data[skb->len - 15] << 8);
307 skb_trim(skb, len);
308
309 return 1;
310}
311
312static int get_mac_address(struct usbnet *dev, unsigned char *data)
313{
314 int err = 0;
315 unsigned char mac_addr[0x06];
316 int rd_mac_len = 0;
317
318 netdev_dbg(dev->net, "get_mac_address:\n\tusbnet VID:%0x PID:%0x\n",
319 dev->udev->descriptor.idVendor,
320 dev->udev->descriptor.idProduct);
321
322 memset(mac_addr, 0, sizeof(mac_addr));
323 rd_mac_len = control_read(dev, REQUEST_READ, 0,
324 MAC_REG_STATION_L, mac_addr, 0x02,
325 CONTROL_TIMEOUT_MS);
326 rd_mac_len += control_read(dev, REQUEST_READ, 0, MAC_REG_STATION_M,
327 mac_addr + 2, 0x02, CONTROL_TIMEOUT_MS);
328 rd_mac_len += control_read(dev, REQUEST_READ, 0, MAC_REG_STATION_H,
329 mac_addr + 4, 0x02, CONTROL_TIMEOUT_MS);
330 if (rd_mac_len != ETH_ALEN)
331 err = -EINVAL;
332
333 data[0] = mac_addr[5];
334 data[1] = mac_addr[4];
335 data[2] = mac_addr[3];
336 data[3] = mac_addr[2];
337 data[4] = mac_addr[1];
338 data[5] = mac_addr[0];
339
340 return err;
341}
342
343static int ch9200_bind(struct usbnet *dev, struct usb_interface *intf)
344{
345 int retval = 0;
346 unsigned char data[2];
347
348 retval = usbnet_get_endpoints(dev, intf);
349 if (retval)
350 return retval;
351
352 dev->mii.dev = dev->net;
353 dev->mii.mdio_read = ch9200_mdio_read;
354 dev->mii.mdio_write = ch9200_mdio_write;
355 dev->mii.reg_num_mask = 0x1f;
356
357 dev->mii.phy_id_mask = 0x1f;
358
359 dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
360 dev->rx_urb_size = 24 * 64 + 16;
361 mii_nway_restart(&dev->mii);
362
363 data[0] = 0x01;
364 data[1] = 0x0F;
365 retval = control_write(dev, REQUEST_WRITE, 0, MAC_REG_THRESHOLD, data,
366 0x02, CONTROL_TIMEOUT_MS);
367
368 data[0] = 0xA0;
369 data[1] = 0x90;
370 retval = control_write(dev, REQUEST_WRITE, 0, MAC_REG_FIFO_DEPTH, data,
371 0x02, CONTROL_TIMEOUT_MS);
372
373 data[0] = 0x30;
374 data[1] = 0x00;
375 retval = control_write(dev, REQUEST_WRITE, 0, MAC_REG_PAUSE, data,
376 0x02, CONTROL_TIMEOUT_MS);
377
378 data[0] = 0x17;
379 data[1] = 0xD8;
380 retval = control_write(dev, REQUEST_WRITE, 0, MAC_REG_FLOW_CONTROL,
381 data, 0x02, CONTROL_TIMEOUT_MS);
382
383 /* Undocumented register */
384 data[0] = 0x01;
385 data[1] = 0x00;
386 retval = control_write(dev, REQUEST_WRITE, 0, 254, data, 0x02,
387 CONTROL_TIMEOUT_MS);
388
389 data[0] = 0x5F;
390 data[1] = 0x0D;
391 retval = control_write(dev, REQUEST_WRITE, 0, MAC_REG_CTRL, data, 0x02,
392 CONTROL_TIMEOUT_MS);
393
394 retval = get_mac_address(dev, dev->net->dev_addr);
395
396 return retval;
397}
398
399static const struct driver_info ch9200_info = {
400 .description = "CH9200 USB to Network Adaptor",
401 .flags = FLAG_ETHER,
402 .bind = ch9200_bind,
403 .rx_fixup = ch9200_rx_fixup,
404 .tx_fixup = ch9200_tx_fixup,
405 .status = ch9200_status,
406 .link_reset = ch9200_link_reset,
407 .reset = ch9200_link_reset,
408};
409
410static const struct usb_device_id ch9200_products[] = {
411 {
412 USB_DEVICE(0x1A86, 0xE092),
413 .driver_info = (unsigned long)&ch9200_info,
414 },
415 {},
416};
417
418MODULE_DEVICE_TABLE(usb, ch9200_products);
419
420static struct usb_driver ch9200_driver = {
421 .name = "ch9200",
422 .id_table = ch9200_products,
423 .probe = usbnet_probe,
424 .disconnect = usbnet_disconnect,
425 .suspend = usbnet_suspend,
426 .resume = usbnet_resume,
427};
428
429module_usb_driver(ch9200_driver);
430
431MODULE_DESCRIPTION("QinHeng CH9200 USB Network device");
432MODULE_LICENSE("GPL");
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 637e9fd1e14c..4ecb3a3e516a 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -193,7 +193,8 @@ static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb,
193 .flowi4_oif = vrf_dev->ifindex, 193 .flowi4_oif = vrf_dev->ifindex,
194 .flowi4_iif = LOOPBACK_IFINDEX, 194 .flowi4_iif = LOOPBACK_IFINDEX,
195 .flowi4_tos = RT_TOS(ip4h->tos), 195 .flowi4_tos = RT_TOS(ip4h->tos),
196 .flowi4_flags = FLOWI_FLAG_ANYSRC | FLOWI_FLAG_VRFSRC, 196 .flowi4_flags = FLOWI_FLAG_ANYSRC | FLOWI_FLAG_VRFSRC |
197 FLOWI_FLAG_SKIP_NH_OIF,
197 .daddr = ip4h->daddr, 198 .daddr = ip4h->daddr,
198 }; 199 };
199 200
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index cf8b7f0473b3..bbac1d35ed4e 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -2392,10 +2392,6 @@ static void vxlan_setup(struct net_device *dev)
2392 2392
2393 eth_hw_addr_random(dev); 2393 eth_hw_addr_random(dev);
2394 ether_setup(dev); 2394 ether_setup(dev);
2395 if (vxlan->default_dst.remote_ip.sa.sa_family == AF_INET6)
2396 dev->needed_headroom = ETH_HLEN + VXLAN6_HEADROOM;
2397 else
2398 dev->needed_headroom = ETH_HLEN + VXLAN_HEADROOM;
2399 2395
2400 dev->netdev_ops = &vxlan_netdev_ops; 2396 dev->netdev_ops = &vxlan_netdev_ops;
2401 dev->destructor = free_netdev; 2397 dev->destructor = free_netdev;
@@ -2640,8 +2636,11 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
2640 dst->remote_ip.sa.sa_family = AF_INET; 2636 dst->remote_ip.sa.sa_family = AF_INET;
2641 2637
2642 if (dst->remote_ip.sa.sa_family == AF_INET6 || 2638 if (dst->remote_ip.sa.sa_family == AF_INET6 ||
2643 vxlan->cfg.saddr.sa.sa_family == AF_INET6) 2639 vxlan->cfg.saddr.sa.sa_family == AF_INET6) {
2640 if (!IS_ENABLED(CONFIG_IPV6))
2641 return -EPFNOSUPPORT;
2644 use_ipv6 = true; 2642 use_ipv6 = true;
2643 }
2645 2644
2646 if (conf->remote_ifindex) { 2645 if (conf->remote_ifindex) {
2647 struct net_device *lowerdev 2646 struct net_device *lowerdev
@@ -2670,8 +2669,12 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
2670 2669
2671 dev->needed_headroom = lowerdev->hard_header_len + 2670 dev->needed_headroom = lowerdev->hard_header_len +
2672 (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM); 2671 (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);
2673 } else if (use_ipv6) 2672 } else if (use_ipv6) {
2674 vxlan->flags |= VXLAN_F_IPV6; 2673 vxlan->flags |= VXLAN_F_IPV6;
2674 dev->needed_headroom = ETH_HLEN + VXLAN6_HEADROOM;
2675 } else {
2676 dev->needed_headroom = ETH_HLEN + VXLAN_HEADROOM;
2677 }
2675 2678
2676 memcpy(&vxlan->cfg, conf, sizeof(*conf)); 2679 memcpy(&vxlan->cfg, conf, sizeof(*conf));
2677 if (!vxlan->cfg.dst_port) 2680 if (!vxlan->cfg.dst_port)
diff --git a/drivers/ntb/hw/intel/ntb_hw_intel.c b/drivers/ntb/hw/intel/ntb_hw_intel.c
index 87751cfd6f4f..865a3e3cc581 100644
--- a/drivers/ntb/hw/intel/ntb_hw_intel.c
+++ b/drivers/ntb/hw/intel/ntb_hw_intel.c
@@ -190,14 +190,17 @@ static inline int pdev_is_xeon(struct pci_dev *pdev)
190 case PCI_DEVICE_ID_INTEL_NTB_SS_SNB: 190 case PCI_DEVICE_ID_INTEL_NTB_SS_SNB:
191 case PCI_DEVICE_ID_INTEL_NTB_SS_IVT: 191 case PCI_DEVICE_ID_INTEL_NTB_SS_IVT:
192 case PCI_DEVICE_ID_INTEL_NTB_SS_HSX: 192 case PCI_DEVICE_ID_INTEL_NTB_SS_HSX:
193 case PCI_DEVICE_ID_INTEL_NTB_SS_BDX:
193 case PCI_DEVICE_ID_INTEL_NTB_PS_JSF: 194 case PCI_DEVICE_ID_INTEL_NTB_PS_JSF:
194 case PCI_DEVICE_ID_INTEL_NTB_PS_SNB: 195 case PCI_DEVICE_ID_INTEL_NTB_PS_SNB:
195 case PCI_DEVICE_ID_INTEL_NTB_PS_IVT: 196 case PCI_DEVICE_ID_INTEL_NTB_PS_IVT:
196 case PCI_DEVICE_ID_INTEL_NTB_PS_HSX: 197 case PCI_DEVICE_ID_INTEL_NTB_PS_HSX:
198 case PCI_DEVICE_ID_INTEL_NTB_PS_BDX:
197 case PCI_DEVICE_ID_INTEL_NTB_B2B_JSF: 199 case PCI_DEVICE_ID_INTEL_NTB_B2B_JSF:
198 case PCI_DEVICE_ID_INTEL_NTB_B2B_SNB: 200 case PCI_DEVICE_ID_INTEL_NTB_B2B_SNB:
199 case PCI_DEVICE_ID_INTEL_NTB_B2B_IVT: 201 case PCI_DEVICE_ID_INTEL_NTB_B2B_IVT:
200 case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX: 202 case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX:
203 case PCI_DEVICE_ID_INTEL_NTB_B2B_BDX:
201 return 1; 204 return 1;
202 } 205 }
203 return 0; 206 return 0;
@@ -237,7 +240,7 @@ static inline int ndev_ignore_unsafe(struct intel_ntb_dev *ndev,
237 240
238static int ndev_mw_to_bar(struct intel_ntb_dev *ndev, int idx) 241static int ndev_mw_to_bar(struct intel_ntb_dev *ndev, int idx)
239{ 242{
240 if (idx < 0 || idx > ndev->mw_count) 243 if (idx < 0 || idx >= ndev->mw_count)
241 return -EINVAL; 244 return -EINVAL;
242 return ndev->reg->mw_bar[idx]; 245 return ndev->reg->mw_bar[idx];
243} 246}
@@ -572,10 +575,13 @@ static ssize_t ndev_debugfs_read(struct file *filp, char __user *ubuf,
572 "Connection Topology -\t%s\n", 575 "Connection Topology -\t%s\n",
573 ntb_topo_string(ndev->ntb.topo)); 576 ntb_topo_string(ndev->ntb.topo));
574 577
575 off += scnprintf(buf + off, buf_size - off, 578 if (ndev->b2b_idx != UINT_MAX) {
576 "B2B Offset -\t\t%#lx\n", ndev->b2b_off); 579 off += scnprintf(buf + off, buf_size - off,
577 off += scnprintf(buf + off, buf_size - off, 580 "B2B MW Idx -\t\t%u\n", ndev->b2b_idx);
578 "B2B MW Idx -\t\t%d\n", ndev->b2b_idx); 581 off += scnprintf(buf + off, buf_size - off,
582 "B2B Offset -\t\t%#lx\n", ndev->b2b_off);
583 }
584
579 off += scnprintf(buf + off, buf_size - off, 585 off += scnprintf(buf + off, buf_size - off,
580 "BAR4 Split -\t\t%s\n", 586 "BAR4 Split -\t\t%s\n",
581 ndev->bar4_split ? "yes" : "no"); 587 ndev->bar4_split ? "yes" : "no");
@@ -1484,7 +1490,7 @@ static int xeon_setup_b2b_mw(struct intel_ntb_dev *ndev,
1484 pdev = ndev_pdev(ndev); 1490 pdev = ndev_pdev(ndev);
1485 mmio = ndev->self_mmio; 1491 mmio = ndev->self_mmio;
1486 1492
1487 if (ndev->b2b_idx >= ndev->mw_count) { 1493 if (ndev->b2b_idx == UINT_MAX) {
1488 dev_dbg(ndev_dev(ndev), "not using b2b mw\n"); 1494 dev_dbg(ndev_dev(ndev), "not using b2b mw\n");
1489 b2b_bar = 0; 1495 b2b_bar = 0;
1490 ndev->b2b_off = 0; 1496 ndev->b2b_off = 0;
@@ -1776,6 +1782,13 @@ static int xeon_init_ntb(struct intel_ntb_dev *ndev)
1776 else 1782 else
1777 ndev->b2b_idx = b2b_mw_idx; 1783 ndev->b2b_idx = b2b_mw_idx;
1778 1784
1785 if (ndev->b2b_idx >= ndev->mw_count) {
1786 dev_dbg(ndev_dev(ndev),
1787 "b2b_mw_idx %d invalid for mw_count %u\n",
1788 b2b_mw_idx, ndev->mw_count);
1789 return -EINVAL;
1790 }
1791
1779 dev_dbg(ndev_dev(ndev), 1792 dev_dbg(ndev_dev(ndev),
1780 "setting up b2b mw idx %d means %d\n", 1793 "setting up b2b mw idx %d means %d\n",
1781 b2b_mw_idx, ndev->b2b_idx); 1794 b2b_mw_idx, ndev->b2b_idx);
@@ -1843,6 +1856,9 @@ static int xeon_init_dev(struct intel_ntb_dev *ndev)
1843 case PCI_DEVICE_ID_INTEL_NTB_SS_HSX: 1856 case PCI_DEVICE_ID_INTEL_NTB_SS_HSX:
1844 case PCI_DEVICE_ID_INTEL_NTB_PS_HSX: 1857 case PCI_DEVICE_ID_INTEL_NTB_PS_HSX:
1845 case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX: 1858 case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX:
1859 case PCI_DEVICE_ID_INTEL_NTB_SS_BDX:
1860 case PCI_DEVICE_ID_INTEL_NTB_PS_BDX:
1861 case PCI_DEVICE_ID_INTEL_NTB_B2B_BDX:
1846 ndev->hwerr_flags |= NTB_HWERR_SDOORBELL_LOCKUP; 1862 ndev->hwerr_flags |= NTB_HWERR_SDOORBELL_LOCKUP;
1847 break; 1863 break;
1848 } 1864 }
@@ -1857,6 +1873,9 @@ static int xeon_init_dev(struct intel_ntb_dev *ndev)
1857 case PCI_DEVICE_ID_INTEL_NTB_SS_HSX: 1873 case PCI_DEVICE_ID_INTEL_NTB_SS_HSX:
1858 case PCI_DEVICE_ID_INTEL_NTB_PS_HSX: 1874 case PCI_DEVICE_ID_INTEL_NTB_PS_HSX:
1859 case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX: 1875 case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX:
1876 case PCI_DEVICE_ID_INTEL_NTB_SS_BDX:
1877 case PCI_DEVICE_ID_INTEL_NTB_PS_BDX:
1878 case PCI_DEVICE_ID_INTEL_NTB_B2B_BDX:
1860 ndev->hwerr_flags |= NTB_HWERR_SB01BASE_LOCKUP; 1879 ndev->hwerr_flags |= NTB_HWERR_SB01BASE_LOCKUP;
1861 break; 1880 break;
1862 } 1881 }
@@ -1878,6 +1897,9 @@ static int xeon_init_dev(struct intel_ntb_dev *ndev)
1878 case PCI_DEVICE_ID_INTEL_NTB_SS_HSX: 1897 case PCI_DEVICE_ID_INTEL_NTB_SS_HSX:
1879 case PCI_DEVICE_ID_INTEL_NTB_PS_HSX: 1898 case PCI_DEVICE_ID_INTEL_NTB_PS_HSX:
1880 case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX: 1899 case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX:
1900 case PCI_DEVICE_ID_INTEL_NTB_SS_BDX:
1901 case PCI_DEVICE_ID_INTEL_NTB_PS_BDX:
1902 case PCI_DEVICE_ID_INTEL_NTB_B2B_BDX:
1881 ndev->hwerr_flags |= NTB_HWERR_B2BDOORBELL_BIT14; 1903 ndev->hwerr_flags |= NTB_HWERR_B2BDOORBELL_BIT14;
1882 break; 1904 break;
1883 } 1905 }
@@ -1996,7 +2018,7 @@ static inline void ndev_init_struct(struct intel_ntb_dev *ndev,
1996 ndev->ntb.ops = &intel_ntb_ops; 2018 ndev->ntb.ops = &intel_ntb_ops;
1997 2019
1998 ndev->b2b_off = 0; 2020 ndev->b2b_off = 0;
1999 ndev->b2b_idx = INT_MAX; 2021 ndev->b2b_idx = UINT_MAX;
2000 2022
2001 ndev->bar4_split = 0; 2023 ndev->bar4_split = 0;
2002 2024
@@ -2234,14 +2256,17 @@ static const struct pci_device_id intel_ntb_pci_tbl[] = {
2234 {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_SNB)}, 2256 {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_SNB)},
2235 {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_IVT)}, 2257 {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_IVT)},
2236 {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_HSX)}, 2258 {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_HSX)},
2259 {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_BDX)},
2237 {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_JSF)}, 2260 {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_JSF)},
2238 {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_SNB)}, 2261 {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_SNB)},
2239 {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_IVT)}, 2262 {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_IVT)},
2240 {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_HSX)}, 2263 {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_HSX)},
2264 {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_BDX)},
2241 {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_JSF)}, 2265 {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_JSF)},
2242 {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_SNB)}, 2266 {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_SNB)},
2243 {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_IVT)}, 2267 {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_IVT)},
2244 {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_HSX)}, 2268 {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_HSX)},
2269 {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_BDX)},
2245 {0} 2270 {0}
2246}; 2271};
2247MODULE_DEVICE_TABLE(pci, intel_ntb_pci_tbl); 2272MODULE_DEVICE_TABLE(pci, intel_ntb_pci_tbl);
diff --git a/drivers/ntb/hw/intel/ntb_hw_intel.h b/drivers/ntb/hw/intel/ntb_hw_intel.h
index 7ddaf387b679..ea0612f797df 100644
--- a/drivers/ntb/hw/intel/ntb_hw_intel.h
+++ b/drivers/ntb/hw/intel/ntb_hw_intel.h
@@ -67,6 +67,9 @@
67#define PCI_DEVICE_ID_INTEL_NTB_PS_HSX 0x2F0E 67#define PCI_DEVICE_ID_INTEL_NTB_PS_HSX 0x2F0E
68#define PCI_DEVICE_ID_INTEL_NTB_SS_HSX 0x2F0F 68#define PCI_DEVICE_ID_INTEL_NTB_SS_HSX 0x2F0F
69#define PCI_DEVICE_ID_INTEL_NTB_B2B_BWD 0x0C4E 69#define PCI_DEVICE_ID_INTEL_NTB_B2B_BWD 0x0C4E
70#define PCI_DEVICE_ID_INTEL_NTB_B2B_BDX 0x6F0D
71#define PCI_DEVICE_ID_INTEL_NTB_PS_BDX 0x6F0E
72#define PCI_DEVICE_ID_INTEL_NTB_SS_BDX 0x6F0F
70 73
71/* Intel Xeon hardware */ 74/* Intel Xeon hardware */
72 75
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
index 1c6386d5f79c..6e3ee907d186 100644
--- a/drivers/ntb/ntb_transport.c
+++ b/drivers/ntb/ntb_transport.c
@@ -119,7 +119,8 @@ struct ntb_transport_qp {
119 struct ntb_transport_ctx *transport; 119 struct ntb_transport_ctx *transport;
120 struct ntb_dev *ndev; 120 struct ntb_dev *ndev;
121 void *cb_data; 121 void *cb_data;
122 struct dma_chan *dma_chan; 122 struct dma_chan *tx_dma_chan;
123 struct dma_chan *rx_dma_chan;
123 124
124 bool client_ready; 125 bool client_ready;
125 bool link_is_up; 126 bool link_is_up;
@@ -297,7 +298,7 @@ static LIST_HEAD(ntb_transport_list);
297 298
298static int ntb_bus_init(struct ntb_transport_ctx *nt) 299static int ntb_bus_init(struct ntb_transport_ctx *nt)
299{ 300{
300 list_add(&nt->entry, &ntb_transport_list); 301 list_add_tail(&nt->entry, &ntb_transport_list);
301 return 0; 302 return 0;
302} 303}
303 304
@@ -452,7 +453,7 @@ static ssize_t debugfs_read(struct file *filp, char __user *ubuf, size_t count,
452 453
453 out_offset = 0; 454 out_offset = 0;
454 out_offset += snprintf(buf + out_offset, out_count - out_offset, 455 out_offset += snprintf(buf + out_offset, out_count - out_offset,
455 "NTB QP stats\n"); 456 "\nNTB QP stats:\n\n");
456 out_offset += snprintf(buf + out_offset, out_count - out_offset, 457 out_offset += snprintf(buf + out_offset, out_count - out_offset,
457 "rx_bytes - \t%llu\n", qp->rx_bytes); 458 "rx_bytes - \t%llu\n", qp->rx_bytes);
458 out_offset += snprintf(buf + out_offset, out_count - out_offset, 459 out_offset += snprintf(buf + out_offset, out_count - out_offset,
@@ -470,11 +471,11 @@ static ssize_t debugfs_read(struct file *filp, char __user *ubuf, size_t count,
470 out_offset += snprintf(buf + out_offset, out_count - out_offset, 471 out_offset += snprintf(buf + out_offset, out_count - out_offset,
471 "rx_err_ver - \t%llu\n", qp->rx_err_ver); 472 "rx_err_ver - \t%llu\n", qp->rx_err_ver);
472 out_offset += snprintf(buf + out_offset, out_count - out_offset, 473 out_offset += snprintf(buf + out_offset, out_count - out_offset,
473 "rx_buff - \t%p\n", qp->rx_buff); 474 "rx_buff - \t0x%p\n", qp->rx_buff);
474 out_offset += snprintf(buf + out_offset, out_count - out_offset, 475 out_offset += snprintf(buf + out_offset, out_count - out_offset,
475 "rx_index - \t%u\n", qp->rx_index); 476 "rx_index - \t%u\n", qp->rx_index);
476 out_offset += snprintf(buf + out_offset, out_count - out_offset, 477 out_offset += snprintf(buf + out_offset, out_count - out_offset,
477 "rx_max_entry - \t%u\n", qp->rx_max_entry); 478 "rx_max_entry - \t%u\n\n", qp->rx_max_entry);
478 479
479 out_offset += snprintf(buf + out_offset, out_count - out_offset, 480 out_offset += snprintf(buf + out_offset, out_count - out_offset,
480 "tx_bytes - \t%llu\n", qp->tx_bytes); 481 "tx_bytes - \t%llu\n", qp->tx_bytes);
@@ -489,15 +490,32 @@ static ssize_t debugfs_read(struct file *filp, char __user *ubuf, size_t count,
489 out_offset += snprintf(buf + out_offset, out_count - out_offset, 490 out_offset += snprintf(buf + out_offset, out_count - out_offset,
490 "tx_err_no_buf - %llu\n", qp->tx_err_no_buf); 491 "tx_err_no_buf - %llu\n", qp->tx_err_no_buf);
491 out_offset += snprintf(buf + out_offset, out_count - out_offset, 492 out_offset += snprintf(buf + out_offset, out_count - out_offset,
492 "tx_mw - \t%p\n", qp->tx_mw); 493 "tx_mw - \t0x%p\n", qp->tx_mw);
493 out_offset += snprintf(buf + out_offset, out_count - out_offset, 494 out_offset += snprintf(buf + out_offset, out_count - out_offset,
494 "tx_index - \t%u\n", qp->tx_index); 495 "tx_index (H) - \t%u\n", qp->tx_index);
496 out_offset += snprintf(buf + out_offset, out_count - out_offset,
497 "RRI (T) - \t%u\n",
498 qp->remote_rx_info->entry);
495 out_offset += snprintf(buf + out_offset, out_count - out_offset, 499 out_offset += snprintf(buf + out_offset, out_count - out_offset,
496 "tx_max_entry - \t%u\n", qp->tx_max_entry); 500 "tx_max_entry - \t%u\n", qp->tx_max_entry);
501 out_offset += snprintf(buf + out_offset, out_count - out_offset,
502 "free tx - \t%u\n",
503 ntb_transport_tx_free_entry(qp));
497 504
498 out_offset += snprintf(buf + out_offset, out_count - out_offset, 505 out_offset += snprintf(buf + out_offset, out_count - out_offset,
499 "\nQP Link %s\n", 506 "\n");
507 out_offset += snprintf(buf + out_offset, out_count - out_offset,
508 "Using TX DMA - \t%s\n",
509 qp->tx_dma_chan ? "Yes" : "No");
510 out_offset += snprintf(buf + out_offset, out_count - out_offset,
511 "Using RX DMA - \t%s\n",
512 qp->rx_dma_chan ? "Yes" : "No");
513 out_offset += snprintf(buf + out_offset, out_count - out_offset,
514 "QP Link - \t%s\n",
500 qp->link_is_up ? "Up" : "Down"); 515 qp->link_is_up ? "Up" : "Down");
516 out_offset += snprintf(buf + out_offset, out_count - out_offset,
517 "\n");
518
501 if (out_offset > out_count) 519 if (out_offset > out_count)
502 out_offset = out_count; 520 out_offset = out_count;
503 521
@@ -535,6 +553,7 @@ static struct ntb_queue_entry *ntb_list_rm(spinlock_t *lock,
535 } 553 }
536 entry = list_first_entry(list, struct ntb_queue_entry, entry); 554 entry = list_first_entry(list, struct ntb_queue_entry, entry);
537 list_del(&entry->entry); 555 list_del(&entry->entry);
556
538out: 557out:
539 spin_unlock_irqrestore(lock, flags); 558 spin_unlock_irqrestore(lock, flags);
540 559
@@ -1206,7 +1225,7 @@ static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset)
1206{ 1225{
1207 struct dma_async_tx_descriptor *txd; 1226 struct dma_async_tx_descriptor *txd;
1208 struct ntb_transport_qp *qp = entry->qp; 1227 struct ntb_transport_qp *qp = entry->qp;
1209 struct dma_chan *chan = qp->dma_chan; 1228 struct dma_chan *chan = qp->rx_dma_chan;
1210 struct dma_device *device; 1229 struct dma_device *device;
1211 size_t pay_off, buff_off, len; 1230 size_t pay_off, buff_off, len;
1212 struct dmaengine_unmap_data *unmap; 1231 struct dmaengine_unmap_data *unmap;
@@ -1219,18 +1238,18 @@ static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset)
1219 goto err; 1238 goto err;
1220 1239
1221 if (len < copy_bytes) 1240 if (len < copy_bytes)
1222 goto err_wait; 1241 goto err;
1223 1242
1224 device = chan->device; 1243 device = chan->device;
1225 pay_off = (size_t)offset & ~PAGE_MASK; 1244 pay_off = (size_t)offset & ~PAGE_MASK;
1226 buff_off = (size_t)buf & ~PAGE_MASK; 1245 buff_off = (size_t)buf & ~PAGE_MASK;
1227 1246
1228 if (!is_dma_copy_aligned(device, pay_off, buff_off, len)) 1247 if (!is_dma_copy_aligned(device, pay_off, buff_off, len))
1229 goto err_wait; 1248 goto err;
1230 1249
1231 unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOWAIT); 1250 unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOWAIT);
1232 if (!unmap) 1251 if (!unmap)
1233 goto err_wait; 1252 goto err;
1234 1253
1235 unmap->len = len; 1254 unmap->len = len;
1236 unmap->addr[0] = dma_map_page(device->dev, virt_to_page(offset), 1255 unmap->addr[0] = dma_map_page(device->dev, virt_to_page(offset),
@@ -1273,12 +1292,6 @@ err_set_unmap:
1273 dmaengine_unmap_put(unmap); 1292 dmaengine_unmap_put(unmap);
1274err_get_unmap: 1293err_get_unmap:
1275 dmaengine_unmap_put(unmap); 1294 dmaengine_unmap_put(unmap);
1276err_wait:
1277 /* If the callbacks come out of order, the writing of the index to the
1278 * last completed will be out of order. This may result in the
1279 * receive stalling forever.
1280 */
1281 dma_sync_wait(chan, qp->last_cookie);
1282err: 1295err:
1283 ntb_memcpy_rx(entry, offset); 1296 ntb_memcpy_rx(entry, offset);
1284 qp->rx_memcpy++; 1297 qp->rx_memcpy++;
@@ -1373,8 +1386,8 @@ static void ntb_transport_rxc_db(unsigned long data)
1373 break; 1386 break;
1374 } 1387 }
1375 1388
1376 if (i && qp->dma_chan) 1389 if (i && qp->rx_dma_chan)
1377 dma_async_issue_pending(qp->dma_chan); 1390 dma_async_issue_pending(qp->rx_dma_chan);
1378 1391
1379 if (i == qp->rx_max_entry) { 1392 if (i == qp->rx_max_entry) {
1380 /* there is more work to do */ 1393 /* there is more work to do */
@@ -1441,7 +1454,7 @@ static void ntb_async_tx(struct ntb_transport_qp *qp,
1441{ 1454{
1442 struct ntb_payload_header __iomem *hdr; 1455 struct ntb_payload_header __iomem *hdr;
1443 struct dma_async_tx_descriptor *txd; 1456 struct dma_async_tx_descriptor *txd;
1444 struct dma_chan *chan = qp->dma_chan; 1457 struct dma_chan *chan = qp->tx_dma_chan;
1445 struct dma_device *device; 1458 struct dma_device *device;
1446 size_t dest_off, buff_off; 1459 size_t dest_off, buff_off;
1447 struct dmaengine_unmap_data *unmap; 1460 struct dmaengine_unmap_data *unmap;
@@ -1634,14 +1647,27 @@ ntb_transport_create_queue(void *data, struct device *client_dev,
1634 dma_cap_set(DMA_MEMCPY, dma_mask); 1647 dma_cap_set(DMA_MEMCPY, dma_mask);
1635 1648
1636 if (use_dma) { 1649 if (use_dma) {
1637 qp->dma_chan = dma_request_channel(dma_mask, ntb_dma_filter_fn, 1650 qp->tx_dma_chan =
1638 (void *)(unsigned long)node); 1651 dma_request_channel(dma_mask, ntb_dma_filter_fn,
1639 if (!qp->dma_chan) 1652 (void *)(unsigned long)node);
1640 dev_info(&pdev->dev, "Unable to allocate DMA channel\n"); 1653 if (!qp->tx_dma_chan)
1654 dev_info(&pdev->dev, "Unable to allocate TX DMA channel\n");
1655
1656 qp->rx_dma_chan =
1657 dma_request_channel(dma_mask, ntb_dma_filter_fn,
1658 (void *)(unsigned long)node);
1659 if (!qp->rx_dma_chan)
1660 dev_info(&pdev->dev, "Unable to allocate RX DMA channel\n");
1641 } else { 1661 } else {
1642 qp->dma_chan = NULL; 1662 qp->tx_dma_chan = NULL;
1663 qp->rx_dma_chan = NULL;
1643 } 1664 }
1644 dev_dbg(&pdev->dev, "Using %s memcpy\n", qp->dma_chan ? "DMA" : "CPU"); 1665
1666 dev_dbg(&pdev->dev, "Using %s memcpy for TX\n",
1667 qp->tx_dma_chan ? "DMA" : "CPU");
1668
1669 dev_dbg(&pdev->dev, "Using %s memcpy for RX\n",
1670 qp->rx_dma_chan ? "DMA" : "CPU");
1645 1671
1646 for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) { 1672 for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
1647 entry = kzalloc_node(sizeof(*entry), GFP_ATOMIC, node); 1673 entry = kzalloc_node(sizeof(*entry), GFP_ATOMIC, node);
@@ -1676,8 +1702,10 @@ err2:
1676err1: 1702err1:
1677 while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q))) 1703 while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q)))
1678 kfree(entry); 1704 kfree(entry);
1679 if (qp->dma_chan) 1705 if (qp->tx_dma_chan)
1680 dma_release_channel(qp->dma_chan); 1706 dma_release_channel(qp->tx_dma_chan);
1707 if (qp->rx_dma_chan)
1708 dma_release_channel(qp->rx_dma_chan);
1681 nt->qp_bitmap_free |= qp_bit; 1709 nt->qp_bitmap_free |= qp_bit;
1682err: 1710err:
1683 return NULL; 1711 return NULL;
@@ -1701,12 +1729,27 @@ void ntb_transport_free_queue(struct ntb_transport_qp *qp)
1701 1729
1702 pdev = qp->ndev->pdev; 1730 pdev = qp->ndev->pdev;
1703 1731
1704 if (qp->dma_chan) { 1732 if (qp->tx_dma_chan) {
1705 struct dma_chan *chan = qp->dma_chan; 1733 struct dma_chan *chan = qp->tx_dma_chan;
1734 /* Putting the dma_chan to NULL will force any new traffic to be
1735 * processed by the CPU instead of the DAM engine
1736 */
1737 qp->tx_dma_chan = NULL;
1738
1739 /* Try to be nice and wait for any queued DMA engine
1740 * transactions to process before smashing it with a rock
1741 */
1742 dma_sync_wait(chan, qp->last_cookie);
1743 dmaengine_terminate_all(chan);
1744 dma_release_channel(chan);
1745 }
1746
1747 if (qp->rx_dma_chan) {
1748 struct dma_chan *chan = qp->rx_dma_chan;
1706 /* Putting the dma_chan to NULL will force any new traffic to be 1749 /* Putting the dma_chan to NULL will force any new traffic to be
1707 * processed by the CPU instead of the DAM engine 1750 * processed by the CPU instead of the DAM engine
1708 */ 1751 */
1709 qp->dma_chan = NULL; 1752 qp->rx_dma_chan = NULL;
1710 1753
1711 /* Try to be nice and wait for any queued DMA engine 1754 /* Try to be nice and wait for any queued DMA engine
1712 * transactions to process before smashing it with a rock 1755 * transactions to process before smashing it with a rock
@@ -1843,7 +1886,7 @@ int ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
1843 entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q); 1886 entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
1844 if (!entry) { 1887 if (!entry) {
1845 qp->tx_err_no_buf++; 1888 qp->tx_err_no_buf++;
1846 return -ENOMEM; 1889 return -EBUSY;
1847 } 1890 }
1848 1891
1849 entry->cb_data = cb; 1892 entry->cb_data = cb;
@@ -1954,21 +1997,34 @@ EXPORT_SYMBOL_GPL(ntb_transport_qp_num);
1954unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp) 1997unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp)
1955{ 1998{
1956 unsigned int max; 1999 unsigned int max;
2000 unsigned int copy_align;
1957 2001
1958 if (!qp) 2002 if (!qp)
1959 return 0; 2003 return 0;
1960 2004
1961 if (!qp->dma_chan) 2005 if (!qp->tx_dma_chan && !qp->rx_dma_chan)
1962 return qp->tx_max_frame - sizeof(struct ntb_payload_header); 2006 return qp->tx_max_frame - sizeof(struct ntb_payload_header);
1963 2007
2008 copy_align = max(qp->tx_dma_chan->device->copy_align,
2009 qp->rx_dma_chan->device->copy_align);
2010
1964 /* If DMA engine usage is possible, try to find the max size for that */ 2011 /* If DMA engine usage is possible, try to find the max size for that */
1965 max = qp->tx_max_frame - sizeof(struct ntb_payload_header); 2012 max = qp->tx_max_frame - sizeof(struct ntb_payload_header);
1966 max -= max % (1 << qp->dma_chan->device->copy_align); 2013 max -= max % (1 << copy_align);
1967 2014
1968 return max; 2015 return max;
1969} 2016}
1970EXPORT_SYMBOL_GPL(ntb_transport_max_size); 2017EXPORT_SYMBOL_GPL(ntb_transport_max_size);
1971 2018
2019unsigned int ntb_transport_tx_free_entry(struct ntb_transport_qp *qp)
2020{
2021 unsigned int head = qp->tx_index;
2022 unsigned int tail = qp->remote_rx_info->entry;
2023
2024 return tail > head ? tail - head : qp->tx_max_entry + tail - head;
2025}
2026EXPORT_SYMBOL_GPL(ntb_transport_tx_free_entry);
2027
1972static void ntb_transport_doorbell_callback(void *data, int vector) 2028static void ntb_transport_doorbell_callback(void *data, int vector)
1973{ 2029{
1974 struct ntb_transport_ctx *nt = data; 2030 struct ntb_transport_ctx *nt = data;
diff --git a/drivers/nvdimm/btt_devs.c b/drivers/nvdimm/btt_devs.c
index 59ad54a63d9f..cb477518dd0e 100644
--- a/drivers/nvdimm/btt_devs.c
+++ b/drivers/nvdimm/btt_devs.c
@@ -128,13 +128,13 @@ static ssize_t namespace_store(struct device *dev,
128 struct nd_btt *nd_btt = to_nd_btt(dev); 128 struct nd_btt *nd_btt = to_nd_btt(dev);
129 ssize_t rc; 129 ssize_t rc;
130 130
131 nvdimm_bus_lock(dev);
132 device_lock(dev); 131 device_lock(dev);
132 nvdimm_bus_lock(dev);
133 rc = nd_namespace_store(dev, &nd_btt->ndns, buf, len); 133 rc = nd_namespace_store(dev, &nd_btt->ndns, buf, len);
134 dev_dbg(dev, "%s: result: %zd wrote: %s%s", __func__, 134 dev_dbg(dev, "%s: result: %zd wrote: %s%s", __func__,
135 rc, buf, buf[len - 1] == '\n' ? "" : "\n"); 135 rc, buf, buf[len - 1] == '\n' ? "" : "\n");
136 device_unlock(dev);
137 nvdimm_bus_unlock(dev); 136 nvdimm_bus_unlock(dev);
137 device_unlock(dev);
138 138
139 return rc; 139 return rc;
140} 140}
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
index 3fd7d0d81a47..71805a1aa0f3 100644
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -148,13 +148,13 @@ static ssize_t namespace_store(struct device *dev,
148 struct nd_pfn *nd_pfn = to_nd_pfn(dev); 148 struct nd_pfn *nd_pfn = to_nd_pfn(dev);
149 ssize_t rc; 149 ssize_t rc;
150 150
151 nvdimm_bus_lock(dev);
152 device_lock(dev); 151 device_lock(dev);
152 nvdimm_bus_lock(dev);
153 rc = nd_namespace_store(dev, &nd_pfn->ndns, buf, len); 153 rc = nd_namespace_store(dev, &nd_pfn->ndns, buf, len);
154 dev_dbg(dev, "%s: result: %zd wrote: %s%s", __func__, 154 dev_dbg(dev, "%s: result: %zd wrote: %s%s", __func__,
155 rc, buf, buf[len - 1] == '\n' ? "" : "\n"); 155 rc, buf, buf[len - 1] == '\n' ? "" : "\n");
156 device_unlock(dev);
157 nvdimm_bus_unlock(dev); 156 nvdimm_bus_unlock(dev);
157 device_unlock(dev);
158 158
159 return rc; 159 return rc;
160} 160}
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index b9525385c0dc..0ba6a978f227 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -92,6 +92,8 @@ static int pmem_rw_page(struct block_device *bdev, sector_t sector,
92 struct pmem_device *pmem = bdev->bd_disk->private_data; 92 struct pmem_device *pmem = bdev->bd_disk->private_data;
93 93
94 pmem_do_bvec(pmem, page, PAGE_CACHE_SIZE, 0, rw, sector); 94 pmem_do_bvec(pmem, page, PAGE_CACHE_SIZE, 0, rw, sector);
95 if (rw & WRITE)
96 wmb_pmem();
95 page_endio(page, rw & WRITE, 0); 97 page_endio(page, rw & WRITE, 0);
96 98
97 return 0; 99 return 0;
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index 1350fa25cdb0..a87a868fed64 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -197,7 +197,8 @@ static int of_phy_match(struct device *dev, void *phy_np)
197 * of_phy_find_device - Give a PHY node, find the phy_device 197 * of_phy_find_device - Give a PHY node, find the phy_device
198 * @phy_np: Pointer to the phy's device tree node 198 * @phy_np: Pointer to the phy's device tree node
199 * 199 *
200 * Returns a pointer to the phy_device. 200 * If successful, returns a pointer to the phy_device with the embedded
201 * struct device refcount incremented by one, or NULL on failure.
201 */ 202 */
202struct phy_device *of_phy_find_device(struct device_node *phy_np) 203struct phy_device *of_phy_find_device(struct device_node *phy_np)
203{ 204{
@@ -217,7 +218,9 @@ EXPORT_SYMBOL(of_phy_find_device);
217 * @hndlr: Link state callback for the network device 218 * @hndlr: Link state callback for the network device
218 * @iface: PHY data interface type 219 * @iface: PHY data interface type
219 * 220 *
220 * Returns a pointer to the phy_device if successful. NULL otherwise 221 * If successful, returns a pointer to the phy_device with the embedded
222 * struct device refcount incremented by one, or NULL on failure. The
223 * refcount must be dropped by calling phy_disconnect() or phy_detach().
221 */ 224 */
222struct phy_device *of_phy_connect(struct net_device *dev, 225struct phy_device *of_phy_connect(struct net_device *dev,
223 struct device_node *phy_np, 226 struct device_node *phy_np,
@@ -225,13 +228,19 @@ struct phy_device *of_phy_connect(struct net_device *dev,
225 phy_interface_t iface) 228 phy_interface_t iface)
226{ 229{
227 struct phy_device *phy = of_phy_find_device(phy_np); 230 struct phy_device *phy = of_phy_find_device(phy_np);
231 int ret;
228 232
229 if (!phy) 233 if (!phy)
230 return NULL; 234 return NULL;
231 235
232 phy->dev_flags = flags; 236 phy->dev_flags = flags;
233 237
234 return phy_connect_direct(dev, phy, hndlr, iface) ? NULL : phy; 238 ret = phy_connect_direct(dev, phy, hndlr, iface);
239
240 /* refcount is held by phy_connect_direct() on success */
241 put_device(&phy->dev);
242
243 return ret ? NULL : phy;
235} 244}
236EXPORT_SYMBOL(of_phy_connect); 245EXPORT_SYMBOL(of_phy_connect);
237 246
@@ -241,17 +250,27 @@ EXPORT_SYMBOL(of_phy_connect);
241 * @phy_np: Node pointer for the PHY 250 * @phy_np: Node pointer for the PHY
242 * @flags: flags to pass to the PHY 251 * @flags: flags to pass to the PHY
243 * @iface: PHY data interface type 252 * @iface: PHY data interface type
253 *
254 * If successful, returns a pointer to the phy_device with the embedded
255 * struct device refcount incremented by one, or NULL on failure. The
256 * refcount must be dropped by calling phy_disconnect() or phy_detach().
244 */ 257 */
245struct phy_device *of_phy_attach(struct net_device *dev, 258struct phy_device *of_phy_attach(struct net_device *dev,
246 struct device_node *phy_np, u32 flags, 259 struct device_node *phy_np, u32 flags,
247 phy_interface_t iface) 260 phy_interface_t iface)
248{ 261{
249 struct phy_device *phy = of_phy_find_device(phy_np); 262 struct phy_device *phy = of_phy_find_device(phy_np);
263 int ret;
250 264
251 if (!phy) 265 if (!phy)
252 return NULL; 266 return NULL;
253 267
254 return phy_attach_direct(dev, phy, flags, iface) ? NULL : phy; 268 ret = phy_attach_direct(dev, phy, flags, iface);
269
270 /* refcount is held by phy_attach_direct() on success */
271 put_device(&phy->dev);
272
273 return ret ? NULL : phy;
255} 274}
256EXPORT_SYMBOL(of_phy_attach); 275EXPORT_SYMBOL(of_phy_attach);
257 276
diff --git a/drivers/of/of_pci_irq.c b/drivers/of/of_pci_irq.c
index 1710d9dc7fc2..2306313c0029 100644
--- a/drivers/of/of_pci_irq.c
+++ b/drivers/of/of_pci_irq.c
@@ -38,8 +38,8 @@ int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *out_irq
38 */ 38 */
39 rc = pci_read_config_byte(pdev, PCI_INTERRUPT_PIN, &pin); 39 rc = pci_read_config_byte(pdev, PCI_INTERRUPT_PIN, &pin);
40 if (rc != 0) 40 if (rc != 0)
41 return rc; 41 goto err;
42 /* No pin, exit */ 42 /* No pin, exit with no error message. */
43 if (pin == 0) 43 if (pin == 0)
44 return -ENODEV; 44 return -ENODEV;
45 45
@@ -53,8 +53,10 @@ int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *out_irq
53 ppnode = pci_bus_to_OF_node(pdev->bus); 53 ppnode = pci_bus_to_OF_node(pdev->bus);
54 54
55 /* No node for host bridge ? give up */ 55 /* No node for host bridge ? give up */
56 if (ppnode == NULL) 56 if (ppnode == NULL) {
57 return -EINVAL; 57 rc = -EINVAL;
58 goto err;
59 }
58 } else { 60 } else {
59 /* We found a P2P bridge, check if it has a node */ 61 /* We found a P2P bridge, check if it has a node */
60 ppnode = pci_device_to_OF_node(ppdev); 62 ppnode = pci_device_to_OF_node(ppdev);
@@ -86,7 +88,13 @@ int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *out_irq
86 out_irq->args[0] = pin; 88 out_irq->args[0] = pin;
87 laddr[0] = cpu_to_be32((pdev->bus->number << 16) | (pdev->devfn << 8)); 89 laddr[0] = cpu_to_be32((pdev->bus->number << 16) | (pdev->devfn << 8));
88 laddr[1] = laddr[2] = cpu_to_be32(0); 90 laddr[1] = laddr[2] = cpu_to_be32(0);
89 return of_irq_parse_raw(laddr, out_irq); 91 rc = of_irq_parse_raw(laddr, out_irq);
92 if (rc)
93 goto err;
94 return 0;
95err:
96 dev_err(&pdev->dev, "of_irq_parse_pci() failed with rc=%d\n", rc);
97 return rc;
90} 98}
91EXPORT_SYMBOL_GPL(of_irq_parse_pci); 99EXPORT_SYMBOL_GPL(of_irq_parse_pci);
92 100
@@ -105,10 +113,8 @@ int of_irq_parse_and_map_pci(const struct pci_dev *dev, u8 slot, u8 pin)
105 int ret; 113 int ret;
106 114
107 ret = of_irq_parse_pci(dev, &oirq); 115 ret = of_irq_parse_pci(dev, &oirq);
108 if (ret) { 116 if (ret)
109 dev_err(&dev->dev, "of_irq_parse_pci() failed with rc=%d\n", ret);
110 return 0; /* Proper return code 0 == NO_IRQ */ 117 return 0; /* Proper return code 0 == NO_IRQ */
111 }
112 118
113 return irq_create_of_mapping(&oirq); 119 return irq_create_of_mapping(&oirq);
114} 120}
diff --git a/drivers/parisc/dino.c b/drivers/parisc/dino.c
index baec33c4e698..a0580afe1713 100644
--- a/drivers/parisc/dino.c
+++ b/drivers/parisc/dino.c
@@ -560,6 +560,9 @@ dino_fixup_bus(struct pci_bus *bus)
560 } else if (bus->parent) { 560 } else if (bus->parent) {
561 int i; 561 int i;
562 562
563 pci_read_bridge_bases(bus);
564
565
563 for(i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) { 566 for(i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) {
564 if((bus->self->resource[i].flags & 567 if((bus->self->resource[i].flags &
565 (IORESOURCE_IO | IORESOURCE_MEM)) == 0) 568 (IORESOURCE_IO | IORESOURCE_MEM)) == 0)
diff --git a/drivers/parisc/lba_pci.c b/drivers/parisc/lba_pci.c
index 7b9e89ba0465..a32c1f6c252c 100644
--- a/drivers/parisc/lba_pci.c
+++ b/drivers/parisc/lba_pci.c
@@ -693,6 +693,7 @@ lba_fixup_bus(struct pci_bus *bus)
693 if (bus->parent) { 693 if (bus->parent) {
694 int i; 694 int i;
695 /* PCI-PCI Bridge */ 695 /* PCI-PCI Bridge */
696 pci_read_bridge_bases(bus);
696 for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) 697 for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++)
697 pci_claim_bridge_resource(bus->self, i); 698 pci_claim_bridge_resource(bus->self, i);
698 } else { 699 } else {
diff --git a/drivers/pci/access.c b/drivers/pci/access.c
index 769f7e35f1a2..59ac36fe7c42 100644
--- a/drivers/pci/access.c
+++ b/drivers/pci/access.c
@@ -442,7 +442,8 @@ static const struct pci_vpd_ops pci_vpd_pci22_ops = {
442static ssize_t pci_vpd_f0_read(struct pci_dev *dev, loff_t pos, size_t count, 442static ssize_t pci_vpd_f0_read(struct pci_dev *dev, loff_t pos, size_t count,
443 void *arg) 443 void *arg)
444{ 444{
445 struct pci_dev *tdev = pci_get_slot(dev->bus, PCI_SLOT(dev->devfn)); 445 struct pci_dev *tdev = pci_get_slot(dev->bus,
446 PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
446 ssize_t ret; 447 ssize_t ret;
447 448
448 if (!tdev) 449 if (!tdev)
@@ -456,7 +457,8 @@ static ssize_t pci_vpd_f0_read(struct pci_dev *dev, loff_t pos, size_t count,
456static ssize_t pci_vpd_f0_write(struct pci_dev *dev, loff_t pos, size_t count, 457static ssize_t pci_vpd_f0_write(struct pci_dev *dev, loff_t pos, size_t count,
457 const void *arg) 458 const void *arg)
458{ 459{
459 struct pci_dev *tdev = pci_get_slot(dev->bus, PCI_SLOT(dev->devfn)); 460 struct pci_dev *tdev = pci_get_slot(dev->bus,
461 PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
460 ssize_t ret; 462 ssize_t ret;
461 463
462 if (!tdev) 464 if (!tdev)
@@ -473,22 +475,6 @@ static const struct pci_vpd_ops pci_vpd_f0_ops = {
473 .release = pci_vpd_pci22_release, 475 .release = pci_vpd_pci22_release,
474}; 476};
475 477
476static int pci_vpd_f0_dev_check(struct pci_dev *dev)
477{
478 struct pci_dev *tdev = pci_get_slot(dev->bus, PCI_SLOT(dev->devfn));
479 int ret = 0;
480
481 if (!tdev)
482 return -ENODEV;
483 if (!tdev->vpd || !tdev->multifunction ||
484 dev->class != tdev->class || dev->vendor != tdev->vendor ||
485 dev->device != tdev->device)
486 ret = -ENODEV;
487
488 pci_dev_put(tdev);
489 return ret;
490}
491
492int pci_vpd_pci22_init(struct pci_dev *dev) 478int pci_vpd_pci22_init(struct pci_dev *dev)
493{ 479{
494 struct pci_vpd_pci22 *vpd; 480 struct pci_vpd_pci22 *vpd;
@@ -497,12 +483,7 @@ int pci_vpd_pci22_init(struct pci_dev *dev)
497 cap = pci_find_capability(dev, PCI_CAP_ID_VPD); 483 cap = pci_find_capability(dev, PCI_CAP_ID_VPD);
498 if (!cap) 484 if (!cap)
499 return -ENODEV; 485 return -ENODEV;
500 if (dev->dev_flags & PCI_DEV_FLAGS_VPD_REF_F0) {
501 int ret = pci_vpd_f0_dev_check(dev);
502 486
503 if (ret)
504 return ret;
505 }
506 vpd = kzalloc(sizeof(*vpd), GFP_ATOMIC); 487 vpd = kzalloc(sizeof(*vpd), GFP_ATOMIC);
507 if (!vpd) 488 if (!vpd)
508 return -ENOMEM; 489 return -ENOMEM;
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
index 6fbd3f2b5992..d3346d23963b 100644
--- a/drivers/pci/bus.c
+++ b/drivers/pci/bus.c
@@ -256,6 +256,8 @@ bool pci_bus_clip_resource(struct pci_dev *dev, int idx)
256 256
257 res->start = start; 257 res->start = start;
258 res->end = end; 258 res->end = end;
259 res->flags &= ~IORESOURCE_UNSET;
260 orig_res.flags &= ~IORESOURCE_UNSET;
259 dev_printk(KERN_DEBUG, &dev->dev, "%pR clipped to %pR\n", 261 dev_printk(KERN_DEBUG, &dev->dev, "%pR clipped to %pR\n",
260 &orig_res, res); 262 &orig_res, res);
261 263
diff --git a/drivers/pci/host/pci-keystone.c b/drivers/pci/host/pci-keystone.c
index 81253e70b1c5..0aa81bd3de12 100644
--- a/drivers/pci/host/pci-keystone.c
+++ b/drivers/pci/host/pci-keystone.c
@@ -110,7 +110,7 @@ static int ks_pcie_establish_link(struct keystone_pcie *ks_pcie)
110 return -EINVAL; 110 return -EINVAL;
111} 111}
112 112
113static void ks_pcie_msi_irq_handler(unsigned int __irq, struct irq_desc *desc) 113static void ks_pcie_msi_irq_handler(struct irq_desc *desc)
114{ 114{
115 unsigned int irq = irq_desc_get_irq(desc); 115 unsigned int irq = irq_desc_get_irq(desc);
116 struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc); 116 struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc);
@@ -138,8 +138,7 @@ static void ks_pcie_msi_irq_handler(unsigned int __irq, struct irq_desc *desc)
138 * Traverse through pending legacy interrupts and invoke handler for each. Also 138 * Traverse through pending legacy interrupts and invoke handler for each. Also
139 * takes care of interrupt controller level mask/ack operation. 139 * takes care of interrupt controller level mask/ack operation.
140 */ 140 */
141static void ks_pcie_legacy_irq_handler(unsigned int __irq, 141static void ks_pcie_legacy_irq_handler(struct irq_desc *desc)
142 struct irq_desc *desc)
143{ 142{
144 unsigned int irq = irq_desc_get_irq(desc); 143 unsigned int irq = irq_desc_get_irq(desc);
145 struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc); 144 struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc);
diff --git a/drivers/pci/host/pci-rcar-gen2.c b/drivers/pci/host/pci-rcar-gen2.c
index 367e28fa7564..c4f64bfee551 100644
--- a/drivers/pci/host/pci-rcar-gen2.c
+++ b/drivers/pci/host/pci-rcar-gen2.c
@@ -362,6 +362,7 @@ static int rcar_pci_probe(struct platform_device *pdev)
362static struct of_device_id rcar_pci_of_match[] = { 362static struct of_device_id rcar_pci_of_match[] = {
363 { .compatible = "renesas,pci-r8a7790", }, 363 { .compatible = "renesas,pci-r8a7790", },
364 { .compatible = "renesas,pci-r8a7791", }, 364 { .compatible = "renesas,pci-r8a7791", },
365 { .compatible = "renesas,pci-r8a7794", },
365 { }, 366 { },
366}; 367};
367 368
diff --git a/drivers/pci/host/pci-xgene-msi.c b/drivers/pci/host/pci-xgene-msi.c
index 996327cfa1e1..e491681daf22 100644
--- a/drivers/pci/host/pci-xgene-msi.c
+++ b/drivers/pci/host/pci-xgene-msi.c
@@ -295,7 +295,7 @@ static int xgene_msi_init_allocator(struct xgene_msi *xgene_msi)
295 return 0; 295 return 0;
296} 296}
297 297
298static void xgene_msi_isr(unsigned int irq, struct irq_desc *desc) 298static void xgene_msi_isr(struct irq_desc *desc)
299{ 299{
300 struct irq_chip *chip = irq_desc_get_chip(desc); 300 struct irq_chip *chip = irq_desc_get_chip(desc);
301 struct xgene_msi_group *msi_groups; 301 struct xgene_msi_group *msi_groups;
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 0b2be174d981..8361d27e5eca 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -676,15 +676,20 @@ static struct irq_domain *pci_host_bridge_msi_domain(struct pci_bus *bus)
676static void pci_set_bus_msi_domain(struct pci_bus *bus) 676static void pci_set_bus_msi_domain(struct pci_bus *bus)
677{ 677{
678 struct irq_domain *d; 678 struct irq_domain *d;
679 struct pci_bus *b;
679 680
680 /* 681 /*
681 * Either bus is the root, and we must obtain it from the 682 * The bus can be a root bus, a subordinate bus, or a virtual bus
682 * firmware, or we inherit it from the bridge device. 683 * created by an SR-IOV device. Walk up to the first bridge device
684 * found or derive the domain from the host bridge.
683 */ 685 */
684 if (pci_is_root_bus(bus)) 686 for (b = bus, d = NULL; !d && !pci_is_root_bus(b); b = b->parent) {
685 d = pci_host_bridge_msi_domain(bus); 687 if (b->self)
686 else 688 d = dev_get_msi_domain(&b->self->dev);
687 d = dev_get_msi_domain(&bus->self->dev); 689 }
690
691 if (!d)
692 d = pci_host_bridge_msi_domain(b);
688 693
689 dev_set_msi_domain(&bus->dev, d); 694 dev_set_msi_domain(&bus->dev, d);
690} 695}
@@ -855,9 +860,6 @@ int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
855 child->bridge_ctl = bctl; 860 child->bridge_ctl = bctl;
856 } 861 }
857 862
858 /* Read and initialize bridge resources */
859 pci_read_bridge_bases(child);
860
861 cmax = pci_scan_child_bus(child); 863 cmax = pci_scan_child_bus(child);
862 if (cmax > subordinate) 864 if (cmax > subordinate)
863 dev_warn(&dev->dev, "bridge has subordinate %02x but max busn %02x\n", 865 dev_warn(&dev->dev, "bridge has subordinate %02x but max busn %02x\n",
@@ -918,9 +920,6 @@ int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
918 920
919 if (!is_cardbus) { 921 if (!is_cardbus) {
920 child->bridge_ctl = bctl; 922 child->bridge_ctl = bctl;
921
922 /* Read and initialize bridge resources */
923 pci_read_bridge_bases(child);
924 max = pci_scan_child_bus(child); 923 max = pci_scan_child_bus(child);
925 } else { 924 } else {
926 /* 925 /*
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 6a30252cd79f..b03373fd05ca 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -1907,11 +1907,27 @@ static void quirk_netmos(struct pci_dev *dev)
1907DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_NETMOS, PCI_ANY_ID, 1907DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_NETMOS, PCI_ANY_ID,
1908 PCI_CLASS_COMMUNICATION_SERIAL, 8, quirk_netmos); 1908 PCI_CLASS_COMMUNICATION_SERIAL, 8, quirk_netmos);
1909 1909
1910/*
1911 * Quirk non-zero PCI functions to route VPD access through function 0 for
1912 * devices that share VPD resources between functions. The functions are
1913 * expected to be identical devices.
1914 */
1910static void quirk_f0_vpd_link(struct pci_dev *dev) 1915static void quirk_f0_vpd_link(struct pci_dev *dev)
1911{ 1916{
1912 if (!dev->multifunction || !PCI_FUNC(dev->devfn)) 1917 struct pci_dev *f0;
1918
1919 if (!PCI_FUNC(dev->devfn))
1913 return; 1920 return;
1914 dev->dev_flags |= PCI_DEV_FLAGS_VPD_REF_F0; 1921
1922 f0 = pci_get_slot(dev->bus, PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
1923 if (!f0)
1924 return;
1925
1926 if (f0->vpd && dev->class == f0->class &&
1927 dev->vendor == f0->vendor && dev->device == f0->device)
1928 dev->dev_flags |= PCI_DEV_FLAGS_VPD_REF_F0;
1929
1930 pci_dev_put(f0);
1915} 1931}
1916DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, 1932DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
1917 PCI_CLASS_NETWORK_ETHERNET, 8, quirk_f0_vpd_link); 1933 PCI_CLASS_NETWORK_ETHERNET, 8, quirk_f0_vpd_link);
diff --git a/drivers/pinctrl/bcm/pinctrl-cygnus-gpio.c b/drivers/pinctrl/bcm/pinctrl-cygnus-gpio.c
index 7d9482bf8252..1ca783098e47 100644
--- a/drivers/pinctrl/bcm/pinctrl-cygnus-gpio.c
+++ b/drivers/pinctrl/bcm/pinctrl-cygnus-gpio.c
@@ -143,7 +143,7 @@ static inline bool cygnus_get_bit(struct cygnus_gpio *chip, unsigned int reg,
143 return !!(readl(chip->base + offset) & BIT(shift)); 143 return !!(readl(chip->base + offset) & BIT(shift));
144} 144}
145 145
146static void cygnus_gpio_irq_handler(unsigned int irq, struct irq_desc *desc) 146static void cygnus_gpio_irq_handler(struct irq_desc *desc)
147{ 147{
148 struct gpio_chip *gc = irq_desc_get_handler_data(desc); 148 struct gpio_chip *gc = irq_desc_get_handler_data(desc);
149 struct cygnus_gpio *chip = to_cygnus_gpio(gc); 149 struct cygnus_gpio *chip = to_cygnus_gpio(gc);
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index 69723e07036b..9638a00c67c2 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -349,6 +349,9 @@ static bool pinctrl_ready_for_gpio_range(unsigned gpio)
349 struct pinctrl_gpio_range *range = NULL; 349 struct pinctrl_gpio_range *range = NULL;
350 struct gpio_chip *chip = gpio_to_chip(gpio); 350 struct gpio_chip *chip = gpio_to_chip(gpio);
351 351
352 if (WARN(!chip, "no gpio_chip for gpio%i?", gpio))
353 return false;
354
352 mutex_lock(&pinctrldev_list_mutex); 355 mutex_lock(&pinctrldev_list_mutex);
353 356
354 /* Loop over the pin controllers */ 357 /* Loop over the pin controllers */
diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c
index dac4865f3203..f79ea430f651 100644
--- a/drivers/pinctrl/intel/pinctrl-baytrail.c
+++ b/drivers/pinctrl/intel/pinctrl-baytrail.c
@@ -425,7 +425,7 @@ static void byt_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
425 } 425 }
426} 426}
427 427
428static void byt_gpio_irq_handler(unsigned irq, struct irq_desc *desc) 428static void byt_gpio_irq_handler(struct irq_desc *desc)
429{ 429{
430 struct irq_data *data = irq_desc_get_irq_data(desc); 430 struct irq_data *data = irq_desc_get_irq_data(desc);
431 struct byt_gpio *vg = to_byt_gpio(irq_desc_get_handler_data(desc)); 431 struct byt_gpio *vg = to_byt_gpio(irq_desc_get_handler_data(desc));
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
index 2d5d3ddc36e5..270c127e03ea 100644
--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
+++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
@@ -1414,7 +1414,7 @@ static struct irq_chip chv_gpio_irqchip = {
1414 .flags = IRQCHIP_SKIP_SET_WAKE, 1414 .flags = IRQCHIP_SKIP_SET_WAKE,
1415}; 1415};
1416 1416
1417static void chv_gpio_irq_handler(unsigned irq, struct irq_desc *desc) 1417static void chv_gpio_irq_handler(struct irq_desc *desc)
1418{ 1418{
1419 struct gpio_chip *gc = irq_desc_get_handler_data(desc); 1419 struct gpio_chip *gc = irq_desc_get_handler_data(desc);
1420 struct chv_pinctrl *pctrl = gpiochip_to_pinctrl(gc); 1420 struct chv_pinctrl *pctrl = gpiochip_to_pinctrl(gc);
diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
index bb377c110541..54848b8decef 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.c
+++ b/drivers/pinctrl/intel/pinctrl-intel.c
@@ -836,7 +836,7 @@ static void intel_gpio_community_irq_handler(struct gpio_chip *gc,
836 } 836 }
837} 837}
838 838
839static void intel_gpio_irq_handler(unsigned irq, struct irq_desc *desc) 839static void intel_gpio_irq_handler(struct irq_desc *desc)
840{ 840{
841 struct gpio_chip *gc = irq_desc_get_handler_data(desc); 841 struct gpio_chip *gc = irq_desc_get_handler_data(desc);
842 struct intel_pinctrl *pctrl = gpiochip_to_pinctrl(gc); 842 struct intel_pinctrl *pctrl = gpiochip_to_pinctrl(gc);
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
index 7726c6caaf83..1b22f96ba839 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
@@ -1190,7 +1190,7 @@ mtk_eint_debounce_process(struct mtk_pinctrl *pctl, int index)
1190 } 1190 }
1191} 1191}
1192 1192
1193static void mtk_eint_irq_handler(unsigned irq, struct irq_desc *desc) 1193static void mtk_eint_irq_handler(struct irq_desc *desc)
1194{ 1194{
1195 struct irq_chip *chip = irq_desc_get_chip(desc); 1195 struct irq_chip *chip = irq_desc_get_chip(desc);
1196 struct mtk_pinctrl *pctl = irq_desc_get_handler_data(desc); 1196 struct mtk_pinctrl *pctl = irq_desc_get_handler_data(desc);
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik.c b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
index 352ede13a9e9..96cf03908e93 100644
--- a/drivers/pinctrl/nomadik/pinctrl-nomadik.c
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
@@ -860,7 +860,7 @@ static void __nmk_gpio_irq_handler(struct irq_desc *desc, u32 status)
860 chained_irq_exit(host_chip, desc); 860 chained_irq_exit(host_chip, desc);
861} 861}
862 862
863static void nmk_gpio_irq_handler(unsigned int irq, struct irq_desc *desc) 863static void nmk_gpio_irq_handler(struct irq_desc *desc)
864{ 864{
865 struct gpio_chip *chip = irq_desc_get_handler_data(desc); 865 struct gpio_chip *chip = irq_desc_get_handler_data(desc);
866 struct nmk_gpio_chip *nmk_chip = container_of(chip, struct nmk_gpio_chip, chip); 866 struct nmk_gpio_chip *nmk_chip = container_of(chip, struct nmk_gpio_chip, chip);
@@ -873,7 +873,7 @@ static void nmk_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
873 __nmk_gpio_irq_handler(desc, status); 873 __nmk_gpio_irq_handler(desc, status);
874} 874}
875 875
876static void nmk_gpio_latent_irq_handler(unsigned int irq, struct irq_desc *desc) 876static void nmk_gpio_latent_irq_handler(struct irq_desc *desc)
877{ 877{
878 struct gpio_chip *chip = irq_desc_get_handler_data(desc); 878 struct gpio_chip *chip = irq_desc_get_handler_data(desc);
879 struct nmk_gpio_chip *nmk_chip = container_of(chip, struct nmk_gpio_chip, chip); 879 struct nmk_gpio_chip *nmk_chip = container_of(chip, struct nmk_gpio_chip, chip);
diff --git a/drivers/pinctrl/pinctrl-adi2.c b/drivers/pinctrl/pinctrl-adi2.c
index a5976ebc4482..f6be68518c87 100644
--- a/drivers/pinctrl/pinctrl-adi2.c
+++ b/drivers/pinctrl/pinctrl-adi2.c
@@ -530,8 +530,7 @@ static inline void preflow_handler(struct irq_desc *desc)
530static inline void preflow_handler(struct irq_desc *desc) { } 530static inline void preflow_handler(struct irq_desc *desc) { }
531#endif 531#endif
532 532
533static void adi_gpio_handle_pint_irq(unsigned int inta_irq, 533static void adi_gpio_handle_pint_irq(struct irq_desc *desc)
534 struct irq_desc *desc)
535{ 534{
536 u32 request; 535 u32 request;
537 u32 level_mask, hwirq; 536 u32 level_mask, hwirq;
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
index 5e86bb8ca80e..3318f1d6193c 100644
--- a/drivers/pinctrl/pinctrl-amd.c
+++ b/drivers/pinctrl/pinctrl-amd.c
@@ -492,15 +492,15 @@ static struct irq_chip amd_gpio_irqchip = {
492 .irq_set_type = amd_gpio_irq_set_type, 492 .irq_set_type = amd_gpio_irq_set_type,
493}; 493};
494 494
495static void amd_gpio_irq_handler(unsigned int __irq, struct irq_desc *desc) 495static void amd_gpio_irq_handler(struct irq_desc *desc)
496{ 496{
497 unsigned int irq = irq_desc_get_irq(desc);
498 u32 i; 497 u32 i;
499 u32 off; 498 u32 off;
500 u32 reg; 499 u32 reg;
501 u32 pin_reg; 500 u32 pin_reg;
502 u64 reg64; 501 u64 reg64;
503 int handled = 0; 502 int handled = 0;
503 unsigned int irq;
504 unsigned long flags; 504 unsigned long flags;
505 struct irq_chip *chip = irq_desc_get_chip(desc); 505 struct irq_chip *chip = irq_desc_get_chip(desc);
506 struct gpio_chip *gc = irq_desc_get_handler_data(desc); 506 struct gpio_chip *gc = irq_desc_get_handler_data(desc);
@@ -541,7 +541,7 @@ static void amd_gpio_irq_handler(unsigned int __irq, struct irq_desc *desc)
541 } 541 }
542 542
543 if (handled == 0) 543 if (handled == 0)
544 handle_bad_irq(irq, desc); 544 handle_bad_irq(desc);
545 545
546 spin_lock_irqsave(&gpio_dev->lock, flags); 546 spin_lock_irqsave(&gpio_dev->lock, flags);
547 reg = readl(gpio_dev->base + WAKE_INT_MASTER_REG); 547 reg = readl(gpio_dev->base + WAKE_INT_MASTER_REG);
diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c
index bae0012ee356..b0fde0f385e6 100644
--- a/drivers/pinctrl/pinctrl-at91.c
+++ b/drivers/pinctrl/pinctrl-at91.c
@@ -1585,7 +1585,7 @@ static struct irq_chip gpio_irqchip = {
1585 .irq_set_wake = gpio_irq_set_wake, 1585 .irq_set_wake = gpio_irq_set_wake,
1586}; 1586};
1587 1587
1588static void gpio_irq_handler(unsigned irq, struct irq_desc *desc) 1588static void gpio_irq_handler(struct irq_desc *desc)
1589{ 1589{
1590 struct irq_chip *chip = irq_desc_get_chip(desc); 1590 struct irq_chip *chip = irq_desc_get_chip(desc);
1591 struct gpio_chip *gpio_chip = irq_desc_get_handler_data(desc); 1591 struct gpio_chip *gpio_chip = irq_desc_get_handler_data(desc);
diff --git a/drivers/pinctrl/pinctrl-coh901.c b/drivers/pinctrl/pinctrl-coh901.c
index 3731cc67a88b..9c9b88934bcc 100644
--- a/drivers/pinctrl/pinctrl-coh901.c
+++ b/drivers/pinctrl/pinctrl-coh901.c
@@ -519,7 +519,7 @@ static struct irq_chip u300_gpio_irqchip = {
519 .irq_set_type = u300_gpio_irq_type, 519 .irq_set_type = u300_gpio_irq_type,
520}; 520};
521 521
522static void u300_gpio_irq_handler(unsigned __irq, struct irq_desc *desc) 522static void u300_gpio_irq_handler(struct irq_desc *desc)
523{ 523{
524 unsigned int irq = irq_desc_get_irq(desc); 524 unsigned int irq = irq_desc_get_irq(desc);
525 struct irq_chip *parent_chip = irq_desc_get_chip(desc); 525 struct irq_chip *parent_chip = irq_desc_get_chip(desc);
diff --git a/drivers/pinctrl/pinctrl-digicolor.c b/drivers/pinctrl/pinctrl-digicolor.c
index 461fffc4c62a..11f8b835d3b6 100644
--- a/drivers/pinctrl/pinctrl-digicolor.c
+++ b/drivers/pinctrl/pinctrl-digicolor.c
@@ -337,9 +337,9 @@ static int dc_pinctrl_probe(struct platform_device *pdev)
337 pmap->dev = &pdev->dev; 337 pmap->dev = &pdev->dev;
338 338
339 pmap->pctl = pinctrl_register(pctl_desc, &pdev->dev, pmap); 339 pmap->pctl = pinctrl_register(pctl_desc, &pdev->dev, pmap);
340 if (!pmap->pctl) { 340 if (IS_ERR(pmap->pctl)) {
341 dev_err(&pdev->dev, "pinctrl driver registration failed\n"); 341 dev_err(&pdev->dev, "pinctrl driver registration failed\n");
342 return -EINVAL; 342 return PTR_ERR(pmap->pctl);
343 } 343 }
344 344
345 ret = dc_gpiochip_add(pmap, pdev->dev.of_node); 345 ret = dc_gpiochip_add(pmap, pdev->dev.of_node);
diff --git a/drivers/pinctrl/pinctrl-pistachio.c b/drivers/pinctrl/pinctrl-pistachio.c
index 3dc2ae15f3a1..952b1c623887 100644
--- a/drivers/pinctrl/pinctrl-pistachio.c
+++ b/drivers/pinctrl/pinctrl-pistachio.c
@@ -1303,20 +1303,18 @@ static int pistachio_gpio_irq_set_type(struct irq_data *data, unsigned int type)
1303 } 1303 }
1304 1304
1305 if (type & IRQ_TYPE_LEVEL_MASK) 1305 if (type & IRQ_TYPE_LEVEL_MASK)
1306 __irq_set_handler_locked(data->irq, handle_level_irq); 1306 irq_set_handler_locked(data, handle_level_irq);
1307 else 1307 else
1308 __irq_set_handler_locked(data->irq, handle_edge_irq); 1308 irq_set_handler_locked(data, handle_edge_irq);
1309 1309
1310 return 0; 1310 return 0;
1311} 1311}
1312 1312
1313static void pistachio_gpio_irq_handler(unsigned int __irq, 1313static void pistachio_gpio_irq_handler(struct irq_desc *desc)
1314 struct irq_desc *desc)
1315{ 1314{
1316 unsigned int irq = irq_desc_get_irq(desc);
1317 struct gpio_chip *gc = irq_desc_get_handler_data(desc); 1315 struct gpio_chip *gc = irq_desc_get_handler_data(desc);
1318 struct pistachio_gpio_bank *bank = gc_to_bank(gc); 1316 struct pistachio_gpio_bank *bank = gc_to_bank(gc);
1319 struct irq_chip *chip = irq_get_chip(irq); 1317 struct irq_chip *chip = irq_desc_get_chip(desc);
1320 unsigned long pending; 1318 unsigned long pending;
1321 unsigned int pin; 1319 unsigned int pin;
1322 1320
diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c
index c5246c05f70c..88bb707e107a 100644
--- a/drivers/pinctrl/pinctrl-rockchip.c
+++ b/drivers/pinctrl/pinctrl-rockchip.c
@@ -1475,7 +1475,7 @@ static const struct gpio_chip rockchip_gpiolib_chip = {
1475 * Interrupt handling 1475 * Interrupt handling
1476 */ 1476 */
1477 1477
1478static void rockchip_irq_demux(unsigned int __irq, struct irq_desc *desc) 1478static void rockchip_irq_demux(struct irq_desc *desc)
1479{ 1479{
1480 struct irq_chip *chip = irq_desc_get_chip(desc); 1480 struct irq_chip *chip = irq_desc_get_chip(desc);
1481 struct rockchip_pin_bank *bank = irq_desc_get_handler_data(desc); 1481 struct rockchip_pin_bank *bank = irq_desc_get_handler_data(desc);
diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
index bf548c2a7a9d..ef04b962c3d5 100644
--- a/drivers/pinctrl/pinctrl-single.c
+++ b/drivers/pinctrl/pinctrl-single.c
@@ -1679,7 +1679,7 @@ static irqreturn_t pcs_irq_handler(int irq, void *d)
1679 * Use this if you have a separate interrupt for each 1679 * Use this if you have a separate interrupt for each
1680 * pinctrl-single instance. 1680 * pinctrl-single instance.
1681 */ 1681 */
1682static void pcs_irq_chain_handler(unsigned int irq, struct irq_desc *desc) 1682static void pcs_irq_chain_handler(struct irq_desc *desc)
1683{ 1683{
1684 struct pcs_soc_data *pcs_soc = irq_desc_get_handler_data(desc); 1684 struct pcs_soc_data *pcs_soc = irq_desc_get_handler_data(desc);
1685 struct irq_chip *chip; 1685 struct irq_chip *chip;
diff --git a/drivers/pinctrl/pinctrl-st.c b/drivers/pinctrl/pinctrl-st.c
index f8338d2e6b6b..389526e704fb 100644
--- a/drivers/pinctrl/pinctrl-st.c
+++ b/drivers/pinctrl/pinctrl-st.c
@@ -1460,7 +1460,7 @@ static void __gpio_irq_handler(struct st_gpio_bank *bank)
1460 } 1460 }
1461} 1461}
1462 1462
1463static void st_gpio_irq_handler(unsigned irq, struct irq_desc *desc) 1463static void st_gpio_irq_handler(struct irq_desc *desc)
1464{ 1464{
1465 /* interrupt dedicated per bank */ 1465 /* interrupt dedicated per bank */
1466 struct irq_chip *chip = irq_desc_get_chip(desc); 1466 struct irq_chip *chip = irq_desc_get_chip(desc);
@@ -1472,7 +1472,7 @@ static void st_gpio_irq_handler(unsigned irq, struct irq_desc *desc)
1472 chained_irq_exit(chip, desc); 1472 chained_irq_exit(chip, desc);
1473} 1473}
1474 1474
1475static void st_gpio_irqmux_handler(unsigned irq, struct irq_desc *desc) 1475static void st_gpio_irqmux_handler(struct irq_desc *desc)
1476{ 1476{
1477 struct irq_chip *chip = irq_desc_get_chip(desc); 1477 struct irq_chip *chip = irq_desc_get_chip(desc);
1478 struct st_pinctrl *info = irq_desc_get_handler_data(desc); 1478 struct st_pinctrl *info = irq_desc_get_handler_data(desc);
diff --git a/drivers/pinctrl/pinmux.c b/drivers/pinctrl/pinmux.c
index 67e08cb315c4..29984b36926a 100644
--- a/drivers/pinctrl/pinmux.c
+++ b/drivers/pinctrl/pinmux.c
@@ -313,8 +313,7 @@ static int pinmux_func_name_to_selector(struct pinctrl_dev *pctldev,
313 313
314 /* See if this pctldev has this function */ 314 /* See if this pctldev has this function */
315 while (selector < nfuncs) { 315 while (selector < nfuncs) {
316 const char *fname = ops->get_function_name(pctldev, 316 const char *fname = ops->get_function_name(pctldev, selector);
317 selector);
318 317
319 if (!strcmp(function, fname)) 318 if (!strcmp(function, fname))
320 return selector; 319 return selector;
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index 492cdd51dc5c..a0c7407c1cac 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -765,9 +765,8 @@ static struct irq_chip msm_gpio_irq_chip = {
765 .irq_set_wake = msm_gpio_irq_set_wake, 765 .irq_set_wake = msm_gpio_irq_set_wake,
766}; 766};
767 767
768static void msm_gpio_irq_handler(unsigned int __irq, struct irq_desc *desc) 768static void msm_gpio_irq_handler(struct irq_desc *desc)
769{ 769{
770 unsigned int irq = irq_desc_get_irq(desc);
771 struct gpio_chip *gc = irq_desc_get_handler_data(desc); 770 struct gpio_chip *gc = irq_desc_get_handler_data(desc);
772 const struct msm_pingroup *g; 771 const struct msm_pingroup *g;
773 struct msm_pinctrl *pctrl = to_msm_pinctrl(gc); 772 struct msm_pinctrl *pctrl = to_msm_pinctrl(gc);
@@ -795,7 +794,7 @@ static void msm_gpio_irq_handler(unsigned int __irq, struct irq_desc *desc)
795 794
796 /* No interrupts were flagged */ 795 /* No interrupts were flagged */
797 if (handled == 0) 796 if (handled == 0)
798 handle_bad_irq(irq, desc); 797 handle_bad_irq(desc);
799 798
800 chained_irq_exit(chip, desc); 799 chained_irq_exit(chip, desc);
801} 800}
diff --git a/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
index c978b311031b..e1a3721bc8e5 100644
--- a/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
+++ b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
@@ -723,9 +723,9 @@ static int pm8xxx_gpio_probe(struct platform_device *pdev)
723#endif 723#endif
724 724
725 pctrl->pctrl = pinctrl_register(&pctrl->desc, &pdev->dev, pctrl); 725 pctrl->pctrl = pinctrl_register(&pctrl->desc, &pdev->dev, pctrl);
726 if (!pctrl->pctrl) { 726 if (IS_ERR(pctrl->pctrl)) {
727 dev_err(&pdev->dev, "couldn't register pm8xxx gpio driver\n"); 727 dev_err(&pdev->dev, "couldn't register pm8xxx gpio driver\n");
728 return -ENODEV; 728 return PTR_ERR(pctrl->pctrl);
729 } 729 }
730 730
731 pctrl->chip = pm8xxx_gpio_template; 731 pctrl->chip = pm8xxx_gpio_template;
diff --git a/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c b/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c
index 2d1b69f171be..6652b8d7f707 100644
--- a/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c
+++ b/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c
@@ -814,9 +814,9 @@ static int pm8xxx_mpp_probe(struct platform_device *pdev)
814#endif 814#endif
815 815
816 pctrl->pctrl = pinctrl_register(&pctrl->desc, &pdev->dev, pctrl); 816 pctrl->pctrl = pinctrl_register(&pctrl->desc, &pdev->dev, pctrl);
817 if (!pctrl->pctrl) { 817 if (IS_ERR(pctrl->pctrl)) {
818 dev_err(&pdev->dev, "couldn't register pm8xxx mpp driver\n"); 818 dev_err(&pdev->dev, "couldn't register pm8xxx mpp driver\n");
819 return -ENODEV; 819 return PTR_ERR(pctrl->pctrl);
820 } 820 }
821 821
822 pctrl->chip = pm8xxx_mpp_template; 822 pctrl->chip = pm8xxx_mpp_template;
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.c b/drivers/pinctrl/samsung/pinctrl-exynos.c
index 5f45caaef46d..71ccf6a90b22 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos.c
+++ b/drivers/pinctrl/samsung/pinctrl-exynos.c
@@ -419,7 +419,7 @@ static const struct of_device_id exynos_wkup_irq_ids[] = {
419}; 419};
420 420
421/* interrupt handler for wakeup interrupts 0..15 */ 421/* interrupt handler for wakeup interrupts 0..15 */
422static void exynos_irq_eint0_15(unsigned int irq, struct irq_desc *desc) 422static void exynos_irq_eint0_15(struct irq_desc *desc)
423{ 423{
424 struct exynos_weint_data *eintd = irq_desc_get_handler_data(desc); 424 struct exynos_weint_data *eintd = irq_desc_get_handler_data(desc);
425 struct samsung_pin_bank *bank = eintd->bank; 425 struct samsung_pin_bank *bank = eintd->bank;
@@ -451,7 +451,7 @@ static inline void exynos_irq_demux_eint(unsigned long pend,
451} 451}
452 452
453/* interrupt handler for wakeup interrupt 16 */ 453/* interrupt handler for wakeup interrupt 16 */
454static void exynos_irq_demux_eint16_31(unsigned int irq, struct irq_desc *desc) 454static void exynos_irq_demux_eint16_31(struct irq_desc *desc)
455{ 455{
456 struct irq_chip *chip = irq_desc_get_chip(desc); 456 struct irq_chip *chip = irq_desc_get_chip(desc);
457 struct exynos_muxed_weint_data *eintd = irq_desc_get_handler_data(desc); 457 struct exynos_muxed_weint_data *eintd = irq_desc_get_handler_data(desc);
diff --git a/drivers/pinctrl/samsung/pinctrl-s3c24xx.c b/drivers/pinctrl/samsung/pinctrl-s3c24xx.c
index 019844d479bb..3d92f827da7a 100644
--- a/drivers/pinctrl/samsung/pinctrl-s3c24xx.c
+++ b/drivers/pinctrl/samsung/pinctrl-s3c24xx.c
@@ -240,7 +240,7 @@ static struct irq_chip s3c2410_eint0_3_chip = {
240 .irq_set_type = s3c24xx_eint_type, 240 .irq_set_type = s3c24xx_eint_type,
241}; 241};
242 242
243static void s3c2410_demux_eint0_3(unsigned int irq, struct irq_desc *desc) 243static void s3c2410_demux_eint0_3(struct irq_desc *desc)
244{ 244{
245 struct irq_data *data = irq_desc_get_irq_data(desc); 245 struct irq_data *data = irq_desc_get_irq_data(desc);
246 struct s3c24xx_eint_data *eint_data = irq_desc_get_handler_data(desc); 246 struct s3c24xx_eint_data *eint_data = irq_desc_get_handler_data(desc);
@@ -295,7 +295,7 @@ static struct irq_chip s3c2412_eint0_3_chip = {
295 .irq_set_type = s3c24xx_eint_type, 295 .irq_set_type = s3c24xx_eint_type,
296}; 296};
297 297
298static void s3c2412_demux_eint0_3(unsigned int irq, struct irq_desc *desc) 298static void s3c2412_demux_eint0_3(struct irq_desc *desc)
299{ 299{
300 struct s3c24xx_eint_data *eint_data = irq_desc_get_handler_data(desc); 300 struct s3c24xx_eint_data *eint_data = irq_desc_get_handler_data(desc);
301 struct irq_data *data = irq_desc_get_irq_data(desc); 301 struct irq_data *data = irq_desc_get_irq_data(desc);
@@ -361,7 +361,7 @@ static inline void s3c24xx_demux_eint(struct irq_desc *desc,
361 u32 offset, u32 range) 361 u32 offset, u32 range)
362{ 362{
363 struct s3c24xx_eint_data *data = irq_desc_get_handler_data(desc); 363 struct s3c24xx_eint_data *data = irq_desc_get_handler_data(desc);
364 struct irq_chip *chip = irq_desc_get_irq_chip(desc); 364 struct irq_chip *chip = irq_desc_get_chip(desc);
365 struct samsung_pinctrl_drv_data *d = data->drvdata; 365 struct samsung_pinctrl_drv_data *d = data->drvdata;
366 unsigned int pend, mask; 366 unsigned int pend, mask;
367 367
@@ -388,12 +388,12 @@ static inline void s3c24xx_demux_eint(struct irq_desc *desc,
388 chained_irq_exit(chip, desc); 388 chained_irq_exit(chip, desc);
389} 389}
390 390
391static void s3c24xx_demux_eint4_7(unsigned int irq, struct irq_desc *desc) 391static void s3c24xx_demux_eint4_7(struct irq_desc *desc)
392{ 392{
393 s3c24xx_demux_eint(desc, 0, 0xf0); 393 s3c24xx_demux_eint(desc, 0, 0xf0);
394} 394}
395 395
396static void s3c24xx_demux_eint8_23(unsigned int irq, struct irq_desc *desc) 396static void s3c24xx_demux_eint8_23(struct irq_desc *desc)
397{ 397{
398 s3c24xx_demux_eint(desc, 8, 0xffff00); 398 s3c24xx_demux_eint(desc, 8, 0xffff00);
399} 399}
diff --git a/drivers/pinctrl/samsung/pinctrl-s3c64xx.c b/drivers/pinctrl/samsung/pinctrl-s3c64xx.c
index f5ea40a69711..43407ab248f5 100644
--- a/drivers/pinctrl/samsung/pinctrl-s3c64xx.c
+++ b/drivers/pinctrl/samsung/pinctrl-s3c64xx.c
@@ -407,7 +407,7 @@ static const struct irq_domain_ops s3c64xx_gpio_irqd_ops = {
407 .xlate = irq_domain_xlate_twocell, 407 .xlate = irq_domain_xlate_twocell,
408}; 408};
409 409
410static void s3c64xx_eint_gpio_irq(unsigned int irq, struct irq_desc *desc) 410static void s3c64xx_eint_gpio_irq(struct irq_desc *desc)
411{ 411{
412 struct irq_chip *chip = irq_desc_get_chip(desc); 412 struct irq_chip *chip = irq_desc_get_chip(desc);
413 struct s3c64xx_eint_gpio_data *data = irq_desc_get_handler_data(desc); 413 struct s3c64xx_eint_gpio_data *data = irq_desc_get_handler_data(desc);
@@ -631,22 +631,22 @@ static inline void s3c64xx_irq_demux_eint(struct irq_desc *desc, u32 range)
631 chained_irq_exit(chip, desc); 631 chained_irq_exit(chip, desc);
632} 632}
633 633
634static void s3c64xx_demux_eint0_3(unsigned int irq, struct irq_desc *desc) 634static void s3c64xx_demux_eint0_3(struct irq_desc *desc)
635{ 635{
636 s3c64xx_irq_demux_eint(desc, 0xf); 636 s3c64xx_irq_demux_eint(desc, 0xf);
637} 637}
638 638
639static void s3c64xx_demux_eint4_11(unsigned int irq, struct irq_desc *desc) 639static void s3c64xx_demux_eint4_11(struct irq_desc *desc)
640{ 640{
641 s3c64xx_irq_demux_eint(desc, 0xff0); 641 s3c64xx_irq_demux_eint(desc, 0xff0);
642} 642}
643 643
644static void s3c64xx_demux_eint12_19(unsigned int irq, struct irq_desc *desc) 644static void s3c64xx_demux_eint12_19(struct irq_desc *desc)
645{ 645{
646 s3c64xx_irq_demux_eint(desc, 0xff000); 646 s3c64xx_irq_demux_eint(desc, 0xff000);
647} 647}
648 648
649static void s3c64xx_demux_eint20_27(unsigned int irq, struct irq_desc *desc) 649static void s3c64xx_demux_eint20_27(struct irq_desc *desc)
650{ 650{
651 s3c64xx_irq_demux_eint(desc, 0xff00000); 651 s3c64xx_irq_demux_eint(desc, 0xff00000);
652} 652}
diff --git a/drivers/pinctrl/sirf/pinctrl-atlas7.c b/drivers/pinctrl/sirf/pinctrl-atlas7.c
index 9df0c5f25824..0d24d9e4b70c 100644
--- a/drivers/pinctrl/sirf/pinctrl-atlas7.c
+++ b/drivers/pinctrl/sirf/pinctrl-atlas7.c
@@ -4489,7 +4489,7 @@ static struct irq_chip atlas7_gpio_irq_chip = {
4489 .irq_set_type = atlas7_gpio_irq_type, 4489 .irq_set_type = atlas7_gpio_irq_type,
4490}; 4490};
4491 4491
4492static void atlas7_gpio_handle_irq(unsigned int __irq, struct irq_desc *desc) 4492static void atlas7_gpio_handle_irq(struct irq_desc *desc)
4493{ 4493{
4494 struct gpio_chip *gc = irq_desc_get_handler_data(desc); 4494 struct gpio_chip *gc = irq_desc_get_handler_data(desc);
4495 struct atlas7_gpio_chip *a7gc = to_atlas7_gpio(gc); 4495 struct atlas7_gpio_chip *a7gc = to_atlas7_gpio(gc);
@@ -4512,7 +4512,7 @@ static void atlas7_gpio_handle_irq(unsigned int __irq, struct irq_desc *desc)
4512 if (!status) { 4512 if (!status) {
4513 pr_warn("%s: gpio [%s] status %#x no interrupt is flaged\n", 4513 pr_warn("%s: gpio [%s] status %#x no interrupt is flaged\n",
4514 __func__, gc->label, status); 4514 __func__, gc->label, status);
4515 handle_bad_irq(irq, desc); 4515 handle_bad_irq(desc);
4516 return; 4516 return;
4517 } 4517 }
4518 4518
diff --git a/drivers/pinctrl/sirf/pinctrl-sirf.c b/drivers/pinctrl/sirf/pinctrl-sirf.c
index f8bd9fb52033..2a8d69725de8 100644
--- a/drivers/pinctrl/sirf/pinctrl-sirf.c
+++ b/drivers/pinctrl/sirf/pinctrl-sirf.c
@@ -545,7 +545,7 @@ static struct irq_chip sirfsoc_irq_chip = {
545 .irq_set_type = sirfsoc_gpio_irq_type, 545 .irq_set_type = sirfsoc_gpio_irq_type,
546}; 546};
547 547
548static void sirfsoc_gpio_handle_irq(unsigned int __irq, struct irq_desc *desc) 548static void sirfsoc_gpio_handle_irq(struct irq_desc *desc)
549{ 549{
550 unsigned int irq = irq_desc_get_irq(desc); 550 unsigned int irq = irq_desc_get_irq(desc);
551 struct gpio_chip *gc = irq_desc_get_handler_data(desc); 551 struct gpio_chip *gc = irq_desc_get_handler_data(desc);
@@ -570,7 +570,7 @@ static void sirfsoc_gpio_handle_irq(unsigned int __irq, struct irq_desc *desc)
570 printk(KERN_WARNING 570 printk(KERN_WARNING
571 "%s: gpio id %d status %#x no interrupt is flagged\n", 571 "%s: gpio id %d status %#x no interrupt is flagged\n",
572 __func__, bank->id, status); 572 __func__, bank->id, status);
573 handle_bad_irq(irq, desc); 573 handle_bad_irq(desc);
574 return; 574 return;
575 } 575 }
576 576
diff --git a/drivers/pinctrl/spear/pinctrl-plgpio.c b/drivers/pinctrl/spear/pinctrl-plgpio.c
index ae8f29fb5536..1f0af250dbb5 100644
--- a/drivers/pinctrl/spear/pinctrl-plgpio.c
+++ b/drivers/pinctrl/spear/pinctrl-plgpio.c
@@ -356,7 +356,7 @@ static struct irq_chip plgpio_irqchip = {
356 .irq_set_type = plgpio_irq_set_type, 356 .irq_set_type = plgpio_irq_set_type,
357}; 357};
358 358
359static void plgpio_irq_handler(unsigned irq, struct irq_desc *desc) 359static void plgpio_irq_handler(struct irq_desc *desc)
360{ 360{
361 struct gpio_chip *gc = irq_desc_get_handler_data(desc); 361 struct gpio_chip *gc = irq_desc_get_handler_data(desc);
362 struct plgpio *plgpio = container_of(gc, struct plgpio, chip); 362 struct plgpio *plgpio = container_of(gc, struct plgpio, chip);
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
index fb4669c0ce0e..38e0c7bdd2ac 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
@@ -617,13 +617,11 @@ static int sunxi_pinctrl_irq_set_type(struct irq_data *d, unsigned int type)
617 spin_lock_irqsave(&pctl->lock, flags); 617 spin_lock_irqsave(&pctl->lock, flags);
618 618
619 if (type & IRQ_TYPE_LEVEL_MASK) 619 if (type & IRQ_TYPE_LEVEL_MASK)
620 __irq_set_chip_handler_name_locked(d->irq, 620 irq_set_chip_handler_name_locked(d, &sunxi_pinctrl_level_irq_chip,
621 &sunxi_pinctrl_level_irq_chip, 621 handle_fasteoi_irq, NULL);
622 handle_fasteoi_irq, NULL);
623 else 622 else
624 __irq_set_chip_handler_name_locked(d->irq, 623 irq_set_chip_handler_name_locked(d, &sunxi_pinctrl_edge_irq_chip,
625 &sunxi_pinctrl_edge_irq_chip, 624 handle_edge_irq, NULL);
626 handle_edge_irq, NULL);
627 625
628 regval = readl(pctl->membase + reg); 626 regval = readl(pctl->membase + reg);
629 regval &= ~(IRQ_CFG_IRQ_MASK << index); 627 regval &= ~(IRQ_CFG_IRQ_MASK << index);
@@ -742,7 +740,7 @@ static struct irq_domain_ops sunxi_pinctrl_irq_domain_ops = {
742 .xlate = sunxi_pinctrl_irq_of_xlate, 740 .xlate = sunxi_pinctrl_irq_of_xlate,
743}; 741};
744 742
745static void sunxi_pinctrl_irq_handler(unsigned __irq, struct irq_desc *desc) 743static void sunxi_pinctrl_irq_handler(struct irq_desc *desc)
746{ 744{
747 unsigned int irq = irq_desc_get_irq(desc); 745 unsigned int irq = irq_desc_get_irq(desc);
748 struct irq_chip *chip = irq_desc_get_chip(desc); 746 struct irq_chip *chip = irq_desc_get_chip(desc);
diff --git a/drivers/platform/x86/acerhdf.c b/drivers/platform/x86/acerhdf.c
index 1ef02daddb60..460fa6708bfc 100644
--- a/drivers/platform/x86/acerhdf.c
+++ b/drivers/platform/x86/acerhdf.c
@@ -346,8 +346,7 @@ static void acerhdf_check_param(struct thermal_zone_device *thermal)
346 * as late as the polling interval is since we can't do that in the respective 346 * as late as the polling interval is since we can't do that in the respective
347 * accessors of the module parameters. 347 * accessors of the module parameters.
348 */ 348 */
349static int acerhdf_get_ec_temp(struct thermal_zone_device *thermal, 349static int acerhdf_get_ec_temp(struct thermal_zone_device *thermal, int *t)
350 unsigned long *t)
351{ 350{
352 int temp, err = 0; 351 int temp, err = 0;
353 352
@@ -453,7 +452,7 @@ static int acerhdf_get_trip_type(struct thermal_zone_device *thermal, int trip,
453} 452}
454 453
455static int acerhdf_get_trip_hyst(struct thermal_zone_device *thermal, int trip, 454static int acerhdf_get_trip_hyst(struct thermal_zone_device *thermal, int trip,
456 unsigned long *temp) 455 int *temp)
457{ 456{
458 if (trip != 0) 457 if (trip != 0)
459 return -EINVAL; 458 return -EINVAL;
@@ -464,7 +463,7 @@ static int acerhdf_get_trip_hyst(struct thermal_zone_device *thermal, int trip,
464} 463}
465 464
466static int acerhdf_get_trip_temp(struct thermal_zone_device *thermal, int trip, 465static int acerhdf_get_trip_temp(struct thermal_zone_device *thermal, int trip,
467 unsigned long *temp) 466 int *temp)
468{ 467{
469 if (trip == 0) 468 if (trip == 0)
470 *temp = fanon; 469 *temp = fanon;
@@ -477,7 +476,7 @@ static int acerhdf_get_trip_temp(struct thermal_zone_device *thermal, int trip,
477} 476}
478 477
479static int acerhdf_get_crit_temp(struct thermal_zone_device *thermal, 478static int acerhdf_get_crit_temp(struct thermal_zone_device *thermal,
480 unsigned long *temperature) 479 int *temperature)
481{ 480{
482 *temperature = ACERHDF_TEMP_CRIT; 481 *temperature = ACERHDF_TEMP_CRIT;
483 return 0; 482 return 0;
diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
index abdaed34c728..131fee2b093e 100644
--- a/drivers/platform/x86/asus-nb-wmi.c
+++ b/drivers/platform/x86/asus-nb-wmi.c
@@ -128,6 +128,24 @@ static const struct dmi_system_id asus_quirks[] = {
128 }, 128 },
129 { 129 {
130 .callback = dmi_matched, 130 .callback = dmi_matched,
131 .ident = "ASUSTeK COMPUTER INC. X456UA",
132 .matches = {
133 DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
134 DMI_MATCH(DMI_PRODUCT_NAME, "X456UA"),
135 },
136 .driver_data = &quirk_asus_wapf4,
137 },
138 {
139 .callback = dmi_matched,
140 .ident = "ASUSTeK COMPUTER INC. X456UF",
141 .matches = {
142 DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
143 DMI_MATCH(DMI_PRODUCT_NAME, "X456UF"),
144 },
145 .driver_data = &quirk_asus_wapf4,
146 },
147 {
148 .callback = dmi_matched,
131 .ident = "ASUSTeK COMPUTER INC. X501U", 149 .ident = "ASUSTeK COMPUTER INC. X501U",
132 .matches = { 150 .matches = {
133 DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), 151 DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
index 06697315a088..fb4dd7b3ee71 100644
--- a/drivers/platform/x86/hp-wmi.c
+++ b/drivers/platform/x86/hp-wmi.c
@@ -54,8 +54,9 @@ MODULE_ALIAS("wmi:5FB7F034-2C63-45e9-BE91-3D44E2C707E4");
54#define HPWMI_HARDWARE_QUERY 0x4 54#define HPWMI_HARDWARE_QUERY 0x4
55#define HPWMI_WIRELESS_QUERY 0x5 55#define HPWMI_WIRELESS_QUERY 0x5
56#define HPWMI_BIOS_QUERY 0x9 56#define HPWMI_BIOS_QUERY 0x9
57#define HPWMI_FEATURE_QUERY 0xb
57#define HPWMI_HOTKEY_QUERY 0xc 58#define HPWMI_HOTKEY_QUERY 0xc
58#define HPWMI_FEATURE_QUERY 0xd 59#define HPWMI_FEATURE2_QUERY 0xd
59#define HPWMI_WIRELESS2_QUERY 0x1b 60#define HPWMI_WIRELESS2_QUERY 0x1b
60#define HPWMI_POSTCODEERROR_QUERY 0x2a 61#define HPWMI_POSTCODEERROR_QUERY 0x2a
61 62
@@ -295,25 +296,33 @@ static int hp_wmi_tablet_state(void)
295 return (state & 0x4) ? 1 : 0; 296 return (state & 0x4) ? 1 : 0;
296} 297}
297 298
298static int __init hp_wmi_bios_2009_later(void) 299static int __init hp_wmi_bios_2008_later(void)
299{ 300{
300 int state = 0; 301 int state = 0;
301 int ret = hp_wmi_perform_query(HPWMI_FEATURE_QUERY, 0, &state, 302 int ret = hp_wmi_perform_query(HPWMI_FEATURE_QUERY, 0, &state,
302 sizeof(state), sizeof(state)); 303 sizeof(state), sizeof(state));
303 if (ret) 304 if (!ret)
304 return ret; 305 return 1;
305 306
306 return (state & 0x10) ? 1 : 0; 307 return (ret == HPWMI_RET_UNKNOWN_CMDTYPE) ? 0 : -ENXIO;
307} 308}
308 309
309static int hp_wmi_enable_hotkeys(void) 310static int __init hp_wmi_bios_2009_later(void)
310{ 311{
311 int ret; 312 int state = 0;
312 int query = 0x6e; 313 int ret = hp_wmi_perform_query(HPWMI_FEATURE2_QUERY, 0, &state,
314 sizeof(state), sizeof(state));
315 if (!ret)
316 return 1;
313 317
314 ret = hp_wmi_perform_query(HPWMI_BIOS_QUERY, 1, &query, sizeof(query), 318 return (ret == HPWMI_RET_UNKNOWN_CMDTYPE) ? 0 : -ENXIO;
315 0); 319}
316 320
321static int __init hp_wmi_enable_hotkeys(void)
322{
323 int value = 0x6e;
324 int ret = hp_wmi_perform_query(HPWMI_BIOS_QUERY, 1, &value,
325 sizeof(value), 0);
317 if (ret) 326 if (ret)
318 return -EINVAL; 327 return -EINVAL;
319 return 0; 328 return 0;
@@ -663,7 +672,7 @@ static int __init hp_wmi_input_setup(void)
663 hp_wmi_tablet_state()); 672 hp_wmi_tablet_state());
664 input_sync(hp_wmi_input_dev); 673 input_sync(hp_wmi_input_dev);
665 674
666 if (hp_wmi_bios_2009_later() == 4) 675 if (!hp_wmi_bios_2009_later() && hp_wmi_bios_2008_later())
667 hp_wmi_enable_hotkeys(); 676 hp_wmi_enable_hotkeys();
668 677
669 status = wmi_install_notify_handler(HPWMI_EVENT_GUID, hp_wmi_notify, NULL); 678 status = wmi_install_notify_handler(HPWMI_EVENT_GUID, hp_wmi_notify, NULL);
diff --git a/drivers/platform/x86/intel_mid_thermal.c b/drivers/platform/x86/intel_mid_thermal.c
index 0944e834af8d..9f713b832ba3 100644
--- a/drivers/platform/x86/intel_mid_thermal.c
+++ b/drivers/platform/x86/intel_mid_thermal.c
@@ -132,7 +132,7 @@ static int is_valid_adc(uint16_t adc_val, uint16_t min, uint16_t max)
132 * to achieve very close approximate temp value with less than 132 * to achieve very close approximate temp value with less than
133 * 0.5C error 133 * 0.5C error
134 */ 134 */
135static int adc_to_temp(int direct, uint16_t adc_val, unsigned long *tp) 135static int adc_to_temp(int direct, uint16_t adc_val, int *tp)
136{ 136{
137 int temp; 137 int temp;
138 138
@@ -174,14 +174,13 @@ static int adc_to_temp(int direct, uint16_t adc_val, unsigned long *tp)
174 * 174 *
175 * Can sleep 175 * Can sleep
176 */ 176 */
177static int mid_read_temp(struct thermal_zone_device *tzd, unsigned long *temp) 177static int mid_read_temp(struct thermal_zone_device *tzd, int *temp)
178{ 178{
179 struct thermal_device_info *td_info = tzd->devdata; 179 struct thermal_device_info *td_info = tzd->devdata;
180 uint16_t adc_val, addr; 180 uint16_t adc_val, addr;
181 uint8_t data = 0; 181 uint8_t data = 0;
182 int ret; 182 int ret;
183 unsigned long curr_temp; 183 int curr_temp;
184
185 184
186 addr = td_info->chnl_addr; 185 addr = td_info->chnl_addr;
187 186
@@ -453,7 +452,7 @@ static SIMPLE_DEV_PM_OPS(mid_thermal_pm,
453 * 452 *
454 * Can sleep 453 * Can sleep
455 */ 454 */
456static int read_curr_temp(struct thermal_zone_device *tzd, unsigned long *temp) 455static int read_curr_temp(struct thermal_zone_device *tzd, int *temp)
457{ 456{
458 WARN_ON(tzd == NULL); 457 WARN_ON(tzd == NULL);
459 return mid_read_temp(tzd, temp); 458 return mid_read_temp(tzd, temp);
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index 6740c513919c..f2372f400ddb 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -938,7 +938,7 @@ static int toshiba_usb_sleep_music_get(struct toshiba_acpi_dev *dev, u32 *state)
938 else if (result == TOS_NOT_SUPPORTED) 938 else if (result == TOS_NOT_SUPPORTED)
939 return -ENODEV; 939 return -ENODEV;
940 940
941 return result = TOS_SUCCESS ? 0 : -EIO; 941 return result == TOS_SUCCESS ? 0 : -EIO;
942} 942}
943 943
944static int toshiba_usb_sleep_music_set(struct toshiba_acpi_dev *dev, u32 state) 944static int toshiba_usb_sleep_music_set(struct toshiba_acpi_dev *dev, u32 state)
@@ -2398,11 +2398,9 @@ static int toshiba_acpi_setup_keyboard(struct toshiba_acpi_dev *dev)
2398 if (error) 2398 if (error)
2399 return error; 2399 return error;
2400 2400
2401 error = toshiba_hotkey_event_type_get(dev, &events_type); 2401 if (toshiba_hotkey_event_type_get(dev, &events_type))
2402 if (error) { 2402 pr_notice("Unable to query Hotkey Event Type\n");
2403 pr_err("Unable to query Hotkey Event Type\n"); 2403
2404 return error;
2405 }
2406 dev->hotkey_event_type = events_type; 2404 dev->hotkey_event_type = events_type;
2407 2405
2408 dev->hotkey_dev = input_allocate_device(); 2406 dev->hotkey_dev = input_allocate_device();
diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c
index aac47573f9ed..eb391a281833 100644
--- a/drivers/platform/x86/wmi.c
+++ b/drivers/platform/x86/wmi.c
@@ -194,34 +194,6 @@ static bool wmi_parse_guid(const u8 *src, u8 *dest)
194 return true; 194 return true;
195} 195}
196 196
197/*
198 * Convert a raw GUID to the ACII string representation
199 */
200static int wmi_gtoa(const char *in, char *out)
201{
202 int i;
203
204 for (i = 3; i >= 0; i--)
205 out += sprintf(out, "%02X", in[i] & 0xFF);
206
207 out += sprintf(out, "-");
208 out += sprintf(out, "%02X", in[5] & 0xFF);
209 out += sprintf(out, "%02X", in[4] & 0xFF);
210 out += sprintf(out, "-");
211 out += sprintf(out, "%02X", in[7] & 0xFF);
212 out += sprintf(out, "%02X", in[6] & 0xFF);
213 out += sprintf(out, "-");
214 out += sprintf(out, "%02X", in[8] & 0xFF);
215 out += sprintf(out, "%02X", in[9] & 0xFF);
216 out += sprintf(out, "-");
217
218 for (i = 10; i <= 15; i++)
219 out += sprintf(out, "%02X", in[i] & 0xFF);
220
221 *out = '\0';
222 return 0;
223}
224
225static bool find_guid(const char *guid_string, struct wmi_block **out) 197static bool find_guid(const char *guid_string, struct wmi_block **out)
226{ 198{
227 char tmp[16], guid_input[16]; 199 char tmp[16], guid_input[16];
@@ -457,11 +429,7 @@ EXPORT_SYMBOL_GPL(wmi_set_block);
457 429
458static void wmi_dump_wdg(const struct guid_block *g) 430static void wmi_dump_wdg(const struct guid_block *g)
459{ 431{
460 char guid_string[37]; 432 pr_info("%pUL:\n", g->guid);
461
462 wmi_gtoa(g->guid, guid_string);
463
464 pr_info("%s:\n", guid_string);
465 pr_info("\tobject_id: %c%c\n", g->object_id[0], g->object_id[1]); 433 pr_info("\tobject_id: %c%c\n", g->object_id[0], g->object_id[1]);
466 pr_info("\tnotify_id: %02X\n", g->notify_id); 434 pr_info("\tnotify_id: %02X\n", g->notify_id);
467 pr_info("\treserved: %02X\n", g->reserved); 435 pr_info("\treserved: %02X\n", g->reserved);
@@ -661,7 +629,6 @@ EXPORT_SYMBOL_GPL(wmi_has_guid);
661static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, 629static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
662 char *buf) 630 char *buf)
663{ 631{
664 char guid_string[37];
665 struct wmi_block *wblock; 632 struct wmi_block *wblock;
666 633
667 wblock = dev_get_drvdata(dev); 634 wblock = dev_get_drvdata(dev);
@@ -670,9 +637,7 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
670 return strlen(buf); 637 return strlen(buf);
671 } 638 }
672 639
673 wmi_gtoa(wblock->gblock.guid, guid_string); 640 return sprintf(buf, "wmi:%pUL\n", wblock->gblock.guid);
674
675 return sprintf(buf, "wmi:%s\n", guid_string);
676} 641}
677static DEVICE_ATTR_RO(modalias); 642static DEVICE_ATTR_RO(modalias);
678 643
@@ -695,7 +660,7 @@ static int wmi_dev_uevent(struct device *dev, struct kobj_uevent_env *env)
695 if (!wblock) 660 if (!wblock)
696 return -ENOMEM; 661 return -ENOMEM;
697 662
698 wmi_gtoa(wblock->gblock.guid, guid_string); 663 sprintf(guid_string, "%pUL", wblock->gblock.guid);
699 664
700 strcpy(&env->buf[env->buflen - 1], "wmi:"); 665 strcpy(&env->buf[env->buflen - 1], "wmi:");
701 memcpy(&env->buf[env->buflen - 1 + 4], guid_string, 36); 666 memcpy(&env->buf[env->buflen - 1 + 4], guid_string, 36);
@@ -721,12 +686,9 @@ static struct class wmi_class = {
721static int wmi_create_device(const struct guid_block *gblock, 686static int wmi_create_device(const struct guid_block *gblock,
722 struct wmi_block *wblock, acpi_handle handle) 687 struct wmi_block *wblock, acpi_handle handle)
723{ 688{
724 char guid_string[37];
725
726 wblock->dev.class = &wmi_class; 689 wblock->dev.class = &wmi_class;
727 690
728 wmi_gtoa(gblock->guid, guid_string); 691 dev_set_name(&wblock->dev, "%pUL", gblock->guid);
729 dev_set_name(&wblock->dev, "%s", guid_string);
730 692
731 dev_set_drvdata(&wblock->dev, wblock); 693 dev_set_drvdata(&wblock->dev, wblock);
732 694
@@ -877,7 +839,6 @@ static void acpi_wmi_notify(struct acpi_device *device, u32 event)
877 struct guid_block *block; 839 struct guid_block *block;
878 struct wmi_block *wblock; 840 struct wmi_block *wblock;
879 struct list_head *p; 841 struct list_head *p;
880 char guid_string[37];
881 842
882 list_for_each(p, &wmi_block_list) { 843 list_for_each(p, &wmi_block_list) {
883 wblock = list_entry(p, struct wmi_block, list); 844 wblock = list_entry(p, struct wmi_block, list);
@@ -888,8 +849,8 @@ static void acpi_wmi_notify(struct acpi_device *device, u32 event)
888 if (wblock->handler) 849 if (wblock->handler)
889 wblock->handler(event, wblock->handler_data); 850 wblock->handler(event, wblock->handler_data);
890 if (debug_event) { 851 if (debug_event) {
891 wmi_gtoa(wblock->gblock.guid, guid_string); 852 pr_info("DEBUG Event GUID: %pUL\n",
892 pr_info("DEBUG Event GUID: %s\n", guid_string); 853 wblock->gblock.guid);
893 } 854 }
894 855
895 acpi_bus_generate_netlink_event( 856 acpi_bus_generate_netlink_event(
diff --git a/drivers/power/charger-manager.c b/drivers/power/charger-manager.c
index 1c202ccbd2a6..907293e6f2a4 100644
--- a/drivers/power/charger-manager.c
+++ b/drivers/power/charger-manager.c
@@ -619,7 +619,7 @@ static int cm_get_battery_temperature(struct charger_manager *cm,
619 619
620#ifdef CONFIG_THERMAL 620#ifdef CONFIG_THERMAL
621 if (cm->tzd_batt) { 621 if (cm->tzd_batt) {
622 ret = thermal_zone_get_temp(cm->tzd_batt, (unsigned long *)temp); 622 ret = thermal_zone_get_temp(cm->tzd_batt, temp);
623 if (!ret) 623 if (!ret)
624 /* Calibrate temperature unit */ 624 /* Calibrate temperature unit */
625 *temp /= 100; 625 *temp /= 100;
diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
index 869284c2e1e8..456987c88baa 100644
--- a/drivers/power/power_supply_core.c
+++ b/drivers/power/power_supply_core.c
@@ -557,7 +557,7 @@ EXPORT_SYMBOL_GPL(power_supply_unreg_notifier);
557 557
558#ifdef CONFIG_THERMAL 558#ifdef CONFIG_THERMAL
559static int power_supply_read_temp(struct thermal_zone_device *tzd, 559static int power_supply_read_temp(struct thermal_zone_device *tzd,
560 unsigned long *temp) 560 int *temp)
561{ 561{
562 struct power_supply *psy; 562 struct power_supply *psy;
563 union power_supply_propval val; 563 union power_supply_propval val;
diff --git a/drivers/power/twl4030_charger.c b/drivers/power/twl4030_charger.c
index f4f2c1f76c32..74f2d3ff1d7c 100644
--- a/drivers/power/twl4030_charger.c
+++ b/drivers/power/twl4030_charger.c
@@ -91,7 +91,7 @@
91#define TWL4030_MSTATEC_COMPLETE1 0x0b 91#define TWL4030_MSTATEC_COMPLETE1 0x0b
92#define TWL4030_MSTATEC_COMPLETE4 0x0e 92#define TWL4030_MSTATEC_COMPLETE4 0x0e
93 93
94#if IS_ENABLED(CONFIG_TWL4030_MADC) 94#if IS_REACHABLE(CONFIG_TWL4030_MADC)
95/* 95/*
96 * If AC (Accessory Charger) voltage exceeds 4.5V (MADC 11) 96 * If AC (Accessory Charger) voltage exceeds 4.5V (MADC 11)
97 * then AC is available. 97 * then AC is available.
@@ -1057,13 +1057,9 @@ static int twl4030_bci_probe(struct platform_device *pdev)
1057 1057
1058 phynode = of_find_compatible_node(bci->dev->of_node->parent, 1058 phynode = of_find_compatible_node(bci->dev->of_node->parent,
1059 NULL, "ti,twl4030-usb"); 1059 NULL, "ti,twl4030-usb");
1060 if (phynode) { 1060 if (phynode)
1061 bci->transceiver = devm_usb_get_phy_by_node( 1061 bci->transceiver = devm_usb_get_phy_by_node(
1062 bci->dev, phynode, &bci->usb_nb); 1062 bci->dev, phynode, &bci->usb_nb);
1063 if (IS_ERR(bci->transceiver) &&
1064 PTR_ERR(bci->transceiver) == -EPROBE_DEFER)
1065 return -EPROBE_DEFER;
1066 }
1067 } 1063 }
1068 1064
1069 /* Enable interrupts now. */ 1065 /* Enable interrupts now. */
diff --git a/drivers/regulator/anatop-regulator.c b/drivers/regulator/anatop-regulator.c
index 738adfa5332b..52ea605f8130 100644
--- a/drivers/regulator/anatop-regulator.c
+++ b/drivers/regulator/anatop-regulator.c
@@ -318,6 +318,7 @@ static const struct of_device_id of_anatop_regulator_match_tbl[] = {
318 { .compatible = "fsl,anatop-regulator", }, 318 { .compatible = "fsl,anatop-regulator", },
319 { /* end */ } 319 { /* end */ }
320}; 320};
321MODULE_DEVICE_TABLE(of, of_anatop_regulator_match_tbl);
321 322
322static struct platform_driver anatop_regulator_driver = { 323static struct platform_driver anatop_regulator_driver = {
323 .driver = { 324 .driver = {
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 7a85ac9e32c5..7849187d91ae 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -1394,15 +1394,15 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
1394 return 0; 1394 return 0;
1395 1395
1396 r = regulator_dev_lookup(dev, rdev->supply_name, &ret); 1396 r = regulator_dev_lookup(dev, rdev->supply_name, &ret);
1397 if (ret == -ENODEV) {
1398 /*
1399 * No supply was specified for this regulator and
1400 * there will never be one.
1401 */
1402 return 0;
1403 }
1404
1405 if (!r) { 1397 if (!r) {
1398 if (ret == -ENODEV) {
1399 /*
1400 * No supply was specified for this regulator and
1401 * there will never be one.
1402 */
1403 return 0;
1404 }
1405
1406 if (have_full_constraints()) { 1406 if (have_full_constraints()) {
1407 r = dummy_regulator_rdev; 1407 r = dummy_regulator_rdev;
1408 } else { 1408 } else {
@@ -1422,11 +1422,10 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
1422 return ret; 1422 return ret;
1423 1423
1424 /* Cascade always-on state to supply */ 1424 /* Cascade always-on state to supply */
1425 if (_regulator_is_enabled(rdev)) { 1425 if (_regulator_is_enabled(rdev) && rdev->supply) {
1426 ret = regulator_enable(rdev->supply); 1426 ret = regulator_enable(rdev->supply);
1427 if (ret < 0) { 1427 if (ret < 0) {
1428 if (rdev->supply) 1428 _regulator_put(rdev->supply);
1429 _regulator_put(rdev->supply);
1430 return ret; 1429 return ret;
1431 } 1430 }
1432 } 1431 }
diff --git a/drivers/regulator/gpio-regulator.c b/drivers/regulator/gpio-regulator.c
index 464018de7e97..7bba8b747f30 100644
--- a/drivers/regulator/gpio-regulator.c
+++ b/drivers/regulator/gpio-regulator.c
@@ -394,6 +394,7 @@ static const struct of_device_id regulator_gpio_of_match[] = {
394 { .compatible = "regulator-gpio", }, 394 { .compatible = "regulator-gpio", },
395 {}, 395 {},
396}; 396};
397MODULE_DEVICE_TABLE(of, regulator_gpio_of_match);
397#endif 398#endif
398 399
399static struct platform_driver gpio_regulator_driver = { 400static struct platform_driver gpio_regulator_driver = {
diff --git a/drivers/regulator/pbias-regulator.c b/drivers/regulator/pbias-regulator.c
index 4fa7bcaf454e..f9d74d63be7c 100644
--- a/drivers/regulator/pbias-regulator.c
+++ b/drivers/regulator/pbias-regulator.c
@@ -45,6 +45,10 @@ struct pbias_regulator_data {
45 int voltage; 45 int voltage;
46}; 46};
47 47
48struct pbias_of_data {
49 unsigned int offset;
50};
51
48static const unsigned int pbias_volt_table[] = { 52static const unsigned int pbias_volt_table[] = {
49 1800000, 53 1800000,
50 3000000 54 3000000
@@ -102,8 +106,35 @@ static struct of_regulator_match pbias_matches[] = {
102}; 106};
103#define PBIAS_NUM_REGS ARRAY_SIZE(pbias_matches) 107#define PBIAS_NUM_REGS ARRAY_SIZE(pbias_matches)
104 108
109/* Offset from SCM general area (and syscon) base */
110
111static const struct pbias_of_data pbias_of_data_omap2 = {
112 .offset = 0x230,
113};
114
115static const struct pbias_of_data pbias_of_data_omap3 = {
116 .offset = 0x2b0,
117};
118
119static const struct pbias_of_data pbias_of_data_omap4 = {
120 .offset = 0x60,
121};
122
123static const struct pbias_of_data pbias_of_data_omap5 = {
124 .offset = 0x60,
125};
126
127static const struct pbias_of_data pbias_of_data_dra7 = {
128 .offset = 0xe00,
129};
130
105static const struct of_device_id pbias_of_match[] = { 131static const struct of_device_id pbias_of_match[] = {
106 { .compatible = "ti,pbias-omap", }, 132 { .compatible = "ti,pbias-omap", },
133 { .compatible = "ti,pbias-omap2", .data = &pbias_of_data_omap2, },
134 { .compatible = "ti,pbias-omap3", .data = &pbias_of_data_omap3, },
135 { .compatible = "ti,pbias-omap4", .data = &pbias_of_data_omap4, },
136 { .compatible = "ti,pbias-omap5", .data = &pbias_of_data_omap5, },
137 { .compatible = "ti,pbias-dra7", .data = &pbias_of_data_dra7, },
107 {}, 138 {},
108}; 139};
109MODULE_DEVICE_TABLE(of, pbias_of_match); 140MODULE_DEVICE_TABLE(of, pbias_of_match);
@@ -118,6 +149,9 @@ static int pbias_regulator_probe(struct platform_device *pdev)
118 const struct pbias_reg_info *info; 149 const struct pbias_reg_info *info;
119 int ret = 0; 150 int ret = 0;
120 int count, idx, data_idx = 0; 151 int count, idx, data_idx = 0;
152 const struct of_device_id *match;
153 const struct pbias_of_data *data;
154 unsigned int offset;
121 155
122 count = of_regulator_match(&pdev->dev, np, pbias_matches, 156 count = of_regulator_match(&pdev->dev, np, pbias_matches,
123 PBIAS_NUM_REGS); 157 PBIAS_NUM_REGS);
@@ -133,6 +167,20 @@ static int pbias_regulator_probe(struct platform_device *pdev)
133 if (IS_ERR(syscon)) 167 if (IS_ERR(syscon))
134 return PTR_ERR(syscon); 168 return PTR_ERR(syscon);
135 169
170 match = of_match_device(of_match_ptr(pbias_of_match), &pdev->dev);
171 if (match && match->data) {
172 data = match->data;
173 offset = data->offset;
174 } else {
175 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
176 if (!res)
177 return -EINVAL;
178
179 offset = res->start;
180 dev_WARN(&pdev->dev,
181 "using legacy dt data for pbias offset\n");
182 }
183
136 cfg.regmap = syscon; 184 cfg.regmap = syscon;
137 cfg.dev = &pdev->dev; 185 cfg.dev = &pdev->dev;
138 186
@@ -145,10 +193,6 @@ static int pbias_regulator_probe(struct platform_device *pdev)
145 if (!info) 193 if (!info)
146 return -ENODEV; 194 return -ENODEV;
147 195
148 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
149 if (!res)
150 return -EINVAL;
151
152 drvdata[data_idx].syscon = syscon; 196 drvdata[data_idx].syscon = syscon;
153 drvdata[data_idx].info = info; 197 drvdata[data_idx].info = info;
154 drvdata[data_idx].desc.name = info->name; 198 drvdata[data_idx].desc.name = info->name;
@@ -158,9 +202,9 @@ static int pbias_regulator_probe(struct platform_device *pdev)
158 drvdata[data_idx].desc.volt_table = pbias_volt_table; 202 drvdata[data_idx].desc.volt_table = pbias_volt_table;
159 drvdata[data_idx].desc.n_voltages = 2; 203 drvdata[data_idx].desc.n_voltages = 2;
160 drvdata[data_idx].desc.enable_time = info->enable_time; 204 drvdata[data_idx].desc.enable_time = info->enable_time;
161 drvdata[data_idx].desc.vsel_reg = res->start; 205 drvdata[data_idx].desc.vsel_reg = offset;
162 drvdata[data_idx].desc.vsel_mask = info->vmode; 206 drvdata[data_idx].desc.vsel_mask = info->vmode;
163 drvdata[data_idx].desc.enable_reg = res->start; 207 drvdata[data_idx].desc.enable_reg = offset;
164 drvdata[data_idx].desc.enable_mask = info->enable_mask; 208 drvdata[data_idx].desc.enable_mask = info->enable_mask;
165 drvdata[data_idx].desc.enable_val = info->enable; 209 drvdata[data_idx].desc.enable_val = info->enable;
166 drvdata[data_idx].desc.disable_val = info->disable_val; 210 drvdata[data_idx].desc.disable_val = info->disable_val;
diff --git a/drivers/regulator/tps65218-regulator.c b/drivers/regulator/tps65218-regulator.c
index 7f97223f95c5..a02c1b961039 100644
--- a/drivers/regulator/tps65218-regulator.c
+++ b/drivers/regulator/tps65218-regulator.c
@@ -73,7 +73,7 @@ static const struct regulator_linear_range dcdc4_ranges[] = {
73}; 73};
74 74
75static struct tps_info tps65218_pmic_regs[] = { 75static struct tps_info tps65218_pmic_regs[] = {
76 TPS65218_INFO(DCDC1, "DCDC1", 850000, 167500), 76 TPS65218_INFO(DCDC1, "DCDC1", 850000, 1675000),
77 TPS65218_INFO(DCDC2, "DCDC2", 850000, 1675000), 77 TPS65218_INFO(DCDC2, "DCDC2", 850000, 1675000),
78 TPS65218_INFO(DCDC3, "DCDC3", 900000, 3400000), 78 TPS65218_INFO(DCDC3, "DCDC3", 900000, 3400000),
79 TPS65218_INFO(DCDC4, "DCDC4", 1175000, 3400000), 79 TPS65218_INFO(DCDC4, "DCDC4", 1175000, 3400000),
diff --git a/drivers/regulator/vexpress.c b/drivers/regulator/vexpress.c
index bed9d3ee4198..c810cbbd463f 100644
--- a/drivers/regulator/vexpress.c
+++ b/drivers/regulator/vexpress.c
@@ -103,6 +103,7 @@ static const struct of_device_id vexpress_regulator_of_match[] = {
103 { .compatible = "arm,vexpress-volt", }, 103 { .compatible = "arm,vexpress-volt", },
104 { } 104 { }
105}; 105};
106MODULE_DEVICE_TABLE(of, vexpress_regulator_of_match);
106 107
107static struct platform_driver vexpress_regulator_driver = { 108static struct platform_driver vexpress_regulator_driver = {
108 .probe = vexpress_regulator_probe, 109 .probe = vexpress_regulator_probe,
diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c
index f8d8fdb26b72..e9fae30fafda 100644
--- a/drivers/s390/virtio/virtio_ccw.c
+++ b/drivers/s390/virtio/virtio_ccw.c
@@ -400,12 +400,16 @@ static bool virtio_ccw_kvm_notify(struct virtqueue *vq)
400static int virtio_ccw_read_vq_conf(struct virtio_ccw_device *vcdev, 400static int virtio_ccw_read_vq_conf(struct virtio_ccw_device *vcdev,
401 struct ccw1 *ccw, int index) 401 struct ccw1 *ccw, int index)
402{ 402{
403 int ret;
404
403 vcdev->config_block->index = index; 405 vcdev->config_block->index = index;
404 ccw->cmd_code = CCW_CMD_READ_VQ_CONF; 406 ccw->cmd_code = CCW_CMD_READ_VQ_CONF;
405 ccw->flags = 0; 407 ccw->flags = 0;
406 ccw->count = sizeof(struct vq_config_block); 408 ccw->count = sizeof(struct vq_config_block);
407 ccw->cda = (__u32)(unsigned long)(vcdev->config_block); 409 ccw->cda = (__u32)(unsigned long)(vcdev->config_block);
408 ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_VQ_CONF); 410 ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_VQ_CONF);
411 if (ret)
412 return ret;
409 return vcdev->config_block->num; 413 return vcdev->config_block->num;
410} 414}
411 415
@@ -503,6 +507,10 @@ static struct virtqueue *virtio_ccw_setup_vq(struct virtio_device *vdev,
503 goto out_err; 507 goto out_err;
504 } 508 }
505 info->num = virtio_ccw_read_vq_conf(vcdev, ccw, i); 509 info->num = virtio_ccw_read_vq_conf(vcdev, ccw, i);
510 if (info->num < 0) {
511 err = info->num;
512 goto out_err;
513 }
506 size = PAGE_ALIGN(vring_size(info->num, KVM_VIRTIO_CCW_RING_ALIGN)); 514 size = PAGE_ALIGN(vring_size(info->num, KVM_VIRTIO_CCW_RING_ALIGN));
507 info->queue = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO); 515 info->queue = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);
508 if (info->queue == NULL) { 516 if (info->queue == NULL) {
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 471d08791766..1a8c9b53fafa 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -172,6 +172,7 @@ scsi_mod-$(CONFIG_SYSCTL) += scsi_sysctl.o
172scsi_mod-$(CONFIG_SCSI_PROC_FS) += scsi_proc.o 172scsi_mod-$(CONFIG_SCSI_PROC_FS) += scsi_proc.o
173scsi_mod-y += scsi_trace.o scsi_logging.o 173scsi_mod-y += scsi_trace.o scsi_logging.o
174scsi_mod-$(CONFIG_PM) += scsi_pm.o 174scsi_mod-$(CONFIG_PM) += scsi_pm.o
175scsi_mod-$(CONFIG_SCSI_DH) += scsi_dh.o
175 176
176hv_storvsc-y := storvsc_drv.o 177hv_storvsc-y := storvsc_drv.o
177 178
diff --git a/drivers/scsi/aic94xx/aic94xx_sds.c b/drivers/scsi/aic94xx/aic94xx_sds.c
index edb43fda9f36..c831e30411fa 100644
--- a/drivers/scsi/aic94xx/aic94xx_sds.c
+++ b/drivers/scsi/aic94xx/aic94xx_sds.c
@@ -983,7 +983,7 @@ static int asd_process_ctrl_a_user(struct asd_ha_struct *asd_ha,
983{ 983{
984 int err, i; 984 int err, i;
985 u32 offs, size; 985 u32 offs, size;
986 struct asd_ll_el *el; 986 struct asd_ll_el *el = NULL;
987 struct asd_ctrla_phy_settings *ps; 987 struct asd_ctrla_phy_settings *ps;
988 struct asd_ctrla_phy_settings dflt_ps; 988 struct asd_ctrla_phy_settings dflt_ps;
989 989
@@ -1004,6 +1004,7 @@ static int asd_process_ctrl_a_user(struct asd_ha_struct *asd_ha,
1004 1004
1005 size = sizeof(struct asd_ctrla_phy_settings); 1005 size = sizeof(struct asd_ctrla_phy_settings);
1006 ps = &dflt_ps; 1006 ps = &dflt_ps;
1007 goto out_process;
1007 } 1008 }
1008 1009
1009 if (size == 0) 1010 if (size == 0)
@@ -1028,7 +1029,7 @@ static int asd_process_ctrl_a_user(struct asd_ha_struct *asd_ha,
1028 ASD_DPRINTK("couldn't find ctrla phy settings struct\n"); 1029 ASD_DPRINTK("couldn't find ctrla phy settings struct\n");
1029 goto out2; 1030 goto out2;
1030 } 1031 }
1031 1032out_process:
1032 err = asd_process_ctrla_phy_settings(asd_ha, ps); 1033 err = asd_process_ctrla_phy_settings(asd_ha, ps);
1033 if (err) { 1034 if (err) {
1034 ASD_DPRINTK("couldn't process ctrla phy settings\n"); 1035 ASD_DPRINTK("couldn't process ctrla phy settings\n");
diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c
index 315d6d6dcfc8..98f7e8cca52d 100644
--- a/drivers/scsi/bfa/bfa_ioc.c
+++ b/drivers/scsi/bfa/bfa_ioc.c
@@ -3665,19 +3665,19 @@ bfa_cb_sfp_state_query(struct bfa_sfp_s *sfp)
3665 if (sfp->state_query_cbfn) 3665 if (sfp->state_query_cbfn)
3666 sfp->state_query_cbfn(sfp->state_query_cbarg, 3666 sfp->state_query_cbfn(sfp->state_query_cbarg,
3667 sfp->status); 3667 sfp->status);
3668 sfp->media = NULL; 3668 sfp->media = NULL;
3669 } 3669 }
3670 3670
3671 if (sfp->portspeed) { 3671 if (sfp->portspeed) {
3672 sfp->status = bfa_sfp_speed_valid(sfp, sfp->portspeed); 3672 sfp->status = bfa_sfp_speed_valid(sfp, sfp->portspeed);
3673 if (sfp->state_query_cbfn) 3673 if (sfp->state_query_cbfn)
3674 sfp->state_query_cbfn(sfp->state_query_cbarg, 3674 sfp->state_query_cbfn(sfp->state_query_cbarg,
3675 sfp->status); 3675 sfp->status);
3676 sfp->portspeed = BFA_PORT_SPEED_UNKNOWN; 3676 sfp->portspeed = BFA_PORT_SPEED_UNKNOWN;
3677 } 3677 }
3678 3678
3679 sfp->state_query_lock = 0; 3679 sfp->state_query_lock = 0;
3680 sfp->state_query_cbfn = NULL; 3680 sfp->state_query_cbfn = NULL;
3681} 3681}
3682 3682
3683/* 3683/*
@@ -3878,7 +3878,7 @@ bfa_sfp_show_comp(struct bfa_sfp_s *sfp, struct bfi_mbmsg_s *msg)
3878 bfa_trc(sfp, sfp->data_valid); 3878 bfa_trc(sfp, sfp->data_valid);
3879 if (sfp->data_valid) { 3879 if (sfp->data_valid) {
3880 u32 size = sizeof(struct sfp_mem_s); 3880 u32 size = sizeof(struct sfp_mem_s);
3881 u8 *des = (u8 *) &(sfp->sfpmem); 3881 u8 *des = (u8 *)(sfp->sfpmem);
3882 memcpy(des, sfp->dbuf_kva, size); 3882 memcpy(des, sfp->dbuf_kva, size);
3883 } 3883 }
3884 /* 3884 /*
diff --git a/drivers/scsi/device_handler/Kconfig b/drivers/scsi/device_handler/Kconfig
index 69abd0ad48e2..e5647d59224f 100644
--- a/drivers/scsi/device_handler/Kconfig
+++ b/drivers/scsi/device_handler/Kconfig
@@ -3,7 +3,7 @@
3# 3#
4 4
5menuconfig SCSI_DH 5menuconfig SCSI_DH
6 tristate "SCSI Device Handlers" 6 bool "SCSI Device Handlers"
7 depends on SCSI 7 depends on SCSI
8 default n 8 default n
9 help 9 help
diff --git a/drivers/scsi/device_handler/Makefile b/drivers/scsi/device_handler/Makefile
index e1d2ea083e15..09866c50fbb4 100644
--- a/drivers/scsi/device_handler/Makefile
+++ b/drivers/scsi/device_handler/Makefile
@@ -1,7 +1,6 @@
1# 1#
2# SCSI Device Handler 2# SCSI Device Handler
3# 3#
4obj-$(CONFIG_SCSI_DH) += scsi_dh.o
5obj-$(CONFIG_SCSI_DH_RDAC) += scsi_dh_rdac.o 4obj-$(CONFIG_SCSI_DH_RDAC) += scsi_dh_rdac.o
6obj-$(CONFIG_SCSI_DH_HP_SW) += scsi_dh_hp_sw.o 5obj-$(CONFIG_SCSI_DH_HP_SW) += scsi_dh_hp_sw.o
7obj-$(CONFIG_SCSI_DH_EMC) += scsi_dh_emc.o 6obj-$(CONFIG_SCSI_DH_EMC) += scsi_dh_emc.o
diff --git a/drivers/scsi/device_handler/scsi_dh.c b/drivers/scsi/device_handler/scsi_dh.c
deleted file mode 100644
index 1efebc9eedfb..000000000000
--- a/drivers/scsi/device_handler/scsi_dh.c
+++ /dev/null
@@ -1,621 +0,0 @@
1/*
2 * SCSI device handler infrastruture.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License as published by the
6 * Free Software Foundation; either version 2 of the License, or (at your
7 * option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License along
15 * with this program; if not, write to the Free Software Foundation, Inc.,
16 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17 *
18 * Copyright IBM Corporation, 2007
19 * Authors:
20 * Chandra Seetharaman <sekharan@us.ibm.com>
21 * Mike Anderson <andmike@linux.vnet.ibm.com>
22 */
23
24#include <linux/slab.h>
25#include <linux/module.h>
26#include <scsi/scsi_dh.h>
27#include "../scsi_priv.h"
28
29static DEFINE_SPINLOCK(list_lock);
30static LIST_HEAD(scsi_dh_list);
31
32static struct scsi_device_handler *get_device_handler(const char *name)
33{
34 struct scsi_device_handler *tmp, *found = NULL;
35
36 spin_lock(&list_lock);
37 list_for_each_entry(tmp, &scsi_dh_list, list) {
38 if (!strncmp(tmp->name, name, strlen(tmp->name))) {
39 found = tmp;
40 break;
41 }
42 }
43 spin_unlock(&list_lock);
44 return found;
45}
46
47/*
48 * device_handler_match_function - Match a device handler to a device
49 * @sdev - SCSI device to be tested
50 *
51 * Tests @sdev against the match function of all registered device_handler.
52 * Returns the found device handler or NULL if not found.
53 */
54static struct scsi_device_handler *
55device_handler_match_function(struct scsi_device *sdev)
56{
57 struct scsi_device_handler *tmp_dh, *found_dh = NULL;
58
59 spin_lock(&list_lock);
60 list_for_each_entry(tmp_dh, &scsi_dh_list, list) {
61 if (tmp_dh->match && tmp_dh->match(sdev)) {
62 found_dh = tmp_dh;
63 break;
64 }
65 }
66 spin_unlock(&list_lock);
67 return found_dh;
68}
69
70/*
71 * device_handler_match - Attach a device handler to a device
72 * @scsi_dh - The device handler to match against or NULL
73 * @sdev - SCSI device to be tested against @scsi_dh
74 *
75 * Tests @sdev against the device handler @scsi_dh or against
76 * all registered device_handler if @scsi_dh == NULL.
77 * Returns the found device handler or NULL if not found.
78 */
79static struct scsi_device_handler *
80device_handler_match(struct scsi_device_handler *scsi_dh,
81 struct scsi_device *sdev)
82{
83 struct scsi_device_handler *found_dh;
84
85 found_dh = device_handler_match_function(sdev);
86
87 if (scsi_dh && found_dh != scsi_dh)
88 found_dh = NULL;
89
90 return found_dh;
91}
92
93/*
94 * scsi_dh_handler_attach - Attach a device handler to a device
95 * @sdev - SCSI device the device handler should attach to
96 * @scsi_dh - The device handler to attach
97 */
98static int scsi_dh_handler_attach(struct scsi_device *sdev,
99 struct scsi_device_handler *scsi_dh)
100{
101 struct scsi_dh_data *d;
102
103 if (sdev->scsi_dh_data) {
104 if (sdev->scsi_dh_data->scsi_dh != scsi_dh)
105 return -EBUSY;
106
107 kref_get(&sdev->scsi_dh_data->kref);
108 return 0;
109 }
110
111 if (!try_module_get(scsi_dh->module))
112 return -EINVAL;
113
114 d = scsi_dh->attach(sdev);
115 if (IS_ERR(d)) {
116 sdev_printk(KERN_ERR, sdev, "%s: Attach failed (%ld)\n",
117 scsi_dh->name, PTR_ERR(d));
118 module_put(scsi_dh->module);
119 return PTR_ERR(d);
120 }
121
122 d->scsi_dh = scsi_dh;
123 kref_init(&d->kref);
124 d->sdev = sdev;
125
126 spin_lock_irq(sdev->request_queue->queue_lock);
127 sdev->scsi_dh_data = d;
128 spin_unlock_irq(sdev->request_queue->queue_lock);
129 return 0;
130}
131
132static void __detach_handler (struct kref *kref)
133{
134 struct scsi_dh_data *scsi_dh_data =
135 container_of(kref, struct scsi_dh_data, kref);
136 struct scsi_device_handler *scsi_dh = scsi_dh_data->scsi_dh;
137 struct scsi_device *sdev = scsi_dh_data->sdev;
138
139 scsi_dh->detach(sdev);
140
141 spin_lock_irq(sdev->request_queue->queue_lock);
142 sdev->scsi_dh_data = NULL;
143 spin_unlock_irq(sdev->request_queue->queue_lock);
144
145 sdev_printk(KERN_NOTICE, sdev, "%s: Detached\n", scsi_dh->name);
146 module_put(scsi_dh->module);
147}
148
149/*
150 * scsi_dh_handler_detach - Detach a device handler from a device
151 * @sdev - SCSI device the device handler should be detached from
152 * @scsi_dh - Device handler to be detached
153 *
154 * Detach from a device handler. If a device handler is specified,
155 * only detach if the currently attached handler matches @scsi_dh.
156 */
157static void scsi_dh_handler_detach(struct scsi_device *sdev,
158 struct scsi_device_handler *scsi_dh)
159{
160 if (!sdev->scsi_dh_data)
161 return;
162
163 if (scsi_dh && scsi_dh != sdev->scsi_dh_data->scsi_dh)
164 return;
165
166 if (!scsi_dh)
167 scsi_dh = sdev->scsi_dh_data->scsi_dh;
168
169 if (scsi_dh)
170 kref_put(&sdev->scsi_dh_data->kref, __detach_handler);
171}
172
173/*
174 * Functions for sysfs attribute 'dh_state'
175 */
176static ssize_t
177store_dh_state(struct device *dev, struct device_attribute *attr,
178 const char *buf, size_t count)
179{
180 struct scsi_device *sdev = to_scsi_device(dev);
181 struct scsi_device_handler *scsi_dh;
182 int err = -EINVAL;
183
184 if (sdev->sdev_state == SDEV_CANCEL ||
185 sdev->sdev_state == SDEV_DEL)
186 return -ENODEV;
187
188 if (!sdev->scsi_dh_data) {
189 /*
190 * Attach to a device handler
191 */
192 if (!(scsi_dh = get_device_handler(buf)))
193 return err;
194 err = scsi_dh_handler_attach(sdev, scsi_dh);
195 } else {
196 scsi_dh = sdev->scsi_dh_data->scsi_dh;
197 if (!strncmp(buf, "detach", 6)) {
198 /*
199 * Detach from a device handler
200 */
201 scsi_dh_handler_detach(sdev, scsi_dh);
202 err = 0;
203 } else if (!strncmp(buf, "activate", 8)) {
204 /*
205 * Activate a device handler
206 */
207 if (scsi_dh->activate)
208 err = scsi_dh->activate(sdev, NULL, NULL);
209 else
210 err = 0;
211 }
212 }
213
214 return err<0?err:count;
215}
216
217static ssize_t
218show_dh_state(struct device *dev, struct device_attribute *attr, char *buf)
219{
220 struct scsi_device *sdev = to_scsi_device(dev);
221
222 if (!sdev->scsi_dh_data)
223 return snprintf(buf, 20, "detached\n");
224
225 return snprintf(buf, 20, "%s\n", sdev->scsi_dh_data->scsi_dh->name);
226}
227
228static struct device_attribute scsi_dh_state_attr =
229 __ATTR(dh_state, S_IRUGO | S_IWUSR, show_dh_state,
230 store_dh_state);
231
232/*
233 * scsi_dh_sysfs_attr_add - Callback for scsi_init_dh
234 */
235static int scsi_dh_sysfs_attr_add(struct device *dev, void *data)
236{
237 struct scsi_device *sdev;
238 int err;
239
240 if (!scsi_is_sdev_device(dev))
241 return 0;
242
243 sdev = to_scsi_device(dev);
244
245 err = device_create_file(&sdev->sdev_gendev,
246 &scsi_dh_state_attr);
247
248 return 0;
249}
250
251/*
252 * scsi_dh_sysfs_attr_remove - Callback for scsi_exit_dh
253 */
254static int scsi_dh_sysfs_attr_remove(struct device *dev, void *data)
255{
256 struct scsi_device *sdev;
257
258 if (!scsi_is_sdev_device(dev))
259 return 0;
260
261 sdev = to_scsi_device(dev);
262
263 device_remove_file(&sdev->sdev_gendev,
264 &scsi_dh_state_attr);
265
266 return 0;
267}
268
269/*
270 * scsi_dh_notifier - notifier chain callback
271 */
272static int scsi_dh_notifier(struct notifier_block *nb,
273 unsigned long action, void *data)
274{
275 struct device *dev = data;
276 struct scsi_device *sdev;
277 int err = 0;
278 struct scsi_device_handler *devinfo = NULL;
279
280 if (!scsi_is_sdev_device(dev))
281 return 0;
282
283 sdev = to_scsi_device(dev);
284
285 if (action == BUS_NOTIFY_ADD_DEVICE) {
286 err = device_create_file(dev, &scsi_dh_state_attr);
287 /* don't care about err */
288 devinfo = device_handler_match(NULL, sdev);
289 if (devinfo)
290 err = scsi_dh_handler_attach(sdev, devinfo);
291 } else if (action == BUS_NOTIFY_DEL_DEVICE) {
292 device_remove_file(dev, &scsi_dh_state_attr);
293 scsi_dh_handler_detach(sdev, NULL);
294 }
295 return err;
296}
297
298/*
299 * scsi_dh_notifier_add - Callback for scsi_register_device_handler
300 */
301static int scsi_dh_notifier_add(struct device *dev, void *data)
302{
303 struct scsi_device_handler *scsi_dh = data;
304 struct scsi_device *sdev;
305
306 if (!scsi_is_sdev_device(dev))
307 return 0;
308
309 if (!get_device(dev))
310 return 0;
311
312 sdev = to_scsi_device(dev);
313
314 if (device_handler_match(scsi_dh, sdev))
315 scsi_dh_handler_attach(sdev, scsi_dh);
316
317 put_device(dev);
318
319 return 0;
320}
321
322/*
323 * scsi_dh_notifier_remove - Callback for scsi_unregister_device_handler
324 */
325static int scsi_dh_notifier_remove(struct device *dev, void *data)
326{
327 struct scsi_device_handler *scsi_dh = data;
328 struct scsi_device *sdev;
329
330 if (!scsi_is_sdev_device(dev))
331 return 0;
332
333 if (!get_device(dev))
334 return 0;
335
336 sdev = to_scsi_device(dev);
337
338 scsi_dh_handler_detach(sdev, scsi_dh);
339
340 put_device(dev);
341
342 return 0;
343}
344
345/*
346 * scsi_register_device_handler - register a device handler personality
347 * module.
348 * @scsi_dh - device handler to be registered.
349 *
350 * Returns 0 on success, -EBUSY if handler already registered.
351 */
352int scsi_register_device_handler(struct scsi_device_handler *scsi_dh)
353{
354
355 if (get_device_handler(scsi_dh->name))
356 return -EBUSY;
357
358 if (!scsi_dh->attach || !scsi_dh->detach)
359 return -EINVAL;
360
361 spin_lock(&list_lock);
362 list_add(&scsi_dh->list, &scsi_dh_list);
363 spin_unlock(&list_lock);
364
365 bus_for_each_dev(&scsi_bus_type, NULL, scsi_dh, scsi_dh_notifier_add);
366 printk(KERN_INFO "%s: device handler registered\n", scsi_dh->name);
367
368 return SCSI_DH_OK;
369}
370EXPORT_SYMBOL_GPL(scsi_register_device_handler);
371
372/*
373 * scsi_unregister_device_handler - register a device handler personality
374 * module.
375 * @scsi_dh - device handler to be unregistered.
376 *
377 * Returns 0 on success, -ENODEV if handler not registered.
378 */
379int scsi_unregister_device_handler(struct scsi_device_handler *scsi_dh)
380{
381
382 if (!get_device_handler(scsi_dh->name))
383 return -ENODEV;
384
385 bus_for_each_dev(&scsi_bus_type, NULL, scsi_dh,
386 scsi_dh_notifier_remove);
387
388 spin_lock(&list_lock);
389 list_del(&scsi_dh->list);
390 spin_unlock(&list_lock);
391 printk(KERN_INFO "%s: device handler unregistered\n", scsi_dh->name);
392
393 return SCSI_DH_OK;
394}
395EXPORT_SYMBOL_GPL(scsi_unregister_device_handler);
396
397/*
398 * scsi_dh_activate - activate the path associated with the scsi_device
399 * corresponding to the given request queue.
400 * Returns immediately without waiting for activation to be completed.
401 * @q - Request queue that is associated with the scsi_device to be
402 * activated.
403 * @fn - Function to be called upon completion of the activation.
404 * Function fn is called with data (below) and the error code.
405 * Function fn may be called from the same calling context. So,
406 * do not hold the lock in the caller which may be needed in fn.
407 * @data - data passed to the function fn upon completion.
408 *
409 */
410int scsi_dh_activate(struct request_queue *q, activate_complete fn, void *data)
411{
412 int err = 0;
413 unsigned long flags;
414 struct scsi_device *sdev;
415 struct scsi_device_handler *scsi_dh = NULL;
416 struct device *dev = NULL;
417
418 spin_lock_irqsave(q->queue_lock, flags);
419 sdev = q->queuedata;
420 if (!sdev) {
421 spin_unlock_irqrestore(q->queue_lock, flags);
422 err = SCSI_DH_NOSYS;
423 if (fn)
424 fn(data, err);
425 return err;
426 }
427
428 if (sdev->scsi_dh_data)
429 scsi_dh = sdev->scsi_dh_data->scsi_dh;
430 dev = get_device(&sdev->sdev_gendev);
431 if (!scsi_dh || !dev ||
432 sdev->sdev_state == SDEV_CANCEL ||
433 sdev->sdev_state == SDEV_DEL)
434 err = SCSI_DH_NOSYS;
435 if (sdev->sdev_state == SDEV_OFFLINE)
436 err = SCSI_DH_DEV_OFFLINED;
437 spin_unlock_irqrestore(q->queue_lock, flags);
438
439 if (err) {
440 if (fn)
441 fn(data, err);
442 goto out;
443 }
444
445 if (scsi_dh->activate)
446 err = scsi_dh->activate(sdev, fn, data);
447out:
448 put_device(dev);
449 return err;
450}
451EXPORT_SYMBOL_GPL(scsi_dh_activate);
452
453/*
454 * scsi_dh_set_params - set the parameters for the device as per the
455 * string specified in params.
456 * @q - Request queue that is associated with the scsi_device for
457 * which the parameters to be set.
458 * @params - parameters in the following format
459 * "no_of_params\0param1\0param2\0param3\0...\0"
460 * for example, string for 2 parameters with value 10 and 21
461 * is specified as "2\010\021\0".
462 */
463int scsi_dh_set_params(struct request_queue *q, const char *params)
464{
465 int err = -SCSI_DH_NOSYS;
466 unsigned long flags;
467 struct scsi_device *sdev;
468 struct scsi_device_handler *scsi_dh = NULL;
469
470 spin_lock_irqsave(q->queue_lock, flags);
471 sdev = q->queuedata;
472 if (sdev && sdev->scsi_dh_data)
473 scsi_dh = sdev->scsi_dh_data->scsi_dh;
474 if (scsi_dh && scsi_dh->set_params && get_device(&sdev->sdev_gendev))
475 err = 0;
476 spin_unlock_irqrestore(q->queue_lock, flags);
477
478 if (err)
479 return err;
480 err = scsi_dh->set_params(sdev, params);
481 put_device(&sdev->sdev_gendev);
482 return err;
483}
484EXPORT_SYMBOL_GPL(scsi_dh_set_params);
485
486/*
487 * scsi_dh_handler_exist - Return TRUE(1) if a device handler exists for
488 * the given name. FALSE(0) otherwise.
489 * @name - name of the device handler.
490 */
491int scsi_dh_handler_exist(const char *name)
492{
493 return (get_device_handler(name) != NULL);
494}
495EXPORT_SYMBOL_GPL(scsi_dh_handler_exist);
496
497/*
498 * scsi_dh_attach - Attach device handler
499 * @q - Request queue that is associated with the scsi_device
500 * the handler should be attached to
501 * @name - name of the handler to attach
502 */
503int scsi_dh_attach(struct request_queue *q, const char *name)
504{
505 unsigned long flags;
506 struct scsi_device *sdev;
507 struct scsi_device_handler *scsi_dh;
508 int err = 0;
509
510 scsi_dh = get_device_handler(name);
511 if (!scsi_dh)
512 return -EINVAL;
513
514 spin_lock_irqsave(q->queue_lock, flags);
515 sdev = q->queuedata;
516 if (!sdev || !get_device(&sdev->sdev_gendev))
517 err = -ENODEV;
518 spin_unlock_irqrestore(q->queue_lock, flags);
519
520 if (!err) {
521 err = scsi_dh_handler_attach(sdev, scsi_dh);
522 put_device(&sdev->sdev_gendev);
523 }
524 return err;
525}
526EXPORT_SYMBOL_GPL(scsi_dh_attach);
527
528/*
529 * scsi_dh_detach - Detach device handler
530 * @q - Request queue that is associated with the scsi_device
531 * the handler should be detached from
532 *
533 * This function will detach the device handler only
534 * if the sdev is not part of the internal list, ie
535 * if it has been attached manually.
536 */
537void scsi_dh_detach(struct request_queue *q)
538{
539 unsigned long flags;
540 struct scsi_device *sdev;
541 struct scsi_device_handler *scsi_dh = NULL;
542
543 spin_lock_irqsave(q->queue_lock, flags);
544 sdev = q->queuedata;
545 if (!sdev || !get_device(&sdev->sdev_gendev))
546 sdev = NULL;
547 spin_unlock_irqrestore(q->queue_lock, flags);
548
549 if (!sdev)
550 return;
551
552 if (sdev->scsi_dh_data) {
553 scsi_dh = sdev->scsi_dh_data->scsi_dh;
554 scsi_dh_handler_detach(sdev, scsi_dh);
555 }
556 put_device(&sdev->sdev_gendev);
557}
558EXPORT_SYMBOL_GPL(scsi_dh_detach);
559
560/*
561 * scsi_dh_attached_handler_name - Get attached device handler's name
562 * @q - Request queue that is associated with the scsi_device
563 * that may have a device handler attached
564 * @gfp - the GFP mask used in the kmalloc() call when allocating memory
565 *
566 * Returns name of attached handler, NULL if no handler is attached.
567 * Caller must take care to free the returned string.
568 */
569const char *scsi_dh_attached_handler_name(struct request_queue *q, gfp_t gfp)
570{
571 unsigned long flags;
572 struct scsi_device *sdev;
573 const char *handler_name = NULL;
574
575 spin_lock_irqsave(q->queue_lock, flags);
576 sdev = q->queuedata;
577 if (!sdev || !get_device(&sdev->sdev_gendev))
578 sdev = NULL;
579 spin_unlock_irqrestore(q->queue_lock, flags);
580
581 if (!sdev)
582 return NULL;
583
584 if (sdev->scsi_dh_data)
585 handler_name = kstrdup(sdev->scsi_dh_data->scsi_dh->name, gfp);
586
587 put_device(&sdev->sdev_gendev);
588 return handler_name;
589}
590EXPORT_SYMBOL_GPL(scsi_dh_attached_handler_name);
591
592static struct notifier_block scsi_dh_nb = {
593 .notifier_call = scsi_dh_notifier
594};
595
596static int __init scsi_dh_init(void)
597{
598 int r;
599
600 r = bus_register_notifier(&scsi_bus_type, &scsi_dh_nb);
601
602 if (!r)
603 bus_for_each_dev(&scsi_bus_type, NULL, NULL,
604 scsi_dh_sysfs_attr_add);
605
606 return r;
607}
608
609static void __exit scsi_dh_exit(void)
610{
611 bus_for_each_dev(&scsi_bus_type, NULL, NULL,
612 scsi_dh_sysfs_attr_remove);
613 bus_unregister_notifier(&scsi_bus_type, &scsi_dh_nb);
614}
615
616module_init(scsi_dh_init);
617module_exit(scsi_dh_exit);
618
619MODULE_DESCRIPTION("SCSI device handler");
620MODULE_AUTHOR("Chandra Seetharaman <sekharan@us.ibm.com>");
621MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index 854b568b9931..cc2773b5de68 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -62,7 +62,6 @@
62#define ALUA_OPTIMIZE_STPG 1 62#define ALUA_OPTIMIZE_STPG 1
63 63
64struct alua_dh_data { 64struct alua_dh_data {
65 struct scsi_dh_data dh_data;
66 int group_id; 65 int group_id;
67 int rel_port; 66 int rel_port;
68 int tpgs; 67 int tpgs;
@@ -86,11 +85,6 @@ struct alua_dh_data {
86static char print_alua_state(int); 85static char print_alua_state(int);
87static int alua_check_sense(struct scsi_device *, struct scsi_sense_hdr *); 86static int alua_check_sense(struct scsi_device *, struct scsi_sense_hdr *);
88 87
89static inline struct alua_dh_data *get_alua_data(struct scsi_device *sdev)
90{
91 return container_of(sdev->scsi_dh_data, struct alua_dh_data, dh_data);
92}
93
94static int realloc_buffer(struct alua_dh_data *h, unsigned len) 88static int realloc_buffer(struct alua_dh_data *h, unsigned len)
95{ 89{
96 if (h->buff && h->buff != h->inq) 90 if (h->buff && h->buff != h->inq)
@@ -708,7 +702,7 @@ out:
708 */ 702 */
709static int alua_set_params(struct scsi_device *sdev, const char *params) 703static int alua_set_params(struct scsi_device *sdev, const char *params)
710{ 704{
711 struct alua_dh_data *h = get_alua_data(sdev); 705 struct alua_dh_data *h = sdev->handler_data;
712 unsigned int optimize = 0, argc; 706 unsigned int optimize = 0, argc;
713 const char *p = params; 707 const char *p = params;
714 int result = SCSI_DH_OK; 708 int result = SCSI_DH_OK;
@@ -746,7 +740,7 @@ MODULE_PARM_DESC(optimize_stpg, "Allow use of a non-optimized path, rather than
746static int alua_activate(struct scsi_device *sdev, 740static int alua_activate(struct scsi_device *sdev,
747 activate_complete fn, void *data) 741 activate_complete fn, void *data)
748{ 742{
749 struct alua_dh_data *h = get_alua_data(sdev); 743 struct alua_dh_data *h = sdev->handler_data;
750 int err = SCSI_DH_OK; 744 int err = SCSI_DH_OK;
751 int stpg = 0; 745 int stpg = 0;
752 746
@@ -804,7 +798,7 @@ out:
804 */ 798 */
805static int alua_prep_fn(struct scsi_device *sdev, struct request *req) 799static int alua_prep_fn(struct scsi_device *sdev, struct request *req)
806{ 800{
807 struct alua_dh_data *h = get_alua_data(sdev); 801 struct alua_dh_data *h = sdev->handler_data;
808 int ret = BLKPREP_OK; 802 int ret = BLKPREP_OK;
809 803
810 if (h->state == TPGS_STATE_TRANSITIONING) 804 if (h->state == TPGS_STATE_TRANSITIONING)
@@ -819,23 +813,18 @@ static int alua_prep_fn(struct scsi_device *sdev, struct request *req)
819 813
820} 814}
821 815
822static bool alua_match(struct scsi_device *sdev)
823{
824 return (scsi_device_tpgs(sdev) != 0);
825}
826
827/* 816/*
828 * alua_bus_attach - Attach device handler 817 * alua_bus_attach - Attach device handler
829 * @sdev: device to be attached to 818 * @sdev: device to be attached to
830 */ 819 */
831static struct scsi_dh_data *alua_bus_attach(struct scsi_device *sdev) 820static int alua_bus_attach(struct scsi_device *sdev)
832{ 821{
833 struct alua_dh_data *h; 822 struct alua_dh_data *h;
834 int err; 823 int err;
835 824
836 h = kzalloc(sizeof(*h) , GFP_KERNEL); 825 h = kzalloc(sizeof(*h) , GFP_KERNEL);
837 if (!h) 826 if (!h)
838 return ERR_PTR(-ENOMEM); 827 return -ENOMEM;
839 h->tpgs = TPGS_MODE_UNINITIALIZED; 828 h->tpgs = TPGS_MODE_UNINITIALIZED;
840 h->state = TPGS_STATE_OPTIMIZED; 829 h->state = TPGS_STATE_OPTIMIZED;
841 h->group_id = -1; 830 h->group_id = -1;
@@ -848,11 +837,11 @@ static struct scsi_dh_data *alua_bus_attach(struct scsi_device *sdev)
848 if (err != SCSI_DH_OK && err != SCSI_DH_DEV_OFFLINED) 837 if (err != SCSI_DH_OK && err != SCSI_DH_DEV_OFFLINED)
849 goto failed; 838 goto failed;
850 839
851 sdev_printk(KERN_NOTICE, sdev, "%s: Attached\n", ALUA_DH_NAME); 840 sdev->handler_data = h;
852 return &h->dh_data; 841 return 0;
853failed: 842failed:
854 kfree(h); 843 kfree(h);
855 return ERR_PTR(-EINVAL); 844 return -EINVAL;
856} 845}
857 846
858/* 847/*
@@ -861,10 +850,11 @@ failed:
861 */ 850 */
862static void alua_bus_detach(struct scsi_device *sdev) 851static void alua_bus_detach(struct scsi_device *sdev)
863{ 852{
864 struct alua_dh_data *h = get_alua_data(sdev); 853 struct alua_dh_data *h = sdev->handler_data;
865 854
866 if (h->buff && h->inq != h->buff) 855 if (h->buff && h->inq != h->buff)
867 kfree(h->buff); 856 kfree(h->buff);
857 sdev->handler_data = NULL;
868 kfree(h); 858 kfree(h);
869} 859}
870 860
@@ -877,7 +867,6 @@ static struct scsi_device_handler alua_dh = {
877 .check_sense = alua_check_sense, 867 .check_sense = alua_check_sense,
878 .activate = alua_activate, 868 .activate = alua_activate,
879 .set_params = alua_set_params, 869 .set_params = alua_set_params,
880 .match = alua_match,
881}; 870};
882 871
883static int __init alua_init(void) 872static int __init alua_init(void)
diff --git a/drivers/scsi/device_handler/scsi_dh_emc.c b/drivers/scsi/device_handler/scsi_dh_emc.c
index 6ed1caadbc6a..e6fb97cb12f4 100644
--- a/drivers/scsi/device_handler/scsi_dh_emc.c
+++ b/drivers/scsi/device_handler/scsi_dh_emc.c
@@ -72,7 +72,6 @@ static const char * lun_state[] =
72}; 72};
73 73
74struct clariion_dh_data { 74struct clariion_dh_data {
75 struct scsi_dh_data dh_data;
76 /* 75 /*
77 * Flags: 76 * Flags:
78 * CLARIION_SHORT_TRESPASS 77 * CLARIION_SHORT_TRESPASS
@@ -114,13 +113,6 @@ struct clariion_dh_data {
114 int current_sp; 113 int current_sp;
115}; 114};
116 115
117static inline struct clariion_dh_data
118 *get_clariion_data(struct scsi_device *sdev)
119{
120 return container_of(sdev->scsi_dh_data, struct clariion_dh_data,
121 dh_data);
122}
123
124/* 116/*
125 * Parse MODE_SELECT cmd reply. 117 * Parse MODE_SELECT cmd reply.
126 */ 118 */
@@ -450,7 +442,7 @@ static int clariion_check_sense(struct scsi_device *sdev,
450 442
451static int clariion_prep_fn(struct scsi_device *sdev, struct request *req) 443static int clariion_prep_fn(struct scsi_device *sdev, struct request *req)
452{ 444{
453 struct clariion_dh_data *h = get_clariion_data(sdev); 445 struct clariion_dh_data *h = sdev->handler_data;
454 int ret = BLKPREP_OK; 446 int ret = BLKPREP_OK;
455 447
456 if (h->lun_state != CLARIION_LUN_OWNED) { 448 if (h->lun_state != CLARIION_LUN_OWNED) {
@@ -533,7 +525,7 @@ retry:
533static int clariion_activate(struct scsi_device *sdev, 525static int clariion_activate(struct scsi_device *sdev,
534 activate_complete fn, void *data) 526 activate_complete fn, void *data)
535{ 527{
536 struct clariion_dh_data *csdev = get_clariion_data(sdev); 528 struct clariion_dh_data *csdev = sdev->handler_data;
537 int result; 529 int result;
538 530
539 result = clariion_send_inquiry(sdev, csdev); 531 result = clariion_send_inquiry(sdev, csdev);
@@ -574,7 +566,7 @@ done:
574 */ 566 */
575static int clariion_set_params(struct scsi_device *sdev, const char *params) 567static int clariion_set_params(struct scsi_device *sdev, const char *params)
576{ 568{
577 struct clariion_dh_data *csdev = get_clariion_data(sdev); 569 struct clariion_dh_data *csdev = sdev->handler_data;
578 unsigned int hr = 0, st = 0, argc; 570 unsigned int hr = 0, st = 0, argc;
579 const char *p = params; 571 const char *p = params;
580 int result = SCSI_DH_OK; 572 int result = SCSI_DH_OK;
@@ -622,42 +614,14 @@ done:
622 return result; 614 return result;
623} 615}
624 616
625static const struct { 617static int clariion_bus_attach(struct scsi_device *sdev)
626 char *vendor;
627 char *model;
628} clariion_dev_list[] = {
629 {"DGC", "RAID"},
630 {"DGC", "DISK"},
631 {"DGC", "VRAID"},
632 {NULL, NULL},
633};
634
635static bool clariion_match(struct scsi_device *sdev)
636{
637 int i;
638
639 if (scsi_device_tpgs(sdev))
640 return false;
641
642 for (i = 0; clariion_dev_list[i].vendor; i++) {
643 if (!strncmp(sdev->vendor, clariion_dev_list[i].vendor,
644 strlen(clariion_dev_list[i].vendor)) &&
645 !strncmp(sdev->model, clariion_dev_list[i].model,
646 strlen(clariion_dev_list[i].model))) {
647 return true;
648 }
649 }
650 return false;
651}
652
653static struct scsi_dh_data *clariion_bus_attach(struct scsi_device *sdev)
654{ 618{
655 struct clariion_dh_data *h; 619 struct clariion_dh_data *h;
656 int err; 620 int err;
657 621
658 h = kzalloc(sizeof(*h) , GFP_KERNEL); 622 h = kzalloc(sizeof(*h) , GFP_KERNEL);
659 if (!h) 623 if (!h)
660 return ERR_PTR(-ENOMEM); 624 return -ENOMEM;
661 h->lun_state = CLARIION_LUN_UNINITIALIZED; 625 h->lun_state = CLARIION_LUN_UNINITIALIZED;
662 h->default_sp = CLARIION_UNBOUND_LU; 626 h->default_sp = CLARIION_UNBOUND_LU;
663 h->current_sp = CLARIION_UNBOUND_LU; 627 h->current_sp = CLARIION_UNBOUND_LU;
@@ -675,18 +639,19 @@ static struct scsi_dh_data *clariion_bus_attach(struct scsi_device *sdev)
675 CLARIION_NAME, h->current_sp + 'A', 639 CLARIION_NAME, h->current_sp + 'A',
676 h->port, lun_state[h->lun_state], 640 h->port, lun_state[h->lun_state],
677 h->default_sp + 'A'); 641 h->default_sp + 'A');
678 return &h->dh_data; 642
643 sdev->handler_data = h;
644 return 0;
679 645
680failed: 646failed:
681 kfree(h); 647 kfree(h);
682 return ERR_PTR(-EINVAL); 648 return -EINVAL;
683} 649}
684 650
685static void clariion_bus_detach(struct scsi_device *sdev) 651static void clariion_bus_detach(struct scsi_device *sdev)
686{ 652{
687 struct clariion_dh_data *h = get_clariion_data(sdev); 653 kfree(sdev->handler_data);
688 654 sdev->handler_data = NULL;
689 kfree(h);
690} 655}
691 656
692static struct scsi_device_handler clariion_dh = { 657static struct scsi_device_handler clariion_dh = {
@@ -698,7 +663,6 @@ static struct scsi_device_handler clariion_dh = {
698 .activate = clariion_activate, 663 .activate = clariion_activate,
699 .prep_fn = clariion_prep_fn, 664 .prep_fn = clariion_prep_fn,
700 .set_params = clariion_set_params, 665 .set_params = clariion_set_params,
701 .match = clariion_match,
702}; 666};
703 667
704static int __init clariion_init(void) 668static int __init clariion_init(void)
diff --git a/drivers/scsi/device_handler/scsi_dh_hp_sw.c b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
index 485d99544a15..9406d5f4a3d3 100644
--- a/drivers/scsi/device_handler/scsi_dh_hp_sw.c
+++ b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
@@ -38,7 +38,6 @@
38#define HP_SW_PATH_PASSIVE 1 38#define HP_SW_PATH_PASSIVE 1
39 39
40struct hp_sw_dh_data { 40struct hp_sw_dh_data {
41 struct scsi_dh_data dh_data;
42 unsigned char sense[SCSI_SENSE_BUFFERSIZE]; 41 unsigned char sense[SCSI_SENSE_BUFFERSIZE];
43 int path_state; 42 int path_state;
44 int retries; 43 int retries;
@@ -50,11 +49,6 @@ struct hp_sw_dh_data {
50 49
51static int hp_sw_start_stop(struct hp_sw_dh_data *); 50static int hp_sw_start_stop(struct hp_sw_dh_data *);
52 51
53static inline struct hp_sw_dh_data *get_hp_sw_data(struct scsi_device *sdev)
54{
55 return container_of(sdev->scsi_dh_data, struct hp_sw_dh_data, dh_data);
56}
57
58/* 52/*
59 * tur_done - Handle TEST UNIT READY return status 53 * tur_done - Handle TEST UNIT READY return status
60 * @sdev: sdev the command has been sent to 54 * @sdev: sdev the command has been sent to
@@ -267,7 +261,7 @@ static int hp_sw_start_stop(struct hp_sw_dh_data *h)
267 261
268static int hp_sw_prep_fn(struct scsi_device *sdev, struct request *req) 262static int hp_sw_prep_fn(struct scsi_device *sdev, struct request *req)
269{ 263{
270 struct hp_sw_dh_data *h = get_hp_sw_data(sdev); 264 struct hp_sw_dh_data *h = sdev->handler_data;
271 int ret = BLKPREP_OK; 265 int ret = BLKPREP_OK;
272 266
273 if (h->path_state != HP_SW_PATH_ACTIVE) { 267 if (h->path_state != HP_SW_PATH_ACTIVE) {
@@ -292,7 +286,7 @@ static int hp_sw_activate(struct scsi_device *sdev,
292 activate_complete fn, void *data) 286 activate_complete fn, void *data)
293{ 287{
294 int ret = SCSI_DH_OK; 288 int ret = SCSI_DH_OK;
295 struct hp_sw_dh_data *h = get_hp_sw_data(sdev); 289 struct hp_sw_dh_data *h = sdev->handler_data;
296 290
297 ret = hp_sw_tur(sdev, h); 291 ret = hp_sw_tur(sdev, h);
298 292
@@ -311,43 +305,14 @@ static int hp_sw_activate(struct scsi_device *sdev,
311 return 0; 305 return 0;
312} 306}
313 307
314static const struct { 308static int hp_sw_bus_attach(struct scsi_device *sdev)
315 char *vendor;
316 char *model;
317} hp_sw_dh_data_list[] = {
318 {"COMPAQ", "MSA1000 VOLUME"},
319 {"COMPAQ", "HSV110"},
320 {"HP", "HSV100"},
321 {"DEC", "HSG80"},
322 {NULL, NULL},
323};
324
325static bool hp_sw_match(struct scsi_device *sdev)
326{
327 int i;
328
329 if (scsi_device_tpgs(sdev))
330 return false;
331
332 for (i = 0; hp_sw_dh_data_list[i].vendor; i++) {
333 if (!strncmp(sdev->vendor, hp_sw_dh_data_list[i].vendor,
334 strlen(hp_sw_dh_data_list[i].vendor)) &&
335 !strncmp(sdev->model, hp_sw_dh_data_list[i].model,
336 strlen(hp_sw_dh_data_list[i].model))) {
337 return true;
338 }
339 }
340 return false;
341}
342
343static struct scsi_dh_data *hp_sw_bus_attach(struct scsi_device *sdev)
344{ 309{
345 struct hp_sw_dh_data *h; 310 struct hp_sw_dh_data *h;
346 int ret; 311 int ret;
347 312
348 h = kzalloc(sizeof(*h), GFP_KERNEL); 313 h = kzalloc(sizeof(*h), GFP_KERNEL);
349 if (!h) 314 if (!h)
350 return ERR_PTR(-ENOMEM); 315 return -ENOMEM;
351 h->path_state = HP_SW_PATH_UNINITIALIZED; 316 h->path_state = HP_SW_PATH_UNINITIALIZED;
352 h->retries = HP_SW_RETRIES; 317 h->retries = HP_SW_RETRIES;
353 h->sdev = sdev; 318 h->sdev = sdev;
@@ -359,17 +324,18 @@ static struct scsi_dh_data *hp_sw_bus_attach(struct scsi_device *sdev)
359 sdev_printk(KERN_INFO, sdev, "%s: attached to %s path\n", 324 sdev_printk(KERN_INFO, sdev, "%s: attached to %s path\n",
360 HP_SW_NAME, h->path_state == HP_SW_PATH_ACTIVE? 325 HP_SW_NAME, h->path_state == HP_SW_PATH_ACTIVE?
361 "active":"passive"); 326 "active":"passive");
362 return &h->dh_data; 327
328 sdev->handler_data = h;
329 return 0;
363failed: 330failed:
364 kfree(h); 331 kfree(h);
365 return ERR_PTR(-EINVAL); 332 return -EINVAL;
366} 333}
367 334
368static void hp_sw_bus_detach( struct scsi_device *sdev ) 335static void hp_sw_bus_detach( struct scsi_device *sdev )
369{ 336{
370 struct hp_sw_dh_data *h = get_hp_sw_data(sdev); 337 kfree(sdev->handler_data);
371 338 sdev->handler_data = NULL;
372 kfree(h);
373} 339}
374 340
375static struct scsi_device_handler hp_sw_dh = { 341static struct scsi_device_handler hp_sw_dh = {
@@ -379,7 +345,6 @@ static struct scsi_device_handler hp_sw_dh = {
379 .detach = hp_sw_bus_detach, 345 .detach = hp_sw_bus_detach,
380 .activate = hp_sw_activate, 346 .activate = hp_sw_activate,
381 .prep_fn = hp_sw_prep_fn, 347 .prep_fn = hp_sw_prep_fn,
382 .match = hp_sw_match,
383}; 348};
384 349
385static int __init hp_sw_init(void) 350static int __init hp_sw_init(void)
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index b46ace3d4bf0..361358134315 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -181,7 +181,6 @@ struct c2_inquiry {
181}; 181};
182 182
183struct rdac_dh_data { 183struct rdac_dh_data {
184 struct scsi_dh_data dh_data;
185 struct rdac_controller *ctlr; 184 struct rdac_controller *ctlr;
186#define UNINITIALIZED_LUN (1 << 8) 185#define UNINITIALIZED_LUN (1 << 8)
187 unsigned lun; 186 unsigned lun;
@@ -260,11 +259,6 @@ do { \
260 sdev_printk(KERN_INFO, sdev, RDAC_NAME ": " f "\n", ## arg); \ 259 sdev_printk(KERN_INFO, sdev, RDAC_NAME ": " f "\n", ## arg); \
261} while (0); 260} while (0);
262 261
263static inline struct rdac_dh_data *get_rdac_data(struct scsi_device *sdev)
264{
265 return container_of(sdev->scsi_dh_data, struct rdac_dh_data, dh_data);
266}
267
268static struct request *get_rdac_req(struct scsi_device *sdev, 262static struct request *get_rdac_req(struct scsi_device *sdev,
269 void *buffer, unsigned buflen, int rw) 263 void *buffer, unsigned buflen, int rw)
270{ 264{
@@ -544,7 +538,7 @@ static int mode_select_handle_sense(struct scsi_device *sdev,
544{ 538{
545 struct scsi_sense_hdr sense_hdr; 539 struct scsi_sense_hdr sense_hdr;
546 int err = SCSI_DH_IO, ret; 540 int err = SCSI_DH_IO, ret;
547 struct rdac_dh_data *h = get_rdac_data(sdev); 541 struct rdac_dh_data *h = sdev->handler_data;
548 542
549 ret = scsi_normalize_sense(sensebuf, SCSI_SENSE_BUFFERSIZE, &sense_hdr); 543 ret = scsi_normalize_sense(sensebuf, SCSI_SENSE_BUFFERSIZE, &sense_hdr);
550 if (!ret) 544 if (!ret)
@@ -589,7 +583,7 @@ static void send_mode_select(struct work_struct *work)
589 container_of(work, struct rdac_controller, ms_work); 583 container_of(work, struct rdac_controller, ms_work);
590 struct request *rq; 584 struct request *rq;
591 struct scsi_device *sdev = ctlr->ms_sdev; 585 struct scsi_device *sdev = ctlr->ms_sdev;
592 struct rdac_dh_data *h = get_rdac_data(sdev); 586 struct rdac_dh_data *h = sdev->handler_data;
593 struct request_queue *q = sdev->request_queue; 587 struct request_queue *q = sdev->request_queue;
594 int err, retry_cnt = RDAC_RETRY_COUNT; 588 int err, retry_cnt = RDAC_RETRY_COUNT;
595 struct rdac_queue_data *tmp, *qdata; 589 struct rdac_queue_data *tmp, *qdata;
@@ -648,7 +642,7 @@ static int queue_mode_select(struct scsi_device *sdev,
648 if (!qdata) 642 if (!qdata)
649 return SCSI_DH_RETRY; 643 return SCSI_DH_RETRY;
650 644
651 qdata->h = get_rdac_data(sdev); 645 qdata->h = sdev->handler_data;
652 qdata->callback_fn = fn; 646 qdata->callback_fn = fn;
653 qdata->callback_data = data; 647 qdata->callback_data = data;
654 648
@@ -667,7 +661,7 @@ static int queue_mode_select(struct scsi_device *sdev,
667static int rdac_activate(struct scsi_device *sdev, 661static int rdac_activate(struct scsi_device *sdev,
668 activate_complete fn, void *data) 662 activate_complete fn, void *data)
669{ 663{
670 struct rdac_dh_data *h = get_rdac_data(sdev); 664 struct rdac_dh_data *h = sdev->handler_data;
671 int err = SCSI_DH_OK; 665 int err = SCSI_DH_OK;
672 int act = 0; 666 int act = 0;
673 667
@@ -702,7 +696,7 @@ done:
702 696
703static int rdac_prep_fn(struct scsi_device *sdev, struct request *req) 697static int rdac_prep_fn(struct scsi_device *sdev, struct request *req)
704{ 698{
705 struct rdac_dh_data *h = get_rdac_data(sdev); 699 struct rdac_dh_data *h = sdev->handler_data;
706 int ret = BLKPREP_OK; 700 int ret = BLKPREP_OK;
707 701
708 if (h->state != RDAC_STATE_ACTIVE) { 702 if (h->state != RDAC_STATE_ACTIVE) {
@@ -716,7 +710,7 @@ static int rdac_prep_fn(struct scsi_device *sdev, struct request *req)
716static int rdac_check_sense(struct scsi_device *sdev, 710static int rdac_check_sense(struct scsi_device *sdev,
717 struct scsi_sense_hdr *sense_hdr) 711 struct scsi_sense_hdr *sense_hdr)
718{ 712{
719 struct rdac_dh_data *h = get_rdac_data(sdev); 713 struct rdac_dh_data *h = sdev->handler_data;
720 714
721 RDAC_LOG(RDAC_LOG_SENSE, sdev, "array %s, ctlr %d, " 715 RDAC_LOG(RDAC_LOG_SENSE, sdev, "array %s, ctlr %d, "
722 "I/O returned with sense %02x/%02x/%02x", 716 "I/O returned with sense %02x/%02x/%02x",
@@ -778,56 +772,7 @@ static int rdac_check_sense(struct scsi_device *sdev,
778 return SCSI_RETURN_NOT_HANDLED; 772 return SCSI_RETURN_NOT_HANDLED;
779} 773}
780 774
781static const struct { 775static int rdac_bus_attach(struct scsi_device *sdev)
782 char *vendor;
783 char *model;
784} rdac_dev_list[] = {
785 {"IBM", "1722"},
786 {"IBM", "1724"},
787 {"IBM", "1726"},
788 {"IBM", "1742"},
789 {"IBM", "1745"},
790 {"IBM", "1746"},
791 {"IBM", "1813"},
792 {"IBM", "1814"},
793 {"IBM", "1815"},
794 {"IBM", "1818"},
795 {"IBM", "3526"},
796 {"SGI", "TP9"},
797 {"SGI", "IS"},
798 {"STK", "OPENstorage D280"},
799 {"STK", "FLEXLINE 380"},
800 {"SUN", "CSM"},
801 {"SUN", "LCSM100"},
802 {"SUN", "STK6580_6780"},
803 {"SUN", "SUN_6180"},
804 {"SUN", "ArrayStorage"},
805 {"DELL", "MD3"},
806 {"NETAPP", "INF-01-00"},
807 {"LSI", "INF-01-00"},
808 {"ENGENIO", "INF-01-00"},
809 {NULL, NULL},
810};
811
812static bool rdac_match(struct scsi_device *sdev)
813{
814 int i;
815
816 if (scsi_device_tpgs(sdev))
817 return false;
818
819 for (i = 0; rdac_dev_list[i].vendor; i++) {
820 if (!strncmp(sdev->vendor, rdac_dev_list[i].vendor,
821 strlen(rdac_dev_list[i].vendor)) &&
822 !strncmp(sdev->model, rdac_dev_list[i].model,
823 strlen(rdac_dev_list[i].model))) {
824 return true;
825 }
826 }
827 return false;
828}
829
830static struct scsi_dh_data *rdac_bus_attach(struct scsi_device *sdev)
831{ 776{
832 struct rdac_dh_data *h; 777 struct rdac_dh_data *h;
833 int err; 778 int err;
@@ -836,7 +781,7 @@ static struct scsi_dh_data *rdac_bus_attach(struct scsi_device *sdev)
836 781
837 h = kzalloc(sizeof(*h) , GFP_KERNEL); 782 h = kzalloc(sizeof(*h) , GFP_KERNEL);
838 if (!h) 783 if (!h)
839 return ERR_PTR(-ENOMEM); 784 return -ENOMEM;
840 h->lun = UNINITIALIZED_LUN; 785 h->lun = UNINITIALIZED_LUN;
841 h->state = RDAC_STATE_ACTIVE; 786 h->state = RDAC_STATE_ACTIVE;
842 787
@@ -861,7 +806,8 @@ static struct scsi_dh_data *rdac_bus_attach(struct scsi_device *sdev)
861 RDAC_NAME, h->lun, mode[(int)h->mode], 806 RDAC_NAME, h->lun, mode[(int)h->mode],
862 lun_state[(int)h->lun_state]); 807 lun_state[(int)h->lun_state]);
863 808
864 return &h->dh_data; 809 sdev->handler_data = h;
810 return 0;
865 811
866clean_ctlr: 812clean_ctlr:
867 spin_lock(&list_lock); 813 spin_lock(&list_lock);
@@ -870,12 +816,12 @@ clean_ctlr:
870 816
871failed: 817failed:
872 kfree(h); 818 kfree(h);
873 return ERR_PTR(-EINVAL); 819 return -EINVAL;
874} 820}
875 821
876static void rdac_bus_detach( struct scsi_device *sdev ) 822static void rdac_bus_detach( struct scsi_device *sdev )
877{ 823{
878 struct rdac_dh_data *h = get_rdac_data(sdev); 824 struct rdac_dh_data *h = sdev->handler_data;
879 825
880 if (h->ctlr && h->ctlr->ms_queued) 826 if (h->ctlr && h->ctlr->ms_queued)
881 flush_workqueue(kmpath_rdacd); 827 flush_workqueue(kmpath_rdacd);
@@ -884,6 +830,7 @@ static void rdac_bus_detach( struct scsi_device *sdev )
884 if (h->ctlr) 830 if (h->ctlr)
885 kref_put(&h->ctlr->kref, release_controller); 831 kref_put(&h->ctlr->kref, release_controller);
886 spin_unlock(&list_lock); 832 spin_unlock(&list_lock);
833 sdev->handler_data = NULL;
887 kfree(h); 834 kfree(h);
888} 835}
889 836
@@ -895,7 +842,6 @@ static struct scsi_device_handler rdac_dh = {
895 .attach = rdac_bus_attach, 842 .attach = rdac_bus_attach,
896 .detach = rdac_bus_detach, 843 .detach = rdac_bus_detach,
897 .activate = rdac_activate, 844 .activate = rdac_activate,
898 .match = rdac_match,
899}; 845};
900 846
901static int __init rdac_init(void) 847static int __init rdac_init(void)
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index ec193a8357d7..d3eb80c46bbe 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -364,7 +364,7 @@ static int fcoe_interface_setup(struct fcoe_interface *fcoe,
364 * on the ethertype for the given device 364 * on the ethertype for the given device
365 */ 365 */
366 fcoe->fcoe_packet_type.func = fcoe_rcv; 366 fcoe->fcoe_packet_type.func = fcoe_rcv;
367 fcoe->fcoe_packet_type.type = __constant_htons(ETH_P_FCOE); 367 fcoe->fcoe_packet_type.type = htons(ETH_P_FCOE);
368 fcoe->fcoe_packet_type.dev = netdev; 368 fcoe->fcoe_packet_type.dev = netdev;
369 dev_add_pack(&fcoe->fcoe_packet_type); 369 dev_add_pack(&fcoe->fcoe_packet_type);
370 370
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 341191952155..b62836ddbbee 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -4555,7 +4555,7 @@ static ssize_t ipr_store_raw_mode(struct device *dev,
4555 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 4555 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4556 res = (struct ipr_resource_entry *)sdev->hostdata; 4556 res = (struct ipr_resource_entry *)sdev->hostdata;
4557 if (res) { 4557 if (res) {
4558 if (ioa_cfg->sis64 && ipr_is_af_dasd_device(res)) { 4558 if (ipr_is_af_dasd_device(res)) {
4559 res->raw_mode = simple_strtoul(buf, NULL, 10); 4559 res->raw_mode = simple_strtoul(buf, NULL, 10);
4560 len = strlen(buf); 4560 len = strlen(buf);
4561 if (res->sdev) 4561 if (res->sdev)
@@ -6383,9 +6383,13 @@ static int ipr_queuecommand(struct Scsi_Host *shost,
6383 (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) { 6383 (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) {
6384 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD; 6384 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6385 } 6385 }
6386 if (res->raw_mode && ipr_is_af_dasd_device(res)) 6386 if (res->raw_mode && ipr_is_af_dasd_device(res)) {
6387 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_PIPE; 6387 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_PIPE;
6388 6388
6389 if (scsi_cmd->underflow == 0)
6390 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6391 }
6392
6389 if (ioa_cfg->sis64) 6393 if (ioa_cfg->sis64)
6390 rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd); 6394 rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
6391 else 6395 else
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 98d9bb6ff725..33c74d3436c9 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -853,12 +853,9 @@ static void iscsi_scsi_cmd_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
853 SAM_STAT_CHECK_CONDITION; 853 SAM_STAT_CHECK_CONDITION;
854 scsi_build_sense_buffer(1, sc->sense_buffer, 854 scsi_build_sense_buffer(1, sc->sense_buffer,
855 ILLEGAL_REQUEST, 0x10, ascq); 855 ILLEGAL_REQUEST, 0x10, ascq);
856 sc->sense_buffer[7] = 0xc; /* Additional sense length */ 856 scsi_set_sense_information(sc->sense_buffer,
857 sc->sense_buffer[8] = 0; /* Information desc type */ 857 SCSI_SENSE_BUFFERSIZE,
858 sc->sense_buffer[9] = 0xa; /* Additional desc length */ 858 sector);
859 sc->sense_buffer[10] = 0x80; /* Validity bit */
860
861 put_unaligned_be64(sector, &sc->sense_buffer[12]);
862 goto out; 859 goto out;
863 } 860 }
864 } 861 }
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index eb627724417e..4abb93a83e0f 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -2284,7 +2284,7 @@ lpfc_mbx_cmpl_rdp_page_a2(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
2284 (struct lpfc_rdp_context *)(mbox->context2); 2284 (struct lpfc_rdp_context *)(mbox->context2);
2285 2285
2286 if (bf_get(lpfc_mqe_status, &mbox->u.mqe)) 2286 if (bf_get(lpfc_mqe_status, &mbox->u.mqe))
2287 goto error; 2287 goto error_mbuf_free;
2288 2288
2289 lpfc_sli_bemem_bcopy(mp->virt, &rdp_context->page_a2, 2289 lpfc_sli_bemem_bcopy(mp->virt, &rdp_context->page_a2,
2290 DMP_SFF_PAGE_A2_SIZE); 2290 DMP_SFF_PAGE_A2_SIZE);
@@ -2299,13 +2299,14 @@ lpfc_mbx_cmpl_rdp_page_a2(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
2299 mbox->mbox_cmpl = lpfc_mbx_cmpl_rdp_link_stat; 2299 mbox->mbox_cmpl = lpfc_mbx_cmpl_rdp_link_stat;
2300 mbox->context2 = (struct lpfc_rdp_context *) rdp_context; 2300 mbox->context2 = (struct lpfc_rdp_context *) rdp_context;
2301 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) == MBX_NOT_FINISHED) 2301 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) == MBX_NOT_FINISHED)
2302 goto error; 2302 goto error_cmd_free;
2303 2303
2304 return; 2304 return;
2305 2305
2306error: 2306error_mbuf_free:
2307 lpfc_mbuf_free(phba, mp->virt, mp->phys); 2307 lpfc_mbuf_free(phba, mp->virt, mp->phys);
2308 kfree(mp); 2308 kfree(mp);
2309error_cmd_free:
2309 lpfc_sli4_mbox_cmd_free(phba, mbox); 2310 lpfc_sli4_mbox_cmd_free(phba, mbox);
2310 rdp_context->cmpl(phba, rdp_context, FAILURE); 2311 rdp_context->cmpl(phba, rdp_context, FAILURE);
2311} 2312}
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
index 6dec7cff316f..c167911221e9 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
@@ -112,9 +112,12 @@ _scsih_set_fwfault_debug(const char *val, struct kernel_param *kp)
112 if (ret) 112 if (ret)
113 return ret; 113 return ret;
114 114
115 /* global ioc spinlock to protect controller list on list operations */
115 printk(KERN_INFO "setting fwfault_debug(%d)\n", mpt2sas_fwfault_debug); 116 printk(KERN_INFO "setting fwfault_debug(%d)\n", mpt2sas_fwfault_debug);
117 spin_lock(&gioc_lock);
116 list_for_each_entry(ioc, &mpt2sas_ioc_list, list) 118 list_for_each_entry(ioc, &mpt2sas_ioc_list, list)
117 ioc->fwfault_debug = mpt2sas_fwfault_debug; 119 ioc->fwfault_debug = mpt2sas_fwfault_debug;
120 spin_unlock(&gioc_lock);
118 return 0; 121 return 0;
119} 122}
120 123
@@ -4437,6 +4440,8 @@ mpt2sas_base_free_resources(struct MPT2SAS_ADAPTER *ioc)
4437 dexitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, 4440 dexitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
4438 __func__)); 4441 __func__));
4439 4442
4443 /* synchronizing freeing resource with pci_access_mutex lock */
4444 mutex_lock(&ioc->pci_access_mutex);
4440 if (ioc->chip_phys && ioc->chip) { 4445 if (ioc->chip_phys && ioc->chip) {
4441 _base_mask_interrupts(ioc); 4446 _base_mask_interrupts(ioc);
4442 ioc->shost_recovery = 1; 4447 ioc->shost_recovery = 1;
@@ -4456,6 +4461,7 @@ mpt2sas_base_free_resources(struct MPT2SAS_ADAPTER *ioc)
4456 pci_disable_pcie_error_reporting(pdev); 4461 pci_disable_pcie_error_reporting(pdev);
4457 pci_disable_device(pdev); 4462 pci_disable_device(pdev);
4458 } 4463 }
4464 mutex_unlock(&ioc->pci_access_mutex);
4459 return; 4465 return;
4460} 4466}
4461 4467
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h
index caff8d10cca4..97ea360c6920 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.h
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.h
@@ -238,6 +238,7 @@
238 * @flags: MPT_TARGET_FLAGS_XXX flags 238 * @flags: MPT_TARGET_FLAGS_XXX flags
239 * @deleted: target flaged for deletion 239 * @deleted: target flaged for deletion
240 * @tm_busy: target is busy with TM request. 240 * @tm_busy: target is busy with TM request.
241 * @sdev: The sas_device associated with this target
241 */ 242 */
242struct MPT2SAS_TARGET { 243struct MPT2SAS_TARGET {
243 struct scsi_target *starget; 244 struct scsi_target *starget;
@@ -248,6 +249,7 @@ struct MPT2SAS_TARGET {
248 u32 flags; 249 u32 flags;
249 u8 deleted; 250 u8 deleted;
250 u8 tm_busy; 251 u8 tm_busy;
252 struct _sas_device *sdev;
251}; 253};
252 254
253 255
@@ -376,8 +378,24 @@ struct _sas_device {
376 u8 phy; 378 u8 phy;
377 u8 responding; 379 u8 responding;
378 u8 pfa_led_on; 380 u8 pfa_led_on;
381 struct kref refcount;
379}; 382};
380 383
384static inline void sas_device_get(struct _sas_device *s)
385{
386 kref_get(&s->refcount);
387}
388
389static inline void sas_device_free(struct kref *r)
390{
391 kfree(container_of(r, struct _sas_device, refcount));
392}
393
394static inline void sas_device_put(struct _sas_device *s)
395{
396 kref_put(&s->refcount, sas_device_free);
397}
398
381/** 399/**
382 * struct _raid_device - raid volume link list 400 * struct _raid_device - raid volume link list
383 * @list: sas device list 401 * @list: sas device list
@@ -799,6 +817,12 @@ typedef void (*MPT2SAS_FLUSH_RUNNING_CMDS)(struct MPT2SAS_ADAPTER *ioc);
799 * @delayed_tr_list: target reset link list 817 * @delayed_tr_list: target reset link list
800 * @delayed_tr_volume_list: volume target reset link list 818 * @delayed_tr_volume_list: volume target reset link list
801 * @@temp_sensors_count: flag to carry the number of temperature sensors 819 * @@temp_sensors_count: flag to carry the number of temperature sensors
820 * @pci_access_mutex: Mutex to synchronize ioctl,sysfs show path and
821 * pci resource handling. PCI resource freeing will lead to free
822 * vital hardware/memory resource, which might be in use by cli/sysfs
823 * path functions resulting in Null pointer reference followed by kernel
824 * crash. To avoid the above race condition we use mutex syncrhonization
825 * which ensures the syncrhonization between cli/sysfs_show path
802 */ 826 */
803struct MPT2SAS_ADAPTER { 827struct MPT2SAS_ADAPTER {
804 struct list_head list; 828 struct list_head list;
@@ -1015,6 +1039,7 @@ struct MPT2SAS_ADAPTER {
1015 u8 mfg_pg10_hide_flag; 1039 u8 mfg_pg10_hide_flag;
1016 u8 hide_drives; 1040 u8 hide_drives;
1017 1041
1042 struct mutex pci_access_mutex;
1018}; 1043};
1019 1044
1020typedef u8 (*MPT_CALLBACK)(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, 1045typedef u8 (*MPT_CALLBACK)(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
@@ -1023,6 +1048,17 @@ typedef u8 (*MPT_CALLBACK)(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
1023 1048
1024/* base shared API */ 1049/* base shared API */
1025extern struct list_head mpt2sas_ioc_list; 1050extern struct list_head mpt2sas_ioc_list;
1051/* spinlock on list operations over IOCs
1052 * Case: when multiple warpdrive cards(IOCs) are in use
1053 * Each IOC will added to the ioc list stucture on initialization.
1054 * Watchdog threads run at regular intervals to check IOC for any
1055 * fault conditions which will trigger the dead_ioc thread to
1056 * deallocate pci resource, resulting deleting the IOC netry from list,
1057 * this deletion need to protected by spinlock to enusre that
1058 * ioc removal is syncrhonized, if not synchronized it might lead to
1059 * list_del corruption as the ioc list is traversed in cli path
1060 */
1061extern spinlock_t gioc_lock;
1026void mpt2sas_base_start_watchdog(struct MPT2SAS_ADAPTER *ioc); 1062void mpt2sas_base_start_watchdog(struct MPT2SAS_ADAPTER *ioc);
1027void mpt2sas_base_stop_watchdog(struct MPT2SAS_ADAPTER *ioc); 1063void mpt2sas_base_stop_watchdog(struct MPT2SAS_ADAPTER *ioc);
1028 1064
@@ -1095,11 +1131,12 @@ struct _sas_node *mpt2sas_scsih_expander_find_by_handle(struct MPT2SAS_ADAPTER *
1095 u16 handle); 1131 u16 handle);
1096struct _sas_node *mpt2sas_scsih_expander_find_by_sas_address(struct MPT2SAS_ADAPTER 1132struct _sas_node *mpt2sas_scsih_expander_find_by_sas_address(struct MPT2SAS_ADAPTER
1097 *ioc, u64 sas_address); 1133 *ioc, u64 sas_address);
1098struct _sas_device *mpt2sas_scsih_sas_device_find_by_sas_address( 1134struct _sas_device *mpt2sas_get_sdev_by_addr(
1135 struct MPT2SAS_ADAPTER *ioc, u64 sas_address);
1136struct _sas_device *__mpt2sas_get_sdev_by_addr(
1099 struct MPT2SAS_ADAPTER *ioc, u64 sas_address); 1137 struct MPT2SAS_ADAPTER *ioc, u64 sas_address);
1100 1138
1101void mpt2sas_port_enable_complete(struct MPT2SAS_ADAPTER *ioc); 1139void mpt2sas_port_enable_complete(struct MPT2SAS_ADAPTER *ioc);
1102
1103void mpt2sas_scsih_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase); 1140void mpt2sas_scsih_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase);
1104 1141
1105/* config shared API */ 1142/* config shared API */
diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.c b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
index 4e509604b571..3694b63bd993 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_ctl.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
@@ -427,13 +427,16 @@ static int
427_ctl_verify_adapter(int ioc_number, struct MPT2SAS_ADAPTER **iocpp) 427_ctl_verify_adapter(int ioc_number, struct MPT2SAS_ADAPTER **iocpp)
428{ 428{
429 struct MPT2SAS_ADAPTER *ioc; 429 struct MPT2SAS_ADAPTER *ioc;
430 430 /* global ioc lock to protect controller on list operations */
431 spin_lock(&gioc_lock);
431 list_for_each_entry(ioc, &mpt2sas_ioc_list, list) { 432 list_for_each_entry(ioc, &mpt2sas_ioc_list, list) {
432 if (ioc->id != ioc_number) 433 if (ioc->id != ioc_number)
433 continue; 434 continue;
435 spin_unlock(&gioc_lock);
434 *iocpp = ioc; 436 *iocpp = ioc;
435 return ioc_number; 437 return ioc_number;
436 } 438 }
439 spin_unlock(&gioc_lock);
437 *iocpp = NULL; 440 *iocpp = NULL;
438 return -1; 441 return -1;
439} 442}
@@ -522,10 +525,15 @@ _ctl_poll(struct file *filep, poll_table *wait)
522 525
523 poll_wait(filep, &ctl_poll_wait, wait); 526 poll_wait(filep, &ctl_poll_wait, wait);
524 527
528 /* global ioc lock to protect controller on list operations */
529 spin_lock(&gioc_lock);
525 list_for_each_entry(ioc, &mpt2sas_ioc_list, list) { 530 list_for_each_entry(ioc, &mpt2sas_ioc_list, list) {
526 if (ioc->aen_event_read_flag) 531 if (ioc->aen_event_read_flag) {
532 spin_unlock(&gioc_lock);
527 return POLLIN | POLLRDNORM; 533 return POLLIN | POLLRDNORM;
534 }
528 } 535 }
536 spin_unlock(&gioc_lock);
529 return 0; 537 return 0;
530} 538}
531 539
@@ -2168,16 +2176,23 @@ _ctl_ioctl_main(struct file *file, unsigned int cmd, void __user *arg,
2168 2176
2169 if (_ctl_verify_adapter(ioctl_header.ioc_number, &ioc) == -1 || !ioc) 2177 if (_ctl_verify_adapter(ioctl_header.ioc_number, &ioc) == -1 || !ioc)
2170 return -ENODEV; 2178 return -ENODEV;
2179 /* pci_access_mutex lock acquired by ioctl path */
2180 mutex_lock(&ioc->pci_access_mutex);
2171 if (ioc->shost_recovery || ioc->pci_error_recovery || 2181 if (ioc->shost_recovery || ioc->pci_error_recovery ||
2172 ioc->is_driver_loading) 2182 ioc->is_driver_loading || ioc->remove_host) {
2173 return -EAGAIN; 2183 ret = -EAGAIN;
2184 goto out_unlock_pciaccess;
2185 }
2174 2186
2175 state = (file->f_flags & O_NONBLOCK) ? NON_BLOCKING : BLOCKING; 2187 state = (file->f_flags & O_NONBLOCK) ? NON_BLOCKING : BLOCKING;
2176 if (state == NON_BLOCKING) { 2188 if (state == NON_BLOCKING) {
2177 if (!mutex_trylock(&ioc->ctl_cmds.mutex)) 2189 if (!mutex_trylock(&ioc->ctl_cmds.mutex)) {
2178 return -EAGAIN; 2190 ret = -EAGAIN;
2191 goto out_unlock_pciaccess;
2192 }
2179 } else if (mutex_lock_interruptible(&ioc->ctl_cmds.mutex)) { 2193 } else if (mutex_lock_interruptible(&ioc->ctl_cmds.mutex)) {
2180 return -ERESTARTSYS; 2194 ret = -ERESTARTSYS;
2195 goto out_unlock_pciaccess;
2181 } 2196 }
2182 2197
2183 switch (cmd) { 2198 switch (cmd) {
@@ -2258,6 +2273,8 @@ _ctl_ioctl_main(struct file *file, unsigned int cmd, void __user *arg,
2258 } 2273 }
2259 2274
2260 mutex_unlock(&ioc->ctl_cmds.mutex); 2275 mutex_unlock(&ioc->ctl_cmds.mutex);
2276out_unlock_pciaccess:
2277 mutex_unlock(&ioc->pci_access_mutex);
2261 return ret; 2278 return ret;
2262} 2279}
2263 2280
@@ -2711,6 +2728,12 @@ _ctl_BRM_status_show(struct device *cdev, struct device_attribute *attr,
2711 "warpdrive\n", ioc->name, __func__); 2728 "warpdrive\n", ioc->name, __func__);
2712 goto out; 2729 goto out;
2713 } 2730 }
2731 /* pci_access_mutex lock acquired by sysfs show path */
2732 mutex_lock(&ioc->pci_access_mutex);
2733 if (ioc->pci_error_recovery || ioc->remove_host) {
2734 mutex_unlock(&ioc->pci_access_mutex);
2735 return 0;
2736 }
2714 2737
2715 /* allocate upto GPIOVal 36 entries */ 2738 /* allocate upto GPIOVal 36 entries */
2716 sz = offsetof(Mpi2IOUnitPage3_t, GPIOVal) + (sizeof(u16) * 36); 2739 sz = offsetof(Mpi2IOUnitPage3_t, GPIOVal) + (sizeof(u16) * 36);
@@ -2749,6 +2772,7 @@ _ctl_BRM_status_show(struct device *cdev, struct device_attribute *attr,
2749 2772
2750 out: 2773 out:
2751 kfree(io_unit_pg3); 2774 kfree(io_unit_pg3);
2775 mutex_unlock(&ioc->pci_access_mutex);
2752 return rc; 2776 return rc;
2753} 2777}
2754static DEVICE_ATTR(BRM_status, S_IRUGO, _ctl_BRM_status_show, NULL); 2778static DEVICE_ATTR(BRM_status, S_IRUGO, _ctl_BRM_status_show, NULL);
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
index 3f26147bbc64..0ad09b2bff9c 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
@@ -79,7 +79,8 @@ static int _scsih_scan_finished(struct Scsi_Host *shost, unsigned long time);
79 79
80/* global parameters */ 80/* global parameters */
81LIST_HEAD(mpt2sas_ioc_list); 81LIST_HEAD(mpt2sas_ioc_list);
82 82/* global ioc lock for list operations */
83DEFINE_SPINLOCK(gioc_lock);
83/* local parameters */ 84/* local parameters */
84static u8 scsi_io_cb_idx = -1; 85static u8 scsi_io_cb_idx = -1;
85static u8 tm_cb_idx = -1; 86static u8 tm_cb_idx = -1;
@@ -176,9 +177,37 @@ struct fw_event_work {
176 u8 VP_ID; 177 u8 VP_ID;
177 u8 ignore; 178 u8 ignore;
178 u16 event; 179 u16 event;
180 struct kref refcount;
179 char event_data[0] __aligned(4); 181 char event_data[0] __aligned(4);
180}; 182};
181 183
184static void fw_event_work_free(struct kref *r)
185{
186 kfree(container_of(r, struct fw_event_work, refcount));
187}
188
189static void fw_event_work_get(struct fw_event_work *fw_work)
190{
191 kref_get(&fw_work->refcount);
192}
193
194static void fw_event_work_put(struct fw_event_work *fw_work)
195{
196 kref_put(&fw_work->refcount, fw_event_work_free);
197}
198
199static struct fw_event_work *alloc_fw_event_work(int len)
200{
201 struct fw_event_work *fw_event;
202
203 fw_event = kzalloc(sizeof(*fw_event) + len, GFP_ATOMIC);
204 if (!fw_event)
205 return NULL;
206
207 kref_init(&fw_event->refcount);
208 return fw_event;
209}
210
182/* raid transport support */ 211/* raid transport support */
183static struct raid_template *mpt2sas_raid_template; 212static struct raid_template *mpt2sas_raid_template;
184 213
@@ -293,8 +322,10 @@ _scsih_set_debug_level(const char *val, struct kernel_param *kp)
293 return ret; 322 return ret;
294 323
295 printk(KERN_INFO "setting logging_level(0x%08x)\n", logging_level); 324 printk(KERN_INFO "setting logging_level(0x%08x)\n", logging_level);
325 spin_lock(&gioc_lock);
296 list_for_each_entry(ioc, &mpt2sas_ioc_list, list) 326 list_for_each_entry(ioc, &mpt2sas_ioc_list, list)
297 ioc->logging_level = logging_level; 327 ioc->logging_level = logging_level;
328 spin_unlock(&gioc_lock);
298 return 0; 329 return 0;
299} 330}
300module_param_call(logging_level, _scsih_set_debug_level, param_get_int, 331module_param_call(logging_level, _scsih_set_debug_level, param_get_int,
@@ -526,8 +557,61 @@ _scsih_determine_boot_device(struct MPT2SAS_ADAPTER *ioc,
526 } 557 }
527} 558}
528 559
560static struct _sas_device *
561__mpt2sas_get_sdev_from_target(struct MPT2SAS_ADAPTER *ioc,
562 struct MPT2SAS_TARGET *tgt_priv)
563{
564 struct _sas_device *ret;
565
566 assert_spin_locked(&ioc->sas_device_lock);
567
568 ret = tgt_priv->sdev;
569 if (ret)
570 sas_device_get(ret);
571
572 return ret;
573}
574
575static struct _sas_device *
576mpt2sas_get_sdev_from_target(struct MPT2SAS_ADAPTER *ioc,
577 struct MPT2SAS_TARGET *tgt_priv)
578{
579 struct _sas_device *ret;
580 unsigned long flags;
581
582 spin_lock_irqsave(&ioc->sas_device_lock, flags);
583 ret = __mpt2sas_get_sdev_from_target(ioc, tgt_priv);
584 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
585
586 return ret;
587}
588
589
590struct _sas_device *
591__mpt2sas_get_sdev_by_addr(struct MPT2SAS_ADAPTER *ioc,
592 u64 sas_address)
593{
594 struct _sas_device *sas_device;
595
596 assert_spin_locked(&ioc->sas_device_lock);
597
598 list_for_each_entry(sas_device, &ioc->sas_device_list, list)
599 if (sas_device->sas_address == sas_address)
600 goto found_device;
601
602 list_for_each_entry(sas_device, &ioc->sas_device_init_list, list)
603 if (sas_device->sas_address == sas_address)
604 goto found_device;
605
606 return NULL;
607
608found_device:
609 sas_device_get(sas_device);
610 return sas_device;
611}
612
529/** 613/**
530 * mpt2sas_scsih_sas_device_find_by_sas_address - sas device search 614 * mpt2sas_get_sdev_by_addr - sas device search
531 * @ioc: per adapter object 615 * @ioc: per adapter object
532 * @sas_address: sas address 616 * @sas_address: sas address
533 * Context: Calling function should acquire ioc->sas_device_lock 617 * Context: Calling function should acquire ioc->sas_device_lock
@@ -536,24 +620,44 @@ _scsih_determine_boot_device(struct MPT2SAS_ADAPTER *ioc,
536 * object. 620 * object.
537 */ 621 */
538struct _sas_device * 622struct _sas_device *
539mpt2sas_scsih_sas_device_find_by_sas_address(struct MPT2SAS_ADAPTER *ioc, 623mpt2sas_get_sdev_by_addr(struct MPT2SAS_ADAPTER *ioc,
540 u64 sas_address) 624 u64 sas_address)
541{ 625{
542 struct _sas_device *sas_device; 626 struct _sas_device *sas_device;
627 unsigned long flags;
628
629 spin_lock_irqsave(&ioc->sas_device_lock, flags);
630 sas_device = __mpt2sas_get_sdev_by_addr(ioc,
631 sas_address);
632 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
633
634 return sas_device;
635}
636
637static struct _sas_device *
638__mpt2sas_get_sdev_by_handle(struct MPT2SAS_ADAPTER *ioc, u16 handle)
639{
640 struct _sas_device *sas_device;
641
642 assert_spin_locked(&ioc->sas_device_lock);
543 643
544 list_for_each_entry(sas_device, &ioc->sas_device_list, list) 644 list_for_each_entry(sas_device, &ioc->sas_device_list, list)
545 if (sas_device->sas_address == sas_address) 645 if (sas_device->handle == handle)
546 return sas_device; 646 goto found_device;
547 647
548 list_for_each_entry(sas_device, &ioc->sas_device_init_list, list) 648 list_for_each_entry(sas_device, &ioc->sas_device_init_list, list)
549 if (sas_device->sas_address == sas_address) 649 if (sas_device->handle == handle)
550 return sas_device; 650 goto found_device;
551 651
552 return NULL; 652 return NULL;
653
654found_device:
655 sas_device_get(sas_device);
656 return sas_device;
553} 657}
554 658
555/** 659/**
556 * _scsih_sas_device_find_by_handle - sas device search 660 * mpt2sas_get_sdev_by_handle - sas device search
557 * @ioc: per adapter object 661 * @ioc: per adapter object
558 * @handle: sas device handle (assigned by firmware) 662 * @handle: sas device handle (assigned by firmware)
559 * Context: Calling function should acquire ioc->sas_device_lock 663 * Context: Calling function should acquire ioc->sas_device_lock
@@ -562,19 +666,16 @@ mpt2sas_scsih_sas_device_find_by_sas_address(struct MPT2SAS_ADAPTER *ioc,
562 * object. 666 * object.
563 */ 667 */
564static struct _sas_device * 668static struct _sas_device *
565_scsih_sas_device_find_by_handle(struct MPT2SAS_ADAPTER *ioc, u16 handle) 669mpt2sas_get_sdev_by_handle(struct MPT2SAS_ADAPTER *ioc, u16 handle)
566{ 670{
567 struct _sas_device *sas_device; 671 struct _sas_device *sas_device;
672 unsigned long flags;
568 673
569 list_for_each_entry(sas_device, &ioc->sas_device_list, list) 674 spin_lock_irqsave(&ioc->sas_device_lock, flags);
570 if (sas_device->handle == handle) 675 sas_device = __mpt2sas_get_sdev_by_handle(ioc, handle);
571 return sas_device; 676 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
572
573 list_for_each_entry(sas_device, &ioc->sas_device_init_list, list)
574 if (sas_device->handle == handle)
575 return sas_device;
576 677
577 return NULL; 678 return sas_device;
578} 679}
579 680
580/** 681/**
@@ -583,7 +684,7 @@ _scsih_sas_device_find_by_handle(struct MPT2SAS_ADAPTER *ioc, u16 handle)
583 * @sas_device: the sas_device object 684 * @sas_device: the sas_device object
584 * Context: This function will acquire ioc->sas_device_lock. 685 * Context: This function will acquire ioc->sas_device_lock.
585 * 686 *
586 * Removing object and freeing associated memory from the ioc->sas_device_list. 687 * If sas_device is on the list, remove it and decrement its reference count.
587 */ 688 */
588static void 689static void
589_scsih_sas_device_remove(struct MPT2SAS_ADAPTER *ioc, 690_scsih_sas_device_remove(struct MPT2SAS_ADAPTER *ioc,
@@ -594,9 +695,15 @@ _scsih_sas_device_remove(struct MPT2SAS_ADAPTER *ioc,
594 if (!sas_device) 695 if (!sas_device)
595 return; 696 return;
596 697
698 /*
699 * The lock serializes access to the list, but we still need to verify
700 * that nobody removed the entry while we were waiting on the lock.
701 */
597 spin_lock_irqsave(&ioc->sas_device_lock, flags); 702 spin_lock_irqsave(&ioc->sas_device_lock, flags);
598 list_del(&sas_device->list); 703 if (!list_empty(&sas_device->list)) {
599 kfree(sas_device); 704 list_del_init(&sas_device->list);
705 sas_device_put(sas_device);
706 }
600 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 707 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
601} 708}
602 709
@@ -620,6 +727,7 @@ _scsih_sas_device_add(struct MPT2SAS_ADAPTER *ioc,
620 sas_device->handle, (unsigned long long)sas_device->sas_address)); 727 sas_device->handle, (unsigned long long)sas_device->sas_address));
621 728
622 spin_lock_irqsave(&ioc->sas_device_lock, flags); 729 spin_lock_irqsave(&ioc->sas_device_lock, flags);
730 sas_device_get(sas_device);
623 list_add_tail(&sas_device->list, &ioc->sas_device_list); 731 list_add_tail(&sas_device->list, &ioc->sas_device_list);
624 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 732 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
625 733
@@ -659,6 +767,7 @@ _scsih_sas_device_init_add(struct MPT2SAS_ADAPTER *ioc,
659 sas_device->handle, (unsigned long long)sas_device->sas_address)); 767 sas_device->handle, (unsigned long long)sas_device->sas_address));
660 768
661 spin_lock_irqsave(&ioc->sas_device_lock, flags); 769 spin_lock_irqsave(&ioc->sas_device_lock, flags);
770 sas_device_get(sas_device);
662 list_add_tail(&sas_device->list, &ioc->sas_device_init_list); 771 list_add_tail(&sas_device->list, &ioc->sas_device_init_list);
663 _scsih_determine_boot_device(ioc, sas_device, 0); 772 _scsih_determine_boot_device(ioc, sas_device, 0);
664 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 773 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
@@ -1208,12 +1317,15 @@ _scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
1208 goto not_sata; 1317 goto not_sata;
1209 if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) 1318 if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME))
1210 goto not_sata; 1319 goto not_sata;
1320
1211 spin_lock_irqsave(&ioc->sas_device_lock, flags); 1321 spin_lock_irqsave(&ioc->sas_device_lock, flags);
1212 sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc, 1322 sas_device = __mpt2sas_get_sdev_from_target(ioc, sas_target_priv_data);
1213 sas_device_priv_data->sas_target->sas_address); 1323 if (sas_device) {
1214 if (sas_device && sas_device->device_info & 1324 if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
1215 MPI2_SAS_DEVICE_INFO_SATA_DEVICE) 1325 max_depth = MPT2SAS_SATA_QUEUE_DEPTH;
1216 max_depth = MPT2SAS_SATA_QUEUE_DEPTH; 1326
1327 sas_device_put(sas_device);
1328 }
1217 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 1329 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1218 1330
1219 not_sata: 1331 not_sata:
@@ -1271,18 +1383,20 @@ _scsih_target_alloc(struct scsi_target *starget)
1271 /* sas/sata devices */ 1383 /* sas/sata devices */
1272 spin_lock_irqsave(&ioc->sas_device_lock, flags); 1384 spin_lock_irqsave(&ioc->sas_device_lock, flags);
1273 rphy = dev_to_rphy(starget->dev.parent); 1385 rphy = dev_to_rphy(starget->dev.parent);
1274 sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc, 1386 sas_device = __mpt2sas_get_sdev_by_addr(ioc,
1275 rphy->identify.sas_address); 1387 rphy->identify.sas_address);
1276 1388
1277 if (sas_device) { 1389 if (sas_device) {
1278 sas_target_priv_data->handle = sas_device->handle; 1390 sas_target_priv_data->handle = sas_device->handle;
1279 sas_target_priv_data->sas_address = sas_device->sas_address; 1391 sas_target_priv_data->sas_address = sas_device->sas_address;
1392 sas_target_priv_data->sdev = sas_device;
1280 sas_device->starget = starget; 1393 sas_device->starget = starget;
1281 sas_device->id = starget->id; 1394 sas_device->id = starget->id;
1282 sas_device->channel = starget->channel; 1395 sas_device->channel = starget->channel;
1283 if (test_bit(sas_device->handle, ioc->pd_handles)) 1396 if (test_bit(sas_device->handle, ioc->pd_handles))
1284 sas_target_priv_data->flags |= 1397 sas_target_priv_data->flags |=
1285 MPT_TARGET_FLAGS_RAID_COMPONENT; 1398 MPT_TARGET_FLAGS_RAID_COMPONENT;
1399
1286 } 1400 }
1287 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 1401 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1288 1402
@@ -1324,13 +1438,21 @@ _scsih_target_destroy(struct scsi_target *starget)
1324 1438
1325 spin_lock_irqsave(&ioc->sas_device_lock, flags); 1439 spin_lock_irqsave(&ioc->sas_device_lock, flags);
1326 rphy = dev_to_rphy(starget->dev.parent); 1440 rphy = dev_to_rphy(starget->dev.parent);
1327 sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc, 1441 sas_device = __mpt2sas_get_sdev_from_target(ioc, sas_target_priv_data);
1328 rphy->identify.sas_address);
1329 if (sas_device && (sas_device->starget == starget) && 1442 if (sas_device && (sas_device->starget == starget) &&
1330 (sas_device->id == starget->id) && 1443 (sas_device->id == starget->id) &&
1331 (sas_device->channel == starget->channel)) 1444 (sas_device->channel == starget->channel))
1332 sas_device->starget = NULL; 1445 sas_device->starget = NULL;
1333 1446
1447 if (sas_device) {
1448 /*
1449 * Corresponding get() is in _scsih_target_alloc()
1450 */
1451 sas_target_priv_data->sdev = NULL;
1452 sas_device_put(sas_device);
1453
1454 sas_device_put(sas_device);
1455 }
1334 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 1456 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1335 1457
1336 out: 1458 out:
@@ -1386,7 +1508,7 @@ _scsih_slave_alloc(struct scsi_device *sdev)
1386 1508
1387 if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) { 1509 if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
1388 spin_lock_irqsave(&ioc->sas_device_lock, flags); 1510 spin_lock_irqsave(&ioc->sas_device_lock, flags);
1389 sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc, 1511 sas_device = __mpt2sas_get_sdev_by_addr(ioc,
1390 sas_target_priv_data->sas_address); 1512 sas_target_priv_data->sas_address);
1391 if (sas_device && (sas_device->starget == NULL)) { 1513 if (sas_device && (sas_device->starget == NULL)) {
1392 sdev_printk(KERN_INFO, sdev, 1514 sdev_printk(KERN_INFO, sdev,
@@ -1394,6 +1516,10 @@ _scsih_slave_alloc(struct scsi_device *sdev)
1394 __func__, __LINE__); 1516 __func__, __LINE__);
1395 sas_device->starget = starget; 1517 sas_device->starget = starget;
1396 } 1518 }
1519
1520 if (sas_device)
1521 sas_device_put(sas_device);
1522
1397 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 1523 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1398 } 1524 }
1399 1525
@@ -1428,10 +1554,13 @@ _scsih_slave_destroy(struct scsi_device *sdev)
1428 1554
1429 if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) { 1555 if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
1430 spin_lock_irqsave(&ioc->sas_device_lock, flags); 1556 spin_lock_irqsave(&ioc->sas_device_lock, flags);
1431 sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc, 1557 sas_device = __mpt2sas_get_sdev_from_target(ioc,
1432 sas_target_priv_data->sas_address); 1558 sas_target_priv_data);
1433 if (sas_device && !sas_target_priv_data->num_luns) 1559 if (sas_device && !sas_target_priv_data->num_luns)
1434 sas_device->starget = NULL; 1560 sas_device->starget = NULL;
1561
1562 if (sas_device)
1563 sas_device_put(sas_device);
1435 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 1564 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1436 } 1565 }
1437 1566
@@ -2078,7 +2207,7 @@ _scsih_slave_configure(struct scsi_device *sdev)
2078 } 2207 }
2079 2208
2080 spin_lock_irqsave(&ioc->sas_device_lock, flags); 2209 spin_lock_irqsave(&ioc->sas_device_lock, flags);
2081 sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc, 2210 sas_device = __mpt2sas_get_sdev_by_addr(ioc,
2082 sas_device_priv_data->sas_target->sas_address); 2211 sas_device_priv_data->sas_target->sas_address);
2083 if (!sas_device) { 2212 if (!sas_device) {
2084 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 2213 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
@@ -2112,17 +2241,18 @@ _scsih_slave_configure(struct scsi_device *sdev)
2112 (unsigned long long) sas_device->enclosure_logical_id, 2241 (unsigned long long) sas_device->enclosure_logical_id,
2113 sas_device->slot); 2242 sas_device->slot);
2114 2243
2244 sas_device_put(sas_device);
2115 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 2245 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2116 if (!ssp_target) 2246 if (!ssp_target)
2117 _scsih_display_sata_capabilities(ioc, handle, sdev); 2247 _scsih_display_sata_capabilities(ioc, handle, sdev);
2118 2248
2119
2120 _scsih_change_queue_depth(sdev, qdepth); 2249 _scsih_change_queue_depth(sdev, qdepth);
2121 2250
2122 if (ssp_target) { 2251 if (ssp_target) {
2123 sas_read_port_mode_page(sdev); 2252 sas_read_port_mode_page(sdev);
2124 _scsih_enable_tlr(ioc, sdev); 2253 _scsih_enable_tlr(ioc, sdev);
2125 } 2254 }
2255
2126 return 0; 2256 return 0;
2127} 2257}
2128 2258
@@ -2509,8 +2639,7 @@ _scsih_tm_display_info(struct MPT2SAS_ADAPTER *ioc, struct scsi_cmnd *scmd)
2509 device_str, (unsigned long long)priv_target->sas_address); 2639 device_str, (unsigned long long)priv_target->sas_address);
2510 } else { 2640 } else {
2511 spin_lock_irqsave(&ioc->sas_device_lock, flags); 2641 spin_lock_irqsave(&ioc->sas_device_lock, flags);
2512 sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc, 2642 sas_device = __mpt2sas_get_sdev_from_target(ioc, priv_target);
2513 priv_target->sas_address);
2514 if (sas_device) { 2643 if (sas_device) {
2515 if (priv_target->flags & 2644 if (priv_target->flags &
2516 MPT_TARGET_FLAGS_RAID_COMPONENT) { 2645 MPT_TARGET_FLAGS_RAID_COMPONENT) {
@@ -2529,6 +2658,8 @@ _scsih_tm_display_info(struct MPT2SAS_ADAPTER *ioc, struct scsi_cmnd *scmd)
2529 "enclosure_logical_id(0x%016llx), slot(%d)\n", 2658 "enclosure_logical_id(0x%016llx), slot(%d)\n",
2530 (unsigned long long)sas_device->enclosure_logical_id, 2659 (unsigned long long)sas_device->enclosure_logical_id,
2531 sas_device->slot); 2660 sas_device->slot);
2661
2662 sas_device_put(sas_device);
2532 } 2663 }
2533 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 2664 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2534 } 2665 }
@@ -2604,12 +2735,12 @@ _scsih_dev_reset(struct scsi_cmnd *scmd)
2604{ 2735{
2605 struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host); 2736 struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
2606 struct MPT2SAS_DEVICE *sas_device_priv_data; 2737 struct MPT2SAS_DEVICE *sas_device_priv_data;
2607 struct _sas_device *sas_device; 2738 struct _sas_device *sas_device = NULL;
2608 unsigned long flags;
2609 u16 handle; 2739 u16 handle;
2610 int r; 2740 int r;
2611 2741
2612 struct scsi_target *starget = scmd->device->sdev_target; 2742 struct scsi_target *starget = scmd->device->sdev_target;
2743 struct MPT2SAS_TARGET *target_priv_data = starget->hostdata;
2613 2744
2614 starget_printk(KERN_INFO, starget, "attempting device reset! " 2745 starget_printk(KERN_INFO, starget, "attempting device reset! "
2615 "scmd(%p)\n", scmd); 2746 "scmd(%p)\n", scmd);
@@ -2629,12 +2760,10 @@ _scsih_dev_reset(struct scsi_cmnd *scmd)
2629 handle = 0; 2760 handle = 0;
2630 if (sas_device_priv_data->sas_target->flags & 2761 if (sas_device_priv_data->sas_target->flags &
2631 MPT_TARGET_FLAGS_RAID_COMPONENT) { 2762 MPT_TARGET_FLAGS_RAID_COMPONENT) {
2632 spin_lock_irqsave(&ioc->sas_device_lock, flags); 2763 sas_device = mpt2sas_get_sdev_from_target(ioc,
2633 sas_device = _scsih_sas_device_find_by_handle(ioc, 2764 target_priv_data);
2634 sas_device_priv_data->sas_target->handle);
2635 if (sas_device) 2765 if (sas_device)
2636 handle = sas_device->volume_handle; 2766 handle = sas_device->volume_handle;
2637 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2638 } else 2767 } else
2639 handle = sas_device_priv_data->sas_target->handle; 2768 handle = sas_device_priv_data->sas_target->handle;
2640 2769
@@ -2651,6 +2780,10 @@ _scsih_dev_reset(struct scsi_cmnd *scmd)
2651 out: 2780 out:
2652 sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(%p)\n", 2781 sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(%p)\n",
2653 ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd); 2782 ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
2783
2784 if (sas_device)
2785 sas_device_put(sas_device);
2786
2654 return r; 2787 return r;
2655} 2788}
2656 2789
@@ -2665,11 +2798,11 @@ _scsih_target_reset(struct scsi_cmnd *scmd)
2665{ 2798{
2666 struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host); 2799 struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
2667 struct MPT2SAS_DEVICE *sas_device_priv_data; 2800 struct MPT2SAS_DEVICE *sas_device_priv_data;
2668 struct _sas_device *sas_device; 2801 struct _sas_device *sas_device = NULL;
2669 unsigned long flags;
2670 u16 handle; 2802 u16 handle;
2671 int r; 2803 int r;
2672 struct scsi_target *starget = scmd->device->sdev_target; 2804 struct scsi_target *starget = scmd->device->sdev_target;
2805 struct MPT2SAS_TARGET *target_priv_data = starget->hostdata;
2673 2806
2674 starget_printk(KERN_INFO, starget, "attempting target reset! " 2807 starget_printk(KERN_INFO, starget, "attempting target reset! "
2675 "scmd(%p)\n", scmd); 2808 "scmd(%p)\n", scmd);
@@ -2689,12 +2822,10 @@ _scsih_target_reset(struct scsi_cmnd *scmd)
2689 handle = 0; 2822 handle = 0;
2690 if (sas_device_priv_data->sas_target->flags & 2823 if (sas_device_priv_data->sas_target->flags &
2691 MPT_TARGET_FLAGS_RAID_COMPONENT) { 2824 MPT_TARGET_FLAGS_RAID_COMPONENT) {
2692 spin_lock_irqsave(&ioc->sas_device_lock, flags); 2825 sas_device = mpt2sas_get_sdev_from_target(ioc,
2693 sas_device = _scsih_sas_device_find_by_handle(ioc, 2826 target_priv_data);
2694 sas_device_priv_data->sas_target->handle);
2695 if (sas_device) 2827 if (sas_device)
2696 handle = sas_device->volume_handle; 2828 handle = sas_device->volume_handle;
2697 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2698 } else 2829 } else
2699 handle = sas_device_priv_data->sas_target->handle; 2830 handle = sas_device_priv_data->sas_target->handle;
2700 2831
@@ -2711,6 +2842,10 @@ _scsih_target_reset(struct scsi_cmnd *scmd)
2711 out: 2842 out:
2712 starget_printk(KERN_INFO, starget, "target reset: %s scmd(%p)\n", 2843 starget_printk(KERN_INFO, starget, "target reset: %s scmd(%p)\n",
2713 ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd); 2844 ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
2845
2846 if (sas_device)
2847 sas_device_put(sas_device);
2848
2714 return r; 2849 return r;
2715} 2850}
2716 2851
@@ -2768,36 +2903,39 @@ _scsih_fw_event_add(struct MPT2SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
2768 return; 2903 return;
2769 2904
2770 spin_lock_irqsave(&ioc->fw_event_lock, flags); 2905 spin_lock_irqsave(&ioc->fw_event_lock, flags);
2906 fw_event_work_get(fw_event);
2771 list_add_tail(&fw_event->list, &ioc->fw_event_list); 2907 list_add_tail(&fw_event->list, &ioc->fw_event_list);
2772 INIT_DELAYED_WORK(&fw_event->delayed_work, _firmware_event_work); 2908 INIT_DELAYED_WORK(&fw_event->delayed_work, _firmware_event_work);
2909 fw_event_work_get(fw_event);
2773 queue_delayed_work(ioc->firmware_event_thread, 2910 queue_delayed_work(ioc->firmware_event_thread,
2774 &fw_event->delayed_work, 0); 2911 &fw_event->delayed_work, 0);
2775 spin_unlock_irqrestore(&ioc->fw_event_lock, flags); 2912 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
2776} 2913}
2777 2914
2778/** 2915/**
2779 * _scsih_fw_event_free - delete fw_event 2916 * _scsih_fw_event_del_from_list - delete fw_event from the list
2780 * @ioc: per adapter object 2917 * @ioc: per adapter object
2781 * @fw_event: object describing the event 2918 * @fw_event: object describing the event
2782 * Context: This function will acquire ioc->fw_event_lock. 2919 * Context: This function will acquire ioc->fw_event_lock.
2783 * 2920 *
2784 * This removes firmware event object from link list, frees associated memory. 2921 * If the fw_event is on the fw_event_list, remove it and do a put.
2785 * 2922 *
2786 * Return nothing. 2923 * Return nothing.
2787 */ 2924 */
2788static void 2925static void
2789_scsih_fw_event_free(struct MPT2SAS_ADAPTER *ioc, struct fw_event_work 2926_scsih_fw_event_del_from_list(struct MPT2SAS_ADAPTER *ioc, struct fw_event_work
2790 *fw_event) 2927 *fw_event)
2791{ 2928{
2792 unsigned long flags; 2929 unsigned long flags;
2793 2930
2794 spin_lock_irqsave(&ioc->fw_event_lock, flags); 2931 spin_lock_irqsave(&ioc->fw_event_lock, flags);
2795 list_del(&fw_event->list); 2932 if (!list_empty(&fw_event->list)) {
2796 kfree(fw_event); 2933 list_del_init(&fw_event->list);
2934 fw_event_work_put(fw_event);
2935 }
2797 spin_unlock_irqrestore(&ioc->fw_event_lock, flags); 2936 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
2798} 2937}
2799 2938
2800
2801/** 2939/**
2802 * _scsih_error_recovery_delete_devices - remove devices not responding 2940 * _scsih_error_recovery_delete_devices - remove devices not responding
2803 * @ioc: per adapter object 2941 * @ioc: per adapter object
@@ -2812,13 +2950,14 @@ _scsih_error_recovery_delete_devices(struct MPT2SAS_ADAPTER *ioc)
2812 if (ioc->is_driver_loading) 2950 if (ioc->is_driver_loading)
2813 return; 2951 return;
2814 2952
2815 fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC); 2953 fw_event = alloc_fw_event_work(0);
2816 if (!fw_event) 2954 if (!fw_event)
2817 return; 2955 return;
2818 2956
2819 fw_event->event = MPT2SAS_REMOVE_UNRESPONDING_DEVICES; 2957 fw_event->event = MPT2SAS_REMOVE_UNRESPONDING_DEVICES;
2820 fw_event->ioc = ioc; 2958 fw_event->ioc = ioc;
2821 _scsih_fw_event_add(ioc, fw_event); 2959 _scsih_fw_event_add(ioc, fw_event);
2960 fw_event_work_put(fw_event);
2822} 2961}
2823 2962
2824/** 2963/**
@@ -2832,12 +2971,29 @@ mpt2sas_port_enable_complete(struct MPT2SAS_ADAPTER *ioc)
2832{ 2971{
2833 struct fw_event_work *fw_event; 2972 struct fw_event_work *fw_event;
2834 2973
2835 fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC); 2974 fw_event = alloc_fw_event_work(0);
2836 if (!fw_event) 2975 if (!fw_event)
2837 return; 2976 return;
2838 fw_event->event = MPT2SAS_PORT_ENABLE_COMPLETE; 2977 fw_event->event = MPT2SAS_PORT_ENABLE_COMPLETE;
2839 fw_event->ioc = ioc; 2978 fw_event->ioc = ioc;
2840 _scsih_fw_event_add(ioc, fw_event); 2979 _scsih_fw_event_add(ioc, fw_event);
2980 fw_event_work_put(fw_event);
2981}
2982
2983static struct fw_event_work *dequeue_next_fw_event(struct MPT2SAS_ADAPTER *ioc)
2984{
2985 unsigned long flags;
2986 struct fw_event_work *fw_event = NULL;
2987
2988 spin_lock_irqsave(&ioc->fw_event_lock, flags);
2989 if (!list_empty(&ioc->fw_event_list)) {
2990 fw_event = list_first_entry(&ioc->fw_event_list,
2991 struct fw_event_work, list);
2992 list_del_init(&fw_event->list);
2993 }
2994 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
2995
2996 return fw_event;
2841} 2997}
2842 2998
2843/** 2999/**
@@ -2852,17 +3008,25 @@ mpt2sas_port_enable_complete(struct MPT2SAS_ADAPTER *ioc)
2852static void 3008static void
2853_scsih_fw_event_cleanup_queue(struct MPT2SAS_ADAPTER *ioc) 3009_scsih_fw_event_cleanup_queue(struct MPT2SAS_ADAPTER *ioc)
2854{ 3010{
2855 struct fw_event_work *fw_event, *next; 3011 struct fw_event_work *fw_event;
2856 3012
2857 if (list_empty(&ioc->fw_event_list) || 3013 if (list_empty(&ioc->fw_event_list) ||
2858 !ioc->firmware_event_thread || in_interrupt()) 3014 !ioc->firmware_event_thread || in_interrupt())
2859 return; 3015 return;
2860 3016
2861 list_for_each_entry_safe(fw_event, next, &ioc->fw_event_list, list) { 3017 while ((fw_event = dequeue_next_fw_event(ioc))) {
2862 if (cancel_delayed_work_sync(&fw_event->delayed_work)) { 3018 /*
2863 _scsih_fw_event_free(ioc, fw_event); 3019 * Wait on the fw_event to complete. If this returns 1, then
2864 continue; 3020 * the event was never executed, and we need a put for the
2865 } 3021 * reference the delayed_work had on the fw_event.
3022 *
3023 * If it did execute, we wait for it to finish, and the put will
3024 * happen from _firmware_event_work()
3025 */
3026 if (cancel_delayed_work_sync(&fw_event->delayed_work))
3027 fw_event_work_put(fw_event);
3028
3029 fw_event_work_put(fw_event);
2866 } 3030 }
2867} 3031}
2868 3032
@@ -3002,15 +3166,15 @@ _scsih_block_io_to_children_attached_to_ex(struct MPT2SAS_ADAPTER *ioc,
3002 3166
3003 list_for_each_entry(mpt2sas_port, 3167 list_for_each_entry(mpt2sas_port,
3004 &sas_expander->sas_port_list, port_list) { 3168 &sas_expander->sas_port_list, port_list) {
3005 if (mpt2sas_port->remote_identify.device_type == 3169 if (mpt2sas_port->remote_identify.device_type == SAS_END_DEVICE) {
3006 SAS_END_DEVICE) {
3007 spin_lock_irqsave(&ioc->sas_device_lock, flags); 3170 spin_lock_irqsave(&ioc->sas_device_lock, flags);
3008 sas_device = 3171 sas_device = __mpt2sas_get_sdev_by_addr(ioc,
3009 mpt2sas_scsih_sas_device_find_by_sas_address(ioc, 3172 mpt2sas_port->remote_identify.sas_address);
3010 mpt2sas_port->remote_identify.sas_address); 3173 if (sas_device) {
3011 if (sas_device)
3012 set_bit(sas_device->handle, 3174 set_bit(sas_device->handle,
3013 ioc->blocking_handles); 3175 ioc->blocking_handles);
3176 sas_device_put(sas_device);
3177 }
3014 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 3178 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
3015 } 3179 }
3016 } 3180 }
@@ -3080,7 +3244,7 @@ _scsih_tm_tr_send(struct MPT2SAS_ADAPTER *ioc, u16 handle)
3080{ 3244{
3081 Mpi2SCSITaskManagementRequest_t *mpi_request; 3245 Mpi2SCSITaskManagementRequest_t *mpi_request;
3082 u16 smid; 3246 u16 smid;
3083 struct _sas_device *sas_device; 3247 struct _sas_device *sas_device = NULL;
3084 struct MPT2SAS_TARGET *sas_target_priv_data = NULL; 3248 struct MPT2SAS_TARGET *sas_target_priv_data = NULL;
3085 u64 sas_address = 0; 3249 u64 sas_address = 0;
3086 unsigned long flags; 3250 unsigned long flags;
@@ -3110,7 +3274,7 @@ _scsih_tm_tr_send(struct MPT2SAS_ADAPTER *ioc, u16 handle)
3110 return; 3274 return;
3111 3275
3112 spin_lock_irqsave(&ioc->sas_device_lock, flags); 3276 spin_lock_irqsave(&ioc->sas_device_lock, flags);
3113 sas_device = _scsih_sas_device_find_by_handle(ioc, handle); 3277 sas_device = __mpt2sas_get_sdev_by_handle(ioc, handle);
3114 if (sas_device && sas_device->starget && 3278 if (sas_device && sas_device->starget &&
3115 sas_device->starget->hostdata) { 3279 sas_device->starget->hostdata) {
3116 sas_target_priv_data = sas_device->starget->hostdata; 3280 sas_target_priv_data = sas_device->starget->hostdata;
@@ -3131,14 +3295,14 @@ _scsih_tm_tr_send(struct MPT2SAS_ADAPTER *ioc, u16 handle)
3131 if (!smid) { 3295 if (!smid) {
3132 delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC); 3296 delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
3133 if (!delayed_tr) 3297 if (!delayed_tr)
3134 return; 3298 goto out;
3135 INIT_LIST_HEAD(&delayed_tr->list); 3299 INIT_LIST_HEAD(&delayed_tr->list);
3136 delayed_tr->handle = handle; 3300 delayed_tr->handle = handle;
3137 list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list); 3301 list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list);
3138 dewtprintk(ioc, printk(MPT2SAS_INFO_FMT 3302 dewtprintk(ioc, printk(MPT2SAS_INFO_FMT
3139 "DELAYED:tr:handle(0x%04x), (open)\n", 3303 "DELAYED:tr:handle(0x%04x), (open)\n",
3140 ioc->name, handle)); 3304 ioc->name, handle));
3141 return; 3305 goto out;
3142 } 3306 }
3143 3307
3144 dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "tr_send:handle(0x%04x), " 3308 dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "tr_send:handle(0x%04x), "
@@ -3150,6 +3314,9 @@ _scsih_tm_tr_send(struct MPT2SAS_ADAPTER *ioc, u16 handle)
3150 mpi_request->DevHandle = cpu_to_le16(handle); 3314 mpi_request->DevHandle = cpu_to_le16(handle);
3151 mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET; 3315 mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
3152 mpt2sas_base_put_smid_hi_priority(ioc, smid); 3316 mpt2sas_base_put_smid_hi_priority(ioc, smid);
3317out:
3318 if (sas_device)
3319 sas_device_put(sas_device);
3153} 3320}
3154 3321
3155 3322
@@ -4068,7 +4235,6 @@ _scsih_scsi_ioc_info(struct MPT2SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
4068 char *desc_scsi_state = ioc->tmp_string; 4235 char *desc_scsi_state = ioc->tmp_string;
4069 u32 log_info = le32_to_cpu(mpi_reply->IOCLogInfo); 4236 u32 log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
4070 struct _sas_device *sas_device = NULL; 4237 struct _sas_device *sas_device = NULL;
4071 unsigned long flags;
4072 struct scsi_target *starget = scmd->device->sdev_target; 4238 struct scsi_target *starget = scmd->device->sdev_target;
4073 struct MPT2SAS_TARGET *priv_target = starget->hostdata; 4239 struct MPT2SAS_TARGET *priv_target = starget->hostdata;
4074 char *device_str = NULL; 4240 char *device_str = NULL;
@@ -4200,9 +4366,7 @@ _scsih_scsi_ioc_info(struct MPT2SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
4200 printk(MPT2SAS_WARN_FMT "\t%s wwid(0x%016llx)\n", ioc->name, 4366 printk(MPT2SAS_WARN_FMT "\t%s wwid(0x%016llx)\n", ioc->name,
4201 device_str, (unsigned long long)priv_target->sas_address); 4367 device_str, (unsigned long long)priv_target->sas_address);
4202 } else { 4368 } else {
4203 spin_lock_irqsave(&ioc->sas_device_lock, flags); 4369 sas_device = mpt2sas_get_sdev_from_target(ioc, priv_target);
4204 sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
4205 priv_target->sas_address);
4206 if (sas_device) { 4370 if (sas_device) {
4207 printk(MPT2SAS_WARN_FMT "\tsas_address(0x%016llx), " 4371 printk(MPT2SAS_WARN_FMT "\tsas_address(0x%016llx), "
4208 "phy(%d)\n", ioc->name, sas_device->sas_address, 4372 "phy(%d)\n", ioc->name, sas_device->sas_address,
@@ -4211,8 +4375,9 @@ _scsih_scsi_ioc_info(struct MPT2SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
4211 "\tenclosure_logical_id(0x%016llx), slot(%d)\n", 4375 "\tenclosure_logical_id(0x%016llx), slot(%d)\n",
4212 ioc->name, sas_device->enclosure_logical_id, 4376 ioc->name, sas_device->enclosure_logical_id,
4213 sas_device->slot); 4377 sas_device->slot);
4378
4379 sas_device_put(sas_device);
4214 } 4380 }
4215 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
4216 } 4381 }
4217 4382
4218 printk(MPT2SAS_WARN_FMT "\thandle(0x%04x), ioc_status(%s)(0x%04x), " 4383 printk(MPT2SAS_WARN_FMT "\thandle(0x%04x), ioc_status(%s)(0x%04x), "
@@ -4259,7 +4424,7 @@ _scsih_turn_on_pfa_led(struct MPT2SAS_ADAPTER *ioc, u16 handle)
4259 Mpi2SepRequest_t mpi_request; 4424 Mpi2SepRequest_t mpi_request;
4260 struct _sas_device *sas_device; 4425 struct _sas_device *sas_device;
4261 4426
4262 sas_device = _scsih_sas_device_find_by_handle(ioc, handle); 4427 sas_device = mpt2sas_get_sdev_by_handle(ioc, handle);
4263 if (!sas_device) 4428 if (!sas_device)
4264 return; 4429 return;
4265 4430
@@ -4274,7 +4439,7 @@ _scsih_turn_on_pfa_led(struct MPT2SAS_ADAPTER *ioc, u16 handle)
4274 &mpi_request)) != 0) { 4439 &mpi_request)) != 0) {
4275 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", ioc->name, 4440 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", ioc->name,
4276 __FILE__, __LINE__, __func__); 4441 __FILE__, __LINE__, __func__);
4277 return; 4442 goto out;
4278 } 4443 }
4279 sas_device->pfa_led_on = 1; 4444 sas_device->pfa_led_on = 1;
4280 4445
@@ -4284,8 +4449,10 @@ _scsih_turn_on_pfa_led(struct MPT2SAS_ADAPTER *ioc, u16 handle)
4284 "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n", 4449 "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
4285 ioc->name, le16_to_cpu(mpi_reply.IOCStatus), 4450 ioc->name, le16_to_cpu(mpi_reply.IOCStatus),
4286 le32_to_cpu(mpi_reply.IOCLogInfo))); 4451 le32_to_cpu(mpi_reply.IOCLogInfo)));
4287 return; 4452 goto out;
4288 } 4453 }
4454out:
4455 sas_device_put(sas_device);
4289} 4456}
4290 4457
4291/** 4458/**
@@ -4340,13 +4507,14 @@ _scsih_send_event_to_turn_on_pfa_led(struct MPT2SAS_ADAPTER *ioc, u16 handle)
4340{ 4507{
4341 struct fw_event_work *fw_event; 4508 struct fw_event_work *fw_event;
4342 4509
4343 fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC); 4510 fw_event = alloc_fw_event_work(0);
4344 if (!fw_event) 4511 if (!fw_event)
4345 return; 4512 return;
4346 fw_event->event = MPT2SAS_TURN_ON_PFA_LED; 4513 fw_event->event = MPT2SAS_TURN_ON_PFA_LED;
4347 fw_event->device_handle = handle; 4514 fw_event->device_handle = handle;
4348 fw_event->ioc = ioc; 4515 fw_event->ioc = ioc;
4349 _scsih_fw_event_add(ioc, fw_event); 4516 _scsih_fw_event_add(ioc, fw_event);
4517 fw_event_work_put(fw_event);
4350} 4518}
4351 4519
4352/** 4520/**
@@ -4370,19 +4538,17 @@ _scsih_smart_predicted_fault(struct MPT2SAS_ADAPTER *ioc, u16 handle)
4370 4538
4371 /* only handle non-raid devices */ 4539 /* only handle non-raid devices */
4372 spin_lock_irqsave(&ioc->sas_device_lock, flags); 4540 spin_lock_irqsave(&ioc->sas_device_lock, flags);
4373 sas_device = _scsih_sas_device_find_by_handle(ioc, handle); 4541 sas_device = __mpt2sas_get_sdev_by_handle(ioc, handle);
4374 if (!sas_device) { 4542 if (!sas_device) {
4375 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 4543 goto out_unlock;
4376 return;
4377 } 4544 }
4378 starget = sas_device->starget; 4545 starget = sas_device->starget;
4379 sas_target_priv_data = starget->hostdata; 4546 sas_target_priv_data = starget->hostdata;
4380 4547
4381 if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) || 4548 if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) ||
4382 ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME))) { 4549 ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)))
4383 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 4550 goto out_unlock;
4384 return; 4551
4385 }
4386 starget_printk(KERN_WARNING, starget, "predicted fault\n"); 4552 starget_printk(KERN_WARNING, starget, "predicted fault\n");
4387 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 4553 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
4388 4554
@@ -4396,7 +4562,7 @@ _scsih_smart_predicted_fault(struct MPT2SAS_ADAPTER *ioc, u16 handle)
4396 if (!event_reply) { 4562 if (!event_reply) {
4397 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", 4563 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
4398 ioc->name, __FILE__, __LINE__, __func__); 4564 ioc->name, __FILE__, __LINE__, __func__);
4399 return; 4565 goto out;
4400 } 4566 }
4401 4567
4402 event_reply->Function = MPI2_FUNCTION_EVENT_NOTIFICATION; 4568 event_reply->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
@@ -4413,6 +4579,14 @@ _scsih_smart_predicted_fault(struct MPT2SAS_ADAPTER *ioc, u16 handle)
4413 event_data->SASAddress = cpu_to_le64(sas_target_priv_data->sas_address); 4579 event_data->SASAddress = cpu_to_le64(sas_target_priv_data->sas_address);
4414 mpt2sas_ctl_add_to_event_log(ioc, event_reply); 4580 mpt2sas_ctl_add_to_event_log(ioc, event_reply);
4415 kfree(event_reply); 4581 kfree(event_reply);
4582out:
4583 if (sas_device)
4584 sas_device_put(sas_device);
4585 return;
4586
4587out_unlock:
4588 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
4589 goto out;
4416} 4590}
4417 4591
4418/** 4592/**
@@ -5148,14 +5322,13 @@ _scsih_check_device(struct MPT2SAS_ADAPTER *ioc, u16 handle)
5148 5322
5149 spin_lock_irqsave(&ioc->sas_device_lock, flags); 5323 spin_lock_irqsave(&ioc->sas_device_lock, flags);
5150 sas_address = le64_to_cpu(sas_device_pg0.SASAddress); 5324 sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
5151 sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc, 5325 sas_device = __mpt2sas_get_sdev_by_addr(ioc,
5152 sas_address); 5326 sas_address);
5153 5327
5154 if (!sas_device) { 5328 if (!sas_device) {
5155 printk(MPT2SAS_ERR_FMT "device is not present " 5329 printk(MPT2SAS_ERR_FMT "device is not present "
5156 "handle(0x%04x), no sas_device!!!\n", ioc->name, handle); 5330 "handle(0x%04x), no sas_device!!!\n", ioc->name, handle);
5157 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 5331 goto out_unlock;
5158 return;
5159 } 5332 }
5160 5333
5161 if (unlikely(sas_device->handle != handle)) { 5334 if (unlikely(sas_device->handle != handle)) {
@@ -5172,19 +5345,24 @@ _scsih_check_device(struct MPT2SAS_ADAPTER *ioc, u16 handle)
5172 MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) { 5345 MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) {
5173 printk(MPT2SAS_ERR_FMT "device is not present " 5346 printk(MPT2SAS_ERR_FMT "device is not present "
5174 "handle(0x%04x), flags!!!\n", ioc->name, handle); 5347 "handle(0x%04x), flags!!!\n", ioc->name, handle);
5175 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 5348 goto out_unlock;
5176 return;
5177 } 5349 }
5178 5350
5179 /* check if there were any issues with discovery */ 5351 /* check if there were any issues with discovery */
5180 if (_scsih_check_access_status(ioc, sas_address, handle, 5352 if (_scsih_check_access_status(ioc, sas_address, handle,
5181 sas_device_pg0.AccessStatus)) { 5353 sas_device_pg0.AccessStatus))
5182 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 5354 goto out_unlock;
5183 return; 5355
5184 }
5185 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 5356 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
5186 _scsih_ublock_io_device(ioc, sas_address); 5357 _scsih_ublock_io_device(ioc, sas_address);
5358 if (sas_device)
5359 sas_device_put(sas_device);
5360 return;
5187 5361
5362out_unlock:
5363 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
5364 if (sas_device)
5365 sas_device_put(sas_device);
5188} 5366}
5189 5367
5190/** 5368/**
@@ -5208,7 +5386,6 @@ _scsih_add_device(struct MPT2SAS_ADAPTER *ioc, u16 handle, u8 phy_num, u8 is_pd)
5208 u32 ioc_status; 5386 u32 ioc_status;
5209 __le64 sas_address; 5387 __le64 sas_address;
5210 u32 device_info; 5388 u32 device_info;
5211 unsigned long flags;
5212 5389
5213 if ((mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0, 5390 if ((mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
5214 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) { 5391 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
@@ -5250,14 +5427,13 @@ _scsih_add_device(struct MPT2SAS_ADAPTER *ioc, u16 handle, u8 phy_num, u8 is_pd)
5250 return -1; 5427 return -1;
5251 } 5428 }
5252 5429
5253 5430 sas_device = mpt2sas_get_sdev_by_addr(ioc,
5254 spin_lock_irqsave(&ioc->sas_device_lock, flags);
5255 sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
5256 sas_address); 5431 sas_address);
5257 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
5258 5432
5259 if (sas_device) 5433 if (sas_device) {
5434 sas_device_put(sas_device);
5260 return 0; 5435 return 0;
5436 }
5261 5437
5262 sas_device = kzalloc(sizeof(struct _sas_device), 5438 sas_device = kzalloc(sizeof(struct _sas_device),
5263 GFP_KERNEL); 5439 GFP_KERNEL);
@@ -5267,6 +5443,7 @@ _scsih_add_device(struct MPT2SAS_ADAPTER *ioc, u16 handle, u8 phy_num, u8 is_pd)
5267 return -1; 5443 return -1;
5268 } 5444 }
5269 5445
5446 kref_init(&sas_device->refcount);
5270 sas_device->handle = handle; 5447 sas_device->handle = handle;
5271 if (_scsih_get_sas_address(ioc, le16_to_cpu 5448 if (_scsih_get_sas_address(ioc, le16_to_cpu
5272 (sas_device_pg0.ParentDevHandle), 5449 (sas_device_pg0.ParentDevHandle),
@@ -5296,6 +5473,7 @@ _scsih_add_device(struct MPT2SAS_ADAPTER *ioc, u16 handle, u8 phy_num, u8 is_pd)
5296 else 5473 else
5297 _scsih_sas_device_add(ioc, sas_device); 5474 _scsih_sas_device_add(ioc, sas_device);
5298 5475
5476 sas_device_put(sas_device);
5299 return 0; 5477 return 0;
5300} 5478}
5301 5479
@@ -5344,7 +5522,6 @@ _scsih_remove_device(struct MPT2SAS_ADAPTER *ioc,
5344 "handle(0x%04x), sas_addr(0x%016llx)\n", ioc->name, __func__, 5522 "handle(0x%04x), sas_addr(0x%016llx)\n", ioc->name, __func__,
5345 sas_device->handle, (unsigned long long) 5523 sas_device->handle, (unsigned long long)
5346 sas_device->sas_address)); 5524 sas_device->sas_address));
5347 kfree(sas_device);
5348} 5525}
5349/** 5526/**
5350 * _scsih_device_remove_by_handle - removing device object by handle 5527 * _scsih_device_remove_by_handle - removing device object by handle
@@ -5363,12 +5540,17 @@ _scsih_device_remove_by_handle(struct MPT2SAS_ADAPTER *ioc, u16 handle)
5363 return; 5540 return;
5364 5541
5365 spin_lock_irqsave(&ioc->sas_device_lock, flags); 5542 spin_lock_irqsave(&ioc->sas_device_lock, flags);
5366 sas_device = _scsih_sas_device_find_by_handle(ioc, handle); 5543 sas_device = __mpt2sas_get_sdev_by_handle(ioc, handle);
5367 if (sas_device) 5544 if (sas_device) {
5368 list_del(&sas_device->list); 5545 list_del_init(&sas_device->list);
5546 sas_device_put(sas_device);
5547 }
5369 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 5548 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
5370 if (sas_device) 5549
5550 if (sas_device) {
5371 _scsih_remove_device(ioc, sas_device); 5551 _scsih_remove_device(ioc, sas_device);
5552 sas_device_put(sas_device);
5553 }
5372} 5554}
5373 5555
5374/** 5556/**
@@ -5389,13 +5571,17 @@ mpt2sas_device_remove_by_sas_address(struct MPT2SAS_ADAPTER *ioc,
5389 return; 5571 return;
5390 5572
5391 spin_lock_irqsave(&ioc->sas_device_lock, flags); 5573 spin_lock_irqsave(&ioc->sas_device_lock, flags);
5392 sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc, 5574 sas_device = __mpt2sas_get_sdev_by_addr(ioc, sas_address);
5393 sas_address); 5575 if (sas_device) {
5394 if (sas_device) 5576 list_del_init(&sas_device->list);
5395 list_del(&sas_device->list); 5577 sas_device_put(sas_device);
5578 }
5396 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 5579 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
5397 if (sas_device) 5580
5581 if (sas_device) {
5398 _scsih_remove_device(ioc, sas_device); 5582 _scsih_remove_device(ioc, sas_device);
5583 sas_device_put(sas_device);
5584 }
5399} 5585}
5400#ifdef CONFIG_SCSI_MPT2SAS_LOGGING 5586#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
5401/** 5587/**
@@ -5716,26 +5902,28 @@ _scsih_sas_device_status_change_event(struct MPT2SAS_ADAPTER *ioc,
5716 5902
5717 spin_lock_irqsave(&ioc->sas_device_lock, flags); 5903 spin_lock_irqsave(&ioc->sas_device_lock, flags);
5718 sas_address = le64_to_cpu(event_data->SASAddress); 5904 sas_address = le64_to_cpu(event_data->SASAddress);
5719 sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc, 5905 sas_device = __mpt2sas_get_sdev_by_addr(ioc,
5720 sas_address); 5906 sas_address);
5721 5907
5722 if (!sas_device || !sas_device->starget) { 5908 if (!sas_device || !sas_device->starget)
5723 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 5909 goto out;
5724 return;
5725 }
5726 5910
5727 target_priv_data = sas_device->starget->hostdata; 5911 target_priv_data = sas_device->starget->hostdata;
5728 if (!target_priv_data) { 5912 if (!target_priv_data)
5729 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 5913 goto out;
5730 return;
5731 }
5732 5914
5733 if (event_data->ReasonCode == 5915 if (event_data->ReasonCode ==
5734 MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET) 5916 MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET)
5735 target_priv_data->tm_busy = 1; 5917 target_priv_data->tm_busy = 1;
5736 else 5918 else
5737 target_priv_data->tm_busy = 0; 5919 target_priv_data->tm_busy = 0;
5920
5921out:
5922 if (sas_device)
5923 sas_device_put(sas_device);
5924
5738 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 5925 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
5926
5739} 5927}
5740 5928
5741#ifdef CONFIG_SCSI_MPT2SAS_LOGGING 5929#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
@@ -6123,7 +6311,7 @@ _scsih_sas_pd_expose(struct MPT2SAS_ADAPTER *ioc,
6123 u16 handle = le16_to_cpu(element->PhysDiskDevHandle); 6311 u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
6124 6312
6125 spin_lock_irqsave(&ioc->sas_device_lock, flags); 6313 spin_lock_irqsave(&ioc->sas_device_lock, flags);
6126 sas_device = _scsih_sas_device_find_by_handle(ioc, handle); 6314 sas_device = __mpt2sas_get_sdev_by_handle(ioc, handle);
6127 if (sas_device) { 6315 if (sas_device) {
6128 sas_device->volume_handle = 0; 6316 sas_device->volume_handle = 0;
6129 sas_device->volume_wwid = 0; 6317 sas_device->volume_wwid = 0;
@@ -6142,6 +6330,8 @@ _scsih_sas_pd_expose(struct MPT2SAS_ADAPTER *ioc,
6142 /* exposing raid component */ 6330 /* exposing raid component */
6143 if (starget) 6331 if (starget)
6144 starget_for_each_device(starget, NULL, _scsih_reprobe_lun); 6332 starget_for_each_device(starget, NULL, _scsih_reprobe_lun);
6333
6334 sas_device_put(sas_device);
6145} 6335}
6146 6336
6147/** 6337/**
@@ -6170,7 +6360,7 @@ _scsih_sas_pd_hide(struct MPT2SAS_ADAPTER *ioc,
6170 &volume_wwid); 6360 &volume_wwid);
6171 6361
6172 spin_lock_irqsave(&ioc->sas_device_lock, flags); 6362 spin_lock_irqsave(&ioc->sas_device_lock, flags);
6173 sas_device = _scsih_sas_device_find_by_handle(ioc, handle); 6363 sas_device = __mpt2sas_get_sdev_by_handle(ioc, handle);
6174 if (sas_device) { 6364 if (sas_device) {
6175 set_bit(handle, ioc->pd_handles); 6365 set_bit(handle, ioc->pd_handles);
6176 if (sas_device->starget && sas_device->starget->hostdata) { 6366 if (sas_device->starget && sas_device->starget->hostdata) {
@@ -6189,6 +6379,8 @@ _scsih_sas_pd_hide(struct MPT2SAS_ADAPTER *ioc,
6189 /* hiding raid component */ 6379 /* hiding raid component */
6190 if (starget) 6380 if (starget)
6191 starget_for_each_device(starget, (void *)1, _scsih_reprobe_lun); 6381 starget_for_each_device(starget, (void *)1, _scsih_reprobe_lun);
6382
6383 sas_device_put(sas_device);
6192} 6384}
6193 6385
6194/** 6386/**
@@ -6221,7 +6413,6 @@ _scsih_sas_pd_add(struct MPT2SAS_ADAPTER *ioc,
6221 Mpi2EventIrConfigElement_t *element) 6413 Mpi2EventIrConfigElement_t *element)
6222{ 6414{
6223 struct _sas_device *sas_device; 6415 struct _sas_device *sas_device;
6224 unsigned long flags;
6225 u16 handle = le16_to_cpu(element->PhysDiskDevHandle); 6416 u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
6226 Mpi2ConfigReply_t mpi_reply; 6417 Mpi2ConfigReply_t mpi_reply;
6227 Mpi2SasDevicePage0_t sas_device_pg0; 6418 Mpi2SasDevicePage0_t sas_device_pg0;
@@ -6231,11 +6422,11 @@ _scsih_sas_pd_add(struct MPT2SAS_ADAPTER *ioc,
6231 6422
6232 set_bit(handle, ioc->pd_handles); 6423 set_bit(handle, ioc->pd_handles);
6233 6424
6234 spin_lock_irqsave(&ioc->sas_device_lock, flags); 6425 sas_device = mpt2sas_get_sdev_by_handle(ioc, handle);
6235 sas_device = _scsih_sas_device_find_by_handle(ioc, handle); 6426 if (sas_device) {
6236 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 6427 sas_device_put(sas_device);
6237 if (sas_device)
6238 return; 6428 return;
6429 }
6239 6430
6240 if ((mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0, 6431 if ((mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
6241 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) { 6432 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
@@ -6509,7 +6700,6 @@ _scsih_sas_ir_physical_disk_event(struct MPT2SAS_ADAPTER *ioc,
6509 u16 handle, parent_handle; 6700 u16 handle, parent_handle;
6510 u32 state; 6701 u32 state;
6511 struct _sas_device *sas_device; 6702 struct _sas_device *sas_device;
6512 unsigned long flags;
6513 Mpi2ConfigReply_t mpi_reply; 6703 Mpi2ConfigReply_t mpi_reply;
6514 Mpi2SasDevicePage0_t sas_device_pg0; 6704 Mpi2SasDevicePage0_t sas_device_pg0;
6515 u32 ioc_status; 6705 u32 ioc_status;
@@ -6542,12 +6732,11 @@ _scsih_sas_ir_physical_disk_event(struct MPT2SAS_ADAPTER *ioc,
6542 if (!ioc->is_warpdrive) 6732 if (!ioc->is_warpdrive)
6543 set_bit(handle, ioc->pd_handles); 6733 set_bit(handle, ioc->pd_handles);
6544 6734
6545 spin_lock_irqsave(&ioc->sas_device_lock, flags); 6735 sas_device = mpt2sas_get_sdev_by_handle(ioc, handle);
6546 sas_device = _scsih_sas_device_find_by_handle(ioc, handle); 6736 if (sas_device) {
6547 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 6737 sas_device_put(sas_device);
6548
6549 if (sas_device)
6550 return; 6738 return;
6739 }
6551 6740
6552 if ((mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply, 6741 if ((mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply,
6553 &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, 6742 &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
@@ -7015,6 +7204,7 @@ _scsih_remove_unresponding_sas_devices(struct MPT2SAS_ADAPTER *ioc)
7015 struct _raid_device *raid_device, *raid_device_next; 7204 struct _raid_device *raid_device, *raid_device_next;
7016 struct list_head tmp_list; 7205 struct list_head tmp_list;
7017 unsigned long flags; 7206 unsigned long flags;
7207 LIST_HEAD(head);
7018 7208
7019 printk(MPT2SAS_INFO_FMT "removing unresponding devices: start\n", 7209 printk(MPT2SAS_INFO_FMT "removing unresponding devices: start\n",
7020 ioc->name); 7210 ioc->name);
@@ -7022,14 +7212,29 @@ _scsih_remove_unresponding_sas_devices(struct MPT2SAS_ADAPTER *ioc)
7022 /* removing unresponding end devices */ 7212 /* removing unresponding end devices */
7023 printk(MPT2SAS_INFO_FMT "removing unresponding devices: end-devices\n", 7213 printk(MPT2SAS_INFO_FMT "removing unresponding devices: end-devices\n",
7024 ioc->name); 7214 ioc->name);
7215
7216 /*
7217 * Iterate, pulling off devices marked as non-responding. We become the
7218 * owner for the reference the list had on any object we prune.
7219 */
7220 spin_lock_irqsave(&ioc->sas_device_lock, flags);
7025 list_for_each_entry_safe(sas_device, sas_device_next, 7221 list_for_each_entry_safe(sas_device, sas_device_next,
7026 &ioc->sas_device_list, list) { 7222 &ioc->sas_device_list, list) {
7027 if (!sas_device->responding) 7223 if (!sas_device->responding)
7028 mpt2sas_device_remove_by_sas_address(ioc, 7224 list_move_tail(&sas_device->list, &head);
7029 sas_device->sas_address);
7030 else 7225 else
7031 sas_device->responding = 0; 7226 sas_device->responding = 0;
7032 } 7227 }
7228 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
7229
7230 /*
7231 * Now, uninitialize and remove the unresponding devices we pruned.
7232 */
7233 list_for_each_entry_safe(sas_device, sas_device_next, &head, list) {
7234 _scsih_remove_device(ioc, sas_device);
7235 list_del_init(&sas_device->list);
7236 sas_device_put(sas_device);
7237 }
7033 7238
7034 /* removing unresponding volumes */ 7239 /* removing unresponding volumes */
7035 if (ioc->ir_firmware) { 7240 if (ioc->ir_firmware) {
@@ -7179,11 +7384,11 @@ _scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc)
7179 } 7384 }
7180 phys_disk_num = pd_pg0.PhysDiskNum; 7385 phys_disk_num = pd_pg0.PhysDiskNum;
7181 handle = le16_to_cpu(pd_pg0.DevHandle); 7386 handle = le16_to_cpu(pd_pg0.DevHandle);
7182 spin_lock_irqsave(&ioc->sas_device_lock, flags); 7387 sas_device = mpt2sas_get_sdev_by_handle(ioc, handle);
7183 sas_device = _scsih_sas_device_find_by_handle(ioc, handle); 7388 if (sas_device) {
7184 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 7389 sas_device_put(sas_device);
7185 if (sas_device)
7186 continue; 7390 continue;
7391 }
7187 if (mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply, 7392 if (mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply,
7188 &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, 7393 &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
7189 handle) != 0) 7394 handle) != 0)
@@ -7302,12 +7507,12 @@ _scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc)
7302 if (!(_scsih_is_end_device( 7507 if (!(_scsih_is_end_device(
7303 le32_to_cpu(sas_device_pg0.DeviceInfo)))) 7508 le32_to_cpu(sas_device_pg0.DeviceInfo))))
7304 continue; 7509 continue;
7305 spin_lock_irqsave(&ioc->sas_device_lock, flags); 7510 sas_device = mpt2sas_get_sdev_by_addr(ioc,
7306 sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
7307 le64_to_cpu(sas_device_pg0.SASAddress)); 7511 le64_to_cpu(sas_device_pg0.SASAddress));
7308 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 7512 if (sas_device) {
7309 if (sas_device) 7513 sas_device_put(sas_device);
7310 continue; 7514 continue;
7515 }
7311 parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle); 7516 parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
7312 if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address)) { 7517 if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address)) {
7313 printk(MPT2SAS_INFO_FMT "\tBEFORE adding end device: " 7518 printk(MPT2SAS_INFO_FMT "\tBEFORE adding end device: "
@@ -7410,17 +7615,27 @@ _firmware_event_work(struct work_struct *work)
7410 struct fw_event_work, delayed_work.work); 7615 struct fw_event_work, delayed_work.work);
7411 struct MPT2SAS_ADAPTER *ioc = fw_event->ioc; 7616 struct MPT2SAS_ADAPTER *ioc = fw_event->ioc;
7412 7617
7618 _scsih_fw_event_del_from_list(ioc, fw_event);
7619
7413 /* the queue is being flushed so ignore this event */ 7620 /* the queue is being flushed so ignore this event */
7414 if (ioc->remove_host || 7621 if (ioc->remove_host || ioc->pci_error_recovery) {
7415 ioc->pci_error_recovery) { 7622 fw_event_work_put(fw_event);
7416 _scsih_fw_event_free(ioc, fw_event);
7417 return; 7623 return;
7418 } 7624 }
7419 7625
7420 switch (fw_event->event) { 7626 switch (fw_event->event) {
7421 case MPT2SAS_REMOVE_UNRESPONDING_DEVICES: 7627 case MPT2SAS_REMOVE_UNRESPONDING_DEVICES:
7422 while (scsi_host_in_recovery(ioc->shost) || ioc->shost_recovery) 7628 while (scsi_host_in_recovery(ioc->shost) ||
7629 ioc->shost_recovery) {
7630 /*
7631 * If we're unloading, bail. Otherwise, this can become
7632 * an infinite loop.
7633 */
7634 if (ioc->remove_host)
7635 goto out;
7636
7423 ssleep(1); 7637 ssleep(1);
7638 }
7424 _scsih_remove_unresponding_sas_devices(ioc); 7639 _scsih_remove_unresponding_sas_devices(ioc);
7425 _scsih_scan_for_devices_after_reset(ioc); 7640 _scsih_scan_for_devices_after_reset(ioc);
7426 break; 7641 break;
@@ -7469,7 +7684,8 @@ _firmware_event_work(struct work_struct *work)
7469 _scsih_sas_ir_operation_status_event(ioc, fw_event); 7684 _scsih_sas_ir_operation_status_event(ioc, fw_event);
7470 break; 7685 break;
7471 } 7686 }
7472 _scsih_fw_event_free(ioc, fw_event); 7687out:
7688 fw_event_work_put(fw_event);
7473} 7689}
7474 7690
7475/** 7691/**
@@ -7607,7 +7823,7 @@ mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index,
7607 } 7823 }
7608 7824
7609 sz = le16_to_cpu(mpi_reply->EventDataLength) * 4; 7825 sz = le16_to_cpu(mpi_reply->EventDataLength) * 4;
7610 fw_event = kzalloc(sizeof(*fw_event) + sz, GFP_ATOMIC); 7826 fw_event = alloc_fw_event_work(sz);
7611 if (!fw_event) { 7827 if (!fw_event) {
7612 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", 7828 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
7613 ioc->name, __FILE__, __LINE__, __func__); 7829 ioc->name, __FILE__, __LINE__, __func__);
@@ -7620,6 +7836,7 @@ mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index,
7620 fw_event->VP_ID = mpi_reply->VP_ID; 7836 fw_event->VP_ID = mpi_reply->VP_ID;
7621 fw_event->event = event; 7837 fw_event->event = event;
7622 _scsih_fw_event_add(ioc, fw_event); 7838 _scsih_fw_event_add(ioc, fw_event);
7839 fw_event_work_put(fw_event);
7623 return; 7840 return;
7624} 7841}
7625 7842
@@ -7867,7 +8084,9 @@ _scsih_remove(struct pci_dev *pdev)
7867 sas_remove_host(shost); 8084 sas_remove_host(shost);
7868 scsi_remove_host(shost); 8085 scsi_remove_host(shost);
7869 mpt2sas_base_detach(ioc); 8086 mpt2sas_base_detach(ioc);
8087 spin_lock(&gioc_lock);
7870 list_del(&ioc->list); 8088 list_del(&ioc->list);
8089 spin_unlock(&gioc_lock);
7871 scsi_host_put(shost); 8090 scsi_host_put(shost);
7872} 8091}
7873 8092
@@ -7966,6 +8185,48 @@ _scsih_probe_raid(struct MPT2SAS_ADAPTER *ioc)
7966 } 8185 }
7967} 8186}
7968 8187
8188static struct _sas_device *get_next_sas_device(struct MPT2SAS_ADAPTER *ioc)
8189{
8190 struct _sas_device *sas_device = NULL;
8191 unsigned long flags;
8192
8193 spin_lock_irqsave(&ioc->sas_device_lock, flags);
8194 if (!list_empty(&ioc->sas_device_init_list)) {
8195 sas_device = list_first_entry(&ioc->sas_device_init_list,
8196 struct _sas_device, list);
8197 sas_device_get(sas_device);
8198 }
8199 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
8200
8201 return sas_device;
8202}
8203
8204static void sas_device_make_active(struct MPT2SAS_ADAPTER *ioc,
8205 struct _sas_device *sas_device)
8206{
8207 unsigned long flags;
8208
8209 spin_lock_irqsave(&ioc->sas_device_lock, flags);
8210
8211 /*
8212 * Since we dropped the lock during the call to port_add(), we need to
8213 * be careful here that somebody else didn't move or delete this item
8214 * while we were busy with other things.
8215 *
8216 * If it was on the list, we need a put() for the reference the list
8217 * had. Either way, we need a get() for the destination list.
8218 */
8219 if (!list_empty(&sas_device->list)) {
8220 list_del_init(&sas_device->list);
8221 sas_device_put(sas_device);
8222 }
8223
8224 sas_device_get(sas_device);
8225 list_add_tail(&sas_device->list, &ioc->sas_device_list);
8226
8227 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
8228}
8229
7969/** 8230/**
7970 * _scsih_probe_sas - reporting sas devices to sas transport 8231 * _scsih_probe_sas - reporting sas devices to sas transport
7971 * @ioc: per adapter object 8232 * @ioc: per adapter object
@@ -7975,34 +8236,30 @@ _scsih_probe_raid(struct MPT2SAS_ADAPTER *ioc)
7975static void 8236static void
7976_scsih_probe_sas(struct MPT2SAS_ADAPTER *ioc) 8237_scsih_probe_sas(struct MPT2SAS_ADAPTER *ioc)
7977{ 8238{
7978 struct _sas_device *sas_device, *next; 8239 struct _sas_device *sas_device;
7979 unsigned long flags;
7980
7981 /* SAS Device List */
7982 list_for_each_entry_safe(sas_device, next, &ioc->sas_device_init_list,
7983 list) {
7984 8240
7985 if (ioc->hide_drives) 8241 if (ioc->hide_drives)
7986 continue; 8242 return;
7987 8243
8244 while ((sas_device = get_next_sas_device(ioc))) {
7988 if (!mpt2sas_transport_port_add(ioc, sas_device->handle, 8245 if (!mpt2sas_transport_port_add(ioc, sas_device->handle,
7989 sas_device->sas_address_parent)) { 8246 sas_device->sas_address_parent)) {
7990 list_del(&sas_device->list); 8247 _scsih_sas_device_remove(ioc, sas_device);
7991 kfree(sas_device); 8248 sas_device_put(sas_device);
7992 continue; 8249 continue;
7993 } else if (!sas_device->starget) { 8250 } else if (!sas_device->starget) {
7994 if (!ioc->is_driver_loading) { 8251 if (!ioc->is_driver_loading) {
7995 mpt2sas_transport_port_remove(ioc, 8252 mpt2sas_transport_port_remove(ioc,
7996 sas_device->sas_address, 8253 sas_device->sas_address,
7997 sas_device->sas_address_parent); 8254 sas_device->sas_address_parent);
7998 list_del(&sas_device->list); 8255 _scsih_sas_device_remove(ioc, sas_device);
7999 kfree(sas_device); 8256 sas_device_put(sas_device);
8000 continue; 8257 continue;
8001 } 8258 }
8002 } 8259 }
8003 spin_lock_irqsave(&ioc->sas_device_lock, flags); 8260
8004 list_move_tail(&sas_device->list, &ioc->sas_device_list); 8261 sas_device_make_active(ioc, sas_device);
8005 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 8262 sas_device_put(sas_device);
8006 } 8263 }
8007} 8264}
8008 8265
@@ -8142,7 +8399,9 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
8142 ioc = shost_priv(shost); 8399 ioc = shost_priv(shost);
8143 memset(ioc, 0, sizeof(struct MPT2SAS_ADAPTER)); 8400 memset(ioc, 0, sizeof(struct MPT2SAS_ADAPTER));
8144 INIT_LIST_HEAD(&ioc->list); 8401 INIT_LIST_HEAD(&ioc->list);
8402 spin_lock(&gioc_lock);
8145 list_add_tail(&ioc->list, &mpt2sas_ioc_list); 8403 list_add_tail(&ioc->list, &mpt2sas_ioc_list);
8404 spin_unlock(&gioc_lock);
8146 ioc->shost = shost; 8405 ioc->shost = shost;
8147 ioc->id = mpt_ids++; 8406 ioc->id = mpt_ids++;
8148 sprintf(ioc->name, "%s%d", MPT2SAS_DRIVER_NAME, ioc->id); 8407 sprintf(ioc->name, "%s%d", MPT2SAS_DRIVER_NAME, ioc->id);
@@ -8167,6 +8426,8 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
8167 ioc->schedule_dead_ioc_flush_running_cmds = &_scsih_flush_running_cmds; 8426 ioc->schedule_dead_ioc_flush_running_cmds = &_scsih_flush_running_cmds;
8168 /* misc semaphores and spin locks */ 8427 /* misc semaphores and spin locks */
8169 mutex_init(&ioc->reset_in_progress_mutex); 8428 mutex_init(&ioc->reset_in_progress_mutex);
8429 /* initializing pci_access_mutex lock */
8430 mutex_init(&ioc->pci_access_mutex);
8170 spin_lock_init(&ioc->ioc_reset_in_progress_lock); 8431 spin_lock_init(&ioc->ioc_reset_in_progress_lock);
8171 spin_lock_init(&ioc->scsi_lookup_lock); 8432 spin_lock_init(&ioc->scsi_lookup_lock);
8172 spin_lock_init(&ioc->sas_device_lock); 8433 spin_lock_init(&ioc->sas_device_lock);
@@ -8269,7 +8530,9 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
8269 out_attach_fail: 8530 out_attach_fail:
8270 destroy_workqueue(ioc->firmware_event_thread); 8531 destroy_workqueue(ioc->firmware_event_thread);
8271 out_thread_fail: 8532 out_thread_fail:
8533 spin_lock(&gioc_lock);
8272 list_del(&ioc->list); 8534 list_del(&ioc->list);
8535 spin_unlock(&gioc_lock);
8273 scsi_host_put(shost); 8536 scsi_host_put(shost);
8274 return rv; 8537 return rv;
8275} 8538}
diff --git a/drivers/scsi/mpt2sas/mpt2sas_transport.c b/drivers/scsi/mpt2sas/mpt2sas_transport.c
index ff2500ab9ba4..af868009395d 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_transport.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_transport.c
@@ -1323,15 +1323,17 @@ _transport_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier)
1323 int rc; 1323 int rc;
1324 1324
1325 spin_lock_irqsave(&ioc->sas_device_lock, flags); 1325 spin_lock_irqsave(&ioc->sas_device_lock, flags);
1326 sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc, 1326 sas_device = __mpt2sas_get_sdev_by_addr(ioc,
1327 rphy->identify.sas_address); 1327 rphy->identify.sas_address);
1328 if (sas_device) { 1328 if (sas_device) {
1329 *identifier = sas_device->enclosure_logical_id; 1329 *identifier = sas_device->enclosure_logical_id;
1330 rc = 0; 1330 rc = 0;
1331 sas_device_put(sas_device);
1331 } else { 1332 } else {
1332 *identifier = 0; 1333 *identifier = 0;
1333 rc = -ENXIO; 1334 rc = -ENXIO;
1334 } 1335 }
1336
1335 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 1337 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1336 return rc; 1338 return rc;
1337} 1339}
@@ -1351,12 +1353,14 @@ _transport_get_bay_identifier(struct sas_rphy *rphy)
1351 int rc; 1353 int rc;
1352 1354
1353 spin_lock_irqsave(&ioc->sas_device_lock, flags); 1355 spin_lock_irqsave(&ioc->sas_device_lock, flags);
1354 sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc, 1356 sas_device = __mpt2sas_get_sdev_by_addr(ioc,
1355 rphy->identify.sas_address); 1357 rphy->identify.sas_address);
1356 if (sas_device) 1358 if (sas_device) {
1357 rc = sas_device->slot; 1359 rc = sas_device->slot;
1358 else 1360 sas_device_put(sas_device);
1361 } else {
1359 rc = -ENXIO; 1362 rc = -ENXIO;
1363 }
1360 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 1364 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1361 return rc; 1365 return rc;
1362} 1366}
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2.h b/drivers/scsi/mpt3sas/mpi/mpi2.h
index c34c1157907b..ec27ad2d186f 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2.h
@@ -8,7 +8,7 @@
8 * scatter/gather formats. 8 * scatter/gather formats.
9 * Creation Date: June 21, 2006 9 * Creation Date: June 21, 2006
10 * 10 *
11 * mpi2.h Version: 02.00.31 11 * mpi2.h Version: 02.00.35
12 * 12 *
13 * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25 13 * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
14 * prefix are for use only on MPI v2.5 products, and must not be used 14 * prefix are for use only on MPI v2.5 products, and must not be used
@@ -88,6 +88,10 @@
88 * Added MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET. 88 * Added MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET.
89 * 04-09-13 02.00.30 Bumped MPI2_HEADER_VERSION_UNIT. 89 * 04-09-13 02.00.30 Bumped MPI2_HEADER_VERSION_UNIT.
90 * 04-17-13 02.00.31 Bumped MPI2_HEADER_VERSION_UNIT. 90 * 04-17-13 02.00.31 Bumped MPI2_HEADER_VERSION_UNIT.
91 * 08-19-13 02.00.32 Bumped MPI2_HEADER_VERSION_UNIT.
92 * 12-05-13 02.00.33 Bumped MPI2_HEADER_VERSION_UNIT.
93 * 01-08-14 02.00.34 Bumped MPI2_HEADER_VERSION_UNIT
94 * 06-13-14 02.00.35 Bumped MPI2_HEADER_VERSION_UNIT.
91 * -------------------------------------------------------------------------- 95 * --------------------------------------------------------------------------
92 */ 96 */
93 97
@@ -121,7 +125,7 @@
121#define MPI2_VERSION_02_05 (0x0205) 125#define MPI2_VERSION_02_05 (0x0205)
122 126
123/*Unit and Dev versioning for this MPI header set */ 127/*Unit and Dev versioning for this MPI header set */
124#define MPI2_HEADER_VERSION_UNIT (0x1F) 128#define MPI2_HEADER_VERSION_UNIT (0x23)
125#define MPI2_HEADER_VERSION_DEV (0x00) 129#define MPI2_HEADER_VERSION_DEV (0x00)
126#define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00) 130#define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00)
127#define MPI2_HEADER_VERSION_UNIT_SHIFT (8) 131#define MPI2_HEADER_VERSION_UNIT_SHIFT (8)
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
index e261a3153bb3..581fdb375db5 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
@@ -6,7 +6,7 @@
6 * Title: MPI Configuration messages and pages 6 * Title: MPI Configuration messages and pages
7 * Creation Date: November 10, 2006 7 * Creation Date: November 10, 2006
8 * 8 *
9 * mpi2_cnfg.h Version: 02.00.26 9 * mpi2_cnfg.h Version: 02.00.29
10 * 10 *
11 * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25 11 * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
12 * prefix are for use only on MPI v2.5 products, and must not be used 12 * prefix are for use only on MPI v2.5 products, and must not be used
@@ -165,6 +165,20 @@
165 * match the specification. 165 * match the specification.
166 * 08-19-13 02.00.26 Added reserved words to MPI2_CONFIG_PAGE_IO_UNIT_7 for 166 * 08-19-13 02.00.26 Added reserved words to MPI2_CONFIG_PAGE_IO_UNIT_7 for
167 * future use. 167 * future use.
168 * 12-05-13 02.00.27 Added MPI2_MANPAGE7_FLAG_BASE_ENCLOSURE_LEVEL for
169 * MPI2_CONFIG_PAGE_MAN_7.
170 * Added EnclosureLevel and ConnectorName fields to
171 * MPI2_CONFIG_PAGE_SAS_DEV_0.
172 * Added MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID for
173 * MPI2_CONFIG_PAGE_SAS_DEV_0.
174 * Added EnclosureLevel field to
175 * MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0.
176 * Added MPI2_SAS_ENCLS0_FLAGS_ENCL_LEVEL_VALID for
177 * MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0.
178 * 01-08-14 02.00.28 Added more defines for the BiosOptions field of
179 * MPI2_CONFIG_PAGE_BIOS_1.
180 * 06-13-14 02.00.29 Added SSUTimeout field to MPI2_CONFIG_PAGE_BIOS_1, and
181 * more defines for the BiosOptions field..
168 * -------------------------------------------------------------------------- 182 * --------------------------------------------------------------------------
169 */ 183 */
170 184
@@ -724,6 +738,7 @@ typedef struct _MPI2_CONFIG_PAGE_MAN_7 {
724#define MPI2_MANUFACTURING7_PAGEVERSION (0x01) 738#define MPI2_MANUFACTURING7_PAGEVERSION (0x01)
725 739
726/*defines for the Flags field */ 740/*defines for the Flags field */
741#define MPI2_MANPAGE7_FLAG_BASE_ENCLOSURE_LEVEL (0x00000008)
727#define MPI2_MANPAGE7_FLAG_EVENTREPLAY_SLOT_ORDER (0x00000002) 742#define MPI2_MANPAGE7_FLAG_EVENTREPLAY_SLOT_ORDER (0x00000002)
728#define MPI2_MANPAGE7_FLAG_USE_SLOT_INFO (0x00000001) 743#define MPI2_MANPAGE7_FLAG_USE_SLOT_INFO (0x00000001)
729 744
@@ -1311,7 +1326,9 @@ typedef struct _MPI2_CONFIG_PAGE_BIOS_1 {
1311 MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ 1326 MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
1312 U32 BiosOptions; /*0x04 */ 1327 U32 BiosOptions; /*0x04 */
1313 U32 IOCSettings; /*0x08 */ 1328 U32 IOCSettings; /*0x08 */
1314 U32 Reserved1; /*0x0C */ 1329 U8 SSUTimeout; /*0x0C */
1330 U8 Reserved1; /*0x0D */
1331 U16 Reserved2; /*0x0E */
1315 U32 DeviceSettings; /*0x10 */ 1332 U32 DeviceSettings; /*0x10 */
1316 U16 NumberOfDevices; /*0x14 */ 1333 U16 NumberOfDevices; /*0x14 */
1317 U16 UEFIVersion; /*0x16 */ 1334 U16 UEFIVersion; /*0x16 */
@@ -1323,9 +1340,24 @@ typedef struct _MPI2_CONFIG_PAGE_BIOS_1 {
1323 *PTR_MPI2_CONFIG_PAGE_BIOS_1, 1340 *PTR_MPI2_CONFIG_PAGE_BIOS_1,
1324 Mpi2BiosPage1_t, *pMpi2BiosPage1_t; 1341 Mpi2BiosPage1_t, *pMpi2BiosPage1_t;
1325 1342
1326#define MPI2_BIOSPAGE1_PAGEVERSION (0x05) 1343#define MPI2_BIOSPAGE1_PAGEVERSION (0x07)
1327 1344
1328/*values for BIOS Page 1 BiosOptions field */ 1345/*values for BIOS Page 1 BiosOptions field */
1346#define MPI2_BIOSPAGE1_OPTIONS_PNS_MASK (0x00003800)
1347#define MPI2_BIOSPAGE1_OPTIONS_PNS_PBDHL (0x00000000)
1348#define MPI2_BIOSPAGE1_OPTIONS_PNS_ENCSLOSURE (0x00000800)
1349#define MPI2_BIOSPAGE1_OPTIONS_PNS_LWWID (0x00001000)
1350#define MPI2_BIOSPAGE1_OPTIONS_PNS_PSENS (0x00001800)
1351#define MPI2_BIOSPAGE1_OPTIONS_PNS_ESPHY (0x00002000)
1352
1353#define MPI2_BIOSPAGE1_OPTIONS_X86_DISABLE_BIOS (0x00000400)
1354
1355#define MPI2_BIOSPAGE1_OPTIONS_MASK_REGISTRATION_UEFI_BSD (0x00000300)
1356#define MPI2_BIOSPAGE1_OPTIONS_USE_BIT0_REGISTRATION_UEFI_BSD (0x00000000)
1357#define MPI2_BIOSPAGE1_OPTIONS_FULL_REGISTRATION_UEFI_BSD (0x00000100)
1358#define MPI2_BIOSPAGE1_OPTIONS_ADAPTER_REGISTRATION_UEFI_BSD (0x00000200)
1359#define MPI2_BIOSPAGE1_OPTIONS_DISABLE_REGISTRATION_UEFI_BSD (0x00000300)
1360
1329#define MPI2_BIOSPAGE1_OPTIONS_MASK_OEM_ID (0x000000F0) 1361#define MPI2_BIOSPAGE1_OPTIONS_MASK_OEM_ID (0x000000F0)
1330#define MPI2_BIOSPAGE1_OPTIONS_LSI_OEM_ID (0x00000000) 1362#define MPI2_BIOSPAGE1_OPTIONS_LSI_OEM_ID (0x00000000)
1331 1363
@@ -2633,9 +2665,9 @@ typedef struct _MPI2_CONFIG_PAGE_SAS_DEV_0 {
2633 U8 2665 U8
2634 ControlGroup; /*0x2E */ 2666 ControlGroup; /*0x2E */
2635 U8 2667 U8
2636 Reserved1; /*0x2F */ 2668 EnclosureLevel; /*0x2F */
2637 U32 2669 U32
2638 Reserved2; /*0x30 */ 2670 ConnectorName[4]; /*0x30 */
2639 U32 2671 U32
2640 Reserved3; /*0x34 */ 2672 Reserved3; /*0x34 */
2641} MPI2_CONFIG_PAGE_SAS_DEV_0, 2673} MPI2_CONFIG_PAGE_SAS_DEV_0,
@@ -2643,7 +2675,7 @@ typedef struct _MPI2_CONFIG_PAGE_SAS_DEV_0 {
2643 Mpi2SasDevicePage0_t, 2675 Mpi2SasDevicePage0_t,
2644 *pMpi2SasDevicePage0_t; 2676 *pMpi2SasDevicePage0_t;
2645 2677
2646#define MPI2_SASDEVICE0_PAGEVERSION (0x08) 2678#define MPI2_SASDEVICE0_PAGEVERSION (0x09)
2647 2679
2648/*values for SAS Device Page 0 AccessStatus field */ 2680/*values for SAS Device Page 0 AccessStatus field */
2649#define MPI2_SAS_DEVICE0_ASTATUS_NO_ERRORS (0x00) 2681#define MPI2_SAS_DEVICE0_ASTATUS_NO_ERRORS (0x00)
@@ -2683,6 +2715,7 @@ typedef struct _MPI2_CONFIG_PAGE_SAS_DEV_0 {
2683#define MPI2_SAS_DEVICE0_FLAGS_SATA_NCQ_SUPPORTED (0x0020) 2715#define MPI2_SAS_DEVICE0_FLAGS_SATA_NCQ_SUPPORTED (0x0020)
2684#define MPI2_SAS_DEVICE0_FLAGS_SATA_FUA_SUPPORTED (0x0010) 2716#define MPI2_SAS_DEVICE0_FLAGS_SATA_FUA_SUPPORTED (0x0010)
2685#define MPI2_SAS_DEVICE0_FLAGS_PORT_SELECTOR_ATTACH (0x0008) 2717#define MPI2_SAS_DEVICE0_FLAGS_PORT_SELECTOR_ATTACH (0x0008)
2718#define MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID (0x0002)
2686#define MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT (0x0001) 2719#define MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT (0x0001)
2687 2720
2688 2721
@@ -3019,8 +3052,10 @@ typedef struct _MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0 {
3019 NumSlots; /*0x18 */ 3052 NumSlots; /*0x18 */
3020 U16 3053 U16
3021 StartSlot; /*0x1A */ 3054 StartSlot; /*0x1A */
3022 U16 3055 U8
3023 Reserved2; /*0x1C */ 3056 Reserved2; /*0x1C */
3057 U8
3058 EnclosureLevel; /*0x1D */
3024 U16 3059 U16
3025 SEPDevHandle; /*0x1E */ 3060 SEPDevHandle; /*0x1E */
3026 U32 3061 U32
@@ -3031,9 +3066,10 @@ typedef struct _MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0 {
3031 *PTR_MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0, 3066 *PTR_MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0,
3032 Mpi2SasEnclosurePage0_t, *pMpi2SasEnclosurePage0_t; 3067 Mpi2SasEnclosurePage0_t, *pMpi2SasEnclosurePage0_t;
3033 3068
3034#define MPI2_SASENCLOSURE0_PAGEVERSION (0x03) 3069#define MPI2_SASENCLOSURE0_PAGEVERSION (0x04)
3035 3070
3036/*values for SAS Enclosure Page 0 Flags field */ 3071/*values for SAS Enclosure Page 0 Flags field */
3072#define MPI2_SAS_ENCLS0_FLAGS_ENCL_LEVEL_VALID (0x0010)
3037#define MPI2_SAS_ENCLS0_FLAGS_MNG_MASK (0x000F) 3073#define MPI2_SAS_ENCLS0_FLAGS_MNG_MASK (0x000F)
3038#define MPI2_SAS_ENCLS0_FLAGS_MNG_UNKNOWN (0x0000) 3074#define MPI2_SAS_ENCLS0_FLAGS_MNG_UNKNOWN (0x0000)
3039#define MPI2_SAS_ENCLS0_FLAGS_MNG_IOC_SES (0x0001) 3075#define MPI2_SAS_ENCLS0_FLAGS_MNG_IOC_SES (0x0001)
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h b/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h
index 490830957806..d7598cc4bb8e 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h
@@ -6,7 +6,7 @@
6 * Title: MPI IOC, Port, Event, FW Download, and FW Upload messages 6 * Title: MPI IOC, Port, Event, FW Download, and FW Upload messages
7 * Creation Date: October 11, 2006 7 * Creation Date: October 11, 2006
8 * 8 *
9 * mpi2_ioc.h Version: 02.00.23 9 * mpi2_ioc.h Version: 02.00.24
10 * 10 *
11 * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25 11 * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
12 * prefix are for use only on MPI v2.5 products, and must not be used 12 * prefix are for use only on MPI v2.5 products, and must not be used
@@ -132,6 +132,7 @@
132 * Added MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE. 132 * Added MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE.
133 * Added MPI2_FW_DOWNLOAD_ITYPE_PUBLIC_KEY. 133 * Added MPI2_FW_DOWNLOAD_ITYPE_PUBLIC_KEY.
134 * Added Encrypted Hash Extended Image. 134 * Added Encrypted Hash Extended Image.
135 * 12-05-13 02.00.24 Added MPI25_HASH_IMAGE_TYPE_BIOS.
135 * -------------------------------------------------------------------------- 136 * --------------------------------------------------------------------------
136 */ 137 */
137 138
@@ -1598,6 +1599,7 @@ Mpi25EncryptedHashEntry_t, *pMpi25EncryptedHashEntry_t;
1598/* values for HashImageType */ 1599/* values for HashImageType */
1599#define MPI25_HASH_IMAGE_TYPE_UNUSED (0x00) 1600#define MPI25_HASH_IMAGE_TYPE_UNUSED (0x00)
1600#define MPI25_HASH_IMAGE_TYPE_FIRMWARE (0x01) 1601#define MPI25_HASH_IMAGE_TYPE_FIRMWARE (0x01)
1602#define MPI25_HASH_IMAGE_TYPE_BIOS (0x02)
1601 1603
1602/* values for HashAlgorithm */ 1604/* values for HashAlgorithm */
1603#define MPI25_HASH_ALGORITHM_UNUSED (0x00) 1605#define MPI25_HASH_ALGORITHM_UNUSED (0x00)
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_tool.h b/drivers/scsi/mpt3sas/mpi/mpi2_tool.h
index 904910d8a737..1629e5bce7e1 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_tool.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_tool.h
@@ -6,7 +6,7 @@
6 * Title: MPI diagnostic tool structures and definitions 6 * Title: MPI diagnostic tool structures and definitions
7 * Creation Date: March 26, 2007 7 * Creation Date: March 26, 2007
8 * 8 *
9 * mpi2_tool.h Version: 02.00.11 9 * mpi2_tool.h Version: 02.00.12
10 * 10 *
11 * Version History 11 * Version History
12 * --------------- 12 * ---------------
@@ -33,6 +33,7 @@
33 * 07-26-12 02.00.10 Modified MPI2_TOOLBOX_DIAGNOSTIC_CLI_REQUEST so that 33 * 07-26-12 02.00.10 Modified MPI2_TOOLBOX_DIAGNOSTIC_CLI_REQUEST so that
34 * it uses MPI Chain SGE as well as MPI Simple SGE. 34 * it uses MPI Chain SGE as well as MPI Simple SGE.
35 * 08-19-13 02.00.11 Added MPI2_TOOLBOX_TEXT_DISPLAY_TOOL and related info. 35 * 08-19-13 02.00.11 Added MPI2_TOOLBOX_TEXT_DISPLAY_TOOL and related info.
36 * 01-08-14 02.00.12 Added MPI2_TOOLBOX_CLEAN_BIT26_PRODUCT_SPECIFIC.
36 * -------------------------------------------------------------------------- 37 * --------------------------------------------------------------------------
37 */ 38 */
38 39
@@ -100,6 +101,7 @@ typedef struct _MPI2_TOOLBOX_CLEAN_REQUEST {
100#define MPI2_TOOLBOX_CLEAN_OTHER_PERSIST_PAGES (0x20000000) 101#define MPI2_TOOLBOX_CLEAN_OTHER_PERSIST_PAGES (0x20000000)
101#define MPI2_TOOLBOX_CLEAN_FW_CURRENT (0x10000000) 102#define MPI2_TOOLBOX_CLEAN_FW_CURRENT (0x10000000)
102#define MPI2_TOOLBOX_CLEAN_FW_BACKUP (0x08000000) 103#define MPI2_TOOLBOX_CLEAN_FW_BACKUP (0x08000000)
104#define MPI2_TOOLBOX_CLEAN_BIT26_PRODUCT_SPECIFIC (0x04000000)
103#define MPI2_TOOLBOX_CLEAN_MEGARAID (0x02000000) 105#define MPI2_TOOLBOX_CLEAN_MEGARAID (0x02000000)
104#define MPI2_TOOLBOX_CLEAN_INITIALIZATION (0x01000000) 106#define MPI2_TOOLBOX_CLEAN_INITIALIZATION (0x01000000)
105#define MPI2_TOOLBOX_CLEAN_FLASH (0x00000004) 107#define MPI2_TOOLBOX_CLEAN_FLASH (0x00000004)
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 43f87e904b98..d4f1dcdb8361 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -83,10 +83,10 @@ static int msix_disable = -1;
83module_param(msix_disable, int, 0); 83module_param(msix_disable, int, 0);
84MODULE_PARM_DESC(msix_disable, " disable msix routed interrupts (default=0)"); 84MODULE_PARM_DESC(msix_disable, " disable msix routed interrupts (default=0)");
85 85
86static int max_msix_vectors = 8; 86static int max_msix_vectors = -1;
87module_param(max_msix_vectors, int, 0); 87module_param(max_msix_vectors, int, 0);
88MODULE_PARM_DESC(max_msix_vectors, 88MODULE_PARM_DESC(max_msix_vectors,
89 " max msix vectors - (default=8)"); 89 " max msix vectors");
90 90
91static int mpt3sas_fwfault_debug; 91static int mpt3sas_fwfault_debug;
92MODULE_PARM_DESC(mpt3sas_fwfault_debug, 92MODULE_PARM_DESC(mpt3sas_fwfault_debug,
@@ -1009,8 +1009,30 @@ _base_interrupt(int irq, void *bus_id)
1009 } 1009 }
1010 1010
1011 wmb(); 1011 wmb();
1012 writel(reply_q->reply_post_host_index | (msix_index << 1012
1013 MPI2_RPHI_MSIX_INDEX_SHIFT), &ioc->chip->ReplyPostHostIndex); 1013 /* Update Reply Post Host Index.
1014 * For those HBA's which support combined reply queue feature
1015 * 1. Get the correct Supplemental Reply Post Host Index Register.
1016 * i.e. (msix_index / 8)th entry from Supplemental Reply Post Host
1017 * Index Register address bank i.e replyPostRegisterIndex[],
1018 * 2. Then update this register with new reply host index value
1019 * in ReplyPostIndex field and the MSIxIndex field with
1020 * msix_index value reduced to a value between 0 and 7,
1021 * using a modulo 8 operation. Since each Supplemental Reply Post
1022 * Host Index Register supports 8 MSI-X vectors.
1023 *
1024 * For other HBA's just update the Reply Post Host Index register with
1025 * new reply host index value in ReplyPostIndex Field and msix_index
1026 * value in MSIxIndex field.
1027 */
1028 if (ioc->msix96_vector)
1029 writel(reply_q->reply_post_host_index | ((msix_index & 7) <<
1030 MPI2_RPHI_MSIX_INDEX_SHIFT),
1031 ioc->replyPostRegisterIndex[msix_index/8]);
1032 else
1033 writel(reply_q->reply_post_host_index | (msix_index <<
1034 MPI2_RPHI_MSIX_INDEX_SHIFT),
1035 &ioc->chip->ReplyPostHostIndex);
1014 atomic_dec(&reply_q->busy); 1036 atomic_dec(&reply_q->busy);
1015 return IRQ_HANDLED; 1037 return IRQ_HANDLED;
1016} 1038}
@@ -1338,7 +1360,7 @@ _base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc,
1338 1360
1339 sg_scmd = scsi_sglist(scmd); 1361 sg_scmd = scsi_sglist(scmd);
1340 sges_left = scsi_dma_map(scmd); 1362 sges_left = scsi_dma_map(scmd);
1341 if (!sges_left) { 1363 if (sges_left < 0) {
1342 sdev_printk(KERN_ERR, scmd->device, 1364 sdev_printk(KERN_ERR, scmd->device,
1343 "pci_map_sg failed: request for %d bytes!\n", 1365 "pci_map_sg failed: request for %d bytes!\n",
1344 scsi_bufflen(scmd)); 1366 scsi_bufflen(scmd));
@@ -1407,7 +1429,7 @@ _base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc,
1407 fill_in_last_segment: 1429 fill_in_last_segment:
1408 1430
1409 /* fill the last segment */ 1431 /* fill the last segment */
1410 while (sges_left) { 1432 while (sges_left > 0) {
1411 if (sges_left == 1) 1433 if (sges_left == 1)
1412 _base_add_sg_single_ieee(sg_local, 1434 _base_add_sg_single_ieee(sg_local,
1413 simple_sgl_flags_last, 0, sg_dma_len(sg_scmd), 1435 simple_sgl_flags_last, 0, sg_dma_len(sg_scmd),
@@ -1560,8 +1582,6 @@ _base_check_enable_msix(struct MPT3SAS_ADAPTER *ioc)
1560 1582
1561 pci_read_config_word(ioc->pdev, base + 2, &message_control); 1583 pci_read_config_word(ioc->pdev, base + 2, &message_control);
1562 ioc->msix_vector_count = (message_control & 0x3FF) + 1; 1584 ioc->msix_vector_count = (message_control & 0x3FF) + 1;
1563 if (ioc->msix_vector_count > 8)
1564 ioc->msix_vector_count = 8;
1565 dinitprintk(ioc, pr_info(MPT3SAS_FMT 1585 dinitprintk(ioc, pr_info(MPT3SAS_FMT
1566 "msix is supported, vector_count(%d)\n", 1586 "msix is supported, vector_count(%d)\n",
1567 ioc->name, ioc->msix_vector_count)); 1587 ioc->name, ioc->msix_vector_count));
@@ -1793,6 +1813,36 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
1793} 1813}
1794 1814
1795/** 1815/**
1816 * mpt3sas_base_unmap_resources - free controller resources
1817 * @ioc: per adapter object
1818 */
1819void
1820mpt3sas_base_unmap_resources(struct MPT3SAS_ADAPTER *ioc)
1821{
1822 struct pci_dev *pdev = ioc->pdev;
1823
1824 dexitprintk(ioc, printk(MPT3SAS_FMT "%s\n",
1825 ioc->name, __func__));
1826
1827 _base_free_irq(ioc);
1828 _base_disable_msix(ioc);
1829
1830 if (ioc->msix96_vector)
1831 kfree(ioc->replyPostRegisterIndex);
1832
1833 if (ioc->chip_phys) {
1834 iounmap(ioc->chip);
1835 ioc->chip_phys = 0;
1836 }
1837
1838 if (pci_is_enabled(pdev)) {
1839 pci_release_selected_regions(ioc->pdev, ioc->bars);
1840 pci_disable_pcie_error_reporting(pdev);
1841 pci_disable_device(pdev);
1842 }
1843}
1844
1845/**
1796 * mpt3sas_base_map_resources - map in controller resources (io/irq/memap) 1846 * mpt3sas_base_map_resources - map in controller resources (io/irq/memap)
1797 * @ioc: per adapter object 1847 * @ioc: per adapter object
1798 * 1848 *
@@ -1882,6 +1932,36 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
1882 if (r) 1932 if (r)
1883 goto out_fail; 1933 goto out_fail;
1884 1934
1935 /* Use the Combined reply queue feature only for SAS3 C0 & higher
1936 * revision HBAs and also only when reply queue count is greater than 8
1937 */
1938 if (ioc->msix96_vector && ioc->reply_queue_count > 8) {
1939 /* Determine the Supplemental Reply Post Host Index Registers
1940 * Addresse. Supplemental Reply Post Host Index Registers
1941 * starts at offset MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET and
1942 * each register is at offset bytes of
1943 * MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET from previous one.
1944 */
1945 ioc->replyPostRegisterIndex = kcalloc(
1946 MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT,
1947 sizeof(resource_size_t *), GFP_KERNEL);
1948 if (!ioc->replyPostRegisterIndex) {
1949 dfailprintk(ioc, printk(MPT3SAS_FMT
1950 "allocation for reply Post Register Index failed!!!\n",
1951 ioc->name));
1952 r = -ENOMEM;
1953 goto out_fail;
1954 }
1955
1956 for (i = 0; i < MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT; i++) {
1957 ioc->replyPostRegisterIndex[i] = (resource_size_t *)
1958 ((u8 *)&ioc->chip->Doorbell +
1959 MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET +
1960 (i * MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET));
1961 }
1962 } else
1963 ioc->msix96_vector = 0;
1964
1885 list_for_each_entry(reply_q, &ioc->reply_queue_list, list) 1965 list_for_each_entry(reply_q, &ioc->reply_queue_list, list)
1886 pr_info(MPT3SAS_FMT "%s: IRQ %d\n", 1966 pr_info(MPT3SAS_FMT "%s: IRQ %d\n",
1887 reply_q->name, ((ioc->msix_enable) ? "PCI-MSI-X enabled" : 1967 reply_q->name, ((ioc->msix_enable) ? "PCI-MSI-X enabled" :
@@ -1897,12 +1977,7 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
1897 return 0; 1977 return 0;
1898 1978
1899 out_fail: 1979 out_fail:
1900 if (ioc->chip_phys) 1980 mpt3sas_base_unmap_resources(ioc);
1901 iounmap(ioc->chip);
1902 ioc->chip_phys = 0;
1903 pci_release_selected_regions(ioc->pdev, ioc->bars);
1904 pci_disable_pcie_error_reporting(pdev);
1905 pci_disable_device(pdev);
1906 return r; 1981 return r;
1907} 1982}
1908 1983
@@ -2292,6 +2367,99 @@ _base_display_intel_branding(struct MPT3SAS_ADAPTER *ioc)
2292 2367
2293 2368
2294/** 2369/**
2370 * _base_display_dell_branding - Display branding string
2371 * @ioc: per adapter object
2372 *
2373 * Return nothing.
2374 */
2375static void
2376_base_display_dell_branding(struct MPT3SAS_ADAPTER *ioc)
2377{
2378 if (ioc->pdev->subsystem_vendor != PCI_VENDOR_ID_DELL)
2379 return;
2380
2381 switch (ioc->pdev->device) {
2382 case MPI25_MFGPAGE_DEVID_SAS3008:
2383 switch (ioc->pdev->subsystem_device) {
2384 case MPT3SAS_DELL_12G_HBA_SSDID:
2385 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2386 MPT3SAS_DELL_12G_HBA_BRANDING);
2387 break;
2388 default:
2389 pr_info(MPT3SAS_FMT
2390 "Dell 12Gbps HBA: Subsystem ID: 0x%X\n", ioc->name,
2391 ioc->pdev->subsystem_device);
2392 break;
2393 }
2394 break;
2395 default:
2396 pr_info(MPT3SAS_FMT
2397 "Dell 12Gbps HBA: Subsystem ID: 0x%X\n", ioc->name,
2398 ioc->pdev->subsystem_device);
2399 break;
2400 }
2401}
2402
2403/**
2404 * _base_display_cisco_branding - Display branding string
2405 * @ioc: per adapter object
2406 *
2407 * Return nothing.
2408 */
2409static void
2410_base_display_cisco_branding(struct MPT3SAS_ADAPTER *ioc)
2411{
2412 if (ioc->pdev->subsystem_vendor != PCI_VENDOR_ID_CISCO)
2413 return;
2414
2415 switch (ioc->pdev->device) {
2416 case MPI25_MFGPAGE_DEVID_SAS3008:
2417 switch (ioc->pdev->subsystem_device) {
2418 case MPT3SAS_CISCO_12G_8E_HBA_SSDID:
2419 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2420 MPT3SAS_CISCO_12G_8E_HBA_BRANDING);
2421 break;
2422 case MPT3SAS_CISCO_12G_8I_HBA_SSDID:
2423 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2424 MPT3SAS_CISCO_12G_8I_HBA_BRANDING);
2425 break;
2426 case MPT3SAS_CISCO_12G_AVILA_HBA_SSDID:
2427 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2428 MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING);
2429 break;
2430 default:
2431 pr_info(MPT3SAS_FMT
2432 "Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n",
2433 ioc->name, ioc->pdev->subsystem_device);
2434 break;
2435 }
2436 break;
2437 case MPI25_MFGPAGE_DEVID_SAS3108_1:
2438 switch (ioc->pdev->subsystem_device) {
2439 case MPT3SAS_CISCO_12G_AVILA_HBA_SSDID:
2440 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2441 MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING);
2442 break;
2443 case MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_SSDID:
2444 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2445 MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_BRANDING);
2446 break;
2447 default:
2448 pr_info(MPT3SAS_FMT
2449 "Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n",
2450 ioc->name, ioc->pdev->subsystem_device);
2451 break;
2452 }
2453 break;
2454 default:
2455 pr_info(MPT3SAS_FMT
2456 "Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n",
2457 ioc->name, ioc->pdev->subsystem_device);
2458 break;
2459 }
2460}
2461
2462/**
2295 * _base_display_ioc_capabilities - Disply IOC's capabilities. 2463 * _base_display_ioc_capabilities - Disply IOC's capabilities.
2296 * @ioc: per adapter object 2464 * @ioc: per adapter object
2297 * 2465 *
@@ -2321,6 +2489,8 @@ _base_display_ioc_capabilities(struct MPT3SAS_ADAPTER *ioc)
2321 bios_version & 0x000000FF); 2489 bios_version & 0x000000FF);
2322 2490
2323 _base_display_intel_branding(ioc); 2491 _base_display_intel_branding(ioc);
2492 _base_display_dell_branding(ioc);
2493 _base_display_cisco_branding(ioc);
2324 2494
2325 pr_info(MPT3SAS_FMT "Protocol=(", ioc->name); 2495 pr_info(MPT3SAS_FMT "Protocol=(", ioc->name);
2326 2496
@@ -3139,6 +3309,9 @@ _base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc, u32 ioc_state, int timeout,
3139 * Notes: MPI2_HIS_IOC2SYS_DB_STATUS - set to one when IOC writes to doorbell. 3309 * Notes: MPI2_HIS_IOC2SYS_DB_STATUS - set to one when IOC writes to doorbell.
3140 */ 3310 */
3141static int 3311static int
3312_base_diag_reset(struct MPT3SAS_ADAPTER *ioc, int sleep_flag);
3313
3314static int
3142_base_wait_for_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout, 3315_base_wait_for_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout,
3143 int sleep_flag) 3316 int sleep_flag)
3144{ 3317{
@@ -3681,6 +3854,64 @@ _base_get_port_facts(struct MPT3SAS_ADAPTER *ioc, int port, int sleep_flag)
3681} 3854}
3682 3855
3683/** 3856/**
3857 * _base_wait_for_iocstate - Wait until the card is in READY or OPERATIONAL
3858 * @ioc: per adapter object
3859 * @timeout:
3860 * @sleep_flag: CAN_SLEEP or NO_SLEEP
3861 *
3862 * Returns 0 for success, non-zero for failure.
3863 */
3864static int
3865_base_wait_for_iocstate(struct MPT3SAS_ADAPTER *ioc, int timeout,
3866 int sleep_flag)
3867{
3868 u32 ioc_state;
3869 int rc;
3870
3871 dinitprintk(ioc, printk(MPT3SAS_FMT "%s\n", ioc->name,
3872 __func__));
3873
3874 if (ioc->pci_error_recovery) {
3875 dfailprintk(ioc, printk(MPT3SAS_FMT
3876 "%s: host in pci error recovery\n", ioc->name, __func__));
3877 return -EFAULT;
3878 }
3879
3880 ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
3881 dhsprintk(ioc, printk(MPT3SAS_FMT "%s: ioc_state(0x%08x)\n",
3882 ioc->name, __func__, ioc_state));
3883
3884 if (((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_READY) ||
3885 (ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_OPERATIONAL)
3886 return 0;
3887
3888 if (ioc_state & MPI2_DOORBELL_USED) {
3889 dhsprintk(ioc, printk(MPT3SAS_FMT
3890 "unexpected doorbell active!\n", ioc->name));
3891 goto issue_diag_reset;
3892 }
3893
3894 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
3895 mpt3sas_base_fault_info(ioc, ioc_state &
3896 MPI2_DOORBELL_DATA_MASK);
3897 goto issue_diag_reset;
3898 }
3899
3900 ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY,
3901 timeout, sleep_flag);
3902 if (ioc_state) {
3903 dfailprintk(ioc, printk(MPT3SAS_FMT
3904 "%s: failed going to ready state (ioc_state=0x%x)\n",
3905 ioc->name, __func__, ioc_state));
3906 return -EFAULT;
3907 }
3908
3909 issue_diag_reset:
3910 rc = _base_diag_reset(ioc, sleep_flag);
3911 return rc;
3912}
3913
3914/**
3684 * _base_get_ioc_facts - obtain ioc facts reply and save in ioc 3915 * _base_get_ioc_facts - obtain ioc facts reply and save in ioc
3685 * @ioc: per adapter object 3916 * @ioc: per adapter object
3686 * @sleep_flag: CAN_SLEEP or NO_SLEEP 3917 * @sleep_flag: CAN_SLEEP or NO_SLEEP
@@ -3698,6 +3929,13 @@ _base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
3698 dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name, 3929 dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3699 __func__)); 3930 __func__));
3700 3931
3932 r = _base_wait_for_iocstate(ioc, 10, sleep_flag);
3933 if (r) {
3934 dfailprintk(ioc, printk(MPT3SAS_FMT
3935 "%s: failed getting to correct state\n",
3936 ioc->name, __func__));
3937 return r;
3938 }
3701 mpi_reply_sz = sizeof(Mpi2IOCFactsReply_t); 3939 mpi_reply_sz = sizeof(Mpi2IOCFactsReply_t);
3702 mpi_request_sz = sizeof(Mpi2IOCFactsRequest_t); 3940 mpi_request_sz = sizeof(Mpi2IOCFactsRequest_t);
3703 memset(&mpi_request, 0, mpi_request_sz); 3941 memset(&mpi_request, 0, mpi_request_sz);
@@ -3783,7 +4021,7 @@ _base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
3783 mpi_request.WhoInit = MPI2_WHOINIT_HOST_DRIVER; 4021 mpi_request.WhoInit = MPI2_WHOINIT_HOST_DRIVER;
3784 mpi_request.VF_ID = 0; /* TODO */ 4022 mpi_request.VF_ID = 0; /* TODO */
3785 mpi_request.VP_ID = 0; 4023 mpi_request.VP_ID = 0;
3786 mpi_request.MsgVersion = cpu_to_le16(MPI2_VERSION); 4024 mpi_request.MsgVersion = cpu_to_le16(MPI25_VERSION);
3787 mpi_request.HeaderVersion = cpu_to_le16(MPI2_HEADER_VERSION); 4025 mpi_request.HeaderVersion = cpu_to_le16(MPI2_HEADER_VERSION);
3788 4026
3789 if (_base_is_controller_msix_enabled(ioc)) 4027 if (_base_is_controller_msix_enabled(ioc))
@@ -4524,8 +4762,15 @@ _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
4524 4762
4525 /* initialize reply post host index */ 4763 /* initialize reply post host index */
4526 list_for_each_entry(reply_q, &ioc->reply_queue_list, list) { 4764 list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
4527 writel(reply_q->msix_index << MPI2_RPHI_MSIX_INDEX_SHIFT, 4765 if (ioc->msix96_vector)
4528 &ioc->chip->ReplyPostHostIndex); 4766 writel((reply_q->msix_index & 7)<<
4767 MPI2_RPHI_MSIX_INDEX_SHIFT,
4768 ioc->replyPostRegisterIndex[reply_q->msix_index/8]);
4769 else
4770 writel(reply_q->msix_index <<
4771 MPI2_RPHI_MSIX_INDEX_SHIFT,
4772 &ioc->chip->ReplyPostHostIndex);
4773
4529 if (!_base_is_controller_msix_enabled(ioc)) 4774 if (!_base_is_controller_msix_enabled(ioc))
4530 goto skip_init_reply_post_host_index; 4775 goto skip_init_reply_post_host_index;
4531 } 4776 }
@@ -4564,8 +4809,6 @@ _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
4564void 4809void
4565mpt3sas_base_free_resources(struct MPT3SAS_ADAPTER *ioc) 4810mpt3sas_base_free_resources(struct MPT3SAS_ADAPTER *ioc)
4566{ 4811{
4567 struct pci_dev *pdev = ioc->pdev;
4568
4569 dexitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name, 4812 dexitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
4570 __func__)); 4813 __func__));
4571 4814
@@ -4576,18 +4819,7 @@ mpt3sas_base_free_resources(struct MPT3SAS_ADAPTER *ioc)
4576 ioc->shost_recovery = 0; 4819 ioc->shost_recovery = 0;
4577 } 4820 }
4578 4821
4579 _base_free_irq(ioc); 4822 mpt3sas_base_unmap_resources(ioc);
4580 _base_disable_msix(ioc);
4581
4582 if (ioc->chip_phys && ioc->chip)
4583 iounmap(ioc->chip);
4584 ioc->chip_phys = 0;
4585
4586 if (pci_is_enabled(pdev)) {
4587 pci_release_selected_regions(ioc->pdev, ioc->bars);
4588 pci_disable_pcie_error_reporting(pdev);
4589 pci_disable_device(pdev);
4590 }
4591 return; 4823 return;
4592} 4824}
4593 4825
@@ -4602,6 +4834,7 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
4602{ 4834{
4603 int r, i; 4835 int r, i;
4604 int cpu_id, last_cpu_id = 0; 4836 int cpu_id, last_cpu_id = 0;
4837 u8 revision;
4605 4838
4606 dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name, 4839 dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
4607 __func__)); 4840 __func__));
@@ -4621,6 +4854,20 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
4621 goto out_free_resources; 4854 goto out_free_resources;
4622 } 4855 }
4623 4856
4857 /* Check whether the controller revision is C0 or above.
4858 * only C0 and above revision controllers support 96 MSI-X vectors.
4859 */
4860 revision = ioc->pdev->revision;
4861
4862 if ((ioc->pdev->device == MPI25_MFGPAGE_DEVID_SAS3004 ||
4863 ioc->pdev->device == MPI25_MFGPAGE_DEVID_SAS3008 ||
4864 ioc->pdev->device == MPI25_MFGPAGE_DEVID_SAS3108_1 ||
4865 ioc->pdev->device == MPI25_MFGPAGE_DEVID_SAS3108_2 ||
4866 ioc->pdev->device == MPI25_MFGPAGE_DEVID_SAS3108_5 ||
4867 ioc->pdev->device == MPI25_MFGPAGE_DEVID_SAS3108_6) &&
4868 (revision >= 0x02))
4869 ioc->msix96_vector = 1;
4870
4624 ioc->rdpq_array_enable_assigned = 0; 4871 ioc->rdpq_array_enable_assigned = 0;
4625 ioc->dma_mask = 0; 4872 ioc->dma_mask = 0;
4626 r = mpt3sas_base_map_resources(ioc); 4873 r = mpt3sas_base_map_resources(ioc);
@@ -4643,7 +4890,6 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
4643 ioc->build_sg_scmd = &_base_build_sg_scmd_ieee; 4890 ioc->build_sg_scmd = &_base_build_sg_scmd_ieee;
4644 ioc->build_sg = &_base_build_sg_ieee; 4891 ioc->build_sg = &_base_build_sg_ieee;
4645 ioc->build_zero_len_sge = &_base_build_zero_len_sge_ieee; 4892 ioc->build_zero_len_sge = &_base_build_zero_len_sge_ieee;
4646 ioc->mpi25 = 1;
4647 ioc->sge_size_ieee = sizeof(Mpi2IeeeSgeSimple64_t); 4893 ioc->sge_size_ieee = sizeof(Mpi2IeeeSgeSimple64_t);
4648 4894
4649 /* 4895 /*
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index afa881682bef..f0e462b0880d 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -71,8 +71,8 @@
71#define MPT3SAS_DRIVER_NAME "mpt3sas" 71#define MPT3SAS_DRIVER_NAME "mpt3sas"
72#define MPT3SAS_AUTHOR "Avago Technologies <MPT-FusionLinux.pdl@avagotech.com>" 72#define MPT3SAS_AUTHOR "Avago Technologies <MPT-FusionLinux.pdl@avagotech.com>"
73#define MPT3SAS_DESCRIPTION "LSI MPT Fusion SAS 3.0 Device Driver" 73#define MPT3SAS_DESCRIPTION "LSI MPT Fusion SAS 3.0 Device Driver"
74#define MPT3SAS_DRIVER_VERSION "04.100.00.00" 74#define MPT3SAS_DRIVER_VERSION "09.100.00.00"
75#define MPT3SAS_MAJOR_VERSION 4 75#define MPT3SAS_MAJOR_VERSION 9
76#define MPT3SAS_MINOR_VERSION 100 76#define MPT3SAS_MINOR_VERSION 100
77#define MPT3SAS_BUILD_VERSION 0 77#define MPT3SAS_BUILD_VERSION 0
78#define MPT3SAS_RELEASE_VERSION 00 78#define MPT3SAS_RELEASE_VERSION 00
@@ -152,12 +152,49 @@
152#define MPT3SAS_INTEL_RS3UC080_SSDID 0x3524 152#define MPT3SAS_INTEL_RS3UC080_SSDID 0x3524
153 153
154/* 154/*
155 * Dell HBA branding
156 */
157#define MPT3SAS_DELL_12G_HBA_BRANDING \
158 "Dell 12Gbps HBA"
159
160/*
161 * Dell HBA SSDIDs
162 */
163#define MPT3SAS_DELL_12G_HBA_SSDID 0x1F46
164
165/*
166 * Cisco HBA branding
167 */
168#define MPT3SAS_CISCO_12G_8E_HBA_BRANDING \
169 "Cisco 9300-8E 12G SAS HBA"
170#define MPT3SAS_CISCO_12G_8I_HBA_BRANDING \
171 "Cisco 9300-8i 12G SAS HBA"
172#define MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING \
173 "Cisco 12G Modular SAS Pass through Controller"
174#define MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_BRANDING \
175 "UCS C3X60 12G SAS Pass through Controller"
176/*
177 * Cisco HBA SSSDIDs
178 */
179#define MPT3SAS_CISCO_12G_8E_HBA_SSDID 0x14C
180#define MPT3SAS_CISCO_12G_8I_HBA_SSDID 0x154
181#define MPT3SAS_CISCO_12G_AVILA_HBA_SSDID 0x155
182#define MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_SSDID 0x156
183
184/*
155 * status bits for ioc->diag_buffer_status 185 * status bits for ioc->diag_buffer_status
156 */ 186 */
157#define MPT3_DIAG_BUFFER_IS_REGISTERED (0x01) 187#define MPT3_DIAG_BUFFER_IS_REGISTERED (0x01)
158#define MPT3_DIAG_BUFFER_IS_RELEASED (0x02) 188#define MPT3_DIAG_BUFFER_IS_RELEASED (0x02)
159#define MPT3_DIAG_BUFFER_IS_DIAG_RESET (0x04) 189#define MPT3_DIAG_BUFFER_IS_DIAG_RESET (0x04)
160 190
191/*
192 * Combined Reply Queue constants,
193 * There are twelve Supplemental Reply Post Host Index Registers
194 * and each register is at offset 0x10 bytes from the previous one.
195 */
196#define MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT 12
197#define MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET (0x10)
161 198
162/* OEM Identifiers */ 199/* OEM Identifiers */
163#define MFG10_OEM_ID_INVALID (0x00000000) 200#define MFG10_OEM_ID_INVALID (0x00000000)
@@ -173,6 +210,8 @@
173#define MFG10_GF0_SSD_DATA_SCRUB_DISABLE (0x00000008) 210#define MFG10_GF0_SSD_DATA_SCRUB_DISABLE (0x00000008)
174#define MFG10_GF0_SINGLE_DRIVE_R0 (0x00000010) 211#define MFG10_GF0_SINGLE_DRIVE_R0 (0x00000010)
175 212
213#define VIRTUAL_IO_FAILED_RETRY (0x32010081)
214
176/* OEM Specific Flags will come from OEM specific header files */ 215/* OEM Specific Flags will come from OEM specific header files */
177struct Mpi2ManufacturingPage10_t { 216struct Mpi2ManufacturingPage10_t {
178 MPI2_CONFIG_PAGE_HEADER Header; /* 00h */ 217 MPI2_CONFIG_PAGE_HEADER Header; /* 00h */
@@ -294,7 +333,8 @@ struct _internal_cmd {
294 * @responding: used in _scsih_sas_device_mark_responding 333 * @responding: used in _scsih_sas_device_mark_responding
295 * @fast_path: fast path feature enable bit 334 * @fast_path: fast path feature enable bit
296 * @pfa_led_on: flag for PFA LED status 335 * @pfa_led_on: flag for PFA LED status
297 * 336 * @pend_sas_rphy_add: flag to check if device is in sas_rphy_add()
337 * addition routine.
298 */ 338 */
299struct _sas_device { 339struct _sas_device {
300 struct list_head list; 340 struct list_head list;
@@ -315,6 +355,9 @@ struct _sas_device {
315 u8 responding; 355 u8 responding;
316 u8 fast_path; 356 u8 fast_path;
317 u8 pfa_led_on; 357 u8 pfa_led_on;
358 u8 pend_sas_rphy_add;
359 u8 enclosure_level;
360 u8 connector_name[4];
318}; 361};
319 362
320/** 363/**
@@ -728,7 +771,8 @@ typedef void (*MPT3SAS_FLUSH_RUNNING_CMDS)(struct MPT3SAS_ADAPTER *ioc);
728 * is assigned only ones 771 * is assigned only ones
729 * @reply_queue_count: number of reply queue's 772 * @reply_queue_count: number of reply queue's
730 * @reply_queue_list: link list contaning the reply queue info 773 * @reply_queue_list: link list contaning the reply queue info
731 * @reply_post_host_index: head index in the pool where FW completes IO 774 * @msix96_vector: 96 MSI-X vector support
775 * @replyPostRegisterIndex: index of next position in Reply Desc Post Queue
732 * @delayed_tr_list: target reset link list 776 * @delayed_tr_list: target reset link list
733 * @delayed_tr_volume_list: volume target reset link list 777 * @delayed_tr_volume_list: volume target reset link list
734 * @@temp_sensors_count: flag to carry the number of temperature sensors 778 * @@temp_sensors_count: flag to carry the number of temperature sensors
@@ -814,7 +858,6 @@ struct MPT3SAS_ADAPTER {
814 MPT_BUILD_SG_SCMD build_sg_scmd; 858 MPT_BUILD_SG_SCMD build_sg_scmd;
815 MPT_BUILD_SG build_sg; 859 MPT_BUILD_SG build_sg;
816 MPT_BUILD_ZERO_LEN_SGE build_zero_len_sge; 860 MPT_BUILD_ZERO_LEN_SGE build_zero_len_sge;
817 u8 mpi25;
818 u16 sge_size_ieee; 861 u16 sge_size_ieee;
819 862
820 /* function ptr for MPI sg elements only */ 863 /* function ptr for MPI sg elements only */
@@ -937,6 +980,10 @@ struct MPT3SAS_ADAPTER {
937 u8 reply_queue_count; 980 u8 reply_queue_count;
938 struct list_head reply_queue_list; 981 struct list_head reply_queue_list;
939 982
983 u8 msix96_vector;
984 /* reply post register index */
985 resource_size_t **replyPostRegisterIndex;
986
940 struct list_head delayed_tr_list; 987 struct list_head delayed_tr_list;
941 struct list_head delayed_tr_volume_list; 988 struct list_head delayed_tr_volume_list;
942 u8 temp_sensors_count; 989 u8 temp_sensors_count;
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 5a97e3286719..8ccef38523fa 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -585,6 +585,22 @@ _scsih_sas_device_remove(struct MPT3SAS_ADAPTER *ioc,
585 585
586 if (!sas_device) 586 if (!sas_device)
587 return; 587 return;
588 pr_info(MPT3SAS_FMT
589 "removing handle(0x%04x), sas_addr(0x%016llx)\n",
590 ioc->name, sas_device->handle,
591 (unsigned long long) sas_device->sas_address);
592
593 if (sas_device->enclosure_handle != 0)
594 pr_info(MPT3SAS_FMT
595 "removing enclosure logical id(0x%016llx), slot(%d)\n",
596 ioc->name, (unsigned long long)
597 sas_device->enclosure_logical_id, sas_device->slot);
598
599 if (sas_device->connector_name[0] != '\0')
600 pr_info(MPT3SAS_FMT
601 "removing enclosure level(0x%04x), connector name( %s)\n",
602 ioc->name, sas_device->enclosure_level,
603 sas_device->connector_name);
588 604
589 spin_lock_irqsave(&ioc->sas_device_lock, flags); 605 spin_lock_irqsave(&ioc->sas_device_lock, flags);
590 list_del(&sas_device->list); 606 list_del(&sas_device->list);
@@ -663,6 +679,18 @@ _scsih_sas_device_add(struct MPT3SAS_ADAPTER *ioc,
663 ioc->name, __func__, sas_device->handle, 679 ioc->name, __func__, sas_device->handle,
664 (unsigned long long)sas_device->sas_address)); 680 (unsigned long long)sas_device->sas_address));
665 681
682 if (sas_device->enclosure_handle != 0)
683 dewtprintk(ioc, pr_info(MPT3SAS_FMT
684 "%s: enclosure logical id(0x%016llx), slot( %d)\n",
685 ioc->name, __func__, (unsigned long long)
686 sas_device->enclosure_logical_id, sas_device->slot));
687
688 if (sas_device->connector_name[0] != '\0')
689 dewtprintk(ioc, pr_info(MPT3SAS_FMT
690 "%s: enclosure level(0x%04x), connector name( %s)\n",
691 ioc->name, __func__,
692 sas_device->enclosure_level, sas_device->connector_name));
693
666 spin_lock_irqsave(&ioc->sas_device_lock, flags); 694 spin_lock_irqsave(&ioc->sas_device_lock, flags);
667 list_add_tail(&sas_device->list, &ioc->sas_device_list); 695 list_add_tail(&sas_device->list, &ioc->sas_device_list);
668 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 696 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
@@ -704,6 +732,18 @@ _scsih_sas_device_init_add(struct MPT3SAS_ADAPTER *ioc,
704 __func__, sas_device->handle, 732 __func__, sas_device->handle,
705 (unsigned long long)sas_device->sas_address)); 733 (unsigned long long)sas_device->sas_address));
706 734
735 if (sas_device->enclosure_handle != 0)
736 dewtprintk(ioc, pr_info(MPT3SAS_FMT
737 "%s: enclosure logical id(0x%016llx), slot( %d)\n",
738 ioc->name, __func__, (unsigned long long)
739 sas_device->enclosure_logical_id, sas_device->slot));
740
741 if (sas_device->connector_name[0] != '\0')
742 dewtprintk(ioc, pr_info(MPT3SAS_FMT
743 "%s: enclosure level(0x%04x), connector name( %s)\n",
744 ioc->name, __func__, sas_device->enclosure_level,
745 sas_device->connector_name));
746
707 spin_lock_irqsave(&ioc->sas_device_lock, flags); 747 spin_lock_irqsave(&ioc->sas_device_lock, flags);
708 list_add_tail(&sas_device->list, &ioc->sas_device_init_list); 748 list_add_tail(&sas_device->list, &ioc->sas_device_init_list);
709 _scsih_determine_boot_device(ioc, sas_device, 0); 749 _scsih_determine_boot_device(ioc, sas_device, 0);
@@ -1772,10 +1812,16 @@ _scsih_slave_configure(struct scsi_device *sdev)
1772 "sas_addr(0x%016llx), phy(%d), device_name(0x%016llx)\n", 1812 "sas_addr(0x%016llx), phy(%d), device_name(0x%016llx)\n",
1773 ds, handle, (unsigned long long)sas_device->sas_address, 1813 ds, handle, (unsigned long long)sas_device->sas_address,
1774 sas_device->phy, (unsigned long long)sas_device->device_name); 1814 sas_device->phy, (unsigned long long)sas_device->device_name);
1775 sdev_printk(KERN_INFO, sdev, 1815 if (sas_device->enclosure_handle != 0)
1776 "%s: enclosure_logical_id(0x%016llx), slot(%d)\n", 1816 sdev_printk(KERN_INFO, sdev,
1777 ds, (unsigned long long) 1817 "%s: enclosure_logical_id(0x%016llx), slot(%d)\n",
1778 sas_device->enclosure_logical_id, sas_device->slot); 1818 ds, (unsigned long long)
1819 sas_device->enclosure_logical_id, sas_device->slot);
1820 if (sas_device->connector_name[0] != '\0')
1821 sdev_printk(KERN_INFO, sdev,
1822 "%s: enclosure level(0x%04x), connector name( %s)\n",
1823 ds, sas_device->enclosure_level,
1824 sas_device->connector_name);
1779 1825
1780 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 1826 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1781 1827
@@ -2189,10 +2235,17 @@ _scsih_tm_display_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd)
2189 sas_device->handle, 2235 sas_device->handle,
2190 (unsigned long long)sas_device->sas_address, 2236 (unsigned long long)sas_device->sas_address,
2191 sas_device->phy); 2237 sas_device->phy);
2192 starget_printk(KERN_INFO, starget, 2238 if (sas_device->enclosure_handle != 0)
2193 "enclosure_logical_id(0x%016llx), slot(%d)\n", 2239 starget_printk(KERN_INFO, starget,
2194 (unsigned long long)sas_device->enclosure_logical_id, 2240 "enclosure_logical_id(0x%016llx), slot(%d)\n",
2195 sas_device->slot); 2241 (unsigned long long)
2242 sas_device->enclosure_logical_id,
2243 sas_device->slot);
2244 if (sas_device->connector_name)
2245 starget_printk(KERN_INFO, starget,
2246 "enclosure level(0x%04x),connector name(%s)\n",
2247 sas_device->enclosure_level,
2248 sas_device->connector_name);
2196 } 2249 }
2197 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 2250 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2198 } 2251 }
@@ -2552,6 +2605,75 @@ _scsih_fw_event_cleanup_queue(struct MPT3SAS_ADAPTER *ioc)
2552} 2605}
2553 2606
2554/** 2607/**
2608 * _scsih_internal_device_block - block the sdev device
2609 * @sdev: per device object
2610 * @sas_device_priv_data : per device driver private data
2611 *
2612 * make sure device is blocked without error, if not
2613 * print an error
2614 */
2615static void
2616_scsih_internal_device_block(struct scsi_device *sdev,
2617 struct MPT3SAS_DEVICE *sas_device_priv_data)
2618{
2619 int r = 0;
2620
2621 sdev_printk(KERN_INFO, sdev, "device_block, handle(0x%04x)\n",
2622 sas_device_priv_data->sas_target->handle);
2623 sas_device_priv_data->block = 1;
2624
2625 r = scsi_internal_device_block(sdev);
2626 if (r == -EINVAL)
2627 sdev_printk(KERN_WARNING, sdev,
2628 "device_block failed with return(%d) for handle(0x%04x)\n",
2629 sas_device_priv_data->sas_target->handle, r);
2630}
2631
2632/**
2633 * _scsih_internal_device_unblock - unblock the sdev device
2634 * @sdev: per device object
2635 * @sas_device_priv_data : per device driver private data
2636 * make sure device is unblocked without error, if not retry
2637 * by blocking and then unblocking
2638 */
2639
2640static void
2641_scsih_internal_device_unblock(struct scsi_device *sdev,
2642 struct MPT3SAS_DEVICE *sas_device_priv_data)
2643{
2644 int r = 0;
2645
2646 sdev_printk(KERN_WARNING, sdev, "device_unblock and setting to running, "
2647 "handle(0x%04x)\n", sas_device_priv_data->sas_target->handle);
2648 sas_device_priv_data->block = 0;
2649 r = scsi_internal_device_unblock(sdev, SDEV_RUNNING);
2650 if (r == -EINVAL) {
2651 /* The device has been set to SDEV_RUNNING by SD layer during
2652 * device addition but the request queue is still stopped by
2653 * our earlier block call. We need to perform a block again
2654 * to get the device to SDEV_BLOCK and then to SDEV_RUNNING */
2655
2656 sdev_printk(KERN_WARNING, sdev,
2657 "device_unblock failed with return(%d) for handle(0x%04x) "
2658 "performing a block followed by an unblock\n",
2659 sas_device_priv_data->sas_target->handle, r);
2660 sas_device_priv_data->block = 1;
2661 r = scsi_internal_device_block(sdev);
2662 if (r)
2663 sdev_printk(KERN_WARNING, sdev, "retried device_block "
2664 "failed with return(%d) for handle(0x%04x)\n",
2665 sas_device_priv_data->sas_target->handle, r);
2666
2667 sas_device_priv_data->block = 0;
2668 r = scsi_internal_device_unblock(sdev, SDEV_RUNNING);
2669 if (r)
2670 sdev_printk(KERN_WARNING, sdev, "retried device_unblock"
2671 " failed with return(%d) for handle(0x%04x)\n",
2672 sas_device_priv_data->sas_target->handle, r);
2673 }
2674}
2675
2676/**
2555 * _scsih_ublock_io_all_device - unblock every device 2677 * _scsih_ublock_io_all_device - unblock every device
2556 * @ioc: per adapter object 2678 * @ioc: per adapter object
2557 * 2679 *
@@ -2570,11 +2692,10 @@ _scsih_ublock_io_all_device(struct MPT3SAS_ADAPTER *ioc)
2570 if (!sas_device_priv_data->block) 2692 if (!sas_device_priv_data->block)
2571 continue; 2693 continue;
2572 2694
2573 sas_device_priv_data->block = 0;
2574 dewtprintk(ioc, sdev_printk(KERN_INFO, sdev, 2695 dewtprintk(ioc, sdev_printk(KERN_INFO, sdev,
2575 "device_running, handle(0x%04x)\n", 2696 "device_running, handle(0x%04x)\n",
2576 sas_device_priv_data->sas_target->handle)); 2697 sas_device_priv_data->sas_target->handle));
2577 scsi_internal_device_unblock(sdev, SDEV_RUNNING); 2698 _scsih_internal_device_unblock(sdev, sas_device_priv_data);
2578 } 2699 }
2579} 2700}
2580 2701
@@ -2599,10 +2720,9 @@ _scsih_ublock_io_device(struct MPT3SAS_ADAPTER *ioc, u64 sas_address)
2599 if (sas_device_priv_data->sas_target->sas_address 2720 if (sas_device_priv_data->sas_target->sas_address
2600 != sas_address) 2721 != sas_address)
2601 continue; 2722 continue;
2602 if (sas_device_priv_data->block) { 2723 if (sas_device_priv_data->block)
2603 sas_device_priv_data->block = 0; 2724 _scsih_internal_device_unblock(sdev,
2604 scsi_internal_device_unblock(sdev, SDEV_RUNNING); 2725 sas_device_priv_data);
2605 }
2606 } 2726 }
2607} 2727}
2608 2728
@@ -2625,10 +2745,7 @@ _scsih_block_io_all_device(struct MPT3SAS_ADAPTER *ioc)
2625 continue; 2745 continue;
2626 if (sas_device_priv_data->block) 2746 if (sas_device_priv_data->block)
2627 continue; 2747 continue;
2628 sas_device_priv_data->block = 1; 2748 _scsih_internal_device_block(sdev, sas_device_priv_data);
2629 scsi_internal_device_block(sdev);
2630 sdev_printk(KERN_INFO, sdev, "device_blocked, handle(0x%04x)\n",
2631 sas_device_priv_data->sas_target->handle);
2632 } 2749 }
2633} 2750}
2634 2751
@@ -2644,6 +2761,11 @@ _scsih_block_io_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
2644{ 2761{
2645 struct MPT3SAS_DEVICE *sas_device_priv_data; 2762 struct MPT3SAS_DEVICE *sas_device_priv_data;
2646 struct scsi_device *sdev; 2763 struct scsi_device *sdev;
2764 struct _sas_device *sas_device;
2765
2766 sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
2767 if (!sas_device)
2768 return;
2647 2769
2648 shost_for_each_device(sdev, ioc->shost) { 2770 shost_for_each_device(sdev, ioc->shost) {
2649 sas_device_priv_data = sdev->hostdata; 2771 sas_device_priv_data = sdev->hostdata;
@@ -2653,10 +2775,9 @@ _scsih_block_io_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
2653 continue; 2775 continue;
2654 if (sas_device_priv_data->block) 2776 if (sas_device_priv_data->block)
2655 continue; 2777 continue;
2656 sas_device_priv_data->block = 1; 2778 if (sas_device->pend_sas_rphy_add)
2657 scsi_internal_device_block(sdev); 2779 continue;
2658 sdev_printk(KERN_INFO, sdev, 2780 _scsih_internal_device_block(sdev, sas_device_priv_data);
2659 "device_blocked, handle(0x%04x)\n", handle);
2660 } 2781 }
2661} 2782}
2662 2783
@@ -2806,6 +2927,18 @@ _scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
2806 "setting delete flag: handle(0x%04x), sas_addr(0x%016llx)\n", 2927 "setting delete flag: handle(0x%04x), sas_addr(0x%016llx)\n",
2807 ioc->name, handle, 2928 ioc->name, handle,
2808 (unsigned long long)sas_address)); 2929 (unsigned long long)sas_address));
2930 if (sas_device->enclosure_handle != 0)
2931 dewtprintk(ioc, pr_info(MPT3SAS_FMT
2932 "setting delete flag:enclosure logical id(0x%016llx),"
2933 " slot(%d)\n", ioc->name, (unsigned long long)
2934 sas_device->enclosure_logical_id,
2935 sas_device->slot));
2936 if (sas_device->connector_name)
2937 dewtprintk(ioc, pr_info(MPT3SAS_FMT
2938 "setting delete flag: enclosure level(0x%04x),"
2939 " connector name( %s)\n", ioc->name,
2940 sas_device->enclosure_level,
2941 sas_device->connector_name));
2809 _scsih_ublock_io_device(ioc, sas_address); 2942 _scsih_ublock_io_device(ioc, sas_address);
2810 sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE; 2943 sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
2811 } 2944 }
@@ -3821,10 +3954,19 @@ _scsih_scsi_ioc_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
3821 "\tsas_address(0x%016llx), phy(%d)\n", 3954 "\tsas_address(0x%016llx), phy(%d)\n",
3822 ioc->name, (unsigned long long) 3955 ioc->name, (unsigned long long)
3823 sas_device->sas_address, sas_device->phy); 3956 sas_device->sas_address, sas_device->phy);
3824 pr_warn(MPT3SAS_FMT 3957 if (sas_device->enclosure_handle != 0)
3825 "\tenclosure_logical_id(0x%016llx), slot(%d)\n", 3958 pr_warn(MPT3SAS_FMT
3826 ioc->name, (unsigned long long) 3959 "\tenclosure_logical_id(0x%016llx),"
3827 sas_device->enclosure_logical_id, sas_device->slot); 3960 "slot(%d)\n", ioc->name,
3961 (unsigned long long)
3962 sas_device->enclosure_logical_id,
3963 sas_device->slot);
3964 if (sas_device->connector_name[0])
3965 pr_warn(MPT3SAS_FMT
3966 "\tenclosure level(0x%04x),"
3967 " connector name( %s)\n", ioc->name,
3968 sas_device->enclosure_level,
3969 sas_device->connector_name);
3828 } 3970 }
3829 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 3971 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
3830 } 3972 }
@@ -3999,7 +4141,16 @@ _scsih_smart_predicted_fault(struct MPT3SAS_ADAPTER *ioc, u16 handle)
3999 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 4141 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
4000 return; 4142 return;
4001 } 4143 }
4002 starget_printk(KERN_WARNING, starget, "predicted fault\n"); 4144 if (sas_device->enclosure_handle != 0)
4145 starget_printk(KERN_INFO, starget, "predicted fault, "
4146 "enclosure logical id(0x%016llx), slot(%d)\n",
4147 (unsigned long long)sas_device->enclosure_logical_id,
4148 sas_device->slot);
4149 if (sas_device->connector_name[0] != '\0')
4150 starget_printk(KERN_WARNING, starget, "predicted fault, "
4151 "enclosure level(0x%04x), connector name( %s)\n",
4152 sas_device->enclosure_level,
4153 sas_device->connector_name);
4003 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 4154 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
4004 4155
4005 if (ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM) 4156 if (ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM)
@@ -4119,8 +4270,15 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
4119 _scsih_smart_predicted_fault(ioc, 4270 _scsih_smart_predicted_fault(ioc,
4120 le16_to_cpu(mpi_reply->DevHandle)); 4271 le16_to_cpu(mpi_reply->DevHandle));
4121 mpt3sas_trigger_scsi(ioc, data.skey, data.asc, data.ascq); 4272 mpt3sas_trigger_scsi(ioc, data.skey, data.asc, data.ascq);
4122 }
4123 4273
4274#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
4275 if (!(ioc->logging_level & MPT_DEBUG_REPLY) &&
4276 ((scmd->sense_buffer[2] == UNIT_ATTENTION) ||
4277 (scmd->sense_buffer[2] == MEDIUM_ERROR) ||
4278 (scmd->sense_buffer[2] == HARDWARE_ERROR)))
4279 _scsih_scsi_ioc_info(ioc, scmd, mpi_reply, smid);
4280#endif
4281 }
4124 switch (ioc_status) { 4282 switch (ioc_status) {
4125 case MPI2_IOCSTATUS_BUSY: 4283 case MPI2_IOCSTATUS_BUSY:
4126 case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES: 4284 case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
@@ -4146,6 +4304,9 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
4146 scmd->device->expecting_cc_ua = 1; 4304 scmd->device->expecting_cc_ua = 1;
4147 } 4305 }
4148 break; 4306 break;
4307 } else if (log_info == VIRTUAL_IO_FAILED_RETRY) {
4308 scmd->result = DID_RESET << 16;
4309 break;
4149 } 4310 }
4150 scmd->result = DID_SOFT_ERROR << 16; 4311 scmd->result = DID_SOFT_ERROR << 16;
4151 break; 4312 break;
@@ -4788,6 +4949,16 @@ _scsih_check_device(struct MPT3SAS_ADAPTER *ioc,
4788 sas_device->handle, handle); 4949 sas_device->handle, handle);
4789 sas_target_priv_data->handle = handle; 4950 sas_target_priv_data->handle = handle;
4790 sas_device->handle = handle; 4951 sas_device->handle = handle;
4952 if (sas_device_pg0.Flags &
4953 MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
4954 sas_device->enclosure_level =
4955 le16_to_cpu(sas_device_pg0.EnclosureLevel);
4956 memcpy(&sas_device->connector_name[0],
4957 &sas_device_pg0.ConnectorName[0], 4);
4958 } else {
4959 sas_device->enclosure_level = 0;
4960 sas_device->connector_name[0] = '\0';
4961 }
4791 } 4962 }
4792 4963
4793 /* check if device is present */ 4964 /* check if device is present */
@@ -4894,14 +5065,24 @@ _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
4894 ioc->name, __FILE__, __LINE__, __func__); 5065 ioc->name, __FILE__, __LINE__, __func__);
4895 sas_device->enclosure_handle = 5066 sas_device->enclosure_handle =
4896 le16_to_cpu(sas_device_pg0.EnclosureHandle); 5067 le16_to_cpu(sas_device_pg0.EnclosureHandle);
4897 sas_device->slot = 5068 if (sas_device->enclosure_handle != 0)
4898 le16_to_cpu(sas_device_pg0.Slot); 5069 sas_device->slot =
5070 le16_to_cpu(sas_device_pg0.Slot);
4899 sas_device->device_info = device_info; 5071 sas_device->device_info = device_info;
4900 sas_device->sas_address = sas_address; 5072 sas_device->sas_address = sas_address;
4901 sas_device->phy = sas_device_pg0.PhyNum; 5073 sas_device->phy = sas_device_pg0.PhyNum;
4902 sas_device->fast_path = (le16_to_cpu(sas_device_pg0.Flags) & 5074 sas_device->fast_path = (le16_to_cpu(sas_device_pg0.Flags) &
4903 MPI25_SAS_DEVICE0_FLAGS_FAST_PATH_CAPABLE) ? 1 : 0; 5075 MPI25_SAS_DEVICE0_FLAGS_FAST_PATH_CAPABLE) ? 1 : 0;
4904 5076
5077 if (sas_device_pg0.Flags & MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
5078 sas_device->enclosure_level =
5079 le16_to_cpu(sas_device_pg0.EnclosureLevel);
5080 memcpy(&sas_device->connector_name[0],
5081 &sas_device_pg0.ConnectorName[0], 4);
5082 } else {
5083 sas_device->enclosure_level = 0;
5084 sas_device->connector_name[0] = '\0';
5085 }
4905 /* get enclosure_logical_id */ 5086 /* get enclosure_logical_id */
4906 if (sas_device->enclosure_handle && !(mpt3sas_config_get_enclosure_pg0( 5087 if (sas_device->enclosure_handle && !(mpt3sas_config_get_enclosure_pg0(
4907 ioc, &mpi_reply, &enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE, 5088 ioc, &mpi_reply, &enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
@@ -4943,6 +5124,18 @@ _scsih_remove_device(struct MPT3SAS_ADAPTER *ioc,
4943 ioc->name, __func__, 5124 ioc->name, __func__,
4944 sas_device->handle, (unsigned long long) 5125 sas_device->handle, (unsigned long long)
4945 sas_device->sas_address)); 5126 sas_device->sas_address));
5127 if (sas_device->enclosure_handle != 0)
5128 dewtprintk(ioc, pr_info(MPT3SAS_FMT
5129 "%s: enter: enclosure logical id(0x%016llx), slot(%d)\n",
5130 ioc->name, __func__,
5131 (unsigned long long)sas_device->enclosure_logical_id,
5132 sas_device->slot));
5133 if (sas_device->connector_name[0] != '\0')
5134 dewtprintk(ioc, pr_info(MPT3SAS_FMT
5135 "%s: enter: enclosure level(0x%04x), connector name( %s)\n",
5136 ioc->name, __func__,
5137 sas_device->enclosure_level,
5138 sas_device->connector_name));
4946 5139
4947 if (sas_device->starget && sas_device->starget->hostdata) { 5140 if (sas_device->starget && sas_device->starget->hostdata) {
4948 sas_target_priv_data = sas_device->starget->hostdata; 5141 sas_target_priv_data = sas_device->starget->hostdata;
@@ -4959,12 +5152,34 @@ _scsih_remove_device(struct MPT3SAS_ADAPTER *ioc,
4959 "removing handle(0x%04x), sas_addr(0x%016llx)\n", 5152 "removing handle(0x%04x), sas_addr(0x%016llx)\n",
4960 ioc->name, sas_device->handle, 5153 ioc->name, sas_device->handle,
4961 (unsigned long long) sas_device->sas_address); 5154 (unsigned long long) sas_device->sas_address);
5155 if (sas_device->enclosure_handle != 0)
5156 pr_info(MPT3SAS_FMT
5157 "removing : enclosure logical id(0x%016llx), slot(%d)\n",
5158 ioc->name,
5159 (unsigned long long)sas_device->enclosure_logical_id,
5160 sas_device->slot);
5161 if (sas_device->connector_name[0] != '\0')
5162 pr_info(MPT3SAS_FMT
5163 "removing enclosure level(0x%04x), connector name( %s)\n",
5164 ioc->name, sas_device->enclosure_level,
5165 sas_device->connector_name);
4962 5166
4963 dewtprintk(ioc, pr_info(MPT3SAS_FMT 5167 dewtprintk(ioc, pr_info(MPT3SAS_FMT
4964 "%s: exit: handle(0x%04x), sas_addr(0x%016llx)\n", 5168 "%s: exit: handle(0x%04x), sas_addr(0x%016llx)\n",
4965 ioc->name, __func__, 5169 ioc->name, __func__,
4966 sas_device->handle, (unsigned long long) 5170 sas_device->handle, (unsigned long long)
4967 sas_device->sas_address)); 5171 sas_device->sas_address));
5172 if (sas_device->enclosure_handle != 0)
5173 dewtprintk(ioc, pr_info(MPT3SAS_FMT
5174 "%s: exit: enclosure logical id(0x%016llx), slot(%d)\n",
5175 ioc->name, __func__,
5176 (unsigned long long)sas_device->enclosure_logical_id,
5177 sas_device->slot));
5178 if (sas_device->connector_name[0] != '\0')
5179 dewtprintk(ioc, pr_info(MPT3SAS_FMT
5180 "%s: exit: enclosure level(0x%04x), connector name(%s)\n",
5181 ioc->name, __func__, sas_device->enclosure_level,
5182 sas_device->connector_name));
4968 5183
4969 kfree(sas_device); 5184 kfree(sas_device);
4970} 5185}
@@ -6357,9 +6572,7 @@ _scsih_prep_device_scan(struct MPT3SAS_ADAPTER *ioc)
6357/** 6572/**
6358 * _scsih_mark_responding_sas_device - mark a sas_devices as responding 6573 * _scsih_mark_responding_sas_device - mark a sas_devices as responding
6359 * @ioc: per adapter object 6574 * @ioc: per adapter object
6360 * @sas_address: sas address 6575 * @sas_device_pg0: SAS Device page 0
6361 * @slot: enclosure slot id
6362 * @handle: device handle
6363 * 6576 *
6364 * After host reset, find out whether devices are still responding. 6577 * After host reset, find out whether devices are still responding.
6365 * Used in _scsih_remove_unresponsive_sas_devices. 6578 * Used in _scsih_remove_unresponsive_sas_devices.
@@ -6367,8 +6580,8 @@ _scsih_prep_device_scan(struct MPT3SAS_ADAPTER *ioc)
6367 * Return nothing. 6580 * Return nothing.
6368 */ 6581 */
6369static void 6582static void
6370_scsih_mark_responding_sas_device(struct MPT3SAS_ADAPTER *ioc, u64 sas_address, 6583_scsih_mark_responding_sas_device(struct MPT3SAS_ADAPTER *ioc,
6371 u16 slot, u16 handle) 6584Mpi2SasDevicePage0_t *sas_device_pg0)
6372{ 6585{
6373 struct MPT3SAS_TARGET *sas_target_priv_data = NULL; 6586 struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
6374 struct scsi_target *starget; 6587 struct scsi_target *starget;
@@ -6377,8 +6590,8 @@ _scsih_mark_responding_sas_device(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
6377 6590
6378 spin_lock_irqsave(&ioc->sas_device_lock, flags); 6591 spin_lock_irqsave(&ioc->sas_device_lock, flags);
6379 list_for_each_entry(sas_device, &ioc->sas_device_list, list) { 6592 list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
6380 if (sas_device->sas_address == sas_address && 6593 if ((sas_device->sas_address == sas_device_pg0->SASAddress) &&
6381 sas_device->slot == slot) { 6594 (sas_device->slot == sas_device_pg0->Slot)) {
6382 sas_device->responding = 1; 6595 sas_device->responding = 1;
6383 starget = sas_device->starget; 6596 starget = sas_device->starget;
6384 if (starget && starget->hostdata) { 6597 if (starget && starget->hostdata) {
@@ -6387,22 +6600,40 @@ _scsih_mark_responding_sas_device(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
6387 sas_target_priv_data->deleted = 0; 6600 sas_target_priv_data->deleted = 0;
6388 } else 6601 } else
6389 sas_target_priv_data = NULL; 6602 sas_target_priv_data = NULL;
6390 if (starget) 6603 if (starget) {
6391 starget_printk(KERN_INFO, starget, 6604 starget_printk(KERN_INFO, starget,
6392 "handle(0x%04x), sas_addr(0x%016llx), " 6605 "handle(0x%04x), sas_addr(0x%016llx)\n",
6393 "enclosure logical id(0x%016llx), " 6606 sas_device_pg0->DevHandle,
6394 "slot(%d)\n", handle,
6395 (unsigned long long)sas_device->sas_address,
6396 (unsigned long long) 6607 (unsigned long long)
6397 sas_device->enclosure_logical_id, 6608 sas_device->sas_address);
6398 sas_device->slot); 6609
6399 if (sas_device->handle == handle) 6610 if (sas_device->enclosure_handle != 0)
6611 starget_printk(KERN_INFO, starget,
6612 "enclosure logical id(0x%016llx),"
6613 " slot(%d)\n",
6614 (unsigned long long)
6615 sas_device->enclosure_logical_id,
6616 sas_device->slot);
6617 }
6618 if (sas_device_pg0->Flags &
6619 MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
6620 sas_device->enclosure_level =
6621 le16_to_cpu(sas_device_pg0->EnclosureLevel);
6622 memcpy(&sas_device->connector_name[0],
6623 &sas_device_pg0->ConnectorName[0], 4);
6624 } else {
6625 sas_device->enclosure_level = 0;
6626 sas_device->connector_name[0] = '\0';
6627 }
6628
6629 if (sas_device->handle == sas_device_pg0->DevHandle)
6400 goto out; 6630 goto out;
6401 pr_info("\thandle changed from(0x%04x)!!!\n", 6631 pr_info("\thandle changed from(0x%04x)!!!\n",
6402 sas_device->handle); 6632 sas_device->handle);
6403 sas_device->handle = handle; 6633 sas_device->handle = sas_device_pg0->DevHandle;
6404 if (sas_target_priv_data) 6634 if (sas_target_priv_data)
6405 sas_target_priv_data->handle = handle; 6635 sas_target_priv_data->handle =
6636 sas_device_pg0->DevHandle;
6406 goto out; 6637 goto out;
6407 } 6638 }
6408 } 6639 }
@@ -6441,13 +6672,15 @@ _scsih_search_responding_sas_devices(struct MPT3SAS_ADAPTER *ioc)
6441 MPI2_IOCSTATUS_MASK; 6672 MPI2_IOCSTATUS_MASK;
6442 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) 6673 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
6443 break; 6674 break;
6444 handle = le16_to_cpu(sas_device_pg0.DevHandle); 6675 handle = sas_device_pg0.DevHandle =
6676 le16_to_cpu(sas_device_pg0.DevHandle);
6445 device_info = le32_to_cpu(sas_device_pg0.DeviceInfo); 6677 device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
6446 if (!(_scsih_is_end_device(device_info))) 6678 if (!(_scsih_is_end_device(device_info)))
6447 continue; 6679 continue;
6448 _scsih_mark_responding_sas_device(ioc, 6680 sas_device_pg0.SASAddress =
6449 le64_to_cpu(sas_device_pg0.SASAddress), 6681 le64_to_cpu(sas_device_pg0.SASAddress);
6450 le16_to_cpu(sas_device_pg0.Slot), handle); 6682 sas_device_pg0.Slot = le16_to_cpu(sas_device_pg0.Slot);
6683 _scsih_mark_responding_sas_device(ioc, &sas_device_pg0);
6451 } 6684 }
6452 6685
6453 out: 6686 out:
@@ -7854,8 +8087,8 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
7854 /* event thread */ 8087 /* event thread */
7855 snprintf(ioc->firmware_event_name, sizeof(ioc->firmware_event_name), 8088 snprintf(ioc->firmware_event_name, sizeof(ioc->firmware_event_name),
7856 "fw_event%d", ioc->id); 8089 "fw_event%d", ioc->id);
7857 ioc->firmware_event_thread = create_singlethread_workqueue( 8090 ioc->firmware_event_thread = alloc_ordered_workqueue(
7858 ioc->firmware_event_name); 8091 ioc->firmware_event_name, WQ_MEM_RECLAIM);
7859 if (!ioc->firmware_event_thread) { 8092 if (!ioc->firmware_event_thread) {
7860 pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", 8093 pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
7861 ioc->name, __FILE__, __LINE__, __func__); 8094 ioc->name, __FILE__, __LINE__, __func__);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c
index efb98afc46e0..70fd019e7ee5 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_transport.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c
@@ -649,6 +649,7 @@ mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle,
649 unsigned long flags; 649 unsigned long flags;
650 struct _sas_node *sas_node; 650 struct _sas_node *sas_node;
651 struct sas_rphy *rphy; 651 struct sas_rphy *rphy;
652 struct _sas_device *sas_device = NULL;
652 int i; 653 int i;
653 struct sas_port *port; 654 struct sas_port *port;
654 655
@@ -731,10 +732,27 @@ mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle,
731 mpt3sas_port->remote_identify.device_type); 732 mpt3sas_port->remote_identify.device_type);
732 733
733 rphy->identify = mpt3sas_port->remote_identify; 734 rphy->identify = mpt3sas_port->remote_identify;
735
736 if (mpt3sas_port->remote_identify.device_type == SAS_END_DEVICE) {
737 sas_device = mpt3sas_scsih_sas_device_find_by_sas_address(ioc,
738 mpt3sas_port->remote_identify.sas_address);
739 if (!sas_device) {
740 dfailprintk(ioc, printk(MPT3SAS_FMT
741 "failure at %s:%d/%s()!\n",
742 ioc->name, __FILE__, __LINE__, __func__));
743 goto out_fail;
744 }
745 sas_device->pend_sas_rphy_add = 1;
746 }
747
734 if ((sas_rphy_add(rphy))) { 748 if ((sas_rphy_add(rphy))) {
735 pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", 749 pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
736 ioc->name, __FILE__, __LINE__, __func__); 750 ioc->name, __FILE__, __LINE__, __func__);
737 } 751 }
752
753 if (mpt3sas_port->remote_identify.device_type == SAS_END_DEVICE)
754 sas_device->pend_sas_rphy_add = 0;
755
738 if ((ioc->logging_level & MPT_DEBUG_TRANSPORT)) 756 if ((ioc->logging_level & MPT_DEBUG_TRANSPORT))
739 dev_printk(KERN_INFO, &rphy->dev, 757 dev_printk(KERN_INFO, &rphy->dev,
740 "add: handle(0x%04x), sas_addr(0x%016llx)\n", 758 "add: handle(0x%04x), sas_addr(0x%016llx)\n",
@@ -1946,7 +1964,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1946 } else { 1964 } else {
1947 dma_addr_out = pci_map_single(ioc->pdev, bio_data(req->bio), 1965 dma_addr_out = pci_map_single(ioc->pdev, bio_data(req->bio),
1948 blk_rq_bytes(req), PCI_DMA_BIDIRECTIONAL); 1966 blk_rq_bytes(req), PCI_DMA_BIDIRECTIONAL);
1949 if (!dma_addr_out) { 1967 if (pci_dma_mapping_error(ioc->pdev, dma_addr_out)) {
1950 pr_info(MPT3SAS_FMT "%s(): DMA Addr out = NULL\n", 1968 pr_info(MPT3SAS_FMT "%s(): DMA Addr out = NULL\n",
1951 ioc->name, __func__); 1969 ioc->name, __func__);
1952 rc = -ENOMEM; 1970 rc = -ENOMEM;
@@ -1968,7 +1986,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1968 } else { 1986 } else {
1969 dma_addr_in = pci_map_single(ioc->pdev, bio_data(rsp->bio), 1987 dma_addr_in = pci_map_single(ioc->pdev, bio_data(rsp->bio),
1970 blk_rq_bytes(rsp), PCI_DMA_BIDIRECTIONAL); 1988 blk_rq_bytes(rsp), PCI_DMA_BIDIRECTIONAL);
1971 if (!dma_addr_in) { 1989 if (pci_dma_mapping_error(ioc->pdev, dma_addr_in)) {
1972 pr_info(MPT3SAS_FMT "%s(): DMA Addr in = NULL\n", 1990 pr_info(MPT3SAS_FMT "%s(): DMA Addr in = NULL\n",
1973 ioc->name, __func__); 1991 ioc->name, __func__);
1974 rc = -ENOMEM; 1992 rc = -ENOMEM;
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index 39306b1e704c..04e67a190652 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -2642,6 +2642,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
2642 ts->resp = SAS_TASK_COMPLETE; 2642 ts->resp = SAS_TASK_COMPLETE;
2643 ts->stat = SAS_OPEN_REJECT; 2643 ts->stat = SAS_OPEN_REJECT;
2644 ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; 2644 ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
2645 break;
2645 default: 2646 default:
2646 PM8001_IO_DBG(pm8001_ha, 2647 PM8001_IO_DBG(pm8001_ha,
2647 pm8001_printk("Unknown status 0x%x\n", status)); 2648 pm8001_printk("Unknown status 0x%x\n", status));
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
index 0e1628f2018e..9a389f1508de 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.c
+++ b/drivers/scsi/pm8001/pm80xx_hwi.c
@@ -2337,6 +2337,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
2337 ts->resp = SAS_TASK_COMPLETE; 2337 ts->resp = SAS_TASK_COMPLETE;
2338 ts->stat = SAS_OPEN_REJECT; 2338 ts->stat = SAS_OPEN_REJECT;
2339 ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; 2339 ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
2340 break;
2340 default: 2341 default:
2341 PM8001_IO_DBG(pm8001_ha, 2342 PM8001_IO_DBG(pm8001_ha,
2342 pm8001_printk("Unknown status 0x%x\n", status)); 2343 pm8001_printk("Unknown status 0x%x\n", status));
diff --git a/drivers/scsi/qla2xxx/Kconfig b/drivers/scsi/qla2xxx/Kconfig
index 33f60c92e20e..a0f732b138e4 100644
--- a/drivers/scsi/qla2xxx/Kconfig
+++ b/drivers/scsi/qla2xxx/Kconfig
@@ -32,10 +32,10 @@ config SCSI_QLA_FC
32 They are also included in the linux-firmware tree as well. 32 They are also included in the linux-firmware tree as well.
33 33
34config TCM_QLA2XXX 34config TCM_QLA2XXX
35 tristate "TCM_QLA2XXX fabric module for Qlogic 2xxx series target mode HBAs" 35 tristate "TCM_QLA2XXX fabric module for QLogic 24xx+ series target mode HBAs"
36 depends on SCSI_QLA_FC && TARGET_CORE 36 depends on SCSI_QLA_FC && TARGET_CORE
37 depends on LIBFC 37 depends on LIBFC
38 select BTREE 38 select BTREE
39 default n 39 default n
40 ---help--- 40 ---help---
41 Say Y here to enable the TCM_QLA2XXX fabric module for Qlogic 2xxx series target mode HBAs 41 Say Y here to enable the TCM_QLA2XXX fabric module for QLogic 24xx+ series target mode HBAs
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index 7ed7bae6172b..ac65cb7b4886 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -1359,9 +1359,7 @@ static void tcm_qla2xxx_free_session(struct qla_tgt_sess *sess)
1359 struct qla_hw_data *ha = tgt->ha; 1359 struct qla_hw_data *ha = tgt->ha;
1360 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); 1360 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
1361 struct se_session *se_sess; 1361 struct se_session *se_sess;
1362 struct se_node_acl *se_nacl;
1363 struct tcm_qla2xxx_lport *lport; 1362 struct tcm_qla2xxx_lport *lport;
1364 struct tcm_qla2xxx_nacl *nacl;
1365 1363
1366 BUG_ON(in_interrupt()); 1364 BUG_ON(in_interrupt());
1367 1365
@@ -1371,8 +1369,6 @@ static void tcm_qla2xxx_free_session(struct qla_tgt_sess *sess)
1371 dump_stack(); 1369 dump_stack();
1372 return; 1370 return;
1373 } 1371 }
1374 se_nacl = se_sess->se_node_acl;
1375 nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
1376 1372
1377 lport = vha->vha_tgt.target_lport_ptr; 1373 lport = vha->vha_tgt.target_lport_ptr;
1378 if (!lport) { 1374 if (!lport) {
@@ -1680,7 +1676,6 @@ static int tcm_qla2xxx_lport_register_npiv_cb(struct scsi_qla_host *base_vha,
1680 (struct tcm_qla2xxx_lport *)target_lport_ptr; 1676 (struct tcm_qla2xxx_lport *)target_lport_ptr;
1681 struct tcm_qla2xxx_lport *base_lport = 1677 struct tcm_qla2xxx_lport *base_lport =
1682 (struct tcm_qla2xxx_lport *)base_vha->vha_tgt.target_lport_ptr; 1678 (struct tcm_qla2xxx_lport *)base_vha->vha_tgt.target_lport_ptr;
1683 struct tcm_qla2xxx_tpg *base_tpg;
1684 struct fc_vport_identifiers vport_id; 1679 struct fc_vport_identifiers vport_id;
1685 1680
1686 if (!qla_tgt_mode_enabled(base_vha)) { 1681 if (!qla_tgt_mode_enabled(base_vha)) {
@@ -1693,7 +1688,6 @@ static int tcm_qla2xxx_lport_register_npiv_cb(struct scsi_qla_host *base_vha,
1693 pr_err("qla2xxx base_lport or tpg_1 not available\n"); 1688 pr_err("qla2xxx base_lport or tpg_1 not available\n");
1694 return -EPERM; 1689 return -EPERM;
1695 } 1690 }
1696 base_tpg = base_lport->tpg_1;
1697 1691
1698 memset(&vport_id, 0, sizeof(vport_id)); 1692 memset(&vport_id, 0, sizeof(vport_id));
1699 vport_id.port_name = npiv_wwpn; 1693 vport_id.port_name = npiv_wwpn;
@@ -1810,6 +1804,11 @@ static const struct target_core_fabric_ops tcm_qla2xxx_ops = {
1810 .module = THIS_MODULE, 1804 .module = THIS_MODULE,
1811 .name = "qla2xxx", 1805 .name = "qla2xxx",
1812 .node_acl_size = sizeof(struct tcm_qla2xxx_nacl), 1806 .node_acl_size = sizeof(struct tcm_qla2xxx_nacl),
1807 /*
1808 * XXX: Limit assumes single page per scatter-gather-list entry.
1809 * Current maximum is ~4.9 MB per se_cmd->t_data_sg with PAGE_SIZE=4096
1810 */
1811 .max_data_sg_nents = 1200,
1813 .get_fabric_name = tcm_qla2xxx_get_fabric_name, 1812 .get_fabric_name = tcm_qla2xxx_get_fabric_name,
1814 .tpg_get_wwn = tcm_qla2xxx_get_fabric_wwn, 1813 .tpg_get_wwn = tcm_qla2xxx_get_fabric_wwn,
1815 .tpg_get_tag = tcm_qla2xxx_get_tag, 1814 .tpg_get_tag = tcm_qla2xxx_get_tag,
@@ -1958,7 +1957,7 @@ static void __exit tcm_qla2xxx_exit(void)
1958 tcm_qla2xxx_deregister_configfs(); 1957 tcm_qla2xxx_deregister_configfs();
1959} 1958}
1960 1959
1961MODULE_DESCRIPTION("TCM QLA2XXX series NPIV enabled fabric driver"); 1960MODULE_DESCRIPTION("TCM QLA24XX+ series NPIV enabled fabric driver");
1962MODULE_LICENSE("GPL"); 1961MODULE_LICENSE("GPL");
1963module_init(tcm_qla2xxx_init); 1962module_init(tcm_qla2xxx_init);
1964module_exit(tcm_qla2xxx_exit); 1963module_exit(tcm_qla2xxx_exit);
diff --git a/drivers/scsi/scsi_common.c b/drivers/scsi/scsi_common.c
index 2ff092252b76..c126966130ab 100644
--- a/drivers/scsi/scsi_common.c
+++ b/drivers/scsi/scsi_common.c
@@ -5,6 +5,8 @@
5#include <linux/bug.h> 5#include <linux/bug.h>
6#include <linux/kernel.h> 6#include <linux/kernel.h>
7#include <linux/string.h> 7#include <linux/string.h>
8#include <linux/errno.h>
9#include <asm/unaligned.h>
8#include <scsi/scsi_common.h> 10#include <scsi/scsi_common.h>
9 11
10/* NB: These are exposed through /proc/scsi/scsi and form part of the ABI. 12/* NB: These are exposed through /proc/scsi/scsi and form part of the ABI.
@@ -176,3 +178,110 @@ bool scsi_normalize_sense(const u8 *sense_buffer, int sb_len,
176 return true; 178 return true;
177} 179}
178EXPORT_SYMBOL(scsi_normalize_sense); 180EXPORT_SYMBOL(scsi_normalize_sense);
181
182/**
183 * scsi_sense_desc_find - search for a given descriptor type in descriptor sense data format.
184 * @sense_buffer: byte array of descriptor format sense data
185 * @sb_len: number of valid bytes in sense_buffer
186 * @desc_type: value of descriptor type to find
187 * (e.g. 0 -> information)
188 *
189 * Notes:
190 * only valid when sense data is in descriptor format
191 *
192 * Return value:
193 * pointer to start of (first) descriptor if found else NULL
194 */
195const u8 * scsi_sense_desc_find(const u8 * sense_buffer, int sb_len,
196 int desc_type)
197{
198 int add_sen_len, add_len, desc_len, k;
199 const u8 * descp;
200
201 if ((sb_len < 8) || (0 == (add_sen_len = sense_buffer[7])))
202 return NULL;
203 if ((sense_buffer[0] < 0x72) || (sense_buffer[0] > 0x73))
204 return NULL;
205 add_sen_len = (add_sen_len < (sb_len - 8)) ?
206 add_sen_len : (sb_len - 8);
207 descp = &sense_buffer[8];
208 for (desc_len = 0, k = 0; k < add_sen_len; k += desc_len) {
209 descp += desc_len;
210 add_len = (k < (add_sen_len - 1)) ? descp[1]: -1;
211 desc_len = add_len + 2;
212 if (descp[0] == desc_type)
213 return descp;
214 if (add_len < 0) // short descriptor ??
215 break;
216 }
217 return NULL;
218}
219EXPORT_SYMBOL(scsi_sense_desc_find);
220
221/**
222 * scsi_build_sense_buffer - build sense data in a buffer
223 * @desc: Sense format (non zero == descriptor format,
224 * 0 == fixed format)
225 * @buf: Where to build sense data
226 * @key: Sense key
227 * @asc: Additional sense code
228 * @ascq: Additional sense code qualifier
229 *
230 **/
231void scsi_build_sense_buffer(int desc, u8 *buf, u8 key, u8 asc, u8 ascq)
232{
233 if (desc) {
234 buf[0] = 0x72; /* descriptor, current */
235 buf[1] = key;
236 buf[2] = asc;
237 buf[3] = ascq;
238 buf[7] = 0;
239 } else {
240 buf[0] = 0x70; /* fixed, current */
241 buf[2] = key;
242 buf[7] = 0xa;
243 buf[12] = asc;
244 buf[13] = ascq;
245 }
246}
247EXPORT_SYMBOL(scsi_build_sense_buffer);
248
249/**
250 * scsi_set_sense_information - set the information field in a
251 * formatted sense data buffer
252 * @buf: Where to build sense data
253 * @buf_len: buffer length
254 * @info: 64-bit information value to be set
255 *
256 * Return value:
257 * 0 on success or EINVAL for invalid sense buffer length
258 **/
259int scsi_set_sense_information(u8 *buf, int buf_len, u64 info)
260{
261 if ((buf[0] & 0x7f) == 0x72) {
262 u8 *ucp, len;
263
264 len = buf[7];
265 ucp = (char *)scsi_sense_desc_find(buf, len + 8, 0);
266 if (!ucp) {
267 buf[7] = len + 0xc;
268 ucp = buf + 8 + len;
269 }
270
271 if (buf_len < len + 0xc)
272 /* Not enough room for info */
273 return -EINVAL;
274
275 ucp[0] = 0;
276 ucp[1] = 0xa;
277 ucp[2] = 0x80; /* Valid bit */
278 ucp[3] = 0;
279 put_unaligned_be64(info, &ucp[4]);
280 } else if ((buf[0] & 0x7f) == 0x70) {
281 buf[0] |= 0x80;
282 put_unaligned_be64(info, &buf[3]);
283 }
284
285 return 0;
286}
287EXPORT_SYMBOL(scsi_set_sense_information);
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 30268bb2ddb6..dfcc45bb03b1 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -25,6 +25,9 @@
25 * module options to "modprobe scsi_debug num_tgts=2" [20021221] 25 * module options to "modprobe scsi_debug num_tgts=2" [20021221]
26 */ 26 */
27 27
28
29#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
30
28#include <linux/module.h> 31#include <linux/module.h>
29 32
30#include <linux/kernel.h> 33#include <linux/kernel.h>
@@ -201,7 +204,6 @@ static const char *scsi_debug_version_date = "20141022";
201/* If REPORT LUNS has luns >= 256 it can choose "flat space" (value 1) 204/* If REPORT LUNS has luns >= 256 it can choose "flat space" (value 1)
202 * or "peripheral device" addressing (value 0) */ 205 * or "peripheral device" addressing (value 0) */
203#define SAM2_LUN_ADDRESS_METHOD 0 206#define SAM2_LUN_ADDRESS_METHOD 0
204#define SAM2_WLUN_REPORT_LUNS 0xc101
205 207
206/* SCSI_DEBUG_CANQUEUE is the maximum number of commands that can be queued 208/* SCSI_DEBUG_CANQUEUE is the maximum number of commands that can be queued
207 * (for response) at one time. Can be reduced by max_queue option. Command 209 * (for response) at one time. Can be reduced by max_queue option. Command
@@ -698,7 +700,7 @@ static void sdebug_max_tgts_luns(void)
698 else 700 else
699 hpnt->max_id = scsi_debug_num_tgts; 701 hpnt->max_id = scsi_debug_num_tgts;
700 /* scsi_debug_max_luns; */ 702 /* scsi_debug_max_luns; */
701 hpnt->max_lun = SAM2_WLUN_REPORT_LUNS; 703 hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
702 } 704 }
703 spin_unlock(&sdebug_host_list_lock); 705 spin_unlock(&sdebug_host_list_lock);
704} 706}
@@ -1288,7 +1290,7 @@ static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1288 arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC); 1290 arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
1289 if (! arr) 1291 if (! arr)
1290 return DID_REQUEUE << 16; 1292 return DID_REQUEUE << 16;
1291 have_wlun = (scp->device->lun == SAM2_WLUN_REPORT_LUNS); 1293 have_wlun = (scp->device->lun == SCSI_W_LUN_REPORT_LUNS);
1292 if (have_wlun) 1294 if (have_wlun)
1293 pq_pdt = 0x1e; /* present, wlun */ 1295 pq_pdt = 0x1e; /* present, wlun */
1294 else if (scsi_debug_no_lun_0 && (0 == devip->lun)) 1296 else if (scsi_debug_no_lun_0 && (0 == devip->lun))
@@ -1427,12 +1429,11 @@ static int resp_requests(struct scsi_cmnd * scp,
1427 unsigned char * sbuff; 1429 unsigned char * sbuff;
1428 unsigned char *cmd = scp->cmnd; 1430 unsigned char *cmd = scp->cmnd;
1429 unsigned char arr[SCSI_SENSE_BUFFERSIZE]; 1431 unsigned char arr[SCSI_SENSE_BUFFERSIZE];
1430 bool dsense, want_dsense; 1432 bool dsense;
1431 int len = 18; 1433 int len = 18;
1432 1434
1433 memset(arr, 0, sizeof(arr)); 1435 memset(arr, 0, sizeof(arr));
1434 dsense = !!(cmd[1] & 1); 1436 dsense = !!(cmd[1] & 1);
1435 want_dsense = dsense || scsi_debug_dsense;
1436 sbuff = scp->sense_buffer; 1437 sbuff = scp->sense_buffer;
1437 if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) { 1438 if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
1438 if (dsense) { 1439 if (dsense) {
@@ -2446,8 +2447,7 @@ static int dif_verify(struct sd_dif_tuple *sdt, const void *data,
2446 __be16 csum = dif_compute_csum(data, scsi_debug_sector_size); 2447 __be16 csum = dif_compute_csum(data, scsi_debug_sector_size);
2447 2448
2448 if (sdt->guard_tag != csum) { 2449 if (sdt->guard_tag != csum) {
2449 pr_err("%s: GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n", 2450 pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
2450 __func__,
2451 (unsigned long)sector, 2451 (unsigned long)sector,
2452 be16_to_cpu(sdt->guard_tag), 2452 be16_to_cpu(sdt->guard_tag),
2453 be16_to_cpu(csum)); 2453 be16_to_cpu(csum));
@@ -2455,14 +2455,14 @@ static int dif_verify(struct sd_dif_tuple *sdt, const void *data,
2455 } 2455 }
2456 if (scsi_debug_dif == SD_DIF_TYPE1_PROTECTION && 2456 if (scsi_debug_dif == SD_DIF_TYPE1_PROTECTION &&
2457 be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) { 2457 be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
2458 pr_err("%s: REF check failed on sector %lu\n", 2458 pr_err("REF check failed on sector %lu\n",
2459 __func__, (unsigned long)sector); 2459 (unsigned long)sector);
2460 return 0x03; 2460 return 0x03;
2461 } 2461 }
2462 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION && 2462 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
2463 be32_to_cpu(sdt->ref_tag) != ei_lba) { 2463 be32_to_cpu(sdt->ref_tag) != ei_lba) {
2464 pr_err("%s: REF check failed on sector %lu\n", 2464 pr_err("REF check failed on sector %lu\n",
2465 __func__, (unsigned long)sector); 2465 (unsigned long)sector);
2466 return 0x03; 2466 return 0x03;
2467 } 2467 }
2468 return 0; 2468 return 0;
@@ -2680,7 +2680,7 @@ resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
2680 return 0; 2680 return 0;
2681} 2681}
2682 2682
2683void dump_sector(unsigned char *buf, int len) 2683static void dump_sector(unsigned char *buf, int len)
2684{ 2684{
2685 int i, j, n; 2685 int i, j, n;
2686 2686
@@ -3365,8 +3365,8 @@ static int resp_report_luns(struct scsi_cmnd * scp,
3365 one_lun[i].scsi_lun[1] = lun & 0xff; 3365 one_lun[i].scsi_lun[1] = lun & 0xff;
3366 } 3366 }
3367 if (want_wlun) { 3367 if (want_wlun) {
3368 one_lun[i].scsi_lun[0] = (SAM2_WLUN_REPORT_LUNS >> 8) & 0xff; 3368 one_lun[i].scsi_lun[0] = (SCSI_W_LUN_REPORT_LUNS >> 8) & 0xff;
3369 one_lun[i].scsi_lun[1] = SAM2_WLUN_REPORT_LUNS & 0xff; 3369 one_lun[i].scsi_lun[1] = SCSI_W_LUN_REPORT_LUNS & 0xff;
3370 i++; 3370 i++;
3371 } 3371 }
3372 alloc_len = (unsigned char *)(one_lun + i) - arr; 3372 alloc_len = (unsigned char *)(one_lun + i) - arr;
@@ -3449,7 +3449,7 @@ static void sdebug_q_cmd_complete(unsigned long indx)
3449 atomic_inc(&sdebug_completions); 3449 atomic_inc(&sdebug_completions);
3450 qa_indx = indx; 3450 qa_indx = indx;
3451 if ((qa_indx < 0) || (qa_indx >= SCSI_DEBUG_CANQUEUE)) { 3451 if ((qa_indx < 0) || (qa_indx >= SCSI_DEBUG_CANQUEUE)) {
3452 pr_err("%s: wild qa_indx=%d\n", __func__, qa_indx); 3452 pr_err("wild qa_indx=%d\n", qa_indx);
3453 return; 3453 return;
3454 } 3454 }
3455 spin_lock_irqsave(&queued_arr_lock, iflags); 3455 spin_lock_irqsave(&queued_arr_lock, iflags);
@@ -3457,21 +3457,21 @@ static void sdebug_q_cmd_complete(unsigned long indx)
3457 scp = sqcp->a_cmnd; 3457 scp = sqcp->a_cmnd;
3458 if (NULL == scp) { 3458 if (NULL == scp) {
3459 spin_unlock_irqrestore(&queued_arr_lock, iflags); 3459 spin_unlock_irqrestore(&queued_arr_lock, iflags);
3460 pr_err("%s: scp is NULL\n", __func__); 3460 pr_err("scp is NULL\n");
3461 return; 3461 return;
3462 } 3462 }
3463 devip = (struct sdebug_dev_info *)scp->device->hostdata; 3463 devip = (struct sdebug_dev_info *)scp->device->hostdata;
3464 if (devip) 3464 if (devip)
3465 atomic_dec(&devip->num_in_q); 3465 atomic_dec(&devip->num_in_q);
3466 else 3466 else
3467 pr_err("%s: devip=NULL\n", __func__); 3467 pr_err("devip=NULL\n");
3468 if (atomic_read(&retired_max_queue) > 0) 3468 if (atomic_read(&retired_max_queue) > 0)
3469 retiring = 1; 3469 retiring = 1;
3470 3470
3471 sqcp->a_cmnd = NULL; 3471 sqcp->a_cmnd = NULL;
3472 if (!test_and_clear_bit(qa_indx, queued_in_use_bm)) { 3472 if (!test_and_clear_bit(qa_indx, queued_in_use_bm)) {
3473 spin_unlock_irqrestore(&queued_arr_lock, iflags); 3473 spin_unlock_irqrestore(&queued_arr_lock, iflags);
3474 pr_err("%s: Unexpected completion\n", __func__); 3474 pr_err("Unexpected completion\n");
3475 return; 3475 return;
3476 } 3476 }
3477 3477
@@ -3481,7 +3481,7 @@ static void sdebug_q_cmd_complete(unsigned long indx)
3481 retval = atomic_read(&retired_max_queue); 3481 retval = atomic_read(&retired_max_queue);
3482 if (qa_indx >= retval) { 3482 if (qa_indx >= retval) {
3483 spin_unlock_irqrestore(&queued_arr_lock, iflags); 3483 spin_unlock_irqrestore(&queued_arr_lock, iflags);
3484 pr_err("%s: index %d too large\n", __func__, retval); 3484 pr_err("index %d too large\n", retval);
3485 return; 3485 return;
3486 } 3486 }
3487 k = find_last_bit(queued_in_use_bm, retval); 3487 k = find_last_bit(queued_in_use_bm, retval);
@@ -3509,7 +3509,7 @@ sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
3509 atomic_inc(&sdebug_completions); 3509 atomic_inc(&sdebug_completions);
3510 qa_indx = sd_hrtp->qa_indx; 3510 qa_indx = sd_hrtp->qa_indx;
3511 if ((qa_indx < 0) || (qa_indx >= SCSI_DEBUG_CANQUEUE)) { 3511 if ((qa_indx < 0) || (qa_indx >= SCSI_DEBUG_CANQUEUE)) {
3512 pr_err("%s: wild qa_indx=%d\n", __func__, qa_indx); 3512 pr_err("wild qa_indx=%d\n", qa_indx);
3513 goto the_end; 3513 goto the_end;
3514 } 3514 }
3515 spin_lock_irqsave(&queued_arr_lock, iflags); 3515 spin_lock_irqsave(&queued_arr_lock, iflags);
@@ -3517,21 +3517,21 @@ sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
3517 scp = sqcp->a_cmnd; 3517 scp = sqcp->a_cmnd;
3518 if (NULL == scp) { 3518 if (NULL == scp) {
3519 spin_unlock_irqrestore(&queued_arr_lock, iflags); 3519 spin_unlock_irqrestore(&queued_arr_lock, iflags);
3520 pr_err("%s: scp is NULL\n", __func__); 3520 pr_err("scp is NULL\n");
3521 goto the_end; 3521 goto the_end;
3522 } 3522 }
3523 devip = (struct sdebug_dev_info *)scp->device->hostdata; 3523 devip = (struct sdebug_dev_info *)scp->device->hostdata;
3524 if (devip) 3524 if (devip)
3525 atomic_dec(&devip->num_in_q); 3525 atomic_dec(&devip->num_in_q);
3526 else 3526 else
3527 pr_err("%s: devip=NULL\n", __func__); 3527 pr_err("devip=NULL\n");
3528 if (atomic_read(&retired_max_queue) > 0) 3528 if (atomic_read(&retired_max_queue) > 0)
3529 retiring = 1; 3529 retiring = 1;
3530 3530
3531 sqcp->a_cmnd = NULL; 3531 sqcp->a_cmnd = NULL;
3532 if (!test_and_clear_bit(qa_indx, queued_in_use_bm)) { 3532 if (!test_and_clear_bit(qa_indx, queued_in_use_bm)) {
3533 spin_unlock_irqrestore(&queued_arr_lock, iflags); 3533 spin_unlock_irqrestore(&queued_arr_lock, iflags);
3534 pr_err("%s: Unexpected completion\n", __func__); 3534 pr_err("Unexpected completion\n");
3535 goto the_end; 3535 goto the_end;
3536 } 3536 }
3537 3537
@@ -3541,7 +3541,7 @@ sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
3541 retval = atomic_read(&retired_max_queue); 3541 retval = atomic_read(&retired_max_queue);
3542 if (qa_indx >= retval) { 3542 if (qa_indx >= retval) {
3543 spin_unlock_irqrestore(&queued_arr_lock, iflags); 3543 spin_unlock_irqrestore(&queued_arr_lock, iflags);
3544 pr_err("%s: index %d too large\n", __func__, retval); 3544 pr_err("index %d too large\n", retval);
3545 goto the_end; 3545 goto the_end;
3546 } 3546 }
3547 k = find_last_bit(queued_in_use_bm, retval); 3547 k = find_last_bit(queued_in_use_bm, retval);
@@ -3580,7 +3580,7 @@ static struct sdebug_dev_info * devInfoReg(struct scsi_device * sdev)
3580 return devip; 3580 return devip;
3581 sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host); 3581 sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
3582 if (!sdbg_host) { 3582 if (!sdbg_host) {
3583 pr_err("%s: Host info NULL\n", __func__); 3583 pr_err("Host info NULL\n");
3584 return NULL; 3584 return NULL;
3585 } 3585 }
3586 list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) { 3586 list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
@@ -3596,8 +3596,7 @@ static struct sdebug_dev_info * devInfoReg(struct scsi_device * sdev)
3596 if (!open_devip) { /* try and make a new one */ 3596 if (!open_devip) { /* try and make a new one */
3597 open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC); 3597 open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
3598 if (!open_devip) { 3598 if (!open_devip) {
3599 printk(KERN_ERR "%s: out of memory at line %d\n", 3599 pr_err("out of memory at line %d\n", __LINE__);
3600 __func__, __LINE__);
3601 return NULL; 3600 return NULL;
3602 } 3601 }
3603 } 3602 }
@@ -3615,7 +3614,7 @@ static struct sdebug_dev_info * devInfoReg(struct scsi_device * sdev)
3615static int scsi_debug_slave_alloc(struct scsi_device *sdp) 3614static int scsi_debug_slave_alloc(struct scsi_device *sdp)
3616{ 3615{
3617 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) 3616 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
3618 printk(KERN_INFO "scsi_debug: slave_alloc <%u %u %u %llu>\n", 3617 pr_info("slave_alloc <%u %u %u %llu>\n",
3619 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun); 3618 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
3620 queue_flag_set_unlocked(QUEUE_FLAG_BIDI, sdp->request_queue); 3619 queue_flag_set_unlocked(QUEUE_FLAG_BIDI, sdp->request_queue);
3621 return 0; 3620 return 0;
@@ -3626,7 +3625,7 @@ static int scsi_debug_slave_configure(struct scsi_device *sdp)
3626 struct sdebug_dev_info *devip; 3625 struct sdebug_dev_info *devip;
3627 3626
3628 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) 3627 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
3629 printk(KERN_INFO "scsi_debug: slave_configure <%u %u %u %llu>\n", 3628 pr_info("slave_configure <%u %u %u %llu>\n",
3630 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun); 3629 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
3631 if (sdp->host->max_cmd_len != SCSI_DEBUG_MAX_CMD_LEN) 3630 if (sdp->host->max_cmd_len != SCSI_DEBUG_MAX_CMD_LEN)
3632 sdp->host->max_cmd_len = SCSI_DEBUG_MAX_CMD_LEN; 3631 sdp->host->max_cmd_len = SCSI_DEBUG_MAX_CMD_LEN;
@@ -3646,7 +3645,7 @@ static void scsi_debug_slave_destroy(struct scsi_device *sdp)
3646 (struct sdebug_dev_info *)sdp->hostdata; 3645 (struct sdebug_dev_info *)sdp->hostdata;
3647 3646
3648 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) 3647 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
3649 printk(KERN_INFO "scsi_debug: slave_destroy <%u %u %u %llu>\n", 3648 pr_info("slave_destroy <%u %u %u %llu>\n",
3650 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun); 3649 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
3651 if (devip) { 3650 if (devip) {
3652 /* make this slot available for re-use */ 3651 /* make this slot available for re-use */
@@ -3897,8 +3896,7 @@ static void __init sdebug_build_parts(unsigned char *ramp,
3897 return; 3896 return;
3898 if (scsi_debug_num_parts > SDEBUG_MAX_PARTS) { 3897 if (scsi_debug_num_parts > SDEBUG_MAX_PARTS) {
3899 scsi_debug_num_parts = SDEBUG_MAX_PARTS; 3898 scsi_debug_num_parts = SDEBUG_MAX_PARTS;
3900 pr_warn("%s: reducing partitions to %d\n", __func__, 3899 pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
3901 SDEBUG_MAX_PARTS);
3902 } 3900 }
3903 num_sectors = (int)sdebug_store_sectors; 3901 num_sectors = (int)sdebug_store_sectors;
3904 sectors_per_part = (num_sectors - sdebug_sectors_per) 3902 sectors_per_part = (num_sectors - sdebug_sectors_per)
@@ -3942,14 +3940,20 @@ schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
3942 unsigned long iflags; 3940 unsigned long iflags;
3943 int k, num_in_q, qdepth, inject; 3941 int k, num_in_q, qdepth, inject;
3944 struct sdebug_queued_cmd *sqcp = NULL; 3942 struct sdebug_queued_cmd *sqcp = NULL;
3945 struct scsi_device *sdp = cmnd->device; 3943 struct scsi_device *sdp;
3944
3945 /* this should never happen */
3946 if (WARN_ON(!cmnd))
3947 return SCSI_MLQUEUE_HOST_BUSY;
3946 3948
3947 if (NULL == cmnd || NULL == devip) { 3949 if (NULL == devip) {
3948 pr_warn("%s: called with NULL cmnd or devip pointer\n", 3950 pr_warn("called devip == NULL\n");
3949 __func__);
3950 /* no particularly good error to report back */ 3951 /* no particularly good error to report back */
3951 return SCSI_MLQUEUE_HOST_BUSY; 3952 return SCSI_MLQUEUE_HOST_BUSY;
3952 } 3953 }
3954
3955 sdp = cmnd->device;
3956
3953 if ((scsi_result) && (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)) 3957 if ((scsi_result) && (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
3954 sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n", 3958 sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
3955 __func__, scsi_result); 3959 __func__, scsi_result);
@@ -4383,8 +4387,7 @@ static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
4383 4387
4384 fake_storep = vmalloc(sz); 4388 fake_storep = vmalloc(sz);
4385 if (NULL == fake_storep) { 4389 if (NULL == fake_storep) {
4386 pr_err("%s: out of memory, 9\n", 4390 pr_err("out of memory, 9\n");
4387 __func__);
4388 return -ENOMEM; 4391 return -ENOMEM;
4389 } 4392 }
4390 memset(fake_storep, 0, sz); 4393 memset(fake_storep, 0, sz);
@@ -4784,8 +4787,7 @@ static int __init scsi_debug_init(void)
4784 atomic_set(&retired_max_queue, 0); 4787 atomic_set(&retired_max_queue, 0);
4785 4788
4786 if (scsi_debug_ndelay >= 1000000000) { 4789 if (scsi_debug_ndelay >= 1000000000) {
4787 pr_warn("%s: ndelay must be less than 1 second, ignored\n", 4790 pr_warn("ndelay must be less than 1 second, ignored\n");
4788 __func__);
4789 scsi_debug_ndelay = 0; 4791 scsi_debug_ndelay = 0;
4790 } else if (scsi_debug_ndelay > 0) 4792 } else if (scsi_debug_ndelay > 0)
4791 scsi_debug_delay = DELAY_OVERRIDDEN; 4793 scsi_debug_delay = DELAY_OVERRIDDEN;
@@ -4797,8 +4799,7 @@ static int __init scsi_debug_init(void)
4797 case 4096: 4799 case 4096:
4798 break; 4800 break;
4799 default: 4801 default:
4800 pr_err("%s: invalid sector_size %d\n", __func__, 4802 pr_err("invalid sector_size %d\n", scsi_debug_sector_size);
4801 scsi_debug_sector_size);
4802 return -EINVAL; 4803 return -EINVAL;
4803 } 4804 }
4804 4805
@@ -4811,29 +4812,28 @@ static int __init scsi_debug_init(void)
4811 break; 4812 break;
4812 4813
4813 default: 4814 default:
4814 pr_err("%s: dif must be 0, 1, 2 or 3\n", __func__); 4815 pr_err("dif must be 0, 1, 2 or 3\n");
4815 return -EINVAL; 4816 return -EINVAL;
4816 } 4817 }
4817 4818
4818 if (scsi_debug_guard > 1) { 4819 if (scsi_debug_guard > 1) {
4819 pr_err("%s: guard must be 0 or 1\n", __func__); 4820 pr_err("guard must be 0 or 1\n");
4820 return -EINVAL; 4821 return -EINVAL;
4821 } 4822 }
4822 4823
4823 if (scsi_debug_ato > 1) { 4824 if (scsi_debug_ato > 1) {
4824 pr_err("%s: ato must be 0 or 1\n", __func__); 4825 pr_err("ato must be 0 or 1\n");
4825 return -EINVAL; 4826 return -EINVAL;
4826 } 4827 }
4827 4828
4828 if (scsi_debug_physblk_exp > 15) { 4829 if (scsi_debug_physblk_exp > 15) {
4829 pr_err("%s: invalid physblk_exp %u\n", __func__, 4830 pr_err("invalid physblk_exp %u\n", scsi_debug_physblk_exp);
4830 scsi_debug_physblk_exp);
4831 return -EINVAL; 4831 return -EINVAL;
4832 } 4832 }
4833 4833
4834 if (scsi_debug_lowest_aligned > 0x3fff) { 4834 if (scsi_debug_lowest_aligned > 0x3fff) {
4835 pr_err("%s: lowest_aligned too big: %u\n", __func__, 4835 pr_err("lowest_aligned too big: %u\n",
4836 scsi_debug_lowest_aligned); 4836 scsi_debug_lowest_aligned);
4837 return -EINVAL; 4837 return -EINVAL;
4838 } 4838 }
4839 4839
@@ -4863,7 +4863,7 @@ static int __init scsi_debug_init(void)
4863 if (0 == scsi_debug_fake_rw) { 4863 if (0 == scsi_debug_fake_rw) {
4864 fake_storep = vmalloc(sz); 4864 fake_storep = vmalloc(sz);
4865 if (NULL == fake_storep) { 4865 if (NULL == fake_storep) {
4866 pr_err("%s: out of memory, 1\n", __func__); 4866 pr_err("out of memory, 1\n");
4867 return -ENOMEM; 4867 return -ENOMEM;
4868 } 4868 }
4869 memset(fake_storep, 0, sz); 4869 memset(fake_storep, 0, sz);
@@ -4877,11 +4877,10 @@ static int __init scsi_debug_init(void)
4877 dif_size = sdebug_store_sectors * sizeof(struct sd_dif_tuple); 4877 dif_size = sdebug_store_sectors * sizeof(struct sd_dif_tuple);
4878 dif_storep = vmalloc(dif_size); 4878 dif_storep = vmalloc(dif_size);
4879 4879
4880 pr_err("%s: dif_storep %u bytes @ %p\n", __func__, dif_size, 4880 pr_err("dif_storep %u bytes @ %p\n", dif_size, dif_storep);
4881 dif_storep);
4882 4881
4883 if (dif_storep == NULL) { 4882 if (dif_storep == NULL) {
4884 pr_err("%s: out of mem. (DIX)\n", __func__); 4883 pr_err("out of mem. (DIX)\n");
4885 ret = -ENOMEM; 4884 ret = -ENOMEM;
4886 goto free_vm; 4885 goto free_vm;
4887 } 4886 }
@@ -4903,18 +4902,17 @@ static int __init scsi_debug_init(void)
4903 if (scsi_debug_unmap_alignment && 4902 if (scsi_debug_unmap_alignment &&
4904 scsi_debug_unmap_granularity <= 4903 scsi_debug_unmap_granularity <=
4905 scsi_debug_unmap_alignment) { 4904 scsi_debug_unmap_alignment) {
4906 pr_err("%s: ERR: unmap_granularity <= unmap_alignment\n", 4905 pr_err("ERR: unmap_granularity <= unmap_alignment\n");
4907 __func__);
4908 return -EINVAL; 4906 return -EINVAL;
4909 } 4907 }
4910 4908
4911 map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1; 4909 map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
4912 map_storep = vmalloc(BITS_TO_LONGS(map_size) * sizeof(long)); 4910 map_storep = vmalloc(BITS_TO_LONGS(map_size) * sizeof(long));
4913 4911
4914 pr_info("%s: %lu provisioning blocks\n", __func__, map_size); 4912 pr_info("%lu provisioning blocks\n", map_size);
4915 4913
4916 if (map_storep == NULL) { 4914 if (map_storep == NULL) {
4917 pr_err("%s: out of mem. (MAP)\n", __func__); 4915 pr_err("out of mem. (MAP)\n");
4918 ret = -ENOMEM; 4916 ret = -ENOMEM;
4919 goto free_vm; 4917 goto free_vm;
4920 } 4918 }
@@ -4928,18 +4926,18 @@ static int __init scsi_debug_init(void)
4928 4926
4929 pseudo_primary = root_device_register("pseudo_0"); 4927 pseudo_primary = root_device_register("pseudo_0");
4930 if (IS_ERR(pseudo_primary)) { 4928 if (IS_ERR(pseudo_primary)) {
4931 pr_warn("%s: root_device_register() error\n", __func__); 4929 pr_warn("root_device_register() error\n");
4932 ret = PTR_ERR(pseudo_primary); 4930 ret = PTR_ERR(pseudo_primary);
4933 goto free_vm; 4931 goto free_vm;
4934 } 4932 }
4935 ret = bus_register(&pseudo_lld_bus); 4933 ret = bus_register(&pseudo_lld_bus);
4936 if (ret < 0) { 4934 if (ret < 0) {
4937 pr_warn("%s: bus_register error: %d\n", __func__, ret); 4935 pr_warn("bus_register error: %d\n", ret);
4938 goto dev_unreg; 4936 goto dev_unreg;
4939 } 4937 }
4940 ret = driver_register(&sdebug_driverfs_driver); 4938 ret = driver_register(&sdebug_driverfs_driver);
4941 if (ret < 0) { 4939 if (ret < 0) {
4942 pr_warn("%s: driver_register error: %d\n", __func__, ret); 4940 pr_warn("driver_register error: %d\n", ret);
4943 goto bus_unreg; 4941 goto bus_unreg;
4944 } 4942 }
4945 4943
@@ -4948,16 +4946,14 @@ static int __init scsi_debug_init(void)
4948 4946
4949 for (k = 0; k < host_to_add; k++) { 4947 for (k = 0; k < host_to_add; k++) {
4950 if (sdebug_add_adapter()) { 4948 if (sdebug_add_adapter()) {
4951 pr_err("%s: sdebug_add_adapter failed k=%d\n", 4949 pr_err("sdebug_add_adapter failed k=%d\n", k);
4952 __func__, k);
4953 break; 4950 break;
4954 } 4951 }
4955 } 4952 }
4956 4953
4957 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) { 4954 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
4958 pr_info("%s: built %d host(s)\n", __func__, 4955 pr_info("built %d host(s)\n", scsi_debug_add_host);
4959 scsi_debug_add_host); 4956
4960 }
4961 return 0; 4957 return 0;
4962 4958
4963bus_unreg: 4959bus_unreg:
@@ -4965,10 +4961,8 @@ bus_unreg:
4965dev_unreg: 4961dev_unreg:
4966 root_device_unregister(pseudo_primary); 4962 root_device_unregister(pseudo_primary);
4967free_vm: 4963free_vm:
4968 if (map_storep) 4964 vfree(map_storep);
4969 vfree(map_storep); 4965 vfree(dif_storep);
4970 if (dif_storep)
4971 vfree(dif_storep);
4972 vfree(fake_storep); 4966 vfree(fake_storep);
4973 4967
4974 return ret; 4968 return ret;
@@ -4986,9 +4980,7 @@ static void __exit scsi_debug_exit(void)
4986 bus_unregister(&pseudo_lld_bus); 4980 bus_unregister(&pseudo_lld_bus);
4987 root_device_unregister(pseudo_primary); 4981 root_device_unregister(pseudo_primary);
4988 4982
4989 if (dif_storep) 4983 vfree(dif_storep);
4990 vfree(dif_storep);
4991
4992 vfree(fake_storep); 4984 vfree(fake_storep);
4993} 4985}
4994 4986
@@ -5012,8 +5004,7 @@ static int sdebug_add_adapter(void)
5012 5004
5013 sdbg_host = kzalloc(sizeof(*sdbg_host),GFP_KERNEL); 5005 sdbg_host = kzalloc(sizeof(*sdbg_host),GFP_KERNEL);
5014 if (NULL == sdbg_host) { 5006 if (NULL == sdbg_host) {
5015 printk(KERN_ERR "%s: out of memory at line %d\n", 5007 pr_err("out of memory at line %d\n", __LINE__);
5016 __func__, __LINE__);
5017 return -ENOMEM; 5008 return -ENOMEM;
5018 } 5009 }
5019 5010
@@ -5023,8 +5014,7 @@ static int sdebug_add_adapter(void)
5023 for (k = 0; k < devs_per_host; k++) { 5014 for (k = 0; k < devs_per_host; k++) {
5024 sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL); 5015 sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
5025 if (!sdbg_devinfo) { 5016 if (!sdbg_devinfo) {
5026 printk(KERN_ERR "%s: out of memory at line %d\n", 5017 pr_err("out of memory at line %d\n", __LINE__);
5027 __func__, __LINE__);
5028 error = -ENOMEM; 5018 error = -ENOMEM;
5029 goto clean; 5019 goto clean;
5030 } 5020 }
@@ -5178,7 +5168,7 @@ scsi_debug_queuecommand(struct scsi_cmnd *scp)
5178 } 5168 }
5179 sdev_printk(KERN_INFO, sdp, "%s: cmd %s\n", my_name, b); 5169 sdev_printk(KERN_INFO, sdp, "%s: cmd %s\n", my_name, b);
5180 } 5170 }
5181 has_wlun_rl = (sdp->lun == SAM2_WLUN_REPORT_LUNS); 5171 has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
5182 if ((sdp->lun >= scsi_debug_max_luns) && !has_wlun_rl) 5172 if ((sdp->lun >= scsi_debug_max_luns) && !has_wlun_rl)
5183 return schedule_resp(scp, NULL, errsts_no_connect, 0); 5173 return schedule_resp(scp, NULL, errsts_no_connect, 0);
5184 5174
@@ -5338,7 +5328,7 @@ static int sdebug_driver_probe(struct device * dev)
5338 sdebug_driver_template.use_clustering = ENABLE_CLUSTERING; 5328 sdebug_driver_template.use_clustering = ENABLE_CLUSTERING;
5339 hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host)); 5329 hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
5340 if (NULL == hpnt) { 5330 if (NULL == hpnt) {
5341 pr_err("%s: scsi_host_alloc failed\n", __func__); 5331 pr_err("scsi_host_alloc failed\n");
5342 error = -ENODEV; 5332 error = -ENODEV;
5343 return error; 5333 return error;
5344 } 5334 }
@@ -5349,7 +5339,8 @@ static int sdebug_driver_probe(struct device * dev)
5349 hpnt->max_id = scsi_debug_num_tgts + 1; 5339 hpnt->max_id = scsi_debug_num_tgts + 1;
5350 else 5340 else
5351 hpnt->max_id = scsi_debug_num_tgts; 5341 hpnt->max_id = scsi_debug_num_tgts;
5352 hpnt->max_lun = SAM2_WLUN_REPORT_LUNS; /* = scsi_debug_max_luns; */ 5342 /* = scsi_debug_max_luns; */
5343 hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
5353 5344
5354 host_prot = 0; 5345 host_prot = 0;
5355 5346
@@ -5381,7 +5372,7 @@ static int sdebug_driver_probe(struct device * dev)
5381 5372
5382 scsi_host_set_prot(hpnt, host_prot); 5373 scsi_host_set_prot(hpnt, host_prot);
5383 5374
5384 printk(KERN_INFO "scsi_debug: host protection%s%s%s%s%s%s%s\n", 5375 pr_info("host protection%s%s%s%s%s%s%s\n",
5385 (host_prot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "", 5376 (host_prot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
5386 (host_prot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "", 5377 (host_prot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
5387 (host_prot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "", 5378 (host_prot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
@@ -5409,7 +5400,7 @@ static int sdebug_driver_probe(struct device * dev)
5409 5400
5410 error = scsi_add_host(hpnt, &sdbg_host->dev); 5401 error = scsi_add_host(hpnt, &sdbg_host->dev);
5411 if (error) { 5402 if (error) {
5412 printk(KERN_ERR "%s: scsi_add_host failed\n", __func__); 5403 pr_err("scsi_add_host failed\n");
5413 error = -ENODEV; 5404 error = -ENODEV;
5414 scsi_host_put(hpnt); 5405 scsi_host_put(hpnt);
5415 } else 5406 } else
@@ -5426,8 +5417,7 @@ static int sdebug_driver_remove(struct device * dev)
5426 sdbg_host = to_sdebug_host(dev); 5417 sdbg_host = to_sdebug_host(dev);
5427 5418
5428 if (!sdbg_host) { 5419 if (!sdbg_host) {
5429 printk(KERN_ERR "%s: Unable to locate host info\n", 5420 pr_err("Unable to locate host info\n");
5430 __func__);
5431 return -ENODEV; 5421 return -ENODEV;
5432 } 5422 }
5433 5423
diff --git a/drivers/scsi/scsi_dh.c b/drivers/scsi/scsi_dh.c
new file mode 100644
index 000000000000..edb044a7b56d
--- /dev/null
+++ b/drivers/scsi/scsi_dh.c
@@ -0,0 +1,437 @@
1/*
2 * SCSI device handler infrastruture.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License as published by the
6 * Free Software Foundation; either version 2 of the License, or (at your
7 * option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License along
15 * with this program; if not, write to the Free Software Foundation, Inc.,
16 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17 *
18 * Copyright IBM Corporation, 2007
19 * Authors:
20 * Chandra Seetharaman <sekharan@us.ibm.com>
21 * Mike Anderson <andmike@linux.vnet.ibm.com>
22 */
23
24#include <linux/slab.h>
25#include <linux/module.h>
26#include <scsi/scsi_dh.h>
27#include "scsi_priv.h"
28
29static DEFINE_SPINLOCK(list_lock);
30static LIST_HEAD(scsi_dh_list);
31
32struct scsi_dh_blist {
33 const char *vendor;
34 const char *model;
35 const char *driver;
36};
37
38static const struct scsi_dh_blist scsi_dh_blist[] = {
39 {"DGC", "RAID", "clariion" },
40 {"DGC", "DISK", "clariion" },
41 {"DGC", "VRAID", "clariion" },
42
43 {"COMPAQ", "MSA1000 VOLUME", "hp_sw" },
44 {"COMPAQ", "HSV110", "hp_sw" },
45 {"HP", "HSV100", "hp_sw"},
46 {"DEC", "HSG80", "hp_sw"},
47
48 {"IBM", "1722", "rdac", },
49 {"IBM", "1724", "rdac", },
50 {"IBM", "1726", "rdac", },
51 {"IBM", "1742", "rdac", },
52 {"IBM", "1745", "rdac", },
53 {"IBM", "1746", "rdac", },
54 {"IBM", "1813", "rdac", },
55 {"IBM", "1814", "rdac", },
56 {"IBM", "1815", "rdac", },
57 {"IBM", "1818", "rdac", },
58 {"IBM", "3526", "rdac", },
59 {"SGI", "TP9", "rdac", },
60 {"SGI", "IS", "rdac", },
61 {"STK", "OPENstorage D280", "rdac", },
62 {"STK", "FLEXLINE 380", "rdac", },
63 {"SUN", "CSM", "rdac", },
64 {"SUN", "LCSM100", "rdac", },
65 {"SUN", "STK6580_6780", "rdac", },
66 {"SUN", "SUN_6180", "rdac", },
67 {"SUN", "ArrayStorage", "rdac", },
68 {"DELL", "MD3", "rdac", },
69 {"NETAPP", "INF-01-00", "rdac", },
70 {"LSI", "INF-01-00", "rdac", },
71 {"ENGENIO", "INF-01-00", "rdac", },
72 {NULL, NULL, NULL },
73};
74
75static const char *
76scsi_dh_find_driver(struct scsi_device *sdev)
77{
78 const struct scsi_dh_blist *b;
79
80 if (scsi_device_tpgs(sdev))
81 return "alua";
82
83 for (b = scsi_dh_blist; b->vendor; b++) {
84 if (!strncmp(sdev->vendor, b->vendor, strlen(b->vendor)) &&
85 !strncmp(sdev->model, b->model, strlen(b->model))) {
86 return b->driver;
87 }
88 }
89 return NULL;
90}
91
92
93static struct scsi_device_handler *__scsi_dh_lookup(const char *name)
94{
95 struct scsi_device_handler *tmp, *found = NULL;
96
97 spin_lock(&list_lock);
98 list_for_each_entry(tmp, &scsi_dh_list, list) {
99 if (!strncmp(tmp->name, name, strlen(tmp->name))) {
100 found = tmp;
101 break;
102 }
103 }
104 spin_unlock(&list_lock);
105 return found;
106}
107
108static struct scsi_device_handler *scsi_dh_lookup(const char *name)
109{
110 struct scsi_device_handler *dh;
111
112 dh = __scsi_dh_lookup(name);
113 if (!dh) {
114 request_module(name);
115 dh = __scsi_dh_lookup(name);
116 }
117
118 return dh;
119}
120
121/*
122 * scsi_dh_handler_attach - Attach a device handler to a device
123 * @sdev - SCSI device the device handler should attach to
124 * @scsi_dh - The device handler to attach
125 */
126static int scsi_dh_handler_attach(struct scsi_device *sdev,
127 struct scsi_device_handler *scsi_dh)
128{
129 int error;
130
131 if (!try_module_get(scsi_dh->module))
132 return -EINVAL;
133
134 error = scsi_dh->attach(sdev);
135 if (error) {
136 sdev_printk(KERN_ERR, sdev, "%s: Attach failed (%d)\n",
137 scsi_dh->name, error);
138 module_put(scsi_dh->module);
139 } else
140 sdev->handler = scsi_dh;
141
142 return error;
143}
144
145/*
146 * scsi_dh_handler_detach - Detach a device handler from a device
147 * @sdev - SCSI device the device handler should be detached from
148 */
149static void scsi_dh_handler_detach(struct scsi_device *sdev)
150{
151 sdev->handler->detach(sdev);
152 sdev_printk(KERN_NOTICE, sdev, "%s: Detached\n", sdev->handler->name);
153 module_put(sdev->handler->module);
154}
155
156/*
157 * Functions for sysfs attribute 'dh_state'
158 */
159static ssize_t
160store_dh_state(struct device *dev, struct device_attribute *attr,
161 const char *buf, size_t count)
162{
163 struct scsi_device *sdev = to_scsi_device(dev);
164 struct scsi_device_handler *scsi_dh;
165 int err = -EINVAL;
166
167 if (sdev->sdev_state == SDEV_CANCEL ||
168 sdev->sdev_state == SDEV_DEL)
169 return -ENODEV;
170
171 if (!sdev->handler) {
172 /*
173 * Attach to a device handler
174 */
175 scsi_dh = scsi_dh_lookup(buf);
176 if (!scsi_dh)
177 return err;
178 err = scsi_dh_handler_attach(sdev, scsi_dh);
179 } else {
180 if (!strncmp(buf, "detach", 6)) {
181 /*
182 * Detach from a device handler
183 */
184 sdev_printk(KERN_WARNING, sdev,
185 "can't detach handler %s.\n",
186 sdev->handler->name);
187 err = -EINVAL;
188 } else if (!strncmp(buf, "activate", 8)) {
189 /*
190 * Activate a device handler
191 */
192 if (sdev->handler->activate)
193 err = sdev->handler->activate(sdev, NULL, NULL);
194 else
195 err = 0;
196 }
197 }
198
199 return err<0?err:count;
200}
201
202static ssize_t
203show_dh_state(struct device *dev, struct device_attribute *attr, char *buf)
204{
205 struct scsi_device *sdev = to_scsi_device(dev);
206
207 if (!sdev->handler)
208 return snprintf(buf, 20, "detached\n");
209
210 return snprintf(buf, 20, "%s\n", sdev->handler->name);
211}
212
213static struct device_attribute scsi_dh_state_attr =
214 __ATTR(dh_state, S_IRUGO | S_IWUSR, show_dh_state,
215 store_dh_state);
216
217int scsi_dh_add_device(struct scsi_device *sdev)
218{
219 struct scsi_device_handler *devinfo = NULL;
220 const char *drv;
221 int err;
222
223 err = device_create_file(&sdev->sdev_gendev, &scsi_dh_state_attr);
224 if (err)
225 return err;
226
227 drv = scsi_dh_find_driver(sdev);
228 if (drv)
229 devinfo = scsi_dh_lookup(drv);
230 if (devinfo)
231 err = scsi_dh_handler_attach(sdev, devinfo);
232 return err;
233}
234
235void scsi_dh_remove_device(struct scsi_device *sdev)
236{
237 if (sdev->handler)
238 scsi_dh_handler_detach(sdev);
239 device_remove_file(&sdev->sdev_gendev, &scsi_dh_state_attr);
240}
241
242/*
243 * scsi_register_device_handler - register a device handler personality
244 * module.
245 * @scsi_dh - device handler to be registered.
246 *
247 * Returns 0 on success, -EBUSY if handler already registered.
248 */
249int scsi_register_device_handler(struct scsi_device_handler *scsi_dh)
250{
251 if (__scsi_dh_lookup(scsi_dh->name))
252 return -EBUSY;
253
254 if (!scsi_dh->attach || !scsi_dh->detach)
255 return -EINVAL;
256
257 spin_lock(&list_lock);
258 list_add(&scsi_dh->list, &scsi_dh_list);
259 spin_unlock(&list_lock);
260
261 printk(KERN_INFO "%s: device handler registered\n", scsi_dh->name);
262
263 return SCSI_DH_OK;
264}
265EXPORT_SYMBOL_GPL(scsi_register_device_handler);
266
267/*
268 * scsi_unregister_device_handler - register a device handler personality
269 * module.
270 * @scsi_dh - device handler to be unregistered.
271 *
272 * Returns 0 on success, -ENODEV if handler not registered.
273 */
274int scsi_unregister_device_handler(struct scsi_device_handler *scsi_dh)
275{
276 if (!__scsi_dh_lookup(scsi_dh->name))
277 return -ENODEV;
278
279 spin_lock(&list_lock);
280 list_del(&scsi_dh->list);
281 spin_unlock(&list_lock);
282 printk(KERN_INFO "%s: device handler unregistered\n", scsi_dh->name);
283
284 return SCSI_DH_OK;
285}
286EXPORT_SYMBOL_GPL(scsi_unregister_device_handler);
287
288static struct scsi_device *get_sdev_from_queue(struct request_queue *q)
289{
290 struct scsi_device *sdev;
291 unsigned long flags;
292
293 spin_lock_irqsave(q->queue_lock, flags);
294 sdev = q->queuedata;
295 if (!sdev || !get_device(&sdev->sdev_gendev))
296 sdev = NULL;
297 spin_unlock_irqrestore(q->queue_lock, flags);
298
299 return sdev;
300}
301
302/*
303 * scsi_dh_activate - activate the path associated with the scsi_device
304 * corresponding to the given request queue.
305 * Returns immediately without waiting for activation to be completed.
306 * @q - Request queue that is associated with the scsi_device to be
307 * activated.
308 * @fn - Function to be called upon completion of the activation.
309 * Function fn is called with data (below) and the error code.
310 * Function fn may be called from the same calling context. So,
311 * do not hold the lock in the caller which may be needed in fn.
312 * @data - data passed to the function fn upon completion.
313 *
314 */
315int scsi_dh_activate(struct request_queue *q, activate_complete fn, void *data)
316{
317 struct scsi_device *sdev;
318 int err = SCSI_DH_NOSYS;
319
320 sdev = get_sdev_from_queue(q);
321 if (!sdev) {
322 if (fn)
323 fn(data, err);
324 return err;
325 }
326
327 if (!sdev->handler)
328 goto out_fn;
329 err = SCSI_DH_NOTCONN;
330 if (sdev->sdev_state == SDEV_CANCEL ||
331 sdev->sdev_state == SDEV_DEL)
332 goto out_fn;
333
334 err = SCSI_DH_DEV_OFFLINED;
335 if (sdev->sdev_state == SDEV_OFFLINE)
336 goto out_fn;
337
338 if (sdev->handler->activate)
339 err = sdev->handler->activate(sdev, fn, data);
340
341out_put_device:
342 put_device(&sdev->sdev_gendev);
343 return err;
344
345out_fn:
346 if (fn)
347 fn(data, err);
348 goto out_put_device;
349}
350EXPORT_SYMBOL_GPL(scsi_dh_activate);
351
352/*
353 * scsi_dh_set_params - set the parameters for the device as per the
354 * string specified in params.
355 * @q - Request queue that is associated with the scsi_device for
356 * which the parameters to be set.
357 * @params - parameters in the following format
358 * "no_of_params\0param1\0param2\0param3\0...\0"
359 * for example, string for 2 parameters with value 10 and 21
360 * is specified as "2\010\021\0".
361 */
362int scsi_dh_set_params(struct request_queue *q, const char *params)
363{
364 struct scsi_device *sdev;
365 int err = -SCSI_DH_NOSYS;
366
367 sdev = get_sdev_from_queue(q);
368 if (!sdev)
369 return err;
370
371 if (sdev->handler && sdev->handler->set_params)
372 err = sdev->handler->set_params(sdev, params);
373 put_device(&sdev->sdev_gendev);
374 return err;
375}
376EXPORT_SYMBOL_GPL(scsi_dh_set_params);
377
378/*
379 * scsi_dh_attach - Attach device handler
380 * @q - Request queue that is associated with the scsi_device
381 * the handler should be attached to
382 * @name - name of the handler to attach
383 */
384int scsi_dh_attach(struct request_queue *q, const char *name)
385{
386 struct scsi_device *sdev;
387 struct scsi_device_handler *scsi_dh;
388 int err = 0;
389
390 sdev = get_sdev_from_queue(q);
391 if (!sdev)
392 return -ENODEV;
393
394 scsi_dh = scsi_dh_lookup(name);
395 if (!scsi_dh) {
396 err = -EINVAL;
397 goto out_put_device;
398 }
399
400 if (sdev->handler) {
401 if (sdev->handler != scsi_dh)
402 err = -EBUSY;
403 goto out_put_device;
404 }
405
406 err = scsi_dh_handler_attach(sdev, scsi_dh);
407
408out_put_device:
409 put_device(&sdev->sdev_gendev);
410 return err;
411}
412EXPORT_SYMBOL_GPL(scsi_dh_attach);
413
414/*
415 * scsi_dh_attached_handler_name - Get attached device handler's name
416 * @q - Request queue that is associated with the scsi_device
417 * that may have a device handler attached
418 * @gfp - the GFP mask used in the kmalloc() call when allocating memory
419 *
420 * Returns name of attached handler, NULL if no handler is attached.
421 * Caller must take care to free the returned string.
422 */
423const char *scsi_dh_attached_handler_name(struct request_queue *q, gfp_t gfp)
424{
425 struct scsi_device *sdev;
426 const char *handler_name = NULL;
427
428 sdev = get_sdev_from_queue(q);
429 if (!sdev)
430 return NULL;
431
432 if (sdev->handler)
433 handler_name = kstrdup(sdev->handler->name, gfp);
434 put_device(&sdev->sdev_gendev);
435 return handler_name;
436}
437EXPORT_SYMBOL_GPL(scsi_dh_attached_handler_name);
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index afd34a608fe7..66a96cd98b97 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -33,9 +33,11 @@
33#include <scsi/scsi_device.h> 33#include <scsi/scsi_device.h>
34#include <scsi/scsi_driver.h> 34#include <scsi/scsi_driver.h>
35#include <scsi/scsi_eh.h> 35#include <scsi/scsi_eh.h>
36#include <scsi/scsi_common.h>
36#include <scsi/scsi_transport.h> 37#include <scsi/scsi_transport.h>
37#include <scsi/scsi_host.h> 38#include <scsi/scsi_host.h>
38#include <scsi/scsi_ioctl.h> 39#include <scsi/scsi_ioctl.h>
40#include <scsi/scsi_dh.h>
39#include <scsi/sg.h> 41#include <scsi/sg.h>
40 42
41#include "scsi_priv.h" 43#include "scsi_priv.h"
@@ -463,11 +465,10 @@ static int scsi_check_sense(struct scsi_cmnd *scmd)
463 if (scsi_sense_is_deferred(&sshdr)) 465 if (scsi_sense_is_deferred(&sshdr))
464 return NEEDS_RETRY; 466 return NEEDS_RETRY;
465 467
466 if (sdev->scsi_dh_data && sdev->scsi_dh_data->scsi_dh && 468 if (sdev->handler && sdev->handler->check_sense) {
467 sdev->scsi_dh_data->scsi_dh->check_sense) {
468 int rc; 469 int rc;
469 470
470 rc = sdev->scsi_dh_data->scsi_dh->check_sense(sdev, &sshdr); 471 rc = sdev->handler->check_sense(sdev, &sshdr);
471 if (rc != SCSI_RETURN_NOT_HANDLED) 472 if (rc != SCSI_RETURN_NOT_HANDLED)
472 return rc; 473 return rc;
473 /* handler does not care. Drop down to default handling */ 474 /* handler does not care. Drop down to default handling */
@@ -2178,8 +2179,17 @@ int scsi_error_handler(void *data)
2178 * We never actually get interrupted because kthread_run 2179 * We never actually get interrupted because kthread_run
2179 * disables signal delivery for the created thread. 2180 * disables signal delivery for the created thread.
2180 */ 2181 */
2181 while (!kthread_should_stop()) { 2182 while (true) {
2183 /*
2184 * The sequence in kthread_stop() sets the stop flag first
2185 * then wakes the process. To avoid missed wakeups, the task
2186 * should always be in a non running state before the stop
2187 * flag is checked
2188 */
2182 set_current_state(TASK_INTERRUPTIBLE); 2189 set_current_state(TASK_INTERRUPTIBLE);
2190 if (kthread_should_stop())
2191 break;
2192
2183 if ((shost->host_failed == 0 && shost->host_eh_scheduled == 0) || 2193 if ((shost->host_failed == 0 && shost->host_eh_scheduled == 0) ||
2184 shost->host_failed != atomic_read(&shost->host_busy)) { 2194 shost->host_failed != atomic_read(&shost->host_busy)) {
2185 SCSI_LOG_ERROR_RECOVERY(1, 2195 SCSI_LOG_ERROR_RECOVERY(1,
@@ -2416,45 +2426,6 @@ bool scsi_command_normalize_sense(const struct scsi_cmnd *cmd,
2416EXPORT_SYMBOL(scsi_command_normalize_sense); 2426EXPORT_SYMBOL(scsi_command_normalize_sense);
2417 2427
2418/** 2428/**
2419 * scsi_sense_desc_find - search for a given descriptor type in descriptor sense data format.
2420 * @sense_buffer: byte array of descriptor format sense data
2421 * @sb_len: number of valid bytes in sense_buffer
2422 * @desc_type: value of descriptor type to find
2423 * (e.g. 0 -> information)
2424 *
2425 * Notes:
2426 * only valid when sense data is in descriptor format
2427 *
2428 * Return value:
2429 * pointer to start of (first) descriptor if found else NULL
2430 */
2431const u8 * scsi_sense_desc_find(const u8 * sense_buffer, int sb_len,
2432 int desc_type)
2433{
2434 int add_sen_len, add_len, desc_len, k;
2435 const u8 * descp;
2436
2437 if ((sb_len < 8) || (0 == (add_sen_len = sense_buffer[7])))
2438 return NULL;
2439 if ((sense_buffer[0] < 0x72) || (sense_buffer[0] > 0x73))
2440 return NULL;
2441 add_sen_len = (add_sen_len < (sb_len - 8)) ?
2442 add_sen_len : (sb_len - 8);
2443 descp = &sense_buffer[8];
2444 for (desc_len = 0, k = 0; k < add_sen_len; k += desc_len) {
2445 descp += desc_len;
2446 add_len = (k < (add_sen_len - 1)) ? descp[1]: -1;
2447 desc_len = add_len + 2;
2448 if (descp[0] == desc_type)
2449 return descp;
2450 if (add_len < 0) // short descriptor ??
2451 break;
2452 }
2453 return NULL;
2454}
2455EXPORT_SYMBOL(scsi_sense_desc_find);
2456
2457/**
2458 * scsi_get_sense_info_fld - get information field from sense data (either fixed or descriptor format) 2429 * scsi_get_sense_info_fld - get information field from sense data (either fixed or descriptor format)
2459 * @sense_buffer: byte array of sense data 2430 * @sense_buffer: byte array of sense data
2460 * @sb_len: number of valid bytes in sense_buffer 2431 * @sb_len: number of valid bytes in sense_buffer
@@ -2503,31 +2474,3 @@ int scsi_get_sense_info_fld(const u8 * sense_buffer, int sb_len,
2503 } 2474 }
2504} 2475}
2505EXPORT_SYMBOL(scsi_get_sense_info_fld); 2476EXPORT_SYMBOL(scsi_get_sense_info_fld);
2506
2507/**
2508 * scsi_build_sense_buffer - build sense data in a buffer
2509 * @desc: Sense format (non zero == descriptor format,
2510 * 0 == fixed format)
2511 * @buf: Where to build sense data
2512 * @key: Sense key
2513 * @asc: Additional sense code
2514 * @ascq: Additional sense code qualifier
2515 *
2516 **/
2517void scsi_build_sense_buffer(int desc, u8 *buf, u8 key, u8 asc, u8 ascq)
2518{
2519 if (desc) {
2520 buf[0] = 0x72; /* descriptor, current */
2521 buf[1] = key;
2522 buf[2] = asc;
2523 buf[3] = ascq;
2524 buf[7] = 0;
2525 } else {
2526 buf[0] = 0x70; /* fixed, current */
2527 buf[2] = key;
2528 buf[7] = 0xa;
2529 buf[12] = asc;
2530 buf[13] = ascq;
2531 }
2532}
2533EXPORT_SYMBOL(scsi_build_sense_buffer);
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 882864f5cbae..cbfc5990052b 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -31,6 +31,7 @@
31#include <scsi/scsi_driver.h> 31#include <scsi/scsi_driver.h>
32#include <scsi/scsi_eh.h> 32#include <scsi/scsi_eh.h>
33#include <scsi/scsi_host.h> 33#include <scsi/scsi_host.h>
34#include <scsi/scsi_dh.h>
34 35
35#include <trace/events/scsi.h> 36#include <trace/events/scsi.h>
36 37
@@ -1248,9 +1249,8 @@ static int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)
1248{ 1249{
1249 struct scsi_cmnd *cmd = req->special; 1250 struct scsi_cmnd *cmd = req->special;
1250 1251
1251 if (unlikely(sdev->scsi_dh_data && sdev->scsi_dh_data->scsi_dh 1252 if (unlikely(sdev->handler && sdev->handler->prep_fn)) {
1252 && sdev->scsi_dh_data->scsi_dh->prep_fn)) { 1253 int ret = sdev->handler->prep_fn(sdev, req);
1253 int ret = sdev->scsi_dh_data->scsi_dh->prep_fn(sdev, req);
1254 if (ret != BLKPREP_OK) 1254 if (ret != BLKPREP_OK)
1255 return ret; 1255 return ret;
1256 } 1256 }
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index e3902fc66278..644bb7339b55 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -170,6 +170,15 @@ static inline void scsi_autopm_put_host(struct Scsi_Host *h) {}
170extern struct async_domain scsi_sd_pm_domain; 170extern struct async_domain scsi_sd_pm_domain;
171extern struct async_domain scsi_sd_probe_domain; 171extern struct async_domain scsi_sd_probe_domain;
172 172
173/* scsi_dh.c */
174#ifdef CONFIG_SCSI_DH
175int scsi_dh_add_device(struct scsi_device *sdev);
176void scsi_dh_remove_device(struct scsi_device *sdev);
177#else
178static inline int scsi_dh_add_device(struct scsi_device *sdev) { return 0; }
179static inline void scsi_dh_remove_device(struct scsi_device *sdev) { }
180#endif
181
173/* 182/*
174 * internal scsi timeout functions: for use by mid-layer and transport 183 * internal scsi timeout functions: for use by mid-layer and transport
175 * classes. 184 * classes.
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 9ad41168d26d..b333389f248f 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -1030,11 +1030,20 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
1030 "failed to add device: %d\n", error); 1030 "failed to add device: %d\n", error);
1031 return error; 1031 return error;
1032 } 1032 }
1033
1034 error = scsi_dh_add_device(sdev);
1035 if (error) {
1036 sdev_printk(KERN_INFO, sdev,
1037 "failed to add device handler: %d\n", error);
1038 return error;
1039 }
1040
1033 device_enable_async_suspend(&sdev->sdev_dev); 1041 device_enable_async_suspend(&sdev->sdev_dev);
1034 error = device_add(&sdev->sdev_dev); 1042 error = device_add(&sdev->sdev_dev);
1035 if (error) { 1043 if (error) {
1036 sdev_printk(KERN_INFO, sdev, 1044 sdev_printk(KERN_INFO, sdev,
1037 "failed to add class device: %d\n", error); 1045 "failed to add class device: %d\n", error);
1046 scsi_dh_remove_device(sdev);
1038 device_del(&sdev->sdev_gendev); 1047 device_del(&sdev->sdev_gendev);
1039 return error; 1048 return error;
1040 } 1049 }
@@ -1074,6 +1083,7 @@ void __scsi_remove_device(struct scsi_device *sdev)
1074 bsg_unregister_queue(sdev->request_queue); 1083 bsg_unregister_queue(sdev->request_queue);
1075 device_unregister(&sdev->sdev_dev); 1084 device_unregister(&sdev->sdev_dev);
1076 transport_remove_device(dev); 1085 transport_remove_device(dev);
1086 scsi_dh_remove_device(sdev);
1077 device_del(dev); 1087 device_del(dev);
1078 } else 1088 } else
1079 put_device(&sdev->sdev_dev); 1089 put_device(&sdev->sdev_dev);
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index 9a058194b9bd..30d26e345dcc 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -1222,13 +1222,6 @@ show_sas_rphy_enclosure_identifier(struct device *dev,
1222 u64 identifier; 1222 u64 identifier;
1223 int error; 1223 int error;
1224 1224
1225 /*
1226 * Only devices behind an expander are supported, because the
1227 * enclosure identifier is a SMP feature.
1228 */
1229 if (scsi_is_sas_phy_local(phy))
1230 return -EINVAL;
1231
1232 error = i->f->get_enclosure_identifier(rphy, &identifier); 1225 error = i->f->get_enclosure_identifier(rphy, &identifier);
1233 if (error) 1226 if (error)
1234 return error; 1227 return error;
@@ -1248,9 +1241,6 @@ show_sas_rphy_bay_identifier(struct device *dev,
1248 struct sas_internal *i = to_sas_internal(shost->transportt); 1241 struct sas_internal *i = to_sas_internal(shost->transportt);
1249 int val; 1242 int val;
1250 1243
1251 if (scsi_is_sas_phy_local(phy))
1252 return -EINVAL;
1253
1254 val = i->f->get_bay_identifier(rphy); 1244 val = i->f->get_bay_identifier(rphy);
1255 if (val < 0) 1245 if (val < 0)
1256 return val; 1246 return val;
diff --git a/drivers/sh/intc/core.c b/drivers/sh/intc/core.c
index 043419dcee92..8e72bcbd3d6d 100644
--- a/drivers/sh/intc/core.c
+++ b/drivers/sh/intc/core.c
@@ -65,7 +65,7 @@ void intc_set_prio_level(unsigned int irq, unsigned int level)
65 raw_spin_unlock_irqrestore(&intc_big_lock, flags); 65 raw_spin_unlock_irqrestore(&intc_big_lock, flags);
66} 66}
67 67
68static void intc_redirect_irq(unsigned int irq, struct irq_desc *desc) 68static void intc_redirect_irq(struct irq_desc *desc)
69{ 69{
70 generic_handle_irq((unsigned int)irq_desc_get_handler_data(desc)); 70 generic_handle_irq((unsigned int)irq_desc_get_handler_data(desc));
71} 71}
diff --git a/drivers/sh/intc/internals.h b/drivers/sh/intc/internals.h
index 7dff08e2a071..6ce7f0d26dcf 100644
--- a/drivers/sh/intc/internals.h
+++ b/drivers/sh/intc/internals.h
@@ -99,15 +99,7 @@ static inline struct intc_desc_int *get_intc_desc(unsigned int irq)
99 */ 99 */
100static inline void activate_irq(int irq) 100static inline void activate_irq(int irq)
101{ 101{
102#ifdef CONFIG_ARM 102 irq_modify_status(irq, IRQ_NOREQUEST, IRQ_NOPROBE);
103 /* ARM requires an extra step to clear IRQ_NOREQUEST, which it
104 * sets on behalf of every irq_chip. Also sets IRQ_NOPROBE.
105 */
106 set_irq_flags(irq, IRQF_VALID);
107#else
108 /* same effect on other architectures */
109 irq_set_noprobe(irq);
110#endif
111} 103}
112 104
113static inline int intc_handle_int_cmp(const void *a, const void *b) 105static inline int intc_handle_int_cmp(const void *a, const void *b)
diff --git a/drivers/sh/intc/virq.c b/drivers/sh/intc/virq.c
index bafc51c6f0ba..e7899624aa0b 100644
--- a/drivers/sh/intc/virq.c
+++ b/drivers/sh/intc/virq.c
@@ -109,7 +109,7 @@ static int add_virq_to_pirq(unsigned int irq, unsigned int virq)
109 return 0; 109 return 0;
110} 110}
111 111
112static void intc_virq_handler(unsigned int __irq, struct irq_desc *desc) 112static void intc_virq_handler(struct irq_desc *desc)
113{ 113{
114 unsigned int irq = irq_desc_get_irq(desc); 114 unsigned int irq = irq_desc_get_irq(desc);
115 struct irq_data *data = irq_desc_get_irq_data(desc); 115 struct irq_data *data = irq_desc_get_irq_data(desc);
@@ -127,7 +127,7 @@ static void intc_virq_handler(unsigned int __irq, struct irq_desc *desc)
127 handle = (unsigned long)irq_desc_get_handler_data(vdesc); 127 handle = (unsigned long)irq_desc_get_handler_data(vdesc);
128 addr = INTC_REG(d, _INTC_ADDR_E(handle), 0); 128 addr = INTC_REG(d, _INTC_ADDR_E(handle), 0);
129 if (intc_reg_fns[_INTC_FN(handle)](addr, handle, 0)) 129 if (intc_reg_fns[_INTC_FN(handle)](addr, handle, 0))
130 generic_handle_irq_desc(entry->irq, vdesc); 130 generic_handle_irq_desc(vdesc);
131 } 131 }
132 } 132 }
133 133
diff --git a/drivers/sh/pm_runtime.c b/drivers/sh/pm_runtime.c
index d3d1891cda3c..25abd4eb7d10 100644
--- a/drivers/sh/pm_runtime.c
+++ b/drivers/sh/pm_runtime.c
@@ -35,20 +35,11 @@ static struct pm_clk_notifier_block platform_bus_notifier = {
35static int __init sh_pm_runtime_init(void) 35static int __init sh_pm_runtime_init(void)
36{ 36{
37 if (IS_ENABLED(CONFIG_ARCH_SHMOBILE_MULTI)) { 37 if (IS_ENABLED(CONFIG_ARCH_SHMOBILE_MULTI)) {
38 if (!of_machine_is_compatible("renesas,emev2") && 38 if (!of_find_compatible_node(NULL, NULL,
39 !of_machine_is_compatible("renesas,r7s72100") && 39 "renesas,cpg-mstp-clocks"))
40#ifndef CONFIG_PM_GENERIC_DOMAINS_OF 40 return 0;
41 !of_machine_is_compatible("renesas,r8a73a4") && 41 if (IS_ENABLED(CONFIG_PM_GENERIC_DOMAINS_OF) &&
42 !of_machine_is_compatible("renesas,r8a7740") && 42 of_find_node_with_property(NULL, "#power-domain-cells"))
43 !of_machine_is_compatible("renesas,sh73a0") &&
44#endif
45 !of_machine_is_compatible("renesas,r8a7778") &&
46 !of_machine_is_compatible("renesas,r8a7779") &&
47 !of_machine_is_compatible("renesas,r8a7790") &&
48 !of_machine_is_compatible("renesas,r8a7791") &&
49 !of_machine_is_compatible("renesas,r8a7792") &&
50 !of_machine_is_compatible("renesas,r8a7793") &&
51 !of_machine_is_compatible("renesas,r8a7794"))
52 return 0; 43 return 0;
53 } 44 }
54 45
diff --git a/drivers/soc/dove/pmu.c b/drivers/soc/dove/pmu.c
index 6792aae9e2e5..052aecf29893 100644
--- a/drivers/soc/dove/pmu.c
+++ b/drivers/soc/dove/pmu.c
@@ -222,9 +222,9 @@ static void __pmu_domain_register(struct pmu_domain *domain,
222} 222}
223 223
224/* PMU IRQ controller */ 224/* PMU IRQ controller */
225static void pmu_irq_handler(unsigned int irq, struct irq_desc *desc) 225static void pmu_irq_handler(struct irq_desc *desc)
226{ 226{
227 struct pmu_data *pmu = irq_get_handler_data(irq); 227 struct pmu_data *pmu = irq_desc_get_handler_data(desc);
228 struct irq_chip_generic *gc = pmu->irq_gc; 228 struct irq_chip_generic *gc = pmu->irq_gc;
229 struct irq_domain *domain = pmu->irq_domain; 229 struct irq_domain *domain = pmu->irq_domain;
230 void __iomem *base = gc->reg_base; 230 void __iomem *base = gc->reg_base;
@@ -232,7 +232,7 @@ static void pmu_irq_handler(unsigned int irq, struct irq_desc *desc)
232 u32 done = ~0; 232 u32 done = ~0;
233 233
234 if (stat == 0) { 234 if (stat == 0) {
235 handle_bad_irq(irq, desc); 235 handle_bad_irq(desc);
236 return; 236 return;
237 } 237 }
238 238
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
index bf9ed380bb1c..63318e2afba1 100644
--- a/drivers/spi/spi-atmel.c
+++ b/drivers/spi/spi-atmel.c
@@ -1720,6 +1720,7 @@ static int atmel_spi_runtime_resume(struct device *dev)
1720 return clk_prepare_enable(as->clk); 1720 return clk_prepare_enable(as->clk);
1721} 1721}
1722 1722
1723#ifdef CONFIG_PM_SLEEP
1723static int atmel_spi_suspend(struct device *dev) 1724static int atmel_spi_suspend(struct device *dev)
1724{ 1725{
1725 struct spi_master *master = dev_get_drvdata(dev); 1726 struct spi_master *master = dev_get_drvdata(dev);
@@ -1756,6 +1757,7 @@ static int atmel_spi_resume(struct device *dev)
1756 1757
1757 return ret; 1758 return ret;
1758} 1759}
1760#endif
1759 1761
1760static const struct dev_pm_ops atmel_spi_pm_ops = { 1762static const struct dev_pm_ops atmel_spi_pm_ops = {
1761 SET_SYSTEM_SLEEP_PM_OPS(atmel_spi_suspend, atmel_spi_resume) 1763 SET_SYSTEM_SLEEP_PM_OPS(atmel_spi_suspend, atmel_spi_resume)
diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c
index e7874a6171ec..3e8eeb23d4e9 100644
--- a/drivers/spi/spi-bcm2835.c
+++ b/drivers/spi/spi-bcm2835.c
@@ -386,14 +386,14 @@ static bool bcm2835_spi_can_dma(struct spi_master *master,
386 /* otherwise we only allow transfers within the same page 386 /* otherwise we only allow transfers within the same page
387 * to avoid wasting time on dma_mapping when it is not practical 387 * to avoid wasting time on dma_mapping when it is not practical
388 */ 388 */
389 if (((size_t)tfr->tx_buf & PAGE_MASK) + tfr->len > PAGE_SIZE) { 389 if (((size_t)tfr->tx_buf & (PAGE_SIZE - 1)) + tfr->len > PAGE_SIZE) {
390 dev_warn_once(&spi->dev, 390 dev_warn_once(&spi->dev,
391 "Unaligned spi tx-transfer bridging page\n"); 391 "Unaligned spi tx-transfer bridging page\n");
392 return false; 392 return false;
393 } 393 }
394 if (((size_t)tfr->rx_buf & PAGE_MASK) + tfr->len > PAGE_SIZE) { 394 if (((size_t)tfr->rx_buf & (PAGE_SIZE - 1)) + tfr->len > PAGE_SIZE) {
395 dev_warn_once(&spi->dev, 395 dev_warn_once(&spi->dev,
396 "Unaligned spi tx-transfer bridging page\n"); 396 "Unaligned spi rx-transfer bridging page\n");
397 return false; 397 return false;
398 } 398 }
399 399
diff --git a/drivers/spi/spi-meson-spifc.c b/drivers/spi/spi-meson-spifc.c
index 5468fc70dbf8..2465259f6241 100644
--- a/drivers/spi/spi-meson-spifc.c
+++ b/drivers/spi/spi-meson-spifc.c
@@ -444,6 +444,7 @@ static const struct of_device_id meson_spifc_dt_match[] = {
444 { .compatible = "amlogic,meson6-spifc", }, 444 { .compatible = "amlogic,meson6-spifc", },
445 { }, 445 { },
446}; 446};
447MODULE_DEVICE_TABLE(of, meson_spifc_dt_match);
447 448
448static struct platform_driver meson_spifc_driver = { 449static struct platform_driver meson_spifc_driver = {
449 .probe = meson_spifc_probe, 450 .probe = meson_spifc_probe,
diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c
index 5f6315c47920..ecb6c58238c4 100644
--- a/drivers/spi/spi-mt65xx.c
+++ b/drivers/spi/spi-mt65xx.c
@@ -85,7 +85,7 @@ struct mtk_spi {
85 void __iomem *base; 85 void __iomem *base;
86 u32 state; 86 u32 state;
87 u32 pad_sel; 87 u32 pad_sel;
88 struct clk *spi_clk, *parent_clk; 88 struct clk *parent_clk, *sel_clk, *spi_clk;
89 struct spi_transfer *cur_transfer; 89 struct spi_transfer *cur_transfer;
90 u32 xfer_len; 90 u32 xfer_len;
91 struct scatterlist *tx_sgl, *rx_sgl; 91 struct scatterlist *tx_sgl, *rx_sgl;
@@ -173,22 +173,6 @@ static void mtk_spi_config(struct mtk_spi *mdata,
173 writel(mdata->pad_sel, mdata->base + SPI_PAD_SEL_REG); 173 writel(mdata->pad_sel, mdata->base + SPI_PAD_SEL_REG);
174} 174}
175 175
176static int mtk_spi_prepare_hardware(struct spi_master *master)
177{
178 struct spi_transfer *trans;
179 struct mtk_spi *mdata = spi_master_get_devdata(master);
180 struct spi_message *msg = master->cur_msg;
181
182 trans = list_first_entry(&msg->transfers, struct spi_transfer,
183 transfer_list);
184 if (!trans->cs_change) {
185 mdata->state = MTK_SPI_IDLE;
186 mtk_spi_reset(mdata);
187 }
188
189 return 0;
190}
191
192static int mtk_spi_prepare_message(struct spi_master *master, 176static int mtk_spi_prepare_message(struct spi_master *master,
193 struct spi_message *msg) 177 struct spi_message *msg)
194{ 178{
@@ -228,11 +212,15 @@ static void mtk_spi_set_cs(struct spi_device *spi, bool enable)
228 struct mtk_spi *mdata = spi_master_get_devdata(spi->master); 212 struct mtk_spi *mdata = spi_master_get_devdata(spi->master);
229 213
230 reg_val = readl(mdata->base + SPI_CMD_REG); 214 reg_val = readl(mdata->base + SPI_CMD_REG);
231 if (!enable) 215 if (!enable) {
232 reg_val |= SPI_CMD_PAUSE_EN; 216 reg_val |= SPI_CMD_PAUSE_EN;
233 else 217 writel(reg_val, mdata->base + SPI_CMD_REG);
218 } else {
234 reg_val &= ~SPI_CMD_PAUSE_EN; 219 reg_val &= ~SPI_CMD_PAUSE_EN;
235 writel(reg_val, mdata->base + SPI_CMD_REG); 220 writel(reg_val, mdata->base + SPI_CMD_REG);
221 mdata->state = MTK_SPI_IDLE;
222 mtk_spi_reset(mdata);
223 }
236} 224}
237 225
238static void mtk_spi_prepare_transfer(struct spi_master *master, 226static void mtk_spi_prepare_transfer(struct spi_master *master,
@@ -509,7 +497,6 @@ static int mtk_spi_probe(struct platform_device *pdev)
509 master->mode_bits = SPI_CPOL | SPI_CPHA; 497 master->mode_bits = SPI_CPOL | SPI_CPHA;
510 498
511 master->set_cs = mtk_spi_set_cs; 499 master->set_cs = mtk_spi_set_cs;
512 master->prepare_transfer_hardware = mtk_spi_prepare_hardware;
513 master->prepare_message = mtk_spi_prepare_message; 500 master->prepare_message = mtk_spi_prepare_message;
514 master->transfer_one = mtk_spi_transfer_one; 501 master->transfer_one = mtk_spi_transfer_one;
515 master->can_dma = mtk_spi_can_dma; 502 master->can_dma = mtk_spi_can_dma;
@@ -576,13 +563,6 @@ static int mtk_spi_probe(struct platform_device *pdev)
576 goto err_put_master; 563 goto err_put_master;
577 } 564 }
578 565
579 mdata->spi_clk = devm_clk_get(&pdev->dev, "spi-clk");
580 if (IS_ERR(mdata->spi_clk)) {
581 ret = PTR_ERR(mdata->spi_clk);
582 dev_err(&pdev->dev, "failed to get spi-clk: %d\n", ret);
583 goto err_put_master;
584 }
585
586 mdata->parent_clk = devm_clk_get(&pdev->dev, "parent-clk"); 566 mdata->parent_clk = devm_clk_get(&pdev->dev, "parent-clk");
587 if (IS_ERR(mdata->parent_clk)) { 567 if (IS_ERR(mdata->parent_clk)) {
588 ret = PTR_ERR(mdata->parent_clk); 568 ret = PTR_ERR(mdata->parent_clk);
@@ -590,13 +570,27 @@ static int mtk_spi_probe(struct platform_device *pdev)
590 goto err_put_master; 570 goto err_put_master;
591 } 571 }
592 572
573 mdata->sel_clk = devm_clk_get(&pdev->dev, "sel-clk");
574 if (IS_ERR(mdata->sel_clk)) {
575 ret = PTR_ERR(mdata->sel_clk);
576 dev_err(&pdev->dev, "failed to get sel-clk: %d\n", ret);
577 goto err_put_master;
578 }
579
580 mdata->spi_clk = devm_clk_get(&pdev->dev, "spi-clk");
581 if (IS_ERR(mdata->spi_clk)) {
582 ret = PTR_ERR(mdata->spi_clk);
583 dev_err(&pdev->dev, "failed to get spi-clk: %d\n", ret);
584 goto err_put_master;
585 }
586
593 ret = clk_prepare_enable(mdata->spi_clk); 587 ret = clk_prepare_enable(mdata->spi_clk);
594 if (ret < 0) { 588 if (ret < 0) {
595 dev_err(&pdev->dev, "failed to enable spi_clk (%d)\n", ret); 589 dev_err(&pdev->dev, "failed to enable spi_clk (%d)\n", ret);
596 goto err_put_master; 590 goto err_put_master;
597 } 591 }
598 592
599 ret = clk_set_parent(mdata->spi_clk, mdata->parent_clk); 593 ret = clk_set_parent(mdata->sel_clk, mdata->parent_clk);
600 if (ret < 0) { 594 if (ret < 0) {
601 dev_err(&pdev->dev, "failed to clk_set_parent (%d)\n", ret); 595 dev_err(&pdev->dev, "failed to clk_set_parent (%d)\n", ret);
602 goto err_disable_clk; 596 goto err_disable_clk;
@@ -630,7 +624,6 @@ static int mtk_spi_remove(struct platform_device *pdev)
630 pm_runtime_disable(&pdev->dev); 624 pm_runtime_disable(&pdev->dev);
631 625
632 mtk_spi_reset(mdata); 626 mtk_spi_reset(mdata);
633 clk_disable_unprepare(mdata->spi_clk);
634 spi_master_put(master); 627 spi_master_put(master);
635 628
636 return 0; 629 return 0;
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index fdd791977041..a8ef38ebb9c9 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -654,6 +654,10 @@ static irqreturn_t ssp_int(int irq, void *dev_id)
654 if (!(sccr1_reg & SSCR1_TIE)) 654 if (!(sccr1_reg & SSCR1_TIE))
655 mask &= ~SSSR_TFS; 655 mask &= ~SSSR_TFS;
656 656
657 /* Ignore RX timeout interrupt if it is disabled */
658 if (!(sccr1_reg & SSCR1_TINTE))
659 mask &= ~SSSR_TINT;
660
657 if (!(status & mask)) 661 if (!(status & mask))
658 return IRQ_NONE; 662 return IRQ_NONE;
659 663
diff --git a/drivers/spi/spi-xtensa-xtfpga.c b/drivers/spi/spi-xtensa-xtfpga.c
index 2e32ea2f194f..be6155cba9de 100644
--- a/drivers/spi/spi-xtensa-xtfpga.c
+++ b/drivers/spi/spi-xtensa-xtfpga.c
@@ -34,13 +34,13 @@ struct xtfpga_spi {
34static inline void xtfpga_spi_write32(const struct xtfpga_spi *spi, 34static inline void xtfpga_spi_write32(const struct xtfpga_spi *spi,
35 unsigned addr, u32 val) 35 unsigned addr, u32 val)
36{ 36{
37 iowrite32(val, spi->regs + addr); 37 __raw_writel(val, spi->regs + addr);
38} 38}
39 39
40static inline unsigned int xtfpga_spi_read32(const struct xtfpga_spi *spi, 40static inline unsigned int xtfpga_spi_read32(const struct xtfpga_spi *spi,
41 unsigned addr) 41 unsigned addr)
42{ 42{
43 return ioread32(spi->regs + addr); 43 return __raw_readl(spi->regs + addr);
44} 44}
45 45
46static inline void xtfpga_spi_wait_busy(struct xtfpga_spi *xspi) 46static inline void xtfpga_spi_wait_busy(struct xtfpga_spi *xspi)
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 3abb3903f2ad..a5f53de813d3 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -1610,8 +1610,7 @@ static struct class spi_master_class = {
1610 * 1610 *
1611 * The caller is responsible for assigning the bus number and initializing 1611 * The caller is responsible for assigning the bus number and initializing
1612 * the master's methods before calling spi_register_master(); and (after errors 1612 * the master's methods before calling spi_register_master(); and (after errors
1613 * adding the device) calling spi_master_put() and kfree() to prevent a memory 1613 * adding the device) calling spi_master_put() to prevent a memory leak.
1614 * leak.
1615 */ 1614 */
1616struct spi_master *spi_alloc_master(struct device *dev, unsigned size) 1615struct spi_master *spi_alloc_master(struct device *dev, unsigned size)
1617{ 1616{
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index fba92a526531..ef008e52f953 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -651,7 +651,8 @@ static int spidev_release(struct inode *inode, struct file *filp)
651 kfree(spidev->rx_buffer); 651 kfree(spidev->rx_buffer);
652 spidev->rx_buffer = NULL; 652 spidev->rx_buffer = NULL;
653 653
654 spidev->speed_hz = spidev->spi->max_speed_hz; 654 if (spidev->spi)
655 spidev->speed_hz = spidev->spi->max_speed_hz;
655 656
656 /* ... after we unbound from the underlying device? */ 657 /* ... after we unbound from the underlying device? */
657 spin_lock_irq(&spidev->spi_lock); 658 spin_lock_irq(&spidev->spi_lock);
diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c
index bdfb3c84c3cb..4a3cf9ba152f 100644
--- a/drivers/spmi/spmi-pmic-arb.c
+++ b/drivers/spmi/spmi-pmic-arb.c
@@ -451,7 +451,7 @@ static void periph_interrupt(struct spmi_pmic_arb_dev *pa, u8 apid)
451 } 451 }
452} 452}
453 453
454static void pmic_arb_chained_irq(unsigned int irq, struct irq_desc *desc) 454static void pmic_arb_chained_irq(struct irq_desc *desc)
455{ 455{
456 struct spmi_pmic_arb_dev *pa = irq_desc_get_handler_data(desc); 456 struct spmi_pmic_arb_dev *pa = irq_desc_get_handler_data(desc);
457 struct irq_chip *chip = irq_desc_get_chip(desc); 457 struct irq_chip *chip = irq_desc_get_chip(desc);
diff --git a/drivers/staging/board/armadillo800eva.c b/drivers/staging/board/armadillo800eva.c
index 81df77bd55cc..9c41652ee908 100644
--- a/drivers/staging/board/armadillo800eva.c
+++ b/drivers/staging/board/armadillo800eva.c
@@ -91,7 +91,7 @@ static const struct board_staging_dev armadillo800eva_devices[] __initconst = {
91 .pdev = &lcdc0_device, 91 .pdev = &lcdc0_device,
92 .clocks = lcdc0_clocks, 92 .clocks = lcdc0_clocks,
93 .nclocks = ARRAY_SIZE(lcdc0_clocks), 93 .nclocks = ARRAY_SIZE(lcdc0_clocks),
94 .domain = "a4lc", 94 .domain = "/system-controller@e6180000/pm-domains/c5/a4lc@1"
95 }, 95 },
96}; 96};
97 97
diff --git a/drivers/staging/board/board.c b/drivers/staging/board/board.c
index 29d456e29f38..3eb5eb8f069c 100644
--- a/drivers/staging/board/board.c
+++ b/drivers/staging/board/board.c
@@ -135,6 +135,40 @@ int __init board_staging_register_clock(const struct board_staging_clk *bsc)
135 return error; 135 return error;
136} 136}
137 137
138#ifdef CONFIG_PM_GENERIC_DOMAINS_OF
139static int board_staging_add_dev_domain(struct platform_device *pdev,
140 const char *domain)
141{
142 struct of_phandle_args pd_args;
143 struct generic_pm_domain *pd;
144 struct device_node *np;
145
146 np = of_find_node_by_path(domain);
147 if (!np) {
148 pr_err("Cannot find domain node %s\n", domain);
149 return -ENOENT;
150 }
151
152 pd_args.np = np;
153 pd_args.args_count = 0;
154 pd = of_genpd_get_from_provider(&pd_args);
155 if (IS_ERR(pd)) {
156 pr_err("Cannot find genpd %s (%ld)\n", domain, PTR_ERR(pd));
157 return PTR_ERR(pd);
158
159 }
160 pr_debug("Found genpd %s for device %s\n", pd->name, pdev->name);
161
162 return pm_genpd_add_device(pd, &pdev->dev);
163}
164#else
165static inline int board_staging_add_dev_domain(struct platform_device *pdev,
166 const char *domain)
167{
168 return 0;
169}
170#endif
171
138int __init board_staging_register_device(const struct board_staging_dev *dev) 172int __init board_staging_register_device(const struct board_staging_dev *dev)
139{ 173{
140 struct platform_device *pdev = dev->pdev; 174 struct platform_device *pdev = dev->pdev;
@@ -161,7 +195,7 @@ int __init board_staging_register_device(const struct board_staging_dev *dev)
161 } 195 }
162 196
163 if (dev->domain) 197 if (dev->domain)
164 __pm_genpd_name_add_device(dev->domain, &pdev->dev, NULL); 198 board_staging_add_dev_domain(pdev, dev->domain);
165 199
166 return error; 200 return error;
167} 201}
diff --git a/drivers/staging/rdma/Kconfig b/drivers/staging/rdma/Kconfig
index cf5fe9bb87a1..d7f62359d743 100644
--- a/drivers/staging/rdma/Kconfig
+++ b/drivers/staging/rdma/Kconfig
@@ -24,6 +24,8 @@ if STAGING_RDMA
24 24
25source "drivers/staging/rdma/amso1100/Kconfig" 25source "drivers/staging/rdma/amso1100/Kconfig"
26 26
27source "drivers/staging/rdma/ehca/Kconfig"
28
27source "drivers/staging/rdma/hfi1/Kconfig" 29source "drivers/staging/rdma/hfi1/Kconfig"
28 30
29source "drivers/staging/rdma/ipath/Kconfig" 31source "drivers/staging/rdma/ipath/Kconfig"
diff --git a/drivers/staging/rdma/Makefile b/drivers/staging/rdma/Makefile
index cbd915ac7f20..139d78ef2c24 100644
--- a/drivers/staging/rdma/Makefile
+++ b/drivers/staging/rdma/Makefile
@@ -1,4 +1,5 @@
1# Entries for RDMA_STAGING tree 1# Entries for RDMA_STAGING tree
2obj-$(CONFIG_INFINIBAND_AMSO1100) += amso1100/ 2obj-$(CONFIG_INFINIBAND_AMSO1100) += amso1100/
3obj-$(CONFIG_INFINIBAND_EHCA) += ehca/
3obj-$(CONFIG_INFINIBAND_HFI1) += hfi1/ 4obj-$(CONFIG_INFINIBAND_HFI1) += hfi1/
4obj-$(CONFIG_INFINIBAND_IPATH) += ipath/ 5obj-$(CONFIG_INFINIBAND_IPATH) += ipath/
diff --git a/drivers/infiniband/hw/ehca/Kconfig b/drivers/staging/rdma/ehca/Kconfig
index 59f807d8d58e..3fadd2ad6426 100644
--- a/drivers/infiniband/hw/ehca/Kconfig
+++ b/drivers/staging/rdma/ehca/Kconfig
@@ -2,7 +2,8 @@ config INFINIBAND_EHCA
2 tristate "eHCA support" 2 tristate "eHCA support"
3 depends on IBMEBUS 3 depends on IBMEBUS
4 ---help--- 4 ---help---
5 This driver supports the IBM pSeries eHCA InfiniBand adapter. 5 This driver supports the deprecated IBM pSeries eHCA InfiniBand
6 adapter.
6 7
7 To compile the driver as a module, choose M here. The module 8 To compile the driver as a module, choose M here. The module
8 will be called ib_ehca. 9 will be called ib_ehca.
diff --git a/drivers/infiniband/hw/ehca/Makefile b/drivers/staging/rdma/ehca/Makefile
index 74d284e46a40..74d284e46a40 100644
--- a/drivers/infiniband/hw/ehca/Makefile
+++ b/drivers/staging/rdma/ehca/Makefile
diff --git a/drivers/staging/rdma/ehca/TODO b/drivers/staging/rdma/ehca/TODO
new file mode 100644
index 000000000000..199a4a600142
--- /dev/null
+++ b/drivers/staging/rdma/ehca/TODO
@@ -0,0 +1,4 @@
19/2015
2
3The ehca driver has been deprecated and moved to drivers/staging/rdma.
4It will be removed in the 4.6 merge window.
diff --git a/drivers/infiniband/hw/ehca/ehca_av.c b/drivers/staging/rdma/ehca/ehca_av.c
index 465926319f3d..465926319f3d 100644
--- a/drivers/infiniband/hw/ehca/ehca_av.c
+++ b/drivers/staging/rdma/ehca/ehca_av.c
diff --git a/drivers/infiniband/hw/ehca/ehca_classes.h b/drivers/staging/rdma/ehca/ehca_classes.h
index bd45e0f3923f..bd45e0f3923f 100644
--- a/drivers/infiniband/hw/ehca/ehca_classes.h
+++ b/drivers/staging/rdma/ehca/ehca_classes.h
diff --git a/drivers/infiniband/hw/ehca/ehca_classes_pSeries.h b/drivers/staging/rdma/ehca/ehca_classes_pSeries.h
index 689c35786dd2..689c35786dd2 100644
--- a/drivers/infiniband/hw/ehca/ehca_classes_pSeries.h
+++ b/drivers/staging/rdma/ehca/ehca_classes_pSeries.h
diff --git a/drivers/infiniband/hw/ehca/ehca_cq.c b/drivers/staging/rdma/ehca/ehca_cq.c
index 9b68b175069b..9b68b175069b 100644
--- a/drivers/infiniband/hw/ehca/ehca_cq.c
+++ b/drivers/staging/rdma/ehca/ehca_cq.c
diff --git a/drivers/infiniband/hw/ehca/ehca_eq.c b/drivers/staging/rdma/ehca/ehca_eq.c
index 90da6747d395..90da6747d395 100644
--- a/drivers/infiniband/hw/ehca/ehca_eq.c
+++ b/drivers/staging/rdma/ehca/ehca_eq.c
diff --git a/drivers/infiniband/hw/ehca/ehca_hca.c b/drivers/staging/rdma/ehca/ehca_hca.c
index e8b1bb65797a..e8b1bb65797a 100644
--- a/drivers/infiniband/hw/ehca/ehca_hca.c
+++ b/drivers/staging/rdma/ehca/ehca_hca.c
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/staging/rdma/ehca/ehca_irq.c
index 8615d7cf7e01..8615d7cf7e01 100644
--- a/drivers/infiniband/hw/ehca/ehca_irq.c
+++ b/drivers/staging/rdma/ehca/ehca_irq.c
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.h b/drivers/staging/rdma/ehca/ehca_irq.h
index 5370199f08c7..5370199f08c7 100644
--- a/drivers/infiniband/hw/ehca/ehca_irq.h
+++ b/drivers/staging/rdma/ehca/ehca_irq.h
diff --git a/drivers/infiniband/hw/ehca/ehca_iverbs.h b/drivers/staging/rdma/ehca/ehca_iverbs.h
index 80e6a3d5df3e..80e6a3d5df3e 100644
--- a/drivers/infiniband/hw/ehca/ehca_iverbs.h
+++ b/drivers/staging/rdma/ehca/ehca_iverbs.h
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/staging/rdma/ehca/ehca_main.c
index 8246418cd4e0..8246418cd4e0 100644
--- a/drivers/infiniband/hw/ehca/ehca_main.c
+++ b/drivers/staging/rdma/ehca/ehca_main.c
diff --git a/drivers/infiniband/hw/ehca/ehca_mcast.c b/drivers/staging/rdma/ehca/ehca_mcast.c
index cec181532924..cec181532924 100644
--- a/drivers/infiniband/hw/ehca/ehca_mcast.c
+++ b/drivers/staging/rdma/ehca/ehca_mcast.c
diff --git a/drivers/infiniband/hw/ehca/ehca_mrmw.c b/drivers/staging/rdma/ehca/ehca_mrmw.c
index f914b30999f8..f914b30999f8 100644
--- a/drivers/infiniband/hw/ehca/ehca_mrmw.c
+++ b/drivers/staging/rdma/ehca/ehca_mrmw.c
diff --git a/drivers/infiniband/hw/ehca/ehca_mrmw.h b/drivers/staging/rdma/ehca/ehca_mrmw.h
index 50d8b51306dd..50d8b51306dd 100644
--- a/drivers/infiniband/hw/ehca/ehca_mrmw.h
+++ b/drivers/staging/rdma/ehca/ehca_mrmw.h
diff --git a/drivers/infiniband/hw/ehca/ehca_pd.c b/drivers/staging/rdma/ehca/ehca_pd.c
index 351577a6670a..351577a6670a 100644
--- a/drivers/infiniband/hw/ehca/ehca_pd.c
+++ b/drivers/staging/rdma/ehca/ehca_pd.c
diff --git a/drivers/infiniband/hw/ehca/ehca_qes.h b/drivers/staging/rdma/ehca/ehca_qes.h
index 90c4efa67586..90c4efa67586 100644
--- a/drivers/infiniband/hw/ehca/ehca_qes.h
+++ b/drivers/staging/rdma/ehca/ehca_qes.h
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/staging/rdma/ehca/ehca_qp.c
index 2e89356c46fa..2e89356c46fa 100644
--- a/drivers/infiniband/hw/ehca/ehca_qp.c
+++ b/drivers/staging/rdma/ehca/ehca_qp.c
diff --git a/drivers/infiniband/hw/ehca/ehca_reqs.c b/drivers/staging/rdma/ehca/ehca_reqs.c
index 47f94984353d..47f94984353d 100644
--- a/drivers/infiniband/hw/ehca/ehca_reqs.c
+++ b/drivers/staging/rdma/ehca/ehca_reqs.c
diff --git a/drivers/infiniband/hw/ehca/ehca_sqp.c b/drivers/staging/rdma/ehca/ehca_sqp.c
index 376b031c2c7f..376b031c2c7f 100644
--- a/drivers/infiniband/hw/ehca/ehca_sqp.c
+++ b/drivers/staging/rdma/ehca/ehca_sqp.c
diff --git a/drivers/infiniband/hw/ehca/ehca_tools.h b/drivers/staging/rdma/ehca/ehca_tools.h
index d280b12aae64..d280b12aae64 100644
--- a/drivers/infiniband/hw/ehca/ehca_tools.h
+++ b/drivers/staging/rdma/ehca/ehca_tools.h
diff --git a/drivers/infiniband/hw/ehca/ehca_uverbs.c b/drivers/staging/rdma/ehca/ehca_uverbs.c
index 1a1d5d99fcf9..1a1d5d99fcf9 100644
--- a/drivers/infiniband/hw/ehca/ehca_uverbs.c
+++ b/drivers/staging/rdma/ehca/ehca_uverbs.c
diff --git a/drivers/infiniband/hw/ehca/hcp_if.c b/drivers/staging/rdma/ehca/hcp_if.c
index 89517ffb4389..89517ffb4389 100644
--- a/drivers/infiniband/hw/ehca/hcp_if.c
+++ b/drivers/staging/rdma/ehca/hcp_if.c
diff --git a/drivers/infiniband/hw/ehca/hcp_if.h b/drivers/staging/rdma/ehca/hcp_if.h
index a46e514c367b..a46e514c367b 100644
--- a/drivers/infiniband/hw/ehca/hcp_if.h
+++ b/drivers/staging/rdma/ehca/hcp_if.h
diff --git a/drivers/infiniband/hw/ehca/hcp_phyp.c b/drivers/staging/rdma/ehca/hcp_phyp.c
index 077376ff3d28..077376ff3d28 100644
--- a/drivers/infiniband/hw/ehca/hcp_phyp.c
+++ b/drivers/staging/rdma/ehca/hcp_phyp.c
diff --git a/drivers/infiniband/hw/ehca/hcp_phyp.h b/drivers/staging/rdma/ehca/hcp_phyp.h
index d1b029910249..d1b029910249 100644
--- a/drivers/infiniband/hw/ehca/hcp_phyp.h
+++ b/drivers/staging/rdma/ehca/hcp_phyp.h
diff --git a/drivers/infiniband/hw/ehca/hipz_fns.h b/drivers/staging/rdma/ehca/hipz_fns.h
index 9dac93d02140..9dac93d02140 100644
--- a/drivers/infiniband/hw/ehca/hipz_fns.h
+++ b/drivers/staging/rdma/ehca/hipz_fns.h
diff --git a/drivers/infiniband/hw/ehca/hipz_fns_core.h b/drivers/staging/rdma/ehca/hipz_fns_core.h
index 868735fd3187..868735fd3187 100644
--- a/drivers/infiniband/hw/ehca/hipz_fns_core.h
+++ b/drivers/staging/rdma/ehca/hipz_fns_core.h
diff --git a/drivers/infiniband/hw/ehca/hipz_hw.h b/drivers/staging/rdma/ehca/hipz_hw.h
index bf996c7acc42..bf996c7acc42 100644
--- a/drivers/infiniband/hw/ehca/hipz_hw.h
+++ b/drivers/staging/rdma/ehca/hipz_hw.h
diff --git a/drivers/infiniband/hw/ehca/ipz_pt_fn.c b/drivers/staging/rdma/ehca/ipz_pt_fn.c
index 7ffc748cb973..7ffc748cb973 100644
--- a/drivers/infiniband/hw/ehca/ipz_pt_fn.c
+++ b/drivers/staging/rdma/ehca/ipz_pt_fn.c
diff --git a/drivers/infiniband/hw/ehca/ipz_pt_fn.h b/drivers/staging/rdma/ehca/ipz_pt_fn.h
index a801274ea337..a801274ea337 100644
--- a/drivers/infiniband/hw/ehca/ipz_pt_fn.h
+++ b/drivers/staging/rdma/ehca/ipz_pt_fn.h
diff --git a/drivers/staging/rdma/hfi1/chip.c b/drivers/staging/rdma/hfi1/chip.c
index 654eafef1d30..aa58e597df06 100644
--- a/drivers/staging/rdma/hfi1/chip.c
+++ b/drivers/staging/rdma/hfi1/chip.c
@@ -2710,7 +2710,7 @@ int acquire_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
2710 if (sleep_ok) { 2710 if (sleep_ok) {
2711 mutex_lock(&ppd->hls_lock); 2711 mutex_lock(&ppd->hls_lock);
2712 } else { 2712 } else {
2713 while (mutex_trylock(&ppd->hls_lock) == EBUSY) 2713 while (!mutex_trylock(&ppd->hls_lock))
2714 udelay(1); 2714 udelay(1);
2715 } 2715 }
2716 2716
@@ -2758,7 +2758,7 @@ int release_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
2758 if (sleep_ok) { 2758 if (sleep_ok) {
2759 mutex_lock(&dd->pport->hls_lock); 2759 mutex_lock(&dd->pport->hls_lock);
2760 } else { 2760 } else {
2761 while (mutex_trylock(&dd->pport->hls_lock) == EBUSY) 2761 while (!mutex_trylock(&dd->pport->hls_lock))
2762 udelay(1); 2762 udelay(1);
2763 } 2763 }
2764 2764
diff --git a/drivers/staging/rdma/hfi1/device.c b/drivers/staging/rdma/hfi1/device.c
index 07c87a87775f..bc26a5392712 100644
--- a/drivers/staging/rdma/hfi1/device.c
+++ b/drivers/staging/rdma/hfi1/device.c
@@ -57,11 +57,13 @@
57#include "device.h" 57#include "device.h"
58 58
59static struct class *class; 59static struct class *class;
60static struct class *user_class;
60static dev_t hfi1_dev; 61static dev_t hfi1_dev;
61 62
62int hfi1_cdev_init(int minor, const char *name, 63int hfi1_cdev_init(int minor, const char *name,
63 const struct file_operations *fops, 64 const struct file_operations *fops,
64 struct cdev *cdev, struct device **devp) 65 struct cdev *cdev, struct device **devp,
66 bool user_accessible)
65{ 67{
66 const dev_t dev = MKDEV(MAJOR(hfi1_dev), minor); 68 const dev_t dev = MKDEV(MAJOR(hfi1_dev), minor);
67 struct device *device = NULL; 69 struct device *device = NULL;
@@ -78,7 +80,11 @@ int hfi1_cdev_init(int minor, const char *name,
78 goto done; 80 goto done;
79 } 81 }
80 82
81 device = device_create(class, NULL, dev, NULL, "%s", name); 83 if (user_accessible)
84 device = device_create(user_class, NULL, dev, NULL, "%s", name);
85 else
86 device = device_create(class, NULL, dev, NULL, "%s", name);
87
82 if (!IS_ERR(device)) 88 if (!IS_ERR(device))
83 goto done; 89 goto done;
84 ret = PTR_ERR(device); 90 ret = PTR_ERR(device);
@@ -110,6 +116,26 @@ const char *class_name(void)
110 return hfi1_class_name; 116 return hfi1_class_name;
111} 117}
112 118
119static char *hfi1_devnode(struct device *dev, umode_t *mode)
120{
121 if (mode)
122 *mode = 0600;
123 return kasprintf(GFP_KERNEL, "%s", dev_name(dev));
124}
125
126static const char *hfi1_class_name_user = "hfi1_user";
127const char *class_name_user(void)
128{
129 return hfi1_class_name_user;
130}
131
132static char *hfi1_user_devnode(struct device *dev, umode_t *mode)
133{
134 if (mode)
135 *mode = 0666;
136 return kasprintf(GFP_KERNEL, "%s", dev_name(dev));
137}
138
113int __init dev_init(void) 139int __init dev_init(void)
114{ 140{
115 int ret; 141 int ret;
@@ -125,7 +151,22 @@ int __init dev_init(void)
125 ret = PTR_ERR(class); 151 ret = PTR_ERR(class);
126 pr_err("Could not create device class (err %d)\n", -ret); 152 pr_err("Could not create device class (err %d)\n", -ret);
127 unregister_chrdev_region(hfi1_dev, HFI1_NMINORS); 153 unregister_chrdev_region(hfi1_dev, HFI1_NMINORS);
154 goto done;
128 } 155 }
156 class->devnode = hfi1_devnode;
157
158 user_class = class_create(THIS_MODULE, class_name_user());
159 if (IS_ERR(user_class)) {
160 ret = PTR_ERR(user_class);
161 pr_err("Could not create device class for user accessible files (err %d)\n",
162 -ret);
163 class_destroy(class);
164 class = NULL;
165 user_class = NULL;
166 unregister_chrdev_region(hfi1_dev, HFI1_NMINORS);
167 goto done;
168 }
169 user_class->devnode = hfi1_user_devnode;
129 170
130done: 171done:
131 return ret; 172 return ret;
@@ -133,10 +174,11 @@ done:
133 174
134void dev_cleanup(void) 175void dev_cleanup(void)
135{ 176{
136 if (class) { 177 class_destroy(class);
137 class_destroy(class); 178 class = NULL;
138 class = NULL; 179
139 } 180 class_destroy(user_class);
181 user_class = NULL;
140 182
141 unregister_chrdev_region(hfi1_dev, HFI1_NMINORS); 183 unregister_chrdev_region(hfi1_dev, HFI1_NMINORS);
142} 184}
diff --git a/drivers/staging/rdma/hfi1/device.h b/drivers/staging/rdma/hfi1/device.h
index 98caecd3d807..2850ff739d81 100644
--- a/drivers/staging/rdma/hfi1/device.h
+++ b/drivers/staging/rdma/hfi1/device.h
@@ -52,7 +52,8 @@
52 52
53int hfi1_cdev_init(int minor, const char *name, 53int hfi1_cdev_init(int minor, const char *name,
54 const struct file_operations *fops, 54 const struct file_operations *fops,
55 struct cdev *cdev, struct device **devp); 55 struct cdev *cdev, struct device **devp,
56 bool user_accessible);
56void hfi1_cdev_cleanup(struct cdev *cdev, struct device **devp); 57void hfi1_cdev_cleanup(struct cdev *cdev, struct device **devp);
57const char *class_name(void); 58const char *class_name(void);
58int __init dev_init(void); 59int __init dev_init(void);
diff --git a/drivers/staging/rdma/hfi1/diag.c b/drivers/staging/rdma/hfi1/diag.c
index 6777d6b659cf..3e8d5ac4c626 100644
--- a/drivers/staging/rdma/hfi1/diag.c
+++ b/drivers/staging/rdma/hfi1/diag.c
@@ -292,7 +292,7 @@ int hfi1_diag_add(struct hfi1_devdata *dd)
292 if (atomic_inc_return(&diagpkt_count) == 1) { 292 if (atomic_inc_return(&diagpkt_count) == 1) {
293 ret = hfi1_cdev_init(HFI1_DIAGPKT_MINOR, name, 293 ret = hfi1_cdev_init(HFI1_DIAGPKT_MINOR, name,
294 &diagpkt_file_ops, &diagpkt_cdev, 294 &diagpkt_file_ops, &diagpkt_cdev,
295 &diagpkt_device); 295 &diagpkt_device, false);
296 } 296 }
297 297
298 return ret; 298 return ret;
@@ -592,7 +592,8 @@ static int hfi1_snoop_add(struct hfi1_devdata *dd, const char *name)
592 592
593 ret = hfi1_cdev_init(HFI1_SNOOP_CAPTURE_BASE + dd->unit, name, 593 ret = hfi1_cdev_init(HFI1_SNOOP_CAPTURE_BASE + dd->unit, name,
594 &snoop_file_ops, 594 &snoop_file_ops,
595 &dd->hfi1_snoop.cdev, &dd->hfi1_snoop.class_dev); 595 &dd->hfi1_snoop.cdev, &dd->hfi1_snoop.class_dev,
596 false);
596 597
597 if (ret) { 598 if (ret) {
598 dd_dev_err(dd, "Couldn't create %s device: %d", name, ret); 599 dd_dev_err(dd, "Couldn't create %s device: %d", name, ret);
@@ -1012,11 +1013,10 @@ static long hfi1_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
1012 case HFI1_SNOOP_IOCSETLINKSTATE_EXTRA: 1013 case HFI1_SNOOP_IOCSETLINKSTATE_EXTRA:
1013 memset(&link_info, 0, sizeof(link_info)); 1014 memset(&link_info, 0, sizeof(link_info));
1014 1015
1015 ret = copy_from_user(&link_info, 1016 if (copy_from_user(&link_info,
1016 (struct hfi1_link_info __user *)arg, 1017 (struct hfi1_link_info __user *)arg,
1017 sizeof(link_info)); 1018 sizeof(link_info)))
1018 if (ret) 1019 ret = -EFAULT;
1019 break;
1020 1020
1021 value = link_info.port_state; 1021 value = link_info.port_state;
1022 index = link_info.port_number; 1022 index = link_info.port_number;
@@ -1080,9 +1080,10 @@ static long hfi1_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
1080 case HFI1_SNOOP_IOCGETLINKSTATE_EXTRA: 1080 case HFI1_SNOOP_IOCGETLINKSTATE_EXTRA:
1081 if (cmd == HFI1_SNOOP_IOCGETLINKSTATE_EXTRA) { 1081 if (cmd == HFI1_SNOOP_IOCGETLINKSTATE_EXTRA) {
1082 memset(&link_info, 0, sizeof(link_info)); 1082 memset(&link_info, 0, sizeof(link_info));
1083 ret = copy_from_user(&link_info, 1083 if (copy_from_user(&link_info,
1084 (struct hfi1_link_info __user *)arg, 1084 (struct hfi1_link_info __user *)arg,
1085 sizeof(link_info)); 1085 sizeof(link_info)))
1086 ret = -EFAULT;
1086 index = link_info.port_number; 1087 index = link_info.port_number;
1087 } else { 1088 } else {
1088 ret = __get_user(index, (int __user *) arg); 1089 ret = __get_user(index, (int __user *) arg);
@@ -1114,9 +1115,10 @@ static long hfi1_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
1114 ppd->link_speed_active; 1115 ppd->link_speed_active;
1115 link_info.link_width_active = 1116 link_info.link_width_active =
1116 ppd->link_width_active; 1117 ppd->link_width_active;
1117 ret = copy_to_user( 1118 if (copy_to_user(
1118 (struct hfi1_link_info __user *)arg, 1119 (struct hfi1_link_info __user *)arg,
1119 &link_info, sizeof(link_info)); 1120 &link_info, sizeof(link_info)))
1121 ret = -EFAULT;
1120 } else { 1122 } else {
1121 ret = __put_user(value, (int __user *)arg); 1123 ret = __put_user(value, (int __user *)arg);
1122 } 1124 }
@@ -1142,10 +1144,9 @@ static long hfi1_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
1142 snoop_dbg("Setting filter"); 1144 snoop_dbg("Setting filter");
1143 /* just copy command structure */ 1145 /* just copy command structure */
1144 argp = (unsigned long *)arg; 1146 argp = (unsigned long *)arg;
1145 ret = copy_from_user(&filter_cmd, (void __user *)argp, 1147 if (copy_from_user(&filter_cmd, (void __user *)argp,
1146 sizeof(filter_cmd)); 1148 sizeof(filter_cmd))) {
1147 if (ret < 0) { 1149 ret = -EFAULT;
1148 pr_alert("Error copying filter command\n");
1149 break; 1150 break;
1150 } 1151 }
1151 if (filter_cmd.opcode >= HFI1_MAX_FILTERS) { 1152 if (filter_cmd.opcode >= HFI1_MAX_FILTERS) {
@@ -1167,12 +1168,11 @@ static long hfi1_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
1167 break; 1168 break;
1168 } 1169 }
1169 /* copy remaining data from userspace */ 1170 /* copy remaining data from userspace */
1170 ret = copy_from_user((u8 *)filter_value, 1171 if (copy_from_user((u8 *)filter_value,
1171 (void __user *)filter_cmd.value_ptr, 1172 (void __user *)filter_cmd.value_ptr,
1172 filter_cmd.length); 1173 filter_cmd.length)) {
1173 if (ret < 0) {
1174 kfree(filter_value); 1174 kfree(filter_value);
1175 pr_alert("Error copying filter data\n"); 1175 ret = -EFAULT;
1176 break; 1176 break;
1177 } 1177 }
1178 /* Drain packets first */ 1178 /* Drain packets first */
diff --git a/drivers/staging/rdma/hfi1/file_ops.c b/drivers/staging/rdma/hfi1/file_ops.c
index 469861750b76..72d38500d8ce 100644
--- a/drivers/staging/rdma/hfi1/file_ops.c
+++ b/drivers/staging/rdma/hfi1/file_ops.c
@@ -1181,6 +1181,7 @@ static int get_ctxt_info(struct file *fp, void __user *ubase, __u32 len)
1181 struct hfi1_filedata *fd = fp->private_data; 1181 struct hfi1_filedata *fd = fp->private_data;
1182 int ret = 0; 1182 int ret = 0;
1183 1183
1184 memset(&cinfo, 0, sizeof(cinfo));
1184 ret = hfi1_get_base_kinfo(uctxt, &cinfo); 1185 ret = hfi1_get_base_kinfo(uctxt, &cinfo);
1185 if (ret < 0) 1186 if (ret < 0)
1186 goto done; 1187 goto done;
@@ -2089,14 +2090,16 @@ static int user_add(struct hfi1_devdata *dd)
2089 2090
2090 if (atomic_inc_return(&user_count) == 1) { 2091 if (atomic_inc_return(&user_count) == 1) {
2091 ret = hfi1_cdev_init(0, class_name(), &hfi1_file_ops, 2092 ret = hfi1_cdev_init(0, class_name(), &hfi1_file_ops,
2092 &wildcard_cdev, &wildcard_device); 2093 &wildcard_cdev, &wildcard_device,
2094 true);
2093 if (ret) 2095 if (ret)
2094 goto done; 2096 goto done;
2095 } 2097 }
2096 2098
2097 snprintf(name, sizeof(name), "%s_%d", class_name(), dd->unit); 2099 snprintf(name, sizeof(name), "%s_%d", class_name(), dd->unit);
2098 ret = hfi1_cdev_init(dd->unit + 1, name, &hfi1_file_ops, 2100 ret = hfi1_cdev_init(dd->unit + 1, name, &hfi1_file_ops,
2099 &dd->user_cdev, &dd->user_device); 2101 &dd->user_cdev, &dd->user_device,
2102 true);
2100 if (ret) 2103 if (ret)
2101 goto done; 2104 goto done;
2102 2105
@@ -2104,7 +2107,8 @@ static int user_add(struct hfi1_devdata *dd)
2104 snprintf(name, sizeof(name), 2107 snprintf(name, sizeof(name),
2105 "%s_ui%d", class_name(), dd->unit); 2108 "%s_ui%d", class_name(), dd->unit);
2106 ret = hfi1_cdev_init(dd->unit + UI_OFFSET, name, &ui_file_ops, 2109 ret = hfi1_cdev_init(dd->unit + UI_OFFSET, name, &ui_file_ops,
2107 &dd->ui_cdev, &dd->ui_device); 2110 &dd->ui_cdev, &dd->ui_device,
2111 false);
2108 if (ret) 2112 if (ret)
2109 goto done; 2113 goto done;
2110 } 2114 }
diff --git a/drivers/staging/rdma/hfi1/mad.c b/drivers/staging/rdma/hfi1/mad.c
index 37269eb90c34..b2c1b72d38ce 100644
--- a/drivers/staging/rdma/hfi1/mad.c
+++ b/drivers/staging/rdma/hfi1/mad.c
@@ -1717,9 +1717,9 @@ static int __subn_get_opa_psi(struct opa_smp *smp, u32 am, u8 *data,
1717 psi->port_states.portphysstate_portstate = 1717 psi->port_states.portphysstate_portstate =
1718 (hfi1_ibphys_portstate(ppd) << 4) | (lstate & 0xf); 1718 (hfi1_ibphys_portstate(ppd) << 4) | (lstate & 0xf);
1719 psi->link_width_downgrade_tx_active = 1719 psi->link_width_downgrade_tx_active =
1720 ppd->link_width_downgrade_tx_active; 1720 cpu_to_be16(ppd->link_width_downgrade_tx_active);
1721 psi->link_width_downgrade_rx_active = 1721 psi->link_width_downgrade_rx_active =
1722 ppd->link_width_downgrade_rx_active; 1722 cpu_to_be16(ppd->link_width_downgrade_rx_active);
1723 if (resp_len) 1723 if (resp_len)
1724 *resp_len += sizeof(struct opa_port_state_info); 1724 *resp_len += sizeof(struct opa_port_state_info);
1725 1725
diff --git a/drivers/staging/rdma/hfi1/sdma.c b/drivers/staging/rdma/hfi1/sdma.c
index a8c903caecce..aecd1a74741c 100644
--- a/drivers/staging/rdma/hfi1/sdma.c
+++ b/drivers/staging/rdma/hfi1/sdma.c
@@ -737,7 +737,7 @@ u16 sdma_get_descq_cnt(void)
737 */ 737 */
738 if (!is_power_of_2(count)) 738 if (!is_power_of_2(count))
739 return SDMA_DESCQ_CNT; 739 return SDMA_DESCQ_CNT;
740 if (count < 64 && count > 32768) 740 if (count < 64 || count > 32768)
741 return SDMA_DESCQ_CNT; 741 return SDMA_DESCQ_CNT;
742 return count; 742 return count;
743} 743}
@@ -1848,7 +1848,7 @@ static void dump_sdma_state(struct sdma_engine *sde)
1848 dd_dev_err(sde->dd, 1848 dd_dev_err(sde->dd,
1849 "\taidx: %u amode: %u alen: %u\n", 1849 "\taidx: %u amode: %u alen: %u\n",
1850 (u8)((desc[1] & SDMA_DESC1_HEADER_INDEX_SMASK) 1850 (u8)((desc[1] & SDMA_DESC1_HEADER_INDEX_SMASK)
1851 >> SDMA_DESC1_HEADER_INDEX_MASK), 1851 >> SDMA_DESC1_HEADER_INDEX_SHIFT),
1852 (u8)((desc[1] & SDMA_DESC1_HEADER_MODE_SMASK) 1852 (u8)((desc[1] & SDMA_DESC1_HEADER_MODE_SMASK)
1853 >> SDMA_DESC1_HEADER_MODE_SHIFT), 1853 >> SDMA_DESC1_HEADER_MODE_SHIFT),
1854 (u8)((desc[1] & SDMA_DESC1_HEADER_DWS_SMASK) 1854 (u8)((desc[1] & SDMA_DESC1_HEADER_DWS_SMASK)
@@ -1926,7 +1926,7 @@ void sdma_seqfile_dump_sde(struct seq_file *s, struct sdma_engine *sde)
1926 if (desc[0] & SDMA_DESC0_FIRST_DESC_FLAG) 1926 if (desc[0] & SDMA_DESC0_FIRST_DESC_FLAG)
1927 seq_printf(s, "\t\tahgidx: %u ahgmode: %u\n", 1927 seq_printf(s, "\t\tahgidx: %u ahgmode: %u\n",
1928 (u8)((desc[1] & SDMA_DESC1_HEADER_INDEX_SMASK) 1928 (u8)((desc[1] & SDMA_DESC1_HEADER_INDEX_SMASK)
1929 >> SDMA_DESC1_HEADER_INDEX_MASK), 1929 >> SDMA_DESC1_HEADER_INDEX_SHIFT),
1930 (u8)((desc[1] & SDMA_DESC1_HEADER_MODE_SMASK) 1930 (u8)((desc[1] & SDMA_DESC1_HEADER_MODE_SMASK)
1931 >> SDMA_DESC1_HEADER_MODE_SHIFT)); 1931 >> SDMA_DESC1_HEADER_MODE_SHIFT));
1932 head = (head + 1) & sde->sdma_mask; 1932 head = (head + 1) & sde->sdma_mask;
diff --git a/drivers/staging/rdma/hfi1/sdma.h b/drivers/staging/rdma/hfi1/sdma.h
index 1e613fcd8f4c..496086903891 100644
--- a/drivers/staging/rdma/hfi1/sdma.h
+++ b/drivers/staging/rdma/hfi1/sdma.h
@@ -109,53 +109,53 @@
109/* 109/*
110 * Bits defined in the send DMA descriptor. 110 * Bits defined in the send DMA descriptor.
111 */ 111 */
112#define SDMA_DESC0_FIRST_DESC_FLAG (1ULL<<63) 112#define SDMA_DESC0_FIRST_DESC_FLAG (1ULL << 63)
113#define SDMA_DESC0_LAST_DESC_FLAG (1ULL<<62) 113#define SDMA_DESC0_LAST_DESC_FLAG (1ULL << 62)
114#define SDMA_DESC0_BYTE_COUNT_SHIFT 48 114#define SDMA_DESC0_BYTE_COUNT_SHIFT 48
115#define SDMA_DESC0_BYTE_COUNT_WIDTH 14 115#define SDMA_DESC0_BYTE_COUNT_WIDTH 14
116#define SDMA_DESC0_BYTE_COUNT_MASK \ 116#define SDMA_DESC0_BYTE_COUNT_MASK \
117 ((1ULL<<SDMA_DESC0_BYTE_COUNT_WIDTH)-1ULL) 117 ((1ULL << SDMA_DESC0_BYTE_COUNT_WIDTH) - 1)
118#define SDMA_DESC0_BYTE_COUNT_SMASK \ 118#define SDMA_DESC0_BYTE_COUNT_SMASK \
119 (SDMA_DESC0_BYTE_COUNT_MASK<<SDMA_DESC0_BYTE_COUNT_SHIFT) 119 (SDMA_DESC0_BYTE_COUNT_MASK << SDMA_DESC0_BYTE_COUNT_SHIFT)
120#define SDMA_DESC0_PHY_ADDR_SHIFT 0 120#define SDMA_DESC0_PHY_ADDR_SHIFT 0
121#define SDMA_DESC0_PHY_ADDR_WIDTH 48 121#define SDMA_DESC0_PHY_ADDR_WIDTH 48
122#define SDMA_DESC0_PHY_ADDR_MASK \ 122#define SDMA_DESC0_PHY_ADDR_MASK \
123 ((1ULL<<SDMA_DESC0_PHY_ADDR_WIDTH)-1ULL) 123 ((1ULL << SDMA_DESC0_PHY_ADDR_WIDTH) - 1)
124#define SDMA_DESC0_PHY_ADDR_SMASK \ 124#define SDMA_DESC0_PHY_ADDR_SMASK \
125 (SDMA_DESC0_PHY_ADDR_MASK<<SDMA_DESC0_PHY_ADDR_SHIFT) 125 (SDMA_DESC0_PHY_ADDR_MASK << SDMA_DESC0_PHY_ADDR_SHIFT)
126 126
127#define SDMA_DESC1_HEADER_UPDATE1_SHIFT 32 127#define SDMA_DESC1_HEADER_UPDATE1_SHIFT 32
128#define SDMA_DESC1_HEADER_UPDATE1_WIDTH 32 128#define SDMA_DESC1_HEADER_UPDATE1_WIDTH 32
129#define SDMA_DESC1_HEADER_UPDATE1_MASK \ 129#define SDMA_DESC1_HEADER_UPDATE1_MASK \
130 ((1ULL<<SDMA_DESC1_HEADER_UPDATE1_WIDTH)-1ULL) 130 ((1ULL << SDMA_DESC1_HEADER_UPDATE1_WIDTH) - 1)
131#define SDMA_DESC1_HEADER_UPDATE1_SMASK \ 131#define SDMA_DESC1_HEADER_UPDATE1_SMASK \
132 (SDMA_DESC1_HEADER_UPDATE1_MASK<<SDMA_DESC1_HEADER_UPDATE1_SHIFT) 132 (SDMA_DESC1_HEADER_UPDATE1_MASK << SDMA_DESC1_HEADER_UPDATE1_SHIFT)
133#define SDMA_DESC1_HEADER_MODE_SHIFT 13 133#define SDMA_DESC1_HEADER_MODE_SHIFT 13
134#define SDMA_DESC1_HEADER_MODE_WIDTH 3 134#define SDMA_DESC1_HEADER_MODE_WIDTH 3
135#define SDMA_DESC1_HEADER_MODE_MASK \ 135#define SDMA_DESC1_HEADER_MODE_MASK \
136 ((1ULL<<SDMA_DESC1_HEADER_MODE_WIDTH)-1ULL) 136 ((1ULL << SDMA_DESC1_HEADER_MODE_WIDTH) - 1)
137#define SDMA_DESC1_HEADER_MODE_SMASK \ 137#define SDMA_DESC1_HEADER_MODE_SMASK \
138 (SDMA_DESC1_HEADER_MODE_MASK<<SDMA_DESC1_HEADER_MODE_SHIFT) 138 (SDMA_DESC1_HEADER_MODE_MASK << SDMA_DESC1_HEADER_MODE_SHIFT)
139#define SDMA_DESC1_HEADER_INDEX_SHIFT 8 139#define SDMA_DESC1_HEADER_INDEX_SHIFT 8
140#define SDMA_DESC1_HEADER_INDEX_WIDTH 5 140#define SDMA_DESC1_HEADER_INDEX_WIDTH 5
141#define SDMA_DESC1_HEADER_INDEX_MASK \ 141#define SDMA_DESC1_HEADER_INDEX_MASK \
142 ((1ULL<<SDMA_DESC1_HEADER_INDEX_WIDTH)-1ULL) 142 ((1ULL << SDMA_DESC1_HEADER_INDEX_WIDTH) - 1)
143#define SDMA_DESC1_HEADER_INDEX_SMASK \ 143#define SDMA_DESC1_HEADER_INDEX_SMASK \
144 (SDMA_DESC1_HEADER_INDEX_MASK<<SDMA_DESC1_HEADER_INDEX_SHIFT) 144 (SDMA_DESC1_HEADER_INDEX_MASK << SDMA_DESC1_HEADER_INDEX_SHIFT)
145#define SDMA_DESC1_HEADER_DWS_SHIFT 4 145#define SDMA_DESC1_HEADER_DWS_SHIFT 4
146#define SDMA_DESC1_HEADER_DWS_WIDTH 4 146#define SDMA_DESC1_HEADER_DWS_WIDTH 4
147#define SDMA_DESC1_HEADER_DWS_MASK \ 147#define SDMA_DESC1_HEADER_DWS_MASK \
148 ((1ULL<<SDMA_DESC1_HEADER_DWS_WIDTH)-1ULL) 148 ((1ULL << SDMA_DESC1_HEADER_DWS_WIDTH) - 1)
149#define SDMA_DESC1_HEADER_DWS_SMASK \ 149#define SDMA_DESC1_HEADER_DWS_SMASK \
150 (SDMA_DESC1_HEADER_DWS_MASK<<SDMA_DESC1_HEADER_DWS_SHIFT) 150 (SDMA_DESC1_HEADER_DWS_MASK << SDMA_DESC1_HEADER_DWS_SHIFT)
151#define SDMA_DESC1_GENERATION_SHIFT 2 151#define SDMA_DESC1_GENERATION_SHIFT 2
152#define SDMA_DESC1_GENERATION_WIDTH 2 152#define SDMA_DESC1_GENERATION_WIDTH 2
153#define SDMA_DESC1_GENERATION_MASK \ 153#define SDMA_DESC1_GENERATION_MASK \
154 ((1ULL<<SDMA_DESC1_GENERATION_WIDTH)-1ULL) 154 ((1ULL << SDMA_DESC1_GENERATION_WIDTH) - 1)
155#define SDMA_DESC1_GENERATION_SMASK \ 155#define SDMA_DESC1_GENERATION_SMASK \
156 (SDMA_DESC1_GENERATION_MASK<<SDMA_DESC1_GENERATION_SHIFT) 156 (SDMA_DESC1_GENERATION_MASK << SDMA_DESC1_GENERATION_SHIFT)
157#define SDMA_DESC1_INT_REQ_FLAG (1ULL<<1) 157#define SDMA_DESC1_INT_REQ_FLAG (1ULL << 1)
158#define SDMA_DESC1_HEAD_TO_HOST_FLAG (1ULL<<0) 158#define SDMA_DESC1_HEAD_TO_HOST_FLAG (1ULL << 0)
159 159
160enum sdma_states { 160enum sdma_states {
161 sdma_state_s00_hw_down, 161 sdma_state_s00_hw_down,
diff --git a/drivers/staging/rdma/hfi1/verbs.c b/drivers/staging/rdma/hfi1/verbs.c
index 53ac21431542..41bb59eb001c 100644
--- a/drivers/staging/rdma/hfi1/verbs.c
+++ b/drivers/staging/rdma/hfi1/verbs.c
@@ -749,11 +749,13 @@ static inline struct verbs_txreq *get_txreq(struct hfi1_ibdev *dev,
749 struct verbs_txreq *tx; 749 struct verbs_txreq *tx;
750 750
751 tx = kmem_cache_alloc(dev->verbs_txreq_cache, GFP_ATOMIC); 751 tx = kmem_cache_alloc(dev->verbs_txreq_cache, GFP_ATOMIC);
752 if (!tx) 752 if (!tx) {
753 /* call slow path to get the lock */ 753 /* call slow path to get the lock */
754 tx = __get_txreq(dev, qp); 754 tx = __get_txreq(dev, qp);
755 if (tx) 755 if (IS_ERR(tx))
756 tx->qp = qp; 756 return tx;
757 }
758 tx->qp = qp;
757 return tx; 759 return tx;
758} 760}
759 761
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index fd092909a457..342a07c58d89 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -269,14 +269,14 @@ int iscsit_deaccess_np(struct iscsi_np *np, struct iscsi_portal_group *tpg,
269} 269}
270 270
271bool iscsit_check_np_match( 271bool iscsit_check_np_match(
272 struct __kernel_sockaddr_storage *sockaddr, 272 struct sockaddr_storage *sockaddr,
273 struct iscsi_np *np, 273 struct iscsi_np *np,
274 int network_transport) 274 int network_transport)
275{ 275{
276 struct sockaddr_in *sock_in, *sock_in_e; 276 struct sockaddr_in *sock_in, *sock_in_e;
277 struct sockaddr_in6 *sock_in6, *sock_in6_e; 277 struct sockaddr_in6 *sock_in6, *sock_in6_e;
278 bool ip_match = false; 278 bool ip_match = false;
279 u16 port; 279 u16 port, port_e;
280 280
281 if (sockaddr->ss_family == AF_INET6) { 281 if (sockaddr->ss_family == AF_INET6) {
282 sock_in6 = (struct sockaddr_in6 *)sockaddr; 282 sock_in6 = (struct sockaddr_in6 *)sockaddr;
@@ -288,6 +288,7 @@ bool iscsit_check_np_match(
288 ip_match = true; 288 ip_match = true;
289 289
290 port = ntohs(sock_in6->sin6_port); 290 port = ntohs(sock_in6->sin6_port);
291 port_e = ntohs(sock_in6_e->sin6_port);
291 } else { 292 } else {
292 sock_in = (struct sockaddr_in *)sockaddr; 293 sock_in = (struct sockaddr_in *)sockaddr;
293 sock_in_e = (struct sockaddr_in *)&np->np_sockaddr; 294 sock_in_e = (struct sockaddr_in *)&np->np_sockaddr;
@@ -296,9 +297,10 @@ bool iscsit_check_np_match(
296 ip_match = true; 297 ip_match = true;
297 298
298 port = ntohs(sock_in->sin_port); 299 port = ntohs(sock_in->sin_port);
300 port_e = ntohs(sock_in_e->sin_port);
299 } 301 }
300 302
301 if (ip_match && (np->np_port == port) && 303 if (ip_match && (port_e == port) &&
302 (np->np_network_transport == network_transport)) 304 (np->np_network_transport == network_transport))
303 return true; 305 return true;
304 306
@@ -309,7 +311,7 @@ bool iscsit_check_np_match(
309 * Called with mutex np_lock held 311 * Called with mutex np_lock held
310 */ 312 */
311static struct iscsi_np *iscsit_get_np( 313static struct iscsi_np *iscsit_get_np(
312 struct __kernel_sockaddr_storage *sockaddr, 314 struct sockaddr_storage *sockaddr,
313 int network_transport) 315 int network_transport)
314{ 316{
315 struct iscsi_np *np; 317 struct iscsi_np *np;
@@ -340,12 +342,9 @@ static struct iscsi_np *iscsit_get_np(
340} 342}
341 343
342struct iscsi_np *iscsit_add_np( 344struct iscsi_np *iscsit_add_np(
343 struct __kernel_sockaddr_storage *sockaddr, 345 struct sockaddr_storage *sockaddr,
344 char *ip_str,
345 int network_transport) 346 int network_transport)
346{ 347{
347 struct sockaddr_in *sock_in;
348 struct sockaddr_in6 *sock_in6;
349 struct iscsi_np *np; 348 struct iscsi_np *np;
350 int ret; 349 int ret;
351 350
@@ -368,16 +367,6 @@ struct iscsi_np *iscsit_add_np(
368 } 367 }
369 368
370 np->np_flags |= NPF_IP_NETWORK; 369 np->np_flags |= NPF_IP_NETWORK;
371 if (sockaddr->ss_family == AF_INET6) {
372 sock_in6 = (struct sockaddr_in6 *)sockaddr;
373 snprintf(np->np_ip, IPV6_ADDRESS_SPACE, "%s", ip_str);
374 np->np_port = ntohs(sock_in6->sin6_port);
375 } else {
376 sock_in = (struct sockaddr_in *)sockaddr;
377 sprintf(np->np_ip, "%s", ip_str);
378 np->np_port = ntohs(sock_in->sin_port);
379 }
380
381 np->np_network_transport = network_transport; 370 np->np_network_transport = network_transport;
382 spin_lock_init(&np->np_thread_lock); 371 spin_lock_init(&np->np_thread_lock);
383 init_completion(&np->np_restart_comp); 372 init_completion(&np->np_restart_comp);
@@ -411,8 +400,8 @@ struct iscsi_np *iscsit_add_np(
411 list_add_tail(&np->np_list, &g_np_list); 400 list_add_tail(&np->np_list, &g_np_list);
412 mutex_unlock(&np_lock); 401 mutex_unlock(&np_lock);
413 402
414 pr_debug("CORE[0] - Added Network Portal: %s:%hu on %s\n", 403 pr_debug("CORE[0] - Added Network Portal: %pISpc on %s\n",
415 np->np_ip, np->np_port, np->np_transport->name); 404 &np->np_sockaddr, np->np_transport->name);
416 405
417 return np; 406 return np;
418} 407}
@@ -481,8 +470,8 @@ int iscsit_del_np(struct iscsi_np *np)
481 list_del(&np->np_list); 470 list_del(&np->np_list);
482 mutex_unlock(&np_lock); 471 mutex_unlock(&np_lock);
483 472
484 pr_debug("CORE[0] - Removed Network Portal: %s:%hu on %s\n", 473 pr_debug("CORE[0] - Removed Network Portal: %pISpc on %s\n",
485 np->np_ip, np->np_port, np->np_transport->name); 474 &np->np_sockaddr, np->np_transport->name);
486 475
487 iscsit_put_transport(np->np_transport); 476 iscsit_put_transport(np->np_transport);
488 kfree(np); 477 kfree(np);
@@ -1209,7 +1198,6 @@ static u32 iscsit_do_crypto_hash_sg(
1209 u8 *pad_bytes) 1198 u8 *pad_bytes)
1210{ 1199{
1211 u32 data_crc; 1200 u32 data_crc;
1212 u32 i;
1213 struct scatterlist *sg; 1201 struct scatterlist *sg;
1214 unsigned int page_off; 1202 unsigned int page_off;
1215 1203
@@ -1218,15 +1206,15 @@ static u32 iscsit_do_crypto_hash_sg(
1218 sg = cmd->first_data_sg; 1206 sg = cmd->first_data_sg;
1219 page_off = cmd->first_data_sg_off; 1207 page_off = cmd->first_data_sg_off;
1220 1208
1221 i = 0;
1222 while (data_length) { 1209 while (data_length) {
1223 u32 cur_len = min_t(u32, data_length, (sg[i].length - page_off)); 1210 u32 cur_len = min_t(u32, data_length, (sg->length - page_off));
1224 1211
1225 crypto_hash_update(hash, &sg[i], cur_len); 1212 crypto_hash_update(hash, sg, cur_len);
1226 1213
1227 data_length -= cur_len; 1214 data_length -= cur_len;
1228 page_off = 0; 1215 page_off = 0;
1229 i++; 1216 /* iscsit_map_iovec has already checked for invalid sg pointers */
1217 sg = sg_next(sg);
1230 } 1218 }
1231 1219
1232 if (padding) { 1220 if (padding) {
@@ -2556,7 +2544,7 @@ static int iscsit_send_conn_drop_async_message(
2556 cmd->stat_sn = conn->stat_sn++; 2544 cmd->stat_sn = conn->stat_sn++;
2557 hdr->statsn = cpu_to_be32(cmd->stat_sn); 2545 hdr->statsn = cpu_to_be32(cmd->stat_sn);
2558 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn); 2546 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
2559 hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn); 2547 hdr->max_cmdsn = cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn));
2560 hdr->async_event = ISCSI_ASYNC_MSG_DROPPING_CONNECTION; 2548 hdr->async_event = ISCSI_ASYNC_MSG_DROPPING_CONNECTION;
2561 hdr->param1 = cpu_to_be16(cmd->logout_cid); 2549 hdr->param1 = cpu_to_be16(cmd->logout_cid);
2562 hdr->param2 = cpu_to_be16(conn->sess->sess_ops->DefaultTime2Wait); 2550 hdr->param2 = cpu_to_be16(conn->sess->sess_ops->DefaultTime2Wait);
@@ -2628,7 +2616,7 @@ iscsit_build_datain_pdu(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
2628 hdr->statsn = cpu_to_be32(0xFFFFFFFF); 2616 hdr->statsn = cpu_to_be32(0xFFFFFFFF);
2629 2617
2630 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn); 2618 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
2631 hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn); 2619 hdr->max_cmdsn = cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn));
2632 hdr->datasn = cpu_to_be32(datain->data_sn); 2620 hdr->datasn = cpu_to_be32(datain->data_sn);
2633 hdr->offset = cpu_to_be32(datain->offset); 2621 hdr->offset = cpu_to_be32(datain->offset);
2634 2622
@@ -2839,7 +2827,7 @@ iscsit_build_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
2839 2827
2840 iscsit_increment_maxcmdsn(cmd, conn->sess); 2828 iscsit_increment_maxcmdsn(cmd, conn->sess);
2841 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn); 2829 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
2842 hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn); 2830 hdr->max_cmdsn = cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn));
2843 2831
2844 pr_debug("Built Logout Response ITT: 0x%08x StatSN:" 2832 pr_debug("Built Logout Response ITT: 0x%08x StatSN:"
2845 " 0x%08x Response: 0x%02x CID: %hu on CID: %hu\n", 2833 " 0x%08x Response: 0x%02x CID: %hu on CID: %hu\n",
@@ -2902,7 +2890,7 @@ iscsit_build_nopin_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
2902 iscsit_increment_maxcmdsn(cmd, conn->sess); 2890 iscsit_increment_maxcmdsn(cmd, conn->sess);
2903 2891
2904 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn); 2892 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
2905 hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn); 2893 hdr->max_cmdsn = cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn));
2906 2894
2907 pr_debug("Built NOPIN %s Response ITT: 0x%08x, TTT: 0x%08x," 2895 pr_debug("Built NOPIN %s Response ITT: 0x%08x, TTT: 0x%08x,"
2908 " StatSN: 0x%08x, Length %u\n", (nopout_response) ? 2896 " StatSN: 0x%08x, Length %u\n", (nopout_response) ?
@@ -3049,7 +3037,7 @@ static int iscsit_send_r2t(
3049 hdr->ttt = cpu_to_be32(r2t->targ_xfer_tag); 3037 hdr->ttt = cpu_to_be32(r2t->targ_xfer_tag);
3050 hdr->statsn = cpu_to_be32(conn->stat_sn); 3038 hdr->statsn = cpu_to_be32(conn->stat_sn);
3051 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn); 3039 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
3052 hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn); 3040 hdr->max_cmdsn = cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn));
3053 hdr->r2tsn = cpu_to_be32(r2t->r2t_sn); 3041 hdr->r2tsn = cpu_to_be32(r2t->r2t_sn);
3054 hdr->data_offset = cpu_to_be32(r2t->offset); 3042 hdr->data_offset = cpu_to_be32(r2t->offset);
3055 hdr->data_length = cpu_to_be32(r2t->xfer_len); 3043 hdr->data_length = cpu_to_be32(r2t->xfer_len);
@@ -3202,7 +3190,7 @@ void iscsit_build_rsp_pdu(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
3202 3190
3203 iscsit_increment_maxcmdsn(cmd, conn->sess); 3191 iscsit_increment_maxcmdsn(cmd, conn->sess);
3204 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn); 3192 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
3205 hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn); 3193 hdr->max_cmdsn = cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn));
3206 3194
3207 pr_debug("Built SCSI Response, ITT: 0x%08x, StatSN: 0x%08x," 3195 pr_debug("Built SCSI Response, ITT: 0x%08x, StatSN: 0x%08x,"
3208 " Response: 0x%02x, SAM Status: 0x%02x, CID: %hu\n", 3196 " Response: 0x%02x, SAM Status: 0x%02x, CID: %hu\n",
@@ -3321,7 +3309,7 @@ iscsit_build_task_mgt_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
3321 3309
3322 iscsit_increment_maxcmdsn(cmd, conn->sess); 3310 iscsit_increment_maxcmdsn(cmd, conn->sess);
3323 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn); 3311 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
3324 hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn); 3312 hdr->max_cmdsn = cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn));
3325 3313
3326 pr_debug("Built Task Management Response ITT: 0x%08x," 3314 pr_debug("Built Task Management Response ITT: 0x%08x,"
3327 " StatSN: 0x%08x, Response: 0x%02x, CID: %hu\n", 3315 " StatSN: 0x%08x, Response: 0x%02x, CID: %hu\n",
@@ -3399,6 +3387,7 @@ iscsit_build_sendtargets_response(struct iscsi_cmd *cmd,
3399 int target_name_printed; 3387 int target_name_printed;
3400 unsigned char buf[ISCSI_IQN_LEN+12]; /* iqn + "TargetName=" + \0 */ 3388 unsigned char buf[ISCSI_IQN_LEN+12]; /* iqn + "TargetName=" + \0 */
3401 unsigned char *text_in = cmd->text_in_ptr, *text_ptr = NULL; 3389 unsigned char *text_in = cmd->text_in_ptr, *text_ptr = NULL;
3390 bool active;
3402 3391
3403 buffer_len = min(conn->conn_ops->MaxRecvDataSegmentLength, 3392 buffer_len = min(conn->conn_ops->MaxRecvDataSegmentLength,
3404 SENDTARGETS_BUF_LIMIT); 3393 SENDTARGETS_BUF_LIMIT);
@@ -3452,19 +3441,18 @@ iscsit_build_sendtargets_response(struct iscsi_cmd *cmd,
3452 } 3441 }
3453 3442
3454 spin_lock(&tpg->tpg_state_lock); 3443 spin_lock(&tpg->tpg_state_lock);
3455 if ((tpg->tpg_state == TPG_STATE_FREE) || 3444 active = (tpg->tpg_state == TPG_STATE_ACTIVE);
3456 (tpg->tpg_state == TPG_STATE_INACTIVE)) {
3457 spin_unlock(&tpg->tpg_state_lock);
3458 continue;
3459 }
3460 spin_unlock(&tpg->tpg_state_lock); 3445 spin_unlock(&tpg->tpg_state_lock);
3461 3446
3447 if (!active && tpg->tpg_attrib.tpg_enabled_sendtargets)
3448 continue;
3449
3462 spin_lock(&tpg->tpg_np_lock); 3450 spin_lock(&tpg->tpg_np_lock);
3463 list_for_each_entry(tpg_np, &tpg->tpg_gnp_list, 3451 list_for_each_entry(tpg_np, &tpg->tpg_gnp_list,
3464 tpg_np_list) { 3452 tpg_np_list) {
3465 struct iscsi_np *np = tpg_np->tpg_np; 3453 struct iscsi_np *np = tpg_np->tpg_np;
3466 bool inaddr_any = iscsit_check_inaddr_any(np); 3454 bool inaddr_any = iscsit_check_inaddr_any(np);
3467 char *fmt_str; 3455 struct sockaddr_storage *sockaddr;
3468 3456
3469 if (np->np_network_transport != network_transport) 3457 if (np->np_network_transport != network_transport)
3470 continue; 3458 continue;
@@ -3492,15 +3480,15 @@ iscsit_build_sendtargets_response(struct iscsi_cmd *cmd,
3492 } 3480 }
3493 } 3481 }
3494 3482
3495 if (np->np_sockaddr.ss_family == AF_INET6) 3483 if (inaddr_any)
3496 fmt_str = "TargetAddress=[%s]:%hu,%hu"; 3484 sockaddr = &conn->local_sockaddr;
3497 else 3485 else
3498 fmt_str = "TargetAddress=%s:%hu,%hu"; 3486 sockaddr = &np->np_sockaddr;
3499 3487
3500 len = sprintf(buf, fmt_str, 3488 len = sprintf(buf, "TargetAddress="
3501 inaddr_any ? conn->local_ip : np->np_ip, 3489 "%pISpc,%hu",
3502 np->np_port, 3490 sockaddr,
3503 tpg->tpgt); 3491 tpg->tpgt);
3504 len += 1; 3492 len += 1;
3505 3493
3506 if ((len + payload_len) > buffer_len) { 3494 if ((len + payload_len) > buffer_len) {
@@ -3576,7 +3564,7 @@ iscsit_build_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
3576 */ 3564 */
3577 cmd->maxcmdsn_inc = 0; 3565 cmd->maxcmdsn_inc = 0;
3578 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn); 3566 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
3579 hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn); 3567 hdr->max_cmdsn = cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn));
3580 3568
3581 pr_debug("Built Text Response: ITT: 0x%08x, TTT: 0x%08x, StatSN: 0x%08x," 3569 pr_debug("Built Text Response: ITT: 0x%08x, TTT: 0x%08x, StatSN: 0x%08x,"
3582 " Length: %u, CID: %hu F: %d C: %d\n", cmd->init_task_tag, 3570 " Length: %u, CID: %hu F: %d C: %d\n", cmd->init_task_tag,
@@ -3654,7 +3642,7 @@ iscsit_build_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
3654 cmd->stat_sn = conn->stat_sn++; 3642 cmd->stat_sn = conn->stat_sn++;
3655 hdr->statsn = cpu_to_be32(cmd->stat_sn); 3643 hdr->statsn = cpu_to_be32(cmd->stat_sn);
3656 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn); 3644 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
3657 hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn); 3645 hdr->max_cmdsn = cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn));
3658 3646
3659} 3647}
3660EXPORT_SYMBOL(iscsit_build_reject); 3648EXPORT_SYMBOL(iscsit_build_reject);
diff --git a/drivers/target/iscsi/iscsi_target.h b/drivers/target/iscsi/iscsi_target.h
index 7d0f9c00d9c2..4cf2c0f2ba2f 100644
--- a/drivers/target/iscsi/iscsi_target.h
+++ b/drivers/target/iscsi/iscsi_target.h
@@ -10,10 +10,10 @@ extern int iscsit_access_np(struct iscsi_np *, struct iscsi_portal_group *);
10extern void iscsit_login_kref_put(struct kref *); 10extern void iscsit_login_kref_put(struct kref *);
11extern int iscsit_deaccess_np(struct iscsi_np *, struct iscsi_portal_group *, 11extern int iscsit_deaccess_np(struct iscsi_np *, struct iscsi_portal_group *,
12 struct iscsi_tpg_np *); 12 struct iscsi_tpg_np *);
13extern bool iscsit_check_np_match(struct __kernel_sockaddr_storage *, 13extern bool iscsit_check_np_match(struct sockaddr_storage *,
14 struct iscsi_np *, int); 14 struct iscsi_np *, int);
15extern struct iscsi_np *iscsit_add_np(struct __kernel_sockaddr_storage *, 15extern struct iscsi_np *iscsit_add_np(struct sockaddr_storage *,
16 char *, int); 16 int);
17extern int iscsit_reset_np_thread(struct iscsi_np *, struct iscsi_tpg_np *, 17extern int iscsit_reset_np_thread(struct iscsi_np *, struct iscsi_tpg_np *,
18 struct iscsi_portal_group *, bool); 18 struct iscsi_portal_group *, bool);
19extern int iscsit_del_np(struct iscsi_np *); 19extern int iscsit_del_np(struct iscsi_np *);
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index c1898c84b3d2..c7461d770d3a 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -99,7 +99,7 @@ static ssize_t lio_target_np_store_sctp(
99 * Use existing np->np_sockaddr for SCTP network portal reference 99 * Use existing np->np_sockaddr for SCTP network portal reference
100 */ 100 */
101 tpg_np_sctp = iscsit_tpg_add_network_portal(tpg, &np->np_sockaddr, 101 tpg_np_sctp = iscsit_tpg_add_network_portal(tpg, &np->np_sockaddr,
102 np->np_ip, tpg_np, ISCSI_SCTP_TCP); 102 tpg_np, ISCSI_SCTP_TCP);
103 if (!tpg_np_sctp || IS_ERR(tpg_np_sctp)) 103 if (!tpg_np_sctp || IS_ERR(tpg_np_sctp))
104 goto out; 104 goto out;
105 } else { 105 } else {
@@ -177,7 +177,7 @@ static ssize_t lio_target_np_store_iser(
177 } 177 }
178 178
179 tpg_np_iser = iscsit_tpg_add_network_portal(tpg, &np->np_sockaddr, 179 tpg_np_iser = iscsit_tpg_add_network_portal(tpg, &np->np_sockaddr,
180 np->np_ip, tpg_np, ISCSI_INFINIBAND); 180 tpg_np, ISCSI_INFINIBAND);
181 if (IS_ERR(tpg_np_iser)) { 181 if (IS_ERR(tpg_np_iser)) {
182 rc = PTR_ERR(tpg_np_iser); 182 rc = PTR_ERR(tpg_np_iser);
183 goto out; 183 goto out;
@@ -220,7 +220,7 @@ static struct se_tpg_np *lio_target_call_addnptotpg(
220 struct iscsi_portal_group *tpg; 220 struct iscsi_portal_group *tpg;
221 struct iscsi_tpg_np *tpg_np; 221 struct iscsi_tpg_np *tpg_np;
222 char *str, *str2, *ip_str, *port_str; 222 char *str, *str2, *ip_str, *port_str;
223 struct __kernel_sockaddr_storage sockaddr; 223 struct sockaddr_storage sockaddr;
224 struct sockaddr_in *sock_in; 224 struct sockaddr_in *sock_in;
225 struct sockaddr_in6 *sock_in6; 225 struct sockaddr_in6 *sock_in6;
226 unsigned long port; 226 unsigned long port;
@@ -235,7 +235,7 @@ static struct se_tpg_np *lio_target_call_addnptotpg(
235 memset(buf, 0, MAX_PORTAL_LEN + 1); 235 memset(buf, 0, MAX_PORTAL_LEN + 1);
236 snprintf(buf, MAX_PORTAL_LEN + 1, "%s", name); 236 snprintf(buf, MAX_PORTAL_LEN + 1, "%s", name);
237 237
238 memset(&sockaddr, 0, sizeof(struct __kernel_sockaddr_storage)); 238 memset(&sockaddr, 0, sizeof(struct sockaddr_storage));
239 239
240 str = strstr(buf, "["); 240 str = strstr(buf, "[");
241 if (str) { 241 if (str) {
@@ -248,8 +248,8 @@ static struct se_tpg_np *lio_target_call_addnptotpg(
248 return ERR_PTR(-EINVAL); 248 return ERR_PTR(-EINVAL);
249 } 249 }
250 str++; /* Skip over leading "[" */ 250 str++; /* Skip over leading "[" */
251 *str2 = '\0'; /* Terminate the IPv6 address */ 251 *str2 = '\0'; /* Terminate the unbracketed IPv6 address */
252 str2++; /* Skip over the "]" */ 252 str2++; /* Skip over the \0 */
253 port_str = strstr(str2, ":"); 253 port_str = strstr(str2, ":");
254 if (!port_str) { 254 if (!port_str) {
255 pr_err("Unable to locate \":port\"" 255 pr_err("Unable to locate \":port\""
@@ -267,7 +267,7 @@ static struct se_tpg_np *lio_target_call_addnptotpg(
267 sock_in6 = (struct sockaddr_in6 *)&sockaddr; 267 sock_in6 = (struct sockaddr_in6 *)&sockaddr;
268 sock_in6->sin6_family = AF_INET6; 268 sock_in6->sin6_family = AF_INET6;
269 sock_in6->sin6_port = htons((unsigned short)port); 269 sock_in6->sin6_port = htons((unsigned short)port);
270 ret = in6_pton(str, IPV6_ADDRESS_SPACE, 270 ret = in6_pton(str, -1,
271 (void *)&sock_in6->sin6_addr.in6_u, -1, &end); 271 (void *)&sock_in6->sin6_addr.in6_u, -1, &end);
272 if (ret <= 0) { 272 if (ret <= 0) {
273 pr_err("in6_pton returned: %d\n", ret); 273 pr_err("in6_pton returned: %d\n", ret);
@@ -316,7 +316,7 @@ static struct se_tpg_np *lio_target_call_addnptotpg(
316 * sys/kernel/config/iscsi/$IQN/$TPG/np/$IP:$PORT/ 316 * sys/kernel/config/iscsi/$IQN/$TPG/np/$IP:$PORT/
317 * 317 *
318 */ 318 */
319 tpg_np = iscsit_tpg_add_network_portal(tpg, &sockaddr, str, NULL, 319 tpg_np = iscsit_tpg_add_network_portal(tpg, &sockaddr, NULL,
320 ISCSI_TCP); 320 ISCSI_TCP);
321 if (IS_ERR(tpg_np)) { 321 if (IS_ERR(tpg_np)) {
322 iscsit_put_tpg(tpg); 322 iscsit_put_tpg(tpg);
@@ -344,8 +344,8 @@ static void lio_target_call_delnpfromtpg(
344 344
345 se_tpg = &tpg->tpg_se_tpg; 345 se_tpg = &tpg->tpg_se_tpg;
346 pr_debug("LIO_Target_ConfigFS: DEREGISTER -> %s TPGT: %hu" 346 pr_debug("LIO_Target_ConfigFS: DEREGISTER -> %s TPGT: %hu"
347 " PORTAL: %s:%hu\n", config_item_name(&se_tpg->se_tpg_wwn->wwn_group.cg_item), 347 " PORTAL: %pISpc\n", config_item_name(&se_tpg->se_tpg_wwn->wwn_group.cg_item),
348 tpg->tpgt, tpg_np->tpg_np->np_ip, tpg_np->tpg_np->np_port); 348 tpg->tpgt, &tpg_np->tpg_np->np_sockaddr);
349 349
350 ret = iscsit_tpg_del_network_portal(tpg, tpg_np); 350 ret = iscsit_tpg_del_network_portal(tpg, tpg_np);
351 if (ret < 0) 351 if (ret < 0)
@@ -656,6 +656,7 @@ static ssize_t lio_target_nacl_show_info(
656 struct iscsi_conn *conn; 656 struct iscsi_conn *conn;
657 struct se_session *se_sess; 657 struct se_session *se_sess;
658 ssize_t rb = 0; 658 ssize_t rb = 0;
659 u32 max_cmd_sn;
659 660
660 spin_lock_bh(&se_nacl->nacl_sess_lock); 661 spin_lock_bh(&se_nacl->nacl_sess_lock);
661 se_sess = se_nacl->nacl_sess; 662 se_sess = se_nacl->nacl_sess;
@@ -703,11 +704,12 @@ static ssize_t lio_target_nacl_show_info(
703 " Values]-----------------------\n"); 704 " Values]-----------------------\n");
704 rb += sprintf(page+rb, " CmdSN/WR : CmdSN/WC : ExpCmdSN" 705 rb += sprintf(page+rb, " CmdSN/WR : CmdSN/WC : ExpCmdSN"
705 " : MaxCmdSN : ITT : TTT\n"); 706 " : MaxCmdSN : ITT : TTT\n");
707 max_cmd_sn = (u32) atomic_read(&sess->max_cmd_sn);
706 rb += sprintf(page+rb, " 0x%08x 0x%08x 0x%08x 0x%08x" 708 rb += sprintf(page+rb, " 0x%08x 0x%08x 0x%08x 0x%08x"
707 " 0x%08x 0x%08x\n", 709 " 0x%08x 0x%08x\n",
708 sess->cmdsn_window, 710 sess->cmdsn_window,
709 (sess->max_cmd_sn - sess->exp_cmd_sn) + 1, 711 (max_cmd_sn - sess->exp_cmd_sn) + 1,
710 sess->exp_cmd_sn, sess->max_cmd_sn, 712 sess->exp_cmd_sn, max_cmd_sn,
711 sess->init_task_tag, sess->targ_xfer_tag); 713 sess->init_task_tag, sess->targ_xfer_tag);
712 rb += sprintf(page+rb, "----------------------[iSCSI" 714 rb += sprintf(page+rb, "----------------------[iSCSI"
713 " Connections]-------------------------\n"); 715 " Connections]-------------------------\n");
@@ -751,7 +753,7 @@ static ssize_t lio_target_nacl_show_info(
751 break; 753 break;
752 } 754 }
753 755
754 rb += sprintf(page+rb, " Address %s %s", conn->login_ip, 756 rb += sprintf(page+rb, " Address %pISc %s", &conn->login_sockaddr,
755 (conn->network_transport == ISCSI_TCP) ? 757 (conn->network_transport == ISCSI_TCP) ?
756 "TCP" : "SCTP"); 758 "TCP" : "SCTP");
757 rb += sprintf(page+rb, " StatSN: 0x%08x\n", 759 rb += sprintf(page+rb, " StatSN: 0x%08x\n",
@@ -1010,6 +1012,11 @@ TPG_ATTR(t10_pi, S_IRUGO | S_IWUSR);
1010 */ 1012 */
1011DEF_TPG_ATTRIB(fabric_prot_type); 1013DEF_TPG_ATTRIB(fabric_prot_type);
1012TPG_ATTR(fabric_prot_type, S_IRUGO | S_IWUSR); 1014TPG_ATTR(fabric_prot_type, S_IRUGO | S_IWUSR);
1015/*
1016 * Define iscsi_tpg_attrib_s_tpg_enabled_sendtargets
1017 */
1018DEF_TPG_ATTRIB(tpg_enabled_sendtargets);
1019TPG_ATTR(tpg_enabled_sendtargets, S_IRUGO | S_IWUSR);
1013 1020
1014static struct configfs_attribute *lio_target_tpg_attrib_attrs[] = { 1021static struct configfs_attribute *lio_target_tpg_attrib_attrs[] = {
1015 &iscsi_tpg_attrib_authentication.attr, 1022 &iscsi_tpg_attrib_authentication.attr,
@@ -1024,6 +1031,7 @@ static struct configfs_attribute *lio_target_tpg_attrib_attrs[] = {
1024 &iscsi_tpg_attrib_default_erl.attr, 1031 &iscsi_tpg_attrib_default_erl.attr,
1025 &iscsi_tpg_attrib_t10_pi.attr, 1032 &iscsi_tpg_attrib_t10_pi.attr,
1026 &iscsi_tpg_attrib_fabric_prot_type.attr, 1033 &iscsi_tpg_attrib_fabric_prot_type.attr,
1034 &iscsi_tpg_attrib_tpg_enabled_sendtargets.attr,
1027 NULL, 1035 NULL,
1028}; 1036};
1029 1037
diff --git a/drivers/target/iscsi/iscsi_target_device.c b/drivers/target/iscsi/iscsi_target_device.c
index 5fabcd3d623f..0382fa24b53b 100644
--- a/drivers/target/iscsi/iscsi_target_device.c
+++ b/drivers/target/iscsi/iscsi_target_device.c
@@ -47,19 +47,19 @@ void iscsit_determine_maxcmdsn(struct iscsi_session *sess)
47 * core_set_queue_depth_for_node(). 47 * core_set_queue_depth_for_node().
48 */ 48 */
49 sess->cmdsn_window = se_nacl->queue_depth; 49 sess->cmdsn_window = se_nacl->queue_depth;
50 sess->max_cmd_sn = (sess->max_cmd_sn + se_nacl->queue_depth) - 1; 50 atomic_add(se_nacl->queue_depth - 1, &sess->max_cmd_sn);
51} 51}
52 52
53void iscsit_increment_maxcmdsn(struct iscsi_cmd *cmd, struct iscsi_session *sess) 53void iscsit_increment_maxcmdsn(struct iscsi_cmd *cmd, struct iscsi_session *sess)
54{ 54{
55 u32 max_cmd_sn;
56
55 if (cmd->immediate_cmd || cmd->maxcmdsn_inc) 57 if (cmd->immediate_cmd || cmd->maxcmdsn_inc)
56 return; 58 return;
57 59
58 cmd->maxcmdsn_inc = 1; 60 cmd->maxcmdsn_inc = 1;
59 61
60 mutex_lock(&sess->cmdsn_mutex); 62 max_cmd_sn = atomic_inc_return(&sess->max_cmd_sn);
61 sess->max_cmd_sn += 1; 63 pr_debug("Updated MaxCmdSN to 0x%08x\n", max_cmd_sn);
62 pr_debug("Updated MaxCmdSN to 0x%08x\n", sess->max_cmd_sn);
63 mutex_unlock(&sess->cmdsn_mutex);
64} 64}
65EXPORT_SYMBOL(iscsit_increment_maxcmdsn); 65EXPORT_SYMBOL(iscsit_increment_maxcmdsn);
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index 7e8f65e5448f..96e78c823d13 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -331,7 +331,7 @@ static int iscsi_login_zero_tsih_s1(
331 * The FFP CmdSN window values will be allocated from the TPG's 331 * The FFP CmdSN window values will be allocated from the TPG's
332 * Initiator Node's ACL once the login has been successfully completed. 332 * Initiator Node's ACL once the login has been successfully completed.
333 */ 333 */
334 sess->max_cmd_sn = be32_to_cpu(pdu->cmdsn); 334 atomic_set(&sess->max_cmd_sn, be32_to_cpu(pdu->cmdsn));
335 335
336 sess->sess_ops = kzalloc(sizeof(struct iscsi_sess_ops), GFP_KERNEL); 336 sess->sess_ops = kzalloc(sizeof(struct iscsi_sess_ops), GFP_KERNEL);
337 if (!sess->sess_ops) { 337 if (!sess->sess_ops) {
@@ -729,9 +729,9 @@ void iscsi_post_login_handler(
729 stop_timer = 1; 729 stop_timer = 1;
730 } 730 }
731 731
732 pr_debug("iSCSI Login successful on CID: %hu from %s to" 732 pr_debug("iSCSI Login successful on CID: %hu from %pISpc to"
733 " %s:%hu,%hu\n", conn->cid, conn->login_ip, 733 " %pISpc,%hu\n", conn->cid, &conn->login_sockaddr,
734 conn->local_ip, conn->local_port, tpg->tpgt); 734 &conn->local_sockaddr, tpg->tpgt);
735 735
736 list_add_tail(&conn->conn_list, &sess->sess_conn_list); 736 list_add_tail(&conn->conn_list, &sess->sess_conn_list);
737 atomic_inc(&sess->nconn); 737 atomic_inc(&sess->nconn);
@@ -776,8 +776,8 @@ void iscsi_post_login_handler(
776 pr_debug("Moving to TARG_SESS_STATE_LOGGED_IN.\n"); 776 pr_debug("Moving to TARG_SESS_STATE_LOGGED_IN.\n");
777 sess->session_state = TARG_SESS_STATE_LOGGED_IN; 777 sess->session_state = TARG_SESS_STATE_LOGGED_IN;
778 778
779 pr_debug("iSCSI Login successful on CID: %hu from %s to %s:%hu,%hu\n", 779 pr_debug("iSCSI Login successful on CID: %hu from %pISpc to %pISpc,%hu\n",
780 conn->cid, conn->login_ip, conn->local_ip, conn->local_port, 780 conn->cid, &conn->login_sockaddr, &conn->local_sockaddr,
781 tpg->tpgt); 781 tpg->tpgt);
782 782
783 spin_lock_bh(&sess->conn_lock); 783 spin_lock_bh(&sess->conn_lock);
@@ -823,8 +823,8 @@ static void iscsi_handle_login_thread_timeout(unsigned long data)
823 struct iscsi_np *np = (struct iscsi_np *) data; 823 struct iscsi_np *np = (struct iscsi_np *) data;
824 824
825 spin_lock_bh(&np->np_thread_lock); 825 spin_lock_bh(&np->np_thread_lock);
826 pr_err("iSCSI Login timeout on Network Portal %s:%hu\n", 826 pr_err("iSCSI Login timeout on Network Portal %pISpc\n",
827 np->np_ip, np->np_port); 827 &np->np_sockaddr);
828 828
829 if (np->np_login_timer_flags & ISCSI_TF_STOP) { 829 if (np->np_login_timer_flags & ISCSI_TF_STOP) {
830 spin_unlock_bh(&np->np_thread_lock); 830 spin_unlock_bh(&np->np_thread_lock);
@@ -877,7 +877,7 @@ static void iscsi_stop_login_thread_timer(struct iscsi_np *np)
877 877
878int iscsit_setup_np( 878int iscsit_setup_np(
879 struct iscsi_np *np, 879 struct iscsi_np *np,
880 struct __kernel_sockaddr_storage *sockaddr) 880 struct sockaddr_storage *sockaddr)
881{ 881{
882 struct socket *sock = NULL; 882 struct socket *sock = NULL;
883 int backlog = ISCSIT_TCP_BACKLOG, ret, opt = 0, len; 883 int backlog = ISCSIT_TCP_BACKLOG, ret, opt = 0, len;
@@ -916,7 +916,7 @@ int iscsit_setup_np(
916 * in iscsi_target_configfs.c code.. 916 * in iscsi_target_configfs.c code..
917 */ 917 */
918 memcpy(&np->np_sockaddr, sockaddr, 918 memcpy(&np->np_sockaddr, sockaddr,
919 sizeof(struct __kernel_sockaddr_storage)); 919 sizeof(struct sockaddr_storage));
920 920
921 if (sockaddr->ss_family == AF_INET6) 921 if (sockaddr->ss_family == AF_INET6)
922 len = sizeof(struct sockaddr_in6); 922 len = sizeof(struct sockaddr_in6);
@@ -975,7 +975,7 @@ fail:
975 975
976int iscsi_target_setup_login_socket( 976int iscsi_target_setup_login_socket(
977 struct iscsi_np *np, 977 struct iscsi_np *np,
978 struct __kernel_sockaddr_storage *sockaddr) 978 struct sockaddr_storage *sockaddr)
979{ 979{
980 struct iscsit_transport *t; 980 struct iscsit_transport *t;
981 int rc; 981 int rc;
@@ -1015,44 +1015,42 @@ int iscsit_accept_np(struct iscsi_np *np, struct iscsi_conn *conn)
1015 rc = conn->sock->ops->getname(conn->sock, 1015 rc = conn->sock->ops->getname(conn->sock,
1016 (struct sockaddr *)&sock_in6, &err, 1); 1016 (struct sockaddr *)&sock_in6, &err, 1);
1017 if (!rc) { 1017 if (!rc) {
1018 if (!ipv6_addr_v4mapped(&sock_in6.sin6_addr)) 1018 if (!ipv6_addr_v4mapped(&sock_in6.sin6_addr)) {
1019 snprintf(conn->login_ip, sizeof(conn->login_ip), "[%pI6c]", 1019 memcpy(&conn->login_sockaddr, &sock_in6, sizeof(sock_in6));
1020 &sock_in6.sin6_addr.in6_u); 1020 } else {
1021 else 1021 /* Pretend to be an ipv4 socket */
1022 snprintf(conn->login_ip, sizeof(conn->login_ip), "%pI4", 1022 sock_in.sin_family = AF_INET;
1023 &sock_in6.sin6_addr.s6_addr32[3]); 1023 sock_in.sin_port = sock_in6.sin6_port;
1024 conn->login_port = ntohs(sock_in6.sin6_port); 1024 memcpy(&sock_in.sin_addr, &sock_in6.sin6_addr.s6_addr32[3], 4);
1025 memcpy(&conn->login_sockaddr, &sock_in, sizeof(sock_in));
1026 }
1025 } 1027 }
1026 1028
1027 rc = conn->sock->ops->getname(conn->sock, 1029 rc = conn->sock->ops->getname(conn->sock,
1028 (struct sockaddr *)&sock_in6, &err, 0); 1030 (struct sockaddr *)&sock_in6, &err, 0);
1029 if (!rc) { 1031 if (!rc) {
1030 if (!ipv6_addr_v4mapped(&sock_in6.sin6_addr)) 1032 if (!ipv6_addr_v4mapped(&sock_in6.sin6_addr)) {
1031 snprintf(conn->local_ip, sizeof(conn->local_ip), "[%pI6c]", 1033 memcpy(&conn->local_sockaddr, &sock_in6, sizeof(sock_in6));
1032 &sock_in6.sin6_addr.in6_u); 1034 } else {
1033 else 1035 /* Pretend to be an ipv4 socket */
1034 snprintf(conn->local_ip, sizeof(conn->local_ip), "%pI4", 1036 sock_in.sin_family = AF_INET;
1035 &sock_in6.sin6_addr.s6_addr32[3]); 1037 sock_in.sin_port = sock_in6.sin6_port;
1036 conn->local_port = ntohs(sock_in6.sin6_port); 1038 memcpy(&sock_in.sin_addr, &sock_in6.sin6_addr.s6_addr32[3], 4);
1039 memcpy(&conn->local_sockaddr, &sock_in, sizeof(sock_in));
1040 }
1037 } 1041 }
1038 } else { 1042 } else {
1039 memset(&sock_in, 0, sizeof(struct sockaddr_in)); 1043 memset(&sock_in, 0, sizeof(struct sockaddr_in));
1040 1044
1041 rc = conn->sock->ops->getname(conn->sock, 1045 rc = conn->sock->ops->getname(conn->sock,
1042 (struct sockaddr *)&sock_in, &err, 1); 1046 (struct sockaddr *)&sock_in, &err, 1);
1043 if (!rc) { 1047 if (!rc)
1044 sprintf(conn->login_ip, "%pI4", 1048 memcpy(&conn->login_sockaddr, &sock_in, sizeof(sock_in));
1045 &sock_in.sin_addr.s_addr);
1046 conn->login_port = ntohs(sock_in.sin_port);
1047 }
1048 1049
1049 rc = conn->sock->ops->getname(conn->sock, 1050 rc = conn->sock->ops->getname(conn->sock,
1050 (struct sockaddr *)&sock_in, &err, 0); 1051 (struct sockaddr *)&sock_in, &err, 0);
1051 if (!rc) { 1052 if (!rc)
1052 sprintf(conn->local_ip, "%pI4", 1053 memcpy(&conn->local_sockaddr, &sock_in, sizeof(sock_in));
1053 &sock_in.sin_addr.s_addr);
1054 conn->local_port = ntohs(sock_in.sin_port);
1055 }
1056 } 1054 }
1057 1055
1058 return 0; 1056 return 0;
@@ -1302,8 +1300,8 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
1302 spin_lock_bh(&np->np_thread_lock); 1300 spin_lock_bh(&np->np_thread_lock);
1303 if (np->np_thread_state != ISCSI_NP_THREAD_ACTIVE) { 1301 if (np->np_thread_state != ISCSI_NP_THREAD_ACTIVE) {
1304 spin_unlock_bh(&np->np_thread_lock); 1302 spin_unlock_bh(&np->np_thread_lock);
1305 pr_err("iSCSI Network Portal on %s:%hu currently not" 1303 pr_err("iSCSI Network Portal on %pISpc currently not"
1306 " active.\n", np->np_ip, np->np_port); 1304 " active.\n", &np->np_sockaddr);
1307 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, 1305 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
1308 ISCSI_LOGIN_STATUS_SVC_UNAVAILABLE); 1306 ISCSI_LOGIN_STATUS_SVC_UNAVAILABLE);
1309 goto new_sess_out; 1307 goto new_sess_out;
@@ -1312,9 +1310,9 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
1312 1310
1313 conn->network_transport = np->np_network_transport; 1311 conn->network_transport = np->np_network_transport;
1314 1312
1315 pr_debug("Received iSCSI login request from %s on %s Network" 1313 pr_debug("Received iSCSI login request from %pISpc on %s Network"
1316 " Portal %s:%hu\n", conn->login_ip, np->np_transport->name, 1314 " Portal %pISpc\n", &conn->login_sockaddr, np->np_transport->name,
1317 conn->local_ip, conn->local_port); 1315 &conn->local_sockaddr);
1318 1316
1319 pr_debug("Moving to TARG_CONN_STATE_IN_LOGIN.\n"); 1317 pr_debug("Moving to TARG_CONN_STATE_IN_LOGIN.\n");
1320 conn->conn_state = TARG_CONN_STATE_IN_LOGIN; 1318 conn->conn_state = TARG_CONN_STATE_IN_LOGIN;
diff --git a/drivers/target/iscsi/iscsi_target_login.h b/drivers/target/iscsi/iscsi_target_login.h
index 57aa0d0fd820..b597aa2c61a1 100644
--- a/drivers/target/iscsi/iscsi_target_login.h
+++ b/drivers/target/iscsi/iscsi_target_login.h
@@ -5,9 +5,9 @@ extern int iscsi_login_setup_crypto(struct iscsi_conn *);
5extern int iscsi_check_for_session_reinstatement(struct iscsi_conn *); 5extern int iscsi_check_for_session_reinstatement(struct iscsi_conn *);
6extern int iscsi_login_post_auth_non_zero_tsih(struct iscsi_conn *, u16, u32); 6extern int iscsi_login_post_auth_non_zero_tsih(struct iscsi_conn *, u16, u32);
7extern int iscsit_setup_np(struct iscsi_np *, 7extern int iscsit_setup_np(struct iscsi_np *,
8 struct __kernel_sockaddr_storage *); 8 struct sockaddr_storage *);
9extern int iscsi_target_setup_login_socket(struct iscsi_np *, 9extern int iscsi_target_setup_login_socket(struct iscsi_np *,
10 struct __kernel_sockaddr_storage *); 10 struct sockaddr_storage *);
11extern int iscsit_accept_np(struct iscsi_np *, struct iscsi_conn *); 11extern int iscsit_accept_np(struct iscsi_np *, struct iscsi_conn *);
12extern int iscsit_get_login_rx(struct iscsi_conn *, struct iscsi_login *); 12extern int iscsit_get_login_rx(struct iscsi_conn *, struct iscsi_login *);
13extern int iscsit_put_login_tx(struct iscsi_conn *, struct iscsi_login *, u32); 13extern int iscsit_put_login_tx(struct iscsi_conn *, struct iscsi_login *, u32);
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
index f9cde9141836..5c964c09c89f 100644
--- a/drivers/target/iscsi/iscsi_target_nego.c
+++ b/drivers/target/iscsi/iscsi_target_nego.c
@@ -341,7 +341,6 @@ static int iscsi_target_check_first_request(
341static int iscsi_target_do_tx_login_io(struct iscsi_conn *conn, struct iscsi_login *login) 341static int iscsi_target_do_tx_login_io(struct iscsi_conn *conn, struct iscsi_login *login)
342{ 342{
343 u32 padding = 0; 343 u32 padding = 0;
344 struct iscsi_session *sess = conn->sess;
345 struct iscsi_login_rsp *login_rsp; 344 struct iscsi_login_rsp *login_rsp;
346 345
347 login_rsp = (struct iscsi_login_rsp *) login->rsp; 346 login_rsp = (struct iscsi_login_rsp *) login->rsp;
@@ -353,7 +352,7 @@ static int iscsi_target_do_tx_login_io(struct iscsi_conn *conn, struct iscsi_log
353 login_rsp->itt = login->init_task_tag; 352 login_rsp->itt = login->init_task_tag;
354 login_rsp->statsn = cpu_to_be32(conn->stat_sn++); 353 login_rsp->statsn = cpu_to_be32(conn->stat_sn++);
355 login_rsp->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn); 354 login_rsp->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
356 login_rsp->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn); 355 login_rsp->max_cmdsn = cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn));
357 356
358 pr_debug("Sending Login Response, Flags: 0x%02x, ITT: 0x%08x," 357 pr_debug("Sending Login Response, Flags: 0x%02x, ITT: 0x%08x,"
359 " ExpCmdSN; 0x%08x, MaxCmdSN: 0x%08x, StatSN: 0x%08x, Length:" 358 " ExpCmdSN; 0x%08x, MaxCmdSN: 0x%08x, StatSN: 0x%08x, Length:"
@@ -382,10 +381,6 @@ static int iscsi_target_do_tx_login_io(struct iscsi_conn *conn, struct iscsi_log
382 goto err; 381 goto err;
383 382
384 login->rsp_length = 0; 383 login->rsp_length = 0;
385 mutex_lock(&sess->cmdsn_mutex);
386 login_rsp->exp_cmdsn = cpu_to_be32(sess->exp_cmd_sn);
387 login_rsp->max_cmdsn = cpu_to_be32(sess->max_cmd_sn);
388 mutex_unlock(&sess->cmdsn_mutex);
389 384
390 return 0; 385 return 0;
391 386
diff --git a/drivers/target/iscsi/iscsi_target_stat.c b/drivers/target/iscsi/iscsi_target_stat.c
index 5e1349a3b143..9dd94ff0b62c 100644
--- a/drivers/target/iscsi/iscsi_target_stat.c
+++ b/drivers/target/iscsi/iscsi_target_stat.c
@@ -430,7 +430,7 @@ static ssize_t iscsi_stat_tgt_attr_show_attr_fail_intr_addr(
430 int ret; 430 int ret;
431 431
432 spin_lock(&lstat->lock); 432 spin_lock(&lstat->lock);
433 ret = snprintf(page, PAGE_SIZE, "%s\n", lstat->last_intr_fail_ip_addr); 433 ret = snprintf(page, PAGE_SIZE, "%pISc\n", &lstat->last_intr_fail_sockaddr);
434 spin_unlock(&lstat->lock); 434 spin_unlock(&lstat->lock);
435 435
436 return ret; 436 return ret;
diff --git a/drivers/target/iscsi/iscsi_target_tmr.c b/drivers/target/iscsi/iscsi_target_tmr.c
index cf59c397007b..11320df939f7 100644
--- a/drivers/target/iscsi/iscsi_target_tmr.c
+++ b/drivers/target/iscsi/iscsi_target_tmr.c
@@ -50,7 +50,7 @@ u8 iscsit_tmr_abort_task(
50 pr_err("Unable to locate RefTaskTag: 0x%08x on CID:" 50 pr_err("Unable to locate RefTaskTag: 0x%08x on CID:"
51 " %hu.\n", hdr->rtt, conn->cid); 51 " %hu.\n", hdr->rtt, conn->cid);
52 return (iscsi_sna_gte(be32_to_cpu(hdr->refcmdsn), conn->sess->exp_cmd_sn) && 52 return (iscsi_sna_gte(be32_to_cpu(hdr->refcmdsn), conn->sess->exp_cmd_sn) &&
53 iscsi_sna_lte(be32_to_cpu(hdr->refcmdsn), conn->sess->max_cmd_sn)) ? 53 iscsi_sna_lte(be32_to_cpu(hdr->refcmdsn), (u32) atomic_read(&conn->sess->max_cmd_sn))) ?
54 ISCSI_TMF_RSP_COMPLETE : ISCSI_TMF_RSP_NO_TASK; 54 ISCSI_TMF_RSP_COMPLETE : ISCSI_TMF_RSP_NO_TASK;
55 } 55 }
56 if (ref_cmd->cmd_sn != be32_to_cpu(hdr->refcmdsn)) { 56 if (ref_cmd->cmd_sn != be32_to_cpu(hdr->refcmdsn)) {
diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c
index 968068ffcb1c..23c95cd14167 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.c
+++ b/drivers/target/iscsi/iscsi_target_tpg.c
@@ -226,6 +226,7 @@ static void iscsit_set_default_tpg_attribs(struct iscsi_portal_group *tpg)
226 a->default_erl = TA_DEFAULT_ERL; 226 a->default_erl = TA_DEFAULT_ERL;
227 a->t10_pi = TA_DEFAULT_T10_PI; 227 a->t10_pi = TA_DEFAULT_T10_PI;
228 a->fabric_prot_type = TA_DEFAULT_FABRIC_PROT_TYPE; 228 a->fabric_prot_type = TA_DEFAULT_FABRIC_PROT_TYPE;
229 a->tpg_enabled_sendtargets = TA_DEFAULT_TPG_ENABLED_SENDTARGETS;
229} 230}
230 231
231int iscsit_tpg_add_portal_group(struct iscsi_tiqn *tiqn, struct iscsi_portal_group *tpg) 232int iscsit_tpg_add_portal_group(struct iscsi_tiqn *tiqn, struct iscsi_portal_group *tpg)
@@ -430,7 +431,7 @@ struct iscsi_tpg_np *iscsit_tpg_locate_child_np(
430 431
431static bool iscsit_tpg_check_network_portal( 432static bool iscsit_tpg_check_network_portal(
432 struct iscsi_tiqn *tiqn, 433 struct iscsi_tiqn *tiqn,
433 struct __kernel_sockaddr_storage *sockaddr, 434 struct sockaddr_storage *sockaddr,
434 int network_transport) 435 int network_transport)
435{ 436{
436 struct iscsi_portal_group *tpg; 437 struct iscsi_portal_group *tpg;
@@ -459,8 +460,7 @@ static bool iscsit_tpg_check_network_portal(
459 460
460struct iscsi_tpg_np *iscsit_tpg_add_network_portal( 461struct iscsi_tpg_np *iscsit_tpg_add_network_portal(
461 struct iscsi_portal_group *tpg, 462 struct iscsi_portal_group *tpg,
462 struct __kernel_sockaddr_storage *sockaddr, 463 struct sockaddr_storage *sockaddr,
463 char *ip_str,
464 struct iscsi_tpg_np *tpg_np_parent, 464 struct iscsi_tpg_np *tpg_np_parent,
465 int network_transport) 465 int network_transport)
466{ 466{
@@ -470,8 +470,8 @@ struct iscsi_tpg_np *iscsit_tpg_add_network_portal(
470 if (!tpg_np_parent) { 470 if (!tpg_np_parent) {
471 if (iscsit_tpg_check_network_portal(tpg->tpg_tiqn, sockaddr, 471 if (iscsit_tpg_check_network_portal(tpg->tpg_tiqn, sockaddr,
472 network_transport)) { 472 network_transport)) {
473 pr_err("Network Portal: %s already exists on a" 473 pr_err("Network Portal: %pISc already exists on a"
474 " different TPG on %s\n", ip_str, 474 " different TPG on %s\n", sockaddr,
475 tpg->tpg_tiqn->tiqn); 475 tpg->tpg_tiqn->tiqn);
476 return ERR_PTR(-EEXIST); 476 return ERR_PTR(-EEXIST);
477 } 477 }
@@ -484,7 +484,7 @@ struct iscsi_tpg_np *iscsit_tpg_add_network_portal(
484 return ERR_PTR(-ENOMEM); 484 return ERR_PTR(-ENOMEM);
485 } 485 }
486 486
487 np = iscsit_add_np(sockaddr, ip_str, network_transport); 487 np = iscsit_add_np(sockaddr, network_transport);
488 if (IS_ERR(np)) { 488 if (IS_ERR(np)) {
489 kfree(tpg_np); 489 kfree(tpg_np);
490 return ERR_CAST(np); 490 return ERR_CAST(np);
@@ -514,8 +514,8 @@ struct iscsi_tpg_np *iscsit_tpg_add_network_portal(
514 spin_unlock(&tpg_np_parent->tpg_np_parent_lock); 514 spin_unlock(&tpg_np_parent->tpg_np_parent_lock);
515 } 515 }
516 516
517 pr_debug("CORE[%s] - Added Network Portal: %s:%hu,%hu on %s\n", 517 pr_debug("CORE[%s] - Added Network Portal: %pISpc,%hu on %s\n",
518 tpg->tpg_tiqn->tiqn, np->np_ip, np->np_port, tpg->tpgt, 518 tpg->tpg_tiqn->tiqn, &np->np_sockaddr, tpg->tpgt,
519 np->np_transport->name); 519 np->np_transport->name);
520 520
521 return tpg_np; 521 return tpg_np;
@@ -528,8 +528,8 @@ static int iscsit_tpg_release_np(
528{ 528{
529 iscsit_clear_tpg_np_login_thread(tpg_np, tpg, true); 529 iscsit_clear_tpg_np_login_thread(tpg_np, tpg, true);
530 530
531 pr_debug("CORE[%s] - Removed Network Portal: %s:%hu,%hu on %s\n", 531 pr_debug("CORE[%s] - Removed Network Portal: %pISpc,%hu on %s\n",
532 tpg->tpg_tiqn->tiqn, np->np_ip, np->np_port, tpg->tpgt, 532 tpg->tpg_tiqn->tiqn, &np->np_sockaddr, tpg->tpgt,
533 np->np_transport->name); 533 np->np_transport->name);
534 534
535 tpg_np->tpg_np = NULL; 535 tpg_np->tpg_np = NULL;
@@ -892,3 +892,21 @@ int iscsit_ta_fabric_prot_type(
892 892
893 return 0; 893 return 0;
894} 894}
895
896int iscsit_ta_tpg_enabled_sendtargets(
897 struct iscsi_portal_group *tpg,
898 u32 flag)
899{
900 struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
901
902 if ((flag != 0) && (flag != 1)) {
903 pr_err("Illegal value %d\n", flag);
904 return -EINVAL;
905 }
906
907 a->tpg_enabled_sendtargets = flag;
908 pr_debug("iSCSI_TPG[%hu] - TPG enabled bit required for SendTargets:"
909 " %s\n", tpg->tpgt, (a->tpg_enabled_sendtargets) ? "ON" : "OFF");
910
911 return 0;
912}
diff --git a/drivers/target/iscsi/iscsi_target_tpg.h b/drivers/target/iscsi/iscsi_target_tpg.h
index 95ff5bdecd71..9db32bd24cd4 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.h
+++ b/drivers/target/iscsi/iscsi_target_tpg.h
@@ -22,7 +22,7 @@ extern struct iscsi_node_attrib *iscsit_tpg_get_node_attrib(struct iscsi_session
22extern void iscsit_tpg_del_external_nps(struct iscsi_tpg_np *); 22extern void iscsit_tpg_del_external_nps(struct iscsi_tpg_np *);
23extern struct iscsi_tpg_np *iscsit_tpg_locate_child_np(struct iscsi_tpg_np *, int); 23extern struct iscsi_tpg_np *iscsit_tpg_locate_child_np(struct iscsi_tpg_np *, int);
24extern struct iscsi_tpg_np *iscsit_tpg_add_network_portal(struct iscsi_portal_group *, 24extern struct iscsi_tpg_np *iscsit_tpg_add_network_portal(struct iscsi_portal_group *,
25 struct __kernel_sockaddr_storage *, char *, struct iscsi_tpg_np *, 25 struct sockaddr_storage *, struct iscsi_tpg_np *,
26 int); 26 int);
27extern int iscsit_tpg_del_network_portal(struct iscsi_portal_group *, 27extern int iscsit_tpg_del_network_portal(struct iscsi_portal_group *,
28 struct iscsi_tpg_np *); 28 struct iscsi_tpg_np *);
@@ -40,5 +40,6 @@ extern int iscsit_ta_demo_mode_discovery(struct iscsi_portal_group *, u32);
40extern int iscsit_ta_default_erl(struct iscsi_portal_group *, u32); 40extern int iscsit_ta_default_erl(struct iscsi_portal_group *, u32);
41extern int iscsit_ta_t10_pi(struct iscsi_portal_group *, u32); 41extern int iscsit_ta_t10_pi(struct iscsi_portal_group *, u32);
42extern int iscsit_ta_fabric_prot_type(struct iscsi_portal_group *, u32); 42extern int iscsit_ta_fabric_prot_type(struct iscsi_portal_group *, u32);
43extern int iscsit_ta_tpg_enabled_sendtargets(struct iscsi_portal_group *, u32);
43 44
44#endif /* ISCSI_TARGET_TPG_H */ 45#endif /* ISCSI_TARGET_TPG_H */
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index a2bff0702eb2..428b0d9e3dba 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -233,6 +233,7 @@ struct iscsi_r2t *iscsit_get_holder_for_r2tsn(
233 233
234static inline int iscsit_check_received_cmdsn(struct iscsi_session *sess, u32 cmdsn) 234static inline int iscsit_check_received_cmdsn(struct iscsi_session *sess, u32 cmdsn)
235{ 235{
236 u32 max_cmdsn;
236 int ret; 237 int ret;
237 238
238 /* 239 /*
@@ -241,10 +242,10 @@ static inline int iscsit_check_received_cmdsn(struct iscsi_session *sess, u32 cm
241 * or order CmdSNs due to multiple connection sessions and/or 242 * or order CmdSNs due to multiple connection sessions and/or
242 * CRC failures. 243 * CRC failures.
243 */ 244 */
244 if (iscsi_sna_gt(cmdsn, sess->max_cmd_sn)) { 245 max_cmdsn = atomic_read(&sess->max_cmd_sn);
246 if (iscsi_sna_gt(cmdsn, max_cmdsn)) {
245 pr_err("Received CmdSN: 0x%08x is greater than" 247 pr_err("Received CmdSN: 0x%08x is greater than"
246 " MaxCmdSN: 0x%08x, ignoring.\n", cmdsn, 248 " MaxCmdSN: 0x%08x, ignoring.\n", cmdsn, max_cmdsn);
247 sess->max_cmd_sn);
248 ret = CMDSN_MAXCMDSN_OVERRUN; 249 ret = CMDSN_MAXCMDSN_OVERRUN;
249 250
250 } else if (cmdsn == sess->exp_cmd_sn) { 251 } else if (cmdsn == sess->exp_cmd_sn) {
@@ -1371,6 +1372,33 @@ int tx_data(
1371 return iscsit_do_tx_data(conn, &c); 1372 return iscsit_do_tx_data(conn, &c);
1372} 1373}
1373 1374
1375static bool sockaddr_equal(struct sockaddr_storage *x, struct sockaddr_storage *y)
1376{
1377 switch (x->ss_family) {
1378 case AF_INET: {
1379 struct sockaddr_in *sinx = (struct sockaddr_in *)x;
1380 struct sockaddr_in *siny = (struct sockaddr_in *)y;
1381 if (sinx->sin_addr.s_addr != siny->sin_addr.s_addr)
1382 return false;
1383 if (sinx->sin_port != siny->sin_port)
1384 return false;
1385 break;
1386 }
1387 case AF_INET6: {
1388 struct sockaddr_in6 *sinx = (struct sockaddr_in6 *)x;
1389 struct sockaddr_in6 *siny = (struct sockaddr_in6 *)y;
1390 if (!ipv6_addr_equal(&sinx->sin6_addr, &siny->sin6_addr))
1391 return false;
1392 if (sinx->sin6_port != siny->sin6_port)
1393 return false;
1394 break;
1395 }
1396 default:
1397 return false;
1398 }
1399 return true;
1400}
1401
1374void iscsit_collect_login_stats( 1402void iscsit_collect_login_stats(
1375 struct iscsi_conn *conn, 1403 struct iscsi_conn *conn,
1376 u8 status_class, 1404 u8 status_class,
@@ -1387,7 +1415,7 @@ void iscsit_collect_login_stats(
1387 ls = &tiqn->login_stats; 1415 ls = &tiqn->login_stats;
1388 1416
1389 spin_lock(&ls->lock); 1417 spin_lock(&ls->lock);
1390 if (!strcmp(conn->login_ip, ls->last_intr_fail_ip_addr) && 1418 if (sockaddr_equal(&conn->login_sockaddr, &ls->last_intr_fail_sockaddr) &&
1391 ((get_jiffies_64() - ls->last_fail_time) < 10)) { 1419 ((get_jiffies_64() - ls->last_fail_time) < 10)) {
1392 /* We already have the failure info for this login */ 1420 /* We already have the failure info for this login */
1393 spin_unlock(&ls->lock); 1421 spin_unlock(&ls->lock);
@@ -1427,8 +1455,7 @@ void iscsit_collect_login_stats(
1427 1455
1428 ls->last_intr_fail_ip_family = conn->login_family; 1456 ls->last_intr_fail_ip_family = conn->login_family;
1429 1457
1430 snprintf(ls->last_intr_fail_ip_addr, IPV6_ADDRESS_SPACE, 1458 ls->last_intr_fail_sockaddr = conn->login_sockaddr;
1431 "%s", conn->login_ip);
1432 ls->last_fail_time = get_jiffies_64(); 1459 ls->last_fail_time = get_jiffies_64();
1433 } 1460 }
1434 1461
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index a556bdebd775..5bc85ffed720 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -526,7 +526,7 @@ static inline struct tcm_loop_tpg *tl_tpg(struct se_portal_group *se_tpg)
526static char *tcm_loop_get_endpoint_wwn(struct se_portal_group *se_tpg) 526static char *tcm_loop_get_endpoint_wwn(struct se_portal_group *se_tpg)
527{ 527{
528 /* 528 /*
529 * Return the passed NAA identifier for the SAS Target Port 529 * Return the passed NAA identifier for the Target Port
530 */ 530 */
531 return &tl_tpg(se_tpg)->tl_hba->tl_wwn_address[0]; 531 return &tl_tpg(se_tpg)->tl_hba->tl_wwn_address[0];
532} 532}
@@ -845,7 +845,7 @@ static int tcm_loop_make_nexus(
845 transport_free_session(tl_nexus->se_sess); 845 transport_free_session(tl_nexus->se_sess);
846 goto out; 846 goto out;
847 } 847 }
848 /* Now, register the SAS I_T Nexus as active. */ 848 /* Now, register the I_T Nexus as active. */
849 transport_register_session(se_tpg, tl_nexus->se_sess->se_node_acl, 849 transport_register_session(se_tpg, tl_nexus->se_sess->se_node_acl,
850 tl_nexus->se_sess, tl_nexus); 850 tl_nexus->se_sess, tl_nexus);
851 tl_tpg->tl_nexus = tl_nexus; 851 tl_tpg->tl_nexus = tl_nexus;
@@ -884,7 +884,7 @@ static int tcm_loop_drop_nexus(
884 " %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tpg->tl_hba), 884 " %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tpg->tl_hba),
885 tl_nexus->se_sess->se_node_acl->initiatorname); 885 tl_nexus->se_sess->se_node_acl->initiatorname);
886 /* 886 /*
887 * Release the SCSI I_T Nexus to the emulated SAS Target Port 887 * Release the SCSI I_T Nexus to the emulated Target Port
888 */ 888 */
889 transport_deregister_session(tl_nexus->se_sess); 889 transport_deregister_session(tl_nexus->se_sess);
890 tpg->tl_nexus = NULL; 890 tpg->tl_nexus = NULL;
@@ -1034,6 +1034,11 @@ static ssize_t tcm_loop_tpg_store_transport_status(
1034 } 1034 }
1035 if (!strncmp(page, "offline", 7)) { 1035 if (!strncmp(page, "offline", 7)) {
1036 tl_tpg->tl_transport_status = TCM_TRANSPORT_OFFLINE; 1036 tl_tpg->tl_transport_status = TCM_TRANSPORT_OFFLINE;
1037 if (tl_tpg->tl_nexus) {
1038 struct se_session *tl_sess = tl_tpg->tl_nexus->se_sess;
1039
1040 core_allocate_nexus_loss_ua(tl_sess->se_node_acl);
1041 }
1037 return count; 1042 return count;
1038 } 1043 }
1039 return -EINVAL; 1044 return -EINVAL;
@@ -1077,7 +1082,7 @@ static struct se_portal_group *tcm_loop_make_naa_tpg(
1077 tl_tpg->tl_hba = tl_hba; 1082 tl_tpg->tl_hba = tl_hba;
1078 tl_tpg->tl_tpgt = tpgt; 1083 tl_tpg->tl_tpgt = tpgt;
1079 /* 1084 /*
1080 * Register the tl_tpg as a emulated SAS TCM Target Endpoint 1085 * Register the tl_tpg as a emulated TCM Target Endpoint
1081 */ 1086 */
1082 ret = core_tpg_register(wwn, &tl_tpg->tl_se_tpg, tl_hba->tl_proto_id); 1087 ret = core_tpg_register(wwn, &tl_tpg->tl_se_tpg, tl_hba->tl_proto_id);
1083 if (ret < 0) 1088 if (ret < 0)
@@ -1102,11 +1107,11 @@ static void tcm_loop_drop_naa_tpg(
1102 tl_hba = tl_tpg->tl_hba; 1107 tl_hba = tl_tpg->tl_hba;
1103 tpgt = tl_tpg->tl_tpgt; 1108 tpgt = tl_tpg->tl_tpgt;
1104 /* 1109 /*
1105 * Release the I_T Nexus for the Virtual SAS link if present 1110 * Release the I_T Nexus for the Virtual target link if present
1106 */ 1111 */
1107 tcm_loop_drop_nexus(tl_tpg); 1112 tcm_loop_drop_nexus(tl_tpg);
1108 /* 1113 /*
1109 * Deregister the tl_tpg as a emulated SAS TCM Target Endpoint 1114 * Deregister the tl_tpg as a emulated TCM Target Endpoint
1110 */ 1115 */
1111 core_tpg_deregister(se_tpg); 1116 core_tpg_deregister(se_tpg);
1112 1117
@@ -1199,8 +1204,9 @@ static void tcm_loop_drop_scsi_hba(
1199 struct tcm_loop_hba, tl_hba_wwn); 1204 struct tcm_loop_hba, tl_hba_wwn);
1200 1205
1201 pr_debug("TCM_Loop_ConfigFS: Deallocating emulated Target" 1206 pr_debug("TCM_Loop_ConfigFS: Deallocating emulated Target"
1202 " SAS Address: %s at Linux/SCSI Host ID: %d\n", 1207 " %s Address: %s at Linux/SCSI Host ID: %d\n",
1203 tl_hba->tl_wwn_address, tl_hba->sh->host_no); 1208 tcm_loop_dump_proto_id(tl_hba), tl_hba->tl_wwn_address,
1209 tl_hba->sh->host_no);
1204 /* 1210 /*
1205 * Call device_unregister() on the original tl_hba->dev. 1211 * Call device_unregister() on the original tl_hba->dev.
1206 * tcm_loop_fabric_scsi.c:tcm_loop_release_adapter() will 1212 * tcm_loop_fabric_scsi.c:tcm_loop_release_adapter() will
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 09e682b1c549..dcc424ac35d4 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -620,8 +620,6 @@ struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
620 620
621 lacl->mapped_lun = mapped_lun; 621 lacl->mapped_lun = mapped_lun;
622 lacl->se_lun_nacl = nacl; 622 lacl->se_lun_nacl = nacl;
623 snprintf(lacl->initiatorname, TRANSPORT_IQN_LEN, "%s",
624 nacl->initiatorname);
625 623
626 return lacl; 624 return lacl;
627} 625}
@@ -656,7 +654,7 @@ int core_dev_add_initiator_node_lun_acl(
656 " InitiatorNode: %s\n", tpg->se_tpg_tfo->get_fabric_name(), 654 " InitiatorNode: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
657 tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun, lacl->mapped_lun, 655 tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun, lacl->mapped_lun,
658 (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) ? "RW" : "RO", 656 (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) ? "RW" : "RO",
659 lacl->initiatorname); 657 nacl->initiatorname);
660 /* 658 /*
661 * Check to see if there are any existing persistent reservation APTPL 659 * Check to see if there are any existing persistent reservation APTPL
662 * pre-registrations that need to be enabled for this LUN ACL.. 660 * pre-registrations that need to be enabled for this LUN ACL..
@@ -688,7 +686,7 @@ int core_dev_del_initiator_node_lun_acl(
688 " InitiatorNode: %s Mapped LUN: %llu\n", 686 " InitiatorNode: %s Mapped LUN: %llu\n",
689 tpg->se_tpg_tfo->get_fabric_name(), 687 tpg->se_tpg_tfo->get_fabric_name(),
690 tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun, 688 tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
691 lacl->initiatorname, lacl->mapped_lun); 689 nacl->initiatorname, lacl->mapped_lun);
692 690
693 return 0; 691 return 0;
694} 692}
@@ -701,7 +699,7 @@ void core_dev_free_initiator_node_lun_acl(
701 " Mapped LUN: %llu\n", tpg->se_tpg_tfo->get_fabric_name(), 699 " Mapped LUN: %llu\n", tpg->se_tpg_tfo->get_fabric_name(),
702 tpg->se_tpg_tfo->tpg_get_tag(tpg), 700 tpg->se_tpg_tfo->tpg_get_tag(tpg),
703 tpg->se_tpg_tfo->get_fabric_name(), 701 tpg->se_tpg_tfo->get_fabric_name(),
704 lacl->initiatorname, lacl->mapped_lun); 702 lacl->se_lun_nacl->initiatorname, lacl->mapped_lun);
705 703
706 kfree(lacl); 704 kfree(lacl);
707} 705}
@@ -754,7 +752,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
754 dev->dev_link_magic = SE_DEV_LINK_MAGIC; 752 dev->dev_link_magic = SE_DEV_LINK_MAGIC;
755 dev->se_hba = hba; 753 dev->se_hba = hba;
756 dev->transport = hba->backend->ops; 754 dev->transport = hba->backend->ops;
757 dev->prot_length = sizeof(struct se_dif_v1_tuple); 755 dev->prot_length = sizeof(struct t10_pi_tuple);
758 dev->hba_index = hba->hba_index; 756 dev->hba_index = hba->hba_index;
759 757
760 INIT_LIST_HEAD(&dev->dev_list); 758 INIT_LIST_HEAD(&dev->dev_list);
@@ -771,7 +769,6 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
771 spin_lock_init(&dev->se_tmr_lock); 769 spin_lock_init(&dev->se_tmr_lock);
772 spin_lock_init(&dev->qf_cmd_lock); 770 spin_lock_init(&dev->qf_cmd_lock);
773 sema_init(&dev->caw_sem, 1); 771 sema_init(&dev->caw_sem, 1);
774 atomic_set(&dev->dev_ordered_id, 0);
775 INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list); 772 INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
776 spin_lock_init(&dev->t10_wwn.t10_vpd_lock); 773 spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
777 INIT_LIST_HEAD(&dev->t10_pr.registration_list); 774 INIT_LIST_HEAD(&dev->t10_pr.registration_list);
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
index 48a36989c1a6..be42429468e2 100644
--- a/drivers/target/target_core_fabric_configfs.c
+++ b/drivers/target/target_core_fabric_configfs.c
@@ -203,7 +203,7 @@ static ssize_t target_fabric_mappedlun_store_write_protect(
203 pr_debug("%s_ConfigFS: Changed Initiator ACL: %s" 203 pr_debug("%s_ConfigFS: Changed Initiator ACL: %s"
204 " Mapped LUN: %llu Write Protect bit to %s\n", 204 " Mapped LUN: %llu Write Protect bit to %s\n",
205 se_tpg->se_tpg_tfo->get_fabric_name(), 205 se_tpg->se_tpg_tfo->get_fabric_name(),
206 lacl->initiatorname, lacl->mapped_lun, (op) ? "ON" : "OFF"); 206 se_nacl->initiatorname, lacl->mapped_lun, (op) ? "ON" : "OFF");
207 207
208 return count; 208 return count;
209 209
diff --git a/drivers/target/target_core_hba.c b/drivers/target/target_core_hba.c
index be9cefc07407..9522960c7fdd 100644
--- a/drivers/target/target_core_hba.c
+++ b/drivers/target/target_core_hba.c
@@ -184,3 +184,8 @@ core_delete_hba(struct se_hba *hba)
184 kfree(hba); 184 kfree(hba);
185 return 0; 185 return 0;
186} 186}
187
188bool target_sense_desc_format(struct se_device *dev)
189{
190 return dev->transport->get_blocks(dev) > U32_MAX;
191}
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index e318ddbe15da..0b4b2a67d9f9 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -154,6 +154,38 @@ sbc_emulate_readcapacity_16(struct se_cmd *cmd)
154 return 0; 154 return 0;
155} 155}
156 156
157static sense_reason_t
158sbc_emulate_startstop(struct se_cmd *cmd)
159{
160 unsigned char *cdb = cmd->t_task_cdb;
161
162 /*
163 * See sbc3r36 section 5.25
164 * Immediate bit should be set since there is nothing to complete
165 * POWER CONDITION MODIFIER 0h
166 */
167 if (!(cdb[1] & 1) || cdb[2] || cdb[3])
168 return TCM_INVALID_CDB_FIELD;
169
170 /*
171 * See sbc3r36 section 5.25
172 * POWER CONDITION 0h START_VALID - process START and LOEJ
173 */
174 if (cdb[4] >> 4 & 0xf)
175 return TCM_INVALID_CDB_FIELD;
176
177 /*
178 * See sbc3r36 section 5.25
179 * LOEJ 0h - nothing to load or unload
180 * START 1h - we are ready
181 */
182 if (!(cdb[4] & 1) || (cdb[4] & 2) || (cdb[4] & 4))
183 return TCM_INVALID_CDB_FIELD;
184
185 target_complete_cmd(cmd, SAM_STAT_GOOD);
186 return 0;
187}
188
157sector_t sbc_get_write_same_sectors(struct se_cmd *cmd) 189sector_t sbc_get_write_same_sectors(struct se_cmd *cmd)
158{ 190{
159 u32 num_blocks; 191 u32 num_blocks;
@@ -960,6 +992,9 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
960 " than 1\n", sectors); 992 " than 1\n", sectors);
961 return TCM_INVALID_CDB_FIELD; 993 return TCM_INVALID_CDB_FIELD;
962 } 994 }
995 if (sbc_check_dpofua(dev, cmd, cdb))
996 return TCM_INVALID_CDB_FIELD;
997
963 /* 998 /*
964 * Double size because we have two buffers, note that 999 * Double size because we have two buffers, note that
965 * zero is not an error.. 1000 * zero is not an error..
@@ -1069,6 +1104,10 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
1069 size = 0; 1104 size = 0;
1070 cmd->execute_cmd = sbc_emulate_noop; 1105 cmd->execute_cmd = sbc_emulate_noop;
1071 break; 1106 break;
1107 case START_STOP:
1108 size = 0;
1109 cmd->execute_cmd = sbc_emulate_startstop;
1110 break;
1072 default: 1111 default:
1073 ret = spc_parse_cdb(cmd, &size); 1112 ret = spc_parse_cdb(cmd, &size);
1074 if (ret) 1113 if (ret)
@@ -1191,7 +1230,7 @@ void
1191sbc_dif_generate(struct se_cmd *cmd) 1230sbc_dif_generate(struct se_cmd *cmd)
1192{ 1231{
1193 struct se_device *dev = cmd->se_dev; 1232 struct se_device *dev = cmd->se_dev;
1194 struct se_dif_v1_tuple *sdt; 1233 struct t10_pi_tuple *sdt;
1195 struct scatterlist *dsg = cmd->t_data_sg, *psg; 1234 struct scatterlist *dsg = cmd->t_data_sg, *psg;
1196 sector_t sector = cmd->t_task_lba; 1235 sector_t sector = cmd->t_task_lba;
1197 void *daddr, *paddr; 1236 void *daddr, *paddr;
@@ -1203,7 +1242,7 @@ sbc_dif_generate(struct se_cmd *cmd)
1203 daddr = kmap_atomic(sg_page(dsg)) + dsg->offset; 1242 daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
1204 1243
1205 for (j = 0; j < psg->length; 1244 for (j = 0; j < psg->length;
1206 j += sizeof(struct se_dif_v1_tuple)) { 1245 j += sizeof(*sdt)) {
1207 __u16 crc; 1246 __u16 crc;
1208 unsigned int avail; 1247 unsigned int avail;
1209 1248
@@ -1256,7 +1295,7 @@ sbc_dif_generate(struct se_cmd *cmd)
1256} 1295}
1257 1296
1258static sense_reason_t 1297static sense_reason_t
1259sbc_dif_v1_verify(struct se_cmd *cmd, struct se_dif_v1_tuple *sdt, 1298sbc_dif_v1_verify(struct se_cmd *cmd, struct t10_pi_tuple *sdt,
1260 __u16 crc, sector_t sector, unsigned int ei_lba) 1299 __u16 crc, sector_t sector, unsigned int ei_lba)
1261{ 1300{
1262 __be16 csum; 1301 __be16 csum;
@@ -1346,7 +1385,7 @@ sbc_dif_verify(struct se_cmd *cmd, sector_t start, unsigned int sectors,
1346 unsigned int ei_lba, struct scatterlist *psg, int psg_off) 1385 unsigned int ei_lba, struct scatterlist *psg, int psg_off)
1347{ 1386{
1348 struct se_device *dev = cmd->se_dev; 1387 struct se_device *dev = cmd->se_dev;
1349 struct se_dif_v1_tuple *sdt; 1388 struct t10_pi_tuple *sdt;
1350 struct scatterlist *dsg = cmd->t_data_sg; 1389 struct scatterlist *dsg = cmd->t_data_sg;
1351 sector_t sector = start; 1390 sector_t sector = start;
1352 void *daddr, *paddr; 1391 void *daddr, *paddr;
@@ -1361,7 +1400,7 @@ sbc_dif_verify(struct se_cmd *cmd, sector_t start, unsigned int sectors,
1361 1400
1362 for (i = psg_off; i < psg->length && 1401 for (i = psg_off; i < psg->length &&
1363 sector < start + sectors; 1402 sector < start + sectors;
1364 i += sizeof(struct se_dif_v1_tuple)) { 1403 i += sizeof(*sdt)) {
1365 __u16 crc; 1404 __u16 crc;
1366 unsigned int avail; 1405 unsigned int avail;
1367 1406
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
index f87d4cef6d39..9413e1a949e5 100644
--- a/drivers/target/target_core_spc.c
+++ b/drivers/target/target_core_spc.c
@@ -484,8 +484,8 @@ static sense_reason_t
484spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf) 484spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
485{ 485{
486 struct se_device *dev = cmd->se_dev; 486 struct se_device *dev = cmd->se_dev;
487 int have_tp = 0; 487 u32 mtl = 0;
488 int opt, min; 488 int have_tp = 0, opt, min;
489 489
490 /* 490 /*
491 * Following spc3r22 section 6.5.3 Block Limits VPD page, when 491 * Following spc3r22 section 6.5.3 Block Limits VPD page, when
@@ -516,8 +516,15 @@ spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
516 516
517 /* 517 /*
518 * Set MAXIMUM TRANSFER LENGTH 518 * Set MAXIMUM TRANSFER LENGTH
519 *
520 * XXX: Currently assumes single PAGE_SIZE per scatterlist for fabrics
521 * enforcing maximum HW scatter-gather-list entry limit
519 */ 522 */
520 put_unaligned_be32(dev->dev_attrib.hw_max_sectors, &buf[8]); 523 if (cmd->se_tfo->max_data_sg_nents) {
524 mtl = (cmd->se_tfo->max_data_sg_nents * PAGE_SIZE) /
525 dev->dev_attrib.block_size;
526 }
527 put_unaligned_be32(min_not_zero(mtl, dev->dev_attrib.hw_max_sectors), &buf[8]);
521 528
522 /* 529 /*
523 * Set OPTIMAL TRANSFER LENGTH 530 * Set OPTIMAL TRANSFER LENGTH
@@ -768,7 +775,12 @@ static int spc_modesense_control(struct se_cmd *cmd, u8 pc, u8 *p)
768 if (pc == 1) 775 if (pc == 1)
769 goto out; 776 goto out;
770 777
771 p[2] = 2; 778 /* GLTSD: No implicit save of log parameters */
779 p[2] = (1 << 1);
780 if (target_sense_desc_format(dev))
781 /* D_SENSE: Descriptor format sense data for 64bit sectors */
782 p[2] |= (1 << 2);
783
772 /* 784 /*
773 * From spc4r23, 7.4.7 Control mode page 785 * From spc4r23, 7.4.7 Control mode page
774 * 786 *
@@ -1151,6 +1163,7 @@ static sense_reason_t spc_emulate_request_sense(struct se_cmd *cmd)
1151 unsigned char *rbuf; 1163 unsigned char *rbuf;
1152 u8 ua_asc = 0, ua_ascq = 0; 1164 u8 ua_asc = 0, ua_ascq = 0;
1153 unsigned char buf[SE_SENSE_BUF]; 1165 unsigned char buf[SE_SENSE_BUF];
1166 bool desc_format = target_sense_desc_format(cmd->se_dev);
1154 1167
1155 memset(buf, 0, SE_SENSE_BUF); 1168 memset(buf, 0, SE_SENSE_BUF);
1156 1169
@@ -1164,32 +1177,11 @@ static sense_reason_t spc_emulate_request_sense(struct se_cmd *cmd)
1164 if (!rbuf) 1177 if (!rbuf)
1165 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 1178 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1166 1179
1167 if (!core_scsi3_ua_clear_for_request_sense(cmd, &ua_asc, &ua_ascq)) { 1180 if (!core_scsi3_ua_clear_for_request_sense(cmd, &ua_asc, &ua_ascq))
1168 /* 1181 scsi_build_sense_buffer(desc_format, buf, UNIT_ATTENTION,
1169 * CURRENT ERROR, UNIT ATTENTION 1182 ua_asc, ua_ascq);
1170 */ 1183 else
1171 buf[0] = 0x70; 1184 scsi_build_sense_buffer(desc_format, buf, NO_SENSE, 0x0, 0x0);
1172 buf[SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION;
1173
1174 /*
1175 * The Additional Sense Code (ASC) from the UNIT ATTENTION
1176 */
1177 buf[SPC_ASC_KEY_OFFSET] = ua_asc;
1178 buf[SPC_ASCQ_KEY_OFFSET] = ua_ascq;
1179 buf[7] = 0x0A;
1180 } else {
1181 /*
1182 * CURRENT ERROR, NO SENSE
1183 */
1184 buf[0] = 0x70;
1185 buf[SPC_SENSE_KEY_OFFSET] = NO_SENSE;
1186
1187 /*
1188 * NO ADDITIONAL SENSE INFORMATION
1189 */
1190 buf[SPC_ASC_KEY_OFFSET] = 0x00;
1191 buf[7] = 0x0A;
1192 }
1193 1185
1194 memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length)); 1186 memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
1195 transport_kunmap_data_sg(cmd); 1187 transport_kunmap_data_sg(cmd);
@@ -1418,9 +1410,6 @@ spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
1418 } 1410 }
1419 break; 1411 break;
1420 default: 1412 default:
1421 pr_warn("TARGET_CORE[%s]: Unsupported SCSI Opcode"
1422 " 0x%02x, sending CHECK_CONDITION.\n",
1423 cmd->se_tfo->get_fabric_name(), cdb[0]);
1424 return TCM_UNSUPPORTED_SCSI_OPCODE; 1413 return TCM_UNSUPPORTED_SCSI_OPCODE;
1425 } 1414 }
1426 1415
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index babde4ad841f..2d0381dd105c 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -41,6 +41,7 @@
41#include "target_core_internal.h" 41#include "target_core_internal.h"
42#include "target_core_alua.h" 42#include "target_core_alua.h"
43#include "target_core_pr.h" 43#include "target_core_pr.h"
44#include "target_core_ua.h"
44 45
45extern struct se_device *g_lun0_dev; 46extern struct se_device *g_lun0_dev;
46 47
@@ -83,6 +84,22 @@ struct se_node_acl *core_tpg_get_initiator_node_acl(
83} 84}
84EXPORT_SYMBOL(core_tpg_get_initiator_node_acl); 85EXPORT_SYMBOL(core_tpg_get_initiator_node_acl);
85 86
87void core_allocate_nexus_loss_ua(
88 struct se_node_acl *nacl)
89{
90 struct se_dev_entry *deve;
91
92 if (!nacl)
93 return;
94
95 rcu_read_lock();
96 hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link)
97 core_scsi3_ua_allocate(deve, 0x29,
98 ASCQ_29H_NEXUS_LOSS_OCCURRED);
99 rcu_read_unlock();
100}
101EXPORT_SYMBOL(core_allocate_nexus_loss_ua);
102
86/* core_tpg_add_node_to_devs(): 103/* core_tpg_add_node_to_devs():
87 * 104 *
88 * 105 *
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index ce8574b7220c..5bacc7b5ed6d 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -39,6 +39,7 @@
39#include <net/sock.h> 39#include <net/sock.h>
40#include <net/tcp.h> 40#include <net/tcp.h>
41#include <scsi/scsi_proto.h> 41#include <scsi/scsi_proto.h>
42#include <scsi/scsi_common.h>
42 43
43#include <target/target_core_base.h> 44#include <target/target_core_base.h>
44#include <target/target_core_backend.h> 45#include <target/target_core_backend.h>
@@ -1074,6 +1075,55 @@ transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83)
1074} 1075}
1075EXPORT_SYMBOL(transport_set_vpd_ident); 1076EXPORT_SYMBOL(transport_set_vpd_ident);
1076 1077
1078static sense_reason_t
1079target_check_max_data_sg_nents(struct se_cmd *cmd, struct se_device *dev,
1080 unsigned int size)
1081{
1082 u32 mtl;
1083
1084 if (!cmd->se_tfo->max_data_sg_nents)
1085 return TCM_NO_SENSE;
1086 /*
1087 * Check if fabric enforced maximum SGL entries per I/O descriptor
1088 * exceeds se_cmd->data_length. If true, set SCF_UNDERFLOW_BIT +
1089 * residual_count and reduce original cmd->data_length to maximum
1090 * length based on single PAGE_SIZE entry scatter-lists.
1091 */
1092 mtl = (cmd->se_tfo->max_data_sg_nents * PAGE_SIZE);
1093 if (cmd->data_length > mtl) {
1094 /*
1095 * If an existing CDB overflow is present, calculate new residual
1096 * based on CDB size minus fabric maximum transfer length.
1097 *
1098 * If an existing CDB underflow is present, calculate new residual
1099 * based on original cmd->data_length minus fabric maximum transfer
1100 * length.
1101 *
1102 * Otherwise, set the underflow residual based on cmd->data_length
1103 * minus fabric maximum transfer length.
1104 */
1105 if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
1106 cmd->residual_count = (size - mtl);
1107 } else if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
1108 u32 orig_dl = size + cmd->residual_count;
1109 cmd->residual_count = (orig_dl - mtl);
1110 } else {
1111 cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
1112 cmd->residual_count = (cmd->data_length - mtl);
1113 }
1114 cmd->data_length = mtl;
1115 /*
1116 * Reset sbc_check_prot() calculated protection payload
1117 * length based upon the new smaller MTL.
1118 */
1119 if (cmd->prot_length) {
1120 u32 sectors = (mtl / dev->dev_attrib.block_size);
1121 cmd->prot_length = dev->prot_length * sectors;
1122 }
1123 }
1124 return TCM_NO_SENSE;
1125}
1126
1077sense_reason_t 1127sense_reason_t
1078target_cmd_size_check(struct se_cmd *cmd, unsigned int size) 1128target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
1079{ 1129{
@@ -1087,9 +1137,9 @@ target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
1087 " 0x%02x\n", cmd->se_tfo->get_fabric_name(), 1137 " 0x%02x\n", cmd->se_tfo->get_fabric_name(),
1088 cmd->data_length, size, cmd->t_task_cdb[0]); 1138 cmd->data_length, size, cmd->t_task_cdb[0]);
1089 1139
1090 if (cmd->data_direction == DMA_TO_DEVICE) { 1140 if (cmd->data_direction == DMA_TO_DEVICE &&
1091 pr_err("Rejecting underflow/overflow" 1141 cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
1092 " WRITE data\n"); 1142 pr_err("Rejecting underflow/overflow WRITE data\n");
1093 return TCM_INVALID_CDB_FIELD; 1143 return TCM_INVALID_CDB_FIELD;
1094 } 1144 }
1095 /* 1145 /*
@@ -1119,7 +1169,7 @@ target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
1119 } 1169 }
1120 } 1170 }
1121 1171
1122 return 0; 1172 return target_check_max_data_sg_nents(cmd, dev, size);
1123 1173
1124} 1174}
1125 1175
@@ -1177,14 +1227,7 @@ transport_check_alloc_task_attr(struct se_cmd *cmd)
1177 " emulation is not supported\n"); 1227 " emulation is not supported\n");
1178 return TCM_INVALID_CDB_FIELD; 1228 return TCM_INVALID_CDB_FIELD;
1179 } 1229 }
1180 /* 1230
1181 * Used to determine when ORDERED commands should go from
1182 * Dormant to Active status.
1183 */
1184 cmd->se_ordered_id = atomic_inc_return(&dev->dev_ordered_id);
1185 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
1186 cmd->se_ordered_id, cmd->sam_task_attr,
1187 dev->transport->name);
1188 return 0; 1231 return 0;
1189} 1232}
1190 1233
@@ -1246,6 +1289,11 @@ target_setup_cmd_from_cdb(struct se_cmd *cmd, unsigned char *cdb)
1246 } 1289 }
1247 1290
1248 ret = dev->transport->parse_cdb(cmd); 1291 ret = dev->transport->parse_cdb(cmd);
1292 if (ret == TCM_UNSUPPORTED_SCSI_OPCODE)
1293 pr_warn_ratelimited("%s/%s: Unsupported SCSI Opcode 0x%02x, sending CHECK_CONDITION.\n",
1294 cmd->se_tfo->get_fabric_name(),
1295 cmd->se_sess->se_node_acl->initiatorname,
1296 cmd->t_task_cdb[0]);
1249 if (ret) 1297 if (ret)
1250 return ret; 1298 return ret;
1251 1299
@@ -1693,8 +1741,7 @@ void transport_generic_request_failure(struct se_cmd *cmd,
1693 1741
1694check_stop: 1742check_stop:
1695 transport_lun_remove_cmd(cmd); 1743 transport_lun_remove_cmd(cmd);
1696 if (!transport_cmd_check_stop_to_fabric(cmd)) 1744 transport_cmd_check_stop_to_fabric(cmd);
1697 ;
1698 return; 1745 return;
1699 1746
1700queue_full: 1747queue_full:
@@ -1767,16 +1814,14 @@ static bool target_handle_task_attr(struct se_cmd *cmd)
1767 */ 1814 */
1768 switch (cmd->sam_task_attr) { 1815 switch (cmd->sam_task_attr) {
1769 case TCM_HEAD_TAG: 1816 case TCM_HEAD_TAG:
1770 pr_debug("Added HEAD_OF_QUEUE for CDB: 0x%02x, " 1817 pr_debug("Added HEAD_OF_QUEUE for CDB: 0x%02x\n",
1771 "se_ordered_id: %u\n", 1818 cmd->t_task_cdb[0]);
1772 cmd->t_task_cdb[0], cmd->se_ordered_id);
1773 return false; 1819 return false;
1774 case TCM_ORDERED_TAG: 1820 case TCM_ORDERED_TAG:
1775 atomic_inc_mb(&dev->dev_ordered_sync); 1821 atomic_inc_mb(&dev->dev_ordered_sync);
1776 1822
1777 pr_debug("Added ORDERED for CDB: 0x%02x to ordered list, " 1823 pr_debug("Added ORDERED for CDB: 0x%02x to ordered list\n",
1778 " se_ordered_id: %u\n", 1824 cmd->t_task_cdb[0]);
1779 cmd->t_task_cdb[0], cmd->se_ordered_id);
1780 1825
1781 /* 1826 /*
1782 * Execute an ORDERED command if no other older commands 1827 * Execute an ORDERED command if no other older commands
@@ -1800,10 +1845,8 @@ static bool target_handle_task_attr(struct se_cmd *cmd)
1800 list_add_tail(&cmd->se_delayed_node, &dev->delayed_cmd_list); 1845 list_add_tail(&cmd->se_delayed_node, &dev->delayed_cmd_list);
1801 spin_unlock(&dev->delayed_cmd_lock); 1846 spin_unlock(&dev->delayed_cmd_lock);
1802 1847
1803 pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to" 1848 pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to delayed CMD listn",
1804 " delayed CMD list, se_ordered_id: %u\n", 1849 cmd->t_task_cdb[0], cmd->sam_task_attr);
1805 cmd->t_task_cdb[0], cmd->sam_task_attr,
1806 cmd->se_ordered_id);
1807 return true; 1850 return true;
1808} 1851}
1809 1852
@@ -1888,20 +1931,18 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
1888 if (cmd->sam_task_attr == TCM_SIMPLE_TAG) { 1931 if (cmd->sam_task_attr == TCM_SIMPLE_TAG) {
1889 atomic_dec_mb(&dev->simple_cmds); 1932 atomic_dec_mb(&dev->simple_cmds);
1890 dev->dev_cur_ordered_id++; 1933 dev->dev_cur_ordered_id++;
1891 pr_debug("Incremented dev->dev_cur_ordered_id: %u for" 1934 pr_debug("Incremented dev->dev_cur_ordered_id: %u for SIMPLE\n",
1892 " SIMPLE: %u\n", dev->dev_cur_ordered_id, 1935 dev->dev_cur_ordered_id);
1893 cmd->se_ordered_id);
1894 } else if (cmd->sam_task_attr == TCM_HEAD_TAG) { 1936 } else if (cmd->sam_task_attr == TCM_HEAD_TAG) {
1895 dev->dev_cur_ordered_id++; 1937 dev->dev_cur_ordered_id++;
1896 pr_debug("Incremented dev_cur_ordered_id: %u for" 1938 pr_debug("Incremented dev_cur_ordered_id: %u for HEAD_OF_QUEUE\n",
1897 " HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id, 1939 dev->dev_cur_ordered_id);
1898 cmd->se_ordered_id);
1899 } else if (cmd->sam_task_attr == TCM_ORDERED_TAG) { 1940 } else if (cmd->sam_task_attr == TCM_ORDERED_TAG) {
1900 atomic_dec_mb(&dev->dev_ordered_sync); 1941 atomic_dec_mb(&dev->dev_ordered_sync);
1901 1942
1902 dev->dev_cur_ordered_id++; 1943 dev->dev_cur_ordered_id++;
1903 pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:" 1944 pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED\n",
1904 " %u\n", dev->dev_cur_ordered_id, cmd->se_ordered_id); 1945 dev->dev_cur_ordered_id);
1905 } 1946 }
1906 1947
1907 target_restart_delayed_cmds(dev); 1948 target_restart_delayed_cmds(dev);
@@ -2615,37 +2656,159 @@ bool transport_wait_for_tasks(struct se_cmd *cmd)
2615} 2656}
2616EXPORT_SYMBOL(transport_wait_for_tasks); 2657EXPORT_SYMBOL(transport_wait_for_tasks);
2617 2658
2618static int transport_get_sense_codes( 2659struct sense_info {
2619 struct se_cmd *cmd, 2660 u8 key;
2620 u8 *asc, 2661 u8 asc;
2621 u8 *ascq) 2662 u8 ascq;
2663 bool add_sector_info;
2664};
2665
2666static const struct sense_info sense_info_table[] = {
2667 [TCM_NO_SENSE] = {
2668 .key = NOT_READY
2669 },
2670 [TCM_NON_EXISTENT_LUN] = {
2671 .key = ILLEGAL_REQUEST,
2672 .asc = 0x25 /* LOGICAL UNIT NOT SUPPORTED */
2673 },
2674 [TCM_UNSUPPORTED_SCSI_OPCODE] = {
2675 .key = ILLEGAL_REQUEST,
2676 .asc = 0x20, /* INVALID COMMAND OPERATION CODE */
2677 },
2678 [TCM_SECTOR_COUNT_TOO_MANY] = {
2679 .key = ILLEGAL_REQUEST,
2680 .asc = 0x20, /* INVALID COMMAND OPERATION CODE */
2681 },
2682 [TCM_UNKNOWN_MODE_PAGE] = {
2683 .key = ILLEGAL_REQUEST,
2684 .asc = 0x24, /* INVALID FIELD IN CDB */
2685 },
2686 [TCM_CHECK_CONDITION_ABORT_CMD] = {
2687 .key = ABORTED_COMMAND,
2688 .asc = 0x29, /* BUS DEVICE RESET FUNCTION OCCURRED */
2689 .ascq = 0x03,
2690 },
2691 [TCM_INCORRECT_AMOUNT_OF_DATA] = {
2692 .key = ABORTED_COMMAND,
2693 .asc = 0x0c, /* WRITE ERROR */
2694 .ascq = 0x0d, /* NOT ENOUGH UNSOLICITED DATA */
2695 },
2696 [TCM_INVALID_CDB_FIELD] = {
2697 .key = ILLEGAL_REQUEST,
2698 .asc = 0x24, /* INVALID FIELD IN CDB */
2699 },
2700 [TCM_INVALID_PARAMETER_LIST] = {
2701 .key = ILLEGAL_REQUEST,
2702 .asc = 0x26, /* INVALID FIELD IN PARAMETER LIST */
2703 },
2704 [TCM_PARAMETER_LIST_LENGTH_ERROR] = {
2705 .key = ILLEGAL_REQUEST,
2706 .asc = 0x1a, /* PARAMETER LIST LENGTH ERROR */
2707 },
2708 [TCM_UNEXPECTED_UNSOLICITED_DATA] = {
2709 .key = ILLEGAL_REQUEST,
2710 .asc = 0x0c, /* WRITE ERROR */
2711 .ascq = 0x0c, /* UNEXPECTED_UNSOLICITED_DATA */
2712 },
2713 [TCM_SERVICE_CRC_ERROR] = {
2714 .key = ABORTED_COMMAND,
2715 .asc = 0x47, /* PROTOCOL SERVICE CRC ERROR */
2716 .ascq = 0x05, /* N/A */
2717 },
2718 [TCM_SNACK_REJECTED] = {
2719 .key = ABORTED_COMMAND,
2720 .asc = 0x11, /* READ ERROR */
2721 .ascq = 0x13, /* FAILED RETRANSMISSION REQUEST */
2722 },
2723 [TCM_WRITE_PROTECTED] = {
2724 .key = DATA_PROTECT,
2725 .asc = 0x27, /* WRITE PROTECTED */
2726 },
2727 [TCM_ADDRESS_OUT_OF_RANGE] = {
2728 .key = ILLEGAL_REQUEST,
2729 .asc = 0x21, /* LOGICAL BLOCK ADDRESS OUT OF RANGE */
2730 },
2731 [TCM_CHECK_CONDITION_UNIT_ATTENTION] = {
2732 .key = UNIT_ATTENTION,
2733 },
2734 [TCM_CHECK_CONDITION_NOT_READY] = {
2735 .key = NOT_READY,
2736 },
2737 [TCM_MISCOMPARE_VERIFY] = {
2738 .key = MISCOMPARE,
2739 .asc = 0x1d, /* MISCOMPARE DURING VERIFY OPERATION */
2740 .ascq = 0x00,
2741 },
2742 [TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED] = {
2743 .key = ABORTED_COMMAND,
2744 .asc = 0x10,
2745 .ascq = 0x01, /* LOGICAL BLOCK GUARD CHECK FAILED */
2746 .add_sector_info = true,
2747 },
2748 [TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED] = {
2749 .key = ABORTED_COMMAND,
2750 .asc = 0x10,
2751 .ascq = 0x02, /* LOGICAL BLOCK APPLICATION TAG CHECK FAILED */
2752 .add_sector_info = true,
2753 },
2754 [TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED] = {
2755 .key = ABORTED_COMMAND,
2756 .asc = 0x10,
2757 .ascq = 0x03, /* LOGICAL BLOCK REFERENCE TAG CHECK FAILED */
2758 .add_sector_info = true,
2759 },
2760 [TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE] = {
2761 /*
2762 * Returning ILLEGAL REQUEST would cause immediate IO errors on
2763 * Solaris initiators. Returning NOT READY instead means the
2764 * operations will be retried a finite number of times and we
2765 * can survive intermittent errors.
2766 */
2767 .key = NOT_READY,
2768 .asc = 0x08, /* LOGICAL UNIT COMMUNICATION FAILURE */
2769 },
2770};
2771
2772static int translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason)
2622{ 2773{
2623 *asc = cmd->scsi_asc; 2774 const struct sense_info *si;
2624 *ascq = cmd->scsi_ascq; 2775 u8 *buffer = cmd->sense_buffer;
2776 int r = (__force int)reason;
2777 u8 asc, ascq;
2778 bool desc_format = target_sense_desc_format(cmd->se_dev);
2625 2779
2626 return 0; 2780 if (r < ARRAY_SIZE(sense_info_table) && sense_info_table[r].key)
2627} 2781 si = &sense_info_table[r];
2782 else
2783 si = &sense_info_table[(__force int)
2784 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE];
2628 2785
2629static 2786 if (reason == TCM_CHECK_CONDITION_UNIT_ATTENTION) {
2630void transport_err_sector_info(unsigned char *buffer, sector_t bad_sector) 2787 core_scsi3_ua_for_check_condition(cmd, &asc, &ascq);
2631{ 2788 WARN_ON_ONCE(asc == 0);
2632 /* Place failed LBA in sense data information descriptor 0. */ 2789 } else if (si->asc == 0) {
2633 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 0xc; 2790 WARN_ON_ONCE(cmd->scsi_asc == 0);
2634 buffer[SPC_DESC_TYPE_OFFSET] = 0; /* Information */ 2791 asc = cmd->scsi_asc;
2635 buffer[SPC_ADDITIONAL_DESC_LEN_OFFSET] = 0xa; 2792 ascq = cmd->scsi_ascq;
2636 buffer[SPC_VALIDITY_OFFSET] = 0x80; 2793 } else {
2794 asc = si->asc;
2795 ascq = si->ascq;
2796 }
2797
2798 scsi_build_sense_buffer(desc_format, buffer, si->key, asc, ascq);
2799 if (si->add_sector_info)
2800 return scsi_set_sense_information(buffer,
2801 cmd->scsi_sense_length,
2802 cmd->bad_sector);
2637 2803
2638 /* Descriptor Information: failing sector */ 2804 return 0;
2639 put_unaligned_be64(bad_sector, &buffer[12]);
2640} 2805}
2641 2806
2642int 2807int
2643transport_send_check_condition_and_sense(struct se_cmd *cmd, 2808transport_send_check_condition_and_sense(struct se_cmd *cmd,
2644 sense_reason_t reason, int from_transport) 2809 sense_reason_t reason, int from_transport)
2645{ 2810{
2646 unsigned char *buffer = cmd->sense_buffer;
2647 unsigned long flags; 2811 unsigned long flags;
2648 u8 asc = 0, ascq = 0;
2649 2812
2650 spin_lock_irqsave(&cmd->t_state_lock, flags); 2813 spin_lock_irqsave(&cmd->t_state_lock, flags);
2651 if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) { 2814 if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
@@ -2655,243 +2818,17 @@ transport_send_check_condition_and_sense(struct se_cmd *cmd,
2655 cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION; 2818 cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION;
2656 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2819 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2657 2820
2658 if (!reason && from_transport) 2821 if (!from_transport) {
2659 goto after_reason; 2822 int rc;
2660 2823
2661 if (!from_transport)
2662 cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE; 2824 cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE;
2663 2825 cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
2664 /* 2826 cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER;
2665 * Actual SENSE DATA, see SPC-3 7.23.2 SPC_SENSE_KEY_OFFSET uses 2827 rc = translate_sense_reason(cmd, reason);
2666 * SENSE KEY values from include/scsi/scsi.h 2828 if (rc)
2667 */ 2829 return rc;
2668 switch (reason) {
2669 case TCM_NO_SENSE:
2670 /* CURRENT ERROR */
2671 buffer[0] = 0x70;
2672 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
2673 /* Not Ready */
2674 buffer[SPC_SENSE_KEY_OFFSET] = NOT_READY;
2675 /* NO ADDITIONAL SENSE INFORMATION */
2676 buffer[SPC_ASC_KEY_OFFSET] = 0;
2677 buffer[SPC_ASCQ_KEY_OFFSET] = 0;
2678 break;
2679 case TCM_NON_EXISTENT_LUN:
2680 /* CURRENT ERROR */
2681 buffer[0] = 0x70;
2682 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
2683 /* ILLEGAL REQUEST */
2684 buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
2685 /* LOGICAL UNIT NOT SUPPORTED */
2686 buffer[SPC_ASC_KEY_OFFSET] = 0x25;
2687 break;
2688 case TCM_UNSUPPORTED_SCSI_OPCODE:
2689 case TCM_SECTOR_COUNT_TOO_MANY:
2690 /* CURRENT ERROR */
2691 buffer[0] = 0x70;
2692 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
2693 /* ILLEGAL REQUEST */
2694 buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
2695 /* INVALID COMMAND OPERATION CODE */
2696 buffer[SPC_ASC_KEY_OFFSET] = 0x20;
2697 break;
2698 case TCM_UNKNOWN_MODE_PAGE:
2699 /* CURRENT ERROR */
2700 buffer[0] = 0x70;
2701 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
2702 /* ILLEGAL REQUEST */
2703 buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
2704 /* INVALID FIELD IN CDB */
2705 buffer[SPC_ASC_KEY_OFFSET] = 0x24;
2706 break;
2707 case TCM_CHECK_CONDITION_ABORT_CMD:
2708 /* CURRENT ERROR */
2709 buffer[0] = 0x70;
2710 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
2711 /* ABORTED COMMAND */
2712 buffer[SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
2713 /* BUS DEVICE RESET FUNCTION OCCURRED */
2714 buffer[SPC_ASC_KEY_OFFSET] = 0x29;
2715 buffer[SPC_ASCQ_KEY_OFFSET] = 0x03;
2716 break;
2717 case TCM_INCORRECT_AMOUNT_OF_DATA:
2718 /* CURRENT ERROR */
2719 buffer[0] = 0x70;
2720 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
2721 /* ABORTED COMMAND */
2722 buffer[SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
2723 /* WRITE ERROR */
2724 buffer[SPC_ASC_KEY_OFFSET] = 0x0c;
2725 /* NOT ENOUGH UNSOLICITED DATA */
2726 buffer[SPC_ASCQ_KEY_OFFSET] = 0x0d;
2727 break;
2728 case TCM_INVALID_CDB_FIELD:
2729 /* CURRENT ERROR */
2730 buffer[0] = 0x70;
2731 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
2732 /* ILLEGAL REQUEST */
2733 buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
2734 /* INVALID FIELD IN CDB */
2735 buffer[SPC_ASC_KEY_OFFSET] = 0x24;
2736 break;
2737 case TCM_INVALID_PARAMETER_LIST:
2738 /* CURRENT ERROR */
2739 buffer[0] = 0x70;
2740 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
2741 /* ILLEGAL REQUEST */
2742 buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
2743 /* INVALID FIELD IN PARAMETER LIST */
2744 buffer[SPC_ASC_KEY_OFFSET] = 0x26;
2745 break;
2746 case TCM_PARAMETER_LIST_LENGTH_ERROR:
2747 /* CURRENT ERROR */
2748 buffer[0] = 0x70;
2749 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
2750 /* ILLEGAL REQUEST */
2751 buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
2752 /* PARAMETER LIST LENGTH ERROR */
2753 buffer[SPC_ASC_KEY_OFFSET] = 0x1a;
2754 break;
2755 case TCM_UNEXPECTED_UNSOLICITED_DATA:
2756 /* CURRENT ERROR */
2757 buffer[0] = 0x70;
2758 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
2759 /* ABORTED COMMAND */
2760 buffer[SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
2761 /* WRITE ERROR */
2762 buffer[SPC_ASC_KEY_OFFSET] = 0x0c;
2763 /* UNEXPECTED_UNSOLICITED_DATA */
2764 buffer[SPC_ASCQ_KEY_OFFSET] = 0x0c;
2765 break;
2766 case TCM_SERVICE_CRC_ERROR:
2767 /* CURRENT ERROR */
2768 buffer[0] = 0x70;
2769 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
2770 /* ABORTED COMMAND */
2771 buffer[SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
2772 /* PROTOCOL SERVICE CRC ERROR */
2773 buffer[SPC_ASC_KEY_OFFSET] = 0x47;
2774 /* N/A */
2775 buffer[SPC_ASCQ_KEY_OFFSET] = 0x05;
2776 break;
2777 case TCM_SNACK_REJECTED:
2778 /* CURRENT ERROR */
2779 buffer[0] = 0x70;
2780 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
2781 /* ABORTED COMMAND */
2782 buffer[SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
2783 /* READ ERROR */
2784 buffer[SPC_ASC_KEY_OFFSET] = 0x11;
2785 /* FAILED RETRANSMISSION REQUEST */
2786 buffer[SPC_ASCQ_KEY_OFFSET] = 0x13;
2787 break;
2788 case TCM_WRITE_PROTECTED:
2789 /* CURRENT ERROR */
2790 buffer[0] = 0x70;
2791 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
2792 /* DATA PROTECT */
2793 buffer[SPC_SENSE_KEY_OFFSET] = DATA_PROTECT;
2794 /* WRITE PROTECTED */
2795 buffer[SPC_ASC_KEY_OFFSET] = 0x27;
2796 break;
2797 case TCM_ADDRESS_OUT_OF_RANGE:
2798 /* CURRENT ERROR */
2799 buffer[0] = 0x70;
2800 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
2801 /* ILLEGAL REQUEST */
2802 buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
2803 /* LOGICAL BLOCK ADDRESS OUT OF RANGE */
2804 buffer[SPC_ASC_KEY_OFFSET] = 0x21;
2805 break;
2806 case TCM_CHECK_CONDITION_UNIT_ATTENTION:
2807 /* CURRENT ERROR */
2808 buffer[0] = 0x70;
2809 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
2810 /* UNIT ATTENTION */
2811 buffer[SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION;
2812 core_scsi3_ua_for_check_condition(cmd, &asc, &ascq);
2813 buffer[SPC_ASC_KEY_OFFSET] = asc;
2814 buffer[SPC_ASCQ_KEY_OFFSET] = ascq;
2815 break;
2816 case TCM_CHECK_CONDITION_NOT_READY:
2817 /* CURRENT ERROR */
2818 buffer[0] = 0x70;
2819 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
2820 /* Not Ready */
2821 buffer[SPC_SENSE_KEY_OFFSET] = NOT_READY;
2822 transport_get_sense_codes(cmd, &asc, &ascq);
2823 buffer[SPC_ASC_KEY_OFFSET] = asc;
2824 buffer[SPC_ASCQ_KEY_OFFSET] = ascq;
2825 break;
2826 case TCM_MISCOMPARE_VERIFY:
2827 /* CURRENT ERROR */
2828 buffer[0] = 0x70;
2829 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
2830 buffer[SPC_SENSE_KEY_OFFSET] = MISCOMPARE;
2831 /* MISCOMPARE DURING VERIFY OPERATION */
2832 buffer[SPC_ASC_KEY_OFFSET] = 0x1d;
2833 buffer[SPC_ASCQ_KEY_OFFSET] = 0x00;
2834 break;
2835 case TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED:
2836 /* CURRENT ERROR */
2837 buffer[0] = 0x70;
2838 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
2839 /* ILLEGAL REQUEST */
2840 buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
2841 /* LOGICAL BLOCK GUARD CHECK FAILED */
2842 buffer[SPC_ASC_KEY_OFFSET] = 0x10;
2843 buffer[SPC_ASCQ_KEY_OFFSET] = 0x01;
2844 transport_err_sector_info(buffer, cmd->bad_sector);
2845 break;
2846 case TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED:
2847 /* CURRENT ERROR */
2848 buffer[0] = 0x70;
2849 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
2850 /* ILLEGAL REQUEST */
2851 buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
2852 /* LOGICAL BLOCK APPLICATION TAG CHECK FAILED */
2853 buffer[SPC_ASC_KEY_OFFSET] = 0x10;
2854 buffer[SPC_ASCQ_KEY_OFFSET] = 0x02;
2855 transport_err_sector_info(buffer, cmd->bad_sector);
2856 break;
2857 case TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED:
2858 /* CURRENT ERROR */
2859 buffer[0] = 0x70;
2860 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
2861 /* ILLEGAL REQUEST */
2862 buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
2863 /* LOGICAL BLOCK REFERENCE TAG CHECK FAILED */
2864 buffer[SPC_ASC_KEY_OFFSET] = 0x10;
2865 buffer[SPC_ASCQ_KEY_OFFSET] = 0x03;
2866 transport_err_sector_info(buffer, cmd->bad_sector);
2867 break;
2868 case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
2869 default:
2870 /* CURRENT ERROR */
2871 buffer[0] = 0x70;
2872 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
2873 /*
2874 * Returning ILLEGAL REQUEST would cause immediate IO errors on
2875 * Solaris initiators. Returning NOT READY instead means the
2876 * operations will be retried a finite number of times and we
2877 * can survive intermittent errors.
2878 */
2879 buffer[SPC_SENSE_KEY_OFFSET] = NOT_READY;
2880 /* LOGICAL UNIT COMMUNICATION FAILURE */
2881 buffer[SPC_ASC_KEY_OFFSET] = 0x08;
2882 break;
2883 } 2830 }
2884 /*
2885 * This code uses linux/include/scsi/scsi.h SAM status codes!
2886 */
2887 cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
2888 /*
2889 * Automatically padded, this value is encoded in the fabric's
2890 * data_length response PDU containing the SCSI defined sense data.
2891 */
2892 cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER;
2893 2831
2894after_reason:
2895 trace_target_cmd_complete(cmd); 2832 trace_target_cmd_complete(cmd);
2896 return cmd->se_tfo->queue_status(cmd); 2833 return cmd->se_tfo->queue_status(cmd);
2897} 2834}
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index c448ef421ce7..937cebf76633 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -25,6 +25,7 @@
25#include <linux/parser.h> 25#include <linux/parser.h>
26#include <linux/vmalloc.h> 26#include <linux/vmalloc.h>
27#include <linux/uio_driver.h> 27#include <linux/uio_driver.h>
28#include <linux/stringify.h>
28#include <net/genetlink.h> 29#include <net/genetlink.h>
29#include <scsi/scsi_common.h> 30#include <scsi/scsi_common.h>
30#include <scsi/scsi_proto.h> 31#include <scsi/scsi_proto.h>
@@ -538,14 +539,8 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
538 UPDATE_HEAD(udev->data_tail, cmd->data_length, udev->data_size); 539 UPDATE_HEAD(udev->data_tail, cmd->data_length, udev->data_size);
539 pr_warn("TCMU: Userspace set UNKNOWN_OP flag on se_cmd %p\n", 540 pr_warn("TCMU: Userspace set UNKNOWN_OP flag on se_cmd %p\n",
540 cmd->se_cmd); 541 cmd->se_cmd);
541 transport_generic_request_failure(cmd->se_cmd, 542 entry->rsp.scsi_status = SAM_STAT_CHECK_CONDITION;
542 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE); 543 } else if (entry->rsp.scsi_status == SAM_STAT_CHECK_CONDITION) {
543 cmd->se_cmd = NULL;
544 kmem_cache_free(tcmu_cmd_cache, cmd);
545 return;
546 }
547
548 if (entry->rsp.scsi_status == SAM_STAT_CHECK_CONDITION) {
549 memcpy(se_cmd->sense_buffer, entry->rsp.sense_buffer, 544 memcpy(se_cmd->sense_buffer, entry->rsp.sense_buffer,
550 se_cmd->scsi_sense_length); 545 se_cmd->scsi_sense_length);
551 546
@@ -577,7 +572,6 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
577static unsigned int tcmu_handle_completions(struct tcmu_dev *udev) 572static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
578{ 573{
579 struct tcmu_mailbox *mb; 574 struct tcmu_mailbox *mb;
580 LIST_HEAD(cpl_cmds);
581 unsigned long flags; 575 unsigned long flags;
582 int handled = 0; 576 int handled = 0;
583 577
@@ -905,7 +899,7 @@ static int tcmu_configure_device(struct se_device *dev)
905 WARN_ON(!PAGE_ALIGNED(udev->data_off)); 899 WARN_ON(!PAGE_ALIGNED(udev->data_off));
906 WARN_ON(udev->data_size % PAGE_SIZE); 900 WARN_ON(udev->data_size % PAGE_SIZE);
907 901
908 info->version = xstr(TCMU_MAILBOX_VERSION); 902 info->version = __stringify(TCMU_MAILBOX_VERSION);
909 903
910 info->mem[0].name = "tcm-user command & data buffer"; 904 info->mem[0].name = "tcm-user command & data buffer";
911 info->mem[0].addr = (phys_addr_t) udev->mb_addr; 905 info->mem[0].addr = (phys_addr_t) udev->mb_addr;
diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
index 4515f52546f8..47fe94ee10b8 100644
--- a/drivers/target/target_core_xcopy.c
+++ b/drivers/target/target_core_xcopy.c
@@ -450,6 +450,8 @@ int target_xcopy_setup_pt(void)
450 memset(&xcopy_pt_sess, 0, sizeof(struct se_session)); 450 memset(&xcopy_pt_sess, 0, sizeof(struct se_session));
451 INIT_LIST_HEAD(&xcopy_pt_sess.sess_list); 451 INIT_LIST_HEAD(&xcopy_pt_sess.sess_list);
452 INIT_LIST_HEAD(&xcopy_pt_sess.sess_acl_list); 452 INIT_LIST_HEAD(&xcopy_pt_sess.sess_acl_list);
453 INIT_LIST_HEAD(&xcopy_pt_sess.sess_cmd_list);
454 spin_lock_init(&xcopy_pt_sess.sess_cmd_lock);
453 455
454 xcopy_pt_nacl.se_tpg = &xcopy_pt_tpg; 456 xcopy_pt_nacl.se_tpg = &xcopy_pt_tpg;
455 xcopy_pt_nacl.nacl_sess = &xcopy_pt_sess; 457 xcopy_pt_nacl.nacl_sess = &xcopy_pt_sess;
@@ -644,7 +646,7 @@ static int target_xcopy_read_source(
644 pr_debug("XCOPY: Built READ_16: LBA: %llu Sectors: %u Length: %u\n", 646 pr_debug("XCOPY: Built READ_16: LBA: %llu Sectors: %u Length: %u\n",
645 (unsigned long long)src_lba, src_sectors, length); 647 (unsigned long long)src_lba, src_sectors, length);
646 648
647 transport_init_se_cmd(se_cmd, &xcopy_pt_tfo, NULL, length, 649 transport_init_se_cmd(se_cmd, &xcopy_pt_tfo, &xcopy_pt_sess, length,
648 DMA_FROM_DEVICE, 0, &xpt_cmd->sense_buffer[0]); 650 DMA_FROM_DEVICE, 0, &xpt_cmd->sense_buffer[0]);
649 xop->src_pt_cmd = xpt_cmd; 651 xop->src_pt_cmd = xpt_cmd;
650 652
@@ -704,7 +706,7 @@ static int target_xcopy_write_destination(
704 pr_debug("XCOPY: Built WRITE_16: LBA: %llu Sectors: %u Length: %u\n", 706 pr_debug("XCOPY: Built WRITE_16: LBA: %llu Sectors: %u Length: %u\n",
705 (unsigned long long)dst_lba, dst_sectors, length); 707 (unsigned long long)dst_lba, dst_sectors, length);
706 708
707 transport_init_se_cmd(se_cmd, &xcopy_pt_tfo, NULL, length, 709 transport_init_se_cmd(se_cmd, &xcopy_pt_tfo, &xcopy_pt_sess, length,
708 DMA_TO_DEVICE, 0, &xpt_cmd->sense_buffer[0]); 710 DMA_TO_DEVICE, 0, &xpt_cmd->sense_buffer[0]);
709 xop->dst_pt_cmd = xpt_cmd; 711 xop->dst_pt_cmd = xpt_cmd;
710 712
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index 68031723e5be..aa3caca8bace 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -255,7 +255,7 @@ static void ft_recv_seq(struct fc_seq *sp, struct fc_frame *fp, void *arg)
255 struct ft_cmd *cmd = arg; 255 struct ft_cmd *cmd = arg;
256 struct fc_frame_header *fh; 256 struct fc_frame_header *fh;
257 257
258 if (unlikely(IS_ERR(fp))) { 258 if (IS_ERR(fp)) {
259 /* XXX need to find cmd if queued */ 259 /* XXX need to find cmd if queued */
260 cmd->seq = NULL; 260 cmd->seq = NULL;
261 cmd->aborted = true; 261 cmd->aborted = true;
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index 118938ee8552..5aabc4bc0d75 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -163,7 +163,7 @@ config THERMAL_EMULATION
163 163
164config HISI_THERMAL 164config HISI_THERMAL
165 tristate "Hisilicon thermal driver" 165 tristate "Hisilicon thermal driver"
166 depends on ARCH_HISI && CPU_THERMAL && OF 166 depends on (ARCH_HISI && CPU_THERMAL && OF) || COMPILE_TEST
167 help 167 help
168 Enable this to plug hisilicon's thermal sensor driver into the Linux 168 Enable this to plug hisilicon's thermal sensor driver into the Linux
169 thermal framework. cpufreq is used as the cooling device to throttle 169 thermal framework. cpufreq is used as the cooling device to throttle
@@ -182,7 +182,7 @@ config IMX_THERMAL
182 182
183config SPEAR_THERMAL 183config SPEAR_THERMAL
184 bool "SPEAr thermal sensor driver" 184 bool "SPEAr thermal sensor driver"
185 depends on PLAT_SPEAR 185 depends on PLAT_SPEAR || COMPILE_TEST
186 depends on OF 186 depends on OF
187 help 187 help
188 Enable this to plug the SPEAr thermal sensor driver into the Linux 188 Enable this to plug the SPEAr thermal sensor driver into the Linux
@@ -190,7 +190,7 @@ config SPEAR_THERMAL
190 190
191config ROCKCHIP_THERMAL 191config ROCKCHIP_THERMAL
192 tristate "Rockchip thermal driver" 192 tristate "Rockchip thermal driver"
193 depends on ARCH_ROCKCHIP 193 depends on ARCH_ROCKCHIP || COMPILE_TEST
194 depends on RESET_CONTROLLER 194 depends on RESET_CONTROLLER
195 help 195 help
196 Rockchip thermal driver provides support for Temperature sensor 196 Rockchip thermal driver provides support for Temperature sensor
@@ -208,7 +208,7 @@ config RCAR_THERMAL
208 208
209config KIRKWOOD_THERMAL 209config KIRKWOOD_THERMAL
210 tristate "Temperature sensor on Marvell Kirkwood SoCs" 210 tristate "Temperature sensor on Marvell Kirkwood SoCs"
211 depends on MACH_KIRKWOOD 211 depends on MACH_KIRKWOOD || COMPILE_TEST
212 depends on OF 212 depends on OF
213 help 213 help
214 Support for the Kirkwood thermal sensor driver into the Linux thermal 214 Support for the Kirkwood thermal sensor driver into the Linux thermal
@@ -216,7 +216,7 @@ config KIRKWOOD_THERMAL
216 216
217config DOVE_THERMAL 217config DOVE_THERMAL
218 tristate "Temperature sensor on Marvell Dove SoCs" 218 tristate "Temperature sensor on Marvell Dove SoCs"
219 depends on ARCH_DOVE || MACH_DOVE 219 depends on ARCH_DOVE || MACH_DOVE || COMPILE_TEST
220 depends on OF 220 depends on OF
221 help 221 help
222 Support for the Dove thermal sensor driver in the Linux thermal 222 Support for the Dove thermal sensor driver in the Linux thermal
@@ -234,7 +234,7 @@ config DB8500_THERMAL
234 234
235config ARMADA_THERMAL 235config ARMADA_THERMAL
236 tristate "Armada 370/XP thermal management" 236 tristate "Armada 370/XP thermal management"
237 depends on ARCH_MVEBU 237 depends on ARCH_MVEBU || COMPILE_TEST
238 depends on OF 238 depends on OF
239 help 239 help
240 Enable this option if you want to have support for thermal management 240 Enable this option if you want to have support for thermal management
@@ -340,12 +340,21 @@ config ACPI_THERMAL_REL
340 tristate 340 tristate
341 depends on ACPI 341 depends on ACPI
342 342
343config INTEL_PCH_THERMAL
344 tristate "Intel PCH Thermal Reporting Driver"
345 depends on X86 && PCI
346 help
347 Enable this to support thermal reporting on certain intel PCHs.
348 Thermal reporting device will provide temperature reading,
349 programmable trip points and other information.
350
343menu "Texas Instruments thermal drivers" 351menu "Texas Instruments thermal drivers"
352depends on ARCH_HAS_BANDGAP || COMPILE_TEST
344source "drivers/thermal/ti-soc-thermal/Kconfig" 353source "drivers/thermal/ti-soc-thermal/Kconfig"
345endmenu 354endmenu
346 355
347menu "Samsung thermal drivers" 356menu "Samsung thermal drivers"
348depends on ARCH_EXYNOS 357depends on ARCH_EXYNOS || COMPILE_TEST
349source "drivers/thermal/samsung/Kconfig" 358source "drivers/thermal/samsung/Kconfig"
350endmenu 359endmenu
351 360
@@ -356,7 +365,7 @@ endmenu
356 365
357config QCOM_SPMI_TEMP_ALARM 366config QCOM_SPMI_TEMP_ALARM
358 tristate "Qualcomm SPMI PMIC Temperature Alarm" 367 tristate "Qualcomm SPMI PMIC Temperature Alarm"
359 depends on OF && SPMI && IIO 368 depends on OF && (SPMI || COMPILE_TEST) && IIO
360 select REGMAP_SPMI 369 select REGMAP_SPMI
361 help 370 help
362 This enables a thermal sysfs driver for Qualcomm plug-and-play (QPNP) 371 This enables a thermal sysfs driver for Qualcomm plug-and-play (QPNP)
diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile
index 535dfee1496f..26f160809959 100644
--- a/drivers/thermal/Makefile
+++ b/drivers/thermal/Makefile
@@ -41,6 +41,7 @@ obj-$(CONFIG_INTEL_SOC_DTS_THERMAL) += intel_soc_dts_thermal.o
41obj-$(CONFIG_INTEL_QUARK_DTS_THERMAL) += intel_quark_dts_thermal.o 41obj-$(CONFIG_INTEL_QUARK_DTS_THERMAL) += intel_quark_dts_thermal.o
42obj-$(CONFIG_TI_SOC_THERMAL) += ti-soc-thermal/ 42obj-$(CONFIG_TI_SOC_THERMAL) += ti-soc-thermal/
43obj-$(CONFIG_INT340X_THERMAL) += int340x_thermal/ 43obj-$(CONFIG_INT340X_THERMAL) += int340x_thermal/
44obj-$(CONFIG_INTEL_PCH_THERMAL) += intel_pch_thermal.o
44obj-$(CONFIG_ST_THERMAL) += st/ 45obj-$(CONFIG_ST_THERMAL) += st/
45obj-$(CONFIG_TEGRA_SOCTHERM) += tegra_soctherm.o 46obj-$(CONFIG_TEGRA_SOCTHERM) += tegra_soctherm.o
46obj-$(CONFIG_HISI_THERMAL) += hisi_thermal.o 47obj-$(CONFIG_HISI_THERMAL) += hisi_thermal.o
diff --git a/drivers/thermal/armada_thermal.c b/drivers/thermal/armada_thermal.c
index 01255fd65135..26b8d326546a 100644
--- a/drivers/thermal/armada_thermal.c
+++ b/drivers/thermal/armada_thermal.c
@@ -155,7 +155,7 @@ static bool armada_is_valid(struct armada_thermal_priv *priv)
155} 155}
156 156
157static int armada_get_temp(struct thermal_zone_device *thermal, 157static int armada_get_temp(struct thermal_zone_device *thermal,
158 unsigned long *temp) 158 int *temp)
159{ 159{
160 struct armada_thermal_priv *priv = thermal->devdata; 160 struct armada_thermal_priv *priv = thermal->devdata;
161 unsigned long reg; 161 unsigned long reg;
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
index 620dcd405ff6..42c6f71bdcc1 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpu_cooling.c
@@ -262,7 +262,9 @@ static int cpufreq_thermal_notifier(struct notifier_block *nb,
262 * efficiently. Power is stored in mW, frequency in KHz. The 262 * efficiently. Power is stored in mW, frequency in KHz. The
263 * resulting table is in ascending order. 263 * resulting table is in ascending order.
264 * 264 *
265 * Return: 0 on success, -E* on error. 265 * Return: 0 on success, -EINVAL if there are no OPPs for any CPUs,
266 * -ENOMEM if we run out of memory or -EAGAIN if an OPP was
267 * added/enabled while the function was executing.
266 */ 268 */
267static int build_dyn_power_table(struct cpufreq_cooling_device *cpufreq_device, 269static int build_dyn_power_table(struct cpufreq_cooling_device *cpufreq_device,
268 u32 capacitance) 270 u32 capacitance)
@@ -273,8 +275,6 @@ static int build_dyn_power_table(struct cpufreq_cooling_device *cpufreq_device,
273 int num_opps = 0, cpu, i, ret = 0; 275 int num_opps = 0, cpu, i, ret = 0;
274 unsigned long freq; 276 unsigned long freq;
275 277
276 rcu_read_lock();
277
278 for_each_cpu(cpu, &cpufreq_device->allowed_cpus) { 278 for_each_cpu(cpu, &cpufreq_device->allowed_cpus) {
279 dev = get_cpu_device(cpu); 279 dev = get_cpu_device(cpu);
280 if (!dev) { 280 if (!dev) {
@@ -284,24 +284,20 @@ static int build_dyn_power_table(struct cpufreq_cooling_device *cpufreq_device,
284 } 284 }
285 285
286 num_opps = dev_pm_opp_get_opp_count(dev); 286 num_opps = dev_pm_opp_get_opp_count(dev);
287 if (num_opps > 0) { 287 if (num_opps > 0)
288 break; 288 break;
289 } else if (num_opps < 0) { 289 else if (num_opps < 0)
290 ret = num_opps; 290 return num_opps;
291 goto unlock;
292 }
293 } 291 }
294 292
295 if (num_opps == 0) { 293 if (num_opps == 0)
296 ret = -EINVAL; 294 return -EINVAL;
297 goto unlock;
298 }
299 295
300 power_table = kcalloc(num_opps, sizeof(*power_table), GFP_KERNEL); 296 power_table = kcalloc(num_opps, sizeof(*power_table), GFP_KERNEL);
301 if (!power_table) { 297 if (!power_table)
302 ret = -ENOMEM; 298 return -ENOMEM;
303 goto unlock; 299
304 } 300 rcu_read_lock();
305 301
306 for (freq = 0, i = 0; 302 for (freq = 0, i = 0;
307 opp = dev_pm_opp_find_freq_ceil(dev, &freq), !IS_ERR(opp); 303 opp = dev_pm_opp_find_freq_ceil(dev, &freq), !IS_ERR(opp);
@@ -309,6 +305,12 @@ static int build_dyn_power_table(struct cpufreq_cooling_device *cpufreq_device,
309 u32 freq_mhz, voltage_mv; 305 u32 freq_mhz, voltage_mv;
310 u64 power; 306 u64 power;
311 307
308 if (i >= num_opps) {
309 rcu_read_unlock();
310 ret = -EAGAIN;
311 goto free_power_table;
312 }
313
312 freq_mhz = freq / 1000000; 314 freq_mhz = freq / 1000000;
313 voltage_mv = dev_pm_opp_get_voltage(opp) / 1000; 315 voltage_mv = dev_pm_opp_get_voltage(opp) / 1000;
314 316
@@ -326,17 +328,22 @@ static int build_dyn_power_table(struct cpufreq_cooling_device *cpufreq_device,
326 power_table[i].power = power; 328 power_table[i].power = power;
327 } 329 }
328 330
329 if (i == 0) { 331 rcu_read_unlock();
332
333 if (i != num_opps) {
330 ret = PTR_ERR(opp); 334 ret = PTR_ERR(opp);
331 goto unlock; 335 goto free_power_table;
332 } 336 }
333 337
334 cpufreq_device->cpu_dev = dev; 338 cpufreq_device->cpu_dev = dev;
335 cpufreq_device->dyn_power_table = power_table; 339 cpufreq_device->dyn_power_table = power_table;
336 cpufreq_device->dyn_power_table_entries = i; 340 cpufreq_device->dyn_power_table_entries = i;
337 341
338unlock: 342 return 0;
339 rcu_read_unlock(); 343
344free_power_table:
345 kfree(power_table);
346
340 return ret; 347 return ret;
341} 348}
342 349
@@ -847,7 +854,7 @@ __cpufreq_cooling_register(struct device_node *np,
847 ret = get_idr(&cpufreq_idr, &cpufreq_dev->id); 854 ret = get_idr(&cpufreq_idr, &cpufreq_dev->id);
848 if (ret) { 855 if (ret) {
849 cool_dev = ERR_PTR(ret); 856 cool_dev = ERR_PTR(ret);
850 goto free_table; 857 goto free_power_table;
851 } 858 }
852 859
853 snprintf(dev_name, sizeof(dev_name), "thermal-cpufreq-%d", 860 snprintf(dev_name, sizeof(dev_name), "thermal-cpufreq-%d",
@@ -889,6 +896,8 @@ __cpufreq_cooling_register(struct device_node *np,
889 896
890remove_idr: 897remove_idr:
891 release_idr(&cpufreq_idr, cpufreq_dev->id); 898 release_idr(&cpufreq_idr, cpufreq_dev->id);
899free_power_table:
900 kfree(cpufreq_dev->dyn_power_table);
892free_table: 901free_table:
893 kfree(cpufreq_dev->freq_table); 902 kfree(cpufreq_dev->freq_table);
894free_time_in_idle_timestamp: 903free_time_in_idle_timestamp:
@@ -1039,6 +1048,7 @@ void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
1039 1048
1040 thermal_cooling_device_unregister(cpufreq_dev->cool_dev); 1049 thermal_cooling_device_unregister(cpufreq_dev->cool_dev);
1041 release_idr(&cpufreq_idr, cpufreq_dev->id); 1050 release_idr(&cpufreq_idr, cpufreq_dev->id);
1051 kfree(cpufreq_dev->dyn_power_table);
1042 kfree(cpufreq_dev->time_in_idle_timestamp); 1052 kfree(cpufreq_dev->time_in_idle_timestamp);
1043 kfree(cpufreq_dev->time_in_idle); 1053 kfree(cpufreq_dev->time_in_idle);
1044 kfree(cpufreq_dev->freq_table); 1054 kfree(cpufreq_dev->freq_table);
diff --git a/drivers/thermal/db8500_cpufreq_cooling.c b/drivers/thermal/db8500_cpufreq_cooling.c
index 607b62c7e611..e58bd0b658b5 100644
--- a/drivers/thermal/db8500_cpufreq_cooling.c
+++ b/drivers/thermal/db8500_cpufreq_cooling.c
@@ -72,6 +72,7 @@ static const struct of_device_id db8500_cpufreq_cooling_match[] = {
72 { .compatible = "stericsson,db8500-cpufreq-cooling" }, 72 { .compatible = "stericsson,db8500-cpufreq-cooling" },
73 {}, 73 {},
74}; 74};
75MODULE_DEVICE_TABLE(of, db8500_cpufreq_cooling_match);
75#endif 76#endif
76 77
77static struct platform_driver db8500_cpufreq_cooling_driver = { 78static struct platform_driver db8500_cpufreq_cooling_driver = {
diff --git a/drivers/thermal/db8500_thermal.c b/drivers/thermal/db8500_thermal.c
index 2fb273c4baa9..652acd8fbe48 100644
--- a/drivers/thermal/db8500_thermal.c
+++ b/drivers/thermal/db8500_thermal.c
@@ -107,8 +107,7 @@ static int db8500_cdev_unbind(struct thermal_zone_device *thermal,
107} 107}
108 108
109/* Callback to get current temperature */ 109/* Callback to get current temperature */
110static int db8500_sys_get_temp(struct thermal_zone_device *thermal, 110static int db8500_sys_get_temp(struct thermal_zone_device *thermal, int *temp)
111 unsigned long *temp)
112{ 111{
113 struct db8500_thermal_zone *pzone = thermal->devdata; 112 struct db8500_thermal_zone *pzone = thermal->devdata;
114 113
@@ -180,7 +179,7 @@ static int db8500_sys_get_trip_type(struct thermal_zone_device *thermal,
180 179
181/* Callback to get trip point temperature */ 180/* Callback to get trip point temperature */
182static int db8500_sys_get_trip_temp(struct thermal_zone_device *thermal, 181static int db8500_sys_get_trip_temp(struct thermal_zone_device *thermal,
183 int trip, unsigned long *temp) 182 int trip, int *temp)
184{ 183{
185 struct db8500_thermal_zone *pzone = thermal->devdata; 184 struct db8500_thermal_zone *pzone = thermal->devdata;
186 struct db8500_thsens_platform_data *ptrips = pzone->trip_tab; 185 struct db8500_thsens_platform_data *ptrips = pzone->trip_tab;
@@ -195,7 +194,7 @@ static int db8500_sys_get_trip_temp(struct thermal_zone_device *thermal,
195 194
196/* Callback to get critical trip point temperature */ 195/* Callback to get critical trip point temperature */
197static int db8500_sys_get_crit_temp(struct thermal_zone_device *thermal, 196static int db8500_sys_get_crit_temp(struct thermal_zone_device *thermal,
198 unsigned long *temp) 197 int *temp)
199{ 198{
200 struct db8500_thermal_zone *pzone = thermal->devdata; 199 struct db8500_thermal_zone *pzone = thermal->devdata;
201 struct db8500_thsens_platform_data *ptrips = pzone->trip_tab; 200 struct db8500_thsens_platform_data *ptrips = pzone->trip_tab;
diff --git a/drivers/thermal/dove_thermal.c b/drivers/thermal/dove_thermal.c
index 09f6e304c274..a0bc9de42553 100644
--- a/drivers/thermal/dove_thermal.c
+++ b/drivers/thermal/dove_thermal.c
@@ -93,7 +93,7 @@ static int dove_init_sensor(const struct dove_thermal_priv *priv)
93} 93}
94 94
95static int dove_get_temp(struct thermal_zone_device *thermal, 95static int dove_get_temp(struct thermal_zone_device *thermal,
96 unsigned long *temp) 96 int *temp)
97{ 97{
98 unsigned long reg; 98 unsigned long reg;
99 struct dove_thermal_priv *priv = thermal->devdata; 99 struct dove_thermal_priv *priv = thermal->devdata;
diff --git a/drivers/thermal/fair_share.c b/drivers/thermal/fair_share.c
index c2c10bbe24d6..34fe36504a55 100644
--- a/drivers/thermal/fair_share.c
+++ b/drivers/thermal/fair_share.c
@@ -34,7 +34,7 @@
34static int get_trip_level(struct thermal_zone_device *tz) 34static int get_trip_level(struct thermal_zone_device *tz)
35{ 35{
36 int count = 0; 36 int count = 0;
37 unsigned long trip_temp; 37 int trip_temp;
38 enum thermal_trip_type trip_type; 38 enum thermal_trip_type trip_type;
39 39
40 if (tz->trips == 0 || !tz->ops->get_trip_temp) 40 if (tz->trips == 0 || !tz->ops->get_trip_temp)
diff --git a/drivers/thermal/gov_bang_bang.c b/drivers/thermal/gov_bang_bang.c
index c5dd76b2ee74..70836c5b89bc 100644
--- a/drivers/thermal/gov_bang_bang.c
+++ b/drivers/thermal/gov_bang_bang.c
@@ -25,14 +25,13 @@
25 25
26static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip) 26static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip)
27{ 27{
28 long trip_temp; 28 int trip_temp, trip_hyst;
29 unsigned long trip_hyst;
30 struct thermal_instance *instance; 29 struct thermal_instance *instance;
31 30
32 tz->ops->get_trip_temp(tz, trip, &trip_temp); 31 tz->ops->get_trip_temp(tz, trip, &trip_temp);
33 tz->ops->get_trip_hyst(tz, trip, &trip_hyst); 32 tz->ops->get_trip_hyst(tz, trip, &trip_hyst);
34 33
35 dev_dbg(&tz->device, "Trip%d[temp=%ld]:temp=%d:hyst=%ld\n", 34 dev_dbg(&tz->device, "Trip%d[temp=%d]:temp=%d:hyst=%d\n",
36 trip, trip_temp, tz->temperature, 35 trip, trip_temp, tz->temperature,
37 trip_hyst); 36 trip_hyst);
38 37
diff --git a/drivers/thermal/hisi_thermal.c b/drivers/thermal/hisi_thermal.c
index b49f97c734d0..36d07295f8e3 100644
--- a/drivers/thermal/hisi_thermal.c
+++ b/drivers/thermal/hisi_thermal.c
@@ -155,7 +155,7 @@ static void hisi_thermal_disable_sensor(struct hisi_thermal_data *data)
155 mutex_unlock(&data->thermal_lock); 155 mutex_unlock(&data->thermal_lock);
156} 156}
157 157
158static int hisi_thermal_get_temp(void *_sensor, long *temp) 158static int hisi_thermal_get_temp(void *_sensor, int *temp)
159{ 159{
160 struct hisi_thermal_sensor *sensor = _sensor; 160 struct hisi_thermal_sensor *sensor = _sensor;
161 struct hisi_thermal_data *data = sensor->thermal; 161 struct hisi_thermal_data *data = sensor->thermal;
@@ -178,7 +178,7 @@ static int hisi_thermal_get_temp(void *_sensor, long *temp)
178 data->irq_bind_sensor = sensor_id; 178 data->irq_bind_sensor = sensor_id;
179 mutex_unlock(&data->thermal_lock); 179 mutex_unlock(&data->thermal_lock);
180 180
181 dev_dbg(&data->pdev->dev, "id=%d, irq=%d, temp=%ld, thres=%d\n", 181 dev_dbg(&data->pdev->dev, "id=%d, irq=%d, temp=%d, thres=%d\n",
182 sensor->id, data->irq_enabled, *temp, sensor->thres_temp); 182 sensor->id, data->irq_enabled, *temp, sensor->thres_temp);
183 /* 183 /*
184 * Bind irq to sensor for two cases: 184 * Bind irq to sensor for two cases:
diff --git a/drivers/thermal/imx_thermal.c b/drivers/thermal/imx_thermal.c
index fde4c2876d14..4bec1d3c3d27 100644
--- a/drivers/thermal/imx_thermal.c
+++ b/drivers/thermal/imx_thermal.c
@@ -98,10 +98,10 @@ struct imx_thermal_data {
98 enum thermal_device_mode mode; 98 enum thermal_device_mode mode;
99 struct regmap *tempmon; 99 struct regmap *tempmon;
100 u32 c1, c2; /* See formula in imx_get_sensor_data() */ 100 u32 c1, c2; /* See formula in imx_get_sensor_data() */
101 unsigned long temp_passive; 101 int temp_passive;
102 unsigned long temp_critical; 102 int temp_critical;
103 unsigned long alarm_temp; 103 int alarm_temp;
104 unsigned long last_temp; 104 int last_temp;
105 bool irq_enabled; 105 bool irq_enabled;
106 int irq; 106 int irq;
107 struct clk *thermal_clk; 107 struct clk *thermal_clk;
@@ -109,7 +109,7 @@ struct imx_thermal_data {
109}; 109};
110 110
111static void imx_set_panic_temp(struct imx_thermal_data *data, 111static void imx_set_panic_temp(struct imx_thermal_data *data,
112 signed long panic_temp) 112 int panic_temp)
113{ 113{
114 struct regmap *map = data->tempmon; 114 struct regmap *map = data->tempmon;
115 int critical_value; 115 int critical_value;
@@ -121,7 +121,7 @@ static void imx_set_panic_temp(struct imx_thermal_data *data,
121} 121}
122 122
123static void imx_set_alarm_temp(struct imx_thermal_data *data, 123static void imx_set_alarm_temp(struct imx_thermal_data *data,
124 signed long alarm_temp) 124 int alarm_temp)
125{ 125{
126 struct regmap *map = data->tempmon; 126 struct regmap *map = data->tempmon;
127 int alarm_value; 127 int alarm_value;
@@ -133,7 +133,7 @@ static void imx_set_alarm_temp(struct imx_thermal_data *data,
133 TEMPSENSE0_ALARM_VALUE_SHIFT); 133 TEMPSENSE0_ALARM_VALUE_SHIFT);
134} 134}
135 135
136static int imx_get_temp(struct thermal_zone_device *tz, unsigned long *temp) 136static int imx_get_temp(struct thermal_zone_device *tz, int *temp)
137{ 137{
138 struct imx_thermal_data *data = tz->devdata; 138 struct imx_thermal_data *data = tz->devdata;
139 struct regmap *map = data->tempmon; 139 struct regmap *map = data->tempmon;
@@ -189,13 +189,13 @@ static int imx_get_temp(struct thermal_zone_device *tz, unsigned long *temp)
189 if (data->alarm_temp == data->temp_critical && 189 if (data->alarm_temp == data->temp_critical &&
190 *temp < data->temp_passive) { 190 *temp < data->temp_passive) {
191 imx_set_alarm_temp(data, data->temp_passive); 191 imx_set_alarm_temp(data, data->temp_passive);
192 dev_dbg(&tz->device, "thermal alarm off: T < %lu\n", 192 dev_dbg(&tz->device, "thermal alarm off: T < %d\n",
193 data->alarm_temp / 1000); 193 data->alarm_temp / 1000);
194 } 194 }
195 } 195 }
196 196
197 if (*temp != data->last_temp) { 197 if (*temp != data->last_temp) {
198 dev_dbg(&tz->device, "millicelsius: %ld\n", *temp); 198 dev_dbg(&tz->device, "millicelsius: %d\n", *temp);
199 data->last_temp = *temp; 199 data->last_temp = *temp;
200 } 200 }
201 201
@@ -262,8 +262,7 @@ static int imx_get_trip_type(struct thermal_zone_device *tz, int trip,
262 return 0; 262 return 0;
263} 263}
264 264
265static int imx_get_crit_temp(struct thermal_zone_device *tz, 265static int imx_get_crit_temp(struct thermal_zone_device *tz, int *temp)
266 unsigned long *temp)
267{ 266{
268 struct imx_thermal_data *data = tz->devdata; 267 struct imx_thermal_data *data = tz->devdata;
269 268
@@ -272,7 +271,7 @@ static int imx_get_crit_temp(struct thermal_zone_device *tz,
272} 271}
273 272
274static int imx_get_trip_temp(struct thermal_zone_device *tz, int trip, 273static int imx_get_trip_temp(struct thermal_zone_device *tz, int trip,
275 unsigned long *temp) 274 int *temp)
276{ 275{
277 struct imx_thermal_data *data = tz->devdata; 276 struct imx_thermal_data *data = tz->devdata;
278 277
@@ -282,7 +281,7 @@ static int imx_get_trip_temp(struct thermal_zone_device *tz, int trip,
282} 281}
283 282
284static int imx_set_trip_temp(struct thermal_zone_device *tz, int trip, 283static int imx_set_trip_temp(struct thermal_zone_device *tz, int trip,
285 unsigned long temp) 284 int temp)
286{ 285{
287 struct imx_thermal_data *data = tz->devdata; 286 struct imx_thermal_data *data = tz->devdata;
288 287
@@ -434,7 +433,7 @@ static irqreturn_t imx_thermal_alarm_irq_thread(int irq, void *dev)
434{ 433{
435 struct imx_thermal_data *data = dev; 434 struct imx_thermal_data *data = dev;
436 435
437 dev_dbg(&data->tz->device, "THERMAL ALARM: T > %lu\n", 436 dev_dbg(&data->tz->device, "THERMAL ALARM: T > %d\n",
438 data->alarm_temp / 1000); 437 data->alarm_temp / 1000);
439 438
440 thermal_zone_device_update(data->tz); 439 thermal_zone_device_update(data->tz);
diff --git a/drivers/thermal/int340x_thermal/int3400_thermal.c b/drivers/thermal/int340x_thermal/int3400_thermal.c
index 031018e7a65b..5836e5554433 100644
--- a/drivers/thermal/int340x_thermal/int3400_thermal.c
+++ b/drivers/thermal/int340x_thermal/int3400_thermal.c
@@ -186,7 +186,7 @@ static int int3400_thermal_run_osc(acpi_handle handle,
186} 186}
187 187
188static int int3400_thermal_get_temp(struct thermal_zone_device *thermal, 188static int int3400_thermal_get_temp(struct thermal_zone_device *thermal,
189 unsigned long *temp) 189 int *temp)
190{ 190{
191 *temp = 20 * 1000; /* faked temp sensor with 20C */ 191 *temp = 20 * 1000; /* faked temp sensor with 20C */
192 return 0; 192 return 0;
diff --git a/drivers/thermal/int340x_thermal/int340x_thermal_zone.c b/drivers/thermal/int340x_thermal/int340x_thermal_zone.c
index 1e25133d35e2..b9b2666aa94c 100644
--- a/drivers/thermal/int340x_thermal/int340x_thermal_zone.c
+++ b/drivers/thermal/int340x_thermal/int340x_thermal_zone.c
@@ -20,7 +20,7 @@
20#include "int340x_thermal_zone.h" 20#include "int340x_thermal_zone.h"
21 21
22static int int340x_thermal_get_zone_temp(struct thermal_zone_device *zone, 22static int int340x_thermal_get_zone_temp(struct thermal_zone_device *zone,
23 unsigned long *temp) 23 int *temp)
24{ 24{
25 struct int34x_thermal_zone *d = zone->devdata; 25 struct int34x_thermal_zone *d = zone->devdata;
26 unsigned long long tmp; 26 unsigned long long tmp;
@@ -49,7 +49,7 @@ static int int340x_thermal_get_zone_temp(struct thermal_zone_device *zone,
49} 49}
50 50
51static int int340x_thermal_get_trip_temp(struct thermal_zone_device *zone, 51static int int340x_thermal_get_trip_temp(struct thermal_zone_device *zone,
52 int trip, unsigned long *temp) 52 int trip, int *temp)
53{ 53{
54 struct int34x_thermal_zone *d = zone->devdata; 54 struct int34x_thermal_zone *d = zone->devdata;
55 int i; 55 int i;
@@ -114,7 +114,7 @@ static int int340x_thermal_get_trip_type(struct thermal_zone_device *zone,
114} 114}
115 115
116static int int340x_thermal_set_trip_temp(struct thermal_zone_device *zone, 116static int int340x_thermal_set_trip_temp(struct thermal_zone_device *zone,
117 int trip, unsigned long temp) 117 int trip, int temp)
118{ 118{
119 struct int34x_thermal_zone *d = zone->devdata; 119 struct int34x_thermal_zone *d = zone->devdata;
120 acpi_status status; 120 acpi_status status;
@@ -136,7 +136,7 @@ static int int340x_thermal_set_trip_temp(struct thermal_zone_device *zone,
136 136
137 137
138static int int340x_thermal_get_trip_hyst(struct thermal_zone_device *zone, 138static int int340x_thermal_get_trip_hyst(struct thermal_zone_device *zone,
139 int trip, unsigned long *temp) 139 int trip, int *temp)
140{ 140{
141 struct int34x_thermal_zone *d = zone->devdata; 141 struct int34x_thermal_zone *d = zone->devdata;
142 acpi_status status; 142 acpi_status status;
@@ -163,7 +163,7 @@ static struct thermal_zone_device_ops int340x_thermal_zone_ops = {
163}; 163};
164 164
165static int int340x_thermal_get_trip_config(acpi_handle handle, char *name, 165static int int340x_thermal_get_trip_config(acpi_handle handle, char *name,
166 unsigned long *temp) 166 int *temp)
167{ 167{
168 unsigned long long r; 168 unsigned long long r;
169 acpi_status status; 169 acpi_status status;
diff --git a/drivers/thermal/int340x_thermal/int340x_thermal_zone.h b/drivers/thermal/int340x_thermal/int340x_thermal_zone.h
index 9f38ab72c4bf..aaadf724ff2e 100644
--- a/drivers/thermal/int340x_thermal/int340x_thermal_zone.h
+++ b/drivers/thermal/int340x_thermal/int340x_thermal_zone.h
@@ -21,7 +21,7 @@
21#define INT340X_THERMAL_MAX_ACT_TRIP_COUNT 10 21#define INT340X_THERMAL_MAX_ACT_TRIP_COUNT 10
22 22
23struct active_trip { 23struct active_trip {
24 unsigned long temp; 24 int temp;
25 int id; 25 int id;
26 bool valid; 26 bool valid;
27}; 27};
@@ -31,11 +31,11 @@ struct int34x_thermal_zone {
31 struct active_trip act_trips[INT340X_THERMAL_MAX_ACT_TRIP_COUNT]; 31 struct active_trip act_trips[INT340X_THERMAL_MAX_ACT_TRIP_COUNT];
32 unsigned long *aux_trips; 32 unsigned long *aux_trips;
33 int aux_trip_nr; 33 int aux_trip_nr;
34 unsigned long psv_temp; 34 int psv_temp;
35 int psv_trip_id; 35 int psv_trip_id;
36 unsigned long crt_temp; 36 int crt_temp;
37 int crt_trip_id; 37 int crt_trip_id;
38 unsigned long hot_temp; 38 int hot_temp;
39 int hot_trip_id; 39 int hot_trip_id;
40 struct thermal_zone_device *zone; 40 struct thermal_zone_device *zone;
41 struct thermal_zone_device_ops *override_ops; 41 struct thermal_zone_device_ops *override_ops;
diff --git a/drivers/thermal/int340x_thermal/processor_thermal_device.c b/drivers/thermal/int340x_thermal/processor_thermal_device.c
index 3df3dc34b124..ccc0ad02d066 100644
--- a/drivers/thermal/int340x_thermal/processor_thermal_device.c
+++ b/drivers/thermal/int340x_thermal/processor_thermal_device.c
@@ -145,7 +145,7 @@ static int get_tjmax(void)
145 return -EINVAL; 145 return -EINVAL;
146} 146}
147 147
148static int read_temp_msr(unsigned long *temp) 148static int read_temp_msr(int *temp)
149{ 149{
150 int cpu; 150 int cpu;
151 u32 eax, edx; 151 u32 eax, edx;
@@ -177,7 +177,7 @@ err_ret:
177} 177}
178 178
179static int proc_thermal_get_zone_temp(struct thermal_zone_device *zone, 179static int proc_thermal_get_zone_temp(struct thermal_zone_device *zone,
180 unsigned long *temp) 180 int *temp)
181{ 181{
182 int ret; 182 int ret;
183 183
diff --git a/drivers/thermal/intel_pch_thermal.c b/drivers/thermal/intel_pch_thermal.c
new file mode 100644
index 000000000000..50c7da79be83
--- /dev/null
+++ b/drivers/thermal/intel_pch_thermal.c
@@ -0,0 +1,283 @@
1/* intel_pch_thermal.c - Intel PCH Thermal driver
2 *
3 * Copyright (c) 2015, Intel Corporation.
4 *
5 * Authors:
6 * Tushar Dave <tushar.n.dave@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms and conditions of the GNU General Public License,
10 * version 2, as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 */
18
19#include <linux/module.h>
20#include <linux/types.h>
21#include <linux/init.h>
22#include <linux/pci.h>
23#include <linux/thermal.h>
24
25/* Intel PCH thermal Device IDs */
26#define PCH_THERMAL_DID_WPT 0x9CA4 /* Wildcat Point */
27
28/* Wildcat Point-LP PCH Thermal registers */
29#define WPT_TEMP 0x0000 /* Temperature */
30#define WPT_TSC 0x04 /* Thermal Sensor Control */
31#define WPT_TSS 0x06 /* Thermal Sensor Status */
32#define WPT_TSEL 0x08 /* Thermal Sensor Enable and Lock */
33#define WPT_TSREL 0x0A /* Thermal Sensor Report Enable and Lock */
34#define WPT_TSMIC 0x0C /* Thermal Sensor SMI Control */
35#define WPT_CTT 0x0010 /* Catastrophic Trip Point */
36#define WPT_TAHV 0x0014 /* Thermal Alert High Value */
37#define WPT_TALV 0x0018 /* Thermal Alert Low Value */
38#define WPT_TL 0x00000040 /* Throttle Value */
39#define WPT_PHL 0x0060 /* PCH Hot Level */
40#define WPT_PHLC 0x62 /* PHL Control */
41#define WPT_TAS 0x80 /* Thermal Alert Status */
42#define WPT_TSPIEN 0x82 /* PCI Interrupt Event Enables */
43#define WPT_TSGPEN 0x84 /* General Purpose Event Enables */
44
45/* Wildcat Point-LP PCH Thermal Register bit definitions */
46#define WPT_TEMP_TSR 0x00ff /* Temp TS Reading */
47#define WPT_TSC_CPDE 0x01 /* Catastrophic Power-Down Enable */
48#define WPT_TSS_TSDSS 0x10 /* Thermal Sensor Dynamic Shutdown Status */
49#define WPT_TSS_GPES 0x08 /* GPE status */
50#define WPT_TSEL_ETS 0x01 /* Enable TS */
51#define WPT_TSEL_PLDB 0x80 /* TSEL Policy Lock-Down Bit */
52#define WPT_TL_TOL 0x000001FF /* T0 Level */
53#define WPT_TL_T1L 0x1ff00000 /* T1 Level */
54#define WPT_TL_TTEN 0x20000000 /* TT Enable */
55
/* Owner string handed to pci_request_regions(); never modified, so const */
static const char driver_name[] = "Intel PCH thermal driver";
57
58struct pch_thermal_device {
59 void __iomem *hw_base;
60 const struct pch_dev_ops *ops;
61 struct pci_dev *pdev;
62 struct thermal_zone_device *tzd;
63 int crt_trip_id;
64 unsigned long crt_temp;
65 int hot_trip_id;
66 unsigned long hot_temp;
67};
68
69static int pch_wpt_init(struct pch_thermal_device *ptd, int *nr_trips)
70{
71 u8 tsel;
72 u16 trip_temp;
73
74 *nr_trips = 0;
75
76 /* Check if BIOS has already enabled thermal sensor */
77 if (WPT_TSS_TSDSS & readb(ptd->hw_base + WPT_TSS))
78 goto read_trips;
79
80 tsel = readb(ptd->hw_base + WPT_TSEL);
81 /*
82 * When TSEL's Policy Lock-Down bit is 1, TSEL become RO.
83 * If so, thermal sensor cannot enable. Bail out.
84 */
85 if (tsel & WPT_TSEL_PLDB) {
86 dev_err(&ptd->pdev->dev, "Sensor can't be enabled\n");
87 return -ENODEV;
88 }
89
90 writeb(tsel|WPT_TSEL_ETS, ptd->hw_base + WPT_TSEL);
91 if (!(WPT_TSS_TSDSS & readb(ptd->hw_base + WPT_TSS))) {
92 dev_err(&ptd->pdev->dev, "Sensor can't be enabled\n");
93 return -ENODEV;
94 }
95
96read_trips:
97 ptd->crt_trip_id = -1;
98 trip_temp = readw(ptd->hw_base + WPT_CTT);
99 trip_temp &= 0x1FF;
100 if (trip_temp) {
101 /* Resolution of 1/2 degree C and an offset of -50C */
102 ptd->crt_temp = trip_temp * 1000 / 2 - 50000;
103 ptd->crt_trip_id = 0;
104 ++(*nr_trips);
105 }
106
107 ptd->hot_trip_id = -1;
108 trip_temp = readw(ptd->hw_base + WPT_PHL);
109 trip_temp &= 0x1FF;
110 if (trip_temp) {
111 /* Resolution of 1/2 degree C and an offset of -50C */
112 ptd->hot_temp = trip_temp * 1000 / 2 - 50000;
113 ptd->hot_trip_id = *nr_trips;
114 ++(*nr_trips);
115 }
116
117 return 0;
118}
119
120static int pch_wpt_get_temp(struct pch_thermal_device *ptd, int *temp)
121{
122 u8 wpt_temp;
123
124 wpt_temp = WPT_TEMP_TSR & readl(ptd->hw_base + WPT_TEMP);
125
126 /* Resolution of 1/2 degree C and an offset of -50C */
127 *temp = (wpt_temp * 1000 / 2 - 50000);
128
129 return 0;
130}
131
/* Per-PCH-generation hooks, selected by PCI device ID at probe time. */
struct pch_dev_ops {
	/* enable the sensor if needed and report the number of valid trips */
	int (*hw_init)(struct pch_thermal_device *ptd, int *nr_trips);
	/* read the current temperature in millidegrees Celsius */
	int (*get_temp)(struct pch_thermal_device *ptd, int *temp);
};
136
137
138/* dev ops for Wildcat Point */
139static struct pch_dev_ops pch_dev_ops_wpt = {
140 .hw_init = pch_wpt_init,
141 .get_temp = pch_wpt_get_temp,
142};
143
144static int pch_thermal_get_temp(struct thermal_zone_device *tzd, int *temp)
145{
146 struct pch_thermal_device *ptd = tzd->devdata;
147
148 return ptd->ops->get_temp(ptd, temp);
149}
150
151static int pch_get_trip_type(struct thermal_zone_device *tzd, int trip,
152 enum thermal_trip_type *type)
153{
154 struct pch_thermal_device *ptd = tzd->devdata;
155
156 if (ptd->crt_trip_id == trip)
157 *type = THERMAL_TRIP_CRITICAL;
158 else if (ptd->hot_trip_id == trip)
159 *type = THERMAL_TRIP_HOT;
160 else
161 return -EINVAL;
162
163 return 0;
164}
165
166static int pch_get_trip_temp(struct thermal_zone_device *tzd, int trip, int *temp)
167{
168 struct pch_thermal_device *ptd = tzd->devdata;
169
170 if (ptd->crt_trip_id == trip)
171 *temp = ptd->crt_temp;
172 else if (ptd->hot_trip_id == trip)
173 *temp = ptd->hot_temp;
174 else
175 return -EINVAL;
176
177 return 0;
178}
179
/*
 * Thermal zone callbacks.  Trip points are read-only (no set_trip_temp);
 * get_temp dispatches through the per-generation pch_dev_ops.
 */
static struct thermal_zone_device_ops tzd_ops = {
	.get_temp = pch_thermal_get_temp,
	.get_trip_type = pch_get_trip_type,
	.get_trip_temp = pch_get_trip_temp,
};
185
186
/*
 * Probe: enable the PCI function, map BAR 0, initialise the sensor via
 * the generation-specific hw_init hook, and register a thermal zone.
 * Resources are unwound in reverse order of acquisition on failure.
 */
static int intel_pch_thermal_probe(struct pci_dev *pdev,
				   const struct pci_device_id *id)
{
	struct pch_thermal_device *ptd;
	int err;
	int nr_trips;
	char *dev_name;

	/* devm allocation: freed automatically when the device goes away */
	ptd = devm_kzalloc(&pdev->dev, sizeof(*ptd), GFP_KERNEL);
	if (!ptd)
		return -ENOMEM;

	/* Select the per-generation ops and zone name from the device ID */
	switch (pdev->device) {
	case PCH_THERMAL_DID_WPT:
		ptd->ops = &pch_dev_ops_wpt;
		dev_name = "pch_wildcat_point";
		break;
	default:
		dev_err(&pdev->dev, "unknown pch thermal device\n");
		return -ENODEV;
	}

	pci_set_drvdata(pdev, ptd);
	ptd->pdev = pdev;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "failed to enable pci device\n");
		return err;
	}

	err = pci_request_regions(pdev, driver_name);
	if (err) {
		dev_err(&pdev->dev, "failed to request pci region\n");
		goto error_disable;
	}

	/* BAR 0 holds the thermal sensor register block */
	ptd->hw_base = pci_ioremap_bar(pdev, 0);
	if (!ptd->hw_base) {
		err = -ENOMEM;
		dev_err(&pdev->dev, "failed to map mem base\n");
		goto error_release;
	}

	/* Enable the sensor and learn how many trip points it exposes */
	err = ptd->ops->hw_init(ptd, &nr_trips);
	if (err)
		goto error_cleanup;

	ptd->tzd = thermal_zone_device_register(dev_name, nr_trips, 0, ptd,
						&tzd_ops, NULL, 0, 0);
	if (IS_ERR(ptd->tzd)) {
		dev_err(&pdev->dev, "Failed to register thermal zone %s\n",
			dev_name);
		err = PTR_ERR(ptd->tzd);
		goto error_cleanup;
	}

	return 0;

	/* Error unwinding: reverse order of the acquisitions above */
error_cleanup:
	iounmap(ptd->hw_base);
error_release:
	pci_release_regions(pdev);
error_disable:
	pci_disable_device(pdev);
	dev_err(&pdev->dev, "pci device failed to probe\n");
	return err;
}
255
256static void intel_pch_thermal_remove(struct pci_dev *pdev)
257{
258 struct pch_thermal_device *ptd = pci_get_drvdata(pdev);
259
260 thermal_zone_device_unregister(ptd->tzd);
261 iounmap(ptd->hw_base);
262 pci_set_drvdata(pdev, NULL);
263 pci_release_region(pdev, 0);
264 pci_disable_device(pdev);
265}
266
/* PCI IDs this driver binds to — Wildcat Point-LP only for now */
static struct pci_device_id intel_pch_thermal_id[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_WPT) },
	{ 0, },
};
MODULE_DEVICE_TABLE(pci, intel_pch_thermal_id);
272
/* PCI driver glue; probe/remove manage the thermal zone's lifetime */
static struct pci_driver intel_pch_thermal_driver = {
	.name = "intel_pch_thermal",
	.id_table = intel_pch_thermal_id,
	.probe = intel_pch_thermal_probe,
	.remove = intel_pch_thermal_remove,
};
279
280module_pci_driver(intel_pch_thermal_driver);
281
282MODULE_LICENSE("GPL v2");
283MODULE_DESCRIPTION("Intel PCH Thermal driver");
diff --git a/drivers/thermal/intel_powerclamp.c b/drivers/thermal/intel_powerclamp.c
index 2ac0c704bcb8..6c79588251d5 100644
--- a/drivers/thermal/intel_powerclamp.c
+++ b/drivers/thermal/intel_powerclamp.c
@@ -693,11 +693,14 @@ static const struct x86_cpu_id intel_powerclamp_ids[] __initconst = {
693 { X86_VENDOR_INTEL, 6, 0x3f}, 693 { X86_VENDOR_INTEL, 6, 0x3f},
694 { X86_VENDOR_INTEL, 6, 0x45}, 694 { X86_VENDOR_INTEL, 6, 0x45},
695 { X86_VENDOR_INTEL, 6, 0x46}, 695 { X86_VENDOR_INTEL, 6, 0x46},
696 { X86_VENDOR_INTEL, 6, 0x47},
696 { X86_VENDOR_INTEL, 6, 0x4c}, 697 { X86_VENDOR_INTEL, 6, 0x4c},
697 { X86_VENDOR_INTEL, 6, 0x4d}, 698 { X86_VENDOR_INTEL, 6, 0x4d},
699 { X86_VENDOR_INTEL, 6, 0x4e},
698 { X86_VENDOR_INTEL, 6, 0x4f}, 700 { X86_VENDOR_INTEL, 6, 0x4f},
699 { X86_VENDOR_INTEL, 6, 0x56}, 701 { X86_VENDOR_INTEL, 6, 0x56},
700 { X86_VENDOR_INTEL, 6, 0x57}, 702 { X86_VENDOR_INTEL, 6, 0x57},
703 { X86_VENDOR_INTEL, 6, 0x5e},
701 {} 704 {}
702}; 705};
703MODULE_DEVICE_TABLE(x86cpu, intel_powerclamp_ids); 706MODULE_DEVICE_TABLE(x86cpu, intel_powerclamp_ids);
diff --git a/drivers/thermal/intel_quark_dts_thermal.c b/drivers/thermal/intel_quark_dts_thermal.c
index 4434ec812cb7..5ed90e6c8a64 100644
--- a/drivers/thermal/intel_quark_dts_thermal.c
+++ b/drivers/thermal/intel_quark_dts_thermal.c
@@ -186,7 +186,7 @@ static int soc_dts_disable(struct thermal_zone_device *tzd)
186 return ret; 186 return ret;
187} 187}
188 188
189static int _get_trip_temp(int trip, unsigned long *temp) 189static int _get_trip_temp(int trip, int *temp)
190{ 190{
191 int status; 191 int status;
192 u32 out; 192 u32 out;
@@ -212,19 +212,18 @@ static int _get_trip_temp(int trip, unsigned long *temp)
212} 212}
213 213
214static inline int sys_get_trip_temp(struct thermal_zone_device *tzd, 214static inline int sys_get_trip_temp(struct thermal_zone_device *tzd,
215 int trip, unsigned long *temp) 215 int trip, int *temp)
216{ 216{
217 return _get_trip_temp(trip, temp); 217 return _get_trip_temp(trip, temp);
218} 218}
219 219
220static inline int sys_get_crit_temp(struct thermal_zone_device *tzd, 220static inline int sys_get_crit_temp(struct thermal_zone_device *tzd, int *temp)
221 unsigned long *temp)
222{ 221{
223 return _get_trip_temp(QRK_DTS_ID_TP_CRITICAL, temp); 222 return _get_trip_temp(QRK_DTS_ID_TP_CRITICAL, temp);
224} 223}
225 224
226static int update_trip_temp(struct soc_sensor_entry *aux_entry, 225static int update_trip_temp(struct soc_sensor_entry *aux_entry,
227 int trip, unsigned long temp) 226 int trip, int temp)
228{ 227{
229 u32 out; 228 u32 out;
230 u32 temp_out; 229 u32 temp_out;
@@ -272,7 +271,7 @@ failed:
272} 271}
273 272
274static inline int sys_set_trip_temp(struct thermal_zone_device *tzd, int trip, 273static inline int sys_set_trip_temp(struct thermal_zone_device *tzd, int trip,
275 unsigned long temp) 274 int temp)
276{ 275{
277 return update_trip_temp(tzd->devdata, trip, temp); 276 return update_trip_temp(tzd->devdata, trip, temp);
278} 277}
@@ -289,7 +288,7 @@ static int sys_get_trip_type(struct thermal_zone_device *thermal,
289} 288}
290 289
291static int sys_get_curr_temp(struct thermal_zone_device *tzd, 290static int sys_get_curr_temp(struct thermal_zone_device *tzd,
292 unsigned long *temp) 291 int *temp)
293{ 292{
294 u32 out; 293 u32 out;
295 int ret; 294 int ret;
diff --git a/drivers/thermal/intel_soc_dts_iosf.c b/drivers/thermal/intel_soc_dts_iosf.c
index 42e4b6ac3875..5841d1d72996 100644
--- a/drivers/thermal/intel_soc_dts_iosf.c
+++ b/drivers/thermal/intel_soc_dts_iosf.c
@@ -80,7 +80,7 @@ err_ret:
80} 80}
81 81
82static int sys_get_trip_temp(struct thermal_zone_device *tzd, int trip, 82static int sys_get_trip_temp(struct thermal_zone_device *tzd, int trip,
83 unsigned long *temp) 83 int *temp)
84{ 84{
85 int status; 85 int status;
86 u32 out; 86 u32 out;
@@ -106,7 +106,7 @@ static int sys_get_trip_temp(struct thermal_zone_device *tzd, int trip,
106} 106}
107 107
108static int update_trip_temp(struct intel_soc_dts_sensor_entry *dts, 108static int update_trip_temp(struct intel_soc_dts_sensor_entry *dts,
109 int thres_index, unsigned long temp, 109 int thres_index, int temp,
110 enum thermal_trip_type trip_type) 110 enum thermal_trip_type trip_type)
111{ 111{
112 int status; 112 int status;
@@ -196,7 +196,7 @@ err_restore_ptps:
196} 196}
197 197
198static int sys_set_trip_temp(struct thermal_zone_device *tzd, int trip, 198static int sys_set_trip_temp(struct thermal_zone_device *tzd, int trip,
199 unsigned long temp) 199 int temp)
200{ 200{
201 struct intel_soc_dts_sensor_entry *dts = tzd->devdata; 201 struct intel_soc_dts_sensor_entry *dts = tzd->devdata;
202 struct intel_soc_dts_sensors *sensors = dts->sensors; 202 struct intel_soc_dts_sensors *sensors = dts->sensors;
@@ -226,7 +226,7 @@ static int sys_get_trip_type(struct thermal_zone_device *tzd,
226} 226}
227 227
228static int sys_get_curr_temp(struct thermal_zone_device *tzd, 228static int sys_get_curr_temp(struct thermal_zone_device *tzd,
229 unsigned long *temp) 229 int *temp)
230{ 230{
231 int status; 231 int status;
232 u32 out; 232 u32 out;
diff --git a/drivers/thermal/kirkwood_thermal.c b/drivers/thermal/kirkwood_thermal.c
index 11041fe63dc2..892236621767 100644
--- a/drivers/thermal/kirkwood_thermal.c
+++ b/drivers/thermal/kirkwood_thermal.c
@@ -33,7 +33,7 @@ struct kirkwood_thermal_priv {
33}; 33};
34 34
35static int kirkwood_get_temp(struct thermal_zone_device *thermal, 35static int kirkwood_get_temp(struct thermal_zone_device *thermal,
36 unsigned long *temp) 36 int *temp)
37{ 37{
38 unsigned long reg; 38 unsigned long reg;
39 struct kirkwood_thermal_priv *priv = thermal->devdata; 39 struct kirkwood_thermal_priv *priv = thermal->devdata;
diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c
index b295b2b6c191..42b7d4253b94 100644
--- a/drivers/thermal/of-thermal.c
+++ b/drivers/thermal/of-thermal.c
@@ -91,7 +91,7 @@ struct __thermal_zone {
91/*** DT thermal zone device callbacks ***/ 91/*** DT thermal zone device callbacks ***/
92 92
93static int of_thermal_get_temp(struct thermal_zone_device *tz, 93static int of_thermal_get_temp(struct thermal_zone_device *tz,
94 unsigned long *temp) 94 int *temp)
95{ 95{
96 struct __thermal_zone *data = tz->devdata; 96 struct __thermal_zone *data = tz->devdata;
97 97
@@ -177,7 +177,7 @@ EXPORT_SYMBOL_GPL(of_thermal_get_trip_points);
177 * Return: zero on success, error code otherwise 177 * Return: zero on success, error code otherwise
178 */ 178 */
179static int of_thermal_set_emul_temp(struct thermal_zone_device *tz, 179static int of_thermal_set_emul_temp(struct thermal_zone_device *tz,
180 unsigned long temp) 180 int temp)
181{ 181{
182 struct __thermal_zone *data = tz->devdata; 182 struct __thermal_zone *data = tz->devdata;
183 183
@@ -311,7 +311,7 @@ static int of_thermal_get_trip_type(struct thermal_zone_device *tz, int trip,
311} 311}
312 312
313static int of_thermal_get_trip_temp(struct thermal_zone_device *tz, int trip, 313static int of_thermal_get_trip_temp(struct thermal_zone_device *tz, int trip,
314 unsigned long *temp) 314 int *temp)
315{ 315{
316 struct __thermal_zone *data = tz->devdata; 316 struct __thermal_zone *data = tz->devdata;
317 317
@@ -324,7 +324,7 @@ static int of_thermal_get_trip_temp(struct thermal_zone_device *tz, int trip,
324} 324}
325 325
326static int of_thermal_set_trip_temp(struct thermal_zone_device *tz, int trip, 326static int of_thermal_set_trip_temp(struct thermal_zone_device *tz, int trip,
327 unsigned long temp) 327 int temp)
328{ 328{
329 struct __thermal_zone *data = tz->devdata; 329 struct __thermal_zone *data = tz->devdata;
330 330
@@ -338,7 +338,7 @@ static int of_thermal_set_trip_temp(struct thermal_zone_device *tz, int trip,
338} 338}
339 339
340static int of_thermal_get_trip_hyst(struct thermal_zone_device *tz, int trip, 340static int of_thermal_get_trip_hyst(struct thermal_zone_device *tz, int trip,
341 unsigned long *hyst) 341 int *hyst)
342{ 342{
343 struct __thermal_zone *data = tz->devdata; 343 struct __thermal_zone *data = tz->devdata;
344 344
@@ -351,7 +351,7 @@ static int of_thermal_get_trip_hyst(struct thermal_zone_device *tz, int trip,
351} 351}
352 352
353static int of_thermal_set_trip_hyst(struct thermal_zone_device *tz, int trip, 353static int of_thermal_set_trip_hyst(struct thermal_zone_device *tz, int trip,
354 unsigned long hyst) 354 int hyst)
355{ 355{
356 struct __thermal_zone *data = tz->devdata; 356 struct __thermal_zone *data = tz->devdata;
357 357
@@ -365,7 +365,7 @@ static int of_thermal_set_trip_hyst(struct thermal_zone_device *tz, int trip,
365} 365}
366 366
367static int of_thermal_get_crit_temp(struct thermal_zone_device *tz, 367static int of_thermal_get_crit_temp(struct thermal_zone_device *tz,
368 unsigned long *temp) 368 int *temp)
369{ 369{
370 struct __thermal_zone *data = tz->devdata; 370 struct __thermal_zone *data = tz->devdata;
371 int i; 371 int i;
diff --git a/drivers/thermal/power_allocator.c b/drivers/thermal/power_allocator.c
index 251676902869..7ff96270c933 100644
--- a/drivers/thermal/power_allocator.c
+++ b/drivers/thermal/power_allocator.c
@@ -24,6 +24,8 @@
24 24
25#include "thermal_core.h" 25#include "thermal_core.h"
26 26
27#define INVALID_TRIP -1
28
27#define FRAC_BITS 10 29#define FRAC_BITS 10
28#define int_to_frac(x) ((x) << FRAC_BITS) 30#define int_to_frac(x) ((x) << FRAC_BITS)
29#define frac_to_int(x) ((x) >> FRAC_BITS) 31#define frac_to_int(x) ((x) >> FRAC_BITS)
@@ -56,16 +58,21 @@ static inline s64 div_frac(s64 x, s64 y)
56 58
57/** 59/**
58 * struct power_allocator_params - parameters for the power allocator governor 60 * struct power_allocator_params - parameters for the power allocator governor
61 * @allocated_tzp: whether we have allocated tzp for this thermal zone and
62 * it needs to be freed on unbind
59 * @err_integral: accumulated error in the PID controller. 63 * @err_integral: accumulated error in the PID controller.
60 * @prev_err: error in the previous iteration of the PID controller. 64 * @prev_err: error in the previous iteration of the PID controller.
61 * Used to calculate the derivative term. 65 * Used to calculate the derivative term.
62 * @trip_switch_on: first passive trip point of the thermal zone. The 66 * @trip_switch_on: first passive trip point of the thermal zone. The
63 * governor switches on when this trip point is crossed. 67 * governor switches on when this trip point is crossed.
68 * If the thermal zone only has one passive trip point,
69 * @trip_switch_on should be INVALID_TRIP.
64 * @trip_max_desired_temperature: last passive trip point of the thermal 70 * @trip_max_desired_temperature: last passive trip point of the thermal
65 * zone. The temperature we are 71 * zone. The temperature we are
66 * controlling for. 72 * controlling for.
67 */ 73 */
68struct power_allocator_params { 74struct power_allocator_params {
75 bool allocated_tzp;
69 s64 err_integral; 76 s64 err_integral;
70 s32 prev_err; 77 s32 prev_err;
71 int trip_switch_on; 78 int trip_switch_on;
@@ -73,6 +80,88 @@ struct power_allocator_params {
73}; 80};
74 81
75/** 82/**
83 * estimate_sustainable_power() - Estimate the sustainable power of a thermal zone
84 * @tz: thermal zone we are operating in
85 *
86 * For thermal zones that don't provide a sustainable_power in their
87 * thermal_zone_params, estimate one. Calculate it using the minimum
88 * power of all the cooling devices as that gives a valid value that
89 * can give some degree of functionality. For optimal performance of
90 * this governor, provide a sustainable_power in the thermal zone's
91 * thermal_zone_params.
92 */
93static u32 estimate_sustainable_power(struct thermal_zone_device *tz)
94{
95 u32 sustainable_power = 0;
96 struct thermal_instance *instance;
97 struct power_allocator_params *params = tz->governor_data;
98
99 list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
100 struct thermal_cooling_device *cdev = instance->cdev;
101 u32 min_power;
102
103 if (instance->trip != params->trip_max_desired_temperature)
104 continue;
105
106 if (power_actor_get_min_power(cdev, tz, &min_power))
107 continue;
108
109 sustainable_power += min_power;
110 }
111
112 return sustainable_power;
113}
114
115/**
116 * estimate_pid_constants() - Estimate the constants for the PID controller
117 * @tz: thermal zone for which to estimate the constants
118 * @sustainable_power: sustainable power for the thermal zone
119 * @trip_switch_on: trip point number for the switch on temperature
120 * @control_temp: target temperature for the power allocator governor
121 * @force: whether to force the update of the constants
122 *
123 * This function is used to update the estimation of the PID
124 * controller constants in struct thermal_zone_parameters.
125 * Sustainable power is provided in case it was estimated. The
126 * estimated sustainable_power should not be stored in the
127 * thermal_zone_parameters so it has to be passed explicitly to this
128 * function.
129 *
130 * If @force is not set, the values in the thermal zone's parameters
131 * are preserved if they are not zero. If @force is set, the values
132 * in thermal zone's parameters are overwritten.
133 */
134static void estimate_pid_constants(struct thermal_zone_device *tz,
135 u32 sustainable_power, int trip_switch_on,
136 int control_temp, bool force)
137{
138 int ret;
139 int switch_on_temp;
140 u32 temperature_threshold;
141
142 ret = tz->ops->get_trip_temp(tz, trip_switch_on, &switch_on_temp);
143 if (ret)
144 switch_on_temp = 0;
145
146 temperature_threshold = control_temp - switch_on_temp;
147
148 if (!tz->tzp->k_po || force)
149 tz->tzp->k_po = int_to_frac(sustainable_power) /
150 temperature_threshold;
151
152 if (!tz->tzp->k_pu || force)
153 tz->tzp->k_pu = int_to_frac(2 * sustainable_power) /
154 temperature_threshold;
155
156 if (!tz->tzp->k_i || force)
157 tz->tzp->k_i = int_to_frac(10) / 1000;
158 /*
159 * The default for k_d and integral_cutoff is 0, so we can
160 * leave them as they are.
161 */
162}
163
164/**
76 * pid_controller() - PID controller 165 * pid_controller() - PID controller
77 * @tz: thermal zone we are operating in 166 * @tz: thermal zone we are operating in
78 * @current_temp: the current temperature in millicelsius 167 * @current_temp: the current temperature in millicelsius
@@ -92,17 +181,27 @@ struct power_allocator_params {
92 * Return: The power budget for the next period. 181 * Return: The power budget for the next period.
93 */ 182 */
94static u32 pid_controller(struct thermal_zone_device *tz, 183static u32 pid_controller(struct thermal_zone_device *tz,
95 unsigned long current_temp, 184 int current_temp,
96 unsigned long control_temp, 185 int control_temp,
97 u32 max_allocatable_power) 186 u32 max_allocatable_power)
98{ 187{
99 s64 p, i, d, power_range; 188 s64 p, i, d, power_range;
100 s32 err, max_power_frac; 189 s32 err, max_power_frac;
190 u32 sustainable_power;
101 struct power_allocator_params *params = tz->governor_data; 191 struct power_allocator_params *params = tz->governor_data;
102 192
103 max_power_frac = int_to_frac(max_allocatable_power); 193 max_power_frac = int_to_frac(max_allocatable_power);
104 194
105 err = ((s32)control_temp - (s32)current_temp); 195 if (tz->tzp->sustainable_power) {
196 sustainable_power = tz->tzp->sustainable_power;
197 } else {
198 sustainable_power = estimate_sustainable_power(tz);
199 estimate_pid_constants(tz, sustainable_power,
200 params->trip_switch_on, control_temp,
201 true);
202 }
203
204 err = control_temp - current_temp;
106 err = int_to_frac(err); 205 err = int_to_frac(err);
107 206
108 /* Calculate the proportional term */ 207 /* Calculate the proportional term */
@@ -139,7 +238,7 @@ static u32 pid_controller(struct thermal_zone_device *tz,
139 power_range = p + i + d; 238 power_range = p + i + d;
140 239
141 /* feed-forward the known sustainable dissipatable power */ 240 /* feed-forward the known sustainable dissipatable power */
142 power_range = tz->tzp->sustainable_power + frac_to_int(power_range); 241 power_range = sustainable_power + frac_to_int(power_range);
143 242
144 power_range = clamp(power_range, (s64)0, (s64)max_allocatable_power); 243 power_range = clamp(power_range, (s64)0, (s64)max_allocatable_power);
145 244
@@ -223,8 +322,8 @@ static void divvy_up_power(u32 *req_power, u32 *max_power, int num_actors,
223} 322}
224 323
225static int allocate_power(struct thermal_zone_device *tz, 324static int allocate_power(struct thermal_zone_device *tz,
226 unsigned long current_temp, 325 int current_temp,
227 unsigned long control_temp) 326 int control_temp)
228{ 327{
229 struct thermal_instance *instance; 328 struct thermal_instance *instance;
230 struct power_allocator_params *params = tz->governor_data; 329 struct power_allocator_params *params = tz->governor_data;
@@ -247,6 +346,11 @@ static int allocate_power(struct thermal_zone_device *tz,
247 } 346 }
248 } 347 }
249 348
349 if (!num_actors) {
350 ret = -ENODEV;
351 goto unlock;
352 }
353
250 /* 354 /*
251 * We need to allocate five arrays of the same size: 355 * We need to allocate five arrays of the same size:
252 * req_power, max_power, granted_power, extra_actor_power and 356 * req_power, max_power, granted_power, extra_actor_power and
@@ -331,7 +435,7 @@ static int allocate_power(struct thermal_zone_device *tz,
331 granted_power, total_granted_power, 435 granted_power, total_granted_power,
332 num_actors, power_range, 436 num_actors, power_range,
333 max_allocatable_power, current_temp, 437 max_allocatable_power, current_temp,
334 (s32)control_temp - (s32)current_temp); 438 control_temp - current_temp);
335 439
336 kfree(req_power); 440 kfree(req_power);
337unlock: 441unlock:
@@ -340,43 +444,66 @@ unlock:
340 return ret; 444 return ret;
341} 445}
342 446
343static int get_governor_trips(struct thermal_zone_device *tz, 447/**
344 struct power_allocator_params *params) 448 * get_governor_trips() - get the number of the two trip points that are key for this governor
449 * @tz: thermal zone to operate on
450 * @params: pointer to private data for this governor
451 *
452 * The power allocator governor works optimally with two trips points:
453 * a "switch on" trip point and a "maximum desired temperature". These
454 * are defined as the first and last passive trip points.
455 *
456 * If there is only one trip point, then that's considered to be the
457 * "maximum desired temperature" trip point and the governor is always
458 * on. If there are no passive or active trip points, then the
459 * governor won't do anything. In fact, its throttle function
460 * won't be called at all.
461 */
462static void get_governor_trips(struct thermal_zone_device *tz,
463 struct power_allocator_params *params)
345{ 464{
346 int i, ret, last_passive; 465 int i, last_active, last_passive;
347 bool found_first_passive; 466 bool found_first_passive;
348 467
349 found_first_passive = false; 468 found_first_passive = false;
350 last_passive = -1; 469 last_active = INVALID_TRIP;
351 ret = -EINVAL; 470 last_passive = INVALID_TRIP;
352 471
353 for (i = 0; i < tz->trips; i++) { 472 for (i = 0; i < tz->trips; i++) {
354 enum thermal_trip_type type; 473 enum thermal_trip_type type;
474 int ret;
355 475
356 ret = tz->ops->get_trip_type(tz, i, &type); 476 ret = tz->ops->get_trip_type(tz, i, &type);
357 if (ret) 477 if (ret) {
358 return ret; 478 dev_warn(&tz->device,
479 "Failed to get trip point %d type: %d\n", i,
480 ret);
481 continue;
482 }
359 483
360 if (!found_first_passive) { 484 if (type == THERMAL_TRIP_PASSIVE) {
361 if (type == THERMAL_TRIP_PASSIVE) { 485 if (!found_first_passive) {
362 params->trip_switch_on = i; 486 params->trip_switch_on = i;
363 found_first_passive = true; 487 found_first_passive = true;
488 } else {
489 last_passive = i;
364 } 490 }
365 } else if (type == THERMAL_TRIP_PASSIVE) { 491 } else if (type == THERMAL_TRIP_ACTIVE) {
366 last_passive = i; 492 last_active = i;
367 } else { 493 } else {
368 break; 494 break;
369 } 495 }
370 } 496 }
371 497
372 if (last_passive != -1) { 498 if (last_passive != INVALID_TRIP) {
373 params->trip_max_desired_temperature = last_passive; 499 params->trip_max_desired_temperature = last_passive;
374 ret = 0; 500 } else if (found_first_passive) {
501 params->trip_max_desired_temperature = params->trip_switch_on;
502 params->trip_switch_on = INVALID_TRIP;
375 } else { 503 } else {
376 ret = -EINVAL; 504 params->trip_switch_on = INVALID_TRIP;
505 params->trip_max_desired_temperature = last_active;
377 } 506 }
378
379 return ret;
380} 507}
381 508
382static void reset_pid_controller(struct power_allocator_params *params) 509static void reset_pid_controller(struct power_allocator_params *params)
@@ -405,60 +532,45 @@ static void allow_maximum_power(struct thermal_zone_device *tz)
405 * power_allocator_bind() - bind the power_allocator governor to a thermal zone 532 * power_allocator_bind() - bind the power_allocator governor to a thermal zone
406 * @tz: thermal zone to bind it to 533 * @tz: thermal zone to bind it to
407 * 534 *
408 * Check that the thermal zone is valid for this governor, that is, it 535 * Initialize the PID controller parameters and bind it to the thermal
409 * has two thermal trips. If so, initialize the PID controller 536 * zone.
410 * parameters and bind it to the thermal zone.
411 * 537 *
412 * Return: 0 on success, -EINVAL if the trips were invalid or -ENOMEM 538 * Return: 0 on success, or -ENOMEM if we ran out of memory.
413 * if we ran out of memory.
414 */ 539 */
415static int power_allocator_bind(struct thermal_zone_device *tz) 540static int power_allocator_bind(struct thermal_zone_device *tz)
416{ 541{
417 int ret; 542 int ret;
418 struct power_allocator_params *params; 543 struct power_allocator_params *params;
419 unsigned long switch_on_temp, control_temp; 544 int control_temp;
420 u32 temperature_threshold;
421
422 if (!tz->tzp || !tz->tzp->sustainable_power) {
423 dev_err(&tz->device,
424 "power_allocator: missing sustainable_power\n");
425 return -EINVAL;
426 }
427 545
428 params = kzalloc(sizeof(*params), GFP_KERNEL); 546 params = kzalloc(sizeof(*params), GFP_KERNEL);
429 if (!params) 547 if (!params)
430 return -ENOMEM; 548 return -ENOMEM;
431 549
432 ret = get_governor_trips(tz, params); 550 if (!tz->tzp) {
433 if (ret) { 551 tz->tzp = kzalloc(sizeof(*tz->tzp), GFP_KERNEL);
434 dev_err(&tz->device, 552 if (!tz->tzp) {
435 "thermal zone %s has wrong trip setup for power allocator\n", 553 ret = -ENOMEM;
436 tz->type); 554 goto free_params;
437 goto free; 555 }
438 }
439 556
440 ret = tz->ops->get_trip_temp(tz, params->trip_switch_on, 557 params->allocated_tzp = true;
441 &switch_on_temp); 558 }
442 if (ret)
443 goto free;
444 559
445 ret = tz->ops->get_trip_temp(tz, params->trip_max_desired_temperature, 560 if (!tz->tzp->sustainable_power)
446 &control_temp); 561 dev_warn(&tz->device, "power_allocator: sustainable_power will be estimated\n");
447 if (ret)
448 goto free;
449 562
450 temperature_threshold = control_temp - switch_on_temp; 563 get_governor_trips(tz, params);
451 564
452 tz->tzp->k_po = tz->tzp->k_po ?: 565 if (tz->trips > 0) {
453 int_to_frac(tz->tzp->sustainable_power) / temperature_threshold; 566 ret = tz->ops->get_trip_temp(tz,
454 tz->tzp->k_pu = tz->tzp->k_pu ?: 567 params->trip_max_desired_temperature,
455 int_to_frac(2 * tz->tzp->sustainable_power) / 568 &control_temp);
456 temperature_threshold; 569 if (!ret)
457 tz->tzp->k_i = tz->tzp->k_i ?: int_to_frac(10) / 1000; 570 estimate_pid_constants(tz, tz->tzp->sustainable_power,
458 /* 571 params->trip_switch_on,
459 * The default for k_d and integral_cutoff is 0, so we can 572 control_temp, false);
460 * leave them as they are. 573 }
461 */
462 574
463 reset_pid_controller(params); 575 reset_pid_controller(params);
464 576
@@ -466,14 +578,23 @@ static int power_allocator_bind(struct thermal_zone_device *tz)
466 578
467 return 0; 579 return 0;
468 580
469free: 581free_params:
470 kfree(params); 582 kfree(params);
583
471 return ret; 584 return ret;
472} 585}
473 586
474static void power_allocator_unbind(struct thermal_zone_device *tz) 587static void power_allocator_unbind(struct thermal_zone_device *tz)
475{ 588{
589 struct power_allocator_params *params = tz->governor_data;
590
476 dev_dbg(&tz->device, "Unbinding from thermal zone %d\n", tz->id); 591 dev_dbg(&tz->device, "Unbinding from thermal zone %d\n", tz->id);
592
593 if (params->allocated_tzp) {
594 kfree(tz->tzp);
595 tz->tzp = NULL;
596 }
597
477 kfree(tz->governor_data); 598 kfree(tz->governor_data);
478 tz->governor_data = NULL; 599 tz->governor_data = NULL;
479} 600}
@@ -481,7 +602,7 @@ static void power_allocator_unbind(struct thermal_zone_device *tz)
481static int power_allocator_throttle(struct thermal_zone_device *tz, int trip) 602static int power_allocator_throttle(struct thermal_zone_device *tz, int trip)
482{ 603{
483 int ret; 604 int ret;
484 unsigned long switch_on_temp, control_temp, current_temp; 605 int switch_on_temp, control_temp, current_temp;
485 struct power_allocator_params *params = tz->governor_data; 606 struct power_allocator_params *params = tz->governor_data;
486 607
487 /* 608 /*
@@ -499,13 +620,7 @@ static int power_allocator_throttle(struct thermal_zone_device *tz, int trip)
499 620
500 ret = tz->ops->get_trip_temp(tz, params->trip_switch_on, 621 ret = tz->ops->get_trip_temp(tz, params->trip_switch_on,
501 &switch_on_temp); 622 &switch_on_temp);
502 if (ret) { 623 if (!ret && (current_temp < switch_on_temp)) {
503 dev_warn(&tz->device,
504 "Failed to get switch on temperature: %d\n", ret);
505 return ret;
506 }
507
508 if (current_temp < switch_on_temp) {
509 tz->passive = 0; 624 tz->passive = 0;
510 reset_pid_controller(params); 625 reset_pid_controller(params);
511 allow_maximum_power(tz); 626 allow_maximum_power(tz);
diff --git a/drivers/thermal/qcom-spmi-temp-alarm.c b/drivers/thermal/qcom-spmi-temp-alarm.c
index c8d27b8fb9ec..b677aada5b52 100644
--- a/drivers/thermal/qcom-spmi-temp-alarm.c
+++ b/drivers/thermal/qcom-spmi-temp-alarm.c
@@ -117,7 +117,7 @@ static int qpnp_tm_update_temp_no_adc(struct qpnp_tm_chip *chip)
117 return 0; 117 return 0;
118} 118}
119 119
120static int qpnp_tm_get_temp(void *data, long *temp) 120static int qpnp_tm_get_temp(void *data, int *temp)
121{ 121{
122 struct qpnp_tm_chip *chip = data; 122 struct qpnp_tm_chip *chip = data;
123 int ret, mili_celsius; 123 int ret, mili_celsius;
diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c
index fe4e767018c4..5d4ae7d705e0 100644
--- a/drivers/thermal/rcar_thermal.c
+++ b/drivers/thermal/rcar_thermal.c
@@ -200,8 +200,7 @@ err_out_unlock:
200 return ret; 200 return ret;
201} 201}
202 202
203static int rcar_thermal_get_temp(struct thermal_zone_device *zone, 203static int rcar_thermal_get_temp(struct thermal_zone_device *zone, int *temp)
204 unsigned long *temp)
205{ 204{
206 struct rcar_thermal_priv *priv = rcar_zone_to_priv(zone); 205 struct rcar_thermal_priv *priv = rcar_zone_to_priv(zone);
207 206
@@ -235,7 +234,7 @@ static int rcar_thermal_get_trip_type(struct thermal_zone_device *zone,
235} 234}
236 235
237static int rcar_thermal_get_trip_temp(struct thermal_zone_device *zone, 236static int rcar_thermal_get_trip_temp(struct thermal_zone_device *zone,
238 int trip, unsigned long *temp) 237 int trip, int *temp)
239{ 238{
240 struct rcar_thermal_priv *priv = rcar_zone_to_priv(zone); 239 struct rcar_thermal_priv *priv = rcar_zone_to_priv(zone);
241 struct device *dev = rcar_priv_to_dev(priv); 240 struct device *dev = rcar_priv_to_dev(priv);
@@ -299,7 +298,7 @@ static void _rcar_thermal_irq_ctrl(struct rcar_thermal_priv *priv, int enable)
299static void rcar_thermal_work(struct work_struct *work) 298static void rcar_thermal_work(struct work_struct *work)
300{ 299{
301 struct rcar_thermal_priv *priv; 300 struct rcar_thermal_priv *priv;
302 unsigned long cctemp, nctemp; 301 int cctemp, nctemp;
303 302
304 priv = container_of(work, struct rcar_thermal_priv, work.work); 303 priv = container_of(work, struct rcar_thermal_priv, work.work);
305 304
diff --git a/drivers/thermal/rockchip_thermal.c b/drivers/thermal/rockchip_thermal.c
index cd8f5f93b42c..c89ffb26a354 100644
--- a/drivers/thermal/rockchip_thermal.c
+++ b/drivers/thermal/rockchip_thermal.c
@@ -64,7 +64,7 @@ struct rockchip_tsadc_chip {
64 void (*control)(void __iomem *reg, bool on); 64 void (*control)(void __iomem *reg, bool on);
65 65
66 /* Per-sensor methods */ 66 /* Per-sensor methods */
67 int (*get_temp)(int chn, void __iomem *reg, long *temp); 67 int (*get_temp)(int chn, void __iomem *reg, int *temp);
68 void (*set_tshut_temp)(int chn, void __iomem *reg, long temp); 68 void (*set_tshut_temp)(int chn, void __iomem *reg, long temp);
69 void (*set_tshut_mode)(int chn, void __iomem *reg, enum tshut_mode m); 69 void (*set_tshut_mode)(int chn, void __iomem *reg, enum tshut_mode m);
70}; 70};
@@ -191,7 +191,7 @@ static u32 rk_tsadcv2_temp_to_code(long temp)
191 return 0; 191 return 0;
192} 192}
193 193
194static long rk_tsadcv2_code_to_temp(u32 code) 194static int rk_tsadcv2_code_to_temp(u32 code)
195{ 195{
196 unsigned int low = 0; 196 unsigned int low = 0;
197 unsigned int high = ARRAY_SIZE(v2_code_table) - 1; 197 unsigned int high = ARRAY_SIZE(v2_code_table) - 1;
@@ -277,7 +277,7 @@ static void rk_tsadcv2_control(void __iomem *regs, bool enable)
277 writel_relaxed(val, regs + TSADCV2_AUTO_CON); 277 writel_relaxed(val, regs + TSADCV2_AUTO_CON);
278} 278}
279 279
280static int rk_tsadcv2_get_temp(int chn, void __iomem *regs, long *temp) 280static int rk_tsadcv2_get_temp(int chn, void __iomem *regs, int *temp)
281{ 281{
282 u32 val; 282 u32 val;
283 283
@@ -366,7 +366,7 @@ static irqreturn_t rockchip_thermal_alarm_irq_thread(int irq, void *dev)
366 return IRQ_HANDLED; 366 return IRQ_HANDLED;
367} 367}
368 368
369static int rockchip_thermal_get_temp(void *_sensor, long *out_temp) 369static int rockchip_thermal_get_temp(void *_sensor, int *out_temp)
370{ 370{
371 struct rockchip_thermal_sensor *sensor = _sensor; 371 struct rockchip_thermal_sensor *sensor = _sensor;
372 struct rockchip_thermal_data *thermal = sensor->thermal; 372 struct rockchip_thermal_data *thermal = sensor->thermal;
@@ -374,7 +374,7 @@ static int rockchip_thermal_get_temp(void *_sensor, long *out_temp)
374 int retval; 374 int retval;
375 375
376 retval = tsadc->get_temp(sensor->id, thermal->regs, out_temp); 376 retval = tsadc->get_temp(sensor->id, thermal->regs, out_temp);
377 dev_dbg(&thermal->pdev->dev, "sensor %d - temp: %ld, retval: %d\n", 377 dev_dbg(&thermal->pdev->dev, "sensor %d - temp: %d, retval: %d\n",
378 sensor->id, *out_temp, retval); 378 sensor->id, *out_temp, retval);
379 379
380 return retval; 380 return retval;
diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c
index c96ff10b869e..0bae8cc6c23a 100644
--- a/drivers/thermal/samsung/exynos_tmu.c
+++ b/drivers/thermal/samsung/exynos_tmu.c
@@ -207,8 +207,7 @@ struct exynos_tmu_data {
207 int (*tmu_initialize)(struct platform_device *pdev); 207 int (*tmu_initialize)(struct platform_device *pdev);
208 void (*tmu_control)(struct platform_device *pdev, bool on); 208 void (*tmu_control)(struct platform_device *pdev, bool on);
209 int (*tmu_read)(struct exynos_tmu_data *data); 209 int (*tmu_read)(struct exynos_tmu_data *data);
210 void (*tmu_set_emulation)(struct exynos_tmu_data *data, 210 void (*tmu_set_emulation)(struct exynos_tmu_data *data, int temp);
211 unsigned long temp);
212 void (*tmu_clear_irqs)(struct exynos_tmu_data *data); 211 void (*tmu_clear_irqs)(struct exynos_tmu_data *data);
213}; 212};
214 213
@@ -216,7 +215,7 @@ static void exynos_report_trigger(struct exynos_tmu_data *p)
216{ 215{
217 char data[10], *envp[] = { data, NULL }; 216 char data[10], *envp[] = { data, NULL };
218 struct thermal_zone_device *tz = p->tzd; 217 struct thermal_zone_device *tz = p->tzd;
219 unsigned long temp; 218 int temp;
220 unsigned int i; 219 unsigned int i;
221 220
222 if (!tz) { 221 if (!tz) {
@@ -517,7 +516,7 @@ static int exynos5433_tmu_initialize(struct platform_device *pdev)
517 struct thermal_zone_device *tz = data->tzd; 516 struct thermal_zone_device *tz = data->tzd;
518 unsigned int status, trim_info; 517 unsigned int status, trim_info;
519 unsigned int rising_threshold = 0, falling_threshold = 0; 518 unsigned int rising_threshold = 0, falling_threshold = 0;
520 unsigned long temp, temp_hist; 519 int temp, temp_hist;
521 int ret = 0, threshold_code, i, sensor_id, cal_type; 520 int ret = 0, threshold_code, i, sensor_id, cal_type;
522 521
523 status = readb(data->base + EXYNOS_TMU_REG_STATUS); 522 status = readb(data->base + EXYNOS_TMU_REG_STATUS);
@@ -610,7 +609,7 @@ static int exynos5440_tmu_initialize(struct platform_device *pdev)
610 struct exynos_tmu_data *data = platform_get_drvdata(pdev); 609 struct exynos_tmu_data *data = platform_get_drvdata(pdev);
611 unsigned int trim_info = 0, con, rising_threshold; 610 unsigned int trim_info = 0, con, rising_threshold;
612 int ret = 0, threshold_code; 611 int ret = 0, threshold_code;
613 unsigned long crit_temp = 0; 612 int crit_temp = 0;
614 613
615 /* 614 /*
616 * For exynos5440 soc triminfo value is swapped between TMU0 and 615 * For exynos5440 soc triminfo value is swapped between TMU0 and
@@ -663,7 +662,7 @@ static int exynos7_tmu_initialize(struct platform_device *pdev)
663 unsigned int status, trim_info; 662 unsigned int status, trim_info;
664 unsigned int rising_threshold = 0, falling_threshold = 0; 663 unsigned int rising_threshold = 0, falling_threshold = 0;
665 int ret = 0, threshold_code, i; 664 int ret = 0, threshold_code, i;
666 unsigned long temp, temp_hist; 665 int temp, temp_hist;
667 unsigned int reg_off, bit_off; 666 unsigned int reg_off, bit_off;
668 667
669 status = readb(data->base + EXYNOS_TMU_REG_STATUS); 668 status = readb(data->base + EXYNOS_TMU_REG_STATUS);
@@ -876,7 +875,7 @@ static void exynos7_tmu_control(struct platform_device *pdev, bool on)
876 writel(con, data->base + EXYNOS_TMU_REG_CONTROL); 875 writel(con, data->base + EXYNOS_TMU_REG_CONTROL);
877} 876}
878 877
879static int exynos_get_temp(void *p, long *temp) 878static int exynos_get_temp(void *p, int *temp)
880{ 879{
881 struct exynos_tmu_data *data = p; 880 struct exynos_tmu_data *data = p;
882 881
@@ -896,7 +895,7 @@ static int exynos_get_temp(void *p, long *temp)
896 895
897#ifdef CONFIG_THERMAL_EMULATION 896#ifdef CONFIG_THERMAL_EMULATION
898static u32 get_emul_con_reg(struct exynos_tmu_data *data, unsigned int val, 897static u32 get_emul_con_reg(struct exynos_tmu_data *data, unsigned int val,
899 unsigned long temp) 898 int temp)
900{ 899{
901 if (temp) { 900 if (temp) {
902 temp /= MCELSIUS; 901 temp /= MCELSIUS;
@@ -926,7 +925,7 @@ static u32 get_emul_con_reg(struct exynos_tmu_data *data, unsigned int val,
926} 925}
927 926
928static void exynos4412_tmu_set_emulation(struct exynos_tmu_data *data, 927static void exynos4412_tmu_set_emulation(struct exynos_tmu_data *data,
929 unsigned long temp) 928 int temp)
930{ 929{
931 unsigned int val; 930 unsigned int val;
932 u32 emul_con; 931 u32 emul_con;
@@ -946,7 +945,7 @@ static void exynos4412_tmu_set_emulation(struct exynos_tmu_data *data,
946} 945}
947 946
948static void exynos5440_tmu_set_emulation(struct exynos_tmu_data *data, 947static void exynos5440_tmu_set_emulation(struct exynos_tmu_data *data,
949 unsigned long temp) 948 int temp)
950{ 949{
951 unsigned int val; 950 unsigned int val;
952 951
@@ -955,7 +954,7 @@ static void exynos5440_tmu_set_emulation(struct exynos_tmu_data *data,
955 writel(val, data->base + EXYNOS5440_TMU_S0_7_DEBUG); 954 writel(val, data->base + EXYNOS5440_TMU_S0_7_DEBUG);
956} 955}
957 956
958static int exynos_tmu_set_emulation(void *drv_data, unsigned long temp) 957static int exynos_tmu_set_emulation(void *drv_data, int temp)
959{ 958{
960 struct exynos_tmu_data *data = drv_data; 959 struct exynos_tmu_data *data = drv_data;
961 int ret = -EINVAL; 960 int ret = -EINVAL;
@@ -978,7 +977,7 @@ out:
978#else 977#else
979#define exynos4412_tmu_set_emulation NULL 978#define exynos4412_tmu_set_emulation NULL
980#define exynos5440_tmu_set_emulation NULL 979#define exynos5440_tmu_set_emulation NULL
981static int exynos_tmu_set_emulation(void *drv_data, unsigned long temp) 980static int exynos_tmu_set_emulation(void *drv_data, int temp)
982 { return -EINVAL; } 981 { return -EINVAL; }
983#endif /* CONFIG_THERMAL_EMULATION */ 982#endif /* CONFIG_THERMAL_EMULATION */
984 983
diff --git a/drivers/thermal/spear_thermal.c b/drivers/thermal/spear_thermal.c
index bddb71744a6c..534dd9136662 100644
--- a/drivers/thermal/spear_thermal.c
+++ b/drivers/thermal/spear_thermal.c
@@ -38,7 +38,7 @@ struct spear_thermal_dev {
38}; 38};
39 39
40static inline int thermal_get_temp(struct thermal_zone_device *thermal, 40static inline int thermal_get_temp(struct thermal_zone_device *thermal,
41 unsigned long *temp) 41 int *temp)
42{ 42{
43 struct spear_thermal_dev *stdev = thermal->devdata; 43 struct spear_thermal_dev *stdev = thermal->devdata;
44 44
diff --git a/drivers/thermal/st/st_thermal.c b/drivers/thermal/st/st_thermal.c
index 88c759d746c3..be637e6b01d2 100644
--- a/drivers/thermal/st/st_thermal.c
+++ b/drivers/thermal/st/st_thermal.c
@@ -111,8 +111,7 @@ static int st_thermal_calibration(struct st_thermal_sensor *sensor)
111} 111}
112 112
113/* Callback to get temperature from HW*/ 113/* Callback to get temperature from HW*/
114static int st_thermal_get_temp(struct thermal_zone_device *th, 114static int st_thermal_get_temp(struct thermal_zone_device *th, int *temperature)
115 unsigned long *temperature)
116{ 115{
117 struct st_thermal_sensor *sensor = th->devdata; 116 struct st_thermal_sensor *sensor = th->devdata;
118 struct device *dev = sensor->dev; 117 struct device *dev = sensor->dev;
@@ -159,7 +158,7 @@ static int st_thermal_get_trip_type(struct thermal_zone_device *th,
159} 158}
160 159
161static int st_thermal_get_trip_temp(struct thermal_zone_device *th, 160static int st_thermal_get_trip_temp(struct thermal_zone_device *th,
162 int trip, unsigned long *temp) 161 int trip, int *temp)
163{ 162{
164 struct st_thermal_sensor *sensor = th->devdata; 163 struct st_thermal_sensor *sensor = th->devdata;
165 struct device *dev = sensor->dev; 164 struct device *dev = sensor->dev;
diff --git a/drivers/thermal/step_wise.c b/drivers/thermal/step_wise.c
index 5a0f12d08e8b..2f9f7086ac3d 100644
--- a/drivers/thermal/step_wise.c
+++ b/drivers/thermal/step_wise.c
@@ -113,7 +113,7 @@ static void update_passive_instance(struct thermal_zone_device *tz,
113 113
114static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip) 114static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip)
115{ 115{
116 long trip_temp; 116 int trip_temp;
117 enum thermal_trip_type trip_type; 117 enum thermal_trip_type trip_type;
118 enum thermal_trend trend; 118 enum thermal_trend trend;
119 struct thermal_instance *instance; 119 struct thermal_instance *instance;
@@ -135,7 +135,7 @@ static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip)
135 trace_thermal_zone_trip(tz, trip, trip_type); 135 trace_thermal_zone_trip(tz, trip, trip_type);
136 } 136 }
137 137
138 dev_dbg(&tz->device, "Trip%d[type=%d,temp=%ld]:trend=%d,throttle=%d\n", 138 dev_dbg(&tz->device, "Trip%d[type=%d,temp=%d]:trend=%d,throttle=%d\n",
139 trip, trip_type, trip_temp, trend, throttle); 139 trip, trip_type, trip_temp, trend, throttle);
140 140
141 mutex_lock(&tz->lock); 141 mutex_lock(&tz->lock);
diff --git a/drivers/thermal/tegra_soctherm.c b/drivers/thermal/tegra_soctherm.c
index 9197fc05c5cc..74ea5765938b 100644
--- a/drivers/thermal/tegra_soctherm.c
+++ b/drivers/thermal/tegra_soctherm.c
@@ -293,7 +293,7 @@ static int enable_tsensor(struct tegra_soctherm *tegra,
293 * H denotes an addition of 0.5 Celsius and N denotes negation 293 * H denotes an addition of 0.5 Celsius and N denotes negation
294 * of the final value. 294 * of the final value.
295 */ 295 */
296static long translate_temp(u16 val) 296static int translate_temp(u16 val)
297{ 297{
298 long t; 298 long t;
299 299
@@ -306,7 +306,7 @@ static long translate_temp(u16 val)
306 return t; 306 return t;
307} 307}
308 308
309static int tegra_thermctl_get_temp(void *data, long *out_temp) 309static int tegra_thermctl_get_temp(void *data, int *out_temp)
310{ 310{
311 struct tegra_thermctl_zone *zone = data; 311 struct tegra_thermctl_zone *zone = data;
312 u32 val; 312 u32 val;
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 4ca211be4c0f..d9e525cc9c1c 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -426,7 +426,7 @@ static void handle_non_critical_trips(struct thermal_zone_device *tz,
426static void handle_critical_trips(struct thermal_zone_device *tz, 426static void handle_critical_trips(struct thermal_zone_device *tz,
427 int trip, enum thermal_trip_type trip_type) 427 int trip, enum thermal_trip_type trip_type)
428{ 428{
429 long trip_temp; 429 int trip_temp;
430 430
431 tz->ops->get_trip_temp(tz, trip, &trip_temp); 431 tz->ops->get_trip_temp(tz, trip, &trip_temp);
432 432
@@ -465,7 +465,7 @@ static void handle_thermal_trip(struct thermal_zone_device *tz, int trip)
465} 465}
466 466
467/** 467/**
468 * thermal_zone_get_temp() - returns its the temperature of thermal zone 468 * thermal_zone_get_temp() - returns the temperature of a thermal zone
469 * @tz: a valid pointer to a struct thermal_zone_device 469 * @tz: a valid pointer to a struct thermal_zone_device
470 * @temp: a valid pointer to where to store the resulting temperature. 470 * @temp: a valid pointer to where to store the resulting temperature.
471 * 471 *
@@ -474,14 +474,12 @@ static void handle_thermal_trip(struct thermal_zone_device *tz, int trip)
474 * 474 *
475 * Return: On success returns 0, an error code otherwise 475 * Return: On success returns 0, an error code otherwise
476 */ 476 */
477int thermal_zone_get_temp(struct thermal_zone_device *tz, unsigned long *temp) 477int thermal_zone_get_temp(struct thermal_zone_device *tz, int *temp)
478{ 478{
479 int ret = -EINVAL; 479 int ret = -EINVAL;
480#ifdef CONFIG_THERMAL_EMULATION
481 int count; 480 int count;
482 unsigned long crit_temp = -1UL; 481 int crit_temp = INT_MAX;
483 enum thermal_trip_type type; 482 enum thermal_trip_type type;
484#endif
485 483
486 if (!tz || IS_ERR(tz) || !tz->ops->get_temp) 484 if (!tz || IS_ERR(tz) || !tz->ops->get_temp)
487 goto exit; 485 goto exit;
@@ -489,25 +487,26 @@ int thermal_zone_get_temp(struct thermal_zone_device *tz, unsigned long *temp)
489 mutex_lock(&tz->lock); 487 mutex_lock(&tz->lock);
490 488
491 ret = tz->ops->get_temp(tz, temp); 489 ret = tz->ops->get_temp(tz, temp);
492#ifdef CONFIG_THERMAL_EMULATION
493 if (!tz->emul_temperature)
494 goto skip_emul;
495
496 for (count = 0; count < tz->trips; count++) {
497 ret = tz->ops->get_trip_type(tz, count, &type);
498 if (!ret && type == THERMAL_TRIP_CRITICAL) {
499 ret = tz->ops->get_trip_temp(tz, count, &crit_temp);
500 break;
501 }
502 }
503 490
504 if (ret) 491 if (IS_ENABLED(CONFIG_THERMAL_EMULATION) && tz->emul_temperature) {
505 goto skip_emul; 492 for (count = 0; count < tz->trips; count++) {
493 ret = tz->ops->get_trip_type(tz, count, &type);
494 if (!ret && type == THERMAL_TRIP_CRITICAL) {
495 ret = tz->ops->get_trip_temp(tz, count,
496 &crit_temp);
497 break;
498 }
499 }
506 500
507 if (*temp < crit_temp) 501 /*
508 *temp = tz->emul_temperature; 502 * Only allow emulating a temperature when the real temperature
509skip_emul: 503 * is below the critical temperature so that the emulation code
510#endif 504 * cannot hide critical conditions.
505 */
506 if (!ret && *temp < crit_temp)
507 *temp = tz->emul_temperature;
508 }
509
511 mutex_unlock(&tz->lock); 510 mutex_unlock(&tz->lock);
512exit: 511exit:
513 return ret; 512 return ret;
@@ -516,8 +515,7 @@ EXPORT_SYMBOL_GPL(thermal_zone_get_temp);
516 515
517static void update_temperature(struct thermal_zone_device *tz) 516static void update_temperature(struct thermal_zone_device *tz)
518{ 517{
519 long temp; 518 int temp, ret;
520 int ret;
521 519
522 ret = thermal_zone_get_temp(tz, &temp); 520 ret = thermal_zone_get_temp(tz, &temp);
523 if (ret) { 521 if (ret) {
@@ -577,15 +575,14 @@ static ssize_t
577temp_show(struct device *dev, struct device_attribute *attr, char *buf) 575temp_show(struct device *dev, struct device_attribute *attr, char *buf)
578{ 576{
579 struct thermal_zone_device *tz = to_thermal_zone(dev); 577 struct thermal_zone_device *tz = to_thermal_zone(dev);
580 long temperature; 578 int temperature, ret;
581 int ret;
582 579
583 ret = thermal_zone_get_temp(tz, &temperature); 580 ret = thermal_zone_get_temp(tz, &temperature);
584 581
585 if (ret) 582 if (ret)
586 return ret; 583 return ret;
587 584
588 return sprintf(buf, "%ld\n", temperature); 585 return sprintf(buf, "%d\n", temperature);
589} 586}
590 587
591static ssize_t 588static ssize_t
@@ -689,7 +686,7 @@ trip_point_temp_show(struct device *dev, struct device_attribute *attr,
689{ 686{
690 struct thermal_zone_device *tz = to_thermal_zone(dev); 687 struct thermal_zone_device *tz = to_thermal_zone(dev);
691 int trip, ret; 688 int trip, ret;
692 long temperature; 689 int temperature;
693 690
694 if (!tz->ops->get_trip_temp) 691 if (!tz->ops->get_trip_temp)
695 return -EPERM; 692 return -EPERM;
@@ -702,7 +699,7 @@ trip_point_temp_show(struct device *dev, struct device_attribute *attr,
702 if (ret) 699 if (ret)
703 return ret; 700 return ret;
704 701
705 return sprintf(buf, "%ld\n", temperature); 702 return sprintf(buf, "%d\n", temperature);
706} 703}
707 704
708static ssize_t 705static ssize_t
@@ -711,7 +708,7 @@ trip_point_hyst_store(struct device *dev, struct device_attribute *attr,
711{ 708{
712 struct thermal_zone_device *tz = to_thermal_zone(dev); 709 struct thermal_zone_device *tz = to_thermal_zone(dev);
713 int trip, ret; 710 int trip, ret;
714 unsigned long temperature; 711 int temperature;
715 712
716 if (!tz->ops->set_trip_hyst) 713 if (!tz->ops->set_trip_hyst)
717 return -EPERM; 714 return -EPERM;
@@ -719,7 +716,7 @@ trip_point_hyst_store(struct device *dev, struct device_attribute *attr,
719 if (!sscanf(attr->attr.name, "trip_point_%d_hyst", &trip)) 716 if (!sscanf(attr->attr.name, "trip_point_%d_hyst", &trip))
720 return -EINVAL; 717 return -EINVAL;
721 718
722 if (kstrtoul(buf, 10, &temperature)) 719 if (kstrtoint(buf, 10, &temperature))
723 return -EINVAL; 720 return -EINVAL;
724 721
725 /* 722 /*
@@ -738,7 +735,7 @@ trip_point_hyst_show(struct device *dev, struct device_attribute *attr,
738{ 735{
739 struct thermal_zone_device *tz = to_thermal_zone(dev); 736 struct thermal_zone_device *tz = to_thermal_zone(dev);
740 int trip, ret; 737 int trip, ret;
741 unsigned long temperature; 738 int temperature;
742 739
743 if (!tz->ops->get_trip_hyst) 740 if (!tz->ops->get_trip_hyst)
744 return -EPERM; 741 return -EPERM;
@@ -748,7 +745,7 @@ trip_point_hyst_show(struct device *dev, struct device_attribute *attr,
748 745
749 ret = tz->ops->get_trip_hyst(tz, trip, &temperature); 746 ret = tz->ops->get_trip_hyst(tz, trip, &temperature);
750 747
751 return ret ? ret : sprintf(buf, "%ld\n", temperature); 748 return ret ? ret : sprintf(buf, "%d\n", temperature);
752} 749}
753 750
754static ssize_t 751static ssize_t
@@ -847,7 +844,27 @@ policy_show(struct device *dev, struct device_attribute *devattr, char *buf)
847 return sprintf(buf, "%s\n", tz->governor->name); 844 return sprintf(buf, "%s\n", tz->governor->name);
848} 845}
849 846
850#ifdef CONFIG_THERMAL_EMULATION 847static ssize_t
848available_policies_show(struct device *dev, struct device_attribute *devattr,
849 char *buf)
850{
851 struct thermal_governor *pos;
852 ssize_t count = 0;
853 ssize_t size = PAGE_SIZE;
854
855 mutex_lock(&thermal_governor_lock);
856
857 list_for_each_entry(pos, &thermal_governor_list, governor_list) {
858 size = PAGE_SIZE - count;
859 count += scnprintf(buf + count, size, "%s ", pos->name);
860 }
861 count += scnprintf(buf + count, size, "\n");
862
863 mutex_unlock(&thermal_governor_lock);
864
865 return count;
866}
867
851static ssize_t 868static ssize_t
852emul_temp_store(struct device *dev, struct device_attribute *attr, 869emul_temp_store(struct device *dev, struct device_attribute *attr,
853 const char *buf, size_t count) 870 const char *buf, size_t count)
@@ -873,7 +890,6 @@ emul_temp_store(struct device *dev, struct device_attribute *attr,
873 return ret ? ret : count; 890 return ret ? ret : count;
874} 891}
875static DEVICE_ATTR(emul_temp, S_IWUSR, NULL, emul_temp_store); 892static DEVICE_ATTR(emul_temp, S_IWUSR, NULL, emul_temp_store);
876#endif/*CONFIG_THERMAL_EMULATION*/
877 893
878static ssize_t 894static ssize_t
879sustainable_power_show(struct device *dev, struct device_attribute *devattr, 895sustainable_power_show(struct device *dev, struct device_attribute *devattr,
@@ -997,6 +1013,34 @@ int power_actor_get_max_power(struct thermal_cooling_device *cdev,
997} 1013}
998 1014
999/** 1015/**
1016 * power_actor_get_min_power() - get the minimum power that a cdev can consume
1017 * @cdev: pointer to &thermal_cooling_device
1018 * @tz: a valid thermal zone device pointer
1019 * @min_power: pointer in which to store the minimum power
1020 *
1021 * Calculate the minimum power consumption in milliwatts that the
1022 * cooling device can currently consume and store it in @min_power.
1023 *
1024 * Return: 0 on success, -EINVAL if @cdev doesn't support the
1025 * power_actor API or -E* on other error.
1026 */
1027int power_actor_get_min_power(struct thermal_cooling_device *cdev,
1028 struct thermal_zone_device *tz, u32 *min_power)
1029{
1030 unsigned long max_state;
1031 int ret;
1032
1033 if (!cdev_is_power_actor(cdev))
1034 return -EINVAL;
1035
1036 ret = cdev->ops->get_max_state(cdev, &max_state);
1037 if (ret)
1038 return ret;
1039
1040 return cdev->ops->state2power(cdev, tz, max_state, min_power);
1041}
1042
1043/**
1000 * power_actor_set_power() - limit the maximum power that a cooling device can consume 1044 * power_actor_set_power() - limit the maximum power that a cooling device can consume
1001 * @cdev: pointer to &thermal_cooling_device 1045 * @cdev: pointer to &thermal_cooling_device
1002 * @instance: thermal instance to update 1046 * @instance: thermal instance to update
@@ -1032,6 +1076,7 @@ static DEVICE_ATTR(temp, 0444, temp_show, NULL);
1032static DEVICE_ATTR(mode, 0644, mode_show, mode_store); 1076static DEVICE_ATTR(mode, 0644, mode_show, mode_store);
1033static DEVICE_ATTR(passive, S_IRUGO | S_IWUSR, passive_show, passive_store); 1077static DEVICE_ATTR(passive, S_IRUGO | S_IWUSR, passive_show, passive_store);
1034static DEVICE_ATTR(policy, S_IRUGO | S_IWUSR, policy_show, policy_store); 1078static DEVICE_ATTR(policy, S_IRUGO | S_IWUSR, policy_show, policy_store);
1079static DEVICE_ATTR(available_policies, S_IRUGO, available_policies_show, NULL);
1035 1080
1036/* sys I/F for cooling device */ 1081/* sys I/F for cooling device */
1037#define to_cooling_device(_dev) \ 1082#define to_cooling_device(_dev) \
@@ -1803,11 +1848,12 @@ struct thermal_zone_device *thermal_zone_device_register(const char *type,
1803 goto unregister; 1848 goto unregister;
1804 } 1849 }
1805 1850
1806#ifdef CONFIG_THERMAL_EMULATION 1851 if (IS_ENABLED(CONFIG_THERMAL_EMULATION)) {
1807 result = device_create_file(&tz->device, &dev_attr_emul_temp); 1852 result = device_create_file(&tz->device, &dev_attr_emul_temp);
1808 if (result) 1853 if (result)
1809 goto unregister; 1854 goto unregister;
1810#endif 1855 }
1856
1811 /* Create policy attribute */ 1857 /* Create policy attribute */
1812 result = device_create_file(&tz->device, &dev_attr_policy); 1858 result = device_create_file(&tz->device, &dev_attr_policy);
1813 if (result) 1859 if (result)
@@ -1818,6 +1864,11 @@ struct thermal_zone_device *thermal_zone_device_register(const char *type,
1818 if (result) 1864 if (result)
1819 goto unregister; 1865 goto unregister;
1820 1866
1867 /* Create available_policies attribute */
1868 result = device_create_file(&tz->device, &dev_attr_available_policies);
1869 if (result)
1870 goto unregister;
1871
1821 /* Update 'this' zone's governor information */ 1872 /* Update 'this' zone's governor information */
1822 mutex_lock(&thermal_governor_lock); 1873 mutex_lock(&thermal_governor_lock);
1823 1874
@@ -1849,9 +1900,6 @@ struct thermal_zone_device *thermal_zone_device_register(const char *type,
1849 1900
1850 INIT_DELAYED_WORK(&(tz->poll_queue), thermal_zone_device_check); 1901 INIT_DELAYED_WORK(&(tz->poll_queue), thermal_zone_device_check);
1851 1902
1852 if (!tz->ops->get_temp)
1853 thermal_zone_device_set_polling(tz, 0);
1854
1855 thermal_zone_device_update(tz); 1903 thermal_zone_device_update(tz);
1856 1904
1857 return tz; 1905 return tz;
@@ -1918,6 +1966,7 @@ void thermal_zone_device_unregister(struct thermal_zone_device *tz)
1918 if (tz->ops->get_mode) 1966 if (tz->ops->get_mode)
1919 device_remove_file(&tz->device, &dev_attr_mode); 1967 device_remove_file(&tz->device, &dev_attr_mode);
1920 device_remove_file(&tz->device, &dev_attr_policy); 1968 device_remove_file(&tz->device, &dev_attr_policy);
1969 device_remove_file(&tz->device, &dev_attr_available_policies);
1921 remove_trip_attrs(tz); 1970 remove_trip_attrs(tz);
1922 thermal_set_governor(tz, NULL); 1971 thermal_set_governor(tz, NULL);
1923 1972
diff --git a/drivers/thermal/thermal_hwmon.c b/drivers/thermal/thermal_hwmon.c
index 1967bee4f076..06fd2ed9ef9d 100644
--- a/drivers/thermal/thermal_hwmon.c
+++ b/drivers/thermal/thermal_hwmon.c
@@ -69,7 +69,7 @@ static DEVICE_ATTR(name, 0444, name_show, NULL);
69static ssize_t 69static ssize_t
70temp_input_show(struct device *dev, struct device_attribute *attr, char *buf) 70temp_input_show(struct device *dev, struct device_attribute *attr, char *buf)
71{ 71{
72 long temperature; 72 int temperature;
73 int ret; 73 int ret;
74 struct thermal_hwmon_attr *hwmon_attr 74 struct thermal_hwmon_attr *hwmon_attr
75 = container_of(attr, struct thermal_hwmon_attr, attr); 75 = container_of(attr, struct thermal_hwmon_attr, attr);
@@ -83,7 +83,7 @@ temp_input_show(struct device *dev, struct device_attribute *attr, char *buf)
83 if (ret) 83 if (ret)
84 return ret; 84 return ret;
85 85
86 return sprintf(buf, "%ld\n", temperature); 86 return sprintf(buf, "%d\n", temperature);
87} 87}
88 88
89static ssize_t 89static ssize_t
@@ -95,14 +95,14 @@ temp_crit_show(struct device *dev, struct device_attribute *attr, char *buf)
95 = container_of(hwmon_attr, struct thermal_hwmon_temp, 95 = container_of(hwmon_attr, struct thermal_hwmon_temp,
96 temp_crit); 96 temp_crit);
97 struct thermal_zone_device *tz = temp->tz; 97 struct thermal_zone_device *tz = temp->tz;
98 long temperature; 98 int temperature;
99 int ret; 99 int ret;
100 100
101 ret = tz->ops->get_trip_temp(tz, 0, &temperature); 101 ret = tz->ops->get_trip_temp(tz, 0, &temperature);
102 if (ret) 102 if (ret)
103 return ret; 103 return ret;
104 104
105 return sprintf(buf, "%ld\n", temperature); 105 return sprintf(buf, "%d\n", temperature);
106} 106}
107 107
108 108
@@ -142,7 +142,7 @@ thermal_hwmon_lookup_temp(const struct thermal_hwmon_device *hwmon,
142 142
143static bool thermal_zone_crit_temp_valid(struct thermal_zone_device *tz) 143static bool thermal_zone_crit_temp_valid(struct thermal_zone_device *tz)
144{ 144{
145 unsigned long temp; 145 int temp;
146 return tz->ops->get_crit_temp && !tz->ops->get_crit_temp(tz, &temp); 146 return tz->ops->get_crit_temp && !tz->ops->get_crit_temp(tz, &temp);
147} 147}
148 148
diff --git a/drivers/thermal/ti-soc-thermal/Kconfig b/drivers/thermal/ti-soc-thermal/Kconfig
index bd4c7beba679..cb6686ff09ae 100644
--- a/drivers/thermal/ti-soc-thermal/Kconfig
+++ b/drivers/thermal/ti-soc-thermal/Kconfig
@@ -1,7 +1,5 @@
1config TI_SOC_THERMAL 1config TI_SOC_THERMAL
2 tristate "Texas Instruments SoCs temperature sensor driver" 2 tristate "Texas Instruments SoCs temperature sensor driver"
3 depends on THERMAL
4 depends on ARCH_HAS_BANDGAP
5 help 3 help
6 If you say yes here you get support for the Texas Instruments 4 If you say yes here you get support for the Texas Instruments
7 OMAP4460+ on die bandgap temperature sensor support. The register 5 OMAP4460+ on die bandgap temperature sensor support. The register
@@ -24,7 +22,7 @@ config TI_THERMAL
24config OMAP4_THERMAL 22config OMAP4_THERMAL
25 bool "Texas Instruments OMAP4 thermal support" 23 bool "Texas Instruments OMAP4 thermal support"
26 depends on TI_SOC_THERMAL 24 depends on TI_SOC_THERMAL
27 depends on ARCH_OMAP4 25 depends on ARCH_OMAP4 || COMPILE_TEST
28 help 26 help
29 If you say yes here you get thermal support for the Texas Instruments 27 If you say yes here you get thermal support for the Texas Instruments
30 OMAP4 SoC family. The current chip supported are: 28 OMAP4 SoC family. The current chip supported are:
@@ -38,7 +36,7 @@ config OMAP4_THERMAL
38config OMAP5_THERMAL 36config OMAP5_THERMAL
39 bool "Texas Instruments OMAP5 thermal support" 37 bool "Texas Instruments OMAP5 thermal support"
40 depends on TI_SOC_THERMAL 38 depends on TI_SOC_THERMAL
41 depends on SOC_OMAP5 39 depends on SOC_OMAP5 || COMPILE_TEST
42 help 40 help
43 If you say yes here you get thermal support for the Texas Instruments 41 If you say yes here you get thermal support for the Texas Instruments
44 OMAP5 SoC family. The current chip supported are: 42 OMAP5 SoC family. The current chip supported are:
@@ -50,7 +48,7 @@ config OMAP5_THERMAL
50config DRA752_THERMAL 48config DRA752_THERMAL
51 bool "Texas Instruments DRA752 thermal support" 49 bool "Texas Instruments DRA752 thermal support"
52 depends on TI_SOC_THERMAL 50 depends on TI_SOC_THERMAL
53 depends on SOC_DRA7XX 51 depends on SOC_DRA7XX || COMPILE_TEST
54 help 52 help
55 If you say yes here you get thermal support for the Texas Instruments 53 If you say yes here you get thermal support for the Texas Instruments
56 DRA752 SoC family. The current chip supported are: 54 DRA752 SoC family. The current chip supported are:
diff --git a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
index c7c5b3779dac..b213a1222295 100644
--- a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
+++ b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
@@ -76,14 +76,14 @@ static inline int ti_thermal_hotspot_temperature(int t, int s, int c)
76 76
77/* thermal zone ops */ 77/* thermal zone ops */
78/* Get temperature callback function for thermal zone */ 78/* Get temperature callback function for thermal zone */
79static inline int __ti_thermal_get_temp(void *devdata, long *temp) 79static inline int __ti_thermal_get_temp(void *devdata, int *temp)
80{ 80{
81 struct thermal_zone_device *pcb_tz = NULL; 81 struct thermal_zone_device *pcb_tz = NULL;
82 struct ti_thermal_data *data = devdata; 82 struct ti_thermal_data *data = devdata;
83 struct ti_bandgap *bgp; 83 struct ti_bandgap *bgp;
84 const struct ti_temp_sensor *s; 84 const struct ti_temp_sensor *s;
85 int ret, tmp, slope, constant; 85 int ret, tmp, slope, constant;
86 unsigned long pcb_temp; 86 int pcb_temp;
87 87
88 if (!data) 88 if (!data)
89 return 0; 89 return 0;
@@ -119,7 +119,7 @@ static inline int __ti_thermal_get_temp(void *devdata, long *temp)
119} 119}
120 120
121static inline int ti_thermal_get_temp(struct thermal_zone_device *thermal, 121static inline int ti_thermal_get_temp(struct thermal_zone_device *thermal,
122 unsigned long *temp) 122 int *temp)
123{ 123{
124 struct ti_thermal_data *data = thermal->devdata; 124 struct ti_thermal_data *data = thermal->devdata;
125 125
@@ -229,7 +229,7 @@ static int ti_thermal_get_trip_type(struct thermal_zone_device *thermal,
229 229
230/* Get trip temperature callback functions for thermal zone */ 230/* Get trip temperature callback functions for thermal zone */
231static int ti_thermal_get_trip_temp(struct thermal_zone_device *thermal, 231static int ti_thermal_get_trip_temp(struct thermal_zone_device *thermal,
232 int trip, unsigned long *temp) 232 int trip, int *temp)
233{ 233{
234 if (!ti_thermal_is_valid_trip(trip)) 234 if (!ti_thermal_is_valid_trip(trip))
235 return -EINVAL; 235 return -EINVAL;
@@ -280,7 +280,7 @@ static int ti_thermal_get_trend(struct thermal_zone_device *thermal,
280 280
281/* Get critical temperature callback functions for thermal zone */ 281/* Get critical temperature callback functions for thermal zone */
282static int ti_thermal_get_crit_temp(struct thermal_zone_device *thermal, 282static int ti_thermal_get_crit_temp(struct thermal_zone_device *thermal,
283 unsigned long *temp) 283 int *temp)
284{ 284{
285 /* shutdown zone */ 285 /* shutdown zone */
286 return ti_thermal_get_trip_temp(thermal, OMAP_TRIP_NUMBER - 1, temp); 286 return ti_thermal_get_trip_temp(thermal, OMAP_TRIP_NUMBER - 1, temp);
diff --git a/drivers/thermal/x86_pkg_temp_thermal.c b/drivers/thermal/x86_pkg_temp_thermal.c
index 50d1d2cb091a..7fc919f7da4d 100644
--- a/drivers/thermal/x86_pkg_temp_thermal.c
+++ b/drivers/thermal/x86_pkg_temp_thermal.c
@@ -164,7 +164,7 @@ err_ret:
164 return err; 164 return err;
165} 165}
166 166
167static int sys_get_curr_temp(struct thermal_zone_device *tzd, unsigned long *temp) 167static int sys_get_curr_temp(struct thermal_zone_device *tzd, int *temp)
168{ 168{
169 u32 eax, edx; 169 u32 eax, edx;
170 struct phy_dev_entry *phy_dev_entry; 170 struct phy_dev_entry *phy_dev_entry;
@@ -175,7 +175,7 @@ static int sys_get_curr_temp(struct thermal_zone_device *tzd, unsigned long *tem
175 if (eax & 0x80000000) { 175 if (eax & 0x80000000) {
176 *temp = phy_dev_entry->tj_max - 176 *temp = phy_dev_entry->tj_max -
177 ((eax >> 16) & 0x7f) * 1000; 177 ((eax >> 16) & 0x7f) * 1000;
178 pr_debug("sys_get_curr_temp %ld\n", *temp); 178 pr_debug("sys_get_curr_temp %d\n", *temp);
179 return 0; 179 return 0;
180 } 180 }
181 181
@@ -183,7 +183,7 @@ static int sys_get_curr_temp(struct thermal_zone_device *tzd, unsigned long *tem
183} 183}
184 184
185static int sys_get_trip_temp(struct thermal_zone_device *tzd, 185static int sys_get_trip_temp(struct thermal_zone_device *tzd,
186 int trip, unsigned long *temp) 186 int trip, int *temp)
187{ 187{
188 u32 eax, edx; 188 u32 eax, edx;
189 struct phy_dev_entry *phy_dev_entry; 189 struct phy_dev_entry *phy_dev_entry;
@@ -214,13 +214,13 @@ static int sys_get_trip_temp(struct thermal_zone_device *tzd,
214 *temp = phy_dev_entry->tj_max - thres_reg_value * 1000; 214 *temp = phy_dev_entry->tj_max - thres_reg_value * 1000;
215 else 215 else
216 *temp = 0; 216 *temp = 0;
217 pr_debug("sys_get_trip_temp %ld\n", *temp); 217 pr_debug("sys_get_trip_temp %d\n", *temp);
218 218
219 return 0; 219 return 0;
220} 220}
221 221
222static int sys_set_trip_temp(struct thermal_zone_device *tzd, int trip, 222static int sys_set_trip_temp(struct thermal_zone_device *tzd, int trip,
223 unsigned long temp) 223 int temp)
224{ 224{
225 u32 l, h; 225 u32 l, h;
226 struct phy_dev_entry *phy_dev_entry; 226 struct phy_dev_entry *phy_dev_entry;
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 7d137a43cc86..9eda69e40678 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -61,8 +61,7 @@ MODULE_PARM_DESC(experimental_zcopytx, "Enable Zero Copy TX;"
61enum { 61enum {
62 VHOST_NET_FEATURES = VHOST_FEATURES | 62 VHOST_NET_FEATURES = VHOST_FEATURES |
63 (1ULL << VHOST_NET_F_VIRTIO_NET_HDR) | 63 (1ULL << VHOST_NET_F_VIRTIO_NET_HDR) |
64 (1ULL << VIRTIO_NET_F_MRG_RXBUF) | 64 (1ULL << VIRTIO_NET_F_MRG_RXBUF)
65 (1ULL << VIRTIO_F_VERSION_1),
66}; 65};
67 66
68enum { 67enum {
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index f114a9dbb48f..e25a23692822 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -166,9 +166,7 @@ enum {
166/* Note: can't set VIRTIO_F_VERSION_1 yet, since that implies ANY_LAYOUT. */ 166/* Note: can't set VIRTIO_F_VERSION_1 yet, since that implies ANY_LAYOUT. */
167enum { 167enum {
168 VHOST_SCSI_FEATURES = VHOST_FEATURES | (1ULL << VIRTIO_SCSI_F_HOTPLUG) | 168 VHOST_SCSI_FEATURES = VHOST_FEATURES | (1ULL << VIRTIO_SCSI_F_HOTPLUG) |
169 (1ULL << VIRTIO_SCSI_F_T10_PI) | 169 (1ULL << VIRTIO_SCSI_F_T10_PI)
170 (1ULL << VIRTIO_F_ANY_LAYOUT) |
171 (1ULL << VIRTIO_F_VERSION_1)
172}; 170};
173 171
174#define VHOST_SCSI_MAX_TARGET 256 172#define VHOST_SCSI_MAX_TARGET 256
diff --git a/drivers/vhost/test.c b/drivers/vhost/test.c
index d9c501eaa6c3..f2882ac98726 100644
--- a/drivers/vhost/test.c
+++ b/drivers/vhost/test.c
@@ -277,10 +277,13 @@ static long vhost_test_ioctl(struct file *f, unsigned int ioctl,
277 return -EFAULT; 277 return -EFAULT;
278 return 0; 278 return 0;
279 case VHOST_SET_FEATURES: 279 case VHOST_SET_FEATURES:
280 printk(KERN_ERR "1\n");
280 if (copy_from_user(&features, featurep, sizeof features)) 281 if (copy_from_user(&features, featurep, sizeof features))
281 return -EFAULT; 282 return -EFAULT;
283 printk(KERN_ERR "2\n");
282 if (features & ~VHOST_FEATURES) 284 if (features & ~VHOST_FEATURES)
283 return -EOPNOTSUPP; 285 return -EOPNOTSUPP;
286 printk(KERN_ERR "3\n");
284 return vhost_test_set_features(n, features); 287 return vhost_test_set_features(n, features);
285 case VHOST_RESET_OWNER: 288 case VHOST_RESET_OWNER:
286 return vhost_test_reset_owner(n); 289 return vhost_test_reset_owner(n);
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index ce6f6da4b09f..4772862b71a7 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -173,7 +173,9 @@ enum {
173 VHOST_FEATURES = (1ULL << VIRTIO_F_NOTIFY_ON_EMPTY) | 173 VHOST_FEATURES = (1ULL << VIRTIO_F_NOTIFY_ON_EMPTY) |
174 (1ULL << VIRTIO_RING_F_INDIRECT_DESC) | 174 (1ULL << VIRTIO_RING_F_INDIRECT_DESC) |
175 (1ULL << VIRTIO_RING_F_EVENT_IDX) | 175 (1ULL << VIRTIO_RING_F_EVENT_IDX) |
176 (1ULL << VHOST_F_LOG_ALL), 176 (1ULL << VHOST_F_LOG_ALL) |
177 (1ULL << VIRTIO_F_ANY_LAYOUT) |
178 (1ULL << VIRTIO_F_VERSION_1)
177}; 179};
178 180
179static inline bool vhost_has_feature(struct vhost_virtqueue *vq, int bit) 181static inline bool vhost_has_feature(struct vhost_virtqueue *vq, int bit)
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 55c4b5b0a317..c68edc16aa54 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -188,6 +188,15 @@ config AT91SAM9X_WATCHDOG
188 Watchdog timer embedded into AT91SAM9X and AT91CAP9 chips. This will 188 Watchdog timer embedded into AT91SAM9X and AT91CAP9 chips. This will
189 reboot your system when the timeout is reached. 189 reboot your system when the timeout is reached.
190 190
191config SAMA5D4_WATCHDOG
192 tristate "Atmel SAMA5D4 Watchdog Timer"
193 depends on ARCH_AT91
194 select WATCHDOG_CORE
195 help
196 Atmel SAMA5D4 watchdog timer is embedded into SAMA5D4 chips.
197 Its Watchdog Timer Mode Register can be written more than once.
198 This will reboot your system when the timeout is reached.
199
191config CADENCE_WATCHDOG 200config CADENCE_WATCHDOG
192 tristate "Cadence Watchdog Timer" 201 tristate "Cadence Watchdog Timer"
193 depends on HAS_IOMEM 202 depends on HAS_IOMEM
@@ -558,6 +567,17 @@ config DIGICOLOR_WATCHDOG
558 To compile this driver as a module, choose M here: the 567 To compile this driver as a module, choose M here: the
559 module will be called digicolor_wdt. 568 module will be called digicolor_wdt.
560 569
570config LPC18XX_WATCHDOG
571 tristate "LPC18xx/43xx Watchdog"
572 depends on ARCH_LPC18XX || COMPILE_TEST
573 select WATCHDOG_CORE
574 help
575 Say Y here to include support for the watchdog timer
576 in NXP LPC SoCs family, which includes LPC18xx/LPC43xx
577 processors.
578 To compile this driver as a module, choose M here: the
579 module will be called lpc18xx_wdt.
580
561# AVR32 Architecture 581# AVR32 Architecture
562 582
563config AT32AP700X_WDT 583config AT32AP700X_WDT
@@ -1334,7 +1354,7 @@ config MPC5200_WDT
1334 1354
1335config 8xxx_WDT 1355config 8xxx_WDT
1336 tristate "MPC8xxx Platform Watchdog Timer" 1356 tristate "MPC8xxx Platform Watchdog Timer"
1337 depends on PPC_8xx || PPC_83xx || PPC_86xx 1357 depends on PPC_8xx || PPC_83xx || PPC_86xx || PPC_MPC512x
1338 select WATCHDOG_CORE 1358 select WATCHDOG_CORE
1339 help 1359 help
1340 This driver is for a SoC level watchdog that exists on some 1360 This driver is for a SoC level watchdog that exists on some
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index 59ea9a1b8e76..0c616e3f67bb 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -41,6 +41,7 @@ obj-$(CONFIG_IXP4XX_WATCHDOG) += ixp4xx_wdt.o
41obj-$(CONFIG_KS8695_WATCHDOG) += ks8695_wdt.o 41obj-$(CONFIG_KS8695_WATCHDOG) += ks8695_wdt.o
42obj-$(CONFIG_S3C2410_WATCHDOG) += s3c2410_wdt.o 42obj-$(CONFIG_S3C2410_WATCHDOG) += s3c2410_wdt.o
43obj-$(CONFIG_SA1100_WATCHDOG) += sa1100_wdt.o 43obj-$(CONFIG_SA1100_WATCHDOG) += sa1100_wdt.o
44obj-$(CONFIG_SAMA5D4_WATCHDOG) += sama5d4_wdt.o
44obj-$(CONFIG_DW_WATCHDOG) += dw_wdt.o 45obj-$(CONFIG_DW_WATCHDOG) += dw_wdt.o
45obj-$(CONFIG_EP93XX_WATCHDOG) += ep93xx_wdt.o 46obj-$(CONFIG_EP93XX_WATCHDOG) += ep93xx_wdt.o
46obj-$(CONFIG_PNX4008_WATCHDOG) += pnx4008_wdt.o 47obj-$(CONFIG_PNX4008_WATCHDOG) += pnx4008_wdt.o
@@ -66,6 +67,7 @@ obj-$(CONFIG_TEGRA_WATCHDOG) += tegra_wdt.o
66obj-$(CONFIG_MESON_WATCHDOG) += meson_wdt.o 67obj-$(CONFIG_MESON_WATCHDOG) += meson_wdt.o
67obj-$(CONFIG_MEDIATEK_WATCHDOG) += mtk_wdt.o 68obj-$(CONFIG_MEDIATEK_WATCHDOG) += mtk_wdt.o
68obj-$(CONFIG_DIGICOLOR_WATCHDOG) += digicolor_wdt.o 69obj-$(CONFIG_DIGICOLOR_WATCHDOG) += digicolor_wdt.o
70obj-$(CONFIG_LPC18XX_WATCHDOG) += lpc18xx_wdt.o
69 71
70# AVR32 Architecture 72# AVR32 Architecture
71obj-$(CONFIG_AT32AP700X_WDT) += at32ap700x_wdt.o 73obj-$(CONFIG_AT32AP700X_WDT) += at32ap700x_wdt.o
diff --git a/drivers/watchdog/at91rm9200_wdt.c b/drivers/watchdog/at91rm9200_wdt.c
index 9ba1153465ae..e12a797cb820 100644
--- a/drivers/watchdog/at91rm9200_wdt.c
+++ b/drivers/watchdog/at91rm9200_wdt.c
@@ -244,7 +244,7 @@ static int at91wdt_probe(struct platform_device *pdev)
244 } 244 }
245 245
246 regmap_st = syscon_node_to_regmap(parent->of_node); 246 regmap_st = syscon_node_to_regmap(parent->of_node);
247 if (!regmap_st) 247 if (IS_ERR(regmap_st))
248 return -ENODEV; 248 return -ENODEV;
249 249
250 res = misc_register(&at91wdt_miscdev); 250 res = misc_register(&at91wdt_miscdev);
diff --git a/drivers/watchdog/at91sam9_wdt.c b/drivers/watchdog/at91sam9_wdt.c
index e4698f7c5f93..7e6acaf3ece4 100644
--- a/drivers/watchdog/at91sam9_wdt.c
+++ b/drivers/watchdog/at91sam9_wdt.c
@@ -17,6 +17,7 @@
17 17
18#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 18#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19 19
20#include <linux/clk.h>
20#include <linux/errno.h> 21#include <linux/errno.h>
21#include <linux/init.h> 22#include <linux/init.h>
22#include <linux/interrupt.h> 23#include <linux/interrupt.h>
@@ -90,6 +91,7 @@ struct at91wdt {
90 unsigned long heartbeat; /* WDT heartbeat in jiffies */ 91 unsigned long heartbeat; /* WDT heartbeat in jiffies */
91 bool nowayout; 92 bool nowayout;
92 unsigned int irq; 93 unsigned int irq;
94 struct clk *sclk;
93}; 95};
94 96
95/* ......................................................................... */ 97/* ......................................................................... */
@@ -352,15 +354,25 @@ static int __init at91wdt_probe(struct platform_device *pdev)
352 if (IS_ERR(wdt->base)) 354 if (IS_ERR(wdt->base))
353 return PTR_ERR(wdt->base); 355 return PTR_ERR(wdt->base);
354 356
357 wdt->sclk = devm_clk_get(&pdev->dev, NULL);
358 if (IS_ERR(wdt->sclk))
359 return PTR_ERR(wdt->sclk);
360
361 err = clk_prepare_enable(wdt->sclk);
362 if (err) {
363 dev_err(&pdev->dev, "Could not enable slow clock\n");
364 return err;
365 }
366
355 if (pdev->dev.of_node) { 367 if (pdev->dev.of_node) {
356 err = of_at91wdt_init(pdev->dev.of_node, wdt); 368 err = of_at91wdt_init(pdev->dev.of_node, wdt);
357 if (err) 369 if (err)
358 return err; 370 goto err_clk;
359 } 371 }
360 372
361 err = at91_wdt_init(pdev, wdt); 373 err = at91_wdt_init(pdev, wdt);
362 if (err) 374 if (err)
363 return err; 375 goto err_clk;
364 376
365 platform_set_drvdata(pdev, wdt); 377 platform_set_drvdata(pdev, wdt);
366 378
@@ -368,6 +380,11 @@ static int __init at91wdt_probe(struct platform_device *pdev)
368 wdt->wdd.timeout, wdt->nowayout); 380 wdt->wdd.timeout, wdt->nowayout);
369 381
370 return 0; 382 return 0;
383
384err_clk:
385 clk_disable_unprepare(wdt->sclk);
386
387 return err;
371} 388}
372 389
373static int __exit at91wdt_remove(struct platform_device *pdev) 390static int __exit at91wdt_remove(struct platform_device *pdev)
@@ -377,6 +394,7 @@ static int __exit at91wdt_remove(struct platform_device *pdev)
377 394
378 pr_warn("I quit now, hardware will probably reboot!\n"); 395 pr_warn("I quit now, hardware will probably reboot!\n");
379 del_timer(&wdt->timer); 396 del_timer(&wdt->timer);
397 clk_disable_unprepare(wdt->sclk);
380 398
381 return 0; 399 return 0;
382} 400}
diff --git a/drivers/watchdog/at91sam9_wdt.h b/drivers/watchdog/at91sam9_wdt.h
index c6fbb2e6c41b..b79a83b467ce 100644
--- a/drivers/watchdog/at91sam9_wdt.h
+++ b/drivers/watchdog/at91sam9_wdt.h
@@ -22,11 +22,13 @@
22 22
23#define AT91_WDT_MR 0x04 /* Watchdog Mode Register */ 23#define AT91_WDT_MR 0x04 /* Watchdog Mode Register */
24#define AT91_WDT_WDV (0xfff << 0) /* Counter Value */ 24#define AT91_WDT_WDV (0xfff << 0) /* Counter Value */
25#define AT91_WDT_SET_WDV(x) ((x) & AT91_WDT_WDV)
25#define AT91_WDT_WDFIEN (1 << 12) /* Fault Interrupt Enable */ 26#define AT91_WDT_WDFIEN (1 << 12) /* Fault Interrupt Enable */
26#define AT91_WDT_WDRSTEN (1 << 13) /* Reset Processor */ 27#define AT91_WDT_WDRSTEN (1 << 13) /* Reset Processor */
27#define AT91_WDT_WDRPROC (1 << 14) /* Timer Restart */ 28#define AT91_WDT_WDRPROC (1 << 14) /* Timer Restart */
28#define AT91_WDT_WDDIS (1 << 15) /* Watchdog Disable */ 29#define AT91_WDT_WDDIS (1 << 15) /* Watchdog Disable */
29#define AT91_WDT_WDD (0xfff << 16) /* Delta Value */ 30#define AT91_WDT_WDD (0xfff << 16) /* Delta Value */
31#define AT91_WDT_SET_WDD(x) (((x) << 16) & AT91_WDT_WDD)
30#define AT91_WDT_WDDBGHLT (1 << 28) /* Debug Halt */ 32#define AT91_WDT_WDDBGHLT (1 << 28) /* Debug Halt */
31#define AT91_WDT_WDIDLEHLT (1 << 29) /* Idle Halt */ 33#define AT91_WDT_WDIDLEHLT (1 << 29) /* Idle Halt */
32 34
diff --git a/drivers/watchdog/bcm2835_wdt.c b/drivers/watchdog/bcm2835_wdt.c
index 7116968dee12..66c3e656a616 100644
--- a/drivers/watchdog/bcm2835_wdt.c
+++ b/drivers/watchdog/bcm2835_wdt.c
@@ -182,6 +182,7 @@ static int bcm2835_wdt_probe(struct platform_device *pdev)
182 watchdog_set_drvdata(&bcm2835_wdt_wdd, wdt); 182 watchdog_set_drvdata(&bcm2835_wdt_wdd, wdt);
183 watchdog_init_timeout(&bcm2835_wdt_wdd, heartbeat, dev); 183 watchdog_init_timeout(&bcm2835_wdt_wdd, heartbeat, dev);
184 watchdog_set_nowayout(&bcm2835_wdt_wdd, nowayout); 184 watchdog_set_nowayout(&bcm2835_wdt_wdd, nowayout);
185 bcm2835_wdt_wdd.parent = &pdev->dev;
185 err = watchdog_register_device(&bcm2835_wdt_wdd); 186 err = watchdog_register_device(&bcm2835_wdt_wdd);
186 if (err) { 187 if (err) {
187 dev_err(dev, "Failed to register watchdog device"); 188 dev_err(dev, "Failed to register watchdog device");
diff --git a/drivers/watchdog/bcm47xx_wdt.c b/drivers/watchdog/bcm47xx_wdt.c
index b28a072abf78..4064a43f1360 100644
--- a/drivers/watchdog/bcm47xx_wdt.c
+++ b/drivers/watchdog/bcm47xx_wdt.c
@@ -209,6 +209,7 @@ static int bcm47xx_wdt_probe(struct platform_device *pdev)
209 209
210 wdt->wdd.info = &bcm47xx_wdt_info; 210 wdt->wdd.info = &bcm47xx_wdt_info;
211 wdt->wdd.timeout = WDT_DEFAULT_TIME; 211 wdt->wdd.timeout = WDT_DEFAULT_TIME;
212 wdt->wdd.parent = &pdev->dev;
212 ret = wdt->wdd.ops->set_timeout(&wdt->wdd, timeout); 213 ret = wdt->wdd.ops->set_timeout(&wdt->wdd, timeout);
213 if (ret) 214 if (ret)
214 goto err_timer; 215 goto err_timer;
diff --git a/drivers/watchdog/bcm_kona_wdt.c b/drivers/watchdog/bcm_kona_wdt.c
index 22d8ae65772a..e0c98423f2c9 100644
--- a/drivers/watchdog/bcm_kona_wdt.c
+++ b/drivers/watchdog/bcm_kona_wdt.c
@@ -319,6 +319,7 @@ static int bcm_kona_wdt_probe(struct platform_device *pdev)
319 spin_lock_init(&wdt->lock); 319 spin_lock_init(&wdt->lock);
320 platform_set_drvdata(pdev, wdt); 320 platform_set_drvdata(pdev, wdt);
321 watchdog_set_drvdata(&bcm_kona_wdt_wdd, wdt); 321 watchdog_set_drvdata(&bcm_kona_wdt_wdd, wdt);
322 bcm_kona_wdt_wdd.parent = &pdev->dev;
322 323
323 ret = bcm_kona_wdt_set_timeout_reg(&bcm_kona_wdt_wdd, 0); 324 ret = bcm_kona_wdt_set_timeout_reg(&bcm_kona_wdt_wdd, 0);
324 if (ret) { 325 if (ret) {
diff --git a/drivers/watchdog/booke_wdt.c b/drivers/watchdog/booke_wdt.c
index e96b09b135c8..04da4b66c75e 100644
--- a/drivers/watchdog/booke_wdt.c
+++ b/drivers/watchdog/booke_wdt.c
@@ -186,8 +186,6 @@ static int booke_wdt_stop(struct watchdog_device *wdog)
186static int booke_wdt_set_timeout(struct watchdog_device *wdt_dev, 186static int booke_wdt_set_timeout(struct watchdog_device *wdt_dev,
187 unsigned int timeout) 187 unsigned int timeout)
188{ 188{
189 if (timeout > MAX_WDT_TIMEOUT)
190 return -EINVAL;
191 wdt_dev->timeout = timeout; 189 wdt_dev->timeout = timeout;
192 booke_wdt_set(wdt_dev); 190 booke_wdt_set(wdt_dev);
193 191
@@ -211,7 +209,6 @@ static struct watchdog_device booke_wdt_dev = {
211 .info = &booke_wdt_info, 209 .info = &booke_wdt_info,
212 .ops = &booke_wdt_ops, 210 .ops = &booke_wdt_ops,
213 .min_timeout = 1, 211 .min_timeout = 1,
214 .max_timeout = 0xFFFF
215}; 212};
216 213
217static void __exit booke_wdt_exit(void) 214static void __exit booke_wdt_exit(void)
@@ -229,6 +226,7 @@ static int __init booke_wdt_init(void)
229 booke_wdt_set_timeout(&booke_wdt_dev, 226 booke_wdt_set_timeout(&booke_wdt_dev,
230 period_to_sec(booke_wdt_period)); 227 period_to_sec(booke_wdt_period));
231 watchdog_set_nowayout(&booke_wdt_dev, nowayout); 228 watchdog_set_nowayout(&booke_wdt_dev, nowayout);
229 booke_wdt_dev.max_timeout = MAX_WDT_TIMEOUT;
232 if (booke_wdt_enabled) 230 if (booke_wdt_enabled)
233 booke_wdt_start(&booke_wdt_dev); 231 booke_wdt_start(&booke_wdt_dev);
234 232
diff --git a/drivers/watchdog/coh901327_wdt.c b/drivers/watchdog/coh901327_wdt.c
index ce12f437f195..a099b77fc0b9 100644
--- a/drivers/watchdog/coh901327_wdt.c
+++ b/drivers/watchdog/coh901327_wdt.c
@@ -358,6 +358,7 @@ static int __init coh901327_probe(struct platform_device *pdev)
358 if (ret < 0) 358 if (ret < 0)
359 coh901327_wdt.timeout = 60; 359 coh901327_wdt.timeout = 60;
360 360
361 coh901327_wdt.parent = &pdev->dev;
361 ret = watchdog_register_device(&coh901327_wdt); 362 ret = watchdog_register_device(&coh901327_wdt);
362 if (ret == 0) 363 if (ret == 0)
363 dev_info(&pdev->dev, 364 dev_info(&pdev->dev,
diff --git a/drivers/watchdog/da9052_wdt.c b/drivers/watchdog/da9052_wdt.c
index 2e9589652e1e..67e67977bd29 100644
--- a/drivers/watchdog/da9052_wdt.c
+++ b/drivers/watchdog/da9052_wdt.c
@@ -195,6 +195,7 @@ static int da9052_wdt_probe(struct platform_device *pdev)
195 da9052_wdt->timeout = DA9052_DEF_TIMEOUT; 195 da9052_wdt->timeout = DA9052_DEF_TIMEOUT;
196 da9052_wdt->info = &da9052_wdt_info; 196 da9052_wdt->info = &da9052_wdt_info;
197 da9052_wdt->ops = &da9052_wdt_ops; 197 da9052_wdt->ops = &da9052_wdt_ops;
198 da9052_wdt->parent = &pdev->dev;
198 watchdog_set_drvdata(da9052_wdt, driver_data); 199 watchdog_set_drvdata(da9052_wdt, driver_data);
199 200
200 kref_init(&driver_data->kref); 201 kref_init(&driver_data->kref);
diff --git a/drivers/watchdog/da9055_wdt.c b/drivers/watchdog/da9055_wdt.c
index 495089d8dbfe..04d1430d93d2 100644
--- a/drivers/watchdog/da9055_wdt.c
+++ b/drivers/watchdog/da9055_wdt.c
@@ -161,6 +161,7 @@ static int da9055_wdt_probe(struct platform_device *pdev)
161 da9055_wdt->timeout = DA9055_DEF_TIMEOUT; 161 da9055_wdt->timeout = DA9055_DEF_TIMEOUT;
162 da9055_wdt->info = &da9055_wdt_info; 162 da9055_wdt->info = &da9055_wdt_info;
163 da9055_wdt->ops = &da9055_wdt_ops; 163 da9055_wdt->ops = &da9055_wdt_ops;
164 da9055_wdt->parent = &pdev->dev;
164 watchdog_set_nowayout(da9055_wdt, nowayout); 165 watchdog_set_nowayout(da9055_wdt, nowayout);
165 watchdog_set_drvdata(da9055_wdt, driver_data); 166 watchdog_set_drvdata(da9055_wdt, driver_data);
166 167
diff --git a/drivers/watchdog/da9062_wdt.c b/drivers/watchdog/da9062_wdt.c
index b3a870ce85be..7386111220d5 100644
--- a/drivers/watchdog/da9062_wdt.c
+++ b/drivers/watchdog/da9062_wdt.c
@@ -210,6 +210,7 @@ static int da9062_wdt_probe(struct platform_device *pdev)
210 wdt->wdtdev.max_timeout = DA9062_WDT_MAX_TIMEOUT; 210 wdt->wdtdev.max_timeout = DA9062_WDT_MAX_TIMEOUT;
211 wdt->wdtdev.timeout = DA9062_WDG_DEFAULT_TIMEOUT; 211 wdt->wdtdev.timeout = DA9062_WDG_DEFAULT_TIMEOUT;
212 wdt->wdtdev.status = WATCHDOG_NOWAYOUT_INIT_STATUS; 212 wdt->wdtdev.status = WATCHDOG_NOWAYOUT_INIT_STATUS;
213 wdt->wdtdev.parent = &pdev->dev;
213 214
214 watchdog_set_drvdata(&wdt->wdtdev, wdt); 215 watchdog_set_drvdata(&wdt->wdtdev, wdt);
215 dev_set_drvdata(&pdev->dev, wdt); 216 dev_set_drvdata(&pdev->dev, wdt);
diff --git a/drivers/watchdog/da9063_wdt.c b/drivers/watchdog/da9063_wdt.c
index e2fe2ebdebd4..6bf130bd863d 100644
--- a/drivers/watchdog/da9063_wdt.c
+++ b/drivers/watchdog/da9063_wdt.c
@@ -175,6 +175,7 @@ static int da9063_wdt_probe(struct platform_device *pdev)
175 wdt->wdtdev.min_timeout = DA9063_WDT_MIN_TIMEOUT; 175 wdt->wdtdev.min_timeout = DA9063_WDT_MIN_TIMEOUT;
176 wdt->wdtdev.max_timeout = DA9063_WDT_MAX_TIMEOUT; 176 wdt->wdtdev.max_timeout = DA9063_WDT_MAX_TIMEOUT;
177 wdt->wdtdev.timeout = DA9063_WDG_TIMEOUT; 177 wdt->wdtdev.timeout = DA9063_WDG_TIMEOUT;
178 wdt->wdtdev.parent = &pdev->dev;
178 179
179 wdt->wdtdev.status = WATCHDOG_NOWAYOUT_INIT_STATUS; 180 wdt->wdtdev.status = WATCHDOG_NOWAYOUT_INIT_STATUS;
180 181
diff --git a/drivers/watchdog/davinci_wdt.c b/drivers/watchdog/davinci_wdt.c
index cfdf8a408aea..17454ca653f4 100644
--- a/drivers/watchdog/davinci_wdt.c
+++ b/drivers/watchdog/davinci_wdt.c
@@ -179,6 +179,7 @@ static int davinci_wdt_probe(struct platform_device *pdev)
179 wdd->min_timeout = 1; 179 wdd->min_timeout = 1;
180 wdd->max_timeout = MAX_HEARTBEAT; 180 wdd->max_timeout = MAX_HEARTBEAT;
181 wdd->timeout = DEFAULT_HEARTBEAT; 181 wdd->timeout = DEFAULT_HEARTBEAT;
182 wdd->parent = &pdev->dev;
182 183
183 watchdog_init_timeout(wdd, heartbeat, dev); 184 watchdog_init_timeout(wdd, heartbeat, dev);
184 185
diff --git a/drivers/watchdog/digicolor_wdt.c b/drivers/watchdog/digicolor_wdt.c
index 31d8e4936611..50abe1bf62a5 100644
--- a/drivers/watchdog/digicolor_wdt.c
+++ b/drivers/watchdog/digicolor_wdt.c
@@ -143,6 +143,7 @@ static int dc_wdt_probe(struct platform_device *pdev)
143 } 143 }
144 dc_wdt_wdd.max_timeout = U32_MAX / clk_get_rate(wdt->clk); 144 dc_wdt_wdd.max_timeout = U32_MAX / clk_get_rate(wdt->clk);
145 dc_wdt_wdd.timeout = dc_wdt_wdd.max_timeout; 145 dc_wdt_wdd.timeout = dc_wdt_wdd.max_timeout;
146 dc_wdt_wdd.parent = &pdev->dev;
146 147
147 spin_lock_init(&wdt->lock); 148 spin_lock_init(&wdt->lock);
148 149
diff --git a/drivers/watchdog/ep93xx_wdt.c b/drivers/watchdog/ep93xx_wdt.c
index 7a2cc7191c58..0a4d7cc05d54 100644
--- a/drivers/watchdog/ep93xx_wdt.c
+++ b/drivers/watchdog/ep93xx_wdt.c
@@ -132,6 +132,7 @@ static int ep93xx_wdt_probe(struct platform_device *pdev)
132 val = readl(mmio_base + EP93XX_WATCHDOG); 132 val = readl(mmio_base + EP93XX_WATCHDOG);
133 ep93xx_wdt_wdd.bootstatus = (val & 0x01) ? WDIOF_CARDRESET : 0; 133 ep93xx_wdt_wdd.bootstatus = (val & 0x01) ? WDIOF_CARDRESET : 0;
134 ep93xx_wdt_wdd.timeout = timeout; 134 ep93xx_wdt_wdd.timeout = timeout;
135 ep93xx_wdt_wdd.parent = &pdev->dev;
135 136
136 watchdog_set_nowayout(&ep93xx_wdt_wdd, nowayout); 137 watchdog_set_nowayout(&ep93xx_wdt_wdd, nowayout);
137 138
diff --git a/drivers/watchdog/gpio_wdt.c b/drivers/watchdog/gpio_wdt.c
index 1687cc2d7122..90d59d3f38a3 100644
--- a/drivers/watchdog/gpio_wdt.c
+++ b/drivers/watchdog/gpio_wdt.c
@@ -50,12 +50,41 @@ static void gpio_wdt_disable(struct gpio_wdt_priv *priv)
50 gpio_direction_input(priv->gpio); 50 gpio_direction_input(priv->gpio);
51} 51}
52 52
53static void gpio_wdt_hwping(unsigned long data)
54{
55 struct watchdog_device *wdd = (struct watchdog_device *)data;
56 struct gpio_wdt_priv *priv = watchdog_get_drvdata(wdd);
57
58 if (priv->armed && time_after(jiffies, priv->last_jiffies +
59 msecs_to_jiffies(wdd->timeout * 1000))) {
60 dev_crit(wdd->dev, "Timer expired. System will reboot soon!\n");
61 return;
62 }
63
64 /* Restart timer */
65 mod_timer(&priv->timer, jiffies + priv->hw_margin);
66
67 switch (priv->hw_algo) {
68 case HW_ALGO_TOGGLE:
69 /* Toggle output pin */
70 priv->state = !priv->state;
71 gpio_set_value_cansleep(priv->gpio, priv->state);
72 break;
73 case HW_ALGO_LEVEL:
74 /* Pulse */
75 gpio_set_value_cansleep(priv->gpio, !priv->active_low);
76 udelay(1);
77 gpio_set_value_cansleep(priv->gpio, priv->active_low);
78 break;
79 }
80}
81
53static void gpio_wdt_start_impl(struct gpio_wdt_priv *priv) 82static void gpio_wdt_start_impl(struct gpio_wdt_priv *priv)
54{ 83{
55 priv->state = priv->active_low; 84 priv->state = priv->active_low;
56 gpio_direction_output(priv->gpio, priv->state); 85 gpio_direction_output(priv->gpio, priv->state);
57 priv->last_jiffies = jiffies; 86 priv->last_jiffies = jiffies;
58 mod_timer(&priv->timer, priv->last_jiffies + priv->hw_margin); 87 gpio_wdt_hwping((unsigned long)&priv->wdd);
59} 88}
60 89
61static int gpio_wdt_start(struct watchdog_device *wdd) 90static int gpio_wdt_start(struct watchdog_device *wdd)
@@ -97,35 +126,6 @@ static int gpio_wdt_set_timeout(struct watchdog_device *wdd, unsigned int t)
97 return gpio_wdt_ping(wdd); 126 return gpio_wdt_ping(wdd);
98} 127}
99 128
100static void gpio_wdt_hwping(unsigned long data)
101{
102 struct watchdog_device *wdd = (struct watchdog_device *)data;
103 struct gpio_wdt_priv *priv = watchdog_get_drvdata(wdd);
104
105 if (priv->armed && time_after(jiffies, priv->last_jiffies +
106 msecs_to_jiffies(wdd->timeout * 1000))) {
107 dev_crit(wdd->dev, "Timer expired. System will reboot soon!\n");
108 return;
109 }
110
111 /* Restart timer */
112 mod_timer(&priv->timer, jiffies + priv->hw_margin);
113
114 switch (priv->hw_algo) {
115 case HW_ALGO_TOGGLE:
116 /* Toggle output pin */
117 priv->state = !priv->state;
118 gpio_set_value_cansleep(priv->gpio, priv->state);
119 break;
120 case HW_ALGO_LEVEL:
121 /* Pulse */
122 gpio_set_value_cansleep(priv->gpio, !priv->active_low);
123 udelay(1);
124 gpio_set_value_cansleep(priv->gpio, priv->active_low);
125 break;
126 }
127}
128
129static int gpio_wdt_notify_sys(struct notifier_block *nb, unsigned long code, 129static int gpio_wdt_notify_sys(struct notifier_block *nb, unsigned long code,
130 void *unused) 130 void *unused)
131{ 131{
@@ -182,10 +182,10 @@ static int gpio_wdt_probe(struct platform_device *pdev)
182 ret = of_property_read_string(pdev->dev.of_node, "hw_algo", &algo); 182 ret = of_property_read_string(pdev->dev.of_node, "hw_algo", &algo);
183 if (ret) 183 if (ret)
184 return ret; 184 return ret;
185 if (!strncmp(algo, "toggle", 6)) { 185 if (!strcmp(algo, "toggle")) {
186 priv->hw_algo = HW_ALGO_TOGGLE; 186 priv->hw_algo = HW_ALGO_TOGGLE;
187 f = GPIOF_IN; 187 f = GPIOF_IN;
188 } else if (!strncmp(algo, "level", 5)) { 188 } else if (!strcmp(algo, "level")) {
189 priv->hw_algo = HW_ALGO_LEVEL; 189 priv->hw_algo = HW_ALGO_LEVEL;
190 f = priv->active_low ? GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW; 190 f = priv->active_low ? GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW;
191 } else { 191 } else {
@@ -217,6 +217,7 @@ static int gpio_wdt_probe(struct platform_device *pdev)
217 priv->wdd.ops = &gpio_wdt_ops; 217 priv->wdd.ops = &gpio_wdt_ops;
218 priv->wdd.min_timeout = SOFT_TIMEOUT_MIN; 218 priv->wdd.min_timeout = SOFT_TIMEOUT_MIN;
219 priv->wdd.max_timeout = SOFT_TIMEOUT_MAX; 219 priv->wdd.max_timeout = SOFT_TIMEOUT_MAX;
220 priv->wdd.parent = &pdev->dev;
220 221
221 if (watchdog_init_timeout(&priv->wdd, 0, &pdev->dev) < 0) 222 if (watchdog_init_timeout(&priv->wdd, 0, &pdev->dev) < 0)
222 priv->wdd.timeout = SOFT_TIMEOUT_DEF; 223 priv->wdd.timeout = SOFT_TIMEOUT_DEF;
diff --git a/drivers/watchdog/ie6xx_wdt.c b/drivers/watchdog/ie6xx_wdt.c
index 9bc39ae51624..78c2541f5d52 100644
--- a/drivers/watchdog/ie6xx_wdt.c
+++ b/drivers/watchdog/ie6xx_wdt.c
@@ -267,6 +267,7 @@ static int ie6xx_wdt_probe(struct platform_device *pdev)
267 267
268 ie6xx_wdt_dev.timeout = timeout; 268 ie6xx_wdt_dev.timeout = timeout;
269 watchdog_set_nowayout(&ie6xx_wdt_dev, nowayout); 269 watchdog_set_nowayout(&ie6xx_wdt_dev, nowayout);
270 ie6xx_wdt_dev.parent = &pdev->dev;
270 271
271 spin_lock_init(&ie6xx_wdt_data.unlock_sequence); 272 spin_lock_init(&ie6xx_wdt_data.unlock_sequence);
272 273
diff --git a/drivers/watchdog/imgpdc_wdt.c b/drivers/watchdog/imgpdc_wdt.c
index 0f73621827ab..15ab07230960 100644
--- a/drivers/watchdog/imgpdc_wdt.c
+++ b/drivers/watchdog/imgpdc_wdt.c
@@ -316,6 +316,7 @@ static int pdc_wdt_remove(struct platform_device *pdev)
316{ 316{
317 struct pdc_wdt_dev *pdc_wdt = platform_get_drvdata(pdev); 317 struct pdc_wdt_dev *pdc_wdt = platform_get_drvdata(pdev);
318 318
319 unregister_restart_handler(&pdc_wdt->restart_handler);
319 pdc_wdt_stop(&pdc_wdt->wdt_dev); 320 pdc_wdt_stop(&pdc_wdt->wdt_dev);
320 watchdog_unregister_device(&pdc_wdt->wdt_dev); 321 watchdog_unregister_device(&pdc_wdt->wdt_dev);
321 clk_disable_unprepare(pdc_wdt->wdt_clk); 322 clk_disable_unprepare(pdc_wdt->wdt_clk);
diff --git a/drivers/watchdog/intel-mid_wdt.c b/drivers/watchdog/intel-mid_wdt.c
index 84f6701c391f..0a436b5d1e84 100644
--- a/drivers/watchdog/intel-mid_wdt.c
+++ b/drivers/watchdog/intel-mid_wdt.c
@@ -137,6 +137,7 @@ static int mid_wdt_probe(struct platform_device *pdev)
137 wdt_dev->min_timeout = MID_WDT_TIMEOUT_MIN; 137 wdt_dev->min_timeout = MID_WDT_TIMEOUT_MIN;
138 wdt_dev->max_timeout = MID_WDT_TIMEOUT_MAX; 138 wdt_dev->max_timeout = MID_WDT_TIMEOUT_MAX;
139 wdt_dev->timeout = MID_WDT_DEFAULT_TIMEOUT; 139 wdt_dev->timeout = MID_WDT_DEFAULT_TIMEOUT;
140 wdt_dev->parent = &pdev->dev;
140 141
141 watchdog_set_drvdata(wdt_dev, &pdev->dev); 142 watchdog_set_drvdata(wdt_dev, &pdev->dev);
142 platform_set_drvdata(pdev, wdt_dev); 143 platform_set_drvdata(pdev, wdt_dev);
diff --git a/drivers/watchdog/jz4740_wdt.c b/drivers/watchdog/jz4740_wdt.c
index 4c2cc09c0c57..6a7d5c365438 100644
--- a/drivers/watchdog/jz4740_wdt.c
+++ b/drivers/watchdog/jz4740_wdt.c
@@ -174,6 +174,7 @@ static int jz4740_wdt_probe(struct platform_device *pdev)
174 jz4740_wdt->timeout = heartbeat; 174 jz4740_wdt->timeout = heartbeat;
175 jz4740_wdt->min_timeout = 1; 175 jz4740_wdt->min_timeout = 1;
176 jz4740_wdt->max_timeout = MAX_HEARTBEAT; 176 jz4740_wdt->max_timeout = MAX_HEARTBEAT;
177 jz4740_wdt->parent = &pdev->dev;
177 watchdog_set_nowayout(jz4740_wdt, nowayout); 178 watchdog_set_nowayout(jz4740_wdt, nowayout);
178 watchdog_set_drvdata(jz4740_wdt, drvdata); 179 watchdog_set_drvdata(jz4740_wdt, drvdata);
179 180
diff --git a/drivers/watchdog/lpc18xx_wdt.c b/drivers/watchdog/lpc18xx_wdt.c
new file mode 100644
index 000000000000..ab7b8b185d99
--- /dev/null
+++ b/drivers/watchdog/lpc18xx_wdt.c
@@ -0,0 +1,340 @@
1/*
2 * NXP LPC18xx Watchdog Timer (WDT)
3 *
4 * Copyright (c) 2015 Ariel D'Alessandro <ariel@vanguardiasur.com>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published by
8 * the Free Software Foundation.
9 *
10 * Notes
11 * -----
12 * The Watchdog consists of a fixed divide-by-4 clock pre-scaler and a 24-bit
13 * counter which decrements on every clock cycle.
14 */
15
16#include <linux/clk.h>
17#include <linux/io.h>
18#include <linux/module.h>
19#include <linux/of.h>
20#include <linux/platform_device.h>
21#include <linux/reboot.h>
22#include <linux/watchdog.h>
23
24/* Registers */
25#define LPC18XX_WDT_MOD 0x00
26#define LPC18XX_WDT_MOD_WDEN BIT(0)
27#define LPC18XX_WDT_MOD_WDRESET BIT(1)
28
29#define LPC18XX_WDT_TC 0x04
30#define LPC18XX_WDT_TC_MIN 0xff
31#define LPC18XX_WDT_TC_MAX 0xffffff
32
33#define LPC18XX_WDT_FEED 0x08
34#define LPC18XX_WDT_FEED_MAGIC1 0xaa
35#define LPC18XX_WDT_FEED_MAGIC2 0x55
36
37#define LPC18XX_WDT_TV 0x0c
38
39/* Clock pre-scaler */
40#define LPC18XX_WDT_CLK_DIV 4
41
42/* Timeout values in seconds */
43#define LPC18XX_WDT_DEF_TIMEOUT 30U
44
45static int heartbeat;
46module_param(heartbeat, int, 0);
47MODULE_PARM_DESC(heartbeat, "Watchdog heartbeats in seconds (default="
48 __MODULE_STRING(LPC18XX_WDT_DEF_TIMEOUT) ")");
49
50static bool nowayout = WATCHDOG_NOWAYOUT;
51module_param(nowayout, bool, 0);
52MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
53 __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
54
55struct lpc18xx_wdt_dev {
56 struct watchdog_device wdt_dev;
57 struct clk *reg_clk;
58 struct clk *wdt_clk;
59 unsigned long clk_rate;
60 void __iomem *base;
61 struct timer_list timer;
62 struct notifier_block restart_handler;
63 spinlock_t lock;
64};
65
66static int lpc18xx_wdt_feed(struct watchdog_device *wdt_dev)
67{
68 struct lpc18xx_wdt_dev *lpc18xx_wdt = watchdog_get_drvdata(wdt_dev);
69 unsigned long flags;
70
71 /*
72 * An abort condition will occur if an interrupt happens during the feed
73 * sequence.
74 */
75 spin_lock_irqsave(&lpc18xx_wdt->lock, flags);
76 writel(LPC18XX_WDT_FEED_MAGIC1, lpc18xx_wdt->base + LPC18XX_WDT_FEED);
77 writel(LPC18XX_WDT_FEED_MAGIC2, lpc18xx_wdt->base + LPC18XX_WDT_FEED);
78 spin_unlock_irqrestore(&lpc18xx_wdt->lock, flags);
79
80 return 0;
81}
82
83static void lpc18xx_wdt_timer_feed(unsigned long data)
84{
85 struct watchdog_device *wdt_dev = (struct watchdog_device *)data;
86 struct lpc18xx_wdt_dev *lpc18xx_wdt = watchdog_get_drvdata(wdt_dev);
87
88 lpc18xx_wdt_feed(wdt_dev);
89
90 /* Use safe value (1/2 of real timeout) */
91 mod_timer(&lpc18xx_wdt->timer, jiffies +
92 msecs_to_jiffies((wdt_dev->timeout * MSEC_PER_SEC) / 2));
93}
94
95/*
96 * Since LPC18xx Watchdog cannot be disabled in hardware, we must keep feeding
97 * it with a timer until userspace watchdog software takes over.
98 */
99static int lpc18xx_wdt_stop(struct watchdog_device *wdt_dev)
100{
101 lpc18xx_wdt_timer_feed((unsigned long)wdt_dev);
102
103 return 0;
104}
105
106static void __lpc18xx_wdt_set_timeout(struct lpc18xx_wdt_dev *lpc18xx_wdt)
107{
108 unsigned int val;
109
110 val = DIV_ROUND_UP(lpc18xx_wdt->wdt_dev.timeout * lpc18xx_wdt->clk_rate,
111 LPC18XX_WDT_CLK_DIV);
112 writel(val, lpc18xx_wdt->base + LPC18XX_WDT_TC);
113}
114
115static int lpc18xx_wdt_set_timeout(struct watchdog_device *wdt_dev,
116 unsigned int new_timeout)
117{
118 struct lpc18xx_wdt_dev *lpc18xx_wdt = watchdog_get_drvdata(wdt_dev);
119
120 lpc18xx_wdt->wdt_dev.timeout = new_timeout;
121 __lpc18xx_wdt_set_timeout(lpc18xx_wdt);
122
123 return 0;
124}
125
126static unsigned int lpc18xx_wdt_get_timeleft(struct watchdog_device *wdt_dev)
127{
128 struct lpc18xx_wdt_dev *lpc18xx_wdt = watchdog_get_drvdata(wdt_dev);
129 unsigned int val;
130
131 val = readl(lpc18xx_wdt->base + LPC18XX_WDT_TV);
132 return (val * LPC18XX_WDT_CLK_DIV) / lpc18xx_wdt->clk_rate;
133}
134
135static int lpc18xx_wdt_start(struct watchdog_device *wdt_dev)
136{
137 struct lpc18xx_wdt_dev *lpc18xx_wdt = watchdog_get_drvdata(wdt_dev);
138 unsigned int val;
139
140 if (timer_pending(&lpc18xx_wdt->timer))
141 del_timer(&lpc18xx_wdt->timer);
142
143 val = readl(lpc18xx_wdt->base + LPC18XX_WDT_MOD);
144 val |= LPC18XX_WDT_MOD_WDEN;
145 val |= LPC18XX_WDT_MOD_WDRESET;
146 writel(val, lpc18xx_wdt->base + LPC18XX_WDT_MOD);
147
148 /*
149 * Setting the WDEN bit in the WDMOD register is not sufficient to
150 * enable the Watchdog. A valid feed sequence must be completed after
151 * setting WDEN before the Watchdog is capable of generating a reset.
152 */
153 lpc18xx_wdt_feed(wdt_dev);
154
155 return 0;
156}
157
158static struct watchdog_info lpc18xx_wdt_info = {
159 .identity = "NXP LPC18xx Watchdog",
160 .options = WDIOF_SETTIMEOUT |
161 WDIOF_KEEPALIVEPING |
162 WDIOF_MAGICCLOSE,
163};
164
165static const struct watchdog_ops lpc18xx_wdt_ops = {
166 .owner = THIS_MODULE,
167 .start = lpc18xx_wdt_start,
168 .stop = lpc18xx_wdt_stop,
169 .ping = lpc18xx_wdt_feed,
170 .set_timeout = lpc18xx_wdt_set_timeout,
171 .get_timeleft = lpc18xx_wdt_get_timeleft,
172};
173
174static int lpc18xx_wdt_restart(struct notifier_block *this, unsigned long mode,
175 void *cmd)
176{
177 struct lpc18xx_wdt_dev *lpc18xx_wdt = container_of(this,
178 struct lpc18xx_wdt_dev, restart_handler);
179 unsigned long flags;
180 int val;
181
182 /*
183 * Incorrect feed sequence causes immediate watchdog reset if enabled.
184 */
185 spin_lock_irqsave(&lpc18xx_wdt->lock, flags);
186
187 val = readl(lpc18xx_wdt->base + LPC18XX_WDT_MOD);
188 val |= LPC18XX_WDT_MOD_WDEN;
189 val |= LPC18XX_WDT_MOD_WDRESET;
190 writel(val, lpc18xx_wdt->base + LPC18XX_WDT_MOD);
191
192 writel(LPC18XX_WDT_FEED_MAGIC1, lpc18xx_wdt->base + LPC18XX_WDT_FEED);
193 writel(LPC18XX_WDT_FEED_MAGIC2, lpc18xx_wdt->base + LPC18XX_WDT_FEED);
194
195 writel(LPC18XX_WDT_FEED_MAGIC1, lpc18xx_wdt->base + LPC18XX_WDT_FEED);
196 writel(LPC18XX_WDT_FEED_MAGIC1, lpc18xx_wdt->base + LPC18XX_WDT_FEED);
197
198 spin_unlock_irqrestore(&lpc18xx_wdt->lock, flags);
199
200 return NOTIFY_OK;
201}
202
203static int lpc18xx_wdt_probe(struct platform_device *pdev)
204{
205 struct lpc18xx_wdt_dev *lpc18xx_wdt;
206 struct device *dev = &pdev->dev;
207 struct resource *res;
208 int ret;
209
210 lpc18xx_wdt = devm_kzalloc(dev, sizeof(*lpc18xx_wdt), GFP_KERNEL);
211 if (!lpc18xx_wdt)
212 return -ENOMEM;
213
214 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
215 lpc18xx_wdt->base = devm_ioremap_resource(dev, res);
216 if (IS_ERR(lpc18xx_wdt->base))
217 return PTR_ERR(lpc18xx_wdt->base);
218
219 lpc18xx_wdt->reg_clk = devm_clk_get(dev, "reg");
220 if (IS_ERR(lpc18xx_wdt->reg_clk)) {
221 dev_err(dev, "failed to get the reg clock\n");
222 return PTR_ERR(lpc18xx_wdt->reg_clk);
223 }
224
225 lpc18xx_wdt->wdt_clk = devm_clk_get(dev, "wdtclk");
226 if (IS_ERR(lpc18xx_wdt->wdt_clk)) {
227 dev_err(dev, "failed to get the wdt clock\n");
228 return PTR_ERR(lpc18xx_wdt->wdt_clk);
229 }
230
231 ret = clk_prepare_enable(lpc18xx_wdt->reg_clk);
232 if (ret) {
233 dev_err(dev, "could not prepare or enable sys clock\n");
234 return ret;
235 }
236
237 ret = clk_prepare_enable(lpc18xx_wdt->wdt_clk);
238 if (ret) {
239 dev_err(dev, "could not prepare or enable wdt clock\n");
240 goto disable_reg_clk;
241 }
242
243 /* We use the clock rate to calculate timeouts */
244 lpc18xx_wdt->clk_rate = clk_get_rate(lpc18xx_wdt->wdt_clk);
245 if (lpc18xx_wdt->clk_rate == 0) {
246 dev_err(dev, "failed to get clock rate\n");
247 ret = -EINVAL;
248 goto disable_wdt_clk;
249 }
250
251 lpc18xx_wdt->wdt_dev.info = &lpc18xx_wdt_info;
252 lpc18xx_wdt->wdt_dev.ops = &lpc18xx_wdt_ops;
253
254 lpc18xx_wdt->wdt_dev.min_timeout = DIV_ROUND_UP(LPC18XX_WDT_TC_MIN *
255 LPC18XX_WDT_CLK_DIV, lpc18xx_wdt->clk_rate);
256
257 lpc18xx_wdt->wdt_dev.max_timeout = (LPC18XX_WDT_TC_MAX *
258 LPC18XX_WDT_CLK_DIV) / lpc18xx_wdt->clk_rate;
259
260 lpc18xx_wdt->wdt_dev.timeout = min(lpc18xx_wdt->wdt_dev.max_timeout,
261 LPC18XX_WDT_DEF_TIMEOUT);
262
263 spin_lock_init(&lpc18xx_wdt->lock);
264
265 lpc18xx_wdt->wdt_dev.parent = dev;
266 watchdog_set_drvdata(&lpc18xx_wdt->wdt_dev, lpc18xx_wdt);
267
268 ret = watchdog_init_timeout(&lpc18xx_wdt->wdt_dev, heartbeat, dev);
269
270 __lpc18xx_wdt_set_timeout(lpc18xx_wdt);
271
272 setup_timer(&lpc18xx_wdt->timer, lpc18xx_wdt_timer_feed,
273 (unsigned long)&lpc18xx_wdt->wdt_dev);
274
275 watchdog_set_nowayout(&lpc18xx_wdt->wdt_dev, nowayout);
276
277 platform_set_drvdata(pdev, lpc18xx_wdt);
278
279 ret = watchdog_register_device(&lpc18xx_wdt->wdt_dev);
280 if (ret)
281 goto disable_wdt_clk;
282
283 lpc18xx_wdt->restart_handler.notifier_call = lpc18xx_wdt_restart;
284 lpc18xx_wdt->restart_handler.priority = 128;
285 ret = register_restart_handler(&lpc18xx_wdt->restart_handler);
286 if (ret)
287 dev_warn(dev, "failed to register restart handler: %d\n", ret);
288
289 return 0;
290
291disable_wdt_clk:
292 clk_disable_unprepare(lpc18xx_wdt->wdt_clk);
293disable_reg_clk:
294 clk_disable_unprepare(lpc18xx_wdt->reg_clk);
295 return ret;
296}
297
298static void lpc18xx_wdt_shutdown(struct platform_device *pdev)
299{
300 struct lpc18xx_wdt_dev *lpc18xx_wdt = platform_get_drvdata(pdev);
301
302 lpc18xx_wdt_stop(&lpc18xx_wdt->wdt_dev);
303}
304
305static int lpc18xx_wdt_remove(struct platform_device *pdev)
306{
307 struct lpc18xx_wdt_dev *lpc18xx_wdt = platform_get_drvdata(pdev);
308
309 unregister_restart_handler(&lpc18xx_wdt->restart_handler);
310
311 dev_warn(&pdev->dev, "I quit now, hardware will probably reboot!\n");
312 del_timer(&lpc18xx_wdt->timer);
313
314 watchdog_unregister_device(&lpc18xx_wdt->wdt_dev);
315 clk_disable_unprepare(lpc18xx_wdt->wdt_clk);
316 clk_disable_unprepare(lpc18xx_wdt->reg_clk);
317
318 return 0;
319}
320
321static const struct of_device_id lpc18xx_wdt_match[] = {
322 { .compatible = "nxp,lpc1850-wwdt" },
323 {}
324};
325MODULE_DEVICE_TABLE(of, lpc18xx_wdt_match);
326
327static struct platform_driver lpc18xx_wdt_driver = {
328 .driver = {
329 .name = "lpc18xx-wdt",
330 .of_match_table = lpc18xx_wdt_match,
331 },
332 .probe = lpc18xx_wdt_probe,
333 .remove = lpc18xx_wdt_remove,
334 .shutdown = lpc18xx_wdt_shutdown,
335};
336module_platform_driver(lpc18xx_wdt_driver);
337
338MODULE_AUTHOR("Ariel D'Alessandro <ariel@vanguardiasur.com.ar>");
339MODULE_DESCRIPTION("NXP LPC18xx Watchdog Timer Driver");
340MODULE_LICENSE("GPL v2");
diff --git a/drivers/watchdog/mena21_wdt.c b/drivers/watchdog/mena21_wdt.c
index d193a5e79c38..69013007dc47 100644
--- a/drivers/watchdog/mena21_wdt.c
+++ b/drivers/watchdog/mena21_wdt.c
@@ -197,6 +197,7 @@ static int a21_wdt_probe(struct platform_device *pdev)
197 watchdog_init_timeout(&a21_wdt, 30, &pdev->dev); 197 watchdog_init_timeout(&a21_wdt, 30, &pdev->dev);
198 watchdog_set_nowayout(&a21_wdt, nowayout); 198 watchdog_set_nowayout(&a21_wdt, nowayout);
199 watchdog_set_drvdata(&a21_wdt, drv); 199 watchdog_set_drvdata(&a21_wdt, drv);
200 a21_wdt.parent = &pdev->dev;
200 201
201 reset = a21_wdt_get_bootstatus(drv); 202 reset = a21_wdt_get_bootstatus(drv);
202 if (reset == 2) 203 if (reset == 2)
diff --git a/drivers/watchdog/menf21bmc_wdt.c b/drivers/watchdog/menf21bmc_wdt.c
index 59f0913c7341..3aefddebb386 100644
--- a/drivers/watchdog/menf21bmc_wdt.c
+++ b/drivers/watchdog/menf21bmc_wdt.c
@@ -130,6 +130,7 @@ static int menf21bmc_wdt_probe(struct platform_device *pdev)
130 drv_data->wdt.info = &menf21bmc_wdt_info; 130 drv_data->wdt.info = &menf21bmc_wdt_info;
131 drv_data->wdt.min_timeout = BMC_WD_TIMEOUT_MIN; 131 drv_data->wdt.min_timeout = BMC_WD_TIMEOUT_MIN;
132 drv_data->wdt.max_timeout = BMC_WD_TIMEOUT_MAX; 132 drv_data->wdt.max_timeout = BMC_WD_TIMEOUT_MAX;
133 drv_data->wdt.parent = &pdev->dev;
133 drv_data->i2c_client = i2c_client; 134 drv_data->i2c_client = i2c_client;
134 135
135 /* 136 /*
diff --git a/drivers/watchdog/mpc8xxx_wdt.c b/drivers/watchdog/mpc8xxx_wdt.c
index 689381a24887..5f2273aac37d 100644
--- a/drivers/watchdog/mpc8xxx_wdt.c
+++ b/drivers/watchdog/mpc8xxx_wdt.c
@@ -50,8 +50,12 @@ struct mpc8xxx_wdt_type {
50 bool hw_enabled; 50 bool hw_enabled;
51}; 51};
52 52
53static struct mpc8xxx_wdt __iomem *wd_base; 53struct mpc8xxx_wdt_ddata {
54static int mpc8xxx_wdt_init_late(void); 54 struct mpc8xxx_wdt __iomem *base;
55 struct watchdog_device wdd;
56 struct timer_list timer;
57 spinlock_t lock;
58};
55 59
56static u16 timeout = 0xffff; 60static u16 timeout = 0xffff;
57module_param(timeout, ushort, 0); 61module_param(timeout, ushort, 0);
@@ -68,65 +72,59 @@ module_param(nowayout, bool, 0);
68MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started " 72MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started "
69 "(default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); 73 "(default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
70 74
71/* 75static void mpc8xxx_wdt_keepalive(struct mpc8xxx_wdt_ddata *ddata)
72 * We always prescale, but if someone really doesn't want to they can set this
73 * to 0
74 */
75static int prescale = 1;
76
77static DEFINE_SPINLOCK(wdt_spinlock);
78
79static void mpc8xxx_wdt_keepalive(void)
80{ 76{
81 /* Ping the WDT */ 77 /* Ping the WDT */
82 spin_lock(&wdt_spinlock); 78 spin_lock(&ddata->lock);
83 out_be16(&wd_base->swsrr, 0x556c); 79 out_be16(&ddata->base->swsrr, 0x556c);
84 out_be16(&wd_base->swsrr, 0xaa39); 80 out_be16(&ddata->base->swsrr, 0xaa39);
85 spin_unlock(&wdt_spinlock); 81 spin_unlock(&ddata->lock);
86} 82}
87 83
88static struct watchdog_device mpc8xxx_wdt_dev;
89static void mpc8xxx_wdt_timer_ping(unsigned long arg);
90static DEFINE_TIMER(wdt_timer, mpc8xxx_wdt_timer_ping, 0,
91 (unsigned long)&mpc8xxx_wdt_dev);
92
93static void mpc8xxx_wdt_timer_ping(unsigned long arg) 84static void mpc8xxx_wdt_timer_ping(unsigned long arg)
94{ 85{
95 struct watchdog_device *w = (struct watchdog_device *)arg; 86 struct mpc8xxx_wdt_ddata *ddata = (void *)arg;
96 87
97 mpc8xxx_wdt_keepalive(); 88 mpc8xxx_wdt_keepalive(ddata);
98 /* We're pinging it twice faster than needed, just to be sure. */ 89 /* We're pinging it twice faster than needed, just to be sure. */
99 mod_timer(&wdt_timer, jiffies + HZ * w->timeout / 2); 90 mod_timer(&ddata->timer, jiffies + HZ * ddata->wdd.timeout / 2);
100} 91}
101 92
102static int mpc8xxx_wdt_start(struct watchdog_device *w) 93static int mpc8xxx_wdt_start(struct watchdog_device *w)
103{ 94{
104 u32 tmp = SWCRR_SWEN; 95 struct mpc8xxx_wdt_ddata *ddata =
96 container_of(w, struct mpc8xxx_wdt_ddata, wdd);
97
98 u32 tmp = SWCRR_SWEN | SWCRR_SWPR;
105 99
106 /* Good, fire up the show */ 100 /* Good, fire up the show */
107 if (prescale)
108 tmp |= SWCRR_SWPR;
109 if (reset) 101 if (reset)
110 tmp |= SWCRR_SWRI; 102 tmp |= SWCRR_SWRI;
111 103
112 tmp |= timeout << 16; 104 tmp |= timeout << 16;
113 105
114 out_be32(&wd_base->swcrr, tmp); 106 out_be32(&ddata->base->swcrr, tmp);
115 107
116 del_timer_sync(&wdt_timer); 108 del_timer_sync(&ddata->timer);
117 109
118 return 0; 110 return 0;
119} 111}
120 112
121static int mpc8xxx_wdt_ping(struct watchdog_device *w) 113static int mpc8xxx_wdt_ping(struct watchdog_device *w)
122{ 114{
123 mpc8xxx_wdt_keepalive(); 115 struct mpc8xxx_wdt_ddata *ddata =
116 container_of(w, struct mpc8xxx_wdt_ddata, wdd);
117
118 mpc8xxx_wdt_keepalive(ddata);
124 return 0; 119 return 0;
125} 120}
126 121
127static int mpc8xxx_wdt_stop(struct watchdog_device *w) 122static int mpc8xxx_wdt_stop(struct watchdog_device *w)
128{ 123{
129 mod_timer(&wdt_timer, jiffies); 124 struct mpc8xxx_wdt_ddata *ddata =
125 container_of(w, struct mpc8xxx_wdt_ddata, wdd);
126
127 mod_timer(&ddata->timer, jiffies);
130 return 0; 128 return 0;
131} 129}
132 130
@@ -143,53 +141,57 @@ static struct watchdog_ops mpc8xxx_wdt_ops = {
143 .stop = mpc8xxx_wdt_stop, 141 .stop = mpc8xxx_wdt_stop,
144}; 142};
145 143
146static struct watchdog_device mpc8xxx_wdt_dev = {
147 .info = &mpc8xxx_wdt_info,
148 .ops = &mpc8xxx_wdt_ops,
149};
150
151static const struct of_device_id mpc8xxx_wdt_match[];
152static int mpc8xxx_wdt_probe(struct platform_device *ofdev) 144static int mpc8xxx_wdt_probe(struct platform_device *ofdev)
153{ 145{
154 int ret; 146 int ret;
155 const struct of_device_id *match; 147 struct resource *res;
156 struct device_node *np = ofdev->dev.of_node;
157 const struct mpc8xxx_wdt_type *wdt_type; 148 const struct mpc8xxx_wdt_type *wdt_type;
149 struct mpc8xxx_wdt_ddata *ddata;
158 u32 freq = fsl_get_sys_freq(); 150 u32 freq = fsl_get_sys_freq();
159 bool enabled; 151 bool enabled;
160 unsigned int timeout_sec; 152 unsigned int timeout_sec;
161 153
162 match = of_match_device(mpc8xxx_wdt_match, &ofdev->dev); 154 wdt_type = of_device_get_match_data(&ofdev->dev);
163 if (!match) 155 if (!wdt_type)
164 return -EINVAL; 156 return -EINVAL;
165 wdt_type = match->data;
166 157
167 if (!freq || freq == -1) 158 if (!freq || freq == -1)
168 return -EINVAL; 159 return -EINVAL;
169 160
170 wd_base = of_iomap(np, 0); 161 ddata = devm_kzalloc(&ofdev->dev, sizeof(*ddata), GFP_KERNEL);
171 if (!wd_base) 162 if (!ddata)
172 return -ENOMEM; 163 return -ENOMEM;
173 164
174 enabled = in_be32(&wd_base->swcrr) & SWCRR_SWEN; 165 res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
166 ddata->base = devm_ioremap_resource(&ofdev->dev, res);
167 if (IS_ERR(ddata->base))
168 return PTR_ERR(ddata->base);
169
170 enabled = in_be32(&ddata->base->swcrr) & SWCRR_SWEN;
175 if (!enabled && wdt_type->hw_enabled) { 171 if (!enabled && wdt_type->hw_enabled) {
176 pr_info("could not be enabled in software\n"); 172 pr_info("could not be enabled in software\n");
177 ret = -ENOSYS; 173 return -ENODEV;
178 goto err_unmap;
179 } 174 }
180 175
176 spin_lock_init(&ddata->lock);
177 setup_timer(&ddata->timer, mpc8xxx_wdt_timer_ping,
178 (unsigned long)ddata);
179
180 ddata->wdd.info = &mpc8xxx_wdt_info,
181 ddata->wdd.ops = &mpc8xxx_wdt_ops,
182
181 /* Calculate the timeout in seconds */ 183 /* Calculate the timeout in seconds */
182 if (prescale) 184 timeout_sec = (timeout * wdt_type->prescaler) / freq;
183 timeout_sec = (timeout * wdt_type->prescaler) / freq; 185
184 else 186 ddata->wdd.timeout = timeout_sec;
185 timeout_sec = timeout / freq; 187
186 188 watchdog_set_nowayout(&ddata->wdd, nowayout);
187 mpc8xxx_wdt_dev.timeout = timeout_sec; 189
188#ifdef MODULE 190 ret = watchdog_register_device(&ddata->wdd);
189 ret = mpc8xxx_wdt_init_late(); 191 if (ret) {
190 if (ret) 192 pr_err("cannot register watchdog device (err=%d)\n", ret);
191 goto err_unmap; 193 return ret;
192#endif 194 }
193 195
194 pr_info("WDT driver for MPC8xxx initialized. mode:%s timeout=%d (%d seconds)\n", 196 pr_info("WDT driver for MPC8xxx initialized. mode:%s timeout=%d (%d seconds)\n",
195 reset ? "reset" : "interrupt", timeout, timeout_sec); 197 reset ? "reset" : "interrupt", timeout, timeout_sec);
@@ -200,21 +202,20 @@ static int mpc8xxx_wdt_probe(struct platform_device *ofdev)
200 * userspace handles it. 202 * userspace handles it.
201 */ 203 */
202 if (enabled) 204 if (enabled)
203 mod_timer(&wdt_timer, jiffies); 205 mod_timer(&ddata->timer, jiffies);
206
207 platform_set_drvdata(ofdev, ddata);
204 return 0; 208 return 0;
205err_unmap:
206 iounmap(wd_base);
207 wd_base = NULL;
208 return ret;
209} 209}
210 210
211static int mpc8xxx_wdt_remove(struct platform_device *ofdev) 211static int mpc8xxx_wdt_remove(struct platform_device *ofdev)
212{ 212{
213 struct mpc8xxx_wdt_ddata *ddata = platform_get_drvdata(ofdev);
214
213 pr_crit("Watchdog removed, expect the %s soon!\n", 215 pr_crit("Watchdog removed, expect the %s soon!\n",
214 reset ? "reset" : "machine check exception"); 216 reset ? "reset" : "machine check exception");
215 del_timer_sync(&wdt_timer); 217 del_timer_sync(&ddata->timer);
216 watchdog_unregister_device(&mpc8xxx_wdt_dev); 218 watchdog_unregister_device(&ddata->wdd);
217 iounmap(wd_base);
218 219
219 return 0; 220 return 0;
220} 221}
@@ -253,31 +254,6 @@ static struct platform_driver mpc8xxx_wdt_driver = {
253 }, 254 },
254}; 255};
255 256
256/*
257 * We do wdt initialization in two steps: arch_initcall probes the wdt
258 * very early to start pinging the watchdog (misc devices are not yet
259 * available), and later module_init() just registers the misc device.
260 */
261static int mpc8xxx_wdt_init_late(void)
262{
263 int ret;
264
265 if (!wd_base)
266 return -ENODEV;
267
268 watchdog_set_nowayout(&mpc8xxx_wdt_dev, nowayout);
269
270 ret = watchdog_register_device(&mpc8xxx_wdt_dev);
271 if (ret) {
272 pr_err("cannot register watchdog device (err=%d)\n", ret);
273 return ret;
274 }
275 return 0;
276}
277#ifndef MODULE
278module_init(mpc8xxx_wdt_init_late);
279#endif
280
281static int __init mpc8xxx_wdt_init(void) 257static int __init mpc8xxx_wdt_init(void)
282{ 258{
283 return platform_driver_register(&mpc8xxx_wdt_driver); 259 return platform_driver_register(&mpc8xxx_wdt_driver);
diff --git a/drivers/watchdog/mtk_wdt.c b/drivers/watchdog/mtk_wdt.c
index 938b987de551..6ad9df948711 100644
--- a/drivers/watchdog/mtk_wdt.c
+++ b/drivers/watchdog/mtk_wdt.c
@@ -210,6 +210,14 @@ static int mtk_wdt_probe(struct platform_device *pdev)
210 return 0; 210 return 0;
211} 211}
212 212
213static void mtk_wdt_shutdown(struct platform_device *pdev)
214{
215 struct mtk_wdt_dev *mtk_wdt = platform_get_drvdata(pdev);
216
217 if (watchdog_active(&mtk_wdt->wdt_dev))
218 mtk_wdt_stop(&mtk_wdt->wdt_dev);
219}
220
213static int mtk_wdt_remove(struct platform_device *pdev) 221static int mtk_wdt_remove(struct platform_device *pdev)
214{ 222{
215 struct mtk_wdt_dev *mtk_wdt = platform_get_drvdata(pdev); 223 struct mtk_wdt_dev *mtk_wdt = platform_get_drvdata(pdev);
@@ -221,17 +229,48 @@ static int mtk_wdt_remove(struct platform_device *pdev)
221 return 0; 229 return 0;
222} 230}
223 231
232#ifdef CONFIG_PM_SLEEP
233static int mtk_wdt_suspend(struct device *dev)
234{
235 struct mtk_wdt_dev *mtk_wdt = dev_get_drvdata(dev);
236
237 if (watchdog_active(&mtk_wdt->wdt_dev))
238 mtk_wdt_stop(&mtk_wdt->wdt_dev);
239
240 return 0;
241}
242
243static int mtk_wdt_resume(struct device *dev)
244{
245 struct mtk_wdt_dev *mtk_wdt = dev_get_drvdata(dev);
246
247 if (watchdog_active(&mtk_wdt->wdt_dev)) {
248 mtk_wdt_start(&mtk_wdt->wdt_dev);
249 mtk_wdt_ping(&mtk_wdt->wdt_dev);
250 }
251
252 return 0;
253}
254#endif
255
224static const struct of_device_id mtk_wdt_dt_ids[] = { 256static const struct of_device_id mtk_wdt_dt_ids[] = {
225 { .compatible = "mediatek,mt6589-wdt" }, 257 { .compatible = "mediatek,mt6589-wdt" },
226 { /* sentinel */ } 258 { /* sentinel */ }
227}; 259};
228MODULE_DEVICE_TABLE(of, mtk_wdt_dt_ids); 260MODULE_DEVICE_TABLE(of, mtk_wdt_dt_ids);
229 261
262static const struct dev_pm_ops mtk_wdt_pm_ops = {
263 SET_SYSTEM_SLEEP_PM_OPS(mtk_wdt_suspend,
264 mtk_wdt_resume)
265};
266
230static struct platform_driver mtk_wdt_driver = { 267static struct platform_driver mtk_wdt_driver = {
231 .probe = mtk_wdt_probe, 268 .probe = mtk_wdt_probe,
232 .remove = mtk_wdt_remove, 269 .remove = mtk_wdt_remove,
270 .shutdown = mtk_wdt_shutdown,
233 .driver = { 271 .driver = {
234 .name = DRV_NAME, 272 .name = DRV_NAME,
273 .pm = &mtk_wdt_pm_ops,
235 .of_match_table = mtk_wdt_dt_ids, 274 .of_match_table = mtk_wdt_dt_ids,
236 }, 275 },
237}; 276};
diff --git a/drivers/watchdog/nv_tco.c b/drivers/watchdog/nv_tco.c
index c028454be66c..bd917bb757b8 100644
--- a/drivers/watchdog/nv_tco.c
+++ b/drivers/watchdog/nv_tco.c
@@ -294,6 +294,8 @@ static const struct pci_device_id tco_pci_tbl[] = {
294 PCI_ANY_ID, PCI_ANY_ID, }, 294 PCI_ANY_ID, PCI_ANY_ID, },
295 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SMBUS, 295 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SMBUS,
296 PCI_ANY_ID, PCI_ANY_ID, }, 296 PCI_ANY_ID, PCI_ANY_ID, },
297 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP79_SMBUS,
298 PCI_ANY_ID, PCI_ANY_ID, },
297 { 0, }, /* End of list */ 299 { 0, }, /* End of list */
298}; 300};
299MODULE_DEVICE_TABLE(pci, tco_pci_tbl); 301MODULE_DEVICE_TABLE(pci, tco_pci_tbl);
diff --git a/drivers/watchdog/omap_wdt.c b/drivers/watchdog/omap_wdt.c
index de911c7e477c..d96bee017fd3 100644
--- a/drivers/watchdog/omap_wdt.c
+++ b/drivers/watchdog/omap_wdt.c
@@ -253,6 +253,7 @@ static int omap_wdt_probe(struct platform_device *pdev)
253 wdev->wdog.ops = &omap_wdt_ops; 253 wdev->wdog.ops = &omap_wdt_ops;
254 wdev->wdog.min_timeout = TIMER_MARGIN_MIN; 254 wdev->wdog.min_timeout = TIMER_MARGIN_MIN;
255 wdev->wdog.max_timeout = TIMER_MARGIN_MAX; 255 wdev->wdog.max_timeout = TIMER_MARGIN_MAX;
256 wdev->wdog.parent = &pdev->dev;
256 257
257 if (watchdog_init_timeout(&wdev->wdog, timer_margin, &pdev->dev) < 0) 258 if (watchdog_init_timeout(&wdev->wdog, timer_margin, &pdev->dev) < 0)
258 wdev->wdog.timeout = TIMER_MARGIN_DEFAULT; 259 wdev->wdog.timeout = TIMER_MARGIN_DEFAULT;
diff --git a/drivers/watchdog/orion_wdt.c b/drivers/watchdog/orion_wdt.c
index ef0c628d5037..c6b8f4a43bde 100644
--- a/drivers/watchdog/orion_wdt.c
+++ b/drivers/watchdog/orion_wdt.c
@@ -567,6 +567,7 @@ static int orion_wdt_probe(struct platform_device *pdev)
567 567
568 dev->wdt.timeout = wdt_max_duration; 568 dev->wdt.timeout = wdt_max_duration;
569 dev->wdt.max_timeout = wdt_max_duration; 569 dev->wdt.max_timeout = wdt_max_duration;
570 dev->wdt.parent = &pdev->dev;
570 watchdog_init_timeout(&dev->wdt, heartbeat, &pdev->dev); 571 watchdog_init_timeout(&dev->wdt, heartbeat, &pdev->dev);
571 572
572 platform_set_drvdata(pdev, &dev->wdt); 573 platform_set_drvdata(pdev, &dev->wdt);
diff --git a/drivers/watchdog/pnx4008_wdt.c b/drivers/watchdog/pnx4008_wdt.c
index b9c6049c3e78..4224b3ec83a5 100644
--- a/drivers/watchdog/pnx4008_wdt.c
+++ b/drivers/watchdog/pnx4008_wdt.c
@@ -167,6 +167,7 @@ static int pnx4008_wdt_probe(struct platform_device *pdev)
167 167
168 pnx4008_wdd.bootstatus = (readl(WDTIM_RES(wdt_base)) & WDOG_RESET) ? 168 pnx4008_wdd.bootstatus = (readl(WDTIM_RES(wdt_base)) & WDOG_RESET) ?
169 WDIOF_CARDRESET : 0; 169 WDIOF_CARDRESET : 0;
170 pnx4008_wdd.parent = &pdev->dev;
170 watchdog_set_nowayout(&pnx4008_wdd, nowayout); 171 watchdog_set_nowayout(&pnx4008_wdd, nowayout);
171 172
172 pnx4008_wdt_stop(&pnx4008_wdd); /* disable for now */ 173 pnx4008_wdt_stop(&pnx4008_wdd); /* disable for now */
diff --git a/drivers/watchdog/qcom-wdt.c b/drivers/watchdog/qcom-wdt.c
index aa03ca8f2d9b..773dcfaee7b2 100644
--- a/drivers/watchdog/qcom-wdt.c
+++ b/drivers/watchdog/qcom-wdt.c
@@ -171,6 +171,7 @@ static int qcom_wdt_probe(struct platform_device *pdev)
171 wdt->wdd.ops = &qcom_wdt_ops; 171 wdt->wdd.ops = &qcom_wdt_ops;
172 wdt->wdd.min_timeout = 1; 172 wdt->wdd.min_timeout = 1;
173 wdt->wdd.max_timeout = 0x10000000U / wdt->rate; 173 wdt->wdd.max_timeout = 0x10000000U / wdt->rate;
174 wdt->wdd.parent = &pdev->dev;
174 175
175 /* 176 /*
176 * If 'timeout-sec' unspecified in devicetree, assume a 30 second 177 * If 'timeout-sec' unspecified in devicetree, assume a 30 second
diff --git a/drivers/watchdog/retu_wdt.c b/drivers/watchdog/retu_wdt.c
index b7c68e275aeb..39cd51df2ffc 100644
--- a/drivers/watchdog/retu_wdt.c
+++ b/drivers/watchdog/retu_wdt.c
@@ -127,6 +127,7 @@ static int retu_wdt_probe(struct platform_device *pdev)
127 retu_wdt->timeout = RETU_WDT_MAX_TIMER; 127 retu_wdt->timeout = RETU_WDT_MAX_TIMER;
128 retu_wdt->min_timeout = 0; 128 retu_wdt->min_timeout = 0;
129 retu_wdt->max_timeout = RETU_WDT_MAX_TIMER; 129 retu_wdt->max_timeout = RETU_WDT_MAX_TIMER;
130 retu_wdt->parent = &pdev->dev;
130 131
131 watchdog_set_drvdata(retu_wdt, wdev); 132 watchdog_set_drvdata(retu_wdt, wdev);
132 watchdog_set_nowayout(retu_wdt, nowayout); 133 watchdog_set_nowayout(retu_wdt, nowayout);
diff --git a/drivers/watchdog/rt2880_wdt.c b/drivers/watchdog/rt2880_wdt.c
index a6f7e2e29beb..1967919ae743 100644
--- a/drivers/watchdog/rt2880_wdt.c
+++ b/drivers/watchdog/rt2880_wdt.c
@@ -161,6 +161,7 @@ static int rt288x_wdt_probe(struct platform_device *pdev)
161 rt288x_wdt_dev.dev = &pdev->dev; 161 rt288x_wdt_dev.dev = &pdev->dev;
162 rt288x_wdt_dev.bootstatus = rt288x_wdt_bootcause(); 162 rt288x_wdt_dev.bootstatus = rt288x_wdt_bootcause();
163 rt288x_wdt_dev.max_timeout = (0xfffful / rt288x_wdt_freq); 163 rt288x_wdt_dev.max_timeout = (0xfffful / rt288x_wdt_freq);
164 rt288x_wdt_dev.parent = &pdev->dev;
164 165
165 watchdog_init_timeout(&rt288x_wdt_dev, rt288x_wdt_dev.max_timeout, 166 watchdog_init_timeout(&rt288x_wdt_dev, rt288x_wdt_dev.max_timeout,
166 &pdev->dev); 167 &pdev->dev);
diff --git a/drivers/watchdog/s3c2410_wdt.c b/drivers/watchdog/s3c2410_wdt.c
index e89ae027c91d..d781000c7825 100644
--- a/drivers/watchdog/s3c2410_wdt.c
+++ b/drivers/watchdog/s3c2410_wdt.c
@@ -607,6 +607,7 @@ static int s3c2410wdt_probe(struct platform_device *pdev)
607 watchdog_set_nowayout(&wdt->wdt_device, nowayout); 607 watchdog_set_nowayout(&wdt->wdt_device, nowayout);
608 608
609 wdt->wdt_device.bootstatus = s3c2410wdt_get_bootstatus(wdt); 609 wdt->wdt_device.bootstatus = s3c2410wdt_get_bootstatus(wdt);
610 wdt->wdt_device.parent = &pdev->dev;
610 611
611 ret = watchdog_register_device(&wdt->wdt_device); 612 ret = watchdog_register_device(&wdt->wdt_device);
612 if (ret) { 613 if (ret) {
diff --git a/drivers/watchdog/sama5d4_wdt.c b/drivers/watchdog/sama5d4_wdt.c
new file mode 100644
index 000000000000..a49634cdc1cc
--- /dev/null
+++ b/drivers/watchdog/sama5d4_wdt.c
@@ -0,0 +1,280 @@
1/*
2 * Driver for Atmel SAMA5D4 Watchdog Timer
3 *
4 * Copyright (C) 2015 Atmel Corporation
5 *
6 * Licensed under GPLv2.
7 */
8
9#include <linux/interrupt.h>
10#include <linux/io.h>
11#include <linux/kernel.h>
12#include <linux/module.h>
13#include <linux/of.h>
14#include <linux/of_irq.h>
15#include <linux/platform_device.h>
16#include <linux/reboot.h>
17#include <linux/watchdog.h>
18
19#include "at91sam9_wdt.h"
20
21/* minimum and maximum watchdog timeout, in seconds */
22#define MIN_WDT_TIMEOUT 1
23#define MAX_WDT_TIMEOUT 16
24#define WDT_DEFAULT_TIMEOUT MAX_WDT_TIMEOUT
25
26#define WDT_SEC2TICKS(s) ((s) ? (((s) << 8) - 1) : 0)
27
28struct sama5d4_wdt {
29 struct watchdog_device wdd;
30 void __iomem *reg_base;
31 u32 config;
32};
33
34static int wdt_timeout = WDT_DEFAULT_TIMEOUT;
35static bool nowayout = WATCHDOG_NOWAYOUT;
36
37module_param(wdt_timeout, int, 0);
38MODULE_PARM_DESC(wdt_timeout,
39 "Watchdog timeout in seconds. (default = "
40 __MODULE_STRING(WDT_DEFAULT_TIMEOUT) ")");
41
42module_param(nowayout, bool, 0);
43MODULE_PARM_DESC(nowayout,
44 "Watchdog cannot be stopped once started (default="
45 __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
46
47#define wdt_read(wdt, field) \
48 readl_relaxed((wdt)->reg_base + (field))
49
50#define wdt_write(wtd, field, val) \
51 writel_relaxed((val), (wdt)->reg_base + (field))
52
53static int sama5d4_wdt_start(struct watchdog_device *wdd)
54{
55 struct sama5d4_wdt *wdt = watchdog_get_drvdata(wdd);
56 u32 reg;
57
58 reg = wdt_read(wdt, AT91_WDT_MR);
59 reg &= ~AT91_WDT_WDDIS;
60 wdt_write(wdt, AT91_WDT_MR, reg);
61
62 return 0;
63}
64
65static int sama5d4_wdt_stop(struct watchdog_device *wdd)
66{
67 struct sama5d4_wdt *wdt = watchdog_get_drvdata(wdd);
68 u32 reg;
69
70 reg = wdt_read(wdt, AT91_WDT_MR);
71 reg |= AT91_WDT_WDDIS;
72 wdt_write(wdt, AT91_WDT_MR, reg);
73
74 return 0;
75}
76
77static int sama5d4_wdt_ping(struct watchdog_device *wdd)
78{
79 struct sama5d4_wdt *wdt = watchdog_get_drvdata(wdd);
80
81 wdt_write(wdt, AT91_WDT_CR, AT91_WDT_KEY | AT91_WDT_WDRSTT);
82
83 return 0;
84}
85
86static int sama5d4_wdt_set_timeout(struct watchdog_device *wdd,
87 unsigned int timeout)
88{
89 struct sama5d4_wdt *wdt = watchdog_get_drvdata(wdd);
90 u32 value = WDT_SEC2TICKS(timeout);
91 u32 reg;
92
93 reg = wdt_read(wdt, AT91_WDT_MR);
94 reg &= ~AT91_WDT_WDV;
95 reg &= ~AT91_WDT_WDD;
96 reg |= AT91_WDT_SET_WDV(value);
97 reg |= AT91_WDT_SET_WDD(value);
98 wdt_write(wdt, AT91_WDT_MR, reg);
99
100 wdd->timeout = timeout;
101
102 return 0;
103}
104
105static const struct watchdog_info sama5d4_wdt_info = {
106 .options = WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING,
107 .identity = "Atmel SAMA5D4 Watchdog",
108};
109
110static struct watchdog_ops sama5d4_wdt_ops = {
111 .owner = THIS_MODULE,
112 .start = sama5d4_wdt_start,
113 .stop = sama5d4_wdt_stop,
114 .ping = sama5d4_wdt_ping,
115 .set_timeout = sama5d4_wdt_set_timeout,
116};
117
118static irqreturn_t sama5d4_wdt_irq_handler(int irq, void *dev_id)
119{
120 struct sama5d4_wdt *wdt = platform_get_drvdata(dev_id);
121
122 if (wdt_read(wdt, AT91_WDT_SR)) {
123 pr_crit("Atmel Watchdog Software Reset\n");
124 emergency_restart();
125 pr_crit("Reboot didn't succeed\n");
126 }
127
128 return IRQ_HANDLED;
129}
130
131static int of_sama5d4_wdt_init(struct device_node *np, struct sama5d4_wdt *wdt)
132{
133 const char *tmp;
134
135 wdt->config = AT91_WDT_WDDIS;
136
137 if (!of_property_read_string(np, "atmel,watchdog-type", &tmp) &&
138 !strcmp(tmp, "software"))
139 wdt->config |= AT91_WDT_WDFIEN;
140 else
141 wdt->config |= AT91_WDT_WDRSTEN;
142
143 if (of_property_read_bool(np, "atmel,idle-halt"))
144 wdt->config |= AT91_WDT_WDIDLEHLT;
145
146 if (of_property_read_bool(np, "atmel,dbg-halt"))
147 wdt->config |= AT91_WDT_WDDBGHLT;
148
149 return 0;
150}
151
152static int sama5d4_wdt_init(struct sama5d4_wdt *wdt)
153{
154 struct watchdog_device *wdd = &wdt->wdd;
155 u32 value = WDT_SEC2TICKS(wdd->timeout);
156 u32 reg;
157
158 /*
159 * Because the fields WDV and WDD must not be modified when the WDDIS
160 * bit is set, so clear the WDDIS bit before writing the WDT_MR.
161 */
162 reg = wdt_read(wdt, AT91_WDT_MR);
163 reg &= ~AT91_WDT_WDDIS;
164 wdt_write(wdt, AT91_WDT_MR, reg);
165
166 reg = wdt->config;
167 reg |= AT91_WDT_SET_WDD(value);
168 reg |= AT91_WDT_SET_WDV(value);
169
170 wdt_write(wdt, AT91_WDT_MR, reg);
171
172 return 0;
173}
174
175static int sama5d4_wdt_probe(struct platform_device *pdev)
176{
177 struct watchdog_device *wdd;
178 struct sama5d4_wdt *wdt;
179 struct resource *res;
180 void __iomem *regs;
181 u32 irq = 0;
182 int ret;
183
184 wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL);
185 if (!wdt)
186 return -ENOMEM;
187
188 wdd = &wdt->wdd;
189 wdd->timeout = wdt_timeout;
190 wdd->info = &sama5d4_wdt_info;
191 wdd->ops = &sama5d4_wdt_ops;
192 wdd->min_timeout = MIN_WDT_TIMEOUT;
193 wdd->max_timeout = MAX_WDT_TIMEOUT;
194
195 watchdog_set_drvdata(wdd, wdt);
196
197 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
198 regs = devm_ioremap_resource(&pdev->dev, res);
199 if (IS_ERR(regs))
200 return PTR_ERR(regs);
201
202 wdt->reg_base = regs;
203
204 if (pdev->dev.of_node) {
205 irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
206 if (!irq)
207 dev_warn(&pdev->dev, "failed to get IRQ from DT\n");
208
209 ret = of_sama5d4_wdt_init(pdev->dev.of_node, wdt);
210 if (ret)
211 return ret;
212 }
213
214 if ((wdt->config & AT91_WDT_WDFIEN) && irq) {
215 ret = devm_request_irq(&pdev->dev, irq, sama5d4_wdt_irq_handler,
216 IRQF_SHARED | IRQF_IRQPOLL |
217 IRQF_NO_SUSPEND, pdev->name, pdev);
218 if (ret) {
219 dev_err(&pdev->dev,
220 "cannot register interrupt handler\n");
221 return ret;
222 }
223 }
224
225 ret = watchdog_init_timeout(wdd, wdt_timeout, &pdev->dev);
226 if (ret) {
227 dev_err(&pdev->dev, "unable to set timeout value\n");
228 return ret;
229 }
230
231 ret = sama5d4_wdt_init(wdt);
232 if (ret)
233 return ret;
234
235 watchdog_set_nowayout(wdd, nowayout);
236
237 ret = watchdog_register_device(wdd);
238 if (ret) {
239 dev_err(&pdev->dev, "failed to register watchdog device\n");
240 return ret;
241 }
242
243 platform_set_drvdata(pdev, wdt);
244
245 dev_info(&pdev->dev, "initialized (timeout = %d sec, nowayout = %d)\n",
246 wdt_timeout, nowayout);
247
248 return 0;
249}
250
251static int sama5d4_wdt_remove(struct platform_device *pdev)
252{
253 struct sama5d4_wdt *wdt = platform_get_drvdata(pdev);
254
255 sama5d4_wdt_stop(&wdt->wdd);
256
257 watchdog_unregister_device(&wdt->wdd);
258
259 return 0;
260}
261
262static const struct of_device_id sama5d4_wdt_of_match[] = {
263 { .compatible = "atmel,sama5d4-wdt", },
264 { }
265};
266MODULE_DEVICE_TABLE(of, sama5d4_wdt_of_match);
267
268static struct platform_driver sama5d4_wdt_driver = {
269 .probe = sama5d4_wdt_probe,
270 .remove = sama5d4_wdt_remove,
271 .driver = {
272 .name = "sama5d4_wdt",
273 .of_match_table = sama5d4_wdt_of_match,
274 }
275};
276module_platform_driver(sama5d4_wdt_driver);
277
278MODULE_AUTHOR("Atmel Corporation");
279MODULE_DESCRIPTION("Atmel SAMA5D4 Watchdog Timer driver");
280MODULE_LICENSE("GPL v2");
diff --git a/drivers/watchdog/shwdt.c b/drivers/watchdog/shwdt.c
index 567458b137a6..f90812170657 100644
--- a/drivers/watchdog/shwdt.c
+++ b/drivers/watchdog/shwdt.c
@@ -252,6 +252,7 @@ static int sh_wdt_probe(struct platform_device *pdev)
252 252
253 watchdog_set_nowayout(&sh_wdt_dev, nowayout); 253 watchdog_set_nowayout(&sh_wdt_dev, nowayout);
254 watchdog_set_drvdata(&sh_wdt_dev, wdt); 254 watchdog_set_drvdata(&sh_wdt_dev, wdt);
255 sh_wdt_dev.parent = &pdev->dev;
255 256
256 spin_lock_init(&wdt->lock); 257 spin_lock_init(&wdt->lock);
257 258
diff --git a/drivers/watchdog/sirfsoc_wdt.c b/drivers/watchdog/sirfsoc_wdt.c
index 42fa5c0c518a..d0578ab2e636 100644
--- a/drivers/watchdog/sirfsoc_wdt.c
+++ b/drivers/watchdog/sirfsoc_wdt.c
@@ -154,6 +154,7 @@ static int sirfsoc_wdt_probe(struct platform_device *pdev)
154 154
155 watchdog_init_timeout(&sirfsoc_wdd, timeout, &pdev->dev); 155 watchdog_init_timeout(&sirfsoc_wdd, timeout, &pdev->dev);
156 watchdog_set_nowayout(&sirfsoc_wdd, nowayout); 156 watchdog_set_nowayout(&sirfsoc_wdd, nowayout);
157 sirfsoc_wdd.parent = &pdev->dev;
157 158
158 ret = watchdog_register_device(&sirfsoc_wdd); 159 ret = watchdog_register_device(&sirfsoc_wdd);
159 if (ret) 160 if (ret)
diff --git a/drivers/watchdog/sp805_wdt.c b/drivers/watchdog/sp805_wdt.c
index 4e7fec36f5c3..01d816251302 100644
--- a/drivers/watchdog/sp805_wdt.c
+++ b/drivers/watchdog/sp805_wdt.c
@@ -226,6 +226,7 @@ sp805_wdt_probe(struct amba_device *adev, const struct amba_id *id)
226 wdt->adev = adev; 226 wdt->adev = adev;
227 wdt->wdd.info = &wdt_info; 227 wdt->wdd.info = &wdt_info;
228 wdt->wdd.ops = &wdt_ops; 228 wdt->wdd.ops = &wdt_ops;
229 wdt->wdd.parent = &adev->dev;
229 230
230 spin_lock_init(&wdt->lock); 231 spin_lock_init(&wdt->lock);
231 watchdog_set_nowayout(&wdt->wdd, nowayout); 232 watchdog_set_nowayout(&wdt->wdd, nowayout);
diff --git a/drivers/watchdog/st_lpc_wdt.c b/drivers/watchdog/st_lpc_wdt.c
index 6785afdc0fca..14e9badf2bfa 100644
--- a/drivers/watchdog/st_lpc_wdt.c
+++ b/drivers/watchdog/st_lpc_wdt.c
@@ -241,6 +241,7 @@ static int st_wdog_probe(struct platform_device *pdev)
241 return -EINVAL; 241 return -EINVAL;
242 } 242 }
243 st_wdog_dev.max_timeout = 0xFFFFFFFF / st_wdog->clkrate; 243 st_wdog_dev.max_timeout = 0xFFFFFFFF / st_wdog->clkrate;
244 st_wdog_dev.parent = &pdev->dev;
244 245
245 ret = clk_prepare_enable(clk); 246 ret = clk_prepare_enable(clk);
246 if (ret) { 247 if (ret) {
diff --git a/drivers/watchdog/stmp3xxx_rtc_wdt.c b/drivers/watchdog/stmp3xxx_rtc_wdt.c
index e7f0d5b60d3d..3ee6128a540e 100644
--- a/drivers/watchdog/stmp3xxx_rtc_wdt.c
+++ b/drivers/watchdog/stmp3xxx_rtc_wdt.c
@@ -76,6 +76,7 @@ static int stmp3xxx_wdt_probe(struct platform_device *pdev)
76 watchdog_set_drvdata(&stmp3xxx_wdd, &pdev->dev); 76 watchdog_set_drvdata(&stmp3xxx_wdd, &pdev->dev);
77 77
78 stmp3xxx_wdd.timeout = clamp_t(unsigned, heartbeat, 1, STMP3XXX_MAX_TIMEOUT); 78 stmp3xxx_wdd.timeout = clamp_t(unsigned, heartbeat, 1, STMP3XXX_MAX_TIMEOUT);
79 stmp3xxx_wdd.parent = &pdev->dev;
79 80
80 ret = watchdog_register_device(&stmp3xxx_wdd); 81 ret = watchdog_register_device(&stmp3xxx_wdd);
81 if (ret < 0) { 82 if (ret < 0) {
diff --git a/drivers/watchdog/sunxi_wdt.c b/drivers/watchdog/sunxi_wdt.c
index a29afb37c48c..47bd8a14d01f 100644
--- a/drivers/watchdog/sunxi_wdt.c
+++ b/drivers/watchdog/sunxi_wdt.c
@@ -184,7 +184,7 @@ static int sunxi_wdt_start(struct watchdog_device *wdt_dev)
184 /* Set system reset function */ 184 /* Set system reset function */
185 reg = readl(wdt_base + regs->wdt_cfg); 185 reg = readl(wdt_base + regs->wdt_cfg);
186 reg &= ~(regs->wdt_reset_mask); 186 reg &= ~(regs->wdt_reset_mask);
187 reg |= ~(regs->wdt_reset_val); 187 reg |= regs->wdt_reset_val;
188 writel(reg, wdt_base + regs->wdt_cfg); 188 writel(reg, wdt_base + regs->wdt_cfg);
189 189
190 /* Enable watchdog */ 190 /* Enable watchdog */
diff --git a/drivers/watchdog/tegra_wdt.c b/drivers/watchdog/tegra_wdt.c
index 30451ea46902..7f97cdd53f29 100644
--- a/drivers/watchdog/tegra_wdt.c
+++ b/drivers/watchdog/tegra_wdt.c
@@ -218,6 +218,7 @@ static int tegra_wdt_probe(struct platform_device *pdev)
218 wdd->ops = &tegra_wdt_ops; 218 wdd->ops = &tegra_wdt_ops;
219 wdd->min_timeout = MIN_WDT_TIMEOUT; 219 wdd->min_timeout = MIN_WDT_TIMEOUT;
220 wdd->max_timeout = MAX_WDT_TIMEOUT; 220 wdd->max_timeout = MAX_WDT_TIMEOUT;
221 wdd->parent = &pdev->dev;
221 222
222 watchdog_set_drvdata(wdd, wdt); 223 watchdog_set_drvdata(wdd, wdt);
223 224
diff --git a/drivers/watchdog/twl4030_wdt.c b/drivers/watchdog/twl4030_wdt.c
index 2c1db6fa9a27..9bf3cc0f3961 100644
--- a/drivers/watchdog/twl4030_wdt.c
+++ b/drivers/watchdog/twl4030_wdt.c
@@ -83,6 +83,7 @@ static int twl4030_wdt_probe(struct platform_device *pdev)
83 wdt->timeout = 30; 83 wdt->timeout = 30;
84 wdt->min_timeout = 1; 84 wdt->min_timeout = 1;
85 wdt->max_timeout = 30; 85 wdt->max_timeout = 30;
86 wdt->parent = &pdev->dev;
86 87
87 watchdog_set_nowayout(wdt, nowayout); 88 watchdog_set_nowayout(wdt, nowayout);
88 platform_set_drvdata(pdev, wdt); 89 platform_set_drvdata(pdev, wdt);
diff --git a/drivers/watchdog/txx9wdt.c b/drivers/watchdog/txx9wdt.c
index 7f615933d31a..c2da880292bc 100644
--- a/drivers/watchdog/txx9wdt.c
+++ b/drivers/watchdog/txx9wdt.c
@@ -131,6 +131,7 @@ static int __init txx9wdt_probe(struct platform_device *dev)
131 txx9wdt.timeout = timeout; 131 txx9wdt.timeout = timeout;
132 txx9wdt.min_timeout = 1; 132 txx9wdt.min_timeout = 1;
133 txx9wdt.max_timeout = WD_MAX_TIMEOUT; 133 txx9wdt.max_timeout = WD_MAX_TIMEOUT;
134 txx9wdt.parent = &dev->dev;
134 watchdog_set_nowayout(&txx9wdt, nowayout); 135 watchdog_set_nowayout(&txx9wdt, nowayout);
135 136
136 ret = watchdog_register_device(&txx9wdt); 137 ret = watchdog_register_device(&txx9wdt);
diff --git a/drivers/watchdog/ux500_wdt.c b/drivers/watchdog/ux500_wdt.c
index 9de09ab00838..37c084353cce 100644
--- a/drivers/watchdog/ux500_wdt.c
+++ b/drivers/watchdog/ux500_wdt.c
@@ -96,6 +96,7 @@ static int ux500_wdt_probe(struct platform_device *pdev)
96 ux500_wdt.max_timeout = WATCHDOG_MAX28; 96 ux500_wdt.max_timeout = WATCHDOG_MAX28;
97 } 97 }
98 98
99 ux500_wdt.parent = &pdev->dev;
99 watchdog_set_nowayout(&ux500_wdt, nowayout); 100 watchdog_set_nowayout(&ux500_wdt, nowayout);
100 101
101 /* disable auto off on sleep */ 102 /* disable auto off on sleep */
diff --git a/drivers/watchdog/via_wdt.c b/drivers/watchdog/via_wdt.c
index 56369c4f1961..5f9cbc37520d 100644
--- a/drivers/watchdog/via_wdt.c
+++ b/drivers/watchdog/via_wdt.c
@@ -206,6 +206,7 @@ static int wdt_probe(struct pci_dev *pdev,
206 timeout = WDT_TIMEOUT; 206 timeout = WDT_TIMEOUT;
207 207
208 wdt_dev.timeout = timeout; 208 wdt_dev.timeout = timeout;
209 wdt_dev.parent = &pdev->dev;
209 watchdog_set_nowayout(&wdt_dev, nowayout); 210 watchdog_set_nowayout(&wdt_dev, nowayout);
210 if (readl(wdt_mem) & VIA_WDT_FIRED) 211 if (readl(wdt_mem) & VIA_WDT_FIRED)
211 wdt_dev.bootstatus |= WDIOF_CARDRESET; 212 wdt_dev.bootstatus |= WDIOF_CARDRESET;
diff --git a/drivers/watchdog/wm831x_wdt.c b/drivers/watchdog/wm831x_wdt.c
index 2fa17e746ff6..8d1184aee932 100644
--- a/drivers/watchdog/wm831x_wdt.c
+++ b/drivers/watchdog/wm831x_wdt.c
@@ -215,6 +215,7 @@ static int wm831x_wdt_probe(struct platform_device *pdev)
215 215
216 wm831x_wdt->info = &wm831x_wdt_info; 216 wm831x_wdt->info = &wm831x_wdt_info;
217 wm831x_wdt->ops = &wm831x_wdt_ops; 217 wm831x_wdt->ops = &wm831x_wdt_ops;
218 wm831x_wdt->parent = &pdev->dev;
218 watchdog_set_nowayout(wm831x_wdt, nowayout); 219 watchdog_set_nowayout(wm831x_wdt, nowayout);
219 watchdog_set_drvdata(wm831x_wdt, driver_data); 220 watchdog_set_drvdata(wm831x_wdt, driver_data);
220 221
diff --git a/drivers/watchdog/wm8350_wdt.c b/drivers/watchdog/wm8350_wdt.c
index 34d272ada23d..4ab4b8347d45 100644
--- a/drivers/watchdog/wm8350_wdt.c
+++ b/drivers/watchdog/wm8350_wdt.c
@@ -151,6 +151,7 @@ static int wm8350_wdt_probe(struct platform_device *pdev)
151 151
152 watchdog_set_nowayout(&wm8350_wdt, nowayout); 152 watchdog_set_nowayout(&wm8350_wdt, nowayout);
153 watchdog_set_drvdata(&wm8350_wdt, wm8350); 153 watchdog_set_drvdata(&wm8350_wdt, wm8350);
154 wm8350_wdt.parent = &pdev->dev;
154 155
155 /* Default to 4s timeout */ 156 /* Default to 4s timeout */
156 wm8350_wdt_set_timeout(&wm8350_wdt, 4); 157 wm8350_wdt_set_timeout(&wm8350_wdt, 4);
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 22ea424ee741..073bb57adab1 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -1242,6 +1242,13 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
1242 goto out_clear; 1242 goto out_clear;
1243 } 1243 }
1244 bd_set_size(bdev, (loff_t)bdev->bd_part->nr_sects << 9); 1244 bd_set_size(bdev, (loff_t)bdev->bd_part->nr_sects << 9);
1245 /*
1246 * If the partition is not aligned on a page
1247 * boundary, we can't do dax I/O to it.
1248 */
1249 if ((bdev->bd_part->start_sect % (PAGE_SIZE / 512)) ||
1250 (bdev->bd_part->nr_sects % (PAGE_SIZE / 512)))
1251 bdev->bd_inode->i_flags &= ~S_DAX;
1245 } 1252 }
1246 } else { 1253 } else {
1247 if (bdev->bd_contains == bdev) { 1254 if (bdev->bd_contains == bdev) {
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
index 1ce06c849a86..3e36e4adc4a3 100644
--- a/fs/btrfs/async-thread.c
+++ b/fs/btrfs/async-thread.c
@@ -42,8 +42,14 @@ struct __btrfs_workqueue {
42 42
43 /* Thresholding related variants */ 43 /* Thresholding related variants */
44 atomic_t pending; 44 atomic_t pending;
45 int max_active; 45
46 int current_max; 46 /* Up limit of concurrency workers */
47 int limit_active;
48
49 /* Current number of concurrency workers */
50 int current_active;
51
52 /* Threshold to change current_active */
47 int thresh; 53 int thresh;
48 unsigned int count; 54 unsigned int count;
49 spinlock_t thres_lock; 55 spinlock_t thres_lock;
@@ -88,7 +94,7 @@ BTRFS_WORK_HELPER(scrubnc_helper);
88BTRFS_WORK_HELPER(scrubparity_helper); 94BTRFS_WORK_HELPER(scrubparity_helper);
89 95
90static struct __btrfs_workqueue * 96static struct __btrfs_workqueue *
91__btrfs_alloc_workqueue(const char *name, unsigned int flags, int max_active, 97__btrfs_alloc_workqueue(const char *name, unsigned int flags, int limit_active,
92 int thresh) 98 int thresh)
93{ 99{
94 struct __btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_NOFS); 100 struct __btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_NOFS);
@@ -96,26 +102,31 @@ __btrfs_alloc_workqueue(const char *name, unsigned int flags, int max_active,
96 if (!ret) 102 if (!ret)
97 return NULL; 103 return NULL;
98 104
99 ret->max_active = max_active; 105 ret->limit_active = limit_active;
100 atomic_set(&ret->pending, 0); 106 atomic_set(&ret->pending, 0);
101 if (thresh == 0) 107 if (thresh == 0)
102 thresh = DFT_THRESHOLD; 108 thresh = DFT_THRESHOLD;
103 /* For low threshold, disabling threshold is a better choice */ 109 /* For low threshold, disabling threshold is a better choice */
104 if (thresh < DFT_THRESHOLD) { 110 if (thresh < DFT_THRESHOLD) {
105 ret->current_max = max_active; 111 ret->current_active = limit_active;
106 ret->thresh = NO_THRESHOLD; 112 ret->thresh = NO_THRESHOLD;
107 } else { 113 } else {
108 ret->current_max = 1; 114 /*
115 * For threshold-able wq, let its concurrency grow on demand.
116 * Use minimal max_active at alloc time to reduce resource
117 * usage.
118 */
119 ret->current_active = 1;
109 ret->thresh = thresh; 120 ret->thresh = thresh;
110 } 121 }
111 122
112 if (flags & WQ_HIGHPRI) 123 if (flags & WQ_HIGHPRI)
113 ret->normal_wq = alloc_workqueue("%s-%s-high", flags, 124 ret->normal_wq = alloc_workqueue("%s-%s-high", flags,
114 ret->max_active, 125 ret->current_active, "btrfs",
115 "btrfs", name); 126 name);
116 else 127 else
117 ret->normal_wq = alloc_workqueue("%s-%s", flags, 128 ret->normal_wq = alloc_workqueue("%s-%s", flags,
118 ret->max_active, "btrfs", 129 ret->current_active, "btrfs",
119 name); 130 name);
120 if (!ret->normal_wq) { 131 if (!ret->normal_wq) {
121 kfree(ret); 132 kfree(ret);
@@ -134,7 +145,7 @@ __btrfs_destroy_workqueue(struct __btrfs_workqueue *wq);
134 145
135struct btrfs_workqueue *btrfs_alloc_workqueue(const char *name, 146struct btrfs_workqueue *btrfs_alloc_workqueue(const char *name,
136 unsigned int flags, 147 unsigned int flags,
137 int max_active, 148 int limit_active,
138 int thresh) 149 int thresh)
139{ 150{
140 struct btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_NOFS); 151 struct btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_NOFS);
@@ -143,14 +154,14 @@ struct btrfs_workqueue *btrfs_alloc_workqueue(const char *name,
143 return NULL; 154 return NULL;
144 155
145 ret->normal = __btrfs_alloc_workqueue(name, flags & ~WQ_HIGHPRI, 156 ret->normal = __btrfs_alloc_workqueue(name, flags & ~WQ_HIGHPRI,
146 max_active, thresh); 157 limit_active, thresh);
147 if (!ret->normal) { 158 if (!ret->normal) {
148 kfree(ret); 159 kfree(ret);
149 return NULL; 160 return NULL;
150 } 161 }
151 162
152 if (flags & WQ_HIGHPRI) { 163 if (flags & WQ_HIGHPRI) {
153 ret->high = __btrfs_alloc_workqueue(name, flags, max_active, 164 ret->high = __btrfs_alloc_workqueue(name, flags, limit_active,
154 thresh); 165 thresh);
155 if (!ret->high) { 166 if (!ret->high) {
156 __btrfs_destroy_workqueue(ret->normal); 167 __btrfs_destroy_workqueue(ret->normal);
@@ -180,7 +191,7 @@ static inline void thresh_queue_hook(struct __btrfs_workqueue *wq)
180 */ 191 */
181static inline void thresh_exec_hook(struct __btrfs_workqueue *wq) 192static inline void thresh_exec_hook(struct __btrfs_workqueue *wq)
182{ 193{
183 int new_max_active; 194 int new_current_active;
184 long pending; 195 long pending;
185 int need_change = 0; 196 int need_change = 0;
186 197
@@ -197,7 +208,7 @@ static inline void thresh_exec_hook(struct __btrfs_workqueue *wq)
197 wq->count %= (wq->thresh / 4); 208 wq->count %= (wq->thresh / 4);
198 if (!wq->count) 209 if (!wq->count)
199 goto out; 210 goto out;
200 new_max_active = wq->current_max; 211 new_current_active = wq->current_active;
201 212
202 /* 213 /*
203 * pending may be changed later, but it's OK since we really 214 * pending may be changed later, but it's OK since we really
@@ -205,19 +216,19 @@ static inline void thresh_exec_hook(struct __btrfs_workqueue *wq)
205 */ 216 */
206 pending = atomic_read(&wq->pending); 217 pending = atomic_read(&wq->pending);
207 if (pending > wq->thresh) 218 if (pending > wq->thresh)
208 new_max_active++; 219 new_current_active++;
209 if (pending < wq->thresh / 2) 220 if (pending < wq->thresh / 2)
210 new_max_active--; 221 new_current_active--;
211 new_max_active = clamp_val(new_max_active, 1, wq->max_active); 222 new_current_active = clamp_val(new_current_active, 1, wq->limit_active);
212 if (new_max_active != wq->current_max) { 223 if (new_current_active != wq->current_active) {
213 need_change = 1; 224 need_change = 1;
214 wq->current_max = new_max_active; 225 wq->current_active = new_current_active;
215 } 226 }
216out: 227out:
217 spin_unlock(&wq->thres_lock); 228 spin_unlock(&wq->thres_lock);
218 229
219 if (need_change) { 230 if (need_change) {
220 workqueue_set_max_active(wq->normal_wq, wq->current_max); 231 workqueue_set_max_active(wq->normal_wq, wq->current_active);
221 } 232 }
222} 233}
223 234
@@ -351,13 +362,13 @@ void btrfs_destroy_workqueue(struct btrfs_workqueue *wq)
351 kfree(wq); 362 kfree(wq);
352} 363}
353 364
354void btrfs_workqueue_set_max(struct btrfs_workqueue *wq, int max) 365void btrfs_workqueue_set_max(struct btrfs_workqueue *wq, int limit_active)
355{ 366{
356 if (!wq) 367 if (!wq)
357 return; 368 return;
358 wq->normal->max_active = max; 369 wq->normal->limit_active = limit_active;
359 if (wq->high) 370 if (wq->high)
360 wq->high->max_active = max; 371 wq->high->limit_active = limit_active;
361} 372}
362 373
363void btrfs_set_work_high_priority(struct btrfs_work *work) 374void btrfs_set_work_high_priority(struct btrfs_work *work)
diff --git a/fs/btrfs/async-thread.h b/fs/btrfs/async-thread.h
index b0b093b6afec..ad4d0647d1a6 100644
--- a/fs/btrfs/async-thread.h
+++ b/fs/btrfs/async-thread.h
@@ -69,7 +69,7 @@ BTRFS_WORK_HELPER_PROTO(scrubparity_helper);
69 69
70struct btrfs_workqueue *btrfs_alloc_workqueue(const char *name, 70struct btrfs_workqueue *btrfs_alloc_workqueue(const char *name,
71 unsigned int flags, 71 unsigned int flags,
72 int max_active, 72 int limit_active,
73 int thresh); 73 int thresh);
74void btrfs_init_work(struct btrfs_work *work, btrfs_work_func_t helper, 74void btrfs_init_work(struct btrfs_work *work, btrfs_work_func_t helper,
75 btrfs_func_t func, 75 btrfs_func_t func,
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
index 81220b2203c6..0ef5cc13fae2 100644
--- a/fs/btrfs/btrfs_inode.h
+++ b/fs/btrfs/btrfs_inode.h
@@ -44,8 +44,6 @@
44#define BTRFS_INODE_IN_DELALLOC_LIST 9 44#define BTRFS_INODE_IN_DELALLOC_LIST 9
45#define BTRFS_INODE_READDIO_NEED_LOCK 10 45#define BTRFS_INODE_READDIO_NEED_LOCK 10
46#define BTRFS_INODE_HAS_PROPS 11 46#define BTRFS_INODE_HAS_PROPS 11
47/* DIO is ready to submit */
48#define BTRFS_INODE_DIO_READY 12
49/* 47/*
50 * The following 3 bits are meant only for the btree inode. 48 * The following 3 bits are meant only for the btree inode.
51 * When any of them is set, it means an error happened while writing an 49 * When any of them is set, it means an error happened while writing an
diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
index 564a7de17d99..e54dd5905cee 100644
--- a/fs/btrfs/dev-replace.c
+++ b/fs/btrfs/dev-replace.c
@@ -183,8 +183,7 @@ no_valid_dev_replace_entry_found:
183 } 183 }
184 184
185out: 185out:
186 if (path) 186 btrfs_free_path(path);
187 btrfs_free_path(path);
188 return ret; 187 return ret;
189} 188}
190 189
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 9ebd34f1c677..295795aebe0b 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -3443,6 +3443,26 @@ static int barrier_all_devices(struct btrfs_fs_info *info)
3443 return 0; 3443 return 0;
3444} 3444}
3445 3445
3446int btrfs_get_num_tolerated_disk_barrier_failures(u64 flags)
3447{
3448 if ((flags & (BTRFS_BLOCK_GROUP_DUP |
3449 BTRFS_BLOCK_GROUP_RAID0 |
3450 BTRFS_AVAIL_ALLOC_BIT_SINGLE)) ||
3451 ((flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0))
3452 return 0;
3453
3454 if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
3455 BTRFS_BLOCK_GROUP_RAID5 |
3456 BTRFS_BLOCK_GROUP_RAID10))
3457 return 1;
3458
3459 if (flags & BTRFS_BLOCK_GROUP_RAID6)
3460 return 2;
3461
3462 pr_warn("BTRFS: unknown raid type: %llu\n", flags);
3463 return 0;
3464}
3465
3446int btrfs_calc_num_tolerated_disk_barrier_failures( 3466int btrfs_calc_num_tolerated_disk_barrier_failures(
3447 struct btrfs_fs_info *fs_info) 3467 struct btrfs_fs_info *fs_info)
3448{ 3468{
@@ -3452,13 +3472,12 @@ int btrfs_calc_num_tolerated_disk_barrier_failures(
3452 BTRFS_BLOCK_GROUP_SYSTEM, 3472 BTRFS_BLOCK_GROUP_SYSTEM,
3453 BTRFS_BLOCK_GROUP_METADATA, 3473 BTRFS_BLOCK_GROUP_METADATA,
3454 BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA}; 3474 BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA};
3455 int num_types = 4;
3456 int i; 3475 int i;
3457 int c; 3476 int c;
3458 int num_tolerated_disk_barrier_failures = 3477 int num_tolerated_disk_barrier_failures =
3459 (int)fs_info->fs_devices->num_devices; 3478 (int)fs_info->fs_devices->num_devices;
3460 3479
3461 for (i = 0; i < num_types; i++) { 3480 for (i = 0; i < ARRAY_SIZE(types); i++) {
3462 struct btrfs_space_info *tmp; 3481 struct btrfs_space_info *tmp;
3463 3482
3464 sinfo = NULL; 3483 sinfo = NULL;
@@ -3476,44 +3495,21 @@ int btrfs_calc_num_tolerated_disk_barrier_failures(
3476 3495
3477 down_read(&sinfo->groups_sem); 3496 down_read(&sinfo->groups_sem);
3478 for (c = 0; c < BTRFS_NR_RAID_TYPES; c++) { 3497 for (c = 0; c < BTRFS_NR_RAID_TYPES; c++) {
3479 if (!list_empty(&sinfo->block_groups[c])) { 3498 u64 flags;
3480 u64 flags; 3499
3481 3500 if (list_empty(&sinfo->block_groups[c]))
3482 btrfs_get_block_group_info( 3501 continue;
3483 &sinfo->block_groups[c], &space); 3502
3484 if (space.total_bytes == 0 || 3503 btrfs_get_block_group_info(&sinfo->block_groups[c],
3485 space.used_bytes == 0) 3504 &space);
3486 continue; 3505 if (space.total_bytes == 0 || space.used_bytes == 0)
3487 flags = space.flags; 3506 continue;
3488 /* 3507 flags = space.flags;
3489 * return 3508
3490 * 0: if dup, single or RAID0 is configured for 3509 num_tolerated_disk_barrier_failures = min(
3491 * any of metadata, system or data, else 3510 num_tolerated_disk_barrier_failures,
3492 * 1: if RAID5 is configured, or if RAID1 or 3511 btrfs_get_num_tolerated_disk_barrier_failures(
3493 * RAID10 is configured and only two mirrors 3512 flags));
3494 * are used, else
3495 * 2: if RAID6 is configured, else
3496 * num_mirrors - 1: if RAID1 or RAID10 is
3497 * configured and more than
3498 * 2 mirrors are used.
3499 */
3500 if (num_tolerated_disk_barrier_failures > 0 &&
3501 ((flags & (BTRFS_BLOCK_GROUP_DUP |
3502 BTRFS_BLOCK_GROUP_RAID0)) ||
3503 ((flags & BTRFS_BLOCK_GROUP_PROFILE_MASK)
3504 == 0)))
3505 num_tolerated_disk_barrier_failures = 0;
3506 else if (num_tolerated_disk_barrier_failures > 1) {
3507 if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
3508 BTRFS_BLOCK_GROUP_RAID5 |
3509 BTRFS_BLOCK_GROUP_RAID10)) {
3510 num_tolerated_disk_barrier_failures = 1;
3511 } else if (flags &
3512 BTRFS_BLOCK_GROUP_RAID6) {
3513 num_tolerated_disk_barrier_failures = 2;
3514 }
3515 }
3516 }
3517 } 3513 }
3518 up_read(&sinfo->groups_sem); 3514 up_read(&sinfo->groups_sem);
3519 } 3515 }
@@ -3769,9 +3765,7 @@ void close_ctree(struct btrfs_root *root)
3769 * block groups queued for removal, the deletion will be 3765 * block groups queued for removal, the deletion will be
3770 * skipped when we quit the cleaner thread. 3766 * skipped when we quit the cleaner thread.
3771 */ 3767 */
3772 mutex_lock(&root->fs_info->cleaner_mutex);
3773 btrfs_delete_unused_bgs(root->fs_info); 3768 btrfs_delete_unused_bgs(root->fs_info);
3774 mutex_unlock(&root->fs_info->cleaner_mutex);
3775 3769
3776 ret = btrfs_commit_super(root); 3770 ret = btrfs_commit_super(root);
3777 if (ret) 3771 if (ret)
diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h
index d4cbfeeeedd4..bdfb479ea859 100644
--- a/fs/btrfs/disk-io.h
+++ b/fs/btrfs/disk-io.h
@@ -139,6 +139,7 @@ struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
139 u64 objectid); 139 u64 objectid);
140int btree_lock_page_hook(struct page *page, void *data, 140int btree_lock_page_hook(struct page *page, void *data,
141 void (*flush_fn)(void *)); 141 void (*flush_fn)(void *));
142int btrfs_get_num_tolerated_disk_barrier_failures(u64 flags);
142int btrfs_calc_num_tolerated_disk_barrier_failures( 143int btrfs_calc_num_tolerated_disk_barrier_failures(
143 struct btrfs_fs_info *fs_info); 144 struct btrfs_fs_info *fs_info);
144int __init btrfs_end_io_wq_init(void); 145int __init btrfs_end_io_wq_init(void);
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 5411f0ab5683..9f9604201333 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -3742,10 +3742,7 @@ static int update_space_info(struct btrfs_fs_info *info, u64 flags,
3742 found->bytes_reserved = 0; 3742 found->bytes_reserved = 0;
3743 found->bytes_readonly = 0; 3743 found->bytes_readonly = 0;
3744 found->bytes_may_use = 0; 3744 found->bytes_may_use = 0;
3745 if (total_bytes > 0) 3745 found->full = 0;
3746 found->full = 0;
3747 else
3748 found->full = 1;
3749 found->force_alloc = CHUNK_ALLOC_NO_FORCE; 3746 found->force_alloc = CHUNK_ALLOC_NO_FORCE;
3750 found->chunk_alloc = 0; 3747 found->chunk_alloc = 0;
3751 found->flush = 0; 3748 found->flush = 0;
@@ -8668,7 +8665,7 @@ int btrfs_drop_snapshot(struct btrfs_root *root,
8668 } 8665 }
8669 8666
8670 if (test_bit(BTRFS_ROOT_IN_RADIX, &root->state)) { 8667 if (test_bit(BTRFS_ROOT_IN_RADIX, &root->state)) {
8671 btrfs_drop_and_free_fs_root(tree_root->fs_info, root); 8668 btrfs_add_dropped_root(trans, root);
8672 } else { 8669 } else {
8673 free_extent_buffer(root->node); 8670 free_extent_buffer(root->node);
8674 free_extent_buffer(root->commit_root); 8671 free_extent_buffer(root->commit_root);
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index f1018cfbfefa..e2357e31609a 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -2798,7 +2798,8 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree,
2798 bio_end_io_t end_io_func, 2798 bio_end_io_t end_io_func,
2799 int mirror_num, 2799 int mirror_num,
2800 unsigned long prev_bio_flags, 2800 unsigned long prev_bio_flags,
2801 unsigned long bio_flags) 2801 unsigned long bio_flags,
2802 bool force_bio_submit)
2802{ 2803{
2803 int ret = 0; 2804 int ret = 0;
2804 struct bio *bio; 2805 struct bio *bio;
@@ -2814,6 +2815,7 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree,
2814 contig = bio_end_sector(bio) == sector; 2815 contig = bio_end_sector(bio) == sector;
2815 2816
2816 if (prev_bio_flags != bio_flags || !contig || 2817 if (prev_bio_flags != bio_flags || !contig ||
2818 force_bio_submit ||
2817 merge_bio(rw, tree, page, offset, page_size, bio, bio_flags) || 2819 merge_bio(rw, tree, page, offset, page_size, bio, bio_flags) ||
2818 bio_add_page(bio, page, page_size, offset) < page_size) { 2820 bio_add_page(bio, page, page_size, offset) < page_size) {
2819 ret = submit_one_bio(rw, bio, mirror_num, 2821 ret = submit_one_bio(rw, bio, mirror_num,
@@ -2910,7 +2912,8 @@ static int __do_readpage(struct extent_io_tree *tree,
2910 get_extent_t *get_extent, 2912 get_extent_t *get_extent,
2911 struct extent_map **em_cached, 2913 struct extent_map **em_cached,
2912 struct bio **bio, int mirror_num, 2914 struct bio **bio, int mirror_num,
2913 unsigned long *bio_flags, int rw) 2915 unsigned long *bio_flags, int rw,
2916 u64 *prev_em_start)
2914{ 2917{
2915 struct inode *inode = page->mapping->host; 2918 struct inode *inode = page->mapping->host;
2916 u64 start = page_offset(page); 2919 u64 start = page_offset(page);
@@ -2958,6 +2961,7 @@ static int __do_readpage(struct extent_io_tree *tree,
2958 } 2961 }
2959 while (cur <= end) { 2962 while (cur <= end) {
2960 unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1; 2963 unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
2964 bool force_bio_submit = false;
2961 2965
2962 if (cur >= last_byte) { 2966 if (cur >= last_byte) {
2963 char *userpage; 2967 char *userpage;
@@ -3008,6 +3012,49 @@ static int __do_readpage(struct extent_io_tree *tree,
3008 block_start = em->block_start; 3012 block_start = em->block_start;
3009 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) 3013 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
3010 block_start = EXTENT_MAP_HOLE; 3014 block_start = EXTENT_MAP_HOLE;
3015
3016 /*
3017 * If we have a file range that points to a compressed extent
3018 * and it's followed by a consecutive file range that points to
3019 * to the same compressed extent (possibly with a different
3020 * offset and/or length, so it either points to the whole extent
3021 * or only part of it), we must make sure we do not submit a
3022 * single bio to populate the pages for the 2 ranges because
3023 * this makes the compressed extent read zero out the pages
3024 * belonging to the 2nd range. Imagine the following scenario:
3025 *
3026 * File layout
3027 * [0 - 8K] [8K - 24K]
3028 * | |
3029 * | |
3030 * points to extent X, points to extent X,
3031 * offset 4K, length of 8K offset 0, length 16K
3032 *
3033 * [extent X, compressed length = 4K uncompressed length = 16K]
3034 *
3035 * If the bio to read the compressed extent covers both ranges,
3036 * it will decompress extent X into the pages belonging to the
3037 * first range and then it will stop, zeroing out the remaining
3038 * pages that belong to the other range that points to extent X.
3039 * So here we make sure we submit 2 bios, one for the first
3040 * range and another one for the third range. Both will target
3041 * the same physical extent from disk, but we can't currently
3042 * make the compressed bio endio callback populate the pages
3043 * for both ranges because each compressed bio is tightly
3044 * coupled with a single extent map, and each range can have
3045 * an extent map with a different offset value relative to the
3046 * uncompressed data of our extent and different lengths. This
3047 * is a corner case so we prioritize correctness over
3048 * non-optimal behavior (submitting 2 bios for the same extent).
3049 */
3050 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) &&
3051 prev_em_start && *prev_em_start != (u64)-1 &&
3052 *prev_em_start != em->orig_start)
3053 force_bio_submit = true;
3054
3055 if (prev_em_start)
3056 *prev_em_start = em->orig_start;
3057
3011 free_extent_map(em); 3058 free_extent_map(em);
3012 em = NULL; 3059 em = NULL;
3013 3060
@@ -3057,7 +3104,8 @@ static int __do_readpage(struct extent_io_tree *tree,
3057 bdev, bio, pnr, 3104 bdev, bio, pnr,
3058 end_bio_extent_readpage, mirror_num, 3105 end_bio_extent_readpage, mirror_num,
3059 *bio_flags, 3106 *bio_flags,
3060 this_bio_flag); 3107 this_bio_flag,
3108 force_bio_submit);
3061 if (!ret) { 3109 if (!ret) {
3062 nr++; 3110 nr++;
3063 *bio_flags = this_bio_flag; 3111 *bio_flags = this_bio_flag;
@@ -3089,6 +3137,7 @@ static inline void __do_contiguous_readpages(struct extent_io_tree *tree,
3089 struct inode *inode; 3137 struct inode *inode;
3090 struct btrfs_ordered_extent *ordered; 3138 struct btrfs_ordered_extent *ordered;
3091 int index; 3139 int index;
3140 u64 prev_em_start = (u64)-1;
3092 3141
3093 inode = pages[0]->mapping->host; 3142 inode = pages[0]->mapping->host;
3094 while (1) { 3143 while (1) {
@@ -3104,7 +3153,7 @@ static inline void __do_contiguous_readpages(struct extent_io_tree *tree,
3104 3153
3105 for (index = 0; index < nr_pages; index++) { 3154 for (index = 0; index < nr_pages; index++) {
3106 __do_readpage(tree, pages[index], get_extent, em_cached, bio, 3155 __do_readpage(tree, pages[index], get_extent, em_cached, bio,
3107 mirror_num, bio_flags, rw); 3156 mirror_num, bio_flags, rw, &prev_em_start);
3108 page_cache_release(pages[index]); 3157 page_cache_release(pages[index]);
3109 } 3158 }
3110} 3159}
@@ -3172,7 +3221,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
3172 } 3221 }
3173 3222
3174 ret = __do_readpage(tree, page, get_extent, NULL, bio, mirror_num, 3223 ret = __do_readpage(tree, page, get_extent, NULL, bio, mirror_num,
3175 bio_flags, rw); 3224 bio_flags, rw, NULL);
3176 return ret; 3225 return ret;
3177} 3226}
3178 3227
@@ -3198,7 +3247,7 @@ int extent_read_full_page_nolock(struct extent_io_tree *tree, struct page *page,
3198 int ret; 3247 int ret;
3199 3248
3200 ret = __do_readpage(tree, page, get_extent, NULL, &bio, mirror_num, 3249 ret = __do_readpage(tree, page, get_extent, NULL, &bio, mirror_num,
3201 &bio_flags, READ); 3250 &bio_flags, READ, NULL);
3202 if (bio) 3251 if (bio)
3203 ret = submit_one_bio(READ, bio, mirror_num, bio_flags); 3252 ret = submit_one_bio(READ, bio, mirror_num, bio_flags);
3204 return ret; 3253 return ret;
@@ -3451,7 +3500,7 @@ static noinline_for_stack int __extent_writepage_io(struct inode *inode,
3451 sector, iosize, pg_offset, 3500 sector, iosize, pg_offset,
3452 bdev, &epd->bio, max_nr, 3501 bdev, &epd->bio, max_nr,
3453 end_bio_extent_writepage, 3502 end_bio_extent_writepage,
3454 0, 0, 0); 3503 0, 0, 0, false);
3455 if (ret) 3504 if (ret)
3456 SetPageError(page); 3505 SetPageError(page);
3457 } 3506 }
@@ -3754,7 +3803,7 @@ static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
3754 ret = submit_extent_page(rw, tree, wbc, p, offset >> 9, 3803 ret = submit_extent_page(rw, tree, wbc, p, offset >> 9,
3755 PAGE_CACHE_SIZE, 0, bdev, &epd->bio, 3804 PAGE_CACHE_SIZE, 0, bdev, &epd->bio,
3756 -1, end_bio_extent_buffer_writepage, 3805 -1, end_bio_extent_buffer_writepage,
3757 0, epd->bio_flags, bio_flags); 3806 0, epd->bio_flags, bio_flags, false);
3758 epd->bio_flags = bio_flags; 3807 epd->bio_flags = bio_flags;
3759 if (ret) { 3808 if (ret) {
3760 set_btree_ioerr(p); 3809 set_btree_ioerr(p);
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 237da012f7d0..611b66d73e80 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -5084,7 +5084,8 @@ void btrfs_evict_inode(struct inode *inode)
5084 goto no_delete; 5084 goto no_delete;
5085 } 5085 }
5086 /* do we really want it for ->i_nlink > 0 and zero btrfs_root_refs? */ 5086 /* do we really want it for ->i_nlink > 0 and zero btrfs_root_refs? */
5087 btrfs_wait_ordered_range(inode, 0, (u64)-1); 5087 if (!special_file(inode->i_mode))
5088 btrfs_wait_ordered_range(inode, 0, (u64)-1);
5088 5089
5089 btrfs_free_io_failure_record(inode, 0, (u64)-1); 5090 btrfs_free_io_failure_record(inode, 0, (u64)-1);
5090 5091
@@ -6909,8 +6910,7 @@ out:
6909 6910
6910 trace_btrfs_get_extent(root, em); 6911 trace_btrfs_get_extent(root, em);
6911 6912
6912 if (path) 6913 btrfs_free_path(path);
6913 btrfs_free_path(path);
6914 if (trans) { 6914 if (trans) {
6915 ret = btrfs_end_transaction(trans, root); 6915 ret = btrfs_end_transaction(trans, root);
6916 if (!err) 6916 if (!err)
@@ -7409,6 +7409,10 @@ static struct extent_map *create_pinned_em(struct inode *inode, u64 start,
7409 return em; 7409 return em;
7410} 7410}
7411 7411
7412struct btrfs_dio_data {
7413 u64 outstanding_extents;
7414 u64 reserve;
7415};
7412 7416
7413static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock, 7417static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
7414 struct buffer_head *bh_result, int create) 7418 struct buffer_head *bh_result, int create)
@@ -7416,10 +7420,10 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
7416 struct extent_map *em; 7420 struct extent_map *em;
7417 struct btrfs_root *root = BTRFS_I(inode)->root; 7421 struct btrfs_root *root = BTRFS_I(inode)->root;
7418 struct extent_state *cached_state = NULL; 7422 struct extent_state *cached_state = NULL;
7423 struct btrfs_dio_data *dio_data = NULL;
7419 u64 start = iblock << inode->i_blkbits; 7424 u64 start = iblock << inode->i_blkbits;
7420 u64 lockstart, lockend; 7425 u64 lockstart, lockend;
7421 u64 len = bh_result->b_size; 7426 u64 len = bh_result->b_size;
7422 u64 *outstanding_extents = NULL;
7423 int unlock_bits = EXTENT_LOCKED; 7427 int unlock_bits = EXTENT_LOCKED;
7424 int ret = 0; 7428 int ret = 0;
7425 7429
@@ -7437,7 +7441,7 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
7437 * that anything that needs to check if there's a transction doesn't get 7441 * that anything that needs to check if there's a transction doesn't get
7438 * confused. 7442 * confused.
7439 */ 7443 */
7440 outstanding_extents = current->journal_info; 7444 dio_data = current->journal_info;
7441 current->journal_info = NULL; 7445 current->journal_info = NULL;
7442 } 7446 }
7443 7447
@@ -7569,17 +7573,18 @@ unlock:
7569 * within our reservation, otherwise we need to adjust our inode 7573 * within our reservation, otherwise we need to adjust our inode
7570 * counter appropriately. 7574 * counter appropriately.
7571 */ 7575 */
7572 if (*outstanding_extents) { 7576 if (dio_data->outstanding_extents) {
7573 (*outstanding_extents)--; 7577 (dio_data->outstanding_extents)--;
7574 } else { 7578 } else {
7575 spin_lock(&BTRFS_I(inode)->lock); 7579 spin_lock(&BTRFS_I(inode)->lock);
7576 BTRFS_I(inode)->outstanding_extents++; 7580 BTRFS_I(inode)->outstanding_extents++;
7577 spin_unlock(&BTRFS_I(inode)->lock); 7581 spin_unlock(&BTRFS_I(inode)->lock);
7578 } 7582 }
7579 7583
7580 current->journal_info = outstanding_extents;
7581 btrfs_free_reserved_data_space(inode, len); 7584 btrfs_free_reserved_data_space(inode, len);
7582 set_bit(BTRFS_INODE_DIO_READY, &BTRFS_I(inode)->runtime_flags); 7585 WARN_ON(dio_data->reserve < len);
7586 dio_data->reserve -= len;
7587 current->journal_info = dio_data;
7583 } 7588 }
7584 7589
7585 /* 7590 /*
@@ -7602,8 +7607,8 @@ unlock:
7602unlock_err: 7607unlock_err:
7603 clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend, 7608 clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend,
7604 unlock_bits, 1, 0, &cached_state, GFP_NOFS); 7609 unlock_bits, 1, 0, &cached_state, GFP_NOFS);
7605 if (outstanding_extents) 7610 if (dio_data)
7606 current->journal_info = outstanding_extents; 7611 current->journal_info = dio_data;
7607 return ret; 7612 return ret;
7608} 7613}
7609 7614
@@ -8330,7 +8335,8 @@ static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
8330{ 8335{
8331 struct file *file = iocb->ki_filp; 8336 struct file *file = iocb->ki_filp;
8332 struct inode *inode = file->f_mapping->host; 8337 struct inode *inode = file->f_mapping->host;
8333 u64 outstanding_extents = 0; 8338 struct btrfs_root *root = BTRFS_I(inode)->root;
8339 struct btrfs_dio_data dio_data = { 0 };
8334 size_t count = 0; 8340 size_t count = 0;
8335 int flags = 0; 8341 int flags = 0;
8336 bool wakeup = true; 8342 bool wakeup = true;
@@ -8368,7 +8374,7 @@ static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
8368 ret = btrfs_delalloc_reserve_space(inode, count); 8374 ret = btrfs_delalloc_reserve_space(inode, count);
8369 if (ret) 8375 if (ret)
8370 goto out; 8376 goto out;
8371 outstanding_extents = div64_u64(count + 8377 dio_data.outstanding_extents = div64_u64(count +
8372 BTRFS_MAX_EXTENT_SIZE - 1, 8378 BTRFS_MAX_EXTENT_SIZE - 1,
8373 BTRFS_MAX_EXTENT_SIZE); 8379 BTRFS_MAX_EXTENT_SIZE);
8374 8380
@@ -8377,7 +8383,8 @@ static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
8377 * do the accounting properly if we go over the number we 8383 * do the accounting properly if we go over the number we
8378 * originally calculated. Abuse current->journal_info for this. 8384 * originally calculated. Abuse current->journal_info for this.
8379 */ 8385 */
8380 current->journal_info = &outstanding_extents; 8386 dio_data.reserve = round_up(count, root->sectorsize);
8387 current->journal_info = &dio_data;
8381 } else if (test_bit(BTRFS_INODE_READDIO_NEED_LOCK, 8388 } else if (test_bit(BTRFS_INODE_READDIO_NEED_LOCK,
8382 &BTRFS_I(inode)->runtime_flags)) { 8389 &BTRFS_I(inode)->runtime_flags)) {
8383 inode_dio_end(inode); 8390 inode_dio_end(inode);
@@ -8392,16 +8399,9 @@ static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
8392 if (iov_iter_rw(iter) == WRITE) { 8399 if (iov_iter_rw(iter) == WRITE) {
8393 current->journal_info = NULL; 8400 current->journal_info = NULL;
8394 if (ret < 0 && ret != -EIOCBQUEUED) { 8401 if (ret < 0 && ret != -EIOCBQUEUED) {
8395 /* 8402 if (dio_data.reserve)
8396 * If the error comes from submitting stage, 8403 btrfs_delalloc_release_space(inode,
8397 * btrfs_get_blocsk_direct() has free'd data space, 8404 dio_data.reserve);
8398 * and metadata space will be handled by
8399 * finish_ordered_fn, don't do that again to make
8400 * sure bytes_may_use is correct.
8401 */
8402 if (!test_and_clear_bit(BTRFS_INODE_DIO_READY,
8403 &BTRFS_I(inode)->runtime_flags))
8404 btrfs_delalloc_release_space(inode, count);
8405 } else if (ret >= 0 && (size_t)ret < count) 8405 } else if (ret >= 0 && (size_t)ret < count)
8406 btrfs_delalloc_release_space(inode, 8406 btrfs_delalloc_release_space(inode,
8407 count - (size_t)ret); 8407 count - (size_t)ret);
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 9a11db0c47ee..a39f5d1144e8 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -3267,13 +3267,13 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
3267 scrub_blocked_if_needed(fs_info); 3267 scrub_blocked_if_needed(fs_info);
3268 } 3268 }
3269 3269
3270 /* for raid56, we skip parity stripe */
3271 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { 3270 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
3272 ret = get_raid56_logic_offset(physical, num, map, 3271 ret = get_raid56_logic_offset(physical, num, map,
3273 &logical, 3272 &logical,
3274 &stripe_logical); 3273 &stripe_logical);
3275 logical += base; 3274 logical += base;
3276 if (ret) { 3275 if (ret) {
3276 /* it is parity strip */
3277 stripe_logical += base; 3277 stripe_logical += base;
3278 stripe_end = stripe_logical + increment; 3278 stripe_end = stripe_logical + increment;
3279 ret = scrub_raid56_parity(sctx, map, scrub_dev, 3279 ret = scrub_raid56_parity(sctx, map, scrub_dev,
@@ -3480,7 +3480,6 @@ out:
3480 3480
3481static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx, 3481static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
3482 struct btrfs_device *scrub_dev, 3482 struct btrfs_device *scrub_dev,
3483 u64 chunk_tree, u64 chunk_objectid,
3484 u64 chunk_offset, u64 length, 3483 u64 chunk_offset, u64 length,
3485 u64 dev_offset, int is_dev_replace) 3484 u64 dev_offset, int is_dev_replace)
3486{ 3485{
@@ -3531,8 +3530,6 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
3531 struct btrfs_root *root = sctx->dev_root; 3530 struct btrfs_root *root = sctx->dev_root;
3532 struct btrfs_fs_info *fs_info = root->fs_info; 3531 struct btrfs_fs_info *fs_info = root->fs_info;
3533 u64 length; 3532 u64 length;
3534 u64 chunk_tree;
3535 u64 chunk_objectid;
3536 u64 chunk_offset; 3533 u64 chunk_offset;
3537 int ret = 0; 3534 int ret = 0;
3538 int slot; 3535 int slot;
@@ -3596,8 +3593,6 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
3596 if (found_key.offset + length <= start) 3593 if (found_key.offset + length <= start)
3597 goto skip; 3594 goto skip;
3598 3595
3599 chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
3600 chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
3601 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent); 3596 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
3602 3597
3603 /* 3598 /*
@@ -3630,9 +3625,8 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
3630 dev_replace->cursor_right = found_key.offset + length; 3625 dev_replace->cursor_right = found_key.offset + length;
3631 dev_replace->cursor_left = found_key.offset; 3626 dev_replace->cursor_left = found_key.offset;
3632 dev_replace->item_needs_writeback = 1; 3627 dev_replace->item_needs_writeback = 1;
3633 ret = scrub_chunk(sctx, scrub_dev, chunk_tree, chunk_objectid, 3628 ret = scrub_chunk(sctx, scrub_dev, chunk_offset, length,
3634 chunk_offset, length, found_key.offset, 3629 found_key.offset, is_dev_replace);
3635 is_dev_replace);
3636 3630
3637 /* 3631 /*
3638 * flush, submit all pending read and write bios, afterwards 3632 * flush, submit all pending read and write bios, afterwards
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 2b07b3581781..11d1eab9234d 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -1658,9 +1658,7 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
1658 * groups on disk until we're mounted read-write again 1658 * groups on disk until we're mounted read-write again
1659 * unless we clean them up here. 1659 * unless we clean them up here.
1660 */ 1660 */
1661 mutex_lock(&root->fs_info->cleaner_mutex);
1662 btrfs_delete_unused_bgs(fs_info); 1661 btrfs_delete_unused_bgs(fs_info);
1663 mutex_unlock(&root->fs_info->cleaner_mutex);
1664 1662
1665 btrfs_dev_replace_suspend_for_unmount(fs_info); 1663 btrfs_dev_replace_suspend_for_unmount(fs_info);
1666 btrfs_scrub_cancel(fs_info); 1664 btrfs_scrub_cancel(fs_info);
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 8f259b3a66b3..74bc3338418b 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -117,6 +117,18 @@ static noinline void switch_commit_roots(struct btrfs_transaction *trans,
117 btrfs_unpin_free_ino(root); 117 btrfs_unpin_free_ino(root);
118 clear_btree_io_tree(&root->dirty_log_pages); 118 clear_btree_io_tree(&root->dirty_log_pages);
119 } 119 }
120
121 /* We can free old roots now. */
122 spin_lock(&trans->dropped_roots_lock);
123 while (!list_empty(&trans->dropped_roots)) {
124 root = list_first_entry(&trans->dropped_roots,
125 struct btrfs_root, root_list);
126 list_del_init(&root->root_list);
127 spin_unlock(&trans->dropped_roots_lock);
128 btrfs_drop_and_free_fs_root(fs_info, root);
129 spin_lock(&trans->dropped_roots_lock);
130 }
131 spin_unlock(&trans->dropped_roots_lock);
120 up_write(&fs_info->commit_root_sem); 132 up_write(&fs_info->commit_root_sem);
121} 133}
122 134
@@ -255,11 +267,13 @@ loop:
255 INIT_LIST_HEAD(&cur_trans->pending_ordered); 267 INIT_LIST_HEAD(&cur_trans->pending_ordered);
256 INIT_LIST_HEAD(&cur_trans->dirty_bgs); 268 INIT_LIST_HEAD(&cur_trans->dirty_bgs);
257 INIT_LIST_HEAD(&cur_trans->io_bgs); 269 INIT_LIST_HEAD(&cur_trans->io_bgs);
270 INIT_LIST_HEAD(&cur_trans->dropped_roots);
258 mutex_init(&cur_trans->cache_write_mutex); 271 mutex_init(&cur_trans->cache_write_mutex);
259 cur_trans->num_dirty_bgs = 0; 272 cur_trans->num_dirty_bgs = 0;
260 spin_lock_init(&cur_trans->dirty_bgs_lock); 273 spin_lock_init(&cur_trans->dirty_bgs_lock);
261 INIT_LIST_HEAD(&cur_trans->deleted_bgs); 274 INIT_LIST_HEAD(&cur_trans->deleted_bgs);
262 spin_lock_init(&cur_trans->deleted_bgs_lock); 275 spin_lock_init(&cur_trans->deleted_bgs_lock);
276 spin_lock_init(&cur_trans->dropped_roots_lock);
263 list_add_tail(&cur_trans->list, &fs_info->trans_list); 277 list_add_tail(&cur_trans->list, &fs_info->trans_list);
264 extent_io_tree_init(&cur_trans->dirty_pages, 278 extent_io_tree_init(&cur_trans->dirty_pages,
265 fs_info->btree_inode->i_mapping); 279 fs_info->btree_inode->i_mapping);
@@ -336,6 +350,24 @@ static int record_root_in_trans(struct btrfs_trans_handle *trans,
336} 350}
337 351
338 352
353void btrfs_add_dropped_root(struct btrfs_trans_handle *trans,
354 struct btrfs_root *root)
355{
356 struct btrfs_transaction *cur_trans = trans->transaction;
357
358 /* Add ourselves to the transaction dropped list */
359 spin_lock(&cur_trans->dropped_roots_lock);
360 list_add_tail(&root->root_list, &cur_trans->dropped_roots);
361 spin_unlock(&cur_trans->dropped_roots_lock);
362
363 /* Make sure we don't try to update the root at commit time */
364 spin_lock(&root->fs_info->fs_roots_radix_lock);
365 radix_tree_tag_clear(&root->fs_info->fs_roots_radix,
366 (unsigned long)root->root_key.objectid,
367 BTRFS_ROOT_TRANS_TAG);
368 spin_unlock(&root->fs_info->fs_roots_radix_lock);
369}
370
339int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans, 371int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
340 struct btrfs_root *root) 372 struct btrfs_root *root)
341{ 373{
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
index edc2fbc262d7..87964bf8892d 100644
--- a/fs/btrfs/transaction.h
+++ b/fs/btrfs/transaction.h
@@ -65,6 +65,7 @@ struct btrfs_transaction {
65 struct list_head switch_commits; 65 struct list_head switch_commits;
66 struct list_head dirty_bgs; 66 struct list_head dirty_bgs;
67 struct list_head io_bgs; 67 struct list_head io_bgs;
68 struct list_head dropped_roots;
68 u64 num_dirty_bgs; 69 u64 num_dirty_bgs;
69 70
70 /* 71 /*
@@ -76,6 +77,7 @@ struct btrfs_transaction {
76 spinlock_t dirty_bgs_lock; 77 spinlock_t dirty_bgs_lock;
77 struct list_head deleted_bgs; 78 struct list_head deleted_bgs;
78 spinlock_t deleted_bgs_lock; 79 spinlock_t deleted_bgs_lock;
80 spinlock_t dropped_roots_lock;
79 struct btrfs_delayed_ref_root delayed_refs; 81 struct btrfs_delayed_ref_root delayed_refs;
80 int aborted; 82 int aborted;
81 int dirty_bg_run; 83 int dirty_bg_run;
@@ -216,5 +218,6 @@ int btrfs_transaction_blocked(struct btrfs_fs_info *info);
216int btrfs_transaction_in_commit(struct btrfs_fs_info *info); 218int btrfs_transaction_in_commit(struct btrfs_fs_info *info);
217void btrfs_put_transaction(struct btrfs_transaction *transaction); 219void btrfs_put_transaction(struct btrfs_transaction *transaction);
218void btrfs_apply_pending_changes(struct btrfs_fs_info *fs_info); 220void btrfs_apply_pending_changes(struct btrfs_fs_info *fs_info);
219 221void btrfs_add_dropped_root(struct btrfs_trans_handle *trans,
222 struct btrfs_root *root);
220#endif 223#endif
diff --git a/fs/btrfs/tree-defrag.c b/fs/btrfs/tree-defrag.c
index a4b9c8b2d35a..f31db4325339 100644
--- a/fs/btrfs/tree-defrag.c
+++ b/fs/btrfs/tree-defrag.c
@@ -115,8 +115,7 @@ int btrfs_defrag_leaves(struct btrfs_trans_handle *trans,
115 ret = -EAGAIN; 115 ret = -EAGAIN;
116 } 116 }
117out: 117out:
118 if (path) 118 btrfs_free_path(path);
119 btrfs_free_path(path);
120 if (ret == -EAGAIN) { 119 if (ret == -EAGAIN) {
121 if (root->defrag_max.objectid > root->defrag_progress.objectid) 120 if (root->defrag_max.objectid > root->defrag_progress.objectid)
122 goto done; 121 goto done;
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 76201d6f6ce4..6fc735869c18 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -3585,23 +3585,10 @@ int btrfs_balance(struct btrfs_balance_control *bctl,
3585 } while (read_seqretry(&fs_info->profiles_lock, seq)); 3585 } while (read_seqretry(&fs_info->profiles_lock, seq));
3586 3586
3587 if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) { 3587 if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3588 int num_tolerated_disk_barrier_failures; 3588 fs_info->num_tolerated_disk_barrier_failures = min(
3589 u64 target = bctl->sys.target; 3589 btrfs_calc_num_tolerated_disk_barrier_failures(fs_info),
3590 3590 btrfs_get_num_tolerated_disk_barrier_failures(
3591 num_tolerated_disk_barrier_failures = 3591 bctl->sys.target));
3592 btrfs_calc_num_tolerated_disk_barrier_failures(fs_info);
3593 if (num_tolerated_disk_barrier_failures > 0 &&
3594 (target &
3595 (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID0 |
3596 BTRFS_AVAIL_ALLOC_BIT_SINGLE)))
3597 num_tolerated_disk_barrier_failures = 0;
3598 else if (num_tolerated_disk_barrier_failures > 1 &&
3599 (target &
3600 (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10)))
3601 num_tolerated_disk_barrier_failures = 1;
3602
3603 fs_info->num_tolerated_disk_barrier_failures =
3604 num_tolerated_disk_barrier_failures;
3605 } 3592 }
3606 3593
3607 ret = insert_balance_item(fs_info->tree_root, bctl); 3594 ret = insert_balance_item(fs_info->tree_root, bctl);
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index a268abfe60ac..9d23e788d1df 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -276,7 +276,7 @@ static void finish_read(struct ceph_osd_request *req, struct ceph_msg *msg)
276 for (i = 0; i < num_pages; i++) { 276 for (i = 0; i < num_pages; i++) {
277 struct page *page = osd_data->pages[i]; 277 struct page *page = osd_data->pages[i];
278 278
279 if (rc < 0) 279 if (rc < 0 && rc != ENOENT)
280 goto unlock; 280 goto unlock;
281 if (bytes < (int)PAGE_CACHE_SIZE) { 281 if (bytes < (int)PAGE_CACHE_SIZE) {
282 /* zero (remainder of) page */ 282 /* zero (remainder of) page */
@@ -717,8 +717,10 @@ static int ceph_writepages_start(struct address_space *mapping,
717 wbc->sync_mode == WB_SYNC_NONE ? "NONE" : 717 wbc->sync_mode == WB_SYNC_NONE ? "NONE" :
718 (wbc->sync_mode == WB_SYNC_ALL ? "ALL" : "HOLD")); 718 (wbc->sync_mode == WB_SYNC_ALL ? "ALL" : "HOLD"));
719 719
720 if (fsc->mount_state == CEPH_MOUNT_SHUTDOWN) { 720 if (ACCESS_ONCE(fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) {
721 pr_warn("writepage_start %p on forced umount\n", inode); 721 pr_warn("writepage_start %p on forced umount\n", inode);
722 truncate_pagecache(inode, 0);
723 mapping_set_error(mapping, -EIO);
722 return -EIO; /* we're in a forced umount, don't write! */ 724 return -EIO; /* we're in a forced umount, don't write! */
723 } 725 }
724 if (fsc->mount_options->wsize && fsc->mount_options->wsize < wsize) 726 if (fsc->mount_options->wsize && fsc->mount_options->wsize < wsize)
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index ddd5e9471290..27b566874bc1 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -2413,6 +2413,14 @@ again:
2413 goto out_unlock; 2413 goto out_unlock;
2414 } 2414 }
2415 2415
2416 if (!__ceph_is_any_caps(ci) &&
2417 ACCESS_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) {
2418 dout("get_cap_refs %p forced umount\n", inode);
2419 *err = -EIO;
2420 ret = 1;
2421 goto out_unlock;
2422 }
2423
2416 dout("get_cap_refs %p have %s needed %s\n", inode, 2424 dout("get_cap_refs %p have %s needed %s\n", inode,
2417 ceph_cap_string(have), ceph_cap_string(need)); 2425 ceph_cap_string(have), ceph_cap_string(need));
2418 } 2426 }
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index 8b79d87eaf46..0c62868b5c56 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -136,7 +136,6 @@ int ceph_open(struct inode *inode, struct file *file)
136 struct ceph_mds_client *mdsc = fsc->mdsc; 136 struct ceph_mds_client *mdsc = fsc->mdsc;
137 struct ceph_mds_request *req; 137 struct ceph_mds_request *req;
138 struct ceph_file_info *cf = file->private_data; 138 struct ceph_file_info *cf = file->private_data;
139 struct inode *parent_inode = NULL;
140 int err; 139 int err;
141 int flags, fmode, wanted; 140 int flags, fmode, wanted;
142 141
@@ -210,10 +209,7 @@ int ceph_open(struct inode *inode, struct file *file)
210 ihold(inode); 209 ihold(inode);
211 210
212 req->r_num_caps = 1; 211 req->r_num_caps = 1;
213 if (flags & O_CREAT) 212 err = ceph_mdsc_do_request(mdsc, NULL, req);
214 parent_inode = ceph_get_dentry_parent_inode(file->f_path.dentry);
215 err = ceph_mdsc_do_request(mdsc, parent_inode, req);
216 iput(parent_inode);
217 if (!err) 213 if (!err)
218 err = ceph_init_file(inode, file, req->r_fmode); 214 err = ceph_init_file(inode, file, req->r_fmode);
219 ceph_mdsc_put_request(req); 215 ceph_mdsc_put_request(req);
@@ -279,7 +275,7 @@ int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
279 if (err) 275 if (err)
280 goto out_req; 276 goto out_req;
281 277
282 if (err == 0 && (flags & O_CREAT) && !req->r_reply_info.head->is_dentry) 278 if ((flags & O_CREAT) && !req->r_reply_info.head->is_dentry)
283 err = ceph_handle_notrace_create(dir, dentry); 279 err = ceph_handle_notrace_create(dir, dentry);
284 280
285 if (d_unhashed(dentry)) { 281 if (d_unhashed(dentry)) {
@@ -956,6 +952,12 @@ static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
956 /* We can write back this queue in page reclaim */ 952 /* We can write back this queue in page reclaim */
957 current->backing_dev_info = inode_to_bdi(inode); 953 current->backing_dev_info = inode_to_bdi(inode);
958 954
955 if (iocb->ki_flags & IOCB_APPEND) {
956 err = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
957 if (err < 0)
958 goto out;
959 }
960
959 err = generic_write_checks(iocb, from); 961 err = generic_write_checks(iocb, from);
960 if (err <= 0) 962 if (err <= 0)
961 goto out; 963 goto out;
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index 6aa07af67603..51cb02da75d9 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -2107,7 +2107,6 @@ static int __prepare_send_request(struct ceph_mds_client *mdsc,
2107 msg = create_request_message(mdsc, req, mds, drop_cap_releases); 2107 msg = create_request_message(mdsc, req, mds, drop_cap_releases);
2108 if (IS_ERR(msg)) { 2108 if (IS_ERR(msg)) {
2109 req->r_err = PTR_ERR(msg); 2109 req->r_err = PTR_ERR(msg);
2110 complete_request(mdsc, req);
2111 return PTR_ERR(msg); 2110 return PTR_ERR(msg);
2112 } 2111 }
2113 req->r_request = msg; 2112 req->r_request = msg;
@@ -2135,7 +2134,7 @@ static int __do_request(struct ceph_mds_client *mdsc,
2135{ 2134{
2136 struct ceph_mds_session *session = NULL; 2135 struct ceph_mds_session *session = NULL;
2137 int mds = -1; 2136 int mds = -1;
2138 int err = -EAGAIN; 2137 int err = 0;
2139 2138
2140 if (req->r_err || req->r_got_result) { 2139 if (req->r_err || req->r_got_result) {
2141 if (req->r_aborted) 2140 if (req->r_aborted)
@@ -2149,6 +2148,11 @@ static int __do_request(struct ceph_mds_client *mdsc,
2149 err = -EIO; 2148 err = -EIO;
2150 goto finish; 2149 goto finish;
2151 } 2150 }
2151 if (ACCESS_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) {
2152 dout("do_request forced umount\n");
2153 err = -EIO;
2154 goto finish;
2155 }
2152 2156
2153 put_request_session(req); 2157 put_request_session(req);
2154 2158
@@ -2196,13 +2200,15 @@ static int __do_request(struct ceph_mds_client *mdsc,
2196 2200
2197out_session: 2201out_session:
2198 ceph_put_mds_session(session); 2202 ceph_put_mds_session(session);
2203finish:
2204 if (err) {
2205 dout("__do_request early error %d\n", err);
2206 req->r_err = err;
2207 complete_request(mdsc, req);
2208 __unregister_request(mdsc, req);
2209 }
2199out: 2210out:
2200 return err; 2211 return err;
2201
2202finish:
2203 req->r_err = err;
2204 complete_request(mdsc, req);
2205 goto out;
2206} 2212}
2207 2213
2208/* 2214/*
@@ -2289,8 +2295,6 @@ int ceph_mdsc_do_request(struct ceph_mds_client *mdsc,
2289 2295
2290 if (req->r_err) { 2296 if (req->r_err) {
2291 err = req->r_err; 2297 err = req->r_err;
2292 __unregister_request(mdsc, req);
2293 dout("do_request early error %d\n", err);
2294 goto out; 2298 goto out;
2295 } 2299 }
2296 2300
@@ -2411,7 +2415,7 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
2411 mutex_unlock(&mdsc->mutex); 2415 mutex_unlock(&mdsc->mutex);
2412 goto out; 2416 goto out;
2413 } 2417 }
2414 if (req->r_got_safe && !head->safe) { 2418 if (req->r_got_safe) {
2415 pr_warn("got unsafe after safe on %llu from mds%d\n", 2419 pr_warn("got unsafe after safe on %llu from mds%d\n",
2416 tid, mds); 2420 tid, mds);
2417 mutex_unlock(&mdsc->mutex); 2421 mutex_unlock(&mdsc->mutex);
@@ -2520,8 +2524,7 @@ out_err:
2520 if (err) { 2524 if (err) {
2521 req->r_err = err; 2525 req->r_err = err;
2522 } else { 2526 } else {
2523 req->r_reply = msg; 2527 req->r_reply = ceph_msg_get(msg);
2524 ceph_msg_get(msg);
2525 req->r_got_result = true; 2528 req->r_got_result = true;
2526 } 2529 }
2527 } else { 2530 } else {
@@ -3555,7 +3558,7 @@ void ceph_mdsc_sync(struct ceph_mds_client *mdsc)
3555{ 3558{
3556 u64 want_tid, want_flush, want_snap; 3559 u64 want_tid, want_flush, want_snap;
3557 3560
3558 if (mdsc->fsc->mount_state == CEPH_MOUNT_SHUTDOWN) 3561 if (ACCESS_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN)
3559 return; 3562 return;
3560 3563
3561 dout("sync\n"); 3564 dout("sync\n");
@@ -3584,7 +3587,7 @@ void ceph_mdsc_sync(struct ceph_mds_client *mdsc)
3584 */ 3587 */
3585static bool done_closing_sessions(struct ceph_mds_client *mdsc) 3588static bool done_closing_sessions(struct ceph_mds_client *mdsc)
3586{ 3589{
3587 if (mdsc->fsc->mount_state == CEPH_MOUNT_SHUTDOWN) 3590 if (ACCESS_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN)
3588 return true; 3591 return true;
3589 return atomic_read(&mdsc->num_sessions) == 0; 3592 return atomic_read(&mdsc->num_sessions) == 0;
3590} 3593}
@@ -3643,6 +3646,34 @@ void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc)
3643 dout("stopped\n"); 3646 dout("stopped\n");
3644} 3647}
3645 3648
3649void ceph_mdsc_force_umount(struct ceph_mds_client *mdsc)
3650{
3651 struct ceph_mds_session *session;
3652 int mds;
3653
3654 dout("force umount\n");
3655
3656 mutex_lock(&mdsc->mutex);
3657 for (mds = 0; mds < mdsc->max_sessions; mds++) {
3658 session = __ceph_lookup_mds_session(mdsc, mds);
3659 if (!session)
3660 continue;
3661 mutex_unlock(&mdsc->mutex);
3662 mutex_lock(&session->s_mutex);
3663 __close_session(mdsc, session);
3664 if (session->s_state == CEPH_MDS_SESSION_CLOSING) {
3665 cleanup_session_requests(mdsc, session);
3666 remove_session_caps(session);
3667 }
3668 mutex_unlock(&session->s_mutex);
3669 ceph_put_mds_session(session);
3670 mutex_lock(&mdsc->mutex);
3671 kick_requests(mdsc, mds);
3672 }
3673 __wake_requests(mdsc, &mdsc->waiting_for_map);
3674 mutex_unlock(&mdsc->mutex);
3675}
3676
3646static void ceph_mdsc_stop(struct ceph_mds_client *mdsc) 3677static void ceph_mdsc_stop(struct ceph_mds_client *mdsc)
3647{ 3678{
3648 dout("stop\n"); 3679 dout("stop\n");
diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h
index 762757e6cebf..f575eafe2261 100644
--- a/fs/ceph/mds_client.h
+++ b/fs/ceph/mds_client.h
@@ -366,6 +366,7 @@ extern int ceph_send_msg_mds(struct ceph_mds_client *mdsc,
366 366
367extern int ceph_mdsc_init(struct ceph_fs_client *fsc); 367extern int ceph_mdsc_init(struct ceph_fs_client *fsc);
368extern void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc); 368extern void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc);
369extern void ceph_mdsc_force_umount(struct ceph_mds_client *mdsc);
369extern void ceph_mdsc_destroy(struct ceph_fs_client *fsc); 370extern void ceph_mdsc_destroy(struct ceph_fs_client *fsc);
370 371
371extern void ceph_mdsc_sync(struct ceph_mds_client *mdsc); 372extern void ceph_mdsc_sync(struct ceph_mds_client *mdsc);
diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
index 233d906aec02..4aa7122a8d38 100644
--- a/fs/ceph/snap.c
+++ b/fs/ceph/snap.c
@@ -338,12 +338,6 @@ static int build_snap_context(struct ceph_snap_realm *realm)
338 return 0; 338 return 0;
339 } 339 }
340 340
341 if (num == 0 && realm->seq == ceph_empty_snapc->seq) {
342 ceph_get_snap_context(ceph_empty_snapc);
343 snapc = ceph_empty_snapc;
344 goto done;
345 }
346
347 /* alloc new snap context */ 341 /* alloc new snap context */
348 err = -ENOMEM; 342 err = -ENOMEM;
349 if (num > (SIZE_MAX - sizeof(*snapc)) / sizeof(u64)) 343 if (num > (SIZE_MAX - sizeof(*snapc)) / sizeof(u64))
@@ -381,7 +375,6 @@ static int build_snap_context(struct ceph_snap_realm *realm)
381 realm->ino, realm, snapc, snapc->seq, 375 realm->ino, realm, snapc, snapc->seq,
382 (unsigned int) snapc->num_snaps); 376 (unsigned int) snapc->num_snaps);
383 377
384done:
385 ceph_put_snap_context(realm->cached_context); 378 ceph_put_snap_context(realm->cached_context);
386 realm->cached_context = snapc; 379 realm->cached_context = snapc;
387 return 0; 380 return 0;
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index 7b6bfcbf801c..f446afada328 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -708,6 +708,7 @@ static void ceph_umount_begin(struct super_block *sb)
708 if (!fsc) 708 if (!fsc)
709 return; 709 return;
710 fsc->mount_state = CEPH_MOUNT_SHUTDOWN; 710 fsc->mount_state = CEPH_MOUNT_SHUTDOWN;
711 ceph_mdsc_force_umount(fsc->mdsc);
711 return; 712 return;
712} 713}
713 714
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 6a1119e87fbb..e739950ca084 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -325,8 +325,11 @@ cifs_show_address(struct seq_file *s, struct TCP_Server_Info *server)
325static void 325static void
326cifs_show_security(struct seq_file *s, struct cifs_ses *ses) 326cifs_show_security(struct seq_file *s, struct cifs_ses *ses)
327{ 327{
328 if (ses->sectype == Unspecified) 328 if (ses->sectype == Unspecified) {
329 if (ses->user_name == NULL)
330 seq_puts(s, ",sec=none");
329 return; 331 return;
332 }
330 333
331 seq_puts(s, ",sec="); 334 seq_puts(s, ",sec=");
332 335
diff --git a/fs/cifs/ioctl.c b/fs/cifs/ioctl.c
index c63f5227b681..28a77bf1d559 100644
--- a/fs/cifs/ioctl.c
+++ b/fs/cifs/ioctl.c
@@ -67,6 +67,12 @@ static long cifs_ioctl_clone(unsigned int xid, struct file *dst_file,
67 goto out_drop_write; 67 goto out_drop_write;
68 } 68 }
69 69
70 if (src_file.file->f_op->unlocked_ioctl != cifs_ioctl) {
71 rc = -EBADF;
72 cifs_dbg(VFS, "src file seems to be from a different filesystem type\n");
73 goto out_fput;
74 }
75
70 if ((!src_file.file->private_data) || (!dst_file->private_data)) { 76 if ((!src_file.file->private_data) || (!dst_file->private_data)) {
71 rc = -EBADF; 77 rc = -EBADF;
72 cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n"); 78 cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
diff --git a/fs/dax.c b/fs/dax.c
index 93bf2f990ace..7ae6df7ea1d2 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -119,7 +119,8 @@ static ssize_t dax_io(struct inode *inode, struct iov_iter *iter,
119 size_t len; 119 size_t len;
120 if (pos == max) { 120 if (pos == max) {
121 unsigned blkbits = inode->i_blkbits; 121 unsigned blkbits = inode->i_blkbits;
122 sector_t block = pos >> blkbits; 122 long page = pos >> PAGE_SHIFT;
123 sector_t block = page << (PAGE_SHIFT - blkbits);
123 unsigned first = pos - (block << blkbits); 124 unsigned first = pos - (block << blkbits);
124 long size; 125 long size;
125 126
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 24489126f8ca..091a36444972 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -1380,6 +1380,10 @@ static long writeback_chunk_size(struct bdi_writeback *wb,
1380 * Write a portion of b_io inodes which belong to @sb. 1380 * Write a portion of b_io inodes which belong to @sb.
1381 * 1381 *
1382 * Return the number of pages and/or inodes written. 1382 * Return the number of pages and/or inodes written.
1383 *
1384 * NOTE! This is called with wb->list_lock held, and will
1385 * unlock and relock that for each inode it ends up doing
1386 * IO for.
1383 */ 1387 */
1384static long writeback_sb_inodes(struct super_block *sb, 1388static long writeback_sb_inodes(struct super_block *sb,
1385 struct bdi_writeback *wb, 1389 struct bdi_writeback *wb,
@@ -1398,9 +1402,7 @@ static long writeback_sb_inodes(struct super_block *sb,
1398 unsigned long start_time = jiffies; 1402 unsigned long start_time = jiffies;
1399 long write_chunk; 1403 long write_chunk;
1400 long wrote = 0; /* count both pages and inodes */ 1404 long wrote = 0; /* count both pages and inodes */
1401 struct blk_plug plug;
1402 1405
1403 blk_start_plug(&plug);
1404 while (!list_empty(&wb->b_io)) { 1406 while (!list_empty(&wb->b_io)) {
1405 struct inode *inode = wb_inode(wb->b_io.prev); 1407 struct inode *inode = wb_inode(wb->b_io.prev);
1406 1408
@@ -1479,6 +1481,21 @@ static long writeback_sb_inodes(struct super_block *sb,
1479 wbc_detach_inode(&wbc); 1481 wbc_detach_inode(&wbc);
1480 work->nr_pages -= write_chunk - wbc.nr_to_write; 1482 work->nr_pages -= write_chunk - wbc.nr_to_write;
1481 wrote += write_chunk - wbc.nr_to_write; 1483 wrote += write_chunk - wbc.nr_to_write;
1484
1485 if (need_resched()) {
1486 /*
1487 * We're trying to balance between building up a nice
1488 * long list of IOs to improve our merge rate, and
1489 * getting those IOs out quickly for anyone throttling
1490 * in balance_dirty_pages(). cond_resched() doesn't
1491 * unplug, so get our IOs out the door before we
1492 * give up the CPU.
1493 */
1494 blk_flush_plug(current);
1495 cond_resched();
1496 }
1497
1498
1482 spin_lock(&wb->list_lock); 1499 spin_lock(&wb->list_lock);
1483 spin_lock(&inode->i_lock); 1500 spin_lock(&inode->i_lock);
1484 if (!(inode->i_state & I_DIRTY_ALL)) 1501 if (!(inode->i_state & I_DIRTY_ALL))
@@ -1486,7 +1503,7 @@ static long writeback_sb_inodes(struct super_block *sb,
1486 requeue_inode(inode, wb, &wbc); 1503 requeue_inode(inode, wb, &wbc);
1487 inode_sync_complete(inode); 1504 inode_sync_complete(inode);
1488 spin_unlock(&inode->i_lock); 1505 spin_unlock(&inode->i_lock);
1489 cond_resched_lock(&wb->list_lock); 1506
1490 /* 1507 /*
1491 * bail out to wb_writeback() often enough to check 1508 * bail out to wb_writeback() often enough to check
1492 * background threshold and other termination conditions. 1509 * background threshold and other termination conditions.
@@ -1498,7 +1515,6 @@ static long writeback_sb_inodes(struct super_block *sb,
1498 break; 1515 break;
1499 } 1516 }
1500 } 1517 }
1501 blk_finish_plug(&plug);
1502 return wrote; 1518 return wrote;
1503} 1519}
1504 1520
@@ -1545,12 +1561,15 @@ static long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages,
1545 .range_cyclic = 1, 1561 .range_cyclic = 1,
1546 .reason = reason, 1562 .reason = reason,
1547 }; 1563 };
1564 struct blk_plug plug;
1548 1565
1566 blk_start_plug(&plug);
1549 spin_lock(&wb->list_lock); 1567 spin_lock(&wb->list_lock);
1550 if (list_empty(&wb->b_io)) 1568 if (list_empty(&wb->b_io))
1551 queue_io(wb, &work); 1569 queue_io(wb, &work);
1552 __writeback_inodes_wb(wb, &work); 1570 __writeback_inodes_wb(wb, &work);
1553 spin_unlock(&wb->list_lock); 1571 spin_unlock(&wb->list_lock);
1572 blk_finish_plug(&plug);
1554 1573
1555 return nr_pages - work.nr_pages; 1574 return nr_pages - work.nr_pages;
1556} 1575}
@@ -1578,10 +1597,12 @@ static long wb_writeback(struct bdi_writeback *wb,
1578 unsigned long oldest_jif; 1597 unsigned long oldest_jif;
1579 struct inode *inode; 1598 struct inode *inode;
1580 long progress; 1599 long progress;
1600 struct blk_plug plug;
1581 1601
1582 oldest_jif = jiffies; 1602 oldest_jif = jiffies;
1583 work->older_than_this = &oldest_jif; 1603 work->older_than_this = &oldest_jif;
1584 1604
1605 blk_start_plug(&plug);
1585 spin_lock(&wb->list_lock); 1606 spin_lock(&wb->list_lock);
1586 for (;;) { 1607 for (;;) {
1587 /* 1608 /*
@@ -1661,6 +1682,7 @@ static long wb_writeback(struct bdi_writeback *wb,
1661 } 1682 }
1662 } 1683 }
1663 spin_unlock(&wb->list_lock); 1684 spin_unlock(&wb->list_lock);
1685 blk_finish_plug(&plug);
1664 1686
1665 return nr_pages - work->nr_pages; 1687 return nr_pages - work->nr_pages;
1666} 1688}
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index a38e38f7b6fc..9bd1244caf38 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -34,6 +34,7 @@
34#include <linux/percpu.h> 34#include <linux/percpu.h>
35#include <linux/list_sort.h> 35#include <linux/list_sort.h>
36#include <linux/lockref.h> 36#include <linux/lockref.h>
37#include <linux/rhashtable.h>
37 38
38#include "gfs2.h" 39#include "gfs2.h"
39#include "incore.h" 40#include "incore.h"
@@ -50,9 +51,8 @@
50#include "trace_gfs2.h" 51#include "trace_gfs2.h"
51 52
52struct gfs2_glock_iter { 53struct gfs2_glock_iter {
53 int hash; /* hash bucket index */
54 unsigned nhash; /* Index within current bucket */
55 struct gfs2_sbd *sdp; /* incore superblock */ 54 struct gfs2_sbd *sdp; /* incore superblock */
55 struct rhashtable_iter hti; /* rhashtable iterator */
56 struct gfs2_glock *gl; /* current glock struct */ 56 struct gfs2_glock *gl; /* current glock struct */
57 loff_t last_pos; /* last position */ 57 loff_t last_pos; /* last position */
58}; 58};
@@ -70,44 +70,19 @@ static DEFINE_SPINLOCK(lru_lock);
70 70
71#define GFS2_GL_HASH_SHIFT 15 71#define GFS2_GL_HASH_SHIFT 15
72#define GFS2_GL_HASH_SIZE (1 << GFS2_GL_HASH_SHIFT) 72#define GFS2_GL_HASH_SIZE (1 << GFS2_GL_HASH_SHIFT)
73#define GFS2_GL_HASH_MASK (GFS2_GL_HASH_SIZE - 1)
74 73
75static struct hlist_bl_head gl_hash_table[GFS2_GL_HASH_SIZE]; 74static struct rhashtable_params ht_parms = {
76static struct dentry *gfs2_root; 75 .nelem_hint = GFS2_GL_HASH_SIZE * 3 / 4,
77 76 .key_len = sizeof(struct lm_lockname),
78/** 77 .key_offset = offsetof(struct gfs2_glock, gl_name),
79 * gl_hash() - Turn glock number into hash bucket number 78 .head_offset = offsetof(struct gfs2_glock, gl_node),
80 * @lock: The glock number 79};
81 *
82 * Returns: The number of the corresponding hash bucket
83 */
84
85static unsigned int gl_hash(const struct gfs2_sbd *sdp,
86 const struct lm_lockname *name)
87{
88 unsigned int h;
89
90 h = jhash(&name->ln_number, sizeof(u64), 0);
91 h = jhash(&name->ln_type, sizeof(unsigned int), h);
92 h = jhash(&sdp, sizeof(struct gfs2_sbd *), h);
93 h &= GFS2_GL_HASH_MASK;
94
95 return h;
96}
97
98static inline void spin_lock_bucket(unsigned int hash)
99{
100 hlist_bl_lock(&gl_hash_table[hash]);
101}
102 80
103static inline void spin_unlock_bucket(unsigned int hash) 81static struct rhashtable gl_hash_table;
104{
105 hlist_bl_unlock(&gl_hash_table[hash]);
106}
107 82
108static void gfs2_glock_dealloc(struct rcu_head *rcu) 83void gfs2_glock_free(struct gfs2_glock *gl)
109{ 84{
110 struct gfs2_glock *gl = container_of(rcu, struct gfs2_glock, gl_rcu); 85 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
111 86
112 if (gl->gl_ops->go_flags & GLOF_ASPACE) { 87 if (gl->gl_ops->go_flags & GLOF_ASPACE) {
113 kmem_cache_free(gfs2_glock_aspace_cachep, gl); 88 kmem_cache_free(gfs2_glock_aspace_cachep, gl);
@@ -115,13 +90,6 @@ static void gfs2_glock_dealloc(struct rcu_head *rcu)
115 kfree(gl->gl_lksb.sb_lvbptr); 90 kfree(gl->gl_lksb.sb_lvbptr);
116 kmem_cache_free(gfs2_glock_cachep, gl); 91 kmem_cache_free(gfs2_glock_cachep, gl);
117 } 92 }
118}
119
120void gfs2_glock_free(struct gfs2_glock *gl)
121{
122 struct gfs2_sbd *sdp = gl->gl_sbd;
123
124 call_rcu(&gl->gl_rcu, gfs2_glock_dealloc);
125 if (atomic_dec_and_test(&sdp->sd_glock_disposal)) 93 if (atomic_dec_and_test(&sdp->sd_glock_disposal))
126 wake_up(&sdp->sd_glock_wait); 94 wake_up(&sdp->sd_glock_wait);
127} 95}
@@ -192,7 +160,7 @@ static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
192 160
193void gfs2_glock_put(struct gfs2_glock *gl) 161void gfs2_glock_put(struct gfs2_glock *gl)
194{ 162{
195 struct gfs2_sbd *sdp = gl->gl_sbd; 163 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
196 struct address_space *mapping = gfs2_glock2aspace(gl); 164 struct address_space *mapping = gfs2_glock2aspace(gl);
197 165
198 if (lockref_put_or_lock(&gl->gl_lockref)) 166 if (lockref_put_or_lock(&gl->gl_lockref))
@@ -202,9 +170,7 @@ void gfs2_glock_put(struct gfs2_glock *gl)
202 170
203 gfs2_glock_remove_from_lru(gl); 171 gfs2_glock_remove_from_lru(gl);
204 spin_unlock(&gl->gl_lockref.lock); 172 spin_unlock(&gl->gl_lockref.lock);
205 spin_lock_bucket(gl->gl_hash); 173 rhashtable_remove_fast(&gl_hash_table, &gl->gl_node, ht_parms);
206 hlist_bl_del_rcu(&gl->gl_list);
207 spin_unlock_bucket(gl->gl_hash);
208 GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders)); 174 GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
209 GLOCK_BUG_ON(gl, mapping && mapping->nrpages); 175 GLOCK_BUG_ON(gl, mapping && mapping->nrpages);
210 trace_gfs2_glock_put(gl); 176 trace_gfs2_glock_put(gl);
@@ -212,33 +178,6 @@ void gfs2_glock_put(struct gfs2_glock *gl)
212} 178}
213 179
214/** 180/**
215 * search_bucket() - Find struct gfs2_glock by lock number
216 * @bucket: the bucket to search
217 * @name: The lock name
218 *
219 * Returns: NULL, or the struct gfs2_glock with the requested number
220 */
221
222static struct gfs2_glock *search_bucket(unsigned int hash,
223 const struct gfs2_sbd *sdp,
224 const struct lm_lockname *name)
225{
226 struct gfs2_glock *gl;
227 struct hlist_bl_node *h;
228
229 hlist_bl_for_each_entry_rcu(gl, h, &gl_hash_table[hash], gl_list) {
230 if (!lm_name_equal(&gl->gl_name, name))
231 continue;
232 if (gl->gl_sbd != sdp)
233 continue;
234 if (lockref_get_not_dead(&gl->gl_lockref))
235 return gl;
236 }
237
238 return NULL;
239}
240
241/**
242 * may_grant - check if its ok to grant a new lock 181 * may_grant - check if its ok to grant a new lock
243 * @gl: The glock 182 * @gl: The glock
244 * @gh: The lock request which we wish to grant 183 * @gh: The lock request which we wish to grant
@@ -506,7 +445,7 @@ __releases(&gl->gl_spin)
506__acquires(&gl->gl_spin) 445__acquires(&gl->gl_spin)
507{ 446{
508 const struct gfs2_glock_operations *glops = gl->gl_ops; 447 const struct gfs2_glock_operations *glops = gl->gl_ops;
509 struct gfs2_sbd *sdp = gl->gl_sbd; 448 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
510 unsigned int lck_flags = gh ? gh->gh_flags : 0; 449 unsigned int lck_flags = gh ? gh->gh_flags : 0;
511 int ret; 450 int ret;
512 451
@@ -628,7 +567,7 @@ out_unlock:
628static void delete_work_func(struct work_struct *work) 567static void delete_work_func(struct work_struct *work)
629{ 568{
630 struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_delete); 569 struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_delete);
631 struct gfs2_sbd *sdp = gl->gl_sbd; 570 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
632 struct gfs2_inode *ip; 571 struct gfs2_inode *ip;
633 struct inode *inode; 572 struct inode *inode;
634 u64 no_addr = gl->gl_name.ln_number; 573 u64 no_addr = gl->gl_name.ln_number;
@@ -704,15 +643,17 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
704 struct gfs2_glock **glp) 643 struct gfs2_glock **glp)
705{ 644{
706 struct super_block *s = sdp->sd_vfs; 645 struct super_block *s = sdp->sd_vfs;
707 struct lm_lockname name = { .ln_number = number, .ln_type = glops->go_type }; 646 struct lm_lockname name = { .ln_number = number,
708 struct gfs2_glock *gl, *tmp; 647 .ln_type = glops->go_type,
709 unsigned int hash = gl_hash(sdp, &name); 648 .ln_sbd = sdp };
649 struct gfs2_glock *gl, *tmp = NULL;
710 struct address_space *mapping; 650 struct address_space *mapping;
711 struct kmem_cache *cachep; 651 struct kmem_cache *cachep;
652 int ret, tries = 0;
712 653
713 rcu_read_lock(); 654 gl = rhashtable_lookup_fast(&gl_hash_table, &name, ht_parms);
714 gl = search_bucket(hash, sdp, &name); 655 if (gl && !lockref_get_not_dead(&gl->gl_lockref))
715 rcu_read_unlock(); 656 gl = NULL;
716 657
717 *glp = gl; 658 *glp = gl;
718 if (gl) 659 if (gl)
@@ -739,14 +680,13 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
739 } 680 }
740 681
741 atomic_inc(&sdp->sd_glock_disposal); 682 atomic_inc(&sdp->sd_glock_disposal);
742 gl->gl_sbd = sdp; 683 gl->gl_node.next = NULL;
743 gl->gl_flags = 0; 684 gl->gl_flags = 0;
744 gl->gl_name = name; 685 gl->gl_name = name;
745 gl->gl_lockref.count = 1; 686 gl->gl_lockref.count = 1;
746 gl->gl_state = LM_ST_UNLOCKED; 687 gl->gl_state = LM_ST_UNLOCKED;
747 gl->gl_target = LM_ST_UNLOCKED; 688 gl->gl_target = LM_ST_UNLOCKED;
748 gl->gl_demote_state = LM_ST_EXCLUSIVE; 689 gl->gl_demote_state = LM_ST_EXCLUSIVE;
749 gl->gl_hash = hash;
750 gl->gl_ops = glops; 690 gl->gl_ops = glops;
751 gl->gl_dstamp = ktime_set(0, 0); 691 gl->gl_dstamp = ktime_set(0, 0);
752 preempt_disable(); 692 preempt_disable();
@@ -771,22 +711,34 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
771 mapping->writeback_index = 0; 711 mapping->writeback_index = 0;
772 } 712 }
773 713
774 spin_lock_bucket(hash); 714again:
775 tmp = search_bucket(hash, sdp, &name); 715 ret = rhashtable_lookup_insert_fast(&gl_hash_table, &gl->gl_node,
776 if (tmp) { 716 ht_parms);
777 spin_unlock_bucket(hash); 717 if (ret == 0) {
778 kfree(gl->gl_lksb.sb_lvbptr); 718 *glp = gl;
779 kmem_cache_free(cachep, gl); 719 return 0;
780 atomic_dec(&sdp->sd_glock_disposal);
781 gl = tmp;
782 } else {
783 hlist_bl_add_head_rcu(&gl->gl_list, &gl_hash_table[hash]);
784 spin_unlock_bucket(hash);
785 } 720 }
786 721
787 *glp = gl; 722 if (ret == -EEXIST) {
723 ret = 0;
724 tmp = rhashtable_lookup_fast(&gl_hash_table, &name, ht_parms);
725 if (tmp == NULL || !lockref_get_not_dead(&tmp->gl_lockref)) {
726 if (++tries < 100) {
727 cond_resched();
728 goto again;
729 }
730 tmp = NULL;
731 ret = -ENOMEM;
732 }
733 } else {
734 WARN_ON_ONCE(ret);
735 }
736 kfree(gl->gl_lksb.sb_lvbptr);
737 kmem_cache_free(cachep, gl);
738 atomic_dec(&sdp->sd_glock_disposal);
739 *glp = tmp;
788 740
789 return 0; 741 return ret;
790} 742}
791 743
792/** 744/**
@@ -928,7 +880,7 @@ __releases(&gl->gl_spin)
928__acquires(&gl->gl_spin) 880__acquires(&gl->gl_spin)
929{ 881{
930 struct gfs2_glock *gl = gh->gh_gl; 882 struct gfs2_glock *gl = gh->gh_gl;
931 struct gfs2_sbd *sdp = gl->gl_sbd; 883 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
932 struct list_head *insert_pt = NULL; 884 struct list_head *insert_pt = NULL;
933 struct gfs2_holder *gh2; 885 struct gfs2_holder *gh2;
934 int try_futile = 0; 886 int try_futile = 0;
@@ -1006,7 +958,7 @@ trap_recursive:
1006int gfs2_glock_nq(struct gfs2_holder *gh) 958int gfs2_glock_nq(struct gfs2_holder *gh)
1007{ 959{
1008 struct gfs2_glock *gl = gh->gh_gl; 960 struct gfs2_glock *gl = gh->gh_gl;
1009 struct gfs2_sbd *sdp = gl->gl_sbd; 961 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
1010 int error = 0; 962 int error = 0;
1011 963
1012 if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) 964 if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
@@ -1313,7 +1265,7 @@ static int gfs2_should_freeze(const struct gfs2_glock *gl)
1313 1265
1314void gfs2_glock_complete(struct gfs2_glock *gl, int ret) 1266void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
1315{ 1267{
1316 struct lm_lockstruct *ls = &gl->gl_sbd->sd_lockstruct; 1268 struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct;
1317 1269
1318 spin_lock(&gl->gl_spin); 1270 spin_lock(&gl->gl_spin);
1319 gl->gl_reply = ret; 1271 gl->gl_reply = ret;
@@ -1462,31 +1414,26 @@ static struct shrinker glock_shrinker = {
1462 * 1414 *
1463 */ 1415 */
1464 1416
1465static void examine_bucket(glock_examiner examiner, const struct gfs2_sbd *sdp, 1417static void glock_hash_walk(glock_examiner examiner, const struct gfs2_sbd *sdp)
1466 unsigned int hash)
1467{ 1418{
1468 struct gfs2_glock *gl; 1419 struct gfs2_glock *gl;
1469 struct hlist_bl_head *head = &gl_hash_table[hash]; 1420 struct rhash_head *pos, *next;
1470 struct hlist_bl_node *pos; 1421 const struct bucket_table *tbl;
1422 int i;
1471 1423
1472 rcu_read_lock(); 1424 rcu_read_lock();
1473 hlist_bl_for_each_entry_rcu(gl, pos, head, gl_list) { 1425 tbl = rht_dereference_rcu(gl_hash_table.tbl, &gl_hash_table);
1474 if ((gl->gl_sbd == sdp) && lockref_get_not_dead(&gl->gl_lockref)) 1426 for (i = 0; i < tbl->size; i++) {
1475 examiner(gl); 1427 rht_for_each_entry_safe(gl, pos, next, tbl, i, gl_node) {
1428 if ((gl->gl_name.ln_sbd == sdp) &&
1429 lockref_get_not_dead(&gl->gl_lockref))
1430 examiner(gl);
1431 }
1476 } 1432 }
1477 rcu_read_unlock(); 1433 rcu_read_unlock();
1478 cond_resched(); 1434 cond_resched();
1479} 1435}
1480 1436
1481static void glock_hash_walk(glock_examiner examiner, const struct gfs2_sbd *sdp)
1482{
1483 unsigned x;
1484
1485 for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
1486 examine_bucket(examiner, sdp, x);
1487}
1488
1489
1490/** 1437/**
1491 * thaw_glock - thaw out a glock which has an unprocessed reply waiting 1438 * thaw_glock - thaw out a glock which has an unprocessed reply waiting
1492 * @gl: The glock to thaw 1439 * @gl: The glock to thaw
@@ -1569,7 +1516,7 @@ void gfs2_glock_finish_truncate(struct gfs2_inode *ip)
1569 int ret; 1516 int ret;
1570 1517
1571 ret = gfs2_truncatei_resume(ip); 1518 ret = gfs2_truncatei_resume(ip);
1572 gfs2_assert_withdraw(gl->gl_sbd, ret == 0); 1519 gfs2_assert_withdraw(gl->gl_name.ln_sbd, ret == 0);
1573 1520
1574 spin_lock(&gl->gl_spin); 1521 spin_lock(&gl->gl_spin);
1575 clear_bit(GLF_LOCK, &gl->gl_flags); 1522 clear_bit(GLF_LOCK, &gl->gl_flags);
@@ -1733,17 +1680,17 @@ static int gfs2_glstats_seq_show(struct seq_file *seq, void *iter_ptr)
1733{ 1680{
1734 struct gfs2_glock *gl = iter_ptr; 1681 struct gfs2_glock *gl = iter_ptr;
1735 1682
1736 seq_printf(seq, "G: n:%u/%llx rtt:%lld/%lld rttb:%lld/%lld irt:%lld/%lld dcnt: %lld qcnt: %lld\n", 1683 seq_printf(seq, "G: n:%u/%llx rtt:%llu/%llu rttb:%llu/%llu irt:%llu/%llu dcnt: %llu qcnt: %llu\n",
1737 gl->gl_name.ln_type, 1684 gl->gl_name.ln_type,
1738 (unsigned long long)gl->gl_name.ln_number, 1685 (unsigned long long)gl->gl_name.ln_number,
1739 (long long)gl->gl_stats.stats[GFS2_LKS_SRTT], 1686 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTT],
1740 (long long)gl->gl_stats.stats[GFS2_LKS_SRTTVAR], 1687 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTVAR],
1741 (long long)gl->gl_stats.stats[GFS2_LKS_SRTTB], 1688 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTB],
1742 (long long)gl->gl_stats.stats[GFS2_LKS_SRTTVARB], 1689 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTVARB],
1743 (long long)gl->gl_stats.stats[GFS2_LKS_SIRT], 1690 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SIRT],
1744 (long long)gl->gl_stats.stats[GFS2_LKS_SIRTVAR], 1691 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SIRTVAR],
1745 (long long)gl->gl_stats.stats[GFS2_LKS_DCOUNT], 1692 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_DCOUNT],
1746 (long long)gl->gl_stats.stats[GFS2_LKS_QCOUNT]); 1693 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_QCOUNT]);
1747 return 0; 1694 return 0;
1748} 1695}
1749 1696
@@ -1776,11 +1723,10 @@ static const char *gfs2_stype[] = {
1776 1723
1777static int gfs2_sbstats_seq_show(struct seq_file *seq, void *iter_ptr) 1724static int gfs2_sbstats_seq_show(struct seq_file *seq, void *iter_ptr)
1778{ 1725{
1779 struct gfs2_glock_iter *gi = seq->private; 1726 struct gfs2_sbd *sdp = seq->private;
1780 struct gfs2_sbd *sdp = gi->sdp; 1727 loff_t pos = *(loff_t *)iter_ptr;
1781 unsigned index = gi->hash >> 3; 1728 unsigned index = pos >> 3;
1782 unsigned subindex = gi->hash & 0x07; 1729 unsigned subindex = pos & 0x07;
1783 s64 value;
1784 int i; 1730 int i;
1785 1731
1786 if (index == 0 && subindex != 0) 1732 if (index == 0 && subindex != 0)
@@ -1791,12 +1737,12 @@ static int gfs2_sbstats_seq_show(struct seq_file *seq, void *iter_ptr)
1791 1737
1792 for_each_possible_cpu(i) { 1738 for_each_possible_cpu(i) {
1793 const struct gfs2_pcpu_lkstats *lkstats = per_cpu_ptr(sdp->sd_lkstats, i); 1739 const struct gfs2_pcpu_lkstats *lkstats = per_cpu_ptr(sdp->sd_lkstats, i);
1794 if (index == 0) { 1740
1795 value = i; 1741 if (index == 0)
1796 } else { 1742 seq_printf(seq, " %15u", i);
1797 value = lkstats->lkstats[index - 1].stats[subindex]; 1743 else
1798 } 1744 seq_printf(seq, " %15llu", (unsigned long long)lkstats->
1799 seq_printf(seq, " %15lld", (long long)value); 1745 lkstats[index - 1].stats[subindex]);
1800 } 1746 }
1801 seq_putc(seq, '\n'); 1747 seq_putc(seq, '\n');
1802 return 0; 1748 return 0;
@@ -1804,20 +1750,24 @@ static int gfs2_sbstats_seq_show(struct seq_file *seq, void *iter_ptr)
1804 1750
1805int __init gfs2_glock_init(void) 1751int __init gfs2_glock_init(void)
1806{ 1752{
1807 unsigned i; 1753 int ret;
1808 for(i = 0; i < GFS2_GL_HASH_SIZE; i++) { 1754
1809 INIT_HLIST_BL_HEAD(&gl_hash_table[i]); 1755 ret = rhashtable_init(&gl_hash_table, &ht_parms);
1810 } 1756 if (ret < 0)
1757 return ret;
1811 1758
1812 glock_workqueue = alloc_workqueue("glock_workqueue", WQ_MEM_RECLAIM | 1759 glock_workqueue = alloc_workqueue("glock_workqueue", WQ_MEM_RECLAIM |
1813 WQ_HIGHPRI | WQ_FREEZABLE, 0); 1760 WQ_HIGHPRI | WQ_FREEZABLE, 0);
1814 if (!glock_workqueue) 1761 if (!glock_workqueue) {
1762 rhashtable_destroy(&gl_hash_table);
1815 return -ENOMEM; 1763 return -ENOMEM;
1764 }
1816 gfs2_delete_workqueue = alloc_workqueue("delete_workqueue", 1765 gfs2_delete_workqueue = alloc_workqueue("delete_workqueue",
1817 WQ_MEM_RECLAIM | WQ_FREEZABLE, 1766 WQ_MEM_RECLAIM | WQ_FREEZABLE,
1818 0); 1767 0);
1819 if (!gfs2_delete_workqueue) { 1768 if (!gfs2_delete_workqueue) {
1820 destroy_workqueue(glock_workqueue); 1769 destroy_workqueue(glock_workqueue);
1770 rhashtable_destroy(&gl_hash_table);
1821 return -ENOMEM; 1771 return -ENOMEM;
1822 } 1772 }
1823 1773
@@ -1829,72 +1779,41 @@ int __init gfs2_glock_init(void)
1829void gfs2_glock_exit(void) 1779void gfs2_glock_exit(void)
1830{ 1780{
1831 unregister_shrinker(&glock_shrinker); 1781 unregister_shrinker(&glock_shrinker);
1782 rhashtable_destroy(&gl_hash_table);
1832 destroy_workqueue(glock_workqueue); 1783 destroy_workqueue(glock_workqueue);
1833 destroy_workqueue(gfs2_delete_workqueue); 1784 destroy_workqueue(gfs2_delete_workqueue);
1834} 1785}
1835 1786
1836static inline struct gfs2_glock *glock_hash_chain(unsigned hash) 1787static void gfs2_glock_iter_next(struct gfs2_glock_iter *gi)
1837{ 1788{
1838 return hlist_bl_entry(hlist_bl_first_rcu(&gl_hash_table[hash]),
1839 struct gfs2_glock, gl_list);
1840}
1841
1842static inline struct gfs2_glock *glock_hash_next(struct gfs2_glock *gl)
1843{
1844 return hlist_bl_entry(rcu_dereference(gl->gl_list.next),
1845 struct gfs2_glock, gl_list);
1846}
1847
1848static int gfs2_glock_iter_next(struct gfs2_glock_iter *gi)
1849{
1850 struct gfs2_glock *gl;
1851
1852 do { 1789 do {
1853 gl = gi->gl; 1790 gi->gl = rhashtable_walk_next(&gi->hti);
1854 if (gl) { 1791 if (IS_ERR(gi->gl)) {
1855 gi->gl = glock_hash_next(gl); 1792 if (PTR_ERR(gi->gl) == -EAGAIN)
1856 gi->nhash++; 1793 continue;
1857 } else { 1794 gi->gl = NULL;
1858 if (gi->hash >= GFS2_GL_HASH_SIZE) {
1859 rcu_read_unlock();
1860 return 1;
1861 }
1862 gi->gl = glock_hash_chain(gi->hash);
1863 gi->nhash = 0;
1864 }
1865 while (gi->gl == NULL) {
1866 gi->hash++;
1867 if (gi->hash >= GFS2_GL_HASH_SIZE) {
1868 rcu_read_unlock();
1869 return 1;
1870 }
1871 gi->gl = glock_hash_chain(gi->hash);
1872 gi->nhash = 0;
1873 } 1795 }
1874 /* Skip entries for other sb and dead entries */ 1796 /* Skip entries for other sb and dead entries */
1875 } while (gi->sdp != gi->gl->gl_sbd || 1797 } while ((gi->gl) && ((gi->sdp != gi->gl->gl_name.ln_sbd) ||
1876 __lockref_is_dead(&gi->gl->gl_lockref)); 1798 __lockref_is_dead(&gi->gl->gl_lockref)));
1877
1878 return 0;
1879} 1799}
1880 1800
1881static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos) 1801static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos)
1882{ 1802{
1883 struct gfs2_glock_iter *gi = seq->private; 1803 struct gfs2_glock_iter *gi = seq->private;
1884 loff_t n = *pos; 1804 loff_t n = *pos;
1805 int ret;
1885 1806
1886 if (gi->last_pos <= *pos) 1807 if (gi->last_pos <= *pos)
1887 n = gi->nhash + (*pos - gi->last_pos); 1808 n = (*pos - gi->last_pos);
1888 else
1889 gi->hash = 0;
1890 1809
1891 gi->nhash = 0; 1810 ret = rhashtable_walk_start(&gi->hti);
1892 rcu_read_lock(); 1811 if (ret)
1812 return NULL;
1893 1813
1894 do { 1814 do {
1895 if (gfs2_glock_iter_next(gi)) 1815 gfs2_glock_iter_next(gi);
1896 return NULL; 1816 } while (gi->gl && n--);
1897 } while (n--);
1898 1817
1899 gi->last_pos = *pos; 1818 gi->last_pos = *pos;
1900 return gi->gl; 1819 return gi->gl;
@@ -1907,9 +1826,7 @@ static void *gfs2_glock_seq_next(struct seq_file *seq, void *iter_ptr,
1907 1826
1908 (*pos)++; 1827 (*pos)++;
1909 gi->last_pos = *pos; 1828 gi->last_pos = *pos;
1910 if (gfs2_glock_iter_next(gi)) 1829 gfs2_glock_iter_next(gi);
1911 return NULL;
1912
1913 return gi->gl; 1830 return gi->gl;
1914} 1831}
1915 1832
@@ -1917,9 +1834,8 @@ static void gfs2_glock_seq_stop(struct seq_file *seq, void *iter_ptr)
1917{ 1834{
1918 struct gfs2_glock_iter *gi = seq->private; 1835 struct gfs2_glock_iter *gi = seq->private;
1919 1836
1920 if (gi->gl)
1921 rcu_read_unlock();
1922 gi->gl = NULL; 1837 gi->gl = NULL;
1838 rhashtable_walk_stop(&gi->hti);
1923} 1839}
1924 1840
1925static int gfs2_glock_seq_show(struct seq_file *seq, void *iter_ptr) 1841static int gfs2_glock_seq_show(struct seq_file *seq, void *iter_ptr)
@@ -1930,26 +1846,19 @@ static int gfs2_glock_seq_show(struct seq_file *seq, void *iter_ptr)
1930 1846
1931static void *gfs2_sbstats_seq_start(struct seq_file *seq, loff_t *pos) 1847static void *gfs2_sbstats_seq_start(struct seq_file *seq, loff_t *pos)
1932{ 1848{
1933 struct gfs2_glock_iter *gi = seq->private; 1849 preempt_disable();
1934
1935 gi->hash = *pos;
1936 if (*pos >= GFS2_NR_SBSTATS) 1850 if (*pos >= GFS2_NR_SBSTATS)
1937 return NULL; 1851 return NULL;
1938 preempt_disable(); 1852 return pos;
1939 return SEQ_START_TOKEN;
1940} 1853}
1941 1854
1942static void *gfs2_sbstats_seq_next(struct seq_file *seq, void *iter_ptr, 1855static void *gfs2_sbstats_seq_next(struct seq_file *seq, void *iter_ptr,
1943 loff_t *pos) 1856 loff_t *pos)
1944{ 1857{
1945 struct gfs2_glock_iter *gi = seq->private;
1946 (*pos)++; 1858 (*pos)++;
1947 gi->hash++; 1859 if (*pos >= GFS2_NR_SBSTATS)
1948 if (gi->hash >= GFS2_NR_SBSTATS) {
1949 preempt_enable();
1950 return NULL; 1860 return NULL;
1951 } 1861 return pos;
1952 return SEQ_START_TOKEN;
1953} 1862}
1954 1863
1955static void gfs2_sbstats_seq_stop(struct seq_file *seq, void *iter_ptr) 1864static void gfs2_sbstats_seq_stop(struct seq_file *seq, void *iter_ptr)
@@ -1987,14 +1896,28 @@ static int gfs2_glocks_open(struct inode *inode, struct file *file)
1987 if (ret == 0) { 1896 if (ret == 0) {
1988 struct seq_file *seq = file->private_data; 1897 struct seq_file *seq = file->private_data;
1989 struct gfs2_glock_iter *gi = seq->private; 1898 struct gfs2_glock_iter *gi = seq->private;
1899
1990 gi->sdp = inode->i_private; 1900 gi->sdp = inode->i_private;
1901 gi->last_pos = 0;
1991 seq->buf = kmalloc(GFS2_SEQ_GOODSIZE, GFP_KERNEL | __GFP_NOWARN); 1902 seq->buf = kmalloc(GFS2_SEQ_GOODSIZE, GFP_KERNEL | __GFP_NOWARN);
1992 if (seq->buf) 1903 if (seq->buf)
1993 seq->size = GFS2_SEQ_GOODSIZE; 1904 seq->size = GFS2_SEQ_GOODSIZE;
1905 gi->gl = NULL;
1906 ret = rhashtable_walk_init(&gl_hash_table, &gi->hti);
1994 } 1907 }
1995 return ret; 1908 return ret;
1996} 1909}
1997 1910
1911static int gfs2_glocks_release(struct inode *inode, struct file *file)
1912{
1913 struct seq_file *seq = file->private_data;
1914 struct gfs2_glock_iter *gi = seq->private;
1915
1916 gi->gl = NULL;
1917 rhashtable_walk_exit(&gi->hti);
1918 return seq_release_private(inode, file);
1919}
1920
1998static int gfs2_glstats_open(struct inode *inode, struct file *file) 1921static int gfs2_glstats_open(struct inode *inode, struct file *file)
1999{ 1922{
2000 int ret = seq_open_private(file, &gfs2_glstats_seq_ops, 1923 int ret = seq_open_private(file, &gfs2_glstats_seq_ops,
@@ -2003,21 +1926,22 @@ static int gfs2_glstats_open(struct inode *inode, struct file *file)
2003 struct seq_file *seq = file->private_data; 1926 struct seq_file *seq = file->private_data;
2004 struct gfs2_glock_iter *gi = seq->private; 1927 struct gfs2_glock_iter *gi = seq->private;
2005 gi->sdp = inode->i_private; 1928 gi->sdp = inode->i_private;
1929 gi->last_pos = 0;
2006 seq->buf = kmalloc(GFS2_SEQ_GOODSIZE, GFP_KERNEL | __GFP_NOWARN); 1930 seq->buf = kmalloc(GFS2_SEQ_GOODSIZE, GFP_KERNEL | __GFP_NOWARN);
2007 if (seq->buf) 1931 if (seq->buf)
2008 seq->size = GFS2_SEQ_GOODSIZE; 1932 seq->size = GFS2_SEQ_GOODSIZE;
1933 gi->gl = NULL;
1934 ret = rhashtable_walk_init(&gl_hash_table, &gi->hti);
2009 } 1935 }
2010 return ret; 1936 return ret;
2011} 1937}
2012 1938
2013static int gfs2_sbstats_open(struct inode *inode, struct file *file) 1939static int gfs2_sbstats_open(struct inode *inode, struct file *file)
2014{ 1940{
2015 int ret = seq_open_private(file, &gfs2_sbstats_seq_ops, 1941 int ret = seq_open(file, &gfs2_sbstats_seq_ops);
2016 sizeof(struct gfs2_glock_iter));
2017 if (ret == 0) { 1942 if (ret == 0) {
2018 struct seq_file *seq = file->private_data; 1943 struct seq_file *seq = file->private_data;
2019 struct gfs2_glock_iter *gi = seq->private; 1944 seq->private = inode->i_private; /* sdp */
2020 gi->sdp = inode->i_private;
2021 } 1945 }
2022 return ret; 1946 return ret;
2023} 1947}
@@ -2027,7 +1951,7 @@ static const struct file_operations gfs2_glocks_fops = {
2027 .open = gfs2_glocks_open, 1951 .open = gfs2_glocks_open,
2028 .read = seq_read, 1952 .read = seq_read,
2029 .llseek = seq_lseek, 1953 .llseek = seq_lseek,
2030 .release = seq_release_private, 1954 .release = gfs2_glocks_release,
2031}; 1955};
2032 1956
2033static const struct file_operations gfs2_glstats_fops = { 1957static const struct file_operations gfs2_glstats_fops = {
@@ -2035,7 +1959,7 @@ static const struct file_operations gfs2_glstats_fops = {
2035 .open = gfs2_glstats_open, 1959 .open = gfs2_glstats_open,
2036 .read = seq_read, 1960 .read = seq_read,
2037 .llseek = seq_lseek, 1961 .llseek = seq_lseek,
2038 .release = seq_release_private, 1962 .release = gfs2_glocks_release,
2039}; 1963};
2040 1964
2041static const struct file_operations gfs2_sbstats_fops = { 1965static const struct file_operations gfs2_sbstats_fops = {
@@ -2043,7 +1967,7 @@ static const struct file_operations gfs2_sbstats_fops = {
2043 .open = gfs2_sbstats_open, 1967 .open = gfs2_sbstats_open,
2044 .read = seq_read, 1968 .read = seq_read,
2045 .llseek = seq_lseek, 1969 .llseek = seq_lseek,
2046 .release = seq_release_private, 1970 .release = seq_release,
2047}; 1971};
2048 1972
2049int gfs2_create_debugfs_file(struct gfs2_sbd *sdp) 1973int gfs2_create_debugfs_file(struct gfs2_sbd *sdp)
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index fa3fa5e94553..1f6c9c3fe5cb 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -32,13 +32,15 @@ struct workqueue_struct *gfs2_freeze_wq;
32 32
33static void gfs2_ail_error(struct gfs2_glock *gl, const struct buffer_head *bh) 33static void gfs2_ail_error(struct gfs2_glock *gl, const struct buffer_head *bh)
34{ 34{
35 fs_err(gl->gl_sbd, "AIL buffer %p: blocknr %llu state 0x%08lx mapping %p page state 0x%lx\n", 35 fs_err(gl->gl_name.ln_sbd,
36 "AIL buffer %p: blocknr %llu state 0x%08lx mapping %p page "
37 "state 0x%lx\n",
36 bh, (unsigned long long)bh->b_blocknr, bh->b_state, 38 bh, (unsigned long long)bh->b_blocknr, bh->b_state,
37 bh->b_page->mapping, bh->b_page->flags); 39 bh->b_page->mapping, bh->b_page->flags);
38 fs_err(gl->gl_sbd, "AIL glock %u:%llu mapping %p\n", 40 fs_err(gl->gl_name.ln_sbd, "AIL glock %u:%llu mapping %p\n",
39 gl->gl_name.ln_type, gl->gl_name.ln_number, 41 gl->gl_name.ln_type, gl->gl_name.ln_number,
40 gfs2_glock2aspace(gl)); 42 gfs2_glock2aspace(gl));
41 gfs2_lm_withdraw(gl->gl_sbd, "AIL error\n"); 43 gfs2_lm_withdraw(gl->gl_name.ln_sbd, "AIL error\n");
42} 44}
43 45
44/** 46/**
@@ -52,7 +54,7 @@ static void gfs2_ail_error(struct gfs2_glock *gl, const struct buffer_head *bh)
52static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync, 54static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync,
53 unsigned int nr_revokes) 55 unsigned int nr_revokes)
54{ 56{
55 struct gfs2_sbd *sdp = gl->gl_sbd; 57 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
56 struct list_head *head = &gl->gl_ail_list; 58 struct list_head *head = &gl->gl_ail_list;
57 struct gfs2_bufdata *bd, *tmp; 59 struct gfs2_bufdata *bd, *tmp;
58 struct buffer_head *bh; 60 struct buffer_head *bh;
@@ -80,7 +82,7 @@ static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync,
80 82
81static void gfs2_ail_empty_gl(struct gfs2_glock *gl) 83static void gfs2_ail_empty_gl(struct gfs2_glock *gl)
82{ 84{
83 struct gfs2_sbd *sdp = gl->gl_sbd; 85 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
84 struct gfs2_trans tr; 86 struct gfs2_trans tr;
85 87
86 memset(&tr, 0, sizeof(tr)); 88 memset(&tr, 0, sizeof(tr));
@@ -109,7 +111,7 @@ static void gfs2_ail_empty_gl(struct gfs2_glock *gl)
109 111
110void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync) 112void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
111{ 113{
112 struct gfs2_sbd *sdp = gl->gl_sbd; 114 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
113 unsigned int revokes = atomic_read(&gl->gl_ail_count); 115 unsigned int revokes = atomic_read(&gl->gl_ail_count);
114 unsigned int max_revokes = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / sizeof(u64); 116 unsigned int max_revokes = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / sizeof(u64);
115 int ret; 117 int ret;
@@ -139,7 +141,7 @@ void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
139 141
140static void rgrp_go_sync(struct gfs2_glock *gl) 142static void rgrp_go_sync(struct gfs2_glock *gl)
141{ 143{
142 struct gfs2_sbd *sdp = gl->gl_sbd; 144 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
143 struct address_space *mapping = &sdp->sd_aspace; 145 struct address_space *mapping = &sdp->sd_aspace;
144 struct gfs2_rgrpd *rgd; 146 struct gfs2_rgrpd *rgd;
145 int error; 147 int error;
@@ -179,7 +181,7 @@ static void rgrp_go_sync(struct gfs2_glock *gl)
179 181
180static void rgrp_go_inval(struct gfs2_glock *gl, int flags) 182static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
181{ 183{
182 struct gfs2_sbd *sdp = gl->gl_sbd; 184 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
183 struct address_space *mapping = &sdp->sd_aspace; 185 struct address_space *mapping = &sdp->sd_aspace;
184 struct gfs2_rgrpd *rgd = gl->gl_object; 186 struct gfs2_rgrpd *rgd = gl->gl_object;
185 187
@@ -218,7 +220,7 @@ static void inode_go_sync(struct gfs2_glock *gl)
218 220
219 GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE); 221 GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);
220 222
221 gfs2_log_flush(gl->gl_sbd, gl, NORMAL_FLUSH); 223 gfs2_log_flush(gl->gl_name.ln_sbd, gl, NORMAL_FLUSH);
222 filemap_fdatawrite(metamapping); 224 filemap_fdatawrite(metamapping);
223 if (ip) { 225 if (ip) {
224 struct address_space *mapping = ip->i_inode.i_mapping; 226 struct address_space *mapping = ip->i_inode.i_mapping;
@@ -252,7 +254,7 @@ static void inode_go_inval(struct gfs2_glock *gl, int flags)
252{ 254{
253 struct gfs2_inode *ip = gl->gl_object; 255 struct gfs2_inode *ip = gl->gl_object;
254 256
255 gfs2_assert_withdraw(gl->gl_sbd, !atomic_read(&gl->gl_ail_count)); 257 gfs2_assert_withdraw(gl->gl_name.ln_sbd, !atomic_read(&gl->gl_ail_count));
256 258
257 if (flags & DIO_METADATA) { 259 if (flags & DIO_METADATA) {
258 struct address_space *mapping = gfs2_glock2aspace(gl); 260 struct address_space *mapping = gfs2_glock2aspace(gl);
@@ -264,9 +266,9 @@ static void inode_go_inval(struct gfs2_glock *gl, int flags)
264 } 266 }
265 } 267 }
266 268
267 if (ip == GFS2_I(gl->gl_sbd->sd_rindex)) { 269 if (ip == GFS2_I(gl->gl_name.ln_sbd->sd_rindex)) {
268 gfs2_log_flush(gl->gl_sbd, NULL, NORMAL_FLUSH); 270 gfs2_log_flush(gl->gl_name.ln_sbd, NULL, NORMAL_FLUSH);
269 gl->gl_sbd->sd_rindex_uptodate = 0; 271 gl->gl_name.ln_sbd->sd_rindex_uptodate = 0;
270 } 272 }
271 if (ip && S_ISREG(ip->i_inode.i_mode)) 273 if (ip && S_ISREG(ip->i_inode.i_mode))
272 truncate_inode_pages(ip->i_inode.i_mapping, 0); 274 truncate_inode_pages(ip->i_inode.i_mapping, 0);
@@ -281,7 +283,7 @@ static void inode_go_inval(struct gfs2_glock *gl, int flags)
281 283
282static int inode_go_demote_ok(const struct gfs2_glock *gl) 284static int inode_go_demote_ok(const struct gfs2_glock *gl)
283{ 285{
284 struct gfs2_sbd *sdp = gl->gl_sbd; 286 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
285 struct gfs2_holder *gh; 287 struct gfs2_holder *gh;
286 288
287 if (sdp->sd_jindex == gl->gl_object || sdp->sd_rindex == gl->gl_object) 289 if (sdp->sd_jindex == gl->gl_object || sdp->sd_rindex == gl->gl_object)
@@ -416,7 +418,7 @@ int gfs2_inode_refresh(struct gfs2_inode *ip)
416static int inode_go_lock(struct gfs2_holder *gh) 418static int inode_go_lock(struct gfs2_holder *gh)
417{ 419{
418 struct gfs2_glock *gl = gh->gh_gl; 420 struct gfs2_glock *gl = gh->gh_gl;
419 struct gfs2_sbd *sdp = gl->gl_sbd; 421 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
420 struct gfs2_inode *ip = gl->gl_object; 422 struct gfs2_inode *ip = gl->gl_object;
421 int error = 0; 423 int error = 0;
422 424
@@ -477,7 +479,7 @@ static void inode_go_dump(struct seq_file *seq, const struct gfs2_glock *gl)
477static void freeze_go_sync(struct gfs2_glock *gl) 479static void freeze_go_sync(struct gfs2_glock *gl)
478{ 480{
479 int error = 0; 481 int error = 0;
480 struct gfs2_sbd *sdp = gl->gl_sbd; 482 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
481 483
482 if (gl->gl_state == LM_ST_SHARED && 484 if (gl->gl_state == LM_ST_SHARED &&
483 test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) { 485 test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
@@ -500,7 +502,7 @@ static void freeze_go_sync(struct gfs2_glock *gl)
500 502
501static int freeze_go_xmote_bh(struct gfs2_glock *gl, struct gfs2_holder *gh) 503static int freeze_go_xmote_bh(struct gfs2_glock *gl, struct gfs2_holder *gh)
502{ 504{
503 struct gfs2_sbd *sdp = gl->gl_sbd; 505 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
504 struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode); 506 struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
505 struct gfs2_glock *j_gl = ip->i_gl; 507 struct gfs2_glock *j_gl = ip->i_gl;
506 struct gfs2_log_header_host head; 508 struct gfs2_log_header_host head;
@@ -545,7 +547,7 @@ static int freeze_go_demote_ok(const struct gfs2_glock *gl)
545static void iopen_go_callback(struct gfs2_glock *gl, bool remote) 547static void iopen_go_callback(struct gfs2_glock *gl, bool remote)
546{ 548{
547 struct gfs2_inode *ip = (struct gfs2_inode *)gl->gl_object; 549 struct gfs2_inode *ip = (struct gfs2_inode *)gl->gl_object;
548 struct gfs2_sbd *sdp = gl->gl_sbd; 550 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
549 551
550 if (!remote || (sdp->sd_vfs->s_flags & MS_RDONLY)) 552 if (!remote || (sdp->sd_vfs->s_flags & MS_RDONLY))
551 return; 553 return;
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index a1ec7c20e498..121ed08d9d9f 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -22,6 +22,7 @@
22#include <linux/ktime.h> 22#include <linux/ktime.h>
23#include <linux/percpu.h> 23#include <linux/percpu.h>
24#include <linux/lockref.h> 24#include <linux/lockref.h>
25#include <linux/rhashtable.h>
25 26
26#define DIO_WAIT 0x00000010 27#define DIO_WAIT 0x00000010
27#define DIO_METADATA 0x00000020 28#define DIO_METADATA 0x00000020
@@ -203,13 +204,15 @@ enum {
203}; 204};
204 205
205struct lm_lockname { 206struct lm_lockname {
207 struct gfs2_sbd *ln_sbd;
206 u64 ln_number; 208 u64 ln_number;
207 unsigned int ln_type; 209 unsigned int ln_type;
208}; 210};
209 211
210#define lm_name_equal(name1, name2) \ 212#define lm_name_equal(name1, name2) \
211 (((name1)->ln_number == (name2)->ln_number) && \ 213 (((name1)->ln_number == (name2)->ln_number) && \
212 ((name1)->ln_type == (name2)->ln_type)) 214 ((name1)->ln_type == (name2)->ln_type) && \
215 ((name1)->ln_sbd == (name2)->ln_sbd))
213 216
214 217
215struct gfs2_glock_operations { 218struct gfs2_glock_operations {
@@ -241,7 +244,7 @@ enum {
241}; 244};
242 245
243struct gfs2_lkstats { 246struct gfs2_lkstats {
244 s64 stats[GFS2_NR_LKSTATS]; 247 u64 stats[GFS2_NR_LKSTATS];
245}; 248};
246 249
247enum { 250enum {
@@ -327,7 +330,6 @@ enum {
327 330
328struct gfs2_glock { 331struct gfs2_glock {
329 struct hlist_bl_node gl_list; 332 struct hlist_bl_node gl_list;
330 struct gfs2_sbd *gl_sbd;
331 unsigned long gl_flags; /* GLF_... */ 333 unsigned long gl_flags; /* GLF_... */
332 struct lm_lockname gl_name; 334 struct lm_lockname gl_name;
333 335
@@ -341,7 +343,6 @@ struct gfs2_glock {
341 gl_req:2, /* State in last dlm request */ 343 gl_req:2, /* State in last dlm request */
342 gl_reply:8; /* Last reply from the dlm */ 344 gl_reply:8; /* Last reply from the dlm */
343 345
344 unsigned int gl_hash;
345 unsigned long gl_demote_time; /* time of first demote request */ 346 unsigned long gl_demote_time; /* time of first demote request */
346 long gl_hold_time; 347 long gl_hold_time;
347 struct list_head gl_holders; 348 struct list_head gl_holders;
@@ -367,7 +368,7 @@ struct gfs2_glock {
367 loff_t end; 368 loff_t end;
368 } gl_vm; 369 } gl_vm;
369 }; 370 };
370 struct rcu_head gl_rcu; 371 struct rhash_head gl_node;
371}; 372};
372 373
373#define GFS2_MIN_LVB_SIZE 32 /* Min size of LVB that gfs2 supports */ 374#define GFS2_MIN_LVB_SIZE 32 /* Min size of LVB that gfs2 supports */
@@ -835,7 +836,7 @@ static inline void gfs2_glstats_inc(struct gfs2_glock *gl, int which)
835 836
836static inline void gfs2_sbstats_inc(const struct gfs2_glock *gl, int which) 837static inline void gfs2_sbstats_inc(const struct gfs2_glock *gl, int which)
837{ 838{
838 const struct gfs2_sbd *sdp = gl->gl_sbd; 839 const struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
839 preempt_disable(); 840 preempt_disable();
840 this_cpu_ptr(sdp->sd_lkstats)->lkstats[gl->gl_name.ln_type].stats[which]++; 841 this_cpu_ptr(sdp->sd_lkstats)->lkstats[gl->gl_name.ln_type].stats[which]++;
841 preempt_enable(); 842 preempt_enable();
diff --git a/fs/gfs2/lock_dlm.c b/fs/gfs2/lock_dlm.c
index 641383a9c1bb..284c1542783e 100644
--- a/fs/gfs2/lock_dlm.c
+++ b/fs/gfs2/lock_dlm.c
@@ -31,7 +31,7 @@ extern struct workqueue_struct *gfs2_control_wq;
31 * 31 *
32 * @delta is the difference between the current rtt sample and the 32 * @delta is the difference between the current rtt sample and the
33 * running average srtt. We add 1/8 of that to the srtt in order to 33 * running average srtt. We add 1/8 of that to the srtt in order to
34 * update the current srtt estimate. The varience estimate is a bit 34 * update the current srtt estimate. The variance estimate is a bit
35 * more complicated. We subtract the abs value of the @delta from 35 * more complicated. We subtract the abs value of the @delta from
36 * the current variance estimate and add 1/4 of that to the running 36 * the current variance estimate and add 1/4 of that to the running
37 * total. 37 * total.
@@ -80,7 +80,7 @@ static inline void gfs2_update_reply_times(struct gfs2_glock *gl)
80 80
81 preempt_disable(); 81 preempt_disable();
82 rtt = ktime_to_ns(ktime_sub(ktime_get_real(), gl->gl_dstamp)); 82 rtt = ktime_to_ns(ktime_sub(ktime_get_real(), gl->gl_dstamp));
83 lks = this_cpu_ptr(gl->gl_sbd->sd_lkstats); 83 lks = this_cpu_ptr(gl->gl_name.ln_sbd->sd_lkstats);
84 gfs2_update_stats(&gl->gl_stats, index, rtt); /* Local */ 84 gfs2_update_stats(&gl->gl_stats, index, rtt); /* Local */
85 gfs2_update_stats(&lks->lkstats[gltype], index, rtt); /* Global */ 85 gfs2_update_stats(&lks->lkstats[gltype], index, rtt); /* Global */
86 preempt_enable(); 86 preempt_enable();
@@ -108,7 +108,7 @@ static inline void gfs2_update_request_times(struct gfs2_glock *gl)
108 dstamp = gl->gl_dstamp; 108 dstamp = gl->gl_dstamp;
109 gl->gl_dstamp = ktime_get_real(); 109 gl->gl_dstamp = ktime_get_real();
110 irt = ktime_to_ns(ktime_sub(gl->gl_dstamp, dstamp)); 110 irt = ktime_to_ns(ktime_sub(gl->gl_dstamp, dstamp));
111 lks = this_cpu_ptr(gl->gl_sbd->sd_lkstats); 111 lks = this_cpu_ptr(gl->gl_name.ln_sbd->sd_lkstats);
112 gfs2_update_stats(&gl->gl_stats, GFS2_LKS_SIRT, irt); /* Local */ 112 gfs2_update_stats(&gl->gl_stats, GFS2_LKS_SIRT, irt); /* Local */
113 gfs2_update_stats(&lks->lkstats[gltype], GFS2_LKS_SIRT, irt); /* Global */ 113 gfs2_update_stats(&lks->lkstats[gltype], GFS2_LKS_SIRT, irt); /* Global */
114 preempt_enable(); 114 preempt_enable();
@@ -253,7 +253,7 @@ static void gfs2_reverse_hex(char *c, u64 value)
253static int gdlm_lock(struct gfs2_glock *gl, unsigned int req_state, 253static int gdlm_lock(struct gfs2_glock *gl, unsigned int req_state,
254 unsigned int flags) 254 unsigned int flags)
255{ 255{
256 struct lm_lockstruct *ls = &gl->gl_sbd->sd_lockstruct; 256 struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct;
257 int req; 257 int req;
258 u32 lkf; 258 u32 lkf;
259 char strname[GDLM_STRNAME_BYTES] = ""; 259 char strname[GDLM_STRNAME_BYTES] = "";
@@ -281,7 +281,7 @@ static int gdlm_lock(struct gfs2_glock *gl, unsigned int req_state,
281 281
282static void gdlm_put_lock(struct gfs2_glock *gl) 282static void gdlm_put_lock(struct gfs2_glock *gl)
283{ 283{
284 struct gfs2_sbd *sdp = gl->gl_sbd; 284 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
285 struct lm_lockstruct *ls = &sdp->sd_lockstruct; 285 struct lm_lockstruct *ls = &sdp->sd_lockstruct;
286 int lvb_needs_unlock = 0; 286 int lvb_needs_unlock = 0;
287 int error; 287 int error;
@@ -319,7 +319,7 @@ static void gdlm_put_lock(struct gfs2_glock *gl)
319 319
320static void gdlm_cancel(struct gfs2_glock *gl) 320static void gdlm_cancel(struct gfs2_glock *gl)
321{ 321{
322 struct lm_lockstruct *ls = &gl->gl_sbd->sd_lockstruct; 322 struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct;
323 dlm_unlock(ls->ls_dlm, gl->gl_lksb.sb_lkid, DLM_LKF_CANCEL, NULL, gl); 323 dlm_unlock(ls->ls_dlm, gl->gl_lksb.sb_lkid, DLM_LKF_CANCEL, NULL, gl);
324} 324}
325 325
diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c
index 92324ac58290..d5369a109781 100644
--- a/fs/gfs2/lops.c
+++ b/fs/gfs2/lops.c
@@ -70,7 +70,7 @@ static bool buffer_is_rgrp(const struct gfs2_bufdata *bd)
70static void maybe_release_space(struct gfs2_bufdata *bd) 70static void maybe_release_space(struct gfs2_bufdata *bd)
71{ 71{
72 struct gfs2_glock *gl = bd->bd_gl; 72 struct gfs2_glock *gl = bd->bd_gl;
73 struct gfs2_sbd *sdp = gl->gl_sbd; 73 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
74 struct gfs2_rgrpd *rgd = gl->gl_object; 74 struct gfs2_rgrpd *rgd = gl->gl_object;
75 unsigned int index = bd->bd_bh->b_blocknr - gl->gl_name.ln_number; 75 unsigned int index = bd->bd_bh->b_blocknr - gl->gl_name.ln_number;
76 struct gfs2_bitmap *bi = rgd->rd_bits + index; 76 struct gfs2_bitmap *bi = rgd->rd_bits + index;
@@ -578,7 +578,7 @@ static int buf_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
578static void gfs2_meta_sync(struct gfs2_glock *gl) 578static void gfs2_meta_sync(struct gfs2_glock *gl)
579{ 579{
580 struct address_space *mapping = gfs2_glock2aspace(gl); 580 struct address_space *mapping = gfs2_glock2aspace(gl);
581 struct gfs2_sbd *sdp = gl->gl_sbd; 581 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
582 int error; 582 int error;
583 583
584 if (mapping == NULL) 584 if (mapping == NULL)
@@ -588,7 +588,7 @@ static void gfs2_meta_sync(struct gfs2_glock *gl)
588 error = filemap_fdatawait(mapping); 588 error = filemap_fdatawait(mapping);
589 589
590 if (error) 590 if (error)
591 gfs2_io_error(gl->gl_sbd); 591 gfs2_io_error(gl->gl_name.ln_sbd);
592} 592}
593 593
594static void buf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass) 594static void buf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c
index b984a6e190bc..0e1d4be5865a 100644
--- a/fs/gfs2/meta_io.c
+++ b/fs/gfs2/meta_io.c
@@ -114,7 +114,7 @@ const struct address_space_operations gfs2_rgrp_aops = {
114struct buffer_head *gfs2_getbuf(struct gfs2_glock *gl, u64 blkno, int create) 114struct buffer_head *gfs2_getbuf(struct gfs2_glock *gl, u64 blkno, int create)
115{ 115{
116 struct address_space *mapping = gfs2_glock2aspace(gl); 116 struct address_space *mapping = gfs2_glock2aspace(gl);
117 struct gfs2_sbd *sdp = gl->gl_sbd; 117 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
118 struct page *page; 118 struct page *page;
119 struct buffer_head *bh; 119 struct buffer_head *bh;
120 unsigned int shift; 120 unsigned int shift;
@@ -200,7 +200,7 @@ struct buffer_head *gfs2_meta_new(struct gfs2_glock *gl, u64 blkno)
200int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags, 200int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags,
201 struct buffer_head **bhp) 201 struct buffer_head **bhp)
202{ 202{
203 struct gfs2_sbd *sdp = gl->gl_sbd; 203 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
204 struct buffer_head *bh; 204 struct buffer_head *bh;
205 205
206 if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) { 206 if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
@@ -362,7 +362,7 @@ int gfs2_meta_indirect_buffer(struct gfs2_inode *ip, int height, u64 num,
362 362
363struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen) 363struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen)
364{ 364{
365 struct gfs2_sbd *sdp = gl->gl_sbd; 365 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
366 struct buffer_head *first_bh, *bh; 366 struct buffer_head *first_bh, *bh;
367 u32 max_ra = gfs2_tune_get(sdp, gt_max_readahead) >> 367 u32 max_ra = gfs2_tune_get(sdp, gt_max_readahead) >>
368 sdp->sd_sb.sb_bsize_shift; 368 sdp->sd_sb.sb_bsize_shift;
diff --git a/fs/gfs2/meta_io.h b/fs/gfs2/meta_io.h
index ac5d8027d335..8ca161567a93 100644
--- a/fs/gfs2/meta_io.h
+++ b/fs/gfs2/meta_io.h
@@ -44,7 +44,7 @@ static inline struct gfs2_sbd *gfs2_mapping2sbd(struct address_space *mapping)
44{ 44{
45 struct inode *inode = mapping->host; 45 struct inode *inode = mapping->host;
46 if (mapping->a_ops == &gfs2_meta_aops) 46 if (mapping->a_ops == &gfs2_meta_aops)
47 return (((struct gfs2_glock *)mapping) - 1)->gl_sbd; 47 return (((struct gfs2_glock *)mapping) - 1)->gl_name.ln_sbd;
48 else if (mapping->a_ops == &gfs2_rgrp_aops) 48 else if (mapping->a_ops == &gfs2_rgrp_aops)
49 return container_of(mapping, struct gfs2_sbd, sd_aspace); 49 return container_of(mapping, struct gfs2_sbd, sd_aspace);
50 else 50 else
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index 9b61f92fcfdf..3a31226531ea 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -119,7 +119,7 @@ static void gfs2_qd_dispose(struct list_head *list)
119 119
120 while (!list_empty(list)) { 120 while (!list_empty(list)) {
121 qd = list_entry(list->next, struct gfs2_quota_data, qd_lru); 121 qd = list_entry(list->next, struct gfs2_quota_data, qd_lru);
122 sdp = qd->qd_gl->gl_sbd; 122 sdp = qd->qd_gl->gl_name.ln_sbd;
123 123
124 list_del(&qd->qd_lru); 124 list_del(&qd->qd_lru);
125 125
@@ -302,7 +302,7 @@ static int qd_get(struct gfs2_sbd *sdp, struct kqid qid,
302 302
303static void qd_hold(struct gfs2_quota_data *qd) 303static void qd_hold(struct gfs2_quota_data *qd)
304{ 304{
305 struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd; 305 struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
306 gfs2_assert(sdp, !__lockref_is_dead(&qd->qd_lockref)); 306 gfs2_assert(sdp, !__lockref_is_dead(&qd->qd_lockref));
307 lockref_get(&qd->qd_lockref); 307 lockref_get(&qd->qd_lockref);
308} 308}
@@ -367,7 +367,7 @@ static void slot_put(struct gfs2_quota_data *qd)
367 367
368static int bh_get(struct gfs2_quota_data *qd) 368static int bh_get(struct gfs2_quota_data *qd)
369{ 369{
370 struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd; 370 struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
371 struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode); 371 struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
372 unsigned int block, offset; 372 unsigned int block, offset;
373 struct buffer_head *bh; 373 struct buffer_head *bh;
@@ -414,7 +414,7 @@ fail:
414 414
415static void bh_put(struct gfs2_quota_data *qd) 415static void bh_put(struct gfs2_quota_data *qd)
416{ 416{
417 struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd; 417 struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
418 418
419 mutex_lock(&sdp->sd_quota_mutex); 419 mutex_lock(&sdp->sd_quota_mutex);
420 gfs2_assert(sdp, qd->qd_bh_count); 420 gfs2_assert(sdp, qd->qd_bh_count);
@@ -486,7 +486,7 @@ static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
486 486
487static void qd_unlock(struct gfs2_quota_data *qd) 487static void qd_unlock(struct gfs2_quota_data *qd)
488{ 488{
489 gfs2_assert_warn(qd->qd_gl->gl_sbd, 489 gfs2_assert_warn(qd->qd_gl->gl_name.ln_sbd,
490 test_bit(QDF_LOCKED, &qd->qd_flags)); 490 test_bit(QDF_LOCKED, &qd->qd_flags));
491 clear_bit(QDF_LOCKED, &qd->qd_flags); 491 clear_bit(QDF_LOCKED, &qd->qd_flags);
492 bh_put(qd); 492 bh_put(qd);
@@ -614,7 +614,7 @@ static int sort_qd(const void *a, const void *b)
614 614
615static void do_qc(struct gfs2_quota_data *qd, s64 change) 615static void do_qc(struct gfs2_quota_data *qd, s64 change)
616{ 616{
617 struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd; 617 struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
618 struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode); 618 struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
619 struct gfs2_quota_change *qc = qd->qd_bh_qc; 619 struct gfs2_quota_change *qc = qd->qd_bh_qc;
620 s64 x; 620 s64 x;
@@ -831,7 +831,7 @@ static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
831 831
832static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda) 832static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
833{ 833{
834 struct gfs2_sbd *sdp = (*qda)->qd_gl->gl_sbd; 834 struct gfs2_sbd *sdp = (*qda)->qd_gl->gl_name.ln_sbd;
835 struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode); 835 struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
836 struct gfs2_alloc_parms ap = { .aflags = 0, }; 836 struct gfs2_alloc_parms ap = { .aflags = 0, };
837 unsigned int data_blocks, ind_blocks; 837 unsigned int data_blocks, ind_blocks;
@@ -922,7 +922,7 @@ out:
922 gfs2_glock_dq_uninit(&ghs[qx]); 922 gfs2_glock_dq_uninit(&ghs[qx]);
923 mutex_unlock(&ip->i_inode.i_mutex); 923 mutex_unlock(&ip->i_inode.i_mutex);
924 kfree(ghs); 924 kfree(ghs);
925 gfs2_log_flush(ip->i_gl->gl_sbd, ip->i_gl, NORMAL_FLUSH); 925 gfs2_log_flush(ip->i_gl->gl_name.ln_sbd, ip->i_gl, NORMAL_FLUSH);
926 return error; 926 return error;
927} 927}
928 928
@@ -954,7 +954,7 @@ static int update_qd(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd)
954static int do_glock(struct gfs2_quota_data *qd, int force_refresh, 954static int do_glock(struct gfs2_quota_data *qd, int force_refresh,
955 struct gfs2_holder *q_gh) 955 struct gfs2_holder *q_gh)
956{ 956{
957 struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd; 957 struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
958 struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode); 958 struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
959 struct gfs2_holder i_gh; 959 struct gfs2_holder i_gh;
960 int error; 960 int error;
@@ -1037,7 +1037,7 @@ int gfs2_quota_lock(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
1037 1037
1038static int need_sync(struct gfs2_quota_data *qd) 1038static int need_sync(struct gfs2_quota_data *qd)
1039{ 1039{
1040 struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd; 1040 struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
1041 struct gfs2_tune *gt = &sdp->sd_tune; 1041 struct gfs2_tune *gt = &sdp->sd_tune;
1042 s64 value; 1042 s64 value;
1043 unsigned int num, den; 1043 unsigned int num, den;
@@ -1125,7 +1125,7 @@ out:
1125 1125
1126static int print_message(struct gfs2_quota_data *qd, char *type) 1126static int print_message(struct gfs2_quota_data *qd, char *type)
1127{ 1127{
1128 struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd; 1128 struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
1129 1129
1130 fs_info(sdp, "quota %s for %s %u\n", 1130 fs_info(sdp, "quota %s for %s %u\n",
1131 type, 1131 type,
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index c6c62321dfd6..475985d14758 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -1860,13 +1860,13 @@ static void try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked, u64 skip
1860static bool gfs2_rgrp_congested(const struct gfs2_rgrpd *rgd, int loops) 1860static bool gfs2_rgrp_congested(const struct gfs2_rgrpd *rgd, int loops)
1861{ 1861{
1862 const struct gfs2_glock *gl = rgd->rd_gl; 1862 const struct gfs2_glock *gl = rgd->rd_gl;
1863 const struct gfs2_sbd *sdp = gl->gl_sbd; 1863 const struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
1864 struct gfs2_lkstats *st; 1864 struct gfs2_lkstats *st;
1865 s64 r_dcount, l_dcount; 1865 u64 r_dcount, l_dcount;
1866 s64 l_srttb, a_srttb = 0; 1866 u64 l_srttb, a_srttb = 0;
1867 s64 srttb_diff; 1867 s64 srttb_diff;
1868 s64 sqr_diff; 1868 u64 sqr_diff;
1869 s64 var; 1869 u64 var;
1870 int cpu, nonzero = 0; 1870 int cpu, nonzero = 0;
1871 1871
1872 preempt_disable(); 1872 preempt_disable();
diff --git a/fs/gfs2/trace_gfs2.h b/fs/gfs2/trace_gfs2.h
index 20c007d747ab..49ac55da4e33 100644
--- a/fs/gfs2/trace_gfs2.h
+++ b/fs/gfs2/trace_gfs2.h
@@ -104,7 +104,7 @@ TRACE_EVENT(gfs2_glock_state_change,
104 ), 104 ),
105 105
106 TP_fast_assign( 106 TP_fast_assign(
107 __entry->dev = gl->gl_sbd->sd_vfs->s_dev; 107 __entry->dev = gl->gl_name.ln_sbd->sd_vfs->s_dev;
108 __entry->glnum = gl->gl_name.ln_number; 108 __entry->glnum = gl->gl_name.ln_number;
109 __entry->gltype = gl->gl_name.ln_type; 109 __entry->gltype = gl->gl_name.ln_type;
110 __entry->cur_state = glock_trace_state(gl->gl_state); 110 __entry->cur_state = glock_trace_state(gl->gl_state);
@@ -140,7 +140,7 @@ TRACE_EVENT(gfs2_glock_put,
140 ), 140 ),
141 141
142 TP_fast_assign( 142 TP_fast_assign(
143 __entry->dev = gl->gl_sbd->sd_vfs->s_dev; 143 __entry->dev = gl->gl_name.ln_sbd->sd_vfs->s_dev;
144 __entry->gltype = gl->gl_name.ln_type; 144 __entry->gltype = gl->gl_name.ln_type;
145 __entry->glnum = gl->gl_name.ln_number; 145 __entry->glnum = gl->gl_name.ln_number;
146 __entry->cur_state = glock_trace_state(gl->gl_state); 146 __entry->cur_state = glock_trace_state(gl->gl_state);
@@ -174,7 +174,7 @@ TRACE_EVENT(gfs2_demote_rq,
174 ), 174 ),
175 175
176 TP_fast_assign( 176 TP_fast_assign(
177 __entry->dev = gl->gl_sbd->sd_vfs->s_dev; 177 __entry->dev = gl->gl_name.ln_sbd->sd_vfs->s_dev;
178 __entry->gltype = gl->gl_name.ln_type; 178 __entry->gltype = gl->gl_name.ln_type;
179 __entry->glnum = gl->gl_name.ln_number; 179 __entry->glnum = gl->gl_name.ln_number;
180 __entry->cur_state = glock_trace_state(gl->gl_state); 180 __entry->cur_state = glock_trace_state(gl->gl_state);
@@ -209,7 +209,7 @@ TRACE_EVENT(gfs2_promote,
209 ), 209 ),
210 210
211 TP_fast_assign( 211 TP_fast_assign(
212 __entry->dev = gh->gh_gl->gl_sbd->sd_vfs->s_dev; 212 __entry->dev = gh->gh_gl->gl_name.ln_sbd->sd_vfs->s_dev;
213 __entry->glnum = gh->gh_gl->gl_name.ln_number; 213 __entry->glnum = gh->gh_gl->gl_name.ln_number;
214 __entry->gltype = gh->gh_gl->gl_name.ln_type; 214 __entry->gltype = gh->gh_gl->gl_name.ln_type;
215 __entry->first = first; 215 __entry->first = first;
@@ -239,7 +239,7 @@ TRACE_EVENT(gfs2_glock_queue,
239 ), 239 ),
240 240
241 TP_fast_assign( 241 TP_fast_assign(
242 __entry->dev = gh->gh_gl->gl_sbd->sd_vfs->s_dev; 242 __entry->dev = gh->gh_gl->gl_name.ln_sbd->sd_vfs->s_dev;
243 __entry->glnum = gh->gh_gl->gl_name.ln_number; 243 __entry->glnum = gh->gh_gl->gl_name.ln_number;
244 __entry->gltype = gh->gh_gl->gl_name.ln_type; 244 __entry->gltype = gh->gh_gl->gl_name.ln_type;
245 __entry->queue = queue; 245 __entry->queue = queue;
@@ -267,18 +267,18 @@ TRACE_EVENT(gfs2_glock_lock_time,
267 __field( int, status ) 267 __field( int, status )
268 __field( char, flags ) 268 __field( char, flags )
269 __field( s64, tdiff ) 269 __field( s64, tdiff )
270 __field( s64, srtt ) 270 __field( u64, srtt )
271 __field( s64, srttvar ) 271 __field( u64, srttvar )
272 __field( s64, srttb ) 272 __field( u64, srttb )
273 __field( s64, srttvarb ) 273 __field( u64, srttvarb )
274 __field( s64, sirt ) 274 __field( u64, sirt )
275 __field( s64, sirtvar ) 275 __field( u64, sirtvar )
276 __field( s64, dcount ) 276 __field( u64, dcount )
277 __field( s64, qcount ) 277 __field( u64, qcount )
278 ), 278 ),
279 279
280 TP_fast_assign( 280 TP_fast_assign(
281 __entry->dev = gl->gl_sbd->sd_vfs->s_dev; 281 __entry->dev = gl->gl_name.ln_sbd->sd_vfs->s_dev;
282 __entry->glnum = gl->gl_name.ln_number; 282 __entry->glnum = gl->gl_name.ln_number;
283 __entry->gltype = gl->gl_name.ln_type; 283 __entry->gltype = gl->gl_name.ln_type;
284 __entry->status = gl->gl_lksb.sb_status; 284 __entry->status = gl->gl_lksb.sb_status;
@@ -333,7 +333,7 @@ TRACE_EVENT(gfs2_pin,
333 ), 333 ),
334 334
335 TP_fast_assign( 335 TP_fast_assign(
336 __entry->dev = bd->bd_gl->gl_sbd->sd_vfs->s_dev; 336 __entry->dev = bd->bd_gl->gl_name.ln_sbd->sd_vfs->s_dev;
337 __entry->pin = pin; 337 __entry->pin = pin;
338 __entry->len = bd->bd_bh->b_size; 338 __entry->len = bd->bd_bh->b_size;
339 __entry->block = bd->bd_bh->b_blocknr; 339 __entry->block = bd->bd_bh->b_blocknr;
@@ -449,7 +449,7 @@ TRACE_EVENT(gfs2_bmap,
449 ), 449 ),
450 450
451 TP_fast_assign( 451 TP_fast_assign(
452 __entry->dev = ip->i_gl->gl_sbd->sd_vfs->s_dev; 452 __entry->dev = ip->i_gl->gl_name.ln_sbd->sd_vfs->s_dev;
453 __entry->lblock = lblock; 453 __entry->lblock = lblock;
454 __entry->pblock = buffer_mapped(bh) ? bh->b_blocknr : 0; 454 __entry->pblock = buffer_mapped(bh) ? bh->b_blocknr : 0;
455 __entry->inum = ip->i_no_addr; 455 __entry->inum = ip->i_no_addr;
@@ -489,7 +489,7 @@ TRACE_EVENT(gfs2_block_alloc,
489 ), 489 ),
490 490
491 TP_fast_assign( 491 TP_fast_assign(
492 __entry->dev = rgd->rd_gl->gl_sbd->sd_vfs->s_dev; 492 __entry->dev = rgd->rd_gl->gl_name.ln_sbd->sd_vfs->s_dev;
493 __entry->start = block; 493 __entry->start = block;
494 __entry->inum = ip->i_no_addr; 494 __entry->inum = ip->i_no_addr;
495 __entry->len = len; 495 __entry->len = len;
diff --git a/fs/gfs2/trans.c b/fs/gfs2/trans.c
index 88bff2430669..b95d0d625f32 100644
--- a/fs/gfs2/trans.c
+++ b/fs/gfs2/trans.c
@@ -158,7 +158,7 @@ static struct gfs2_bufdata *gfs2_alloc_bufdata(struct gfs2_glock *gl,
158void gfs2_trans_add_data(struct gfs2_glock *gl, struct buffer_head *bh) 158void gfs2_trans_add_data(struct gfs2_glock *gl, struct buffer_head *bh)
159{ 159{
160 struct gfs2_trans *tr = current->journal_info; 160 struct gfs2_trans *tr = current->journal_info;
161 struct gfs2_sbd *sdp = gl->gl_sbd; 161 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
162 struct address_space *mapping = bh->b_page->mapping; 162 struct address_space *mapping = bh->b_page->mapping;
163 struct gfs2_inode *ip = GFS2_I(mapping->host); 163 struct gfs2_inode *ip = GFS2_I(mapping->host);
164 struct gfs2_bufdata *bd; 164 struct gfs2_bufdata *bd;
@@ -224,7 +224,7 @@ static void meta_lo_add(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd)
224void gfs2_trans_add_meta(struct gfs2_glock *gl, struct buffer_head *bh) 224void gfs2_trans_add_meta(struct gfs2_glock *gl, struct buffer_head *bh)
225{ 225{
226 226
227 struct gfs2_sbd *sdp = gl->gl_sbd; 227 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
228 struct gfs2_bufdata *bd; 228 struct gfs2_bufdata *bd;
229 229
230 lock_buffer(bh); 230 lock_buffer(bh);
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
index 2714ef835bdd..be806ead7f4d 100644
--- a/fs/nfs/delegation.c
+++ b/fs/nfs/delegation.c
@@ -113,7 +113,8 @@ out:
113 return status; 113 return status;
114} 114}
115 115
116static int nfs_delegation_claim_opens(struct inode *inode, const nfs4_stateid *stateid) 116static int nfs_delegation_claim_opens(struct inode *inode,
117 const nfs4_stateid *stateid, fmode_t type)
117{ 118{
118 struct nfs_inode *nfsi = NFS_I(inode); 119 struct nfs_inode *nfsi = NFS_I(inode);
119 struct nfs_open_context *ctx; 120 struct nfs_open_context *ctx;
@@ -140,7 +141,7 @@ again:
140 /* Block nfs4_proc_unlck */ 141 /* Block nfs4_proc_unlck */
141 mutex_lock(&sp->so_delegreturn_mutex); 142 mutex_lock(&sp->so_delegreturn_mutex);
142 seq = raw_seqcount_begin(&sp->so_reclaim_seqcount); 143 seq = raw_seqcount_begin(&sp->so_reclaim_seqcount);
143 err = nfs4_open_delegation_recall(ctx, state, stateid); 144 err = nfs4_open_delegation_recall(ctx, state, stateid, type);
144 if (!err) 145 if (!err)
145 err = nfs_delegation_claim_locks(ctx, state, stateid); 146 err = nfs_delegation_claim_locks(ctx, state, stateid);
146 if (!err && read_seqcount_retry(&sp->so_reclaim_seqcount, seq)) 147 if (!err && read_seqcount_retry(&sp->so_reclaim_seqcount, seq))
@@ -411,7 +412,8 @@ static int nfs_end_delegation_return(struct inode *inode, struct nfs_delegation
411 do { 412 do {
412 if (test_bit(NFS_DELEGATION_REVOKED, &delegation->flags)) 413 if (test_bit(NFS_DELEGATION_REVOKED, &delegation->flags))
413 break; 414 break;
414 err = nfs_delegation_claim_opens(inode, &delegation->stateid); 415 err = nfs_delegation_claim_opens(inode, &delegation->stateid,
416 delegation->type);
415 if (!issync || err != -EAGAIN) 417 if (!issync || err != -EAGAIN)
416 break; 418 break;
417 /* 419 /*
diff --git a/fs/nfs/delegation.h b/fs/nfs/delegation.h
index a44829173e57..333063e032f0 100644
--- a/fs/nfs/delegation.h
+++ b/fs/nfs/delegation.h
@@ -54,7 +54,7 @@ void nfs_delegation_reap_unclaimed(struct nfs_client *clp);
54 54
55/* NFSv4 delegation-related procedures */ 55/* NFSv4 delegation-related procedures */
56int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync); 56int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync);
57int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid); 57int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid, fmode_t type);
58int nfs4_lock_delegation_recall(struct file_lock *fl, struct nfs4_state *state, const nfs4_stateid *stateid); 58int nfs4_lock_delegation_recall(struct file_lock *fl, struct nfs4_state *state, const nfs4_stateid *stateid);
59bool nfs4_copy_delegation_stateid(nfs4_stateid *dst, struct inode *inode, fmode_t flags); 59bool nfs4_copy_delegation_stateid(nfs4_stateid *dst, struct inode *inode, fmode_t flags);
60 60
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 38678d9a5cc4..4b1d08f56aba 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -166,8 +166,11 @@ nfs_direct_select_verf(struct nfs_direct_req *dreq,
166 struct nfs_writeverf *verfp = &dreq->verf; 166 struct nfs_writeverf *verfp = &dreq->verf;
167 167
168#ifdef CONFIG_NFS_V4_1 168#ifdef CONFIG_NFS_V4_1
169 if (ds_clp) { 169 /*
170 /* pNFS is in use, use the DS verf */ 170 * pNFS is in use, use the DS verf except commit_through_mds is set
171 * for layout segment where nbuckets is zero.
172 */
173 if (ds_clp && dreq->ds_cinfo.nbuckets > 0) {
171 if (commit_idx >= 0 && commit_idx < dreq->ds_cinfo.nbuckets) 174 if (commit_idx >= 0 && commit_idx < dreq->ds_cinfo.nbuckets)
172 verfp = &dreq->ds_cinfo.buckets[commit_idx].direct_verf; 175 verfp = &dreq->ds_cinfo.buckets[commit_idx].direct_verf;
173 else 176 else
diff --git a/fs/nfs/filelayout/filelayout.c b/fs/nfs/filelayout/filelayout.c
index b34f2e228601..02ec07973bc4 100644
--- a/fs/nfs/filelayout/filelayout.c
+++ b/fs/nfs/filelayout/filelayout.c
@@ -629,23 +629,18 @@ out_put:
629 goto out; 629 goto out;
630} 630}
631 631
632static void filelayout_free_fh_array(struct nfs4_filelayout_segment *fl) 632static void _filelayout_free_lseg(struct nfs4_filelayout_segment *fl)
633{ 633{
634 int i; 634 int i;
635 635
636 for (i = 0; i < fl->num_fh; i++) { 636 if (fl->fh_array) {
637 if (!fl->fh_array[i]) 637 for (i = 0; i < fl->num_fh; i++) {
638 break; 638 if (!fl->fh_array[i])
639 kfree(fl->fh_array[i]); 639 break;
640 kfree(fl->fh_array[i]);
641 }
642 kfree(fl->fh_array);
640 } 643 }
641 kfree(fl->fh_array);
642 fl->fh_array = NULL;
643}
644
645static void
646_filelayout_free_lseg(struct nfs4_filelayout_segment *fl)
647{
648 filelayout_free_fh_array(fl);
649 kfree(fl); 644 kfree(fl);
650} 645}
651 646
@@ -716,21 +711,21 @@ filelayout_decode_layout(struct pnfs_layout_hdr *flo,
716 /* Do we want to use a mempool here? */ 711 /* Do we want to use a mempool here? */
717 fl->fh_array[i] = kmalloc(sizeof(struct nfs_fh), gfp_flags); 712 fl->fh_array[i] = kmalloc(sizeof(struct nfs_fh), gfp_flags);
718 if (!fl->fh_array[i]) 713 if (!fl->fh_array[i])
719 goto out_err_free; 714 goto out_err;
720 715
721 p = xdr_inline_decode(&stream, 4); 716 p = xdr_inline_decode(&stream, 4);
722 if (unlikely(!p)) 717 if (unlikely(!p))
723 goto out_err_free; 718 goto out_err;
724 fl->fh_array[i]->size = be32_to_cpup(p++); 719 fl->fh_array[i]->size = be32_to_cpup(p++);
725 if (sizeof(struct nfs_fh) < fl->fh_array[i]->size) { 720 if (sizeof(struct nfs_fh) < fl->fh_array[i]->size) {
726 printk(KERN_ERR "NFS: Too big fh %d received %d\n", 721 printk(KERN_ERR "NFS: Too big fh %d received %d\n",
727 i, fl->fh_array[i]->size); 722 i, fl->fh_array[i]->size);
728 goto out_err_free; 723 goto out_err;
729 } 724 }
730 725
731 p = xdr_inline_decode(&stream, fl->fh_array[i]->size); 726 p = xdr_inline_decode(&stream, fl->fh_array[i]->size);
732 if (unlikely(!p)) 727 if (unlikely(!p))
733 goto out_err_free; 728 goto out_err;
734 memcpy(fl->fh_array[i]->data, p, fl->fh_array[i]->size); 729 memcpy(fl->fh_array[i]->data, p, fl->fh_array[i]->size);
735 dprintk("DEBUG: %s: fh len %d\n", __func__, 730 dprintk("DEBUG: %s: fh len %d\n", __func__,
736 fl->fh_array[i]->size); 731 fl->fh_array[i]->size);
@@ -739,8 +734,6 @@ filelayout_decode_layout(struct pnfs_layout_hdr *flo,
739 __free_page(scratch); 734 __free_page(scratch);
740 return 0; 735 return 0;
741 736
742out_err_free:
743 filelayout_free_fh_array(fl);
744out_err: 737out_err:
745 __free_page(scratch); 738 __free_page(scratch);
746 return -EIO; 739 return -EIO;
diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c
index d731bbf974aa..0f020e4d8421 100644
--- a/fs/nfs/nfs42proc.c
+++ b/fs/nfs/nfs42proc.c
@@ -175,10 +175,12 @@ loff_t nfs42_proc_llseek(struct file *filep, loff_t offset, int whence)
175{ 175{
176 struct nfs_server *server = NFS_SERVER(file_inode(filep)); 176 struct nfs_server *server = NFS_SERVER(file_inode(filep));
177 struct nfs4_exception exception = { }; 177 struct nfs4_exception exception = { };
178 int err; 178 loff_t err;
179 179
180 do { 180 do {
181 err = _nfs42_proc_llseek(filep, offset, whence); 181 err = _nfs42_proc_llseek(filep, offset, whence);
182 if (err >= 0)
183 break;
182 if (err == -ENOTSUPP) 184 if (err == -ENOTSUPP)
183 return -EOPNOTSUPP; 185 return -EOPNOTSUPP;
184 err = nfs4_handle_exception(server, err, &exception); 186 err = nfs4_handle_exception(server, err, &exception);
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 693b903b48bd..f93b9cdb4934 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -1127,6 +1127,21 @@ static int nfs4_wait_for_completion_rpc_task(struct rpc_task *task)
1127 return ret; 1127 return ret;
1128} 1128}
1129 1129
1130static bool nfs4_mode_match_open_stateid(struct nfs4_state *state,
1131 fmode_t fmode)
1132{
1133 switch(fmode & (FMODE_READ|FMODE_WRITE)) {
1134 case FMODE_READ|FMODE_WRITE:
1135 return state->n_rdwr != 0;
1136 case FMODE_WRITE:
1137 return state->n_wronly != 0;
1138 case FMODE_READ:
1139 return state->n_rdonly != 0;
1140 }
1141 WARN_ON_ONCE(1);
1142 return false;
1143}
1144
1130static int can_open_cached(struct nfs4_state *state, fmode_t mode, int open_mode) 1145static int can_open_cached(struct nfs4_state *state, fmode_t mode, int open_mode)
1131{ 1146{
1132 int ret = 0; 1147 int ret = 0;
@@ -1571,17 +1586,13 @@ static struct nfs4_opendata *nfs4_open_recoverdata_alloc(struct nfs_open_context
1571 return opendata; 1586 return opendata;
1572} 1587}
1573 1588
1574static int nfs4_open_recover_helper(struct nfs4_opendata *opendata, fmode_t fmode, struct nfs4_state **res) 1589static int nfs4_open_recover_helper(struct nfs4_opendata *opendata,
1590 fmode_t fmode)
1575{ 1591{
1576 struct nfs4_state *newstate; 1592 struct nfs4_state *newstate;
1577 int ret; 1593 int ret;
1578 1594
1579 if ((opendata->o_arg.claim == NFS4_OPEN_CLAIM_DELEGATE_CUR || 1595 if (!nfs4_mode_match_open_stateid(opendata->state, fmode))
1580 opendata->o_arg.claim == NFS4_OPEN_CLAIM_DELEG_CUR_FH) &&
1581 (opendata->o_arg.u.delegation_type & fmode) != fmode)
1582 /* This mode can't have been delegated, so we must have
1583 * a valid open_stateid to cover it - not need to reclaim.
1584 */
1585 return 0; 1596 return 0;
1586 opendata->o_arg.open_flags = 0; 1597 opendata->o_arg.open_flags = 0;
1587 opendata->o_arg.fmode = fmode; 1598 opendata->o_arg.fmode = fmode;
@@ -1597,14 +1608,14 @@ static int nfs4_open_recover_helper(struct nfs4_opendata *opendata, fmode_t fmod
1597 newstate = nfs4_opendata_to_nfs4_state(opendata); 1608 newstate = nfs4_opendata_to_nfs4_state(opendata);
1598 if (IS_ERR(newstate)) 1609 if (IS_ERR(newstate))
1599 return PTR_ERR(newstate); 1610 return PTR_ERR(newstate);
1611 if (newstate != opendata->state)
1612 ret = -ESTALE;
1600 nfs4_close_state(newstate, fmode); 1613 nfs4_close_state(newstate, fmode);
1601 *res = newstate; 1614 return ret;
1602 return 0;
1603} 1615}
1604 1616
1605static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state *state) 1617static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state *state)
1606{ 1618{
1607 struct nfs4_state *newstate;
1608 int ret; 1619 int ret;
1609 1620
1610 /* Don't trigger recovery in nfs_test_and_clear_all_open_stateid */ 1621 /* Don't trigger recovery in nfs_test_and_clear_all_open_stateid */
@@ -1615,27 +1626,15 @@ static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state *
1615 clear_bit(NFS_DELEGATED_STATE, &state->flags); 1626 clear_bit(NFS_DELEGATED_STATE, &state->flags);
1616 clear_bit(NFS_OPEN_STATE, &state->flags); 1627 clear_bit(NFS_OPEN_STATE, &state->flags);
1617 smp_rmb(); 1628 smp_rmb();
1618 if (state->n_rdwr != 0) { 1629 ret = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE);
1619 ret = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE, &newstate); 1630 if (ret != 0)
1620 if (ret != 0) 1631 return ret;
1621 return ret; 1632 ret = nfs4_open_recover_helper(opendata, FMODE_WRITE);
1622 if (newstate != state) 1633 if (ret != 0)
1623 return -ESTALE; 1634 return ret;
1624 } 1635 ret = nfs4_open_recover_helper(opendata, FMODE_READ);
1625 if (state->n_wronly != 0) { 1636 if (ret != 0)
1626 ret = nfs4_open_recover_helper(opendata, FMODE_WRITE, &newstate); 1637 return ret;
1627 if (ret != 0)
1628 return ret;
1629 if (newstate != state)
1630 return -ESTALE;
1631 }
1632 if (state->n_rdonly != 0) {
1633 ret = nfs4_open_recover_helper(opendata, FMODE_READ, &newstate);
1634 if (ret != 0)
1635 return ret;
1636 if (newstate != state)
1637 return -ESTALE;
1638 }
1639 /* 1638 /*
1640 * We may have performed cached opens for all three recoveries. 1639 * We may have performed cached opens for all three recoveries.
1641 * Check if we need to update the current stateid. 1640 * Check if we need to update the current stateid.
@@ -1759,18 +1758,32 @@ static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct
1759 return err; 1758 return err;
1760} 1759}
1761 1760
1762int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid) 1761int nfs4_open_delegation_recall(struct nfs_open_context *ctx,
1762 struct nfs4_state *state, const nfs4_stateid *stateid,
1763 fmode_t type)
1763{ 1764{
1764 struct nfs_server *server = NFS_SERVER(state->inode); 1765 struct nfs_server *server = NFS_SERVER(state->inode);
1765 struct nfs4_opendata *opendata; 1766 struct nfs4_opendata *opendata;
1766 int err; 1767 int err = 0;
1767 1768
1768 opendata = nfs4_open_recoverdata_alloc(ctx, state, 1769 opendata = nfs4_open_recoverdata_alloc(ctx, state,
1769 NFS4_OPEN_CLAIM_DELEG_CUR_FH); 1770 NFS4_OPEN_CLAIM_DELEG_CUR_FH);
1770 if (IS_ERR(opendata)) 1771 if (IS_ERR(opendata))
1771 return PTR_ERR(opendata); 1772 return PTR_ERR(opendata);
1772 nfs4_stateid_copy(&opendata->o_arg.u.delegation, stateid); 1773 nfs4_stateid_copy(&opendata->o_arg.u.delegation, stateid);
1773 err = nfs4_open_recover(opendata, state); 1774 clear_bit(NFS_DELEGATED_STATE, &state->flags);
1775 switch (type & (FMODE_READ|FMODE_WRITE)) {
1776 case FMODE_READ|FMODE_WRITE:
1777 case FMODE_WRITE:
1778 err = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE);
1779 if (err)
1780 break;
1781 err = nfs4_open_recover_helper(opendata, FMODE_WRITE);
1782 if (err)
1783 break;
1784 case FMODE_READ:
1785 err = nfs4_open_recover_helper(opendata, FMODE_READ);
1786 }
1774 nfs4_opendata_put(opendata); 1787 nfs4_opendata_put(opendata);
1775 return nfs4_handle_delegation_recall_error(server, state, stateid, err); 1788 return nfs4_handle_delegation_recall_error(server, state, stateid, err);
1776} 1789}
@@ -2645,6 +2658,15 @@ out:
2645 return err; 2658 return err;
2646} 2659}
2647 2660
2661static bool
2662nfs4_wait_on_layoutreturn(struct inode *inode, struct rpc_task *task)
2663{
2664 if (inode == NULL || !nfs_have_layout(inode))
2665 return false;
2666
2667 return pnfs_wait_on_layoutreturn(inode, task);
2668}
2669
2648struct nfs4_closedata { 2670struct nfs4_closedata {
2649 struct inode *inode; 2671 struct inode *inode;
2650 struct nfs4_state *state; 2672 struct nfs4_state *state;
@@ -2763,6 +2785,11 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data)
2763 goto out_no_action; 2785 goto out_no_action;
2764 } 2786 }
2765 2787
2788 if (nfs4_wait_on_layoutreturn(inode, task)) {
2789 nfs_release_seqid(calldata->arg.seqid);
2790 goto out_wait;
2791 }
2792
2766 if (calldata->arg.fmode == 0) 2793 if (calldata->arg.fmode == 0)
2767 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE]; 2794 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE];
2768 if (calldata->roc) 2795 if (calldata->roc)
@@ -5308,6 +5335,9 @@ static void nfs4_delegreturn_prepare(struct rpc_task *task, void *data)
5308 5335
5309 d_data = (struct nfs4_delegreturndata *)data; 5336 d_data = (struct nfs4_delegreturndata *)data;
5310 5337
5338 if (nfs4_wait_on_layoutreturn(d_data->inode, task))
5339 return;
5340
5311 if (d_data->roc) 5341 if (d_data->roc)
5312 pnfs_roc_get_barrier(d_data->inode, &d_data->roc_barrier); 5342 pnfs_roc_get_barrier(d_data->inode, &d_data->roc_barrier);
5313 5343
@@ -7800,39 +7830,46 @@ static void nfs4_layoutget_done(struct rpc_task *task, void *calldata)
7800 dprintk("%s: NFS4ERR_RECALLCONFLICT waiting %lu\n", 7830 dprintk("%s: NFS4ERR_RECALLCONFLICT waiting %lu\n",
7801 __func__, delay); 7831 __func__, delay);
7802 rpc_delay(task, delay); 7832 rpc_delay(task, delay);
7803 task->tk_status = 0; 7833 /* Do not call nfs4_async_handle_error() */
7804 rpc_restart_call_prepare(task); 7834 goto out_restart;
7805 goto out; /* Do not call nfs4_async_handle_error() */
7806 } 7835 }
7807 break; 7836 break;
7808 case -NFS4ERR_EXPIRED: 7837 case -NFS4ERR_EXPIRED:
7809 case -NFS4ERR_BAD_STATEID: 7838 case -NFS4ERR_BAD_STATEID:
7810 spin_lock(&inode->i_lock); 7839 spin_lock(&inode->i_lock);
7811 lo = NFS_I(inode)->layout; 7840 if (nfs4_stateid_match(&lgp->args.stateid,
7812 if (!lo || list_empty(&lo->plh_segs)) { 7841 &lgp->args.ctx->state->stateid)) {
7813 spin_unlock(&inode->i_lock); 7842 spin_unlock(&inode->i_lock);
7814 /* If the open stateid was bad, then recover it. */ 7843 /* If the open stateid was bad, then recover it. */
7815 state = lgp->args.ctx->state; 7844 state = lgp->args.ctx->state;
7816 } else { 7845 break;
7846 }
7847 lo = NFS_I(inode)->layout;
7848 if (lo && nfs4_stateid_match(&lgp->args.stateid,
7849 &lo->plh_stateid)) {
7817 LIST_HEAD(head); 7850 LIST_HEAD(head);
7818 7851
7819 /* 7852 /*
7820 * Mark the bad layout state as invalid, then retry 7853 * Mark the bad layout state as invalid, then retry
7821 * with the current stateid. 7854 * with the current stateid.
7822 */ 7855 */
7856 set_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
7823 pnfs_mark_matching_lsegs_invalid(lo, &head, NULL); 7857 pnfs_mark_matching_lsegs_invalid(lo, &head, NULL);
7824 spin_unlock(&inode->i_lock); 7858 spin_unlock(&inode->i_lock);
7825 pnfs_free_lseg_list(&head); 7859 pnfs_free_lseg_list(&head);
7826 7860 } else
7827 task->tk_status = 0; 7861 spin_unlock(&inode->i_lock);
7828 rpc_restart_call_prepare(task); 7862 goto out_restart;
7829 }
7830 } 7863 }
7831 if (nfs4_async_handle_error(task, server, state, NULL) == -EAGAIN) 7864 if (nfs4_async_handle_error(task, server, state, NULL) == -EAGAIN)
7832 rpc_restart_call_prepare(task); 7865 goto out_restart;
7833out: 7866out:
7834 dprintk("<-- %s\n", __func__); 7867 dprintk("<-- %s\n", __func__);
7835 return; 7868 return;
7869out_restart:
7870 task->tk_status = 0;
7871 rpc_restart_call_prepare(task);
7872 return;
7836out_overflow: 7873out_overflow:
7837 task->tk_status = -EOVERFLOW; 7874 task->tk_status = -EOVERFLOW;
7838 goto out; 7875 goto out;
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index da73bc443238..5db324635e92 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -1481,7 +1481,7 @@ restart:
1481 spin_unlock(&state->state_lock); 1481 spin_unlock(&state->state_lock);
1482 } 1482 }
1483 nfs4_put_open_state(state); 1483 nfs4_put_open_state(state);
1484 clear_bit(NFS4CLNT_RECLAIM_NOGRACE, 1484 clear_bit(NFS_STATE_RECLAIM_NOGRACE,
1485 &state->flags); 1485 &state->flags);
1486 spin_lock(&sp->so_lock); 1486 spin_lock(&sp->so_lock);
1487 goto restart; 1487 goto restart;
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index 7c5718ba625e..fe3ddd20ff89 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -508,7 +508,7 @@ size_t nfs_generic_pg_test(struct nfs_pageio_descriptor *desc,
508 * for it without upsetting the slab allocator. 508 * for it without upsetting the slab allocator.
509 */ 509 */
510 if (((mirror->pg_count + req->wb_bytes) >> PAGE_SHIFT) * 510 if (((mirror->pg_count + req->wb_bytes) >> PAGE_SHIFT) *
511 sizeof(struct page) > PAGE_SIZE) 511 sizeof(struct page *) > PAGE_SIZE)
512 return 0; 512 return 0;
513 513
514 return min(mirror->pg_bsize - mirror->pg_count, (size_t)req->wb_bytes); 514 return min(mirror->pg_bsize - mirror->pg_count, (size_t)req->wb_bytes);
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index ba1246433794..8abe27165ad0 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -1104,20 +1104,15 @@ bool pnfs_roc(struct inode *ino)
1104 mark_lseg_invalid(lseg, &tmp_list); 1104 mark_lseg_invalid(lseg, &tmp_list);
1105 found = true; 1105 found = true;
1106 } 1106 }
1107 /* pnfs_prepare_layoutreturn() grabs lo ref and it will be put 1107 /* ROC in two conditions:
1108 * in pnfs_roc_release(). We don't really send a layoutreturn but
1109 * still want others to view us like we are sending one!
1110 *
1111 * If pnfs_prepare_layoutreturn() fails, it means someone else is doing
1112 * LAYOUTRETURN, so we proceed like there are no layouts to return.
1113 *
1114 * ROC in three conditions:
1115 * 1. there are ROC lsegs 1108 * 1. there are ROC lsegs
1116 * 2. we don't send layoutreturn 1109 * 2. we don't send layoutreturn
1117 * 3. no others are sending layoutreturn
1118 */ 1110 */
1119 if (found && !layoutreturn && pnfs_prepare_layoutreturn(lo)) 1111 if (found && !layoutreturn) {
1112 /* lo ref dropped in pnfs_roc_release() */
1113 pnfs_get_layout_hdr(lo);
1120 roc = true; 1114 roc = true;
1115 }
1121 1116
1122out_noroc: 1117out_noroc:
1123 spin_unlock(&ino->i_lock); 1118 spin_unlock(&ino->i_lock);
@@ -1172,6 +1167,26 @@ void pnfs_roc_get_barrier(struct inode *ino, u32 *barrier)
1172 spin_unlock(&ino->i_lock); 1167 spin_unlock(&ino->i_lock);
1173} 1168}
1174 1169
1170bool pnfs_wait_on_layoutreturn(struct inode *ino, struct rpc_task *task)
1171{
1172 struct nfs_inode *nfsi = NFS_I(ino);
1173 struct pnfs_layout_hdr *lo;
1174 bool sleep = false;
1175
1176 /* we might not have grabbed lo reference. so need to check under
1177 * i_lock */
1178 spin_lock(&ino->i_lock);
1179 lo = nfsi->layout;
1180 if (lo && test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags))
1181 sleep = true;
1182 spin_unlock(&ino->i_lock);
1183
1184 if (sleep)
1185 rpc_sleep_on(&NFS_SERVER(ino)->roc_rpcwaitq, task, NULL);
1186
1187 return sleep;
1188}
1189
1175/* 1190/*
1176 * Compare two layout segments for sorting into layout cache. 1191 * Compare two layout segments for sorting into layout cache.
1177 * We want to preferentially return RW over RO layouts, so ensure those 1192 * We want to preferentially return RW over RO layouts, so ensure those
diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h
index 78c9351ff117..d1990e90e7a0 100644
--- a/fs/nfs/pnfs.h
+++ b/fs/nfs/pnfs.h
@@ -270,6 +270,7 @@ bool pnfs_roc(struct inode *ino);
270void pnfs_roc_release(struct inode *ino); 270void pnfs_roc_release(struct inode *ino);
271void pnfs_roc_set_barrier(struct inode *ino, u32 barrier); 271void pnfs_roc_set_barrier(struct inode *ino, u32 barrier);
272void pnfs_roc_get_barrier(struct inode *ino, u32 *barrier); 272void pnfs_roc_get_barrier(struct inode *ino, u32 *barrier);
273bool pnfs_wait_on_layoutreturn(struct inode *ino, struct rpc_task *task);
273void pnfs_set_layoutcommit(struct inode *, struct pnfs_layout_segment *, loff_t); 274void pnfs_set_layoutcommit(struct inode *, struct pnfs_layout_segment *, loff_t);
274void pnfs_cleanup_layoutcommit(struct nfs4_layoutcommit_data *data); 275void pnfs_cleanup_layoutcommit(struct nfs4_layoutcommit_data *data);
275int pnfs_layoutcommit_inode(struct inode *inode, bool sync); 276int pnfs_layoutcommit_inode(struct inode *inode, bool sync);
@@ -639,6 +640,12 @@ pnfs_roc_get_barrier(struct inode *ino, u32 *barrier)
639{ 640{
640} 641}
641 642
643static inline bool
644pnfs_wait_on_layoutreturn(struct inode *ino, struct rpc_task *task)
645{
646 return false;
647}
648
642static inline void set_pnfs_layoutdriver(struct nfs_server *s, 649static inline void set_pnfs_layoutdriver(struct nfs_server *s,
643 const struct nfs_fh *mntfh, u32 id) 650 const struct nfs_fh *mntfh, u32 id)
644{ 651{
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index ae0ff7a11b40..01b8cc8e8cfc 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -72,6 +72,9 @@ void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio)
72{ 72{
73 struct nfs_pgio_mirror *mirror; 73 struct nfs_pgio_mirror *mirror;
74 74
75 if (pgio->pg_ops && pgio->pg_ops->pg_cleanup)
76 pgio->pg_ops->pg_cleanup(pgio);
77
75 pgio->pg_ops = &nfs_pgio_rw_ops; 78 pgio->pg_ops = &nfs_pgio_rw_ops;
76 79
77 /* read path should never have more than one mirror */ 80 /* read path should never have more than one mirror */
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 388f48079c43..72624dc4a623 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -1351,6 +1351,9 @@ void nfs_pageio_reset_write_mds(struct nfs_pageio_descriptor *pgio)
1351{ 1351{
1352 struct nfs_pgio_mirror *mirror; 1352 struct nfs_pgio_mirror *mirror;
1353 1353
1354 if (pgio->pg_ops && pgio->pg_ops->pg_cleanup)
1355 pgio->pg_ops->pg_cleanup(pgio);
1356
1354 pgio->pg_ops = &nfs_pgio_rw_ops; 1357 pgio->pg_ops = &nfs_pgio_rw_ops;
1355 1358
1356 nfs_pageio_stop_mirroring(pgio); 1359 nfs_pageio_stop_mirroring(pgio);
diff --git a/fs/nsfs.c b/fs/nsfs.c
index e4905fbf3396..8f20d6016e20 100644
--- a/fs/nsfs.c
+++ b/fs/nsfs.c
@@ -142,7 +142,8 @@ static int nsfs_show_path(struct seq_file *seq, struct dentry *dentry)
142 struct inode *inode = d_inode(dentry); 142 struct inode *inode = d_inode(dentry);
143 const struct proc_ns_operations *ns_ops = dentry->d_fsdata; 143 const struct proc_ns_operations *ns_ops = dentry->d_fsdata;
144 144
145 return seq_printf(seq, "%s:[%lu]", ns_ops->name, inode->i_ino); 145 seq_printf(seq, "%s:[%lu]", ns_ops->name, inode->i_ino);
146 return 0;
146} 147}
147 148
148static const struct super_operations nsfs_ops = { 149static const struct super_operations nsfs_ops = {
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index 46b8b2bbc95a..ee5aa4daaea0 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -1439,6 +1439,7 @@ int dlm_master_request_handler(struct o2net_msg *msg, u32 len, void *data,
1439 int found, ret; 1439 int found, ret;
1440 int set_maybe; 1440 int set_maybe;
1441 int dispatch_assert = 0; 1441 int dispatch_assert = 0;
1442 int dispatched = 0;
1442 1443
1443 if (!dlm_grab(dlm)) 1444 if (!dlm_grab(dlm))
1444 return DLM_MASTER_RESP_NO; 1445 return DLM_MASTER_RESP_NO;
@@ -1658,15 +1659,18 @@ send_response:
1658 mlog(ML_ERROR, "failed to dispatch assert master work\n"); 1659 mlog(ML_ERROR, "failed to dispatch assert master work\n");
1659 response = DLM_MASTER_RESP_ERROR; 1660 response = DLM_MASTER_RESP_ERROR;
1660 dlm_lockres_put(res); 1661 dlm_lockres_put(res);
1661 } else 1662 } else {
1663 dispatched = 1;
1662 __dlm_lockres_grab_inflight_worker(dlm, res); 1664 __dlm_lockres_grab_inflight_worker(dlm, res);
1665 }
1663 spin_unlock(&res->spinlock); 1666 spin_unlock(&res->spinlock);
1664 } else { 1667 } else {
1665 if (res) 1668 if (res)
1666 dlm_lockres_put(res); 1669 dlm_lockres_put(res);
1667 } 1670 }
1668 1671
1669 dlm_put(dlm); 1672 if (!dispatched)
1673 dlm_put(dlm);
1670 return response; 1674 return response;
1671} 1675}
1672 1676
@@ -2090,7 +2094,6 @@ int dlm_dispatch_assert_master(struct dlm_ctxt *dlm,
2090 2094
2091 2095
2092 /* queue up work for dlm_assert_master_worker */ 2096 /* queue up work for dlm_assert_master_worker */
2093 dlm_grab(dlm); /* get an extra ref for the work item */
2094 dlm_init_work_item(dlm, item, dlm_assert_master_worker, NULL); 2097 dlm_init_work_item(dlm, item, dlm_assert_master_worker, NULL);
2095 item->u.am.lockres = res; /* already have a ref */ 2098 item->u.am.lockres = res; /* already have a ref */
2096 /* can optionally ignore node numbers higher than this node */ 2099 /* can optionally ignore node numbers higher than this node */
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
index d0e436dc6437..3d90ad7ff91f 100644
--- a/fs/ocfs2/dlm/dlmrecovery.c
+++ b/fs/ocfs2/dlm/dlmrecovery.c
@@ -1694,6 +1694,7 @@ int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data,
1694 unsigned int hash; 1694 unsigned int hash;
1695 int master = DLM_LOCK_RES_OWNER_UNKNOWN; 1695 int master = DLM_LOCK_RES_OWNER_UNKNOWN;
1696 u32 flags = DLM_ASSERT_MASTER_REQUERY; 1696 u32 flags = DLM_ASSERT_MASTER_REQUERY;
1697 int dispatched = 0;
1697 1698
1698 if (!dlm_grab(dlm)) { 1699 if (!dlm_grab(dlm)) {
1699 /* since the domain has gone away on this 1700 /* since the domain has gone away on this
@@ -1719,8 +1720,10 @@ int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data,
1719 dlm_put(dlm); 1720 dlm_put(dlm);
1720 /* sender will take care of this and retry */ 1721 /* sender will take care of this and retry */
1721 return ret; 1722 return ret;
1722 } else 1723 } else {
1724 dispatched = 1;
1723 __dlm_lockres_grab_inflight_worker(dlm, res); 1725 __dlm_lockres_grab_inflight_worker(dlm, res);
1726 }
1724 spin_unlock(&res->spinlock); 1727 spin_unlock(&res->spinlock);
1725 } else { 1728 } else {
1726 /* put.. incase we are not the master */ 1729 /* put.. incase we are not the master */
@@ -1730,7 +1733,8 @@ int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data,
1730 } 1733 }
1731 spin_unlock(&dlm->spinlock); 1734 spin_unlock(&dlm->spinlock);
1732 1735
1733 dlm_put(dlm); 1736 if (!dispatched)
1737 dlm_put(dlm);
1734 return master; 1738 return master;
1735} 1739}
1736 1740
@@ -1776,7 +1780,7 @@ static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
1776 struct dlm_migratable_lockres *mres) 1780 struct dlm_migratable_lockres *mres)
1777{ 1781{
1778 struct dlm_migratable_lock *ml; 1782 struct dlm_migratable_lock *ml;
1779 struct list_head *queue; 1783 struct list_head *queue, *iter;
1780 struct list_head *tmpq = NULL; 1784 struct list_head *tmpq = NULL;
1781 struct dlm_lock *newlock = NULL; 1785 struct dlm_lock *newlock = NULL;
1782 struct dlm_lockstatus *lksb = NULL; 1786 struct dlm_lockstatus *lksb = NULL;
@@ -1821,7 +1825,9 @@ static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
1821 spin_lock(&res->spinlock); 1825 spin_lock(&res->spinlock);
1822 for (j = DLM_GRANTED_LIST; j <= DLM_BLOCKED_LIST; j++) { 1826 for (j = DLM_GRANTED_LIST; j <= DLM_BLOCKED_LIST; j++) {
1823 tmpq = dlm_list_idx_to_ptr(res, j); 1827 tmpq = dlm_list_idx_to_ptr(res, j);
1824 list_for_each_entry(lock, tmpq, list) { 1828 list_for_each(iter, tmpq) {
1829 lock = list_entry(iter,
1830 struct dlm_lock, list);
1825 if (lock->ml.cookie == ml->cookie) 1831 if (lock->ml.cookie == ml->cookie)
1826 break; 1832 break;
1827 lock = NULL; 1833 lock = NULL;
diff --git a/fs/seq_file.c b/fs/seq_file.c
index 263b125dbcf4..225586e141ca 100644
--- a/fs/seq_file.c
+++ b/fs/seq_file.c
@@ -372,16 +372,16 @@ EXPORT_SYMBOL(seq_release);
372 * @esc: set of characters that need escaping 372 * @esc: set of characters that need escaping
373 * 373 *
374 * Puts string into buffer, replacing each occurrence of character from 374 * Puts string into buffer, replacing each occurrence of character from
375 * @esc with usual octal escape. Returns 0 in case of success, -1 - in 375 * @esc with usual octal escape.
376 * case of overflow. 376 * Use seq_has_overflowed() to check for errors.
377 */ 377 */
378int seq_escape(struct seq_file *m, const char *s, const char *esc) 378void seq_escape(struct seq_file *m, const char *s, const char *esc)
379{ 379{
380 char *end = m->buf + m->size; 380 char *end = m->buf + m->size;
381 char *p; 381 char *p;
382 char c; 382 char c;
383 383
384 for (p = m->buf + m->count; (c = *s) != '\0' && p < end; s++) { 384 for (p = m->buf + m->count; (c = *s) != '\0' && p < end; s++) {
385 if (!strchr(esc, c)) { 385 if (!strchr(esc, c)) {
386 *p++ = c; 386 *p++ = c;
387 continue; 387 continue;
@@ -394,14 +394,13 @@ int seq_escape(struct seq_file *m, const char *s, const char *esc)
394 continue; 394 continue;
395 } 395 }
396 seq_set_overflow(m); 396 seq_set_overflow(m);
397 return -1; 397 return;
398 } 398 }
399 m->count = p - m->buf; 399 m->count = p - m->buf;
400 return 0;
401} 400}
402EXPORT_SYMBOL(seq_escape); 401EXPORT_SYMBOL(seq_escape);
403 402
404int seq_vprintf(struct seq_file *m, const char *f, va_list args) 403void seq_vprintf(struct seq_file *m, const char *f, va_list args)
405{ 404{
406 int len; 405 int len;
407 406
@@ -409,24 +408,20 @@ int seq_vprintf(struct seq_file *m, const char *f, va_list args)
409 len = vsnprintf(m->buf + m->count, m->size - m->count, f, args); 408 len = vsnprintf(m->buf + m->count, m->size - m->count, f, args);
410 if (m->count + len < m->size) { 409 if (m->count + len < m->size) {
411 m->count += len; 410 m->count += len;
412 return 0; 411 return;
413 } 412 }
414 } 413 }
415 seq_set_overflow(m); 414 seq_set_overflow(m);
416 return -1;
417} 415}
418EXPORT_SYMBOL(seq_vprintf); 416EXPORT_SYMBOL(seq_vprintf);
419 417
420int seq_printf(struct seq_file *m, const char *f, ...) 418void seq_printf(struct seq_file *m, const char *f, ...)
421{ 419{
422 int ret;
423 va_list args; 420 va_list args;
424 421
425 va_start(args, f); 422 va_start(args, f);
426 ret = seq_vprintf(m, f, args); 423 seq_vprintf(m, f, args);
427 va_end(args); 424 va_end(args);
428
429 return ret;
430} 425}
431EXPORT_SYMBOL(seq_printf); 426EXPORT_SYMBOL(seq_printf);
432 427
@@ -664,26 +659,25 @@ int seq_open_private(struct file *filp, const struct seq_operations *ops,
664} 659}
665EXPORT_SYMBOL(seq_open_private); 660EXPORT_SYMBOL(seq_open_private);
666 661
667int seq_putc(struct seq_file *m, char c) 662void seq_putc(struct seq_file *m, char c)
668{ 663{
669 if (m->count < m->size) { 664 if (m->count >= m->size)
670 m->buf[m->count++] = c; 665 return;
671 return 0; 666
672 } 667 m->buf[m->count++] = c;
673 return -1;
674} 668}
675EXPORT_SYMBOL(seq_putc); 669EXPORT_SYMBOL(seq_putc);
676 670
677int seq_puts(struct seq_file *m, const char *s) 671void seq_puts(struct seq_file *m, const char *s)
678{ 672{
679 int len = strlen(s); 673 int len = strlen(s);
680 if (m->count + len < m->size) { 674
681 memcpy(m->buf + m->count, s, len); 675 if (m->count + len >= m->size) {
682 m->count += len; 676 seq_set_overflow(m);
683 return 0; 677 return;
684 } 678 }
685 seq_set_overflow(m); 679 memcpy(m->buf + m->count, s, len);
686 return -1; 680 m->count += len;
687} 681}
688EXPORT_SYMBOL(seq_puts); 682EXPORT_SYMBOL(seq_puts);
689 683
@@ -694,8 +688,8 @@ EXPORT_SYMBOL(seq_puts);
694 * This routine is very quick when you show lots of numbers. 688 * This routine is very quick when you show lots of numbers.
695 * In usual cases, it will be better to use seq_printf(). It's easier to read. 689 * In usual cases, it will be better to use seq_printf(). It's easier to read.
696 */ 690 */
697int seq_put_decimal_ull(struct seq_file *m, char delimiter, 691void seq_put_decimal_ull(struct seq_file *m, char delimiter,
698 unsigned long long num) 692 unsigned long long num)
699{ 693{
700 int len; 694 int len;
701 695
@@ -707,35 +701,33 @@ int seq_put_decimal_ull(struct seq_file *m, char delimiter,
707 701
708 if (num < 10) { 702 if (num < 10) {
709 m->buf[m->count++] = num + '0'; 703 m->buf[m->count++] = num + '0';
710 return 0; 704 return;
711 } 705 }
712 706
713 len = num_to_str(m->buf + m->count, m->size - m->count, num); 707 len = num_to_str(m->buf + m->count, m->size - m->count, num);
714 if (!len) 708 if (!len)
715 goto overflow; 709 goto overflow;
716 m->count += len; 710 m->count += len;
717 return 0; 711 return;
712
718overflow: 713overflow:
719 seq_set_overflow(m); 714 seq_set_overflow(m);
720 return -1;
721} 715}
722EXPORT_SYMBOL(seq_put_decimal_ull); 716EXPORT_SYMBOL(seq_put_decimal_ull);
723 717
724int seq_put_decimal_ll(struct seq_file *m, char delimiter, 718void seq_put_decimal_ll(struct seq_file *m, char delimiter, long long num)
725 long long num)
726{ 719{
727 if (num < 0) { 720 if (num < 0) {
728 if (m->count + 3 >= m->size) { 721 if (m->count + 3 >= m->size) {
729 seq_set_overflow(m); 722 seq_set_overflow(m);
730 return -1; 723 return;
731 } 724 }
732 if (delimiter) 725 if (delimiter)
733 m->buf[m->count++] = delimiter; 726 m->buf[m->count++] = delimiter;
734 num = -num; 727 num = -num;
735 delimiter = '-'; 728 delimiter = '-';
736 } 729 }
737 return seq_put_decimal_ull(m, delimiter, num); 730 seq_put_decimal_ull(m, delimiter, num);
738
739} 731}
740EXPORT_SYMBOL(seq_put_decimal_ll); 732EXPORT_SYMBOL(seq_put_decimal_ll);
741 733
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index 634e676072cb..50311703135b 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -467,8 +467,8 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
467 * the fault_*wqh. 467 * the fault_*wqh.
468 */ 468 */
469 spin_lock(&ctx->fault_pending_wqh.lock); 469 spin_lock(&ctx->fault_pending_wqh.lock);
470 __wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL, 0, &range); 470 __wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL, &range);
471 __wake_up_locked_key(&ctx->fault_wqh, TASK_NORMAL, 0, &range); 471 __wake_up_locked_key(&ctx->fault_wqh, TASK_NORMAL, &range);
472 spin_unlock(&ctx->fault_pending_wqh.lock); 472 spin_unlock(&ctx->fault_pending_wqh.lock);
473 473
474 wake_up_poll(&ctx->fd_wqh, POLLHUP); 474 wake_up_poll(&ctx->fd_wqh, POLLHUP);
@@ -650,10 +650,10 @@ static void __wake_userfault(struct userfaultfd_ctx *ctx,
650 spin_lock(&ctx->fault_pending_wqh.lock); 650 spin_lock(&ctx->fault_pending_wqh.lock);
651 /* wake all in the range and autoremove */ 651 /* wake all in the range and autoremove */
652 if (waitqueue_active(&ctx->fault_pending_wqh)) 652 if (waitqueue_active(&ctx->fault_pending_wqh))
653 __wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL, 0, 653 __wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL,
654 range); 654 range);
655 if (waitqueue_active(&ctx->fault_wqh)) 655 if (waitqueue_active(&ctx->fault_wqh))
656 __wake_up_locked_key(&ctx->fault_wqh, TASK_NORMAL, 0, range); 656 __wake_up_locked_key(&ctx->fault_wqh, TASK_NORMAL, range);
657 spin_unlock(&ctx->fault_pending_wqh.lock); 657 spin_unlock(&ctx->fault_pending_wqh.lock);
658} 658}
659 659
@@ -1287,8 +1287,10 @@ static struct file *userfaultfd_file_create(int flags)
1287 1287
1288 file = anon_inode_getfile("[userfaultfd]", &userfaultfd_fops, ctx, 1288 file = anon_inode_getfile("[userfaultfd]", &userfaultfd_fops, ctx,
1289 O_RDWR | (flags & UFFD_SHARED_FCNTL_FLAGS)); 1289 O_RDWR | (flags & UFFD_SHARED_FCNTL_FLAGS));
1290 if (IS_ERR(file)) 1290 if (IS_ERR(file)) {
1291 mmput(ctx->mm);
1291 kmem_cache_free(userfaultfd_ctx_cachep, ctx); 1292 kmem_cache_free(userfaultfd_ctx_cachep, ctx);
1293 }
1292out: 1294out:
1293 return file; 1295 return file;
1294} 1296}
diff --git a/include/acpi/button.h b/include/acpi/button.h
index 97eea0e4c016..1cad8b2d460c 100644
--- a/include/acpi/button.h
+++ b/include/acpi/button.h
@@ -3,7 +3,7 @@
3 3
4#include <linux/notifier.h> 4#include <linux/notifier.h>
5 5
6#if defined(CONFIG_ACPI_BUTTON) || defined(CONFIG_ACPI_BUTTON_MODULE) 6#if IS_ENABLED(CONFIG_ACPI_BUTTON)
7extern int acpi_lid_notifier_register(struct notifier_block *nb); 7extern int acpi_lid_notifier_register(struct notifier_block *nb);
8extern int acpi_lid_notifier_unregister(struct notifier_block *nb); 8extern int acpi_lid_notifier_unregister(struct notifier_block *nb);
9extern int acpi_lid_open(void); 9extern int acpi_lid_open(void);
@@ -20,6 +20,6 @@ static inline int acpi_lid_open(void)
20{ 20{
21 return 1; 21 return 1;
22} 22}
23#endif /* defined(CONFIG_ACPI_BUTTON) || defined(CONFIG_ACPI_BUTTON_MODULE) */ 23#endif /* IS_ENABLED(CONFIG_ACPI_BUTTON) */
24 24
25#endif /* ACPI_BUTTON_H */ 25#endif /* ACPI_BUTTON_H */
diff --git a/include/acpi/video.h b/include/acpi/video.h
index e840b294c6f5..c62392d9b52a 100644
--- a/include/acpi/video.h
+++ b/include/acpi/video.h
@@ -24,7 +24,7 @@ enum acpi_backlight_type {
24 acpi_backlight_native, 24 acpi_backlight_native,
25}; 25};
26 26
27#if (defined CONFIG_ACPI_VIDEO || defined CONFIG_ACPI_VIDEO_MODULE) 27#if IS_ENABLED(CONFIG_ACPI_VIDEO)
28extern int acpi_video_register(void); 28extern int acpi_video_register(void);
29extern void acpi_video_unregister(void); 29extern void acpi_video_unregister(void);
30extern int acpi_video_get_edid(struct acpi_device *device, int type, 30extern int acpi_video_get_edid(struct acpi_device *device, int type,
diff --git a/include/asm-generic/memory_model.h b/include/asm-generic/memory_model.h
index f20f407ce45d..4b4b056a6eb0 100644
--- a/include/asm-generic/memory_model.h
+++ b/include/asm-generic/memory_model.h
@@ -73,7 +73,7 @@
73 * Convert a physical address to a Page Frame Number and back 73 * Convert a physical address to a Page Frame Number and back
74 */ 74 */
75#define __phys_to_pfn(paddr) ((unsigned long)((paddr) >> PAGE_SHIFT)) 75#define __phys_to_pfn(paddr) ((unsigned long)((paddr) >> PAGE_SHIFT))
76#define __pfn_to_phys(pfn) ((pfn) << PAGE_SHIFT) 76#define __pfn_to_phys(pfn) PFN_PHYS(pfn)
77 77
78#define page_to_pfn __page_to_pfn 78#define page_to_pfn __page_to_pfn
79#define pfn_to_page __pfn_to_page 79#define pfn_to_page __pfn_to_page
diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h
index 83bfb87f5bf1..e2aadbc7151f 100644
--- a/include/asm-generic/qspinlock.h
+++ b/include/asm-generic/qspinlock.h
@@ -111,8 +111,8 @@ static inline void queued_spin_unlock_wait(struct qspinlock *lock)
111 cpu_relax(); 111 cpu_relax();
112} 112}
113 113
114#ifndef virt_queued_spin_lock 114#ifndef virt_spin_lock
115static __always_inline bool virt_queued_spin_lock(struct qspinlock *lock) 115static __always_inline bool virt_spin_lock(struct qspinlock *lock)
116{ 116{
117 return false; 117 return false;
118} 118}
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index d901f1a47be6..4e14dac282bb 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -35,11 +35,7 @@
35#define VGIC_V3_MAX_LRS 16 35#define VGIC_V3_MAX_LRS 16
36#define VGIC_MAX_IRQS 1024 36#define VGIC_MAX_IRQS 1024
37#define VGIC_V2_MAX_CPUS 8 37#define VGIC_V2_MAX_CPUS 8
38 38#define VGIC_V3_MAX_CPUS 255
39/* Sanity checks... */
40#if (KVM_MAX_VCPUS > 255)
41#error Too many KVM VCPUs, the VGIC only supports up to 255 VCPUs for now
42#endif
43 39
44#if (VGIC_NR_IRQS_LEGACY & 31) 40#if (VGIC_NR_IRQS_LEGACY & 31)
45#error "VGIC_NR_IRQS must be a multiple of 32" 41#error "VGIC_NR_IRQS must be a multiple of 32"
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index 5a5d79ee256f..d5eb4ad1c534 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -13,6 +13,7 @@
13#include <linux/sched.h> 13#include <linux/sched.h>
14#include <linux/blkdev.h> 14#include <linux/blkdev.h>
15#include <linux/writeback.h> 15#include <linux/writeback.h>
16#include <linux/memcontrol.h>
16#include <linux/blk-cgroup.h> 17#include <linux/blk-cgroup.h>
17#include <linux/backing-dev-defs.h> 18#include <linux/backing-dev-defs.h>
18#include <linux/slab.h> 19#include <linux/slab.h>
@@ -252,13 +253,19 @@ int inode_congested(struct inode *inode, int cong_bits);
252 * @inode: inode of interest 253 * @inode: inode of interest
253 * 254 *
254 * cgroup writeback requires support from both the bdi and filesystem. 255 * cgroup writeback requires support from both the bdi and filesystem.
255 * Test whether @inode has both. 256 * Also, both memcg and iocg have to be on the default hierarchy. Test
257 * whether all conditions are met.
258 *
259 * Note that the test result may change dynamically on the same inode
260 * depending on how memcg and iocg are configured.
256 */ 261 */
257static inline bool inode_cgwb_enabled(struct inode *inode) 262static inline bool inode_cgwb_enabled(struct inode *inode)
258{ 263{
259 struct backing_dev_info *bdi = inode_to_bdi(inode); 264 struct backing_dev_info *bdi = inode_to_bdi(inode);
260 265
261 return bdi_cap_account_dirty(bdi) && 266 return cgroup_on_dfl(mem_cgroup_root_css->cgroup) &&
267 cgroup_on_dfl(blkcg_root_css->cgroup) &&
268 bdi_cap_account_dirty(bdi) &&
262 (bdi->capabilities & BDI_CAP_CGROUP_WRITEBACK) && 269 (bdi->capabilities & BDI_CAP_CGROUP_WRITEBACK) &&
263 (inode->i_sb->s_iflags & SB_I_CGROUPWB); 270 (inode->i_sb->s_iflags & SB_I_CGROUPWB);
264} 271}
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 708923b9b623..99da9ebc7377 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -584,7 +584,7 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
584 584
585#define list_entry_rq(ptr) list_entry((ptr), struct request, queuelist) 585#define list_entry_rq(ptr) list_entry((ptr), struct request, queuelist)
586 586
587#define rq_data_dir(rq) (((rq)->cmd_flags & 1) != 0) 587#define rq_data_dir(rq) ((int)((rq)->cmd_flags & 1))
588 588
589/* 589/*
590 * Driver can handle struct request, if it either has an old style 590 * Driver can handle struct request, if it either has an old style
@@ -1368,6 +1368,26 @@ static inline bool bvec_gap_to_prev(struct request_queue *q,
1368 ((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q)); 1368 ((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));
1369} 1369}
1370 1370
1371static inline bool bio_will_gap(struct request_queue *q, struct bio *prev,
1372 struct bio *next)
1373{
1374 if (!bio_has_data(prev))
1375 return false;
1376
1377 return bvec_gap_to_prev(q, &prev->bi_io_vec[prev->bi_vcnt - 1],
1378 next->bi_io_vec[0].bv_offset);
1379}
1380
1381static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
1382{
1383 return bio_will_gap(req->q, req->biotail, bio);
1384}
1385
1386static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
1387{
1388 return bio_will_gap(req->q, bio, req->bio);
1389}
1390
1371struct work_struct; 1391struct work_struct;
1372int kblockd_schedule_work(struct work_struct *work); 1392int kblockd_schedule_work(struct work_struct *work);
1373int kblockd_schedule_delayed_work(struct delayed_work *dwork, unsigned long delay); 1393int kblockd_schedule_delayed_work(struct delayed_work *dwork, unsigned long delay);
@@ -1494,6 +1514,26 @@ queue_max_integrity_segments(struct request_queue *q)
1494 return q->limits.max_integrity_segments; 1514 return q->limits.max_integrity_segments;
1495} 1515}
1496 1516
1517static inline bool integrity_req_gap_back_merge(struct request *req,
1518 struct bio *next)
1519{
1520 struct bio_integrity_payload *bip = bio_integrity(req->bio);
1521 struct bio_integrity_payload *bip_next = bio_integrity(next);
1522
1523 return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
1524 bip_next->bip_vec[0].bv_offset);
1525}
1526
1527static inline bool integrity_req_gap_front_merge(struct request *req,
1528 struct bio *bio)
1529{
1530 struct bio_integrity_payload *bip = bio_integrity(bio);
1531 struct bio_integrity_payload *bip_next = bio_integrity(req->bio);
1532
1533 return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
1534 bip_next->bip_vec[0].bv_offset);
1535}
1536
1497#else /* CONFIG_BLK_DEV_INTEGRITY */ 1537#else /* CONFIG_BLK_DEV_INTEGRITY */
1498 1538
1499struct bio; 1539struct bio;
@@ -1560,6 +1600,16 @@ static inline bool blk_integrity_is_initialized(struct gendisk *g)
1560{ 1600{
1561 return 0; 1601 return 0;
1562} 1602}
1603static inline bool integrity_req_gap_back_merge(struct request *req,
1604 struct bio *next)
1605{
1606 return false;
1607}
1608static inline bool integrity_req_gap_front_merge(struct request *req,
1609 struct bio *bio)
1610{
1611 return false;
1612}
1563 1613
1564#endif /* CONFIG_BLK_DEV_INTEGRITY */ 1614#endif /* CONFIG_BLK_DEV_INTEGRITY */
1565 1615
diff --git a/include/linux/ceph/ceph_features.h b/include/linux/ceph/ceph_features.h
index 4763ad64e832..f89b31d45cc8 100644
--- a/include/linux/ceph/ceph_features.h
+++ b/include/linux/ceph/ceph_features.h
@@ -107,6 +107,7 @@ static inline u64 ceph_sanitize_features(u64 features)
107 CEPH_FEATURE_OSDMAP_ENC | \ 107 CEPH_FEATURE_OSDMAP_ENC | \
108 CEPH_FEATURE_CRUSH_TUNABLES3 | \ 108 CEPH_FEATURE_CRUSH_TUNABLES3 | \
109 CEPH_FEATURE_OSD_PRIMARY_AFFINITY | \ 109 CEPH_FEATURE_OSD_PRIMARY_AFFINITY | \
110 CEPH_FEATURE_MSGR_KEEPALIVE2 | \
110 CEPH_FEATURE_CRUSH_V4) 111 CEPH_FEATURE_CRUSH_V4)
111 112
112#define CEPH_FEATURES_REQUIRED_DEFAULT \ 113#define CEPH_FEATURES_REQUIRED_DEFAULT \
diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h
index 9ebee53d3bf5..397c5cd09794 100644
--- a/include/linux/ceph/libceph.h
+++ b/include/linux/ceph/libceph.h
@@ -46,6 +46,7 @@ struct ceph_options {
46 unsigned long mount_timeout; /* jiffies */ 46 unsigned long mount_timeout; /* jiffies */
47 unsigned long osd_idle_ttl; /* jiffies */ 47 unsigned long osd_idle_ttl; /* jiffies */
48 unsigned long osd_keepalive_timeout; /* jiffies */ 48 unsigned long osd_keepalive_timeout; /* jiffies */
49 unsigned long monc_ping_timeout; /* jiffies */
49 50
50 /* 51 /*
51 * any type that can't be simply compared or doesn't need need 52 * any type that can't be simply compared or doesn't need need
@@ -66,6 +67,7 @@ struct ceph_options {
66#define CEPH_MOUNT_TIMEOUT_DEFAULT msecs_to_jiffies(60 * 1000) 67#define CEPH_MOUNT_TIMEOUT_DEFAULT msecs_to_jiffies(60 * 1000)
67#define CEPH_OSD_KEEPALIVE_DEFAULT msecs_to_jiffies(5 * 1000) 68#define CEPH_OSD_KEEPALIVE_DEFAULT msecs_to_jiffies(5 * 1000)
68#define CEPH_OSD_IDLE_TTL_DEFAULT msecs_to_jiffies(60 * 1000) 69#define CEPH_OSD_IDLE_TTL_DEFAULT msecs_to_jiffies(60 * 1000)
70#define CEPH_MONC_PING_TIMEOUT_DEFAULT msecs_to_jiffies(30 * 1000)
69 71
70#define CEPH_MSG_MAX_FRONT_LEN (16*1024*1024) 72#define CEPH_MSG_MAX_FRONT_LEN (16*1024*1024)
71#define CEPH_MSG_MAX_MIDDLE_LEN (16*1024*1024) 73#define CEPH_MSG_MAX_MIDDLE_LEN (16*1024*1024)
diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h
index 37753278987a..b2371d9b51fa 100644
--- a/include/linux/ceph/messenger.h
+++ b/include/linux/ceph/messenger.h
@@ -238,6 +238,8 @@ struct ceph_connection {
238 bool out_kvec_is_msg; /* kvec refers to out_msg */ 238 bool out_kvec_is_msg; /* kvec refers to out_msg */
239 int out_more; /* there is more data after the kvecs */ 239 int out_more; /* there is more data after the kvecs */
240 __le64 out_temp_ack; /* for writing an ack */ 240 __le64 out_temp_ack; /* for writing an ack */
241 struct ceph_timespec out_temp_keepalive2; /* for writing keepalive2
242 stamp */
241 243
242 /* message in temps */ 244 /* message in temps */
243 struct ceph_msg_header in_hdr; 245 struct ceph_msg_header in_hdr;
@@ -248,6 +250,8 @@ struct ceph_connection {
248 int in_base_pos; /* bytes read */ 250 int in_base_pos; /* bytes read */
249 __le64 in_temp_ack; /* for reading an ack */ 251 __le64 in_temp_ack; /* for reading an ack */
250 252
253 struct timespec last_keepalive_ack; /* keepalive2 ack stamp */
254
251 struct delayed_work work; /* send|recv work */ 255 struct delayed_work work; /* send|recv work */
252 unsigned long delay; /* current delay interval */ 256 unsigned long delay; /* current delay interval */
253}; 257};
@@ -285,6 +289,8 @@ extern void ceph_msg_revoke(struct ceph_msg *msg);
285extern void ceph_msg_revoke_incoming(struct ceph_msg *msg); 289extern void ceph_msg_revoke_incoming(struct ceph_msg *msg);
286 290
287extern void ceph_con_keepalive(struct ceph_connection *con); 291extern void ceph_con_keepalive(struct ceph_connection *con);
292extern bool ceph_con_keepalive_expired(struct ceph_connection *con,
293 unsigned long interval);
288 294
289extern void ceph_msg_data_add_pages(struct ceph_msg *msg, struct page **pages, 295extern void ceph_msg_data_add_pages(struct ceph_msg *msg, struct page **pages,
290 size_t length, size_t alignment); 296 size_t length, size_t alignment);
diff --git a/include/linux/ceph/msgr.h b/include/linux/ceph/msgr.h
index 1c1887206ffa..0fe2656ac415 100644
--- a/include/linux/ceph/msgr.h
+++ b/include/linux/ceph/msgr.h
@@ -84,10 +84,12 @@ struct ceph_entity_inst {
84#define CEPH_MSGR_TAG_MSG 7 /* message */ 84#define CEPH_MSGR_TAG_MSG 7 /* message */
85#define CEPH_MSGR_TAG_ACK 8 /* message ack */ 85#define CEPH_MSGR_TAG_ACK 8 /* message ack */
86#define CEPH_MSGR_TAG_KEEPALIVE 9 /* just a keepalive byte! */ 86#define CEPH_MSGR_TAG_KEEPALIVE 9 /* just a keepalive byte! */
87#define CEPH_MSGR_TAG_BADPROTOVER 10 /* bad protocol version */ 87#define CEPH_MSGR_TAG_BADPROTOVER 10 /* bad protocol version */
88#define CEPH_MSGR_TAG_BADAUTHORIZER 11 /* bad authorizer */ 88#define CEPH_MSGR_TAG_BADAUTHORIZER 11 /* bad authorizer */
89#define CEPH_MSGR_TAG_FEATURES 12 /* insufficient features */ 89#define CEPH_MSGR_TAG_FEATURES 12 /* insufficient features */
90#define CEPH_MSGR_TAG_SEQ 13 /* 64-bit int follows with seen seq number */ 90#define CEPH_MSGR_TAG_SEQ 13 /* 64-bit int follows with seen seq number */
91#define CEPH_MSGR_TAG_KEEPALIVE2 14 /* keepalive2 byte + ceph_timespec */
92#define CEPH_MSGR_TAG_KEEPALIVE2_ACK 15 /* keepalive2 reply */
91 93
92 94
93/* 95/*
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 4d8fcf2187dc..8492721b39be 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -473,31 +473,8 @@ struct cgroup_subsys {
473 unsigned int depends_on; 473 unsigned int depends_on;
474}; 474};
475 475
476extern struct percpu_rw_semaphore cgroup_threadgroup_rwsem; 476void cgroup_threadgroup_change_begin(struct task_struct *tsk);
477 477void cgroup_threadgroup_change_end(struct task_struct *tsk);
478/**
479 * cgroup_threadgroup_change_begin - threadgroup exclusion for cgroups
480 * @tsk: target task
481 *
482 * Called from threadgroup_change_begin() and allows cgroup operations to
483 * synchronize against threadgroup changes using a percpu_rw_semaphore.
484 */
485static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk)
486{
487 percpu_down_read(&cgroup_threadgroup_rwsem);
488}
489
490/**
491 * cgroup_threadgroup_change_end - threadgroup exclusion for cgroups
492 * @tsk: target task
493 *
494 * Called from threadgroup_change_end(). Counterpart of
495 * cgroup_threadcgroup_change_begin().
496 */
497static inline void cgroup_threadgroup_change_end(struct task_struct *tsk)
498{
499 percpu_up_read(&cgroup_threadgroup_rwsem);
500}
501 478
502#else /* CONFIG_CGROUPS */ 479#else /* CONFIG_CGROUPS */
503 480
diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h
index 31ce435981fe..bdcf358dfce2 100644
--- a/include/linux/clockchips.h
+++ b/include/linux/clockchips.h
@@ -18,15 +18,6 @@
18struct clock_event_device; 18struct clock_event_device;
19struct module; 19struct module;
20 20
21/* Clock event mode commands for legacy ->set_mode(): OBSOLETE */
22enum clock_event_mode {
23 CLOCK_EVT_MODE_UNUSED,
24 CLOCK_EVT_MODE_SHUTDOWN,
25 CLOCK_EVT_MODE_PERIODIC,
26 CLOCK_EVT_MODE_ONESHOT,
27 CLOCK_EVT_MODE_RESUME,
28};
29
30/* 21/*
31 * Possible states of a clock event device. 22 * Possible states of a clock event device.
32 * 23 *
@@ -86,16 +77,14 @@ enum clock_event_state {
86 * @min_delta_ns: minimum delta value in ns 77 * @min_delta_ns: minimum delta value in ns
87 * @mult: nanosecond to cycles multiplier 78 * @mult: nanosecond to cycles multiplier
88 * @shift: nanoseconds to cycles divisor (power of two) 79 * @shift: nanoseconds to cycles divisor (power of two)
89 * @mode: operating mode, relevant only to ->set_mode(), OBSOLETE
90 * @state_use_accessors:current state of the device, assigned by the core code 80 * @state_use_accessors:current state of the device, assigned by the core code
91 * @features: features 81 * @features: features
92 * @retries: number of forced programming retries 82 * @retries: number of forced programming retries
93 * @set_mode: legacy set mode function, only for modes <= CLOCK_EVT_MODE_RESUME. 83 * @set_state_periodic: switch state to periodic
94 * @set_state_periodic: switch state to periodic, if !set_mode 84 * @set_state_oneshot: switch state to oneshot
95 * @set_state_oneshot: switch state to oneshot, if !set_mode 85 * @set_state_oneshot_stopped: switch state to oneshot_stopped
96 * @set_state_oneshot_stopped: switch state to oneshot_stopped, if !set_mode 86 * @set_state_shutdown: switch state to shutdown
97 * @set_state_shutdown: switch state to shutdown, if !set_mode 87 * @tick_resume: resume clkevt device
98 * @tick_resume: resume clkevt device, if !set_mode
99 * @broadcast: function to broadcast events 88 * @broadcast: function to broadcast events
100 * @min_delta_ticks: minimum delta value in ticks stored for reconfiguration 89 * @min_delta_ticks: minimum delta value in ticks stored for reconfiguration
101 * @max_delta_ticks: maximum delta value in ticks stored for reconfiguration 90 * @max_delta_ticks: maximum delta value in ticks stored for reconfiguration
@@ -116,18 +105,10 @@ struct clock_event_device {
116 u64 min_delta_ns; 105 u64 min_delta_ns;
117 u32 mult; 106 u32 mult;
118 u32 shift; 107 u32 shift;
119 enum clock_event_mode mode;
120 enum clock_event_state state_use_accessors; 108 enum clock_event_state state_use_accessors;
121 unsigned int features; 109 unsigned int features;
122 unsigned long retries; 110 unsigned long retries;
123 111
124 /*
125 * State transition callback(s): Only one of the two groups should be
126 * defined:
127 * - set_mode(), only for modes <= CLOCK_EVT_MODE_RESUME.
128 * - set_state_{shutdown|periodic|oneshot|oneshot_stopped}(), tick_resume().
129 */
130 void (*set_mode)(enum clock_event_mode mode, struct clock_event_device *);
131 int (*set_state_periodic)(struct clock_event_device *); 112 int (*set_state_periodic)(struct clock_event_device *);
132 int (*set_state_oneshot)(struct clock_event_device *); 113 int (*set_state_oneshot)(struct clock_event_device *);
133 int (*set_state_oneshot_stopped)(struct clock_event_device *); 114 int (*set_state_oneshot_stopped)(struct clock_event_device *);
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 430efcbea48e..dca22de98d94 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -127,9 +127,14 @@ struct cpufreq_policy {
127#define CPUFREQ_SHARED_TYPE_ANY (3) /* Freq can be set from any dependent CPU*/ 127#define CPUFREQ_SHARED_TYPE_ANY (3) /* Freq can be set from any dependent CPU*/
128 128
129#ifdef CONFIG_CPU_FREQ 129#ifdef CONFIG_CPU_FREQ
130struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu);
130struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu); 131struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu);
131void cpufreq_cpu_put(struct cpufreq_policy *policy); 132void cpufreq_cpu_put(struct cpufreq_policy *policy);
132#else 133#else
134static inline struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
135{
136 return NULL;
137}
133static inline struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu) 138static inline struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
134{ 139{
135 return NULL; 140 return NULL;
diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
index ce447f0f1bad..68030e22af35 100644
--- a/include/linux/devfreq.h
+++ b/include/linux/devfreq.h
@@ -65,7 +65,10 @@ struct devfreq_dev_status {
65 * The "flags" parameter's possible values are 65 * The "flags" parameter's possible values are
66 * explained above with "DEVFREQ_FLAG_*" macros. 66 * explained above with "DEVFREQ_FLAG_*" macros.
67 * @get_dev_status: The device should provide the current performance 67 * @get_dev_status: The device should provide the current performance
68 * status to devfreq, which is used by governors. 68 * status to devfreq. Governors are recommended not to
69 * use this directly. Instead, governors are recommended
70 * to use devfreq_update_stats() along with
71 * devfreq.last_status.
69 * @get_cur_freq: The device should provide the current frequency 72 * @get_cur_freq: The device should provide the current frequency
70 * at which it is operating. 73 * at which it is operating.
71 * @exit: An optional callback that is called when devfreq 74 * @exit: An optional callback that is called when devfreq
@@ -161,6 +164,7 @@ struct devfreq {
161 struct delayed_work work; 164 struct delayed_work work;
162 165
163 unsigned long previous_freq; 166 unsigned long previous_freq;
167 struct devfreq_dev_status last_status;
164 168
165 void *data; /* private data for governors */ 169 void *data; /* private data for governors */
166 170
@@ -204,6 +208,19 @@ extern int devm_devfreq_register_opp_notifier(struct device *dev,
204extern void devm_devfreq_unregister_opp_notifier(struct device *dev, 208extern void devm_devfreq_unregister_opp_notifier(struct device *dev,
205 struct devfreq *devfreq); 209 struct devfreq *devfreq);
206 210
211/**
212 * devfreq_update_stats() - update the last_status pointer in struct devfreq
213 * @df: the devfreq instance whose status needs updating
214 *
215 * Governors are recommended to use this function along with last_status,
216 * which allows other entities to reuse the last_status without affecting
217 * the values fetched later by governors.
218 */
219static inline int devfreq_update_stats(struct devfreq *df)
220{
221 return df->profile->get_dev_status(df->dev.parent, &df->last_status);
222}
223
207#if IS_ENABLED(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND) 224#if IS_ENABLED(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND)
208/** 225/**
209 * struct devfreq_simple_ondemand_data - void *data fed to struct devfreq 226 * struct devfreq_simple_ondemand_data - void *data fed to struct devfreq
@@ -289,6 +306,11 @@ static inline void devm_devfreq_unregister_opp_notifier(struct device *dev,
289 struct devfreq *devfreq) 306 struct devfreq *devfreq)
290{ 307{
291} 308}
309
310static inline int devfreq_update_stats(struct devfreq *df)
311{
312 return -EINVAL;
313}
292#endif /* CONFIG_PM_DEVFREQ */ 314#endif /* CONFIG_PM_DEVFREQ */
293 315
294#endif /* __LINUX_DEVFREQ_H__ */ 316#endif /* __LINUX_DEVFREQ_H__ */
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index d0b380ee7d67..e38681f4912d 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -25,6 +25,13 @@
25extern struct files_struct init_files; 25extern struct files_struct init_files;
26extern struct fs_struct init_fs; 26extern struct fs_struct init_fs;
27 27
28#ifdef CONFIG_CGROUPS
29#define INIT_GROUP_RWSEM(sig) \
30 .group_rwsem = __RWSEM_INITIALIZER(sig.group_rwsem),
31#else
32#define INIT_GROUP_RWSEM(sig)
33#endif
34
28#ifdef CONFIG_CPUSETS 35#ifdef CONFIG_CPUSETS
29#define INIT_CPUSET_SEQ(tsk) \ 36#define INIT_CPUSET_SEQ(tsk) \
30 .mems_allowed_seq = SEQCNT_ZERO(tsk.mems_allowed_seq), 37 .mems_allowed_seq = SEQCNT_ZERO(tsk.mems_allowed_seq),
@@ -57,6 +64,7 @@ extern struct fs_struct init_fs;
57 INIT_PREV_CPUTIME(sig) \ 64 INIT_PREV_CPUTIME(sig) \
58 .cred_guard_mutex = \ 65 .cred_guard_mutex = \
59 __MUTEX_INITIALIZER(sig.cred_guard_mutex), \ 66 __MUTEX_INITIALIZER(sig.cred_guard_mutex), \
67 INIT_GROUP_RWSEM(sig) \
60} 68}
61 69
62extern struct nsproxy init_nsproxy; 70extern struct nsproxy init_nsproxy;
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 6f8b34066442..11bf09288ddb 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -110,8 +110,8 @@ enum {
110/* 110/*
111 * Return value for chip->irq_set_affinity() 111 * Return value for chip->irq_set_affinity()
112 * 112 *
113 * IRQ_SET_MASK_OK - OK, core updates irq_data.affinity 113 * IRQ_SET_MASK_OK - OK, core updates irq_common_data.affinity
114 * IRQ_SET_MASK_NOCPY - OK, chip did update irq_data.affinity 114 * IRQ_SET_MASK_NOCPY - OK, chip did update irq_common_data.affinity
115 * IRQ_SET_MASK_OK_DONE - Same as IRQ_SET_MASK_OK for core. Special code to 115 * IRQ_SET_MASK_OK_DONE - Same as IRQ_SET_MASK_OK for core. Special code to
116 * support stacked irqchips, which indicates skipping 116 * support stacked irqchips, which indicates skipping
117 * all descendent irqchips. 117 * all descendent irqchips.
@@ -129,9 +129,19 @@ struct irq_domain;
129 * struct irq_common_data - per irq data shared by all irqchips 129 * struct irq_common_data - per irq data shared by all irqchips
130 * @state_use_accessors: status information for irq chip functions. 130 * @state_use_accessors: status information for irq chip functions.
131 * Use accessor functions to deal with it 131 * Use accessor functions to deal with it
132 * @node: node index useful for balancing
133 * @handler_data: per-IRQ data for the irq_chip methods
134 * @affinity: IRQ affinity on SMP
135 * @msi_desc: MSI descriptor
132 */ 136 */
133struct irq_common_data { 137struct irq_common_data {
134 unsigned int state_use_accessors; 138 unsigned int state_use_accessors;
139#ifdef CONFIG_NUMA
140 unsigned int node;
141#endif
142 void *handler_data;
143 struct msi_desc *msi_desc;
144 cpumask_var_t affinity;
135}; 145};
136 146
137/** 147/**
@@ -139,38 +149,26 @@ struct irq_common_data {
139 * @mask: precomputed bitmask for accessing the chip registers 149 * @mask: precomputed bitmask for accessing the chip registers
140 * @irq: interrupt number 150 * @irq: interrupt number
141 * @hwirq: hardware interrupt number, local to the interrupt domain 151 * @hwirq: hardware interrupt number, local to the interrupt domain
142 * @node: node index useful for balancing
143 * @common: point to data shared by all irqchips 152 * @common: point to data shared by all irqchips
144 * @chip: low level interrupt hardware access 153 * @chip: low level interrupt hardware access
145 * @domain: Interrupt translation domain; responsible for mapping 154 * @domain: Interrupt translation domain; responsible for mapping
146 * between hwirq number and linux irq number. 155 * between hwirq number and linux irq number.
147 * @parent_data: pointer to parent struct irq_data to support hierarchy 156 * @parent_data: pointer to parent struct irq_data to support hierarchy
148 * irq_domain 157 * irq_domain
149 * @handler_data: per-IRQ data for the irq_chip methods
150 * @chip_data: platform-specific per-chip private data for the chip 158 * @chip_data: platform-specific per-chip private data for the chip
151 * methods, to allow shared chip implementations 159 * methods, to allow shared chip implementations
152 * @msi_desc: MSI descriptor
153 * @affinity: IRQ affinity on SMP
154 *
155 * The fields here need to overlay the ones in irq_desc until we
156 * cleaned up the direct references and switched everything over to
157 * irq_data.
158 */ 160 */
159struct irq_data { 161struct irq_data {
160 u32 mask; 162 u32 mask;
161 unsigned int irq; 163 unsigned int irq;
162 unsigned long hwirq; 164 unsigned long hwirq;
163 unsigned int node;
164 struct irq_common_data *common; 165 struct irq_common_data *common;
165 struct irq_chip *chip; 166 struct irq_chip *chip;
166 struct irq_domain *domain; 167 struct irq_domain *domain;
167#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY 168#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
168 struct irq_data *parent_data; 169 struct irq_data *parent_data;
169#endif 170#endif
170 void *handler_data;
171 void *chip_data; 171 void *chip_data;
172 struct msi_desc *msi_desc;
173 cpumask_var_t affinity;
174}; 172};
175 173
176/* 174/*
@@ -190,6 +188,7 @@ struct irq_data {
190 * IRQD_IRQ_MASKED - Masked state of the interrupt 188 * IRQD_IRQ_MASKED - Masked state of the interrupt
191 * IRQD_IRQ_INPROGRESS - In progress state of the interrupt 189 * IRQD_IRQ_INPROGRESS - In progress state of the interrupt
192 * IRQD_WAKEUP_ARMED - Wakeup mode armed 190 * IRQD_WAKEUP_ARMED - Wakeup mode armed
191 * IRQD_FORWARDED_TO_VCPU - The interrupt is forwarded to a VCPU
193 */ 192 */
194enum { 193enum {
195 IRQD_TRIGGER_MASK = 0xf, 194 IRQD_TRIGGER_MASK = 0xf,
@@ -204,6 +203,7 @@ enum {
204 IRQD_IRQ_MASKED = (1 << 17), 203 IRQD_IRQ_MASKED = (1 << 17),
205 IRQD_IRQ_INPROGRESS = (1 << 18), 204 IRQD_IRQ_INPROGRESS = (1 << 18),
206 IRQD_WAKEUP_ARMED = (1 << 19), 205 IRQD_WAKEUP_ARMED = (1 << 19),
206 IRQD_FORWARDED_TO_VCPU = (1 << 20),
207}; 207};
208 208
209#define __irqd_to_state(d) ((d)->common->state_use_accessors) 209#define __irqd_to_state(d) ((d)->common->state_use_accessors)
@@ -282,6 +282,20 @@ static inline bool irqd_is_wakeup_armed(struct irq_data *d)
282 return __irqd_to_state(d) & IRQD_WAKEUP_ARMED; 282 return __irqd_to_state(d) & IRQD_WAKEUP_ARMED;
283} 283}
284 284
285static inline bool irqd_is_forwarded_to_vcpu(struct irq_data *d)
286{
287 return __irqd_to_state(d) & IRQD_FORWARDED_TO_VCPU;
288}
289
290static inline void irqd_set_forwarded_to_vcpu(struct irq_data *d)
291{
292 __irqd_to_state(d) |= IRQD_FORWARDED_TO_VCPU;
293}
294
295static inline void irqd_clr_forwarded_to_vcpu(struct irq_data *d)
296{
297 __irqd_to_state(d) &= ~IRQD_FORWARDED_TO_VCPU;
298}
285 299
286/* 300/*
287 * Functions for chained handlers which can be enabled/disabled by the 301 * Functions for chained handlers which can be enabled/disabled by the
@@ -461,14 +475,14 @@ static inline int irq_set_parent(int irq, int parent_irq)
461 * Built-in IRQ handlers for various IRQ types, 475 * Built-in IRQ handlers for various IRQ types,
462 * callable via desc->handle_irq() 476 * callable via desc->handle_irq()
463 */ 477 */
464extern void handle_level_irq(unsigned int irq, struct irq_desc *desc); 478extern void handle_level_irq(struct irq_desc *desc);
465extern void handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc); 479extern void handle_fasteoi_irq(struct irq_desc *desc);
466extern void handle_edge_irq(unsigned int irq, struct irq_desc *desc); 480extern void handle_edge_irq(struct irq_desc *desc);
467extern void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc); 481extern void handle_edge_eoi_irq(struct irq_desc *desc);
468extern void handle_simple_irq(unsigned int irq, struct irq_desc *desc); 482extern void handle_simple_irq(struct irq_desc *desc);
469extern void handle_percpu_irq(unsigned int irq, struct irq_desc *desc); 483extern void handle_percpu_irq(struct irq_desc *desc);
470extern void handle_percpu_devid_irq(unsigned int irq, struct irq_desc *desc); 484extern void handle_percpu_devid_irq(struct irq_desc *desc);
471extern void handle_bad_irq(unsigned int irq, struct irq_desc *desc); 485extern void handle_bad_irq(struct irq_desc *desc);
472extern void handle_nested_irq(unsigned int irq); 486extern void handle_nested_irq(unsigned int irq);
473 487
474extern int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg); 488extern int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg);
@@ -627,23 +641,23 @@ static inline void *irq_data_get_irq_chip_data(struct irq_data *d)
627static inline void *irq_get_handler_data(unsigned int irq) 641static inline void *irq_get_handler_data(unsigned int irq)
628{ 642{
629 struct irq_data *d = irq_get_irq_data(irq); 643 struct irq_data *d = irq_get_irq_data(irq);
630 return d ? d->handler_data : NULL; 644 return d ? d->common->handler_data : NULL;
631} 645}
632 646
633static inline void *irq_data_get_irq_handler_data(struct irq_data *d) 647static inline void *irq_data_get_irq_handler_data(struct irq_data *d)
634{ 648{
635 return d->handler_data; 649 return d->common->handler_data;
636} 650}
637 651
638static inline struct msi_desc *irq_get_msi_desc(unsigned int irq) 652static inline struct msi_desc *irq_get_msi_desc(unsigned int irq)
639{ 653{
640 struct irq_data *d = irq_get_irq_data(irq); 654 struct irq_data *d = irq_get_irq_data(irq);
641 return d ? d->msi_desc : NULL; 655 return d ? d->common->msi_desc : NULL;
642} 656}
643 657
644static inline struct msi_desc *irq_data_get_msi_desc(struct irq_data *d) 658static inline struct msi_desc *irq_data_get_msi_desc(struct irq_data *d)
645{ 659{
646 return d->msi_desc; 660 return d->common->msi_desc;
647} 661}
648 662
649static inline u32 irq_get_trigger_type(unsigned int irq) 663static inline u32 irq_get_trigger_type(unsigned int irq)
@@ -652,21 +666,30 @@ static inline u32 irq_get_trigger_type(unsigned int irq)
652 return d ? irqd_get_trigger_type(d) : 0; 666 return d ? irqd_get_trigger_type(d) : 0;
653} 667}
654 668
655static inline int irq_data_get_node(struct irq_data *d) 669static inline int irq_common_data_get_node(struct irq_common_data *d)
656{ 670{
671#ifdef CONFIG_NUMA
657 return d->node; 672 return d->node;
673#else
674 return 0;
675#endif
676}
677
678static inline int irq_data_get_node(struct irq_data *d)
679{
680 return irq_common_data_get_node(d->common);
658} 681}
659 682
660static inline struct cpumask *irq_get_affinity_mask(int irq) 683static inline struct cpumask *irq_get_affinity_mask(int irq)
661{ 684{
662 struct irq_data *d = irq_get_irq_data(irq); 685 struct irq_data *d = irq_get_irq_data(irq);
663 686
664 return d ? d->affinity : NULL; 687 return d ? d->common->affinity : NULL;
665} 688}
666 689
667static inline struct cpumask *irq_data_get_affinity_mask(struct irq_data *d) 690static inline struct cpumask *irq_data_get_affinity_mask(struct irq_data *d)
668{ 691{
669 return d->affinity; 692 return d->common->affinity;
670} 693}
671 694
672unsigned int arch_dynirq_lower_bound(unsigned int from); 695unsigned int arch_dynirq_lower_bound(unsigned int from);
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
index 5acfa26602e1..a587a33363c7 100644
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -98,11 +98,7 @@ extern struct irq_desc irq_desc[NR_IRQS];
98 98
99static inline struct irq_desc *irq_data_to_desc(struct irq_data *data) 99static inline struct irq_desc *irq_data_to_desc(struct irq_data *data)
100{ 100{
101#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY 101 return container_of(data->common, struct irq_desc, irq_common_data);
102 return irq_to_desc(data->irq);
103#else
104 return container_of(data, struct irq_desc, irq_data);
105#endif
106} 102}
107 103
108static inline unsigned int irq_desc_get_irq(struct irq_desc *desc) 104static inline unsigned int irq_desc_get_irq(struct irq_desc *desc)
@@ -127,23 +123,21 @@ static inline void *irq_desc_get_chip_data(struct irq_desc *desc)
127 123
128static inline void *irq_desc_get_handler_data(struct irq_desc *desc) 124static inline void *irq_desc_get_handler_data(struct irq_desc *desc)
129{ 125{
130 return desc->irq_data.handler_data; 126 return desc->irq_common_data.handler_data;
131} 127}
132 128
133static inline struct msi_desc *irq_desc_get_msi_desc(struct irq_desc *desc) 129static inline struct msi_desc *irq_desc_get_msi_desc(struct irq_desc *desc)
134{ 130{
135 return desc->irq_data.msi_desc; 131 return desc->irq_common_data.msi_desc;
136} 132}
137 133
138/* 134/*
139 * Architectures call this to let the generic IRQ layer 135 * Architectures call this to let the generic IRQ layer
140 * handle an interrupt. If the descriptor is attached to an 136 * handle an interrupt.
141 * irqchip-style controller then we call the ->handle_irq() handler,
142 * and it calls __do_IRQ() if it's attached to an irqtype-style controller.
143 */ 137 */
144static inline void generic_handle_irq_desc(unsigned int irq, struct irq_desc *desc) 138static inline void generic_handle_irq_desc(struct irq_desc *desc)
145{ 139{
146 desc->handle_irq(irq, desc); 140 desc->handle_irq(desc);
147} 141}
148 142
149int generic_handle_irq(unsigned int irq); 143int generic_handle_irq(unsigned int irq);
@@ -176,29 +170,6 @@ static inline int irq_has_action(unsigned int irq)
176 return irq_desc_has_action(irq_to_desc(irq)); 170 return irq_desc_has_action(irq_to_desc(irq));
177} 171}
178 172
179/* caller has locked the irq_desc and both params are valid */
180static inline void __irq_set_handler_locked(unsigned int irq,
181 irq_flow_handler_t handler)
182{
183 struct irq_desc *desc;
184
185 desc = irq_to_desc(irq);
186 desc->handle_irq = handler;
187}
188
189/* caller has locked the irq_desc and both params are valid */
190static inline void
191__irq_set_chip_handler_name_locked(unsigned int irq, struct irq_chip *chip,
192 irq_flow_handler_t handler, const char *name)
193{
194 struct irq_desc *desc;
195
196 desc = irq_to_desc(irq);
197 irq_desc_get_irq_data(desc)->chip = chip;
198 desc->handle_irq = handler;
199 desc->name = name;
200}
201
202/** 173/**
203 * irq_set_handler_locked - Set irq handler from a locked region 174 * irq_set_handler_locked - Set irq handler from a locked region
204 * @data: Pointer to the irq_data structure which identifies the irq 175 * @data: Pointer to the irq_data structure which identifies the irq
diff --git a/include/linux/irqhandler.h b/include/linux/irqhandler.h
index 62d543004197..661bed0ed1f3 100644
--- a/include/linux/irqhandler.h
+++ b/include/linux/irqhandler.h
@@ -8,7 +8,7 @@
8 8
9struct irq_desc; 9struct irq_desc;
10struct irq_data; 10struct irq_data;
11typedef void (*irq_flow_handler_t)(unsigned int irq, struct irq_desc *desc); 11typedef void (*irq_flow_handler_t)(struct irq_desc *desc);
12typedef void (*irq_preflow_handler_t)(struct irq_data *data); 12typedef void (*irq_preflow_handler_t)(struct irq_data *data);
13 13
14#endif 14#endif
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index 7f653e8f6690..f1094238ab2a 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -21,8 +21,8 @@
21 * 21 *
22 * DEFINE_STATIC_KEY_TRUE(key); 22 * DEFINE_STATIC_KEY_TRUE(key);
23 * DEFINE_STATIC_KEY_FALSE(key); 23 * DEFINE_STATIC_KEY_FALSE(key);
24 * static_key_likely() 24 * static_branch_likely()
25 * statick_key_unlikely() 25 * static_branch_unlikely()
26 * 26 *
27 * Jump labels provide an interface to generate dynamic branches using 27 * Jump labels provide an interface to generate dynamic branches using
28 * self-modifying code. Assuming toolchain and architecture support, if we 28 * self-modifying code. Assuming toolchain and architecture support, if we
@@ -45,12 +45,10 @@
45 * statement, setting the key to true requires us to patch in a jump 45 * statement, setting the key to true requires us to patch in a jump
46 * to the out-of-line of true branch. 46 * to the out-of-line of true branch.
47 * 47 *
48 * In addtion to static_branch_{enable,disable}, we can also reference count 48 * In addition to static_branch_{enable,disable}, we can also reference count
49 * the key or branch direction via static_branch_{inc,dec}. Thus, 49 * the key or branch direction via static_branch_{inc,dec}. Thus,
50 * static_branch_inc() can be thought of as a 'make more true' and 50 * static_branch_inc() can be thought of as a 'make more true' and
51 * static_branch_dec() as a 'make more false'. The inc()/dec() 51 * static_branch_dec() as a 'make more false'.
52 * interface is meant to be used exclusively from the inc()/dec() for a given
53 * key.
54 * 52 *
55 * Since this relies on modifying code, the branch modifying functions 53 * Since this relies on modifying code, the branch modifying functions
56 * must be considered absolute slow paths (machine wide synchronization etc.). 54 * must be considered absolute slow paths (machine wide synchronization etc.).
diff --git a/include/linux/mm.h b/include/linux/mm.h
index fda728e3c27d..91c08f6f0dc9 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -20,6 +20,7 @@
20#include <linux/shrinker.h> 20#include <linux/shrinker.h>
21#include <linux/resource.h> 21#include <linux/resource.h>
22#include <linux/page_ext.h> 22#include <linux/page_ext.h>
23#include <linux/err.h>
23 24
24struct mempolicy; 25struct mempolicy;
25struct anon_vma; 26struct anon_vma;
@@ -1214,6 +1215,49 @@ long get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
1214 int write, int force, struct page **pages); 1215 int write, int force, struct page **pages);
1215int get_user_pages_fast(unsigned long start, int nr_pages, int write, 1216int get_user_pages_fast(unsigned long start, int nr_pages, int write,
1216 struct page **pages); 1217 struct page **pages);
1218
1219/* Container for pinned pfns / pages */
1220struct frame_vector {
1221 unsigned int nr_allocated; /* Number of frames we have space for */
1222 unsigned int nr_frames; /* Number of frames stored in ptrs array */
1223 bool got_ref; /* Did we pin pages by getting page ref? */
1224 bool is_pfns; /* Does array contain pages or pfns? */
1225 void *ptrs[0]; /* Array of pinned pfns / pages. Use
1226 * pfns_vector_pages() or pfns_vector_pfns()
1227 * for access */
1228};
1229
1230struct frame_vector *frame_vector_create(unsigned int nr_frames);
1231void frame_vector_destroy(struct frame_vector *vec);
1232int get_vaddr_frames(unsigned long start, unsigned int nr_pfns,
1233 bool write, bool force, struct frame_vector *vec);
1234void put_vaddr_frames(struct frame_vector *vec);
1235int frame_vector_to_pages(struct frame_vector *vec);
1236void frame_vector_to_pfns(struct frame_vector *vec);
1237
1238static inline unsigned int frame_vector_count(struct frame_vector *vec)
1239{
1240 return vec->nr_frames;
1241}
1242
1243static inline struct page **frame_vector_pages(struct frame_vector *vec)
1244{
1245 if (vec->is_pfns) {
1246 int err = frame_vector_to_pages(vec);
1247
1248 if (err)
1249 return ERR_PTR(err);
1250 }
1251 return (struct page **)(vec->ptrs);
1252}
1253
1254static inline unsigned long *frame_vector_pfns(struct frame_vector *vec)
1255{
1256 if (!vec->is_pfns)
1257 frame_vector_to_pfns(vec);
1258 return (unsigned long *)(vec->ptrs);
1259}
1260
1217struct kvec; 1261struct kvec;
1218int get_kernel_pages(const struct kvec *iov, int nr_pages, int write, 1262int get_kernel_pages(const struct kvec *iov, int nr_pages, int write,
1219 struct page **pages); 1263 struct page **pages);
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index b791405958b4..d2ffeafc9998 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -507,6 +507,7 @@ static inline void napi_enable(struct napi_struct *n)
507 BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state)); 507 BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
508 smp_mb__before_atomic(); 508 smp_mb__before_atomic();
509 clear_bit(NAPI_STATE_SCHED, &n->state); 509 clear_bit(NAPI_STATE_SCHED, &n->state);
510 clear_bit(NAPI_STATE_NPSVC, &n->state);
510} 511}
511 512
512#ifdef CONFIG_SMP 513#ifdef CONFIG_SMP
diff --git a/include/linux/ntb.h b/include/linux/ntb.h
index b02f72bb8e32..f798e2afba88 100644
--- a/include/linux/ntb.h
+++ b/include/linux/ntb.h
@@ -522,10 +522,9 @@ static inline int ntb_mw_clear_trans(struct ntb_dev *ntb, int idx)
522 * @speed: OUT - The link speed expressed as PCIe generation number. 522 * @speed: OUT - The link speed expressed as PCIe generation number.
523 * @width: OUT - The link width expressed as the number of PCIe lanes. 523 * @width: OUT - The link width expressed as the number of PCIe lanes.
524 * 524 *
525 * Set the translation of a memory window. The peer may access local memory 525 * Get the current state of the ntb link. It is recommended to query the link
526 * through the window starting at the address, up to the size. The address 526 * state once after every link event. It is safe to query the link state in
527 * must be aligned to the alignment specified by ntb_mw_get_range(). The size 527 * the context of the link event callback.
528 * must be aligned to the size alignment specified by ntb_mw_get_range().
529 * 528 *
530 * Return: One if the link is up, zero if the link is down, otherwise a 529 * Return: One if the link is up, zero if the link is down, otherwise a
531 * negative value indicating the error number. 530 * negative value indicating the error number.
@@ -795,7 +794,7 @@ static inline int ntb_peer_db_set(struct ntb_dev *ntb, u64 db_bits)
795} 794}
796 795
797/** 796/**
798 * ntb_peer_db_clear() - clear bits in the local doorbell register 797 * ntb_peer_db_clear() - clear bits in the peer doorbell register
799 * @ntb: NTB device context. 798 * @ntb: NTB device context.
800 * @db_bits: Doorbell bits to clear. 799 * @db_bits: Doorbell bits to clear.
801 * 800 *
diff --git a/include/linux/ntb_transport.h b/include/linux/ntb_transport.h
index 2862861366a5..7243eb98a722 100644
--- a/include/linux/ntb_transport.h
+++ b/include/linux/ntb_transport.h
@@ -83,3 +83,4 @@ void *ntb_transport_rx_remove(struct ntb_transport_qp *qp, unsigned int *len);
83void ntb_transport_link_up(struct ntb_transport_qp *qp); 83void ntb_transport_link_up(struct ntb_transport_qp *qp);
84void ntb_transport_link_down(struct ntb_transport_qp *qp); 84void ntb_transport_link_down(struct ntb_transport_qp *qp);
85bool ntb_transport_link_query(struct ntb_transport_qp *qp); 85bool ntb_transport_link_query(struct ntb_transport_qp *qp);
86unsigned int ntb_transport_tx_free_entry(struct ntb_transport_qp *qp);
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 962387a192f1..4a4e3a092337 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -19,6 +19,7 @@
19#include <linux/spinlock.h> 19#include <linux/spinlock.h>
20#include <linux/ethtool.h> 20#include <linux/ethtool.h>
21#include <linux/mii.h> 21#include <linux/mii.h>
22#include <linux/module.h>
22#include <linux/timer.h> 23#include <linux/timer.h>
23#include <linux/workqueue.h> 24#include <linux/workqueue.h>
24#include <linux/mod_devicetable.h> 25#include <linux/mod_devicetable.h>
@@ -153,6 +154,7 @@ struct sk_buff;
153 * PHYs should register using this structure 154 * PHYs should register using this structure
154 */ 155 */
155struct mii_bus { 156struct mii_bus {
157 struct module *owner;
156 const char *name; 158 const char *name;
157 char id[MII_BUS_ID_SIZE]; 159 char id[MII_BUS_ID_SIZE];
158 void *priv; 160 void *priv;
@@ -198,7 +200,8 @@ static inline struct mii_bus *mdiobus_alloc(void)
198 return mdiobus_alloc_size(0); 200 return mdiobus_alloc_size(0);
199} 201}
200 202
201int mdiobus_register(struct mii_bus *bus); 203int __mdiobus_register(struct mii_bus *bus, struct module *owner);
204#define mdiobus_register(bus) __mdiobus_register(bus, THIS_MODULE)
202void mdiobus_unregister(struct mii_bus *bus); 205void mdiobus_unregister(struct mii_bus *bus);
203void mdiobus_free(struct mii_bus *bus); 206void mdiobus_free(struct mii_bus *bus);
204struct mii_bus *devm_mdiobus_alloc_size(struct device *dev, int sizeof_priv); 207struct mii_bus *devm_mdiobus_alloc_size(struct device *dev, int sizeof_priv);
@@ -742,6 +745,7 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id,
742 struct phy_c45_device_ids *c45_ids); 745 struct phy_c45_device_ids *c45_ids);
743struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45); 746struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45);
744int phy_device_register(struct phy_device *phy); 747int phy_device_register(struct phy_device *phy);
748void phy_device_remove(struct phy_device *phydev);
745int phy_init_hw(struct phy_device *phydev); 749int phy_init_hw(struct phy_device *phydev);
746int phy_suspend(struct phy_device *phydev); 750int phy_suspend(struct phy_device *phydev);
747int phy_resume(struct phy_device *phydev); 751int phy_resume(struct phy_device *phydev);
diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h
index cab7ba55bedb..e817722ee3f0 100644
--- a/include/linux/pm_opp.h
+++ b/include/linux/pm_opp.h
@@ -34,6 +34,7 @@ bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp);
34 34
35int dev_pm_opp_get_opp_count(struct device *dev); 35int dev_pm_opp_get_opp_count(struct device *dev);
36unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev); 36unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev);
37struct dev_pm_opp *dev_pm_opp_get_suspend_opp(struct device *dev);
37 38
38struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev, 39struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
39 unsigned long freq, 40 unsigned long freq,
@@ -80,6 +81,11 @@ static inline unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev)
80 return 0; 81 return 0;
81} 82}
82 83
84static inline struct dev_pm_opp *dev_pm_opp_get_suspend_opp(struct device *dev)
85{
86 return NULL;
87}
88
83static inline struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev, 89static inline struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
84 unsigned long freq, bool available) 90 unsigned long freq, bool available)
85{ 91{
diff --git a/include/linux/sched.h b/include/linux/sched.h
index a4ab9daa387c..b7b9501b41af 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -762,6 +762,18 @@ struct signal_struct {
762 unsigned audit_tty_log_passwd; 762 unsigned audit_tty_log_passwd;
763 struct tty_audit_buf *tty_audit_buf; 763 struct tty_audit_buf *tty_audit_buf;
764#endif 764#endif
765#ifdef CONFIG_CGROUPS
766 /*
767 * group_rwsem prevents new tasks from entering the threadgroup and
768 * member tasks from exiting,a more specifically, setting of
769 * PF_EXITING. fork and exit paths are protected with this rwsem
770 * using threadgroup_change_begin/end(). Users which require
771 * threadgroup to remain stable should use threadgroup_[un]lock()
772 * which also takes care of exec path. Currently, cgroup is the
773 * only user.
774 */
775 struct rw_semaphore group_rwsem;
776#endif
765 777
766 oom_flags_t oom_flags; 778 oom_flags_t oom_flags;
767 short oom_score_adj; /* OOM kill score adjustment */ 779 short oom_score_adj; /* OOM kill score adjustment */
diff --git a/include/linux/security.h b/include/linux/security.h
index 79d85ddf8093..2f4c1f7aa7db 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -946,7 +946,7 @@ static inline int security_task_prctl(int option, unsigned long arg2,
946 unsigned long arg4, 946 unsigned long arg4,
947 unsigned long arg5) 947 unsigned long arg5)
948{ 948{
949 return cap_task_prctl(option, arg2, arg3, arg3, arg5); 949 return cap_task_prctl(option, arg2, arg3, arg4, arg5);
950} 950}
951 951
952static inline void security_task_to_inode(struct task_struct *p, struct inode *inode) 952static inline void security_task_to_inode(struct task_struct *p, struct inode *inode)
diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
index adeadbd6d7bf..dde00defbaa5 100644
--- a/include/linux/seq_file.h
+++ b/include/linux/seq_file.h
@@ -114,13 +114,18 @@ int seq_open(struct file *, const struct seq_operations *);
114ssize_t seq_read(struct file *, char __user *, size_t, loff_t *); 114ssize_t seq_read(struct file *, char __user *, size_t, loff_t *);
115loff_t seq_lseek(struct file *, loff_t, int); 115loff_t seq_lseek(struct file *, loff_t, int);
116int seq_release(struct inode *, struct file *); 116int seq_release(struct inode *, struct file *);
117int seq_escape(struct seq_file *, const char *, const char *);
118int seq_putc(struct seq_file *m, char c);
119int seq_puts(struct seq_file *m, const char *s);
120int seq_write(struct seq_file *seq, const void *data, size_t len); 117int seq_write(struct seq_file *seq, const void *data, size_t len);
121 118
122__printf(2, 3) int seq_printf(struct seq_file *, const char *, ...); 119__printf(2, 0)
123__printf(2, 0) int seq_vprintf(struct seq_file *, const char *, va_list args); 120void seq_vprintf(struct seq_file *m, const char *fmt, va_list args);
121__printf(2, 3)
122void seq_printf(struct seq_file *m, const char *fmt, ...);
123void seq_putc(struct seq_file *m, char c);
124void seq_puts(struct seq_file *m, const char *s);
125void seq_put_decimal_ull(struct seq_file *m, char delimiter,
126 unsigned long long num);
127void seq_put_decimal_ll(struct seq_file *m, char delimiter, long long num);
128void seq_escape(struct seq_file *m, const char *s, const char *esc);
124 129
125void seq_hex_dump(struct seq_file *m, const char *prefix_str, int prefix_type, 130void seq_hex_dump(struct seq_file *m, const char *prefix_str, int prefix_type,
126 int rowsize, int groupsize, const void *buf, size_t len, 131 int rowsize, int groupsize, const void *buf, size_t len,
@@ -138,10 +143,6 @@ int single_release(struct inode *, struct file *);
138void *__seq_open_private(struct file *, const struct seq_operations *, int); 143void *__seq_open_private(struct file *, const struct seq_operations *, int);
139int seq_open_private(struct file *, const struct seq_operations *, int); 144int seq_open_private(struct file *, const struct seq_operations *, int);
140int seq_release_private(struct inode *, struct file *); 145int seq_release_private(struct inode *, struct file *);
141int seq_put_decimal_ull(struct seq_file *m, char delimiter,
142 unsigned long long num);
143int seq_put_decimal_ll(struct seq_file *m, char delimiter,
144 long long num);
145 146
146static inline struct user_namespace *seq_user_ns(struct seq_file *seq) 147static inline struct user_namespace *seq_user_ns(struct seq_file *seq)
147{ 148{
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 2738d355cdf9..2b0a30a6e31c 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -179,6 +179,9 @@ struct nf_bridge_info {
179 u8 bridged_dnat:1; 179 u8 bridged_dnat:1;
180 __u16 frag_max_size; 180 __u16 frag_max_size;
181 struct net_device *physindev; 181 struct net_device *physindev;
182
183 /* always valid & non-NULL from FORWARD on, for physdev match */
184 struct net_device *physoutdev;
182 union { 185 union {
183 /* prerouting: detect dnat in orig/reply direction */ 186 /* prerouting: detect dnat in orig/reply direction */
184 __be32 ipv4_daddr; 187 __be32 ipv4_daddr;
@@ -189,9 +192,6 @@ struct nf_bridge_info {
189 * skb is out in neigh layer. 192 * skb is out in neigh layer.
190 */ 193 */
191 char neigh_header[8]; 194 char neigh_header[8];
192
193 /* always valid & non-NULL from FORWARD on, for physdev match */
194 struct net_device *physoutdev;
195 }; 195 };
196}; 196};
197#endif 197#endif
@@ -2707,6 +2707,9 @@ static inline void skb_postpull_rcsum(struct sk_buff *skb,
2707{ 2707{
2708 if (skb->ip_summed == CHECKSUM_COMPLETE) 2708 if (skb->ip_summed == CHECKSUM_COMPLETE)
2709 skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0)); 2709 skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0));
2710 else if (skb->ip_summed == CHECKSUM_PARTIAL &&
2711 skb_checksum_start_offset(skb) <= len)
2712 skb->ip_summed = CHECKSUM_NONE;
2710} 2713}
2711 2714
2712unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len); 2715unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index 269e8afd3e2a..6b00f18f5e6b 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -34,7 +34,7 @@ extern struct bus_type spi_bus_type;
34 34
35/** 35/**
36 * struct spi_statistics - statistics for spi transfers 36 * struct spi_statistics - statistics for spi transfers
37 * @clock: lock protecting this structure 37 * @lock: lock protecting this structure
38 * 38 *
39 * @messages: number of spi-messages handled 39 * @messages: number of spi-messages handled
40 * @transfers: number of spi_transfers handled 40 * @transfers: number of spi_transfers handled
diff --git a/include/linux/sunrpc/xprtsock.h b/include/linux/sunrpc/xprtsock.h
index 7591788e9fbf..357e44c1a46b 100644
--- a/include/linux/sunrpc/xprtsock.h
+++ b/include/linux/sunrpc/xprtsock.h
@@ -42,6 +42,7 @@ struct sock_xprt {
42 /* 42 /*
43 * Connection of transports 43 * Connection of transports
44 */ 44 */
45 unsigned long sock_state;
45 struct delayed_work connect_worker; 46 struct delayed_work connect_worker;
46 struct sockaddr_storage srcaddr; 47 struct sockaddr_storage srcaddr;
47 unsigned short srcport; 48 unsigned short srcport;
@@ -76,6 +77,8 @@ struct sock_xprt {
76 */ 77 */
77#define TCP_RPC_REPLY (1UL << 6) 78#define TCP_RPC_REPLY (1UL << 6)
78 79
80#define XPRT_SOCK_CONNECTING 1U
81
79#endif /* __KERNEL__ */ 82#endif /* __KERNEL__ */
80 83
81#endif /* _LINUX_SUNRPC_XPRTSOCK_H */ 84#endif /* _LINUX_SUNRPC_XPRTSOCK_H */
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 08001317aee7..a460e2ef2843 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -885,4 +885,6 @@ asmlinkage long sys_execveat(int dfd, const char __user *filename,
885 const char __user *const __user *argv, 885 const char __user *const __user *argv,
886 const char __user *const __user *envp, int flags); 886 const char __user *const __user *envp, int flags);
887 887
888asmlinkage long sys_membarrier(int cmd, int flags);
889
888#endif 890#endif
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index 037e9df2f610..157d366e761b 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -92,23 +92,19 @@ struct thermal_zone_device_ops {
92 struct thermal_cooling_device *); 92 struct thermal_cooling_device *);
93 int (*unbind) (struct thermal_zone_device *, 93 int (*unbind) (struct thermal_zone_device *,
94 struct thermal_cooling_device *); 94 struct thermal_cooling_device *);
95 int (*get_temp) (struct thermal_zone_device *, unsigned long *); 95 int (*get_temp) (struct thermal_zone_device *, int *);
96 int (*get_mode) (struct thermal_zone_device *, 96 int (*get_mode) (struct thermal_zone_device *,
97 enum thermal_device_mode *); 97 enum thermal_device_mode *);
98 int (*set_mode) (struct thermal_zone_device *, 98 int (*set_mode) (struct thermal_zone_device *,
99 enum thermal_device_mode); 99 enum thermal_device_mode);
100 int (*get_trip_type) (struct thermal_zone_device *, int, 100 int (*get_trip_type) (struct thermal_zone_device *, int,
101 enum thermal_trip_type *); 101 enum thermal_trip_type *);
102 int (*get_trip_temp) (struct thermal_zone_device *, int, 102 int (*get_trip_temp) (struct thermal_zone_device *, int, int *);
103 unsigned long *); 103 int (*set_trip_temp) (struct thermal_zone_device *, int, int);
104 int (*set_trip_temp) (struct thermal_zone_device *, int, 104 int (*get_trip_hyst) (struct thermal_zone_device *, int, int *);
105 unsigned long); 105 int (*set_trip_hyst) (struct thermal_zone_device *, int, int);
106 int (*get_trip_hyst) (struct thermal_zone_device *, int, 106 int (*get_crit_temp) (struct thermal_zone_device *, int *);
107 unsigned long *); 107 int (*set_emul_temp) (struct thermal_zone_device *, int);
108 int (*set_trip_hyst) (struct thermal_zone_device *, int,
109 unsigned long);
110 int (*get_crit_temp) (struct thermal_zone_device *, unsigned long *);
111 int (*set_emul_temp) (struct thermal_zone_device *, unsigned long);
112 int (*get_trend) (struct thermal_zone_device *, int, 108 int (*get_trend) (struct thermal_zone_device *, int,
113 enum thermal_trend *); 109 enum thermal_trend *);
114 int (*notify) (struct thermal_zone_device *, int, 110 int (*notify) (struct thermal_zone_device *, int,
@@ -332,9 +328,9 @@ struct thermal_genl_event {
332 * temperature. 328 * temperature.
333 */ 329 */
334struct thermal_zone_of_device_ops { 330struct thermal_zone_of_device_ops {
335 int (*get_temp)(void *, long *); 331 int (*get_temp)(void *, int *);
336 int (*get_trend)(void *, long *); 332 int (*get_trend)(void *, long *);
337 int (*set_emul_temp)(void *, unsigned long); 333 int (*set_emul_temp)(void *, int);
338}; 334};
339 335
340/** 336/**
@@ -364,7 +360,7 @@ static inline struct thermal_zone_device *
364thermal_zone_of_sensor_register(struct device *dev, int id, void *data, 360thermal_zone_of_sensor_register(struct device *dev, int id, void *data,
365 const struct thermal_zone_of_device_ops *ops) 361 const struct thermal_zone_of_device_ops *ops)
366{ 362{
367 return NULL; 363 return ERR_PTR(-ENODEV);
368} 364}
369 365
370static inline 366static inline
@@ -384,6 +380,8 @@ static inline bool cdev_is_power_actor(struct thermal_cooling_device *cdev)
384 380
385int power_actor_get_max_power(struct thermal_cooling_device *, 381int power_actor_get_max_power(struct thermal_cooling_device *,
386 struct thermal_zone_device *tz, u32 *max_power); 382 struct thermal_zone_device *tz, u32 *max_power);
383int power_actor_get_min_power(struct thermal_cooling_device *,
384 struct thermal_zone_device *tz, u32 *min_power);
387int power_actor_set_power(struct thermal_cooling_device *, 385int power_actor_set_power(struct thermal_cooling_device *,
388 struct thermal_instance *, u32); 386 struct thermal_instance *, u32);
389struct thermal_zone_device *thermal_zone_device_register(const char *, int, int, 387struct thermal_zone_device *thermal_zone_device_register(const char *, int, int,
@@ -406,7 +404,7 @@ thermal_of_cooling_device_register(struct device_node *np, char *, void *,
406 const struct thermal_cooling_device_ops *); 404 const struct thermal_cooling_device_ops *);
407void thermal_cooling_device_unregister(struct thermal_cooling_device *); 405void thermal_cooling_device_unregister(struct thermal_cooling_device *);
408struct thermal_zone_device *thermal_zone_get_zone_by_name(const char *name); 406struct thermal_zone_device *thermal_zone_get_zone_by_name(const char *name);
409int thermal_zone_get_temp(struct thermal_zone_device *tz, unsigned long *temp); 407int thermal_zone_get_temp(struct thermal_zone_device *tz, int *temp);
410 408
411int get_tz_trend(struct thermal_zone_device *, int); 409int get_tz_trend(struct thermal_zone_device *, int);
412struct thermal_instance *get_thermal_instance(struct thermal_zone_device *, 410struct thermal_instance *get_thermal_instance(struct thermal_zone_device *,
@@ -419,6 +417,10 @@ static inline bool cdev_is_power_actor(struct thermal_cooling_device *cdev)
419static inline int power_actor_get_max_power(struct thermal_cooling_device *cdev, 417static inline int power_actor_get_max_power(struct thermal_cooling_device *cdev,
420 struct thermal_zone_device *tz, u32 *max_power) 418 struct thermal_zone_device *tz, u32 *max_power)
421{ return 0; } 419{ return 0; }
420static inline int power_actor_get_min_power(struct thermal_cooling_device *cdev,
421 struct thermal_zone_device *tz,
422 u32 *min_power)
423{ return -ENODEV; }
422static inline int power_actor_set_power(struct thermal_cooling_device *cdev, 424static inline int power_actor_set_power(struct thermal_cooling_device *cdev,
423 struct thermal_instance *tz, u32 power) 425 struct thermal_instance *tz, u32 power)
424{ return 0; } 426{ return 0; }
@@ -457,7 +459,7 @@ static inline struct thermal_zone_device *thermal_zone_get_zone_by_name(
457 const char *name) 459 const char *name)
458{ return ERR_PTR(-ENODEV); } 460{ return ERR_PTR(-ENODEV); }
459static inline int thermal_zone_get_temp( 461static inline int thermal_zone_get_temp(
460 struct thermal_zone_device *tz, unsigned long *temp) 462 struct thermal_zone_device *tz, int *temp)
461{ return -ENODEV; } 463{ return -ENODEV; }
462static inline int get_tz_trend(struct thermal_zone_device *tz, int trip) 464static inline int get_tz_trend(struct thermal_zone_device *tz, int trip)
463{ return -ENODEV; } 465{ return -ENODEV; }
diff --git a/include/linux/tick.h b/include/linux/tick.h
index 48d901f83f92..e312219ff823 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -147,11 +147,20 @@ static inline void tick_nohz_full_add_cpus_to(struct cpumask *mask)
147 cpumask_or(mask, mask, tick_nohz_full_mask); 147 cpumask_or(mask, mask, tick_nohz_full_mask);
148} 148}
149 149
150static inline int housekeeping_any_cpu(void)
151{
152 return cpumask_any_and(housekeeping_mask, cpu_online_mask);
153}
154
150extern void tick_nohz_full_kick(void); 155extern void tick_nohz_full_kick(void);
151extern void tick_nohz_full_kick_cpu(int cpu); 156extern void tick_nohz_full_kick_cpu(int cpu);
152extern void tick_nohz_full_kick_all(void); 157extern void tick_nohz_full_kick_all(void);
153extern void __tick_nohz_task_switch(void); 158extern void __tick_nohz_task_switch(void);
154#else 159#else
160static inline int housekeeping_any_cpu(void)
161{
162 return smp_processor_id();
163}
155static inline bool tick_nohz_full_enabled(void) { return false; } 164static inline bool tick_nohz_full_enabled(void) { return false; }
156static inline bool tick_nohz_full_cpu(int cpu) { return false; } 165static inline bool tick_nohz_full_cpu(int cpu) { return false; }
157static inline void tick_nohz_full_add_cpus_to(struct cpumask *mask) { } 166static inline void tick_nohz_full_add_cpus_to(struct cpumask *mask) { }
diff --git a/include/linux/wait.h b/include/linux/wait.h
index d3d077228d4c..1e1bf9f963a9 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -147,8 +147,7 @@ __remove_wait_queue(wait_queue_head_t *head, wait_queue_t *old)
147 147
148typedef int wait_bit_action_f(struct wait_bit_key *); 148typedef int wait_bit_action_f(struct wait_bit_key *);
149void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key); 149void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
150void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, int nr, 150void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key);
151 void *key);
152void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr, void *key); 151void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
153void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr); 152void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr);
154void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr); 153void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr);
@@ -180,7 +179,7 @@ wait_queue_head_t *bit_waitqueue(void *, int);
180#define wake_up_poll(x, m) \ 179#define wake_up_poll(x, m) \
181 __wake_up(x, TASK_NORMAL, 1, (void *) (m)) 180 __wake_up(x, TASK_NORMAL, 1, (void *) (m))
182#define wake_up_locked_poll(x, m) \ 181#define wake_up_locked_poll(x, m) \
183 __wake_up_locked_key((x), TASK_NORMAL, 1, (void *) (m)) 182 __wake_up_locked_key((x), TASK_NORMAL, (void *) (m))
184#define wake_up_interruptible_poll(x, m) \ 183#define wake_up_interruptible_poll(x, m) \
185 __wake_up(x, TASK_INTERRUPTIBLE, 1, (void *) (m)) 184 __wake_up(x, TASK_INTERRUPTIBLE, 1, (void *) (m))
186#define wake_up_interruptible_sync_poll(x, m) \ 185#define wake_up_interruptible_sync_poll(x, m) \
diff --git a/include/media/videobuf2-memops.h b/include/media/videobuf2-memops.h
index 9f36641a6781..6513c7ec3116 100644
--- a/include/media/videobuf2-memops.h
+++ b/include/media/videobuf2-memops.h
@@ -15,6 +15,7 @@
15#define _MEDIA_VIDEOBUF2_MEMOPS_H 15#define _MEDIA_VIDEOBUF2_MEMOPS_H
16 16
17#include <media/videobuf2-core.h> 17#include <media/videobuf2-core.h>
18#include <linux/mm.h>
18 19
19/** 20/**
20 * struct vb2_vmarea_handler - common vma refcount tracking handler 21 * struct vb2_vmarea_handler - common vma refcount tracking handler
@@ -31,11 +32,9 @@ struct vb2_vmarea_handler {
31 32
32extern const struct vm_operations_struct vb2_common_vm_ops; 33extern const struct vm_operations_struct vb2_common_vm_ops;
33 34
34int vb2_get_contig_userptr(unsigned long vaddr, unsigned long size, 35struct frame_vector *vb2_create_framevec(unsigned long start,
35 struct vm_area_struct **res_vma, dma_addr_t *res_pa); 36 unsigned long length,
36 37 bool write);
37struct vm_area_struct *vb2_get_vma(struct vm_area_struct *vma); 38void vb2_destroy_framevec(struct frame_vector *vec);
38void vb2_put_vma(struct vm_area_struct *vma);
39
40 39
41#endif 40#endif
diff --git a/include/net/flow.h b/include/net/flow.h
index acd6a096250e..9b85db85f13c 100644
--- a/include/net/flow.h
+++ b/include/net/flow.h
@@ -35,6 +35,7 @@ struct flowi_common {
35#define FLOWI_FLAG_ANYSRC 0x01 35#define FLOWI_FLAG_ANYSRC 0x01
36#define FLOWI_FLAG_KNOWN_NH 0x02 36#define FLOWI_FLAG_KNOWN_NH 0x02
37#define FLOWI_FLAG_VRFSRC 0x04 37#define FLOWI_FLAG_VRFSRC 0x04
38#define FLOWI_FLAG_SKIP_NH_OIF 0x08
38 __u32 flowic_secid; 39 __u32 flowic_secid;
39 struct flowi_tunnel flowic_tun_key; 40 struct flowi_tunnel flowic_tun_key;
40}; 41};
diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h
index 879d6e5a973b..186f3a1e1b1f 100644
--- a/include/net/inet_timewait_sock.h
+++ b/include/net/inet_timewait_sock.h
@@ -110,7 +110,19 @@ struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk,
110void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk, 110void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
111 struct inet_hashinfo *hashinfo); 111 struct inet_hashinfo *hashinfo);
112 112
113void inet_twsk_schedule(struct inet_timewait_sock *tw, const int timeo); 113void __inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo,
114 bool rearm);
115
116static void inline inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo)
117{
118 __inet_twsk_schedule(tw, timeo, false);
119}
120
121static void inline inet_twsk_reschedule(struct inet_timewait_sock *tw, int timeo)
122{
123 __inet_twsk_schedule(tw, timeo, true);
124}
125
114void inet_twsk_deschedule_put(struct inet_timewait_sock *tw); 126void inet_twsk_deschedule_put(struct inet_timewait_sock *tw);
115 127
116void inet_twsk_purge(struct inet_hashinfo *hashinfo, 128void inet_twsk_purge(struct inet_hashinfo *hashinfo,
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index 063d30474cf6..aaf9700fc9e5 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -275,7 +275,8 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt,
275 struct nl_info *info, struct mx6_config *mxc); 275 struct nl_info *info, struct mx6_config *mxc);
276int fib6_del(struct rt6_info *rt, struct nl_info *info); 276int fib6_del(struct rt6_info *rt, struct nl_info *info);
277 277
278void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info); 278void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info,
279 unsigned int flags);
279 280
280void fib6_run_gc(unsigned long expires, struct net *net, bool force); 281void fib6_run_gc(unsigned long expires, struct net *net, bool force);
281 282
diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h
index b8529aa1dae7..fa915fa0f703 100644
--- a/include/net/ip6_tunnel.h
+++ b/include/net/ip6_tunnel.h
@@ -32,6 +32,12 @@ struct __ip6_tnl_parm {
32 __be32 o_key; 32 __be32 o_key;
33}; 33};
34 34
35struct ip6_tnl_dst {
36 seqlock_t lock;
37 struct dst_entry __rcu *dst;
38 u32 cookie;
39};
40
35/* IPv6 tunnel */ 41/* IPv6 tunnel */
36struct ip6_tnl { 42struct ip6_tnl {
37 struct ip6_tnl __rcu *next; /* next tunnel in list */ 43 struct ip6_tnl __rcu *next; /* next tunnel in list */
@@ -39,8 +45,7 @@ struct ip6_tnl {
39 struct net *net; /* netns for packet i/o */ 45 struct net *net; /* netns for packet i/o */
40 struct __ip6_tnl_parm parms; /* tunnel configuration parameters */ 46 struct __ip6_tnl_parm parms; /* tunnel configuration parameters */
41 struct flowi fl; /* flowi template for xmit */ 47 struct flowi fl; /* flowi template for xmit */
42 struct dst_entry *dst_cache; /* cached dst */ 48 struct ip6_tnl_dst __percpu *dst_cache; /* cached dst */
43 u32 dst_cookie;
44 49
45 int err_count; 50 int err_count;
46 unsigned long err_time; 51 unsigned long err_time;
@@ -60,9 +65,11 @@ struct ipv6_tlv_tnl_enc_lim {
60 __u8 encap_limit; /* tunnel encapsulation limit */ 65 __u8 encap_limit; /* tunnel encapsulation limit */
61} __packed; 66} __packed;
62 67
63struct dst_entry *ip6_tnl_dst_check(struct ip6_tnl *t); 68struct dst_entry *ip6_tnl_dst_get(struct ip6_tnl *t);
69int ip6_tnl_dst_init(struct ip6_tnl *t);
70void ip6_tnl_dst_destroy(struct ip6_tnl *t);
64void ip6_tnl_dst_reset(struct ip6_tnl *t); 71void ip6_tnl_dst_reset(struct ip6_tnl *t);
65void ip6_tnl_dst_store(struct ip6_tnl *t, struct dst_entry *dst); 72void ip6_tnl_dst_set(struct ip6_tnl *t, struct dst_entry *dst);
66int ip6_tnl_rcv_ctl(struct ip6_tnl *t, const struct in6_addr *laddr, 73int ip6_tnl_rcv_ctl(struct ip6_tnl *t, const struct in6_addr *laddr,
67 const struct in6_addr *raddr); 74 const struct in6_addr *raddr);
68int ip6_tnl_xmit_ctl(struct ip6_tnl *t, const struct in6_addr *laddr, 75int ip6_tnl_xmit_ctl(struct ip6_tnl *t, const struct in6_addr *laddr,
@@ -79,7 +86,7 @@ static inline void ip6tunnel_xmit(struct sock *sk, struct sk_buff *skb,
79 struct net_device_stats *stats = &dev->stats; 86 struct net_device_stats *stats = &dev->stats;
80 int pkt_len, err; 87 int pkt_len, err;
81 88
82 pkt_len = skb->len; 89 pkt_len = skb->len - skb_inner_network_offset(skb);
83 err = ip6_local_out_sk(sk, skb); 90 err = ip6_local_out_sk(sk, skb);
84 91
85 if (net_xmit_eval(err) == 0) { 92 if (net_xmit_eval(err) == 0) {
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index a37d0432bebd..727d6e9a9685 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -236,8 +236,11 @@ static inline int fib_lookup(struct net *net, const struct flowi4 *flp,
236 rcu_read_lock(); 236 rcu_read_lock();
237 237
238 tb = fib_get_table(net, RT_TABLE_MAIN); 238 tb = fib_get_table(net, RT_TABLE_MAIN);
239 if (tb && !fib_table_lookup(tb, flp, res, flags | FIB_LOOKUP_NOREF)) 239 if (tb)
240 err = 0; 240 err = fib_table_lookup(tb, flp, res, flags | FIB_LOOKUP_NOREF);
241
242 if (err == -EAGAIN)
243 err = -ENETUNREACH;
241 244
242 rcu_read_unlock(); 245 rcu_read_unlock();
243 246
@@ -258,7 +261,7 @@ static inline int fib_lookup(struct net *net, struct flowi4 *flp,
258 struct fib_result *res, unsigned int flags) 261 struct fib_result *res, unsigned int flags)
259{ 262{
260 struct fib_table *tb; 263 struct fib_table *tb;
261 int err; 264 int err = -ENETUNREACH;
262 265
263 flags |= FIB_LOOKUP_NOREF; 266 flags |= FIB_LOOKUP_NOREF;
264 if (net->ipv4.fib_has_custom_rules) 267 if (net->ipv4.fib_has_custom_rules)
@@ -268,15 +271,20 @@ static inline int fib_lookup(struct net *net, struct flowi4 *flp,
268 271
269 res->tclassid = 0; 272 res->tclassid = 0;
270 273
271 for (err = 0; !err; err = -ENETUNREACH) { 274 tb = rcu_dereference_rtnl(net->ipv4.fib_main);
272 tb = rcu_dereference_rtnl(net->ipv4.fib_main); 275 if (tb)
273 if (tb && !fib_table_lookup(tb, flp, res, flags)) 276 err = fib_table_lookup(tb, flp, res, flags);
274 break; 277
278 if (!err)
279 goto out;
280
281 tb = rcu_dereference_rtnl(net->ipv4.fib_default);
282 if (tb)
283 err = fib_table_lookup(tb, flp, res, flags);
275 284
276 tb = rcu_dereference_rtnl(net->ipv4.fib_default); 285out:
277 if (tb && !fib_table_lookup(tb, flp, res, flags)) 286 if (err == -EAGAIN)
278 break; 287 err = -ENETUNREACH;
279 }
280 288
281 rcu_read_unlock(); 289 rcu_read_unlock();
282 290
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index 9a6a3ba888e8..f6dafec9102c 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -276,6 +276,8 @@ int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto);
276int iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb, 276int iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
277 __be32 src, __be32 dst, u8 proto, 277 __be32 src, __be32 dst, u8 proto,
278 u8 tos, u8 ttl, __be16 df, bool xnet); 278 u8 tos, u8 ttl, __be16 df, bool xnet);
279struct metadata_dst *iptunnel_metadata_reply(struct metadata_dst *md,
280 gfp_t flags);
279 281
280struct sk_buff *iptunnel_handle_offloads(struct sk_buff *skb, bool gre_csum, 282struct sk_buff *iptunnel_handle_offloads(struct sk_buff *skb, bool gre_csum,
281 int gso_type_mask); 283 int gso_type_mask);
diff --git a/include/net/route.h b/include/net/route.h
index 414beadc619f..d1bd90bb3187 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -257,7 +257,7 @@ static inline void ip_route_connect_init(struct flowi4 *fl4, __be32 dst, __be32
257 flow_flags |= FLOWI_FLAG_ANYSRC; 257 flow_flags |= FLOWI_FLAG_ANYSRC;
258 258
259 if (netif_index_is_vrf(sock_net(sk), oif)) 259 if (netif_index_is_vrf(sock_net(sk), oif))
260 flow_flags |= FLOWI_FLAG_VRFSRC; 260 flow_flags |= FLOWI_FLAG_VRFSRC | FLOWI_FLAG_SKIP_NH_OIF;
261 261
262 flowi4_init_output(fl4, oif, sk->sk_mark, tos, RT_SCOPE_UNIVERSE, 262 flowi4_init_output(fl4, oif, sk->sk_mark, tos, RT_SCOPE_UNIVERSE,
263 protocol, flow_flags, dst, src, dport, sport); 263 protocol, flow_flags, dst, src, dport, sport);
diff --git a/include/rdma/opa_port_info.h b/include/rdma/opa_port_info.h
index 391dae1931c0..a0fa975cd1c1 100644
--- a/include/rdma/opa_port_info.h
+++ b/include/rdma/opa_port_info.h
@@ -294,8 +294,8 @@ struct opa_port_states {
294 294
295struct opa_port_state_info { 295struct opa_port_state_info {
296 struct opa_port_states port_states; 296 struct opa_port_states port_states;
297 u16 link_width_downgrade_tx_active; 297 __be16 link_width_downgrade_tx_active;
298 u16 link_width_downgrade_rx_active; 298 __be16 link_width_downgrade_rx_active;
299}; 299};
300 300
301struct opa_port_info { 301struct opa_port_info {
diff --git a/include/scsi/scsi_common.h b/include/scsi/scsi_common.h
index 676b03b78e57..11571b2a831e 100644
--- a/include/scsi/scsi_common.h
+++ b/include/scsi/scsi_common.h
@@ -61,4 +61,9 @@ static inline bool scsi_sense_valid(const struct scsi_sense_hdr *sshdr)
61extern bool scsi_normalize_sense(const u8 *sense_buffer, int sb_len, 61extern bool scsi_normalize_sense(const u8 *sense_buffer, int sb_len,
62 struct scsi_sense_hdr *sshdr); 62 struct scsi_sense_hdr *sshdr);
63 63
64extern void scsi_build_sense_buffer(int desc, u8 *buf, u8 key, u8 asc, u8 ascq);
65int scsi_set_sense_information(u8 *buf, int buf_len, u64 info);
66extern const u8 * scsi_sense_desc_find(const u8 * sense_buffer, int sb_len,
67 int desc_type);
68
64#endif /* _SCSI_COMMON_H_ */ 69#endif /* _SCSI_COMMON_H_ */
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index 50c2a363bc8f..fe89d7cd67b9 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -196,34 +196,13 @@ struct scsi_device {
196 struct execute_work ew; /* used to get process context on put */ 196 struct execute_work ew; /* used to get process context on put */
197 struct work_struct requeue_work; 197 struct work_struct requeue_work;
198 198
199 struct scsi_dh_data *scsi_dh_data; 199 struct scsi_device_handler *handler;
200 void *handler_data;
201
200 enum scsi_device_state sdev_state; 202 enum scsi_device_state sdev_state;
201 unsigned long sdev_data[0]; 203 unsigned long sdev_data[0];
202} __attribute__((aligned(sizeof(unsigned long)))); 204} __attribute__((aligned(sizeof(unsigned long))));
203 205
204typedef void (*activate_complete)(void *, int);
205struct scsi_device_handler {
206 /* Used by the infrastructure */
207 struct list_head list; /* list of scsi_device_handlers */
208
209 /* Filled by the hardware handler */
210 struct module *module;
211 const char *name;
212 int (*check_sense)(struct scsi_device *, struct scsi_sense_hdr *);
213 struct scsi_dh_data *(*attach)(struct scsi_device *);
214 void (*detach)(struct scsi_device *);
215 int (*activate)(struct scsi_device *, activate_complete, void *);
216 int (*prep_fn)(struct scsi_device *, struct request *);
217 int (*set_params)(struct scsi_device *, const char *);
218 bool (*match)(struct scsi_device *);
219};
220
221struct scsi_dh_data {
222 struct scsi_device_handler *scsi_dh;
223 struct scsi_device *sdev;
224 struct kref kref;
225};
226
227#define to_scsi_device(d) \ 206#define to_scsi_device(d) \
228 container_of(d, struct scsi_device, sdev_gendev) 207 container_of(d, struct scsi_device, sdev_gendev)
229#define class_to_sdev(d) \ 208#define class_to_sdev(d) \
diff --git a/include/scsi/scsi_dh.h b/include/scsi/scsi_dh.h
index 620c723ee8ed..85d731746834 100644
--- a/include/scsi/scsi_dh.h
+++ b/include/scsi/scsi_dh.h
@@ -55,11 +55,26 @@ enum {
55 SCSI_DH_NOSYS, 55 SCSI_DH_NOSYS,
56 SCSI_DH_DRIVER_MAX, 56 SCSI_DH_DRIVER_MAX,
57}; 57};
58#if defined(CONFIG_SCSI_DH) || defined(CONFIG_SCSI_DH_MODULE) 58
59typedef void (*activate_complete)(void *, int);
60struct scsi_device_handler {
61 /* Used by the infrastructure */
62 struct list_head list; /* list of scsi_device_handlers */
63
64 /* Filled by the hardware handler */
65 struct module *module;
66 const char *name;
67 int (*check_sense)(struct scsi_device *, struct scsi_sense_hdr *);
68 int (*attach)(struct scsi_device *);
69 void (*detach)(struct scsi_device *);
70 int (*activate)(struct scsi_device *, activate_complete, void *);
71 int (*prep_fn)(struct scsi_device *, struct request *);
72 int (*set_params)(struct scsi_device *, const char *);
73};
74
75#ifdef CONFIG_SCSI_DH
59extern int scsi_dh_activate(struct request_queue *, activate_complete, void *); 76extern int scsi_dh_activate(struct request_queue *, activate_complete, void *);
60extern int scsi_dh_handler_exist(const char *);
61extern int scsi_dh_attach(struct request_queue *, const char *); 77extern int scsi_dh_attach(struct request_queue *, const char *);
62extern void scsi_dh_detach(struct request_queue *);
63extern const char *scsi_dh_attached_handler_name(struct request_queue *, gfp_t); 78extern const char *scsi_dh_attached_handler_name(struct request_queue *, gfp_t);
64extern int scsi_dh_set_params(struct request_queue *, const char *); 79extern int scsi_dh_set_params(struct request_queue *, const char *);
65#else 80#else
@@ -69,18 +84,10 @@ static inline int scsi_dh_activate(struct request_queue *req,
69 fn(data, 0); 84 fn(data, 0);
70 return 0; 85 return 0;
71} 86}
72static inline int scsi_dh_handler_exist(const char *name)
73{
74 return 0;
75}
76static inline int scsi_dh_attach(struct request_queue *req, const char *name) 87static inline int scsi_dh_attach(struct request_queue *req, const char *name)
77{ 88{
78 return SCSI_DH_NOSYS; 89 return SCSI_DH_NOSYS;
79} 90}
80static inline void scsi_dh_detach(struct request_queue *q)
81{
82 return;
83}
84static inline const char *scsi_dh_attached_handler_name(struct request_queue *q, 91static inline const char *scsi_dh_attached_handler_name(struct request_queue *q,
85 gfp_t gfp) 92 gfp_t gfp)
86{ 93{
diff --git a/include/scsi/scsi_eh.h b/include/scsi/scsi_eh.h
index 8d1d7fa67ec4..dbb8c640e26f 100644
--- a/include/scsi/scsi_eh.h
+++ b/include/scsi/scsi_eh.h
@@ -4,6 +4,7 @@
4#include <linux/scatterlist.h> 4#include <linux/scatterlist.h>
5 5
6#include <scsi/scsi_cmnd.h> 6#include <scsi/scsi_cmnd.h>
7#include <scsi/scsi_common.h>
7struct scsi_device; 8struct scsi_device;
8struct Scsi_Host; 9struct Scsi_Host;
9 10
@@ -21,14 +22,9 @@ static inline bool scsi_sense_is_deferred(const struct scsi_sense_hdr *sshdr)
21 return ((sshdr->response_code >= 0x70) && (sshdr->response_code & 1)); 22 return ((sshdr->response_code >= 0x70) && (sshdr->response_code & 1));
22} 23}
23 24
24extern const u8 * scsi_sense_desc_find(const u8 * sense_buffer, int sb_len,
25 int desc_type);
26
27extern int scsi_get_sense_info_fld(const u8 * sense_buffer, int sb_len, 25extern int scsi_get_sense_info_fld(const u8 * sense_buffer, int sb_len,
28 u64 * info_out); 26 u64 * info_out);
29 27
30extern void scsi_build_sense_buffer(int desc, u8 *buf, u8 key, u8 asc, u8 ascq);
31
32extern int scsi_ioctl_reset(struct scsi_device *, int __user *); 28extern int scsi_ioctl_reset(struct scsi_device *, int __user *);
33 29
34struct scsi_eh_save { 30struct scsi_eh_save {
diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h
index 0aedbb2c10e0..373d3342002b 100644
--- a/include/target/iscsi/iscsi_target_core.h
+++ b/include/target/iscsi/iscsi_target_core.h
@@ -62,6 +62,8 @@
62/* T10 protection information disabled by default */ 62/* T10 protection information disabled by default */
63#define TA_DEFAULT_T10_PI 0 63#define TA_DEFAULT_T10_PI 0
64#define TA_DEFAULT_FABRIC_PROT_TYPE 0 64#define TA_DEFAULT_FABRIC_PROT_TYPE 0
65/* TPG status needs to be enabled to return sendtargets discovery endpoint info */
66#define TA_DEFAULT_TPG_ENABLED_SENDTARGETS 1
65 67
66#define ISCSI_IOV_DATA_BUFFER 5 68#define ISCSI_IOV_DATA_BUFFER 5
67 69
@@ -517,7 +519,6 @@ struct iscsi_conn {
517 u16 cid; 519 u16 cid;
518 /* Remote TCP Port */ 520 /* Remote TCP Port */
519 u16 login_port; 521 u16 login_port;
520 u16 local_port;
521 int net_size; 522 int net_size;
522 int login_family; 523 int login_family;
523 u32 auth_id; 524 u32 auth_id;
@@ -527,9 +528,8 @@ struct iscsi_conn {
527 u32 exp_statsn; 528 u32 exp_statsn;
528 /* Per connection status sequence number */ 529 /* Per connection status sequence number */
529 u32 stat_sn; 530 u32 stat_sn;
530#define IPV6_ADDRESS_SPACE 48 531 struct sockaddr_storage login_sockaddr;
531 unsigned char login_ip[IPV6_ADDRESS_SPACE]; 532 struct sockaddr_storage local_sockaddr;
532 unsigned char local_ip[IPV6_ADDRESS_SPACE];
533 int conn_usage_count; 533 int conn_usage_count;
534 int conn_waiting_on_uc; 534 int conn_waiting_on_uc;
535 atomic_t check_immediate_queue; 535 atomic_t check_immediate_queue;
@@ -636,7 +636,7 @@ struct iscsi_session {
636 /* session wide counter: expected command sequence number */ 636 /* session wide counter: expected command sequence number */
637 u32 exp_cmd_sn; 637 u32 exp_cmd_sn;
638 /* session wide counter: maximum allowed command sequence number */ 638 /* session wide counter: maximum allowed command sequence number */
639 u32 max_cmd_sn; 639 atomic_t max_cmd_sn;
640 struct list_head sess_ooo_cmdsn_list; 640 struct list_head sess_ooo_cmdsn_list;
641 641
642 /* LIO specific session ID */ 642 /* LIO specific session ID */
@@ -764,6 +764,7 @@ struct iscsi_tpg_attrib {
764 u32 default_erl; 764 u32 default_erl;
765 u8 t10_pi; 765 u8 t10_pi;
766 u32 fabric_prot_type; 766 u32 fabric_prot_type;
767 u32 tpg_enabled_sendtargets;
767 struct iscsi_portal_group *tpg; 768 struct iscsi_portal_group *tpg;
768}; 769};
769 770
@@ -776,12 +777,10 @@ struct iscsi_np {
776 enum iscsi_timer_flags_table np_login_timer_flags; 777 enum iscsi_timer_flags_table np_login_timer_flags;
777 u32 np_exports; 778 u32 np_exports;
778 enum np_flags_table np_flags; 779 enum np_flags_table np_flags;
779 unsigned char np_ip[IPV6_ADDRESS_SPACE];
780 u16 np_port;
781 spinlock_t np_thread_lock; 780 spinlock_t np_thread_lock;
782 struct completion np_restart_comp; 781 struct completion np_restart_comp;
783 struct socket *np_socket; 782 struct socket *np_socket;
784 struct __kernel_sockaddr_storage np_sockaddr; 783 struct sockaddr_storage np_sockaddr;
785 struct task_struct *np_thread; 784 struct task_struct *np_thread;
786 struct timer_list np_login_timer; 785 struct timer_list np_login_timer;
787 void *np_context; 786 void *np_context;
diff --git a/include/target/iscsi/iscsi_target_stat.h b/include/target/iscsi/iscsi_target_stat.h
index 3ff76b4faad3..e615bb485d0b 100644
--- a/include/target/iscsi/iscsi_target_stat.h
+++ b/include/target/iscsi/iscsi_target_stat.h
@@ -50,7 +50,7 @@ struct iscsi_login_stats {
50 u64 last_fail_time; /* time stamp (jiffies) */ 50 u64 last_fail_time; /* time stamp (jiffies) */
51 u32 last_fail_type; 51 u32 last_fail_type;
52 int last_intr_fail_ip_family; 52 int last_intr_fail_ip_family;
53 unsigned char last_intr_fail_ip_addr[IPV6_ADDRESS_SPACE]; 53 struct sockaddr_storage last_intr_fail_sockaddr;
54 char last_intr_fail_name[224]; 54 char last_intr_fail_name[224];
55} ____cacheline_aligned; 55} ____cacheline_aligned;
56 56
diff --git a/include/target/iscsi/iscsi_transport.h b/include/target/iscsi/iscsi_transport.h
index e6bb166f12c2..90e37faa2ede 100644
--- a/include/target/iscsi/iscsi_transport.h
+++ b/include/target/iscsi/iscsi_transport.h
@@ -9,7 +9,7 @@ struct iscsit_transport {
9 int priv_size; 9 int priv_size;
10 struct module *owner; 10 struct module *owner;
11 struct list_head t_node; 11 struct list_head t_node;
12 int (*iscsit_setup_np)(struct iscsi_np *, struct __kernel_sockaddr_storage *); 12 int (*iscsit_setup_np)(struct iscsi_np *, struct sockaddr_storage *);
13 int (*iscsit_accept_np)(struct iscsi_np *, struct iscsi_conn *); 13 int (*iscsit_accept_np)(struct iscsi_np *, struct iscsi_conn *);
14 void (*iscsit_free_np)(struct iscsi_np *); 14 void (*iscsit_free_np)(struct iscsi_np *);
15 void (*iscsit_wait_conn)(struct iscsi_conn *); 15 void (*iscsit_wait_conn)(struct iscsi_conn *);
diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h
index 1e5c8f949bae..56cf8e485ef2 100644
--- a/include/target/target_core_backend.h
+++ b/include/target/target_core_backend.h
@@ -93,4 +93,6 @@ bool target_lun_is_rdonly(struct se_cmd *);
93sense_reason_t passthrough_parse_cdb(struct se_cmd *cmd, 93sense_reason_t passthrough_parse_cdb(struct se_cmd *cmd,
94 sense_reason_t (*exec_cmd)(struct se_cmd *cmd)); 94 sense_reason_t (*exec_cmd)(struct se_cmd *cmd));
95 95
96bool target_sense_desc_format(struct se_device *dev);
97
96#endif /* TARGET_CORE_BACKEND_H */ 98#endif /* TARGET_CORE_BACKEND_H */
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 17ae2d6a4891..ac9bf1c0e42d 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -6,6 +6,7 @@
6#include <linux/dma-mapping.h> 6#include <linux/dma-mapping.h>
7#include <linux/blkdev.h> 7#include <linux/blkdev.h>
8#include <linux/percpu_ida.h> 8#include <linux/percpu_ida.h>
9#include <linux/t10-pi.h>
9#include <net/sock.h> 10#include <net/sock.h>
10#include <net/tcp.h> 11#include <net/tcp.h>
11 12
@@ -426,12 +427,6 @@ enum target_core_dif_check {
426 TARGET_DIF_CHECK_REFTAG = 0x1 << 2, 427 TARGET_DIF_CHECK_REFTAG = 0x1 << 2,
427}; 428};
428 429
429struct se_dif_v1_tuple {
430 __be16 guard_tag;
431 __be16 app_tag;
432 __be32 ref_tag;
433};
434
435/* for sam_task_attr */ 430/* for sam_task_attr */
436#define TCM_SIMPLE_TAG 0x20 431#define TCM_SIMPLE_TAG 0x20
437#define TCM_HEAD_TAG 0x21 432#define TCM_HEAD_TAG 0x21
@@ -444,6 +439,9 @@ struct se_cmd {
444 u8 scsi_asc; 439 u8 scsi_asc;
445 u8 scsi_ascq; 440 u8 scsi_ascq;
446 u16 scsi_sense_length; 441 u16 scsi_sense_length;
442 unsigned cmd_wait_set:1;
443 unsigned unknown_data_length:1;
444 bool state_active:1;
447 u64 tag; /* SAM command identifier aka task tag */ 445 u64 tag; /* SAM command identifier aka task tag */
448 /* Delay for ALUA Active/NonOptimized state access in milliseconds */ 446 /* Delay for ALUA Active/NonOptimized state access in milliseconds */
449 int alua_nonop_delay; 447 int alua_nonop_delay;
@@ -455,11 +453,8 @@ struct se_cmd {
455 unsigned int map_tag; 453 unsigned int map_tag;
456 /* Transport protocol dependent state, see transport_state_table */ 454 /* Transport protocol dependent state, see transport_state_table */
457 enum transport_state_table t_state; 455 enum transport_state_table t_state;
458 unsigned cmd_wait_set:1;
459 unsigned unknown_data_length:1;
460 /* See se_cmd_flags_table */ 456 /* See se_cmd_flags_table */
461 u32 se_cmd_flags; 457 u32 se_cmd_flags;
462 u32 se_ordered_id;
463 /* Total size in bytes associated with command */ 458 /* Total size in bytes associated with command */
464 u32 data_length; 459 u32 data_length;
465 u32 residual_count; 460 u32 residual_count;
@@ -477,7 +472,6 @@ struct se_cmd {
477 struct se_tmr_req *se_tmr_req; 472 struct se_tmr_req *se_tmr_req;
478 struct list_head se_cmd_list; 473 struct list_head se_cmd_list;
479 struct completion cmd_wait_comp; 474 struct completion cmd_wait_comp;
480 struct kref cmd_kref;
481 const struct target_core_fabric_ops *se_tfo; 475 const struct target_core_fabric_ops *se_tfo;
482 sense_reason_t (*execute_cmd)(struct se_cmd *); 476 sense_reason_t (*execute_cmd)(struct se_cmd *);
483 sense_reason_t (*transport_complete_callback)(struct se_cmd *, bool); 477 sense_reason_t (*transport_complete_callback)(struct se_cmd *, bool);
@@ -497,6 +491,7 @@ struct se_cmd {
497#define CMD_T_REQUEST_STOP (1 << 8) 491#define CMD_T_REQUEST_STOP (1 << 8)
498#define CMD_T_BUSY (1 << 9) 492#define CMD_T_BUSY (1 << 9)
499 spinlock_t t_state_lock; 493 spinlock_t t_state_lock;
494 struct kref cmd_kref;
500 struct completion t_transport_stop_comp; 495 struct completion t_transport_stop_comp;
501 496
502 struct work_struct work; 497 struct work_struct work;
@@ -509,8 +504,10 @@ struct se_cmd {
509 struct scatterlist *t_bidi_data_sg; 504 struct scatterlist *t_bidi_data_sg;
510 unsigned int t_bidi_data_nents; 505 unsigned int t_bidi_data_nents;
511 506
507 /* Used for lun->lun_ref counting */
508 int lun_ref_active;
509
512 struct list_head state_list; 510 struct list_head state_list;
513 bool state_active;
514 511
515 /* old task stop completion, consider merging with some of the above */ 512 /* old task stop completion, consider merging with some of the above */
516 struct completion task_stop_comp; 513 struct completion task_stop_comp;
@@ -518,20 +515,17 @@ struct se_cmd {
518 /* backend private data */ 515 /* backend private data */
519 void *priv; 516 void *priv;
520 517
521 /* Used for lun->lun_ref counting */
522 int lun_ref_active;
523
524 /* DIF related members */ 518 /* DIF related members */
525 enum target_prot_op prot_op; 519 enum target_prot_op prot_op;
526 enum target_prot_type prot_type; 520 enum target_prot_type prot_type;
527 u8 prot_checks; 521 u8 prot_checks;
522 bool prot_pto;
528 u32 prot_length; 523 u32 prot_length;
529 u32 reftag_seed; 524 u32 reftag_seed;
530 struct scatterlist *t_prot_sg; 525 struct scatterlist *t_prot_sg;
531 unsigned int t_prot_nents; 526 unsigned int t_prot_nents;
532 sense_reason_t pi_err; 527 sense_reason_t pi_err;
533 sector_t bad_sector; 528 sector_t bad_sector;
534 bool prot_pto;
535}; 529};
536 530
537struct se_ua { 531struct se_ua {
@@ -598,7 +592,6 @@ struct se_ml_stat_grps {
598}; 592};
599 593
600struct se_lun_acl { 594struct se_lun_acl {
601 char initiatorname[TRANSPORT_IQN_LEN];
602 u64 mapped_lun; 595 u64 mapped_lun;
603 struct se_node_acl *se_lun_nacl; 596 struct se_node_acl *se_lun_nacl;
604 struct se_lun *se_lun; 597 struct se_lun *se_lun;
@@ -685,7 +678,6 @@ struct se_lun {
685#define SE_LUN_LINK_MAGIC 0xffff7771 678#define SE_LUN_LINK_MAGIC 0xffff7771
686 u32 lun_link_magic; 679 u32 lun_link_magic;
687 u32 lun_access; 680 u32 lun_access;
688 u32 lun_flags;
689 u32 lun_index; 681 u32 lun_index;
690 682
691 /* RELATIVE TARGET PORT IDENTIFER */ 683 /* RELATIVE TARGET PORT IDENTIFER */
@@ -751,7 +743,6 @@ struct se_device {
751 atomic_long_t write_bytes; 743 atomic_long_t write_bytes;
752 /* Active commands on this virtual SE device */ 744 /* Active commands on this virtual SE device */
753 atomic_t simple_cmds; 745 atomic_t simple_cmds;
754 atomic_t dev_ordered_id;
755 atomic_t dev_ordered_sync; 746 atomic_t dev_ordered_sync;
756 atomic_t dev_qf_count; 747 atomic_t dev_qf_count;
757 u32 export_count; 748 u32 export_count;
diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h
index 18afef91b447..7fb2557a760e 100644
--- a/include/target/target_core_fabric.h
+++ b/include/target/target_core_fabric.h
@@ -5,6 +5,19 @@ struct target_core_fabric_ops {
5 struct module *module; 5 struct module *module;
6 const char *name; 6 const char *name;
7 size_t node_acl_size; 7 size_t node_acl_size;
8 /*
9 * Limits number of scatterlist entries per SCF_SCSI_DATA_CDB payload.
10 * Setting this value tells target-core to enforce this limit, and
11 * report as INQUIRY EVPD=b0 MAXIMUM TRANSFER LENGTH.
12 *
13 * target-core will currently reset se_cmd->data_length to this
14 * maximum size, and set UNDERFLOW residual count if length exceeds
15 * this limit.
16 *
17 * XXX: Not all initiator hosts honor this block-limit EVPD
18 * XXX: Currently assumes single PAGE_SIZE per scatterlist entry
19 */
20 u32 max_data_sg_nents;
8 char *(*get_fabric_name)(void); 21 char *(*get_fabric_name)(void);
9 char *(*tpg_get_wwn)(struct se_portal_group *); 22 char *(*tpg_get_wwn)(struct se_portal_group *);
10 u16 (*tpg_get_tag)(struct se_portal_group *); 23 u16 (*tpg_get_tag)(struct se_portal_group *);
@@ -152,6 +165,7 @@ int transport_generic_handle_tmr(struct se_cmd *);
152void transport_generic_request_failure(struct se_cmd *, sense_reason_t); 165void transport_generic_request_failure(struct se_cmd *, sense_reason_t);
153void __target_execute_cmd(struct se_cmd *); 166void __target_execute_cmd(struct se_cmd *);
154int transport_lookup_tmr_lun(struct se_cmd *, u64); 167int transport_lookup_tmr_lun(struct se_cmd *, u64);
168void core_allocate_nexus_loss_ua(struct se_node_acl *acl);
155 169
156struct se_node_acl *core_tpg_get_initiator_node_acl(struct se_portal_group *tpg, 170struct se_node_acl *core_tpg_get_initiator_node_acl(struct se_portal_group *tpg,
157 unsigned char *); 171 unsigned char *);
diff --git a/include/trace/events/thermal_power_allocator.h b/include/trace/events/thermal_power_allocator.h
index 12e1321c4e0c..5afae8fe3795 100644
--- a/include/trace/events/thermal_power_allocator.h
+++ b/include/trace/events/thermal_power_allocator.h
@@ -11,7 +11,7 @@ TRACE_EVENT(thermal_power_allocator,
11 u32 total_req_power, u32 *granted_power, 11 u32 total_req_power, u32 *granted_power,
12 u32 total_granted_power, size_t num_actors, 12 u32 total_granted_power, size_t num_actors,
13 u32 power_range, u32 max_allocatable_power, 13 u32 power_range, u32 max_allocatable_power,
14 unsigned long current_temp, s32 delta_temp), 14 int current_temp, s32 delta_temp),
15 TP_ARGS(tz, req_power, total_req_power, granted_power, 15 TP_ARGS(tz, req_power, total_req_power, granted_power,
16 total_granted_power, num_actors, power_range, 16 total_granted_power, num_actors, power_range,
17 max_allocatable_power, current_temp, delta_temp), 17 max_allocatable_power, current_temp, delta_temp),
@@ -24,7 +24,7 @@ TRACE_EVENT(thermal_power_allocator,
24 __field(size_t, num_actors ) 24 __field(size_t, num_actors )
25 __field(u32, power_range ) 25 __field(u32, power_range )
26 __field(u32, max_allocatable_power ) 26 __field(u32, max_allocatable_power )
27 __field(unsigned long, current_temp ) 27 __field(int, current_temp )
28 __field(s32, delta_temp ) 28 __field(s32, delta_temp )
29 ), 29 ),
30 TP_fast_assign( 30 TP_fast_assign(
@@ -42,7 +42,7 @@ TRACE_EVENT(thermal_power_allocator,
42 __entry->delta_temp = delta_temp; 42 __entry->delta_temp = delta_temp;
43 ), 43 ),
44 44
45 TP_printk("thermal_zone_id=%d req_power={%s} total_req_power=%u granted_power={%s} total_granted_power=%u power_range=%u max_allocatable_power=%u current_temperature=%lu delta_temperature=%d", 45 TP_printk("thermal_zone_id=%d req_power={%s} total_req_power=%u granted_power={%s} total_granted_power=%u power_range=%u max_allocatable_power=%u current_temperature=%d delta_temperature=%d",
46 __entry->tz_id, 46 __entry->tz_id,
47 __print_array(__get_dynamic_array(req_power), 47 __print_array(__get_dynamic_array(req_power),
48 __entry->num_actors, 4), 48 __entry->num_actors, 4),
diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h
index e016bd9b1a04..ee124009e12a 100644
--- a/include/uapi/asm-generic/unistd.h
+++ b/include/uapi/asm-generic/unistd.h
@@ -709,15 +709,19 @@ __SYSCALL(__NR_memfd_create, sys_memfd_create)
709__SYSCALL(__NR_bpf, sys_bpf) 709__SYSCALL(__NR_bpf, sys_bpf)
710#define __NR_execveat 281 710#define __NR_execveat 281
711__SC_COMP(__NR_execveat, sys_execveat, compat_sys_execveat) 711__SC_COMP(__NR_execveat, sys_execveat, compat_sys_execveat)
712#define __NR_userfaultfd 282
713__SYSCALL(__NR_userfaultfd, sys_userfaultfd)
714#define __NR_membarrier 283
715__SYSCALL(__NR_membarrier, sys_membarrier)
712 716
713#undef __NR_syscalls 717#undef __NR_syscalls
714#define __NR_syscalls 282 718#define __NR_syscalls 284
715 719
716/* 720/*
717 * All syscalls below here should go away really, 721 * All syscalls below here should go away really,
718 * these are provided for both review and as a porting 722 * these are provided for both review and as a porting
719 * help for the C library version. 723 * help for the C library version.
720* 724 *
721 * Last chance: are any of these important enough to 725 * Last chance: are any of these important enough to
722 * enable by default? 726 * enable by default?
723 */ 727 */
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index 70ff1d9abf0d..f7b2db44eb4b 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -252,6 +252,7 @@ header-y += mdio.h
252header-y += media.h 252header-y += media.h
253header-y += media-bus-format.h 253header-y += media-bus-format.h
254header-y += mei.h 254header-y += mei.h
255header-y += membarrier.h
255header-y += memfd.h 256header-y += memfd.h
256header-y += mempolicy.h 257header-y += mempolicy.h
257header-y += meye.h 258header-y += meye.h
diff --git a/include/uapi/linux/lwtunnel.h b/include/uapi/linux/lwtunnel.h
index 34141a5dfe74..f8b01887a495 100644
--- a/include/uapi/linux/lwtunnel.h
+++ b/include/uapi/linux/lwtunnel.h
@@ -21,8 +21,6 @@ enum lwtunnel_ip_t {
21 LWTUNNEL_IP_SRC, 21 LWTUNNEL_IP_SRC,
22 LWTUNNEL_IP_TTL, 22 LWTUNNEL_IP_TTL,
23 LWTUNNEL_IP_TOS, 23 LWTUNNEL_IP_TOS,
24 LWTUNNEL_IP_SPORT,
25 LWTUNNEL_IP_DPORT,
26 LWTUNNEL_IP_FLAGS, 24 LWTUNNEL_IP_FLAGS,
27 __LWTUNNEL_IP_MAX, 25 __LWTUNNEL_IP_MAX,
28}; 26};
@@ -36,8 +34,6 @@ enum lwtunnel_ip6_t {
36 LWTUNNEL_IP6_SRC, 34 LWTUNNEL_IP6_SRC,
37 LWTUNNEL_IP6_HOPLIMIT, 35 LWTUNNEL_IP6_HOPLIMIT,
38 LWTUNNEL_IP6_TC, 36 LWTUNNEL_IP6_TC,
39 LWTUNNEL_IP6_SPORT,
40 LWTUNNEL_IP6_DPORT,
41 LWTUNNEL_IP6_FLAGS, 37 LWTUNNEL_IP6_FLAGS,
42 __LWTUNNEL_IP6_MAX, 38 __LWTUNNEL_IP6_MAX,
43}; 39};
diff --git a/include/uapi/linux/membarrier.h b/include/uapi/linux/membarrier.h
new file mode 100644
index 000000000000..e0b108bd2624
--- /dev/null
+++ b/include/uapi/linux/membarrier.h
@@ -0,0 +1,53 @@
1#ifndef _UAPI_LINUX_MEMBARRIER_H
2#define _UAPI_LINUX_MEMBARRIER_H
3
4/*
5 * linux/membarrier.h
6 *
7 * membarrier system call API
8 *
9 * Copyright (c) 2010, 2015 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
10 *
11 * Permission is hereby granted, free of charge, to any person obtaining a copy
12 * of this software and associated documentation files (the "Software"), to deal
13 * in the Software without restriction, including without limitation the rights
14 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
15 * copies of the Software, and to permit persons to whom the Software is
16 * furnished to do so, subject to the following conditions:
17 *
18 * The above copyright notice and this permission notice shall be included in
19 * all copies or substantial portions of the Software.
20 *
21 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
22 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
23 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
24 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
25 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
26 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
27 * SOFTWARE.
28 */
29
30/**
31 * enum membarrier_cmd - membarrier system call command
32 * @MEMBARRIER_CMD_QUERY: Query the set of supported commands. It returns
33 * a bitmask of valid commands.
34 * @MEMBARRIER_CMD_SHARED: Execute a memory barrier on all running threads.
35 * Upon return from system call, the caller thread
36 * is ensured that all running threads have passed
37 * through a state where all memory accesses to
38 * user-space addresses match program order between
39 * entry to and return from the system call
40 * (non-running threads are de facto in such a
41 * state). This covers threads from all processes
42 * running on the system. This command returns 0.
43 *
44 * Command to be passed to the membarrier system call. The commands need to
45 * be a single bit each, except for MEMBARRIER_CMD_QUERY which is assigned to
46 * the value 0.
47 */
48enum membarrier_cmd {
49 MEMBARRIER_CMD_QUERY = 0,
50 MEMBARRIER_CMD_SHARED = (1 << 0),
51};
52
53#endif /* _UAPI_LINUX_MEMBARRIER_H */
diff --git a/include/uapi/linux/target_core_user.h b/include/uapi/linux/target_core_user.h
index b67f99d3c520..95c6521d8a95 100644
--- a/include/uapi/linux/target_core_user.h
+++ b/include/uapi/linux/target_core_user.h
@@ -42,10 +42,6 @@
42#define TCMU_MAILBOX_VERSION 2 42#define TCMU_MAILBOX_VERSION 2
43#define ALIGN_SIZE 64 /* Should be enough for most CPUs */ 43#define ALIGN_SIZE 64 /* Should be enough for most CPUs */
44 44
45/* See https://gcc.gnu.org/onlinedocs/cpp/Stringification.html */
46#define xstr(s) str(s)
47#define str(s) #s
48
49struct tcmu_mailbox { 45struct tcmu_mailbox {
50 __u16 version; 46 __u16 version;
51 __u16 flags; 47 __u16 flags;
diff --git a/init/Kconfig b/init/Kconfig
index 02da9f1fd9df..c24b6f767bf0 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1602,6 +1602,18 @@ config PCI_QUIRKS
1602 bugs/quirks. Disable this only if your target machine is 1602 bugs/quirks. Disable this only if your target machine is
1603 unaffected by PCI quirks. 1603 unaffected by PCI quirks.
1604 1604
1605config MEMBARRIER
1606 bool "Enable membarrier() system call" if EXPERT
1607 default y
1608 help
1609 Enable the membarrier() system call that allows issuing memory
1610 barriers across all running threads, which can be used to distribute
1611 the cost of user-space memory barriers asymmetrically by transforming
1612 pairs of memory barriers into pairs consisting of membarrier() and a
1613 compiler barrier.
1614
1615 If unsure, say Y.
1616
1605config EMBEDDED 1617config EMBEDDED
1606 bool "Embedded system" 1618 bool "Embedded system"
1607 option allnoconfig_y 1619 option allnoconfig_y
diff --git a/kernel/Makefile b/kernel/Makefile
index d4988410b410..53abf008ecb3 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -100,6 +100,7 @@ obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
100obj-$(CONFIG_JUMP_LABEL) += jump_label.o 100obj-$(CONFIG_JUMP_LABEL) += jump_label.o
101obj-$(CONFIG_CONTEXT_TRACKING) += context_tracking.o 101obj-$(CONFIG_CONTEXT_TRACKING) += context_tracking.o
102obj-$(CONFIG_TORTURE_TEST) += torture.o 102obj-$(CONFIG_TORTURE_TEST) += torture.o
103obj-$(CONFIG_MEMBARRIER) += membarrier.o
103 104
104obj-$(CONFIG_HAS_IOMEM) += memremap.o 105obj-$(CONFIG_HAS_IOMEM) += memremap.o
105 106
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 2cf0f79f1fc9..2c9eae6ad970 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -46,7 +46,6 @@
46#include <linux/slab.h> 46#include <linux/slab.h>
47#include <linux/spinlock.h> 47#include <linux/spinlock.h>
48#include <linux/rwsem.h> 48#include <linux/rwsem.h>
49#include <linux/percpu-rwsem.h>
50#include <linux/string.h> 49#include <linux/string.h>
51#include <linux/sort.h> 50#include <linux/sort.h>
52#include <linux/kmod.h> 51#include <linux/kmod.h>
@@ -104,8 +103,6 @@ static DEFINE_SPINLOCK(cgroup_idr_lock);
104 */ 103 */
105static DEFINE_SPINLOCK(release_agent_path_lock); 104static DEFINE_SPINLOCK(release_agent_path_lock);
106 105
107struct percpu_rw_semaphore cgroup_threadgroup_rwsem;
108
109#define cgroup_assert_mutex_or_rcu_locked() \ 106#define cgroup_assert_mutex_or_rcu_locked() \
110 RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \ 107 RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \
111 !lockdep_is_held(&cgroup_mutex), \ 108 !lockdep_is_held(&cgroup_mutex), \
@@ -874,6 +871,48 @@ static struct css_set *find_css_set(struct css_set *old_cset,
874 return cset; 871 return cset;
875} 872}
876 873
874void cgroup_threadgroup_change_begin(struct task_struct *tsk)
875{
876 down_read(&tsk->signal->group_rwsem);
877}
878
879void cgroup_threadgroup_change_end(struct task_struct *tsk)
880{
881 up_read(&tsk->signal->group_rwsem);
882}
883
884/**
885 * threadgroup_lock - lock threadgroup
886 * @tsk: member task of the threadgroup to lock
887 *
888 * Lock the threadgroup @tsk belongs to. No new task is allowed to enter
889 * and member tasks aren't allowed to exit (as indicated by PF_EXITING) or
890 * change ->group_leader/pid. This is useful for cases where the threadgroup
891 * needs to stay stable across blockable operations.
892 *
893 * fork and exit explicitly call threadgroup_change_{begin|end}() for
894 * synchronization. While held, no new task will be added to threadgroup
895 * and no existing live task will have its PF_EXITING set.
896 *
897 * de_thread() does threadgroup_change_{begin|end}() when a non-leader
898 * sub-thread becomes a new leader.
899 */
900static void threadgroup_lock(struct task_struct *tsk)
901{
902 down_write(&tsk->signal->group_rwsem);
903}
904
905/**
906 * threadgroup_unlock - unlock threadgroup
907 * @tsk: member task of the threadgroup to unlock
908 *
909 * Reverse threadgroup_lock().
910 */
911static inline void threadgroup_unlock(struct task_struct *tsk)
912{
913 up_write(&tsk->signal->group_rwsem);
914}
915
877static struct cgroup_root *cgroup_root_from_kf(struct kernfs_root *kf_root) 916static struct cgroup_root *cgroup_root_from_kf(struct kernfs_root *kf_root)
878{ 917{
879 struct cgroup *root_cgrp = kf_root->kn->priv; 918 struct cgroup *root_cgrp = kf_root->kn->priv;
@@ -2074,9 +2113,9 @@ static void cgroup_task_migrate(struct cgroup *old_cgrp,
2074 lockdep_assert_held(&css_set_rwsem); 2113 lockdep_assert_held(&css_set_rwsem);
2075 2114
2076 /* 2115 /*
2077 * We are synchronized through cgroup_threadgroup_rwsem against 2116 * We are synchronized through threadgroup_lock() against PF_EXITING
2078 * PF_EXITING setting such that we can't race against cgroup_exit() 2117 * setting such that we can't race against cgroup_exit() changing the
2079 * changing the css_set to init_css_set and dropping the old one. 2118 * css_set to init_css_set and dropping the old one.
2080 */ 2119 */
2081 WARN_ON_ONCE(tsk->flags & PF_EXITING); 2120 WARN_ON_ONCE(tsk->flags & PF_EXITING);
2082 old_cset = task_css_set(tsk); 2121 old_cset = task_css_set(tsk);
@@ -2133,11 +2172,10 @@ static void cgroup_migrate_finish(struct list_head *preloaded_csets)
2133 * @src_cset and add it to @preloaded_csets, which should later be cleaned 2172 * @src_cset and add it to @preloaded_csets, which should later be cleaned
2134 * up by cgroup_migrate_finish(). 2173 * up by cgroup_migrate_finish().
2135 * 2174 *
2136 * This function may be called without holding cgroup_threadgroup_rwsem 2175 * This function may be called without holding threadgroup_lock even if the
2137 * even if the target is a process. Threads may be created and destroyed 2176 * target is a process. Threads may be created and destroyed but as long
2138 * but as long as cgroup_mutex is not dropped, no new css_set can be put 2177 * as cgroup_mutex is not dropped, no new css_set can be put into play and
2139 * into play and the preloaded css_sets are guaranteed to cover all 2178 * the preloaded css_sets are guaranteed to cover all migrations.
2140 * migrations.
2141 */ 2179 */
2142static void cgroup_migrate_add_src(struct css_set *src_cset, 2180static void cgroup_migrate_add_src(struct css_set *src_cset,
2143 struct cgroup *dst_cgrp, 2181 struct cgroup *dst_cgrp,
@@ -2240,7 +2278,7 @@ err:
2240 * @threadgroup: whether @leader points to the whole process or a single task 2278 * @threadgroup: whether @leader points to the whole process or a single task
2241 * 2279 *
2242 * Migrate a process or task denoted by @leader to @cgrp. If migrating a 2280 * Migrate a process or task denoted by @leader to @cgrp. If migrating a
2243 * process, the caller must be holding cgroup_threadgroup_rwsem. The 2281 * process, the caller must be holding threadgroup_lock of @leader. The
2244 * caller is also responsible for invoking cgroup_migrate_add_src() and 2282 * caller is also responsible for invoking cgroup_migrate_add_src() and
2245 * cgroup_migrate_prepare_dst() on the targets before invoking this 2283 * cgroup_migrate_prepare_dst() on the targets before invoking this
2246 * function and following up with cgroup_migrate_finish(). 2284 * function and following up with cgroup_migrate_finish().
@@ -2368,7 +2406,7 @@ out_release_tset:
2368 * @leader: the task or the leader of the threadgroup to be attached 2406 * @leader: the task or the leader of the threadgroup to be attached
2369 * @threadgroup: attach the whole threadgroup? 2407 * @threadgroup: attach the whole threadgroup?
2370 * 2408 *
2371 * Call holding cgroup_mutex and cgroup_threadgroup_rwsem. 2409 * Call holding cgroup_mutex and threadgroup_lock of @leader.
2372 */ 2410 */
2373static int cgroup_attach_task(struct cgroup *dst_cgrp, 2411static int cgroup_attach_task(struct cgroup *dst_cgrp,
2374 struct task_struct *leader, bool threadgroup) 2412 struct task_struct *leader, bool threadgroup)
@@ -2460,13 +2498,14 @@ static ssize_t __cgroup_procs_write(struct kernfs_open_file *of, char *buf,
2460 if (!cgrp) 2498 if (!cgrp)
2461 return -ENODEV; 2499 return -ENODEV;
2462 2500
2463 percpu_down_write(&cgroup_threadgroup_rwsem); 2501retry_find_task:
2464 rcu_read_lock(); 2502 rcu_read_lock();
2465 if (pid) { 2503 if (pid) {
2466 tsk = find_task_by_vpid(pid); 2504 tsk = find_task_by_vpid(pid);
2467 if (!tsk) { 2505 if (!tsk) {
2506 rcu_read_unlock();
2468 ret = -ESRCH; 2507 ret = -ESRCH;
2469 goto out_unlock_rcu; 2508 goto out_unlock_cgroup;
2470 } 2509 }
2471 } else { 2510 } else {
2472 tsk = current; 2511 tsk = current;
@@ -2482,23 +2521,37 @@ static ssize_t __cgroup_procs_write(struct kernfs_open_file *of, char *buf,
2482 */ 2521 */
2483 if (tsk == kthreadd_task || (tsk->flags & PF_NO_SETAFFINITY)) { 2522 if (tsk == kthreadd_task || (tsk->flags & PF_NO_SETAFFINITY)) {
2484 ret = -EINVAL; 2523 ret = -EINVAL;
2485 goto out_unlock_rcu; 2524 rcu_read_unlock();
2525 goto out_unlock_cgroup;
2486 } 2526 }
2487 2527
2488 get_task_struct(tsk); 2528 get_task_struct(tsk);
2489 rcu_read_unlock(); 2529 rcu_read_unlock();
2490 2530
2531 threadgroup_lock(tsk);
2532 if (threadgroup) {
2533 if (!thread_group_leader(tsk)) {
2534 /*
2535 * a race with de_thread from another thread's exec()
2536 * may strip us of our leadership, if this happens,
2537 * there is no choice but to throw this task away and
2538 * try again; this is
2539 * "double-double-toil-and-trouble-check locking".
2540 */
2541 threadgroup_unlock(tsk);
2542 put_task_struct(tsk);
2543 goto retry_find_task;
2544 }
2545 }
2546
2491 ret = cgroup_procs_write_permission(tsk, cgrp, of); 2547 ret = cgroup_procs_write_permission(tsk, cgrp, of);
2492 if (!ret) 2548 if (!ret)
2493 ret = cgroup_attach_task(cgrp, tsk, threadgroup); 2549 ret = cgroup_attach_task(cgrp, tsk, threadgroup);
2494 2550
2495 put_task_struct(tsk); 2551 threadgroup_unlock(tsk);
2496 goto out_unlock_threadgroup;
2497 2552
2498out_unlock_rcu: 2553 put_task_struct(tsk);
2499 rcu_read_unlock(); 2554out_unlock_cgroup:
2500out_unlock_threadgroup:
2501 percpu_up_write(&cgroup_threadgroup_rwsem);
2502 cgroup_kn_unlock(of->kn); 2555 cgroup_kn_unlock(of->kn);
2503 return ret ?: nbytes; 2556 return ret ?: nbytes;
2504} 2557}
@@ -2643,8 +2696,6 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp)
2643 2696
2644 lockdep_assert_held(&cgroup_mutex); 2697 lockdep_assert_held(&cgroup_mutex);
2645 2698
2646 percpu_down_write(&cgroup_threadgroup_rwsem);
2647
2648 /* look up all csses currently attached to @cgrp's subtree */ 2699 /* look up all csses currently attached to @cgrp's subtree */
2649 down_read(&css_set_rwsem); 2700 down_read(&css_set_rwsem);
2650 css_for_each_descendant_pre(css, cgroup_css(cgrp, NULL)) { 2701 css_for_each_descendant_pre(css, cgroup_css(cgrp, NULL)) {
@@ -2700,8 +2751,17 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp)
2700 goto out_finish; 2751 goto out_finish;
2701 last_task = task; 2752 last_task = task;
2702 2753
2754 threadgroup_lock(task);
2755 /* raced against de_thread() from another thread? */
2756 if (!thread_group_leader(task)) {
2757 threadgroup_unlock(task);
2758 put_task_struct(task);
2759 continue;
2760 }
2761
2703 ret = cgroup_migrate(src_cset->dfl_cgrp, task, true); 2762 ret = cgroup_migrate(src_cset->dfl_cgrp, task, true);
2704 2763
2764 threadgroup_unlock(task);
2705 put_task_struct(task); 2765 put_task_struct(task);
2706 2766
2707 if (WARN(ret, "cgroup: failed to update controllers for the default hierarchy (%d), further operations may crash or hang\n", ret)) 2767 if (WARN(ret, "cgroup: failed to update controllers for the default hierarchy (%d), further operations may crash or hang\n", ret))
@@ -2711,7 +2771,6 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp)
2711 2771
2712out_finish: 2772out_finish:
2713 cgroup_migrate_finish(&preloaded_csets); 2773 cgroup_migrate_finish(&preloaded_csets);
2714 percpu_up_write(&cgroup_threadgroup_rwsem);
2715 return ret; 2774 return ret;
2716} 2775}
2717 2776
@@ -5024,7 +5083,6 @@ int __init cgroup_init(void)
5024 unsigned long key; 5083 unsigned long key;
5025 int ssid, err; 5084 int ssid, err;
5026 5085
5027 BUG_ON(percpu_init_rwsem(&cgroup_threadgroup_rwsem));
5028 BUG_ON(cgroup_init_cftypes(NULL, cgroup_dfl_base_files)); 5086 BUG_ON(cgroup_init_cftypes(NULL, cgroup_dfl_base_files));
5029 BUG_ON(cgroup_init_cftypes(NULL, cgroup_legacy_base_files)); 5087 BUG_ON(cgroup_init_cftypes(NULL, cgroup_legacy_base_files));
5030 5088
diff --git a/kernel/cpu_pm.c b/kernel/cpu_pm.c
index 9656a3c36503..009cc9a17d95 100644
--- a/kernel/cpu_pm.c
+++ b/kernel/cpu_pm.c
@@ -180,7 +180,7 @@ EXPORT_SYMBOL_GPL(cpu_cluster_pm_enter);
180 * low power state that may have caused some blocks in the same power domain 180 * low power state that may have caused some blocks in the same power domain
181 * to reset. 181 * to reset.
182 * 182 *
183 * Must be called after cpu_pm_exit has been called on all cpus in the power 183 * Must be called after cpu_cluster_pm_enter has been called for the power
184 * domain, and before cpu_pm_exit has been called on any cpu in the power 184 * domain, and before cpu_pm_exit has been called on any cpu in the power
185 * domain. Notified drivers can include VFP co-processor, interrupt controller 185 * domain. Notified drivers can include VFP co-processor, interrupt controller
186 * and its PM extensions, local CPU timers context save/restore which 186 * and its PM extensions, local CPU timers context save/restore which
diff --git a/kernel/fork.c b/kernel/fork.c
index 7d5f0f118a63..2845623fb582 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1149,6 +1149,10 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
1149 tty_audit_fork(sig); 1149 tty_audit_fork(sig);
1150 sched_autogroup_fork(sig); 1150 sched_autogroup_fork(sig);
1151 1151
1152#ifdef CONFIG_CGROUPS
1153 init_rwsem(&sig->group_rwsem);
1154#endif
1155
1152 sig->oom_score_adj = current->signal->oom_score_adj; 1156 sig->oom_score_adj = current->signal->oom_score_adj;
1153 sig->oom_score_adj_min = current->signal->oom_score_adj_min; 1157 sig->oom_score_adj_min = current->signal->oom_score_adj_min;
1154 1158
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 6e40a9539763..e28169dd1c36 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -83,7 +83,7 @@ int irq_set_handler_data(unsigned int irq, void *data)
83 83
84 if (!desc) 84 if (!desc)
85 return -EINVAL; 85 return -EINVAL;
86 desc->irq_data.handler_data = data; 86 desc->irq_common_data.handler_data = data;
87 irq_put_desc_unlock(desc, flags); 87 irq_put_desc_unlock(desc, flags);
88 return 0; 88 return 0;
89} 89}
@@ -105,7 +105,7 @@ int irq_set_msi_desc_off(unsigned int irq_base, unsigned int irq_offset,
105 105
106 if (!desc) 106 if (!desc)
107 return -EINVAL; 107 return -EINVAL;
108 desc->irq_data.msi_desc = entry; 108 desc->irq_common_data.msi_desc = entry;
109 if (entry && !irq_offset) 109 if (entry && !irq_offset)
110 entry->irq = irq_base; 110 entry->irq = irq_base;
111 irq_put_desc_unlock(desc, flags); 111 irq_put_desc_unlock(desc, flags);
@@ -372,7 +372,6 @@ static bool irq_may_run(struct irq_desc *desc)
372 372
373/** 373/**
374 * handle_simple_irq - Simple and software-decoded IRQs. 374 * handle_simple_irq - Simple and software-decoded IRQs.
375 * @irq: the interrupt number
376 * @desc: the interrupt description structure for this irq 375 * @desc: the interrupt description structure for this irq
377 * 376 *
378 * Simple interrupts are either sent from a demultiplexing interrupt 377 * Simple interrupts are either sent from a demultiplexing interrupt
@@ -382,8 +381,7 @@ static bool irq_may_run(struct irq_desc *desc)
382 * Note: The caller is expected to handle the ack, clear, mask and 381 * Note: The caller is expected to handle the ack, clear, mask and
383 * unmask issues if necessary. 382 * unmask issues if necessary.
384 */ 383 */
385void 384void handle_simple_irq(struct irq_desc *desc)
386handle_simple_irq(unsigned int irq, struct irq_desc *desc)
387{ 385{
388 raw_spin_lock(&desc->lock); 386 raw_spin_lock(&desc->lock);
389 387
@@ -425,7 +423,6 @@ static void cond_unmask_irq(struct irq_desc *desc)
425 423
426/** 424/**
427 * handle_level_irq - Level type irq handler 425 * handle_level_irq - Level type irq handler
428 * @irq: the interrupt number
429 * @desc: the interrupt description structure for this irq 426 * @desc: the interrupt description structure for this irq
430 * 427 *
431 * Level type interrupts are active as long as the hardware line has 428 * Level type interrupts are active as long as the hardware line has
@@ -433,8 +430,7 @@ static void cond_unmask_irq(struct irq_desc *desc)
433 * it after the associated handler has acknowledged the device, so the 430 * it after the associated handler has acknowledged the device, so the
434 * interrupt line is back to inactive. 431 * interrupt line is back to inactive.
435 */ 432 */
436void 433void handle_level_irq(struct irq_desc *desc)
437handle_level_irq(unsigned int irq, struct irq_desc *desc)
438{ 434{
439 raw_spin_lock(&desc->lock); 435 raw_spin_lock(&desc->lock);
440 mask_ack_irq(desc); 436 mask_ack_irq(desc);
@@ -496,7 +492,6 @@ static void cond_unmask_eoi_irq(struct irq_desc *desc, struct irq_chip *chip)
496 492
497/** 493/**
498 * handle_fasteoi_irq - irq handler for transparent controllers 494 * handle_fasteoi_irq - irq handler for transparent controllers
499 * @irq: the interrupt number
500 * @desc: the interrupt description structure for this irq 495 * @desc: the interrupt description structure for this irq
501 * 496 *
502 * Only a single callback will be issued to the chip: an ->eoi() 497 * Only a single callback will be issued to the chip: an ->eoi()
@@ -504,8 +499,7 @@ static void cond_unmask_eoi_irq(struct irq_desc *desc, struct irq_chip *chip)
504 * for modern forms of interrupt handlers, which handle the flow 499 * for modern forms of interrupt handlers, which handle the flow
505 * details in hardware, transparently. 500 * details in hardware, transparently.
506 */ 501 */
507void 502void handle_fasteoi_irq(struct irq_desc *desc)
508handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
509{ 503{
510 struct irq_chip *chip = desc->irq_data.chip; 504 struct irq_chip *chip = desc->irq_data.chip;
511 505
@@ -546,7 +540,6 @@ EXPORT_SYMBOL_GPL(handle_fasteoi_irq);
546 540
547/** 541/**
548 * handle_edge_irq - edge type IRQ handler 542 * handle_edge_irq - edge type IRQ handler
549 * @irq: the interrupt number
550 * @desc: the interrupt description structure for this irq 543 * @desc: the interrupt description structure for this irq
551 * 544 *
552 * Interrupt occures on the falling and/or rising edge of a hardware 545 * Interrupt occures on the falling and/or rising edge of a hardware
@@ -560,8 +553,7 @@ EXPORT_SYMBOL_GPL(handle_fasteoi_irq);
560 * the handler was running. If all pending interrupts are handled, the 553 * the handler was running. If all pending interrupts are handled, the
561 * loop is left. 554 * loop is left.
562 */ 555 */
563void 556void handle_edge_irq(struct irq_desc *desc)
564handle_edge_irq(unsigned int irq, struct irq_desc *desc)
565{ 557{
566 raw_spin_lock(&desc->lock); 558 raw_spin_lock(&desc->lock);
567 559
@@ -618,13 +610,12 @@ EXPORT_SYMBOL(handle_edge_irq);
618#ifdef CONFIG_IRQ_EDGE_EOI_HANDLER 610#ifdef CONFIG_IRQ_EDGE_EOI_HANDLER
619/** 611/**
620 * handle_edge_eoi_irq - edge eoi type IRQ handler 612 * handle_edge_eoi_irq - edge eoi type IRQ handler
621 * @irq: the interrupt number
622 * @desc: the interrupt description structure for this irq 613 * @desc: the interrupt description structure for this irq
623 * 614 *
624 * Similar as the above handle_edge_irq, but using eoi and w/o the 615 * Similar as the above handle_edge_irq, but using eoi and w/o the
625 * mask/unmask logic. 616 * mask/unmask logic.
626 */ 617 */
627void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc) 618void handle_edge_eoi_irq(struct irq_desc *desc)
628{ 619{
629 struct irq_chip *chip = irq_desc_get_chip(desc); 620 struct irq_chip *chip = irq_desc_get_chip(desc);
630 621
@@ -665,13 +656,11 @@ out_eoi:
665 656
666/** 657/**
667 * handle_percpu_irq - Per CPU local irq handler 658 * handle_percpu_irq - Per CPU local irq handler
668 * @irq: the interrupt number
669 * @desc: the interrupt description structure for this irq 659 * @desc: the interrupt description structure for this irq
670 * 660 *
671 * Per CPU interrupts on SMP machines without locking requirements 661 * Per CPU interrupts on SMP machines without locking requirements
672 */ 662 */
673void 663void handle_percpu_irq(struct irq_desc *desc)
674handle_percpu_irq(unsigned int irq, struct irq_desc *desc)
675{ 664{
676 struct irq_chip *chip = irq_desc_get_chip(desc); 665 struct irq_chip *chip = irq_desc_get_chip(desc);
677 666
@@ -688,7 +677,6 @@ handle_percpu_irq(unsigned int irq, struct irq_desc *desc)
688 677
689/** 678/**
690 * handle_percpu_devid_irq - Per CPU local irq handler with per cpu dev ids 679 * handle_percpu_devid_irq - Per CPU local irq handler with per cpu dev ids
691 * @irq: the interrupt number
692 * @desc: the interrupt description structure for this irq 680 * @desc: the interrupt description structure for this irq
693 * 681 *
694 * Per CPU interrupts on SMP machines without locking requirements. Same as 682 * Per CPU interrupts on SMP machines without locking requirements. Same as
@@ -698,11 +686,12 @@ handle_percpu_irq(unsigned int irq, struct irq_desc *desc)
698 * contain the real device id for the cpu on which this handler is 686 * contain the real device id for the cpu on which this handler is
699 * called 687 * called
700 */ 688 */
701void handle_percpu_devid_irq(unsigned int irq, struct irq_desc *desc) 689void handle_percpu_devid_irq(struct irq_desc *desc)
702{ 690{
703 struct irq_chip *chip = irq_desc_get_chip(desc); 691 struct irq_chip *chip = irq_desc_get_chip(desc);
704 struct irqaction *action = desc->action; 692 struct irqaction *action = desc->action;
705 void *dev_id = raw_cpu_ptr(action->percpu_dev_id); 693 void *dev_id = raw_cpu_ptr(action->percpu_dev_id);
694 unsigned int irq = irq_desc_get_irq(desc);
706 irqreturn_t res; 695 irqreturn_t res;
707 696
708 kstat_incr_irqs_this_cpu(desc); 697 kstat_incr_irqs_this_cpu(desc);
@@ -796,7 +785,7 @@ irq_set_chained_handler_and_data(unsigned int irq, irq_flow_handler_t handle,
796 return; 785 return;
797 786
798 __irq_do_set_handler(desc, handle, 1, NULL); 787 __irq_do_set_handler(desc, handle, 1, NULL);
799 desc->irq_data.handler_data = data; 788 desc->irq_common_data.handler_data = data;
800 789
801 irq_put_desc_busunlock(desc, flags); 790 irq_put_desc_busunlock(desc, flags);
802} 791}
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index b6eeea8a80c5..de41a68fc038 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -27,8 +27,10 @@
27 * 27 *
28 * Handles spurious and unhandled IRQ's. It also prints a debugmessage. 28 * Handles spurious and unhandled IRQ's. It also prints a debugmessage.
29 */ 29 */
30void handle_bad_irq(unsigned int irq, struct irq_desc *desc) 30void handle_bad_irq(struct irq_desc *desc)
31{ 31{
32 unsigned int irq = irq_desc_get_irq(desc);
33
32 print_irq_desc(irq, desc); 34 print_irq_desc(irq, desc);
33 kstat_incr_irqs_this_cpu(desc); 35 kstat_incr_irqs_this_cpu(desc);
34 ack_bad_irq(irq); 36 ack_bad_irq(irq);
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index eee4b385cffb..5ef0c2dbe930 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -194,7 +194,7 @@ static inline void kstat_incr_irqs_this_cpu(struct irq_desc *desc)
194 194
195static inline int irq_desc_get_node(struct irq_desc *desc) 195static inline int irq_desc_get_node(struct irq_desc *desc)
196{ 196{
197 return irq_data_get_node(&desc->irq_data); 197 return irq_common_data_get_node(&desc->irq_common_data);
198} 198}
199 199
200#ifdef CONFIG_PM_SLEEP 200#ifdef CONFIG_PM_SLEEP
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index 0a2a4b697bcb..239e2ae2c947 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -38,12 +38,13 @@ static void __init init_irq_default_affinity(void)
38#ifdef CONFIG_SMP 38#ifdef CONFIG_SMP
39static int alloc_masks(struct irq_desc *desc, gfp_t gfp, int node) 39static int alloc_masks(struct irq_desc *desc, gfp_t gfp, int node)
40{ 40{
41 if (!zalloc_cpumask_var_node(&desc->irq_data.affinity, gfp, node)) 41 if (!zalloc_cpumask_var_node(&desc->irq_common_data.affinity,
42 gfp, node))
42 return -ENOMEM; 43 return -ENOMEM;
43 44
44#ifdef CONFIG_GENERIC_PENDING_IRQ 45#ifdef CONFIG_GENERIC_PENDING_IRQ
45 if (!zalloc_cpumask_var_node(&desc->pending_mask, gfp, node)) { 46 if (!zalloc_cpumask_var_node(&desc->pending_mask, gfp, node)) {
46 free_cpumask_var(desc->irq_data.affinity); 47 free_cpumask_var(desc->irq_common_data.affinity);
47 return -ENOMEM; 48 return -ENOMEM;
48 } 49 }
49#endif 50#endif
@@ -52,11 +53,13 @@ static int alloc_masks(struct irq_desc *desc, gfp_t gfp, int node)
52 53
53static void desc_smp_init(struct irq_desc *desc, int node) 54static void desc_smp_init(struct irq_desc *desc, int node)
54{ 55{
55 desc->irq_data.node = node; 56 cpumask_copy(desc->irq_common_data.affinity, irq_default_affinity);
56 cpumask_copy(desc->irq_data.affinity, irq_default_affinity);
57#ifdef CONFIG_GENERIC_PENDING_IRQ 57#ifdef CONFIG_GENERIC_PENDING_IRQ
58 cpumask_clear(desc->pending_mask); 58 cpumask_clear(desc->pending_mask);
59#endif 59#endif
60#ifdef CONFIG_NUMA
61 desc->irq_common_data.node = node;
62#endif
60} 63}
61 64
62#else 65#else
@@ -70,12 +73,13 @@ static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node,
70{ 73{
71 int cpu; 74 int cpu;
72 75
76 desc->irq_common_data.handler_data = NULL;
77 desc->irq_common_data.msi_desc = NULL;
78
73 desc->irq_data.common = &desc->irq_common_data; 79 desc->irq_data.common = &desc->irq_common_data;
74 desc->irq_data.irq = irq; 80 desc->irq_data.irq = irq;
75 desc->irq_data.chip = &no_irq_chip; 81 desc->irq_data.chip = &no_irq_chip;
76 desc->irq_data.chip_data = NULL; 82 desc->irq_data.chip_data = NULL;
77 desc->irq_data.handler_data = NULL;
78 desc->irq_data.msi_desc = NULL;
79 irq_settings_clr_and_set(desc, ~0, _IRQ_DEFAULT_INIT_FLAGS); 83 irq_settings_clr_and_set(desc, ~0, _IRQ_DEFAULT_INIT_FLAGS);
80 irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED); 84 irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
81 desc->handle_irq = handle_bad_irq; 85 desc->handle_irq = handle_bad_irq;
@@ -121,7 +125,7 @@ static void free_masks(struct irq_desc *desc)
121#ifdef CONFIG_GENERIC_PENDING_IRQ 125#ifdef CONFIG_GENERIC_PENDING_IRQ
122 free_cpumask_var(desc->pending_mask); 126 free_cpumask_var(desc->pending_mask);
123#endif 127#endif
124 free_cpumask_var(desc->irq_data.affinity); 128 free_cpumask_var(desc->irq_common_data.affinity);
125} 129}
126#else 130#else
127static inline void free_masks(struct irq_desc *desc) { } 131static inline void free_masks(struct irq_desc *desc) { }
@@ -343,7 +347,7 @@ int generic_handle_irq(unsigned int irq)
343 347
344 if (!desc) 348 if (!desc)
345 return -EINVAL; 349 return -EINVAL;
346 generic_handle_irq_desc(irq, desc); 350 generic_handle_irq_desc(desc);
347 return 0; 351 return 0;
348} 352}
349EXPORT_SYMBOL_GPL(generic_handle_irq); 353EXPORT_SYMBOL_GPL(generic_handle_irq);
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index 79baaf8a7813..dc9d27c0c158 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -844,7 +844,6 @@ static struct irq_data *irq_domain_insert_irq_data(struct irq_domain *domain,
844 child->parent_data = irq_data; 844 child->parent_data = irq_data;
845 irq_data->irq = child->irq; 845 irq_data->irq = child->irq;
846 irq_data->common = child->common; 846 irq_data->common = child->common;
847 irq_data->node = child->node;
848 irq_data->domain = domain; 847 irq_data->domain = domain;
849 } 848 }
850 849
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index ad1b064f94fe..f9a59f6cabd2 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -192,7 +192,7 @@ int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
192 switch (ret) { 192 switch (ret) {
193 case IRQ_SET_MASK_OK: 193 case IRQ_SET_MASK_OK:
194 case IRQ_SET_MASK_OK_DONE: 194 case IRQ_SET_MASK_OK_DONE:
195 cpumask_copy(data->affinity, mask); 195 cpumask_copy(desc->irq_common_data.affinity, mask);
196 case IRQ_SET_MASK_OK_NOCOPY: 196 case IRQ_SET_MASK_OK_NOCOPY:
197 irq_set_thread_affinity(desc); 197 irq_set_thread_affinity(desc);
198 ret = 0; 198 ret = 0;
@@ -304,7 +304,7 @@ static void irq_affinity_notify(struct work_struct *work)
304 if (irq_move_pending(&desc->irq_data)) 304 if (irq_move_pending(&desc->irq_data))
305 irq_get_pending(cpumask, desc); 305 irq_get_pending(cpumask, desc);
306 else 306 else
307 cpumask_copy(cpumask, desc->irq_data.affinity); 307 cpumask_copy(cpumask, desc->irq_common_data.affinity);
308 raw_spin_unlock_irqrestore(&desc->lock, flags); 308 raw_spin_unlock_irqrestore(&desc->lock, flags);
309 309
310 notify->notify(notify, cpumask); 310 notify->notify(notify, cpumask);
@@ -375,9 +375,9 @@ static int setup_affinity(struct irq_desc *desc, struct cpumask *mask)
375 * one of the targets is online. 375 * one of the targets is online.
376 */ 376 */
377 if (irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) { 377 if (irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
378 if (cpumask_intersects(desc->irq_data.affinity, 378 if (cpumask_intersects(desc->irq_common_data.affinity,
379 cpu_online_mask)) 379 cpu_online_mask))
380 set = desc->irq_data.affinity; 380 set = desc->irq_common_data.affinity;
381 else 381 else
382 irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET); 382 irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
383 } 383 }
@@ -829,8 +829,8 @@ irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
829 * This code is triggered unconditionally. Check the affinity 829 * This code is triggered unconditionally. Check the affinity
830 * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out. 830 * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out.
831 */ 831 */
832 if (desc->irq_data.affinity) 832 if (desc->irq_common_data.affinity)
833 cpumask_copy(mask, desc->irq_data.affinity); 833 cpumask_copy(mask, desc->irq_common_data.affinity);
834 else 834 else
835 valid = false; 835 valid = false;
836 raw_spin_unlock_irq(&desc->lock); 836 raw_spin_unlock_irq(&desc->lock);
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index 0e97c142ce40..e3a8c9577ba6 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -39,7 +39,7 @@ static struct proc_dir_entry *root_irq_dir;
39static int show_irq_affinity(int type, struct seq_file *m, void *v) 39static int show_irq_affinity(int type, struct seq_file *m, void *v)
40{ 40{
41 struct irq_desc *desc = irq_to_desc((long)m->private); 41 struct irq_desc *desc = irq_to_desc((long)m->private);
42 const struct cpumask *mask = desc->irq_data.affinity; 42 const struct cpumask *mask = desc->irq_common_data.affinity;
43 43
44#ifdef CONFIG_GENERIC_PENDING_IRQ 44#ifdef CONFIG_GENERIC_PENDING_IRQ
45 if (irqd_is_setaffinity_pending(&desc->irq_data)) 45 if (irqd_is_setaffinity_pending(&desc->irq_data))
diff --git a/kernel/irq/resend.c b/kernel/irq/resend.c
index dd95f44f99b2..b86886beee4f 100644
--- a/kernel/irq/resend.c
+++ b/kernel/irq/resend.c
@@ -38,7 +38,7 @@ static void resend_irqs(unsigned long arg)
38 clear_bit(irq, irqs_resend); 38 clear_bit(irq, irqs_resend);
39 desc = irq_to_desc(irq); 39 desc = irq_to_desc(irq);
40 local_irq_disable(); 40 local_irq_disable();
41 desc->handle_irq(irq, desc); 41 desc->handle_irq(desc);
42 local_irq_enable(); 42 local_irq_enable();
43 } 43 }
44} 44}
diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c
index 337c8818541d..87e9ce6a63c5 100644
--- a/kernel/locking/qspinlock.c
+++ b/kernel/locking/qspinlock.c
@@ -289,7 +289,7 @@ void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
289 if (pv_enabled()) 289 if (pv_enabled())
290 goto queue; 290 goto queue;
291 291
292 if (virt_queued_spin_lock(lock)) 292 if (virt_spin_lock(lock))
293 return; 293 return;
294 294
295 /* 295 /*
diff --git a/kernel/membarrier.c b/kernel/membarrier.c
new file mode 100644
index 000000000000..536c727a56e9
--- /dev/null
+++ b/kernel/membarrier.c
@@ -0,0 +1,66 @@
1/*
2 * Copyright (C) 2010, 2015 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
3 *
4 * membarrier system call
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 */
16
17#include <linux/syscalls.h>
18#include <linux/membarrier.h>
19
20/*
21 * Bitmask made from a "or" of all commands within enum membarrier_cmd,
22 * except MEMBARRIER_CMD_QUERY.
23 */
24#define MEMBARRIER_CMD_BITMASK (MEMBARRIER_CMD_SHARED)
25
26/**
27 * sys_membarrier - issue memory barriers on a set of threads
28 * @cmd: Takes command values defined in enum membarrier_cmd.
29 * @flags: Currently needs to be 0. For future extensions.
30 *
31 * If this system call is not implemented, -ENOSYS is returned. If the
32 * command specified does not exist, or if the command argument is invalid,
33 * this system call returns -EINVAL. For a given command, with flags argument
34 * set to 0, this system call is guaranteed to always return the same value
35 * until reboot.
36 *
37 * All memory accesses performed in program order from each targeted thread
38 * is guaranteed to be ordered with respect to sys_membarrier(). If we use
39 * the semantic "barrier()" to represent a compiler barrier forcing memory
40 * accesses to be performed in program order across the barrier, and
41 * smp_mb() to represent explicit memory barriers forcing full memory
42 * ordering across the barrier, we have the following ordering table for
43 * each pair of barrier(), sys_membarrier() and smp_mb():
44 *
45 * The pair ordering is detailed as (O: ordered, X: not ordered):
46 *
47 * barrier() smp_mb() sys_membarrier()
48 * barrier() X X O
49 * smp_mb() X O O
50 * sys_membarrier() O O O
51 */
52SYSCALL_DEFINE2(membarrier, int, cmd, int, flags)
53{
54 if (unlikely(flags))
55 return -EINVAL;
56 switch (cmd) {
57 case MEMBARRIER_CMD_QUERY:
58 return MEMBARRIER_CMD_BITMASK;
59 case MEMBARRIER_CMD_SHARED:
60 if (num_online_cpus() > 1)
61 synchronize_sched();
62 return 0;
63 default:
64 return -EINVAL;
65 }
66}
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 3595403921bd..2f9c92884817 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -621,18 +621,21 @@ int get_nohz_timer_target(void)
621 int i, cpu = smp_processor_id(); 621 int i, cpu = smp_processor_id();
622 struct sched_domain *sd; 622 struct sched_domain *sd;
623 623
624 if (!idle_cpu(cpu)) 624 if (!idle_cpu(cpu) && is_housekeeping_cpu(cpu))
625 return cpu; 625 return cpu;
626 626
627 rcu_read_lock(); 627 rcu_read_lock();
628 for_each_domain(cpu, sd) { 628 for_each_domain(cpu, sd) {
629 for_each_cpu(i, sched_domain_span(sd)) { 629 for_each_cpu(i, sched_domain_span(sd)) {
630 if (!idle_cpu(i)) { 630 if (!idle_cpu(i) && is_housekeeping_cpu(cpu)) {
631 cpu = i; 631 cpu = i;
632 goto unlock; 632 goto unlock;
633 } 633 }
634 } 634 }
635 } 635 }
636
637 if (!is_housekeeping_cpu(cpu))
638 cpu = housekeeping_any_cpu();
636unlock: 639unlock:
637 rcu_read_unlock(); 640 rcu_read_unlock();
638 return cpu; 641 return cpu;
@@ -2666,13 +2669,20 @@ unsigned long nr_running(void)
2666 2669
2667/* 2670/*
2668 * Check if only the current task is running on the cpu. 2671 * Check if only the current task is running on the cpu.
2672 *
2673 * Caution: this function does not check that the caller has disabled
2674 * preemption, thus the result might have a time-of-check-to-time-of-use
2675 * race. The caller is responsible to use it correctly, for example:
2676 *
2677 * - from a non-preemptable section (of course)
2678 *
2679 * - from a thread that is bound to a single CPU
2680 *
2681 * - in a loop with very short iterations (e.g. a polling loop)
2669 */ 2682 */
2670bool single_task_running(void) 2683bool single_task_running(void)
2671{ 2684{
2672 if (cpu_rq(smp_processor_id())->nr_running == 1) 2685 return raw_rq()->nr_running == 1;
2673 return true;
2674 else
2675 return false;
2676} 2686}
2677EXPORT_SYMBOL(single_task_running); 2687EXPORT_SYMBOL(single_task_running);
2678 2688
@@ -5178,24 +5188,47 @@ static void migrate_tasks(struct rq *dead_rq)
5178 break; 5188 break;
5179 5189
5180 /* 5190 /*
5181 * Ensure rq->lock covers the entire task selection 5191 * pick_next_task assumes pinned rq->lock.
5182 * until the migration.
5183 */ 5192 */
5184 lockdep_pin_lock(&rq->lock); 5193 lockdep_pin_lock(&rq->lock);
5185 next = pick_next_task(rq, &fake_task); 5194 next = pick_next_task(rq, &fake_task);
5186 BUG_ON(!next); 5195 BUG_ON(!next);
5187 next->sched_class->put_prev_task(rq, next); 5196 next->sched_class->put_prev_task(rq, next);
5188 5197
5198 /*
5199 * Rules for changing task_struct::cpus_allowed are holding
5200 * both pi_lock and rq->lock, such that holding either
5201 * stabilizes the mask.
5202 *
5203 * Drop rq->lock is not quite as disastrous as it usually is
5204 * because !cpu_active at this point, which means load-balance
5205 * will not interfere. Also, stop-machine.
5206 */
5207 lockdep_unpin_lock(&rq->lock);
5208 raw_spin_unlock(&rq->lock);
5209 raw_spin_lock(&next->pi_lock);
5210 raw_spin_lock(&rq->lock);
5211
5212 /*
5213 * Since we're inside stop-machine, _nothing_ should have
5214 * changed the task, WARN if weird stuff happened, because in
5215 * that case the above rq->lock drop is a fail too.
5216 */
5217 if (WARN_ON(task_rq(next) != rq || !task_on_rq_queued(next))) {
5218 raw_spin_unlock(&next->pi_lock);
5219 continue;
5220 }
5221
5189 /* Find suitable destination for @next, with force if needed. */ 5222 /* Find suitable destination for @next, with force if needed. */
5190 dest_cpu = select_fallback_rq(dead_rq->cpu, next); 5223 dest_cpu = select_fallback_rq(dead_rq->cpu, next);
5191 5224
5192 lockdep_unpin_lock(&rq->lock);
5193 rq = __migrate_task(rq, next, dest_cpu); 5225 rq = __migrate_task(rq, next, dest_cpu);
5194 if (rq != dead_rq) { 5226 if (rq != dead_rq) {
5195 raw_spin_unlock(&rq->lock); 5227 raw_spin_unlock(&rq->lock);
5196 rq = dead_rq; 5228 rq = dead_rq;
5197 raw_spin_lock(&rq->lock); 5229 raw_spin_lock(&rq->lock);
5198 } 5230 }
5231 raw_spin_unlock(&next->pi_lock);
5199 } 5232 }
5200 5233
5201 rq->stop = stop; 5234 rq->stop = stop;
diff --git a/kernel/sched/wait.c b/kernel/sched/wait.c
index 272d9322bc5d..052e02672d12 100644
--- a/kernel/sched/wait.c
+++ b/kernel/sched/wait.c
@@ -106,10 +106,9 @@ void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr)
106} 106}
107EXPORT_SYMBOL_GPL(__wake_up_locked); 107EXPORT_SYMBOL_GPL(__wake_up_locked);
108 108
109void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, int nr, 109void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key)
110 void *key)
111{ 110{
112 __wake_up_common(q, mode, nr, 0, key); 111 __wake_up_common(q, mode, 1, 0, key);
113} 112}
114EXPORT_SYMBOL_GPL(__wake_up_locked_key); 113EXPORT_SYMBOL_GPL(__wake_up_locked_key);
115 114
@@ -284,7 +283,7 @@ void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait,
284 if (!list_empty(&wait->task_list)) 283 if (!list_empty(&wait->task_list))
285 list_del_init(&wait->task_list); 284 list_del_init(&wait->task_list);
286 else if (waitqueue_active(q)) 285 else if (waitqueue_active(q))
287 __wake_up_locked_key(q, mode, 1, key); 286 __wake_up_locked_key(q, mode, key);
288 spin_unlock_irqrestore(&q->lock, flags); 287 spin_unlock_irqrestore(&q->lock, flags);
289} 288}
290EXPORT_SYMBOL(abort_exclusive_wait); 289EXPORT_SYMBOL(abort_exclusive_wait);
diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c
index 03c3875d9958..a02decf15583 100644
--- a/kernel/sys_ni.c
+++ b/kernel/sys_ni.c
@@ -245,3 +245,6 @@ cond_syscall(sys_bpf);
245 245
246/* execveat */ 246/* execveat */
247cond_syscall(sys_execveat); 247cond_syscall(sys_execveat);
248
249/* membarrier */
250cond_syscall(sys_membarrier);
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index 50eb107f1198..a9b76a40319e 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -97,20 +97,6 @@ EXPORT_SYMBOL_GPL(clockevent_delta2ns);
97static int __clockevents_switch_state(struct clock_event_device *dev, 97static int __clockevents_switch_state(struct clock_event_device *dev,
98 enum clock_event_state state) 98 enum clock_event_state state)
99{ 99{
100 /* Transition with legacy set_mode() callback */
101 if (dev->set_mode) {
102 /* Legacy callback doesn't support new modes */
103 if (state > CLOCK_EVT_STATE_ONESHOT)
104 return -ENOSYS;
105 /*
106 * 'clock_event_state' and 'clock_event_mode' have 1-to-1
107 * mapping until *_ONESHOT, and so a simple cast will work.
108 */
109 dev->set_mode((enum clock_event_mode)state, dev);
110 dev->mode = (enum clock_event_mode)state;
111 return 0;
112 }
113
114 if (dev->features & CLOCK_EVT_FEAT_DUMMY) 100 if (dev->features & CLOCK_EVT_FEAT_DUMMY)
115 return 0; 101 return 0;
116 102
@@ -204,12 +190,8 @@ int clockevents_tick_resume(struct clock_event_device *dev)
204{ 190{
205 int ret = 0; 191 int ret = 0;
206 192
207 if (dev->set_mode) { 193 if (dev->tick_resume)
208 dev->set_mode(CLOCK_EVT_MODE_RESUME, dev);
209 dev->mode = CLOCK_EVT_MODE_RESUME;
210 } else if (dev->tick_resume) {
211 ret = dev->tick_resume(dev); 194 ret = dev->tick_resume(dev);
212 }
213 195
214 return ret; 196 return ret;
215} 197}
@@ -460,26 +442,6 @@ int clockevents_unbind_device(struct clock_event_device *ced, int cpu)
460} 442}
461EXPORT_SYMBOL_GPL(clockevents_unbind_device); 443EXPORT_SYMBOL_GPL(clockevents_unbind_device);
462 444
463/* Sanity check of state transition callbacks */
464static int clockevents_sanity_check(struct clock_event_device *dev)
465{
466 /* Legacy set_mode() callback */
467 if (dev->set_mode) {
468 /* We shouldn't be supporting new modes now */
469 WARN_ON(dev->set_state_periodic || dev->set_state_oneshot ||
470 dev->set_state_shutdown || dev->tick_resume ||
471 dev->set_state_oneshot_stopped);
472
473 BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED);
474 return 0;
475 }
476
477 if (dev->features & CLOCK_EVT_FEAT_DUMMY)
478 return 0;
479
480 return 0;
481}
482
483/** 445/**
484 * clockevents_register_device - register a clock event device 446 * clockevents_register_device - register a clock event device
485 * @dev: device to register 447 * @dev: device to register
@@ -488,8 +450,6 @@ void clockevents_register_device(struct clock_event_device *dev)
488{ 450{
489 unsigned long flags; 451 unsigned long flags;
490 452
491 BUG_ON(clockevents_sanity_check(dev));
492
493 /* Initialize state to DETACHED */ 453 /* Initialize state to DETACHED */
494 clockevent_set_state(dev, CLOCK_EVT_STATE_DETACHED); 454 clockevent_set_state(dev, CLOCK_EVT_STATE_DETACHED);
495 455
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index d11c55b6ab7d..4fcd99e12aa0 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -398,7 +398,6 @@ void tick_shutdown(unsigned int cpu)
398 * the set mode function! 398 * the set mode function!
399 */ 399 */
400 clockevent_set_state(dev, CLOCK_EVT_STATE_DETACHED); 400 clockevent_set_state(dev, CLOCK_EVT_STATE_DETACHED);
401 dev->mode = CLOCK_EVT_MODE_UNUSED;
402 clockevents_exchange_device(dev, NULL); 401 clockevents_exchange_device(dev, NULL);
403 dev->event_handler = clockevents_handle_noop; 402 dev->event_handler = clockevents_handle_noop;
404 td->evtdev = NULL; 403 td->evtdev = NULL;
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 3319e16f31e5..7c7ec4515983 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -290,16 +290,17 @@ static int __init tick_nohz_full_setup(char *str)
290__setup("nohz_full=", tick_nohz_full_setup); 290__setup("nohz_full=", tick_nohz_full_setup);
291 291
292static int tick_nohz_cpu_down_callback(struct notifier_block *nfb, 292static int tick_nohz_cpu_down_callback(struct notifier_block *nfb,
293 unsigned long action, 293 unsigned long action,
294 void *hcpu) 294 void *hcpu)
295{ 295{
296 unsigned int cpu = (unsigned long)hcpu; 296 unsigned int cpu = (unsigned long)hcpu;
297 297
298 switch (action & ~CPU_TASKS_FROZEN) { 298 switch (action & ~CPU_TASKS_FROZEN) {
299 case CPU_DOWN_PREPARE: 299 case CPU_DOWN_PREPARE:
300 /* 300 /*
301 * If we handle the timekeeping duty for full dynticks CPUs, 301 * The boot CPU handles housekeeping duty (unbound timers,
302 * we can't safely shutdown that CPU. 302 * workqueues, timekeeping, ...) on behalf of full dynticks
303 * CPUs. It must remain online when nohz full is enabled.
303 */ 304 */
304 if (tick_nohz_full_running && tick_do_timer_cpu == cpu) 305 if (tick_nohz_full_running && tick_do_timer_cpu == cpu)
305 return NOTIFY_BAD; 306 return NOTIFY_BAD;
@@ -370,6 +371,12 @@ void __init tick_nohz_init(void)
370 cpu_notifier(tick_nohz_cpu_down_callback, 0); 371 cpu_notifier(tick_nohz_cpu_down_callback, 0);
371 pr_info("NO_HZ: Full dynticks CPUs: %*pbl.\n", 372 pr_info("NO_HZ: Full dynticks CPUs: %*pbl.\n",
372 cpumask_pr_args(tick_nohz_full_mask)); 373 cpumask_pr_args(tick_nohz_full_mask));
374
375 /*
376 * We need at least one CPU to handle housekeeping work such
377 * as timekeeping, unbound timers, workqueues, ...
378 */
379 WARN_ON_ONCE(cpumask_empty(housekeeping_mask));
373} 380}
374#endif 381#endif
375 382
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index f6ee2e6b6f5d..3739ac6aa473 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -1614,7 +1614,7 @@ static __always_inline void timekeeping_freqadjust(struct timekeeper *tk,
1614 negative = (tick_error < 0); 1614 negative = (tick_error < 0);
1615 1615
1616 /* Sort out the magnitude of the correction */ 1616 /* Sort out the magnitude of the correction */
1617 tick_error = abs(tick_error); 1617 tick_error = abs64(tick_error);
1618 for (adj = 0; tick_error > interval; adj++) 1618 for (adj = 0; tick_error > interval; adj++)
1619 tick_error >>= 1; 1619 tick_error >>= 1;
1620 1620
diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
index 129c96033e46..f75e35b60149 100644
--- a/kernel/time/timer_list.c
+++ b/kernel/time/timer_list.c
@@ -225,7 +225,7 @@ print_tickdevice(struct seq_file *m, struct tick_device *td, int cpu)
225 (unsigned long long) dev->min_delta_ns); 225 (unsigned long long) dev->min_delta_ns);
226 SEQ_printf(m, " mult: %u\n", dev->mult); 226 SEQ_printf(m, " mult: %u\n", dev->mult);
227 SEQ_printf(m, " shift: %u\n", dev->shift); 227 SEQ_printf(m, " shift: %u\n", dev->shift);
228 SEQ_printf(m, " mode: %d\n", dev->mode); 228 SEQ_printf(m, " mode: %d\n", clockevent_get_state(dev));
229 SEQ_printf(m, " next_event: %Ld nsecs\n", 229 SEQ_printf(m, " next_event: %Ld nsecs\n",
230 (unsigned long long) ktime_to_ns(dev->next_event)); 230 (unsigned long long) ktime_to_ns(dev->next_event));
231 231
@@ -233,40 +233,34 @@ print_tickdevice(struct seq_file *m, struct tick_device *td, int cpu)
233 print_name_offset(m, dev->set_next_event); 233 print_name_offset(m, dev->set_next_event);
234 SEQ_printf(m, "\n"); 234 SEQ_printf(m, "\n");
235 235
236 if (dev->set_mode) { 236 if (dev->set_state_shutdown) {
237 SEQ_printf(m, " set_mode: "); 237 SEQ_printf(m, " shutdown: ");
238 print_name_offset(m, dev->set_mode); 238 print_name_offset(m, dev->set_state_shutdown);
239 SEQ_printf(m, "\n"); 239 SEQ_printf(m, "\n");
240 } else { 240 }
241 if (dev->set_state_shutdown) {
242 SEQ_printf(m, " shutdown: ");
243 print_name_offset(m, dev->set_state_shutdown);
244 SEQ_printf(m, "\n");
245 }
246 241
247 if (dev->set_state_periodic) { 242 if (dev->set_state_periodic) {
248 SEQ_printf(m, " periodic: "); 243 SEQ_printf(m, " periodic: ");
249 print_name_offset(m, dev->set_state_periodic); 244 print_name_offset(m, dev->set_state_periodic);
250 SEQ_printf(m, "\n"); 245 SEQ_printf(m, "\n");
251 } 246 }
252 247
253 if (dev->set_state_oneshot) { 248 if (dev->set_state_oneshot) {
254 SEQ_printf(m, " oneshot: "); 249 SEQ_printf(m, " oneshot: ");
255 print_name_offset(m, dev->set_state_oneshot); 250 print_name_offset(m, dev->set_state_oneshot);
256 SEQ_printf(m, "\n"); 251 SEQ_printf(m, "\n");
257 } 252 }
258 253
259 if (dev->set_state_oneshot_stopped) { 254 if (dev->set_state_oneshot_stopped) {
260 SEQ_printf(m, " oneshot stopped: "); 255 SEQ_printf(m, " oneshot stopped: ");
261 print_name_offset(m, dev->set_state_oneshot_stopped); 256 print_name_offset(m, dev->set_state_oneshot_stopped);
262 SEQ_printf(m, "\n"); 257 SEQ_printf(m, "\n");
263 } 258 }
264 259
265 if (dev->tick_resume) { 260 if (dev->tick_resume) {
266 SEQ_printf(m, " resume: "); 261 SEQ_printf(m, " resume: ");
267 print_name_offset(m, dev->tick_resume); 262 print_name_offset(m, dev->tick_resume);
268 SEQ_printf(m, "\n"); 263 SEQ_printf(m, "\n");
269 }
270 } 264 }
271 265
272 SEQ_printf(m, " event_handler: "); 266 SEQ_printf(m, " event_handler: ");
diff --git a/lib/iommu-common.c b/lib/iommu-common.c
index ff19f66d3f7f..b1c93e94ca7a 100644
--- a/lib/iommu-common.c
+++ b/lib/iommu-common.c
@@ -21,8 +21,7 @@ static DEFINE_PER_CPU(unsigned int, iommu_hash_common);
21 21
22static inline bool need_flush(struct iommu_map_table *iommu) 22static inline bool need_flush(struct iommu_map_table *iommu)
23{ 23{
24 return (iommu->lazy_flush != NULL && 24 return ((iommu->flags & IOMMU_NEED_FLUSH) != 0);
25 (iommu->flags & IOMMU_NEED_FLUSH) != 0);
26} 25}
27 26
28static inline void set_flush(struct iommu_map_table *iommu) 27static inline void set_flush(struct iommu_map_table *iommu)
@@ -211,7 +210,8 @@ unsigned long iommu_tbl_range_alloc(struct device *dev,
211 goto bail; 210 goto bail;
212 } 211 }
213 } 212 }
214 if (n < pool->hint || need_flush(iommu)) { 213 if (iommu->lazy_flush &&
214 (n < pool->hint || need_flush(iommu))) {
215 clear_flush(iommu); 215 clear_flush(iommu);
216 iommu->lazy_flush(iommu); 216 iommu->lazy_flush(iommu);
217 } 217 }
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index cc0c69710dcf..a54ff8949f91 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -187,10 +187,7 @@ static int rhashtable_rehash_one(struct rhashtable *ht, unsigned int old_hash)
187 head = rht_dereference_bucket(new_tbl->buckets[new_hash], 187 head = rht_dereference_bucket(new_tbl->buckets[new_hash],
188 new_tbl, new_hash); 188 new_tbl, new_hash);
189 189
190 if (rht_is_a_nulls(head)) 190 RCU_INIT_POINTER(entry->next, head);
191 INIT_RHT_NULLS_HEAD(entry->next, ht, new_hash);
192 else
193 RCU_INIT_POINTER(entry->next, head);
194 191
195 rcu_assign_pointer(new_tbl->buckets[new_hash], entry); 192 rcu_assign_pointer(new_tbl->buckets[new_hash], entry);
196 spin_unlock(new_bucket_lock); 193 spin_unlock(new_bucket_lock);
diff --git a/lib/string_helpers.c b/lib/string_helpers.c
index 54036ce2e2dd..5939f63d90cd 100644
--- a/lib/string_helpers.c
+++ b/lib/string_helpers.c
@@ -59,7 +59,11 @@ void string_get_size(u64 size, u64 blk_size, const enum string_size_units units,
59 } 59 }
60 60
61 exp = divisor[units] / (u32)blk_size; 61 exp = divisor[units] / (u32)blk_size;
62 if (size >= exp) { 62 /*
63 * size must be strictly greater than exp here to ensure that remainder
64 * is greater than divisor[units] coming out of the if below.
65 */
66 if (size > exp) {
63 remainder = do_div(size, divisor[units]); 67 remainder = do_div(size, divisor[units]);
64 remainder *= blk_size; 68 remainder *= blk_size;
65 i++; 69 i++;
diff --git a/mm/Kconfig b/mm/Kconfig
index 6413d027c0b2..0d9fdcd01e47 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -677,3 +677,6 @@ config ZONE_DEVICE
677 mapping in an O_DIRECT operation, among other things. 677 mapping in an O_DIRECT operation, among other things.
678 678
679 If FS_DAX is enabled, then say Y. 679 If FS_DAX is enabled, then say Y.
680
681config FRAME_VECTOR
682 bool
diff --git a/mm/Makefile b/mm/Makefile
index 56f8eed73f1a..2ed43191fc3b 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -80,3 +80,4 @@ obj-$(CONFIG_PAGE_EXTENSION) += page_ext.o
80obj-$(CONFIG_CMA_DEBUGFS) += cma_debug.o 80obj-$(CONFIG_CMA_DEBUGFS) += cma_debug.o
81obj-$(CONFIG_USERFAULTFD) += userfaultfd.o 81obj-$(CONFIG_USERFAULTFD) += userfaultfd.o
82obj-$(CONFIG_IDLE_PAGE_TRACKING) += page_idle.o 82obj-$(CONFIG_IDLE_PAGE_TRACKING) += page_idle.o
83obj-$(CONFIG_FRAME_VECTOR) += frame_vector.o
diff --git a/mm/early_ioremap.c b/mm/early_ioremap.c
index 23f744d77ce0..17ae14b5aefa 100644
--- a/mm/early_ioremap.c
+++ b/mm/early_ioremap.c
@@ -15,6 +15,7 @@
15#include <linux/mm.h> 15#include <linux/mm.h>
16#include <linux/vmalloc.h> 16#include <linux/vmalloc.h>
17#include <asm/fixmap.h> 17#include <asm/fixmap.h>
18#include <asm/early_ioremap.h>
18 19
19#ifdef CONFIG_MMU 20#ifdef CONFIG_MMU
20static int early_ioremap_debug __initdata; 21static int early_ioremap_debug __initdata;
diff --git a/mm/frame_vector.c b/mm/frame_vector.c
new file mode 100644
index 000000000000..cdabcb93c6a6
--- /dev/null
+++ b/mm/frame_vector.c
@@ -0,0 +1,230 @@
1#include <linux/kernel.h>
2#include <linux/errno.h>
3#include <linux/err.h>
4#include <linux/mm.h>
5#include <linux/slab.h>
6#include <linux/vmalloc.h>
7#include <linux/pagemap.h>
8#include <linux/sched.h>
9
10/*
11 * get_vaddr_frames() - map virtual addresses to pfns
12 * @start: starting user address
13 * @nr_frames: number of pages / pfns from start to map
14 * @write: whether pages will be written to by the caller
15 * @force: whether to force write access even if user mapping is
16 * readonly. See description of the same argument of
17 get_user_pages().
18 * @vec: structure which receives pages / pfns of the addresses mapped.
19 * It should have space for at least nr_frames entries.
20 *
21 * This function maps virtual addresses from @start and fills @vec structure
22 * with page frame numbers or page pointers to corresponding pages (choice
23 * depends on the type of the vma underlying the virtual address). If @start
24 * belongs to a normal vma, the function grabs reference to each of the pages
25 * to pin them in memory. If @start belongs to VM_IO | VM_PFNMAP vma, we don't
26 * touch page structures and the caller must make sure pfns aren't reused for
27 * anything else while he is using them.
28 *
29 * The function returns number of pages mapped which may be less than
30 * @nr_frames. In particular we stop mapping if there are more vmas of
31 * different type underlying the specified range of virtual addresses.
32 * When the function isn't able to map a single page, it returns error.
33 *
34 * This function takes care of grabbing mmap_sem as necessary.
35 */
36int get_vaddr_frames(unsigned long start, unsigned int nr_frames,
37 bool write, bool force, struct frame_vector *vec)
38{
39 struct mm_struct *mm = current->mm;
40 struct vm_area_struct *vma;
41 int ret = 0;
42 int err;
43 int locked;
44
45 if (nr_frames == 0)
46 return 0;
47
48 if (WARN_ON_ONCE(nr_frames > vec->nr_allocated))
49 nr_frames = vec->nr_allocated;
50
51 down_read(&mm->mmap_sem);
52 locked = 1;
53 vma = find_vma_intersection(mm, start, start + 1);
54 if (!vma) {
55 ret = -EFAULT;
56 goto out;
57 }
58 if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) {
59 vec->got_ref = true;
60 vec->is_pfns = false;
61 ret = get_user_pages_locked(current, mm, start, nr_frames,
62 write, force, (struct page **)(vec->ptrs), &locked);
63 goto out;
64 }
65
66 vec->got_ref = false;
67 vec->is_pfns = true;
68 do {
69 unsigned long *nums = frame_vector_pfns(vec);
70
71 while (ret < nr_frames && start + PAGE_SIZE <= vma->vm_end) {
72 err = follow_pfn(vma, start, &nums[ret]);
73 if (err) {
74 if (ret == 0)
75 ret = err;
76 goto out;
77 }
78 start += PAGE_SIZE;
79 ret++;
80 }
81 /*
82 * We stop if we have enough pages or if VMA doesn't completely
83 * cover the tail page.
84 */
85 if (ret >= nr_frames || start < vma->vm_end)
86 break;
87 vma = find_vma_intersection(mm, start, start + 1);
88 } while (vma && vma->vm_flags & (VM_IO | VM_PFNMAP));
89out:
90 if (locked)
91 up_read(&mm->mmap_sem);
92 if (!ret)
93 ret = -EFAULT;
94 if (ret > 0)
95 vec->nr_frames = ret;
96 return ret;
97}
98EXPORT_SYMBOL(get_vaddr_frames);
99
100/**
101 * put_vaddr_frames() - drop references to pages if get_vaddr_frames() acquired
102 * them
103 * @vec: frame vector to put
104 *
105 * Drop references to pages if get_vaddr_frames() acquired them. We also
106 * invalidate the frame vector so that it is prepared for the next call into
107 * get_vaddr_frames().
108 */
109void put_vaddr_frames(struct frame_vector *vec)
110{
111 int i;
112 struct page **pages;
113
114 if (!vec->got_ref)
115 goto out;
116 pages = frame_vector_pages(vec);
117 /*
118 * frame_vector_pages() might needed to do a conversion when
119 * get_vaddr_frames() got pages but vec was later converted to pfns.
120 * But it shouldn't really fail to convert pfns back...
121 */
122 if (WARN_ON(IS_ERR(pages)))
123 goto out;
124 for (i = 0; i < vec->nr_frames; i++)
125 put_page(pages[i]);
126 vec->got_ref = false;
127out:
128 vec->nr_frames = 0;
129}
130EXPORT_SYMBOL(put_vaddr_frames);
131
132/**
133 * frame_vector_to_pages - convert frame vector to contain page pointers
134 * @vec: frame vector to convert
135 *
136 * Convert @vec to contain array of page pointers. If the conversion is
137 * successful, return 0. Otherwise return an error. Note that we do not grab
138 * page references for the page structures.
139 */
140int frame_vector_to_pages(struct frame_vector *vec)
141{
142 int i;
143 unsigned long *nums;
144 struct page **pages;
145
146 if (!vec->is_pfns)
147 return 0;
148 nums = frame_vector_pfns(vec);
149 for (i = 0; i < vec->nr_frames; i++)
150 if (!pfn_valid(nums[i]))
151 return -EINVAL;
152 pages = (struct page **)nums;
153 for (i = 0; i < vec->nr_frames; i++)
154 pages[i] = pfn_to_page(nums[i]);
155 vec->is_pfns = false;
156 return 0;
157}
158EXPORT_SYMBOL(frame_vector_to_pages);
159
160/**
161 * frame_vector_to_pfns - convert frame vector to contain pfns
162 * @vec: frame vector to convert
163 *
164 * Convert @vec to contain array of pfns.
165 */
166void frame_vector_to_pfns(struct frame_vector *vec)
167{
168 int i;
169 unsigned long *nums;
170 struct page **pages;
171
172 if (vec->is_pfns)
173 return;
174 pages = (struct page **)(vec->ptrs);
175 nums = (unsigned long *)pages;
176 for (i = 0; i < vec->nr_frames; i++)
177 nums[i] = page_to_pfn(pages[i]);
178 vec->is_pfns = true;
179}
180EXPORT_SYMBOL(frame_vector_to_pfns);
181
182/**
183 * frame_vector_create() - allocate & initialize structure for pinned pfns
184 * @nr_frames: number of pfns slots we should reserve
185 *
186 * Allocate and initialize struct pinned_pfns to be able to hold @nr_pfns
187 * pfns.
188 */
189struct frame_vector *frame_vector_create(unsigned int nr_frames)
190{
191 struct frame_vector *vec;
192 int size = sizeof(struct frame_vector) + sizeof(void *) * nr_frames;
193
194 if (WARN_ON_ONCE(nr_frames == 0))
195 return NULL;
196 /*
197 * This is absurdly high. It's here just to avoid strange effects when
198 * arithmetics overflows.
199 */
200 if (WARN_ON_ONCE(nr_frames > INT_MAX / sizeof(void *) / 2))
201 return NULL;
202 /*
203 * Avoid higher order allocations, use vmalloc instead. It should
204 * be rare anyway.
205 */
206 if (size <= PAGE_SIZE)
207 vec = kmalloc(size, GFP_KERNEL);
208 else
209 vec = vmalloc(size);
210 if (!vec)
211 return NULL;
212 vec->nr_allocated = nr_frames;
213 vec->nr_frames = 0;
214 return vec;
215}
216EXPORT_SYMBOL(frame_vector_create);
217
218/**
219 * frame_vector_destroy() - free memory allocated to carry frame vector
220 * @vec: Frame vector to free
221 *
222 * Free structure allocated by frame_vector_create() to carry frames.
223 */
224void frame_vector_destroy(struct frame_vector *vec)
225{
226 /* Make sure put_vaddr_frames() got called properly... */
227 VM_BUG_ON(vec->nr_frames > 0);
228 kvfree(vec);
229}
230EXPORT_SYMBOL(frame_vector_destroy);
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
index 7b28e9cdf1c7..8da211411b57 100644
--- a/mm/kasan/kasan.c
+++ b/mm/kasan/kasan.c
@@ -135,12 +135,11 @@ static __always_inline bool memory_is_poisoned_16(unsigned long addr)
135 135
136 if (unlikely(*shadow_addr)) { 136 if (unlikely(*shadow_addr)) {
137 u16 shadow_first_bytes = *(u16 *)shadow_addr; 137 u16 shadow_first_bytes = *(u16 *)shadow_addr;
138 s8 last_byte = (addr + 15) & KASAN_SHADOW_MASK;
139 138
140 if (unlikely(shadow_first_bytes)) 139 if (unlikely(shadow_first_bytes))
141 return true; 140 return true;
142 141
143 if (likely(!last_byte)) 142 if (likely(IS_ALIGNED(addr, 8)))
144 return false; 143 return false;
145 144
146 return memory_is_poisoned_1(addr + 15); 145 return memory_is_poisoned_1(addr + 15);
diff --git a/mm/migrate.c b/mm/migrate.c
index c3cb566af3e2..7452a00bbb50 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1075,7 +1075,7 @@ out:
1075 if (rc != MIGRATEPAGE_SUCCESS && put_new_page) 1075 if (rc != MIGRATEPAGE_SUCCESS && put_new_page)
1076 put_new_page(new_hpage, private); 1076 put_new_page(new_hpage, private);
1077 else 1077 else
1078 put_page(new_hpage); 1078 putback_active_hugepage(new_hpage);
1079 1079
1080 if (result) { 1080 if (result) {
1081 if (rc) 1081 if (rc)
diff --git a/mm/mmap.c b/mm/mmap.c
index 971dd2cb77d2..79bcc9f92e48 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -612,8 +612,6 @@ static unsigned long count_vma_pages_range(struct mm_struct *mm,
612void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma, 612void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma,
613 struct rb_node **rb_link, struct rb_node *rb_parent) 613 struct rb_node **rb_link, struct rb_node *rb_parent)
614{ 614{
615 WARN_ONCE(vma->vm_file && !vma->vm_ops, "missing vma->vm_ops");
616
617 /* Update tracking information for the gap following the new vma. */ 615 /* Update tracking information for the gap following the new vma. */
618 if (vma->vm_next) 616 if (vma->vm_next)
619 vma_gap_update(vma->vm_next); 617 vma_gap_update(vma->vm_next);
@@ -1492,13 +1490,14 @@ SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg)
1492int vma_wants_writenotify(struct vm_area_struct *vma) 1490int vma_wants_writenotify(struct vm_area_struct *vma)
1493{ 1491{
1494 vm_flags_t vm_flags = vma->vm_flags; 1492 vm_flags_t vm_flags = vma->vm_flags;
1493 const struct vm_operations_struct *vm_ops = vma->vm_ops;
1495 1494
1496 /* If it was private or non-writable, the write bit is already clear */ 1495 /* If it was private or non-writable, the write bit is already clear */
1497 if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED))) 1496 if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
1498 return 0; 1497 return 0;
1499 1498
1500 /* The backer wishes to know when pages are first written to? */ 1499 /* The backer wishes to know when pages are first written to? */
1501 if (vma->vm_ops && vma->vm_ops->page_mkwrite) 1500 if (vm_ops && (vm_ops->page_mkwrite || vm_ops->pfn_mkwrite))
1502 return 1; 1501 return 1;
1503 1502
1504 /* The open routine did something to the protections that pgprot_modify 1503 /* The open routine did something to the protections that pgprot_modify
@@ -1638,12 +1637,6 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
1638 */ 1637 */
1639 WARN_ON_ONCE(addr != vma->vm_start); 1638 WARN_ON_ONCE(addr != vma->vm_start);
1640 1639
1641 /* All file mapping must have ->vm_ops set */
1642 if (!vma->vm_ops) {
1643 static const struct vm_operations_struct dummy_ops = {};
1644 vma->vm_ops = &dummy_ops;
1645 }
1646
1647 addr = vma->vm_start; 1640 addr = vma->vm_start;
1648 vm_flags = vma->vm_flags; 1641 vm_flags = vma->vm_flags;
1649 } else if (vm_flags & VM_SHARED) { 1642 } else if (vm_flags & VM_SHARED) {
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 2d978b28a410..7f63a9381f71 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -175,7 +175,7 @@ static bool sane_reclaim(struct scan_control *sc)
175 if (!memcg) 175 if (!memcg)
176 return true; 176 return true;
177#ifdef CONFIG_CGROUP_WRITEBACK 177#ifdef CONFIG_CGROUP_WRITEBACK
178 if (memcg->css.cgroup) 178 if (cgroup_on_dfl(memcg->css.cgroup))
179 return true; 179 return true;
180#endif 180#endif
181 return false; 181 return false;
diff --git a/net/atm/clip.c b/net/atm/clip.c
index 17e55dfecbe2..e07f551a863c 100644
--- a/net/atm/clip.c
+++ b/net/atm/clip.c
@@ -317,6 +317,9 @@ static int clip_constructor(struct neighbour *neigh)
317 317
318static int clip_encap(struct atm_vcc *vcc, int mode) 318static int clip_encap(struct atm_vcc *vcc, int mode)
319{ 319{
320 if (!CLIP_VCC(vcc))
321 return -EBADFD;
322
320 CLIP_VCC(vcc)->encap = mode; 323 CLIP_VCC(vcc)->encap = mode;
321 return 0; 324 return 0;
322} 325}
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index 4d56e593faad..25644e1bc479 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -2311,12 +2311,6 @@ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level)
2311 if (!conn) 2311 if (!conn)
2312 return 1; 2312 return 1;
2313 2313
2314 chan = conn->smp;
2315 if (!chan) {
2316 BT_ERR("SMP security requested but not available");
2317 return 1;
2318 }
2319
2320 if (!hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED)) 2314 if (!hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED))
2321 return 1; 2315 return 1;
2322 2316
@@ -2330,6 +2324,12 @@ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level)
2330 if (smp_ltk_encrypt(conn, hcon->pending_sec_level)) 2324 if (smp_ltk_encrypt(conn, hcon->pending_sec_level))
2331 return 0; 2325 return 0;
2332 2326
2327 chan = conn->smp;
2328 if (!chan) {
2329 BT_ERR("SMP security requested but not available");
2330 return 1;
2331 }
2332
2333 l2cap_chan_lock(chan); 2333 l2cap_chan_lock(chan);
2334 2334
2335 /* If SMP is already in progress ignore this request */ 2335 /* If SMP is already in progress ignore this request */
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index b4d858a18eb6..03661d97463c 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -1006,7 +1006,7 @@ static int br_ip4_multicast_igmp3_report(struct net_bridge *br,
1006 1006
1007 ih = igmpv3_report_hdr(skb); 1007 ih = igmpv3_report_hdr(skb);
1008 num = ntohs(ih->ngrec); 1008 num = ntohs(ih->ngrec);
1009 len = sizeof(*ih); 1009 len = skb_transport_offset(skb) + sizeof(*ih);
1010 1010
1011 for (i = 0; i < num; i++) { 1011 for (i = 0; i < num; i++) {
1012 len += sizeof(*grec); 1012 len += sizeof(*grec);
@@ -1067,7 +1067,7 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br,
1067 1067
1068 icmp6h = icmp6_hdr(skb); 1068 icmp6h = icmp6_hdr(skb);
1069 num = ntohs(icmp6h->icmp6_dataun.un_data16[1]); 1069 num = ntohs(icmp6h->icmp6_dataun.un_data16[1]);
1070 len = sizeof(*icmp6h); 1070 len = skb_transport_offset(skb) + sizeof(*icmp6h);
1071 1071
1072 for (i = 0; i < num; i++) { 1072 for (i = 0; i < num; i++) {
1073 __be16 *nsrcs, _nsrcs; 1073 __be16 *nsrcs, _nsrcs;
diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c
index 69a4d30a9ccf..54a00d66509e 100644
--- a/net/ceph/ceph_common.c
+++ b/net/ceph/ceph_common.c
@@ -357,6 +357,7 @@ ceph_parse_options(char *options, const char *dev_name,
357 opt->osd_keepalive_timeout = CEPH_OSD_KEEPALIVE_DEFAULT; 357 opt->osd_keepalive_timeout = CEPH_OSD_KEEPALIVE_DEFAULT;
358 opt->mount_timeout = CEPH_MOUNT_TIMEOUT_DEFAULT; 358 opt->mount_timeout = CEPH_MOUNT_TIMEOUT_DEFAULT;
359 opt->osd_idle_ttl = CEPH_OSD_IDLE_TTL_DEFAULT; 359 opt->osd_idle_ttl = CEPH_OSD_IDLE_TTL_DEFAULT;
360 opt->monc_ping_timeout = CEPH_MONC_PING_TIMEOUT_DEFAULT;
360 361
361 /* get mon ip(s) */ 362 /* get mon ip(s) */
362 /* ip1[:port1][,ip2[:port2]...] */ 363 /* ip1[:port1][,ip2[:port2]...] */
diff --git a/net/ceph/crypto.c b/net/ceph/crypto.c
index 790fe89d90c0..4440edcce0d6 100644
--- a/net/ceph/crypto.c
+++ b/net/ceph/crypto.c
@@ -79,10 +79,6 @@ int ceph_crypto_key_unarmor(struct ceph_crypto_key *key, const char *inkey)
79 return 0; 79 return 0;
80} 80}
81 81
82
83
84#define AES_KEY_SIZE 16
85
86static struct crypto_blkcipher *ceph_crypto_alloc_cipher(void) 82static struct crypto_blkcipher *ceph_crypto_alloc_cipher(void)
87{ 83{
88 return crypto_alloc_blkcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC); 84 return crypto_alloc_blkcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index e3be1d22a247..b9b0e3b5da49 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -163,6 +163,7 @@ static struct kmem_cache *ceph_msg_data_cache;
163static char tag_msg = CEPH_MSGR_TAG_MSG; 163static char tag_msg = CEPH_MSGR_TAG_MSG;
164static char tag_ack = CEPH_MSGR_TAG_ACK; 164static char tag_ack = CEPH_MSGR_TAG_ACK;
165static char tag_keepalive = CEPH_MSGR_TAG_KEEPALIVE; 165static char tag_keepalive = CEPH_MSGR_TAG_KEEPALIVE;
166static char tag_keepalive2 = CEPH_MSGR_TAG_KEEPALIVE2;
166 167
167#ifdef CONFIG_LOCKDEP 168#ifdef CONFIG_LOCKDEP
168static struct lock_class_key socket_class; 169static struct lock_class_key socket_class;
@@ -176,7 +177,7 @@ static struct lock_class_key socket_class;
176 177
177static void queue_con(struct ceph_connection *con); 178static void queue_con(struct ceph_connection *con);
178static void cancel_con(struct ceph_connection *con); 179static void cancel_con(struct ceph_connection *con);
179static void con_work(struct work_struct *); 180static void ceph_con_workfn(struct work_struct *);
180static void con_fault(struct ceph_connection *con); 181static void con_fault(struct ceph_connection *con);
181 182
182/* 183/*
@@ -276,22 +277,22 @@ static void _ceph_msgr_exit(void)
276 ceph_msgr_wq = NULL; 277 ceph_msgr_wq = NULL;
277 } 278 }
278 279
279 ceph_msgr_slab_exit();
280
281 BUG_ON(zero_page == NULL); 280 BUG_ON(zero_page == NULL);
282 page_cache_release(zero_page); 281 page_cache_release(zero_page);
283 zero_page = NULL; 282 zero_page = NULL;
283
284 ceph_msgr_slab_exit();
284} 285}
285 286
286int ceph_msgr_init(void) 287int ceph_msgr_init(void)
287{ 288{
289 if (ceph_msgr_slab_init())
290 return -ENOMEM;
291
288 BUG_ON(zero_page != NULL); 292 BUG_ON(zero_page != NULL);
289 zero_page = ZERO_PAGE(0); 293 zero_page = ZERO_PAGE(0);
290 page_cache_get(zero_page); 294 page_cache_get(zero_page);
291 295
292 if (ceph_msgr_slab_init())
293 return -ENOMEM;
294
295 /* 296 /*
296 * The number of active work items is limited by the number of 297 * The number of active work items is limited by the number of
297 * connections, so leave @max_active at default. 298 * connections, so leave @max_active at default.
@@ -749,7 +750,7 @@ void ceph_con_init(struct ceph_connection *con, void *private,
749 mutex_init(&con->mutex); 750 mutex_init(&con->mutex);
750 INIT_LIST_HEAD(&con->out_queue); 751 INIT_LIST_HEAD(&con->out_queue);
751 INIT_LIST_HEAD(&con->out_sent); 752 INIT_LIST_HEAD(&con->out_sent);
752 INIT_DELAYED_WORK(&con->work, con_work); 753 INIT_DELAYED_WORK(&con->work, ceph_con_workfn);
753 754
754 con->state = CON_STATE_CLOSED; 755 con->state = CON_STATE_CLOSED;
755} 756}
@@ -1351,7 +1352,16 @@ static void prepare_write_keepalive(struct ceph_connection *con)
1351{ 1352{
1352 dout("prepare_write_keepalive %p\n", con); 1353 dout("prepare_write_keepalive %p\n", con);
1353 con_out_kvec_reset(con); 1354 con_out_kvec_reset(con);
1354 con_out_kvec_add(con, sizeof (tag_keepalive), &tag_keepalive); 1355 if (con->peer_features & CEPH_FEATURE_MSGR_KEEPALIVE2) {
1356 struct timespec now = CURRENT_TIME;
1357
1358 con_out_kvec_add(con, sizeof(tag_keepalive2), &tag_keepalive2);
1359 ceph_encode_timespec(&con->out_temp_keepalive2, &now);
1360 con_out_kvec_add(con, sizeof(con->out_temp_keepalive2),
1361 &con->out_temp_keepalive2);
1362 } else {
1363 con_out_kvec_add(con, sizeof(tag_keepalive), &tag_keepalive);
1364 }
1355 con_flag_set(con, CON_FLAG_WRITE_PENDING); 1365 con_flag_set(con, CON_FLAG_WRITE_PENDING);
1356} 1366}
1357 1367
@@ -1625,6 +1635,12 @@ static void prepare_read_tag(struct ceph_connection *con)
1625 con->in_tag = CEPH_MSGR_TAG_READY; 1635 con->in_tag = CEPH_MSGR_TAG_READY;
1626} 1636}
1627 1637
1638static void prepare_read_keepalive_ack(struct ceph_connection *con)
1639{
1640 dout("prepare_read_keepalive_ack %p\n", con);
1641 con->in_base_pos = 0;
1642}
1643
1628/* 1644/*
1629 * Prepare to read a message. 1645 * Prepare to read a message.
1630 */ 1646 */
@@ -2322,13 +2338,6 @@ static int read_partial_message(struct ceph_connection *con)
2322 return ret; 2338 return ret;
2323 2339
2324 BUG_ON(!con->in_msg ^ skip); 2340 BUG_ON(!con->in_msg ^ skip);
2325 if (con->in_msg && data_len > con->in_msg->data_length) {
2326 pr_warn("%s skipping long message (%u > %zd)\n",
2327 __func__, data_len, con->in_msg->data_length);
2328 ceph_msg_put(con->in_msg);
2329 con->in_msg = NULL;
2330 skip = 1;
2331 }
2332 if (skip) { 2341 if (skip) {
2333 /* skip this message */ 2342 /* skip this message */
2334 dout("alloc_msg said skip message\n"); 2343 dout("alloc_msg said skip message\n");
@@ -2457,6 +2466,17 @@ static void process_message(struct ceph_connection *con)
2457 mutex_lock(&con->mutex); 2466 mutex_lock(&con->mutex);
2458} 2467}
2459 2468
2469static int read_keepalive_ack(struct ceph_connection *con)
2470{
2471 struct ceph_timespec ceph_ts;
2472 size_t size = sizeof(ceph_ts);
2473 int ret = read_partial(con, size, size, &ceph_ts);
2474 if (ret <= 0)
2475 return ret;
2476 ceph_decode_timespec(&con->last_keepalive_ack, &ceph_ts);
2477 prepare_read_tag(con);
2478 return 1;
2479}
2460 2480
2461/* 2481/*
2462 * Write something to the socket. Called in a worker thread when the 2482 * Write something to the socket. Called in a worker thread when the
@@ -2526,6 +2546,10 @@ more_kvec:
2526 2546
2527do_next: 2547do_next:
2528 if (con->state == CON_STATE_OPEN) { 2548 if (con->state == CON_STATE_OPEN) {
2549 if (con_flag_test_and_clear(con, CON_FLAG_KEEPALIVE_PENDING)) {
2550 prepare_write_keepalive(con);
2551 goto more;
2552 }
2529 /* is anything else pending? */ 2553 /* is anything else pending? */
2530 if (!list_empty(&con->out_queue)) { 2554 if (!list_empty(&con->out_queue)) {
2531 prepare_write_message(con); 2555 prepare_write_message(con);
@@ -2535,10 +2559,6 @@ do_next:
2535 prepare_write_ack(con); 2559 prepare_write_ack(con);
2536 goto more; 2560 goto more;
2537 } 2561 }
2538 if (con_flag_test_and_clear(con, CON_FLAG_KEEPALIVE_PENDING)) {
2539 prepare_write_keepalive(con);
2540 goto more;
2541 }
2542 } 2562 }
2543 2563
2544 /* Nothing to do! */ 2564 /* Nothing to do! */
@@ -2641,6 +2661,9 @@ more:
2641 case CEPH_MSGR_TAG_ACK: 2661 case CEPH_MSGR_TAG_ACK:
2642 prepare_read_ack(con); 2662 prepare_read_ack(con);
2643 break; 2663 break;
2664 case CEPH_MSGR_TAG_KEEPALIVE2_ACK:
2665 prepare_read_keepalive_ack(con);
2666 break;
2644 case CEPH_MSGR_TAG_CLOSE: 2667 case CEPH_MSGR_TAG_CLOSE:
2645 con_close_socket(con); 2668 con_close_socket(con);
2646 con->state = CON_STATE_CLOSED; 2669 con->state = CON_STATE_CLOSED;
@@ -2684,6 +2707,12 @@ more:
2684 process_ack(con); 2707 process_ack(con);
2685 goto more; 2708 goto more;
2686 } 2709 }
2710 if (con->in_tag == CEPH_MSGR_TAG_KEEPALIVE2_ACK) {
2711 ret = read_keepalive_ack(con);
2712 if (ret <= 0)
2713 goto out;
2714 goto more;
2715 }
2687 2716
2688out: 2717out:
2689 dout("try_read done on %p ret %d\n", con, ret); 2718 dout("try_read done on %p ret %d\n", con, ret);
@@ -2799,7 +2828,7 @@ static void con_fault_finish(struct ceph_connection *con)
2799/* 2828/*
2800 * Do some work on a connection. Drop a connection ref when we're done. 2829 * Do some work on a connection. Drop a connection ref when we're done.
2801 */ 2830 */
2802static void con_work(struct work_struct *work) 2831static void ceph_con_workfn(struct work_struct *work)
2803{ 2832{
2804 struct ceph_connection *con = container_of(work, struct ceph_connection, 2833 struct ceph_connection *con = container_of(work, struct ceph_connection,
2805 work.work); 2834 work.work);
@@ -3101,6 +3130,20 @@ void ceph_con_keepalive(struct ceph_connection *con)
3101} 3130}
3102EXPORT_SYMBOL(ceph_con_keepalive); 3131EXPORT_SYMBOL(ceph_con_keepalive);
3103 3132
3133bool ceph_con_keepalive_expired(struct ceph_connection *con,
3134 unsigned long interval)
3135{
3136 if (interval > 0 &&
3137 (con->peer_features & CEPH_FEATURE_MSGR_KEEPALIVE2)) {
3138 struct timespec now = CURRENT_TIME;
3139 struct timespec ts;
3140 jiffies_to_timespec(interval, &ts);
3141 ts = timespec_add(con->last_keepalive_ack, ts);
3142 return timespec_compare(&now, &ts) >= 0;
3143 }
3144 return false;
3145}
3146
3104static struct ceph_msg_data *ceph_msg_data_create(enum ceph_msg_data_type type) 3147static struct ceph_msg_data *ceph_msg_data_create(enum ceph_msg_data_type type)
3105{ 3148{
3106 struct ceph_msg_data *data; 3149 struct ceph_msg_data *data;
diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c
index 9d6ff1215928..edda01626a45 100644
--- a/net/ceph/mon_client.c
+++ b/net/ceph/mon_client.c
@@ -149,6 +149,10 @@ static int __open_session(struct ceph_mon_client *monc)
149 CEPH_ENTITY_TYPE_MON, monc->cur_mon, 149 CEPH_ENTITY_TYPE_MON, monc->cur_mon,
150 &monc->monmap->mon_inst[monc->cur_mon].addr); 150 &monc->monmap->mon_inst[monc->cur_mon].addr);
151 151
152 /* send an initial keepalive to ensure our timestamp is
153 * valid by the time we are in an OPENED state */
154 ceph_con_keepalive(&monc->con);
155
152 /* initiatiate authentication handshake */ 156 /* initiatiate authentication handshake */
153 ret = ceph_auth_build_hello(monc->auth, 157 ret = ceph_auth_build_hello(monc->auth,
154 monc->m_auth->front.iov_base, 158 monc->m_auth->front.iov_base,
@@ -170,14 +174,19 @@ static bool __sub_expired(struct ceph_mon_client *monc)
170 */ 174 */
171static void __schedule_delayed(struct ceph_mon_client *monc) 175static void __schedule_delayed(struct ceph_mon_client *monc)
172{ 176{
173 unsigned int delay; 177 struct ceph_options *opt = monc->client->options;
178 unsigned long delay;
174 179
175 if (monc->cur_mon < 0 || __sub_expired(monc)) 180 if (monc->cur_mon < 0 || __sub_expired(monc)) {
176 delay = 10 * HZ; 181 delay = 10 * HZ;
177 else 182 } else {
178 delay = 20 * HZ; 183 delay = 20 * HZ;
179 dout("__schedule_delayed after %u\n", delay); 184 if (opt->monc_ping_timeout > 0)
180 schedule_delayed_work(&monc->delayed_work, delay); 185 delay = min(delay, opt->monc_ping_timeout / 3);
186 }
187 dout("__schedule_delayed after %lu\n", delay);
188 schedule_delayed_work(&monc->delayed_work,
189 round_jiffies_relative(delay));
181} 190}
182 191
183/* 192/*
@@ -743,11 +752,23 @@ static void delayed_work(struct work_struct *work)
743 __close_session(monc); 752 __close_session(monc);
744 __open_session(monc); /* continue hunting */ 753 __open_session(monc); /* continue hunting */
745 } else { 754 } else {
746 ceph_con_keepalive(&monc->con); 755 struct ceph_options *opt = monc->client->options;
756 int is_auth = ceph_auth_is_authenticated(monc->auth);
757 if (ceph_con_keepalive_expired(&monc->con,
758 opt->monc_ping_timeout)) {
759 dout("monc keepalive timeout\n");
760 is_auth = 0;
761 __close_session(monc);
762 monc->hunting = true;
763 __open_session(monc);
764 }
747 765
748 __validate_auth(monc); 766 if (!monc->hunting) {
767 ceph_con_keepalive(&monc->con);
768 __validate_auth(monc);
769 }
749 770
750 if (ceph_auth_is_authenticated(monc->auth)) 771 if (is_auth)
751 __send_subscribe(monc); 772 __send_subscribe(monc);
752 } 773 }
753 __schedule_delayed(monc); 774 __schedule_delayed(monc);
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 50033677c0fa..80b94e37c94a 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -2817,8 +2817,9 @@ out:
2817} 2817}
2818 2818
2819/* 2819/*
2820 * lookup and return message for incoming reply. set up reply message 2820 * Lookup and return message for incoming reply. Don't try to do
2821 * pages. 2821 * anything about a larger than preallocated data portion of the
2822 * message at the moment - for now, just skip the message.
2822 */ 2823 */
2823static struct ceph_msg *get_reply(struct ceph_connection *con, 2824static struct ceph_msg *get_reply(struct ceph_connection *con,
2824 struct ceph_msg_header *hdr, 2825 struct ceph_msg_header *hdr,
@@ -2836,10 +2837,10 @@ static struct ceph_msg *get_reply(struct ceph_connection *con,
2836 mutex_lock(&osdc->request_mutex); 2837 mutex_lock(&osdc->request_mutex);
2837 req = __lookup_request(osdc, tid); 2838 req = __lookup_request(osdc, tid);
2838 if (!req) { 2839 if (!req) {
2839 *skip = 1; 2840 pr_warn("%s osd%d tid %llu unknown, skipping\n",
2841 __func__, osd->o_osd, tid);
2840 m = NULL; 2842 m = NULL;
2841 dout("get_reply unknown tid %llu from osd%d\n", tid, 2843 *skip = 1;
2842 osd->o_osd);
2843 goto out; 2844 goto out;
2844 } 2845 }
2845 2846
@@ -2849,10 +2850,9 @@ static struct ceph_msg *get_reply(struct ceph_connection *con,
2849 ceph_msg_revoke_incoming(req->r_reply); 2850 ceph_msg_revoke_incoming(req->r_reply);
2850 2851
2851 if (front_len > req->r_reply->front_alloc_len) { 2852 if (front_len > req->r_reply->front_alloc_len) {
2852 pr_warn("get_reply front %d > preallocated %d (%u#%llu)\n", 2853 pr_warn("%s osd%d tid %llu front %d > preallocated %d\n",
2853 front_len, req->r_reply->front_alloc_len, 2854 __func__, osd->o_osd, req->r_tid, front_len,
2854 (unsigned int)con->peer_name.type, 2855 req->r_reply->front_alloc_len);
2855 le64_to_cpu(con->peer_name.num));
2856 m = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, front_len, GFP_NOFS, 2856 m = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, front_len, GFP_NOFS,
2857 false); 2857 false);
2858 if (!m) 2858 if (!m)
@@ -2860,37 +2860,22 @@ static struct ceph_msg *get_reply(struct ceph_connection *con,
2860 ceph_msg_put(req->r_reply); 2860 ceph_msg_put(req->r_reply);
2861 req->r_reply = m; 2861 req->r_reply = m;
2862 } 2862 }
2863 m = ceph_msg_get(req->r_reply);
2864
2865 if (data_len > 0) {
2866 struct ceph_osd_data *osd_data;
2867 2863
2868 /* 2864 if (data_len > req->r_reply->data_length) {
2869 * XXX This is assuming there is only one op containing 2865 pr_warn("%s osd%d tid %llu data %d > preallocated %zu, skipping\n",
2870 * XXX page data. Probably OK for reads, but this 2866 __func__, osd->o_osd, req->r_tid, data_len,
2871 * XXX ought to be done more generally. 2867 req->r_reply->data_length);
2872 */ 2868 m = NULL;
2873 osd_data = osd_req_op_extent_osd_data(req, 0); 2869 *skip = 1;
2874 if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES) { 2870 goto out;
2875 if (osd_data->pages &&
2876 unlikely(osd_data->length < data_len)) {
2877
2878 pr_warn("tid %lld reply has %d bytes we had only %llu bytes ready\n",
2879 tid, data_len, osd_data->length);
2880 *skip = 1;
2881 ceph_msg_put(m);
2882 m = NULL;
2883 goto out;
2884 }
2885 }
2886 } 2871 }
2887 *skip = 0; 2872
2873 m = ceph_msg_get(req->r_reply);
2888 dout("get_reply tid %lld %p\n", tid, m); 2874 dout("get_reply tid %lld %p\n", tid, m);
2889 2875
2890out: 2876out:
2891 mutex_unlock(&osdc->request_mutex); 2877 mutex_unlock(&osdc->request_mutex);
2892 return m; 2878 return m;
2893
2894} 2879}
2895 2880
2896static struct ceph_msg *alloc_msg(struct ceph_connection *con, 2881static struct ceph_msg *alloc_msg(struct ceph_connection *con,
diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c
index 4a3125836b64..7d8f581d9f1f 100644
--- a/net/ceph/osdmap.c
+++ b/net/ceph/osdmap.c
@@ -1300,7 +1300,7 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
1300 ceph_decode_addr(&addr); 1300 ceph_decode_addr(&addr);
1301 pr_info("osd%d up\n", osd); 1301 pr_info("osd%d up\n", osd);
1302 BUG_ON(osd >= map->max_osd); 1302 BUG_ON(osd >= map->max_osd);
1303 map->osd_state[osd] |= CEPH_OSD_UP; 1303 map->osd_state[osd] |= CEPH_OSD_UP | CEPH_OSD_EXISTS;
1304 map->osd_addr[osd] = addr; 1304 map->osd_addr[osd] = addr;
1305 } 1305 }
1306 1306
diff --git a/net/core/dev.c b/net/core/dev.c
index 464c22b6261a..323c04edd779 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4723,6 +4723,8 @@ void napi_disable(struct napi_struct *n)
4723 4723
4724 while (test_and_set_bit(NAPI_STATE_SCHED, &n->state)) 4724 while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
4725 msleep(1); 4725 msleep(1);
4726 while (test_and_set_bit(NAPI_STATE_NPSVC, &n->state))
4727 msleep(1);
4726 4728
4727 hrtimer_cancel(&n->timer); 4729 hrtimer_cancel(&n->timer);
4728 4730
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index bf77e3639ce0..365de66436ac 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -631,15 +631,17 @@ static int dump_rules(struct sk_buff *skb, struct netlink_callback *cb,
631{ 631{
632 int idx = 0; 632 int idx = 0;
633 struct fib_rule *rule; 633 struct fib_rule *rule;
634 int err = 0;
634 635
635 rcu_read_lock(); 636 rcu_read_lock();
636 list_for_each_entry_rcu(rule, &ops->rules_list, list) { 637 list_for_each_entry_rcu(rule, &ops->rules_list, list) {
637 if (idx < cb->args[1]) 638 if (idx < cb->args[1])
638 goto skip; 639 goto skip;
639 640
640 if (fib_nl_fill_rule(skb, rule, NETLINK_CB(cb->skb).portid, 641 err = fib_nl_fill_rule(skb, rule, NETLINK_CB(cb->skb).portid,
641 cb->nlh->nlmsg_seq, RTM_NEWRULE, 642 cb->nlh->nlmsg_seq, RTM_NEWRULE,
642 NLM_F_MULTI, ops) < 0) 643 NLM_F_MULTI, ops);
644 if (err)
643 break; 645 break;
644skip: 646skip:
645 idx++; 647 idx++;
@@ -648,7 +650,7 @@ skip:
648 cb->args[1] = idx; 650 cb->args[1] = idx;
649 rules_ops_put(ops); 651 rules_ops_put(ops);
650 652
651 return skb->len; 653 return err;
652} 654}
653 655
654static int fib_nl_dumprule(struct sk_buff *skb, struct netlink_callback *cb) 656static int fib_nl_dumprule(struct sk_buff *skb, struct netlink_callback *cb)
@@ -664,7 +666,9 @@ static int fib_nl_dumprule(struct sk_buff *skb, struct netlink_callback *cb)
664 if (ops == NULL) 666 if (ops == NULL)
665 return -EAFNOSUPPORT; 667 return -EAFNOSUPPORT;
666 668
667 return dump_rules(skb, cb, ops); 669 dump_rules(skb, cb, ops);
670
671 return skb->len;
668 } 672 }
669 673
670 rcu_read_lock(); 674 rcu_read_lock();
diff --git a/net/core/filter.c b/net/core/filter.c
index 96bd962c292d..60e3fe7c59c0 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -478,9 +478,9 @@ do_pass:
478 bpf_src = BPF_X; 478 bpf_src = BPF_X;
479 } else { 479 } else {
480 insn->dst_reg = BPF_REG_A; 480 insn->dst_reg = BPF_REG_A;
481 insn->src_reg = BPF_REG_X;
482 insn->imm = fp->k; 481 insn->imm = fp->k;
483 bpf_src = BPF_SRC(fp->code); 482 bpf_src = BPF_SRC(fp->code);
483 insn->src_reg = bpf_src == BPF_X ? BPF_REG_X : 0;
484 } 484 }
485 485
486 /* Common case where 'jump_false' is next insn. */ 486 /* Common case where 'jump_false' is next insn. */
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 49b599062af1..b4c530065106 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -1478,6 +1478,15 @@ static int of_dev_node_match(struct device *dev, const void *data)
1478 return ret == 0 ? dev->of_node == data : ret; 1478 return ret == 0 ? dev->of_node == data : ret;
1479} 1479}
1480 1480
1481/*
1482 * of_find_net_device_by_node - lookup the net device for the device node
1483 * @np: OF device node
1484 *
1485 * Looks up the net_device structure corresponding with the device node.
1486 * If successful, returns a pointer to the net_device with the embedded
1487 * struct device refcount incremented by one, or NULL on failure. The
1488 * refcount must be dropped when done with the net_device.
1489 */
1481struct net_device *of_find_net_device_by_node(struct device_node *np) 1490struct net_device *of_find_net_device_by_node(struct device_node *np)
1482{ 1491{
1483 struct device *dev; 1492 struct device *dev;
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 6aa3db8dfc3b..8bdada242a7d 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -142,7 +142,7 @@ static void queue_process(struct work_struct *work)
142 */ 142 */
143static int poll_one_napi(struct napi_struct *napi, int budget) 143static int poll_one_napi(struct napi_struct *napi, int budget)
144{ 144{
145 int work; 145 int work = 0;
146 146
147 /* net_rx_action's ->poll() invocations and our's are 147 /* net_rx_action's ->poll() invocations and our's are
148 * synchronized by this test which is only made while 148 * synchronized by this test which is only made while
@@ -151,7 +151,12 @@ static int poll_one_napi(struct napi_struct *napi, int budget)
151 if (!test_bit(NAPI_STATE_SCHED, &napi->state)) 151 if (!test_bit(NAPI_STATE_SCHED, &napi->state))
152 return budget; 152 return budget;
153 153
154 set_bit(NAPI_STATE_NPSVC, &napi->state); 154 /* If we set this bit but see that it has already been set,
155 * that indicates that napi has been disabled and we need
156 * to abort this operation
157 */
158 if (test_and_set_bit(NAPI_STATE_NPSVC, &napi->state))
159 goto out;
155 160
156 work = napi->poll(napi, budget); 161 work = napi->poll(napi, budget);
157 WARN_ONCE(work > budget, "%pF exceeded budget in poll\n", napi->poll); 162 WARN_ONCE(work > budget, "%pF exceeded budget in poll\n", napi->poll);
@@ -159,6 +164,7 @@ static int poll_one_napi(struct napi_struct *napi, int budget)
159 164
160 clear_bit(NAPI_STATE_NPSVC, &napi->state); 165 clear_bit(NAPI_STATE_NPSVC, &napi->state);
161 166
167out:
162 return budget - work; 168 return budget - work;
163} 169}
164 170
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index e5452296ec2f..474a6da3b51a 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -3047,6 +3047,7 @@ static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb)
3047 u32 portid = NETLINK_CB(cb->skb).portid; 3047 u32 portid = NETLINK_CB(cb->skb).portid;
3048 u32 seq = cb->nlh->nlmsg_seq; 3048 u32 seq = cb->nlh->nlmsg_seq;
3049 u32 filter_mask = 0; 3049 u32 filter_mask = 0;
3050 int err;
3050 3051
3051 if (nlmsg_len(cb->nlh) > sizeof(struct ifinfomsg)) { 3052 if (nlmsg_len(cb->nlh) > sizeof(struct ifinfomsg)) {
3052 struct nlattr *extfilt; 3053 struct nlattr *extfilt;
@@ -3067,20 +3068,25 @@ static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb)
3067 struct net_device *br_dev = netdev_master_upper_dev_get(dev); 3068 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
3068 3069
3069 if (br_dev && br_dev->netdev_ops->ndo_bridge_getlink) { 3070 if (br_dev && br_dev->netdev_ops->ndo_bridge_getlink) {
3070 if (idx >= cb->args[0] && 3071 if (idx >= cb->args[0]) {
3071 br_dev->netdev_ops->ndo_bridge_getlink( 3072 err = br_dev->netdev_ops->ndo_bridge_getlink(
3072 skb, portid, seq, dev, filter_mask, 3073 skb, portid, seq, dev,
3073 NLM_F_MULTI) < 0) 3074 filter_mask, NLM_F_MULTI);
3074 break; 3075 if (err < 0 && err != -EOPNOTSUPP)
3076 break;
3077 }
3075 idx++; 3078 idx++;
3076 } 3079 }
3077 3080
3078 if (ops->ndo_bridge_getlink) { 3081 if (ops->ndo_bridge_getlink) {
3079 if (idx >= cb->args[0] && 3082 if (idx >= cb->args[0]) {
3080 ops->ndo_bridge_getlink(skb, portid, seq, dev, 3083 err = ops->ndo_bridge_getlink(skb, portid,
3081 filter_mask, 3084 seq, dev,
3082 NLM_F_MULTI) < 0) 3085 filter_mask,
3083 break; 3086 NLM_F_MULTI);
3087 if (err < 0 && err != -EOPNOTSUPP)
3088 break;
3089 }
3084 idx++; 3090 idx++;
3085 } 3091 }
3086 } 3092 }
diff --git a/net/core/sock.c b/net/core/sock.c
index ca2984afe16e..3307c02244d3 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -2740,10 +2740,8 @@ static void req_prot_cleanup(struct request_sock_ops *rsk_prot)
2740 return; 2740 return;
2741 kfree(rsk_prot->slab_name); 2741 kfree(rsk_prot->slab_name);
2742 rsk_prot->slab_name = NULL; 2742 rsk_prot->slab_name = NULL;
2743 if (rsk_prot->slab) { 2743 kmem_cache_destroy(rsk_prot->slab);
2744 kmem_cache_destroy(rsk_prot->slab); 2744 rsk_prot->slab = NULL;
2745 rsk_prot->slab = NULL;
2746 }
2747} 2745}
2748 2746
2749static int req_prot_init(const struct proto *prot) 2747static int req_prot_init(const struct proto *prot)
@@ -2828,10 +2826,8 @@ void proto_unregister(struct proto *prot)
2828 list_del(&prot->node); 2826 list_del(&prot->node);
2829 mutex_unlock(&proto_list_mutex); 2827 mutex_unlock(&proto_list_mutex);
2830 2828
2831 if (prot->slab != NULL) { 2829 kmem_cache_destroy(prot->slab);
2832 kmem_cache_destroy(prot->slab); 2830 prot->slab = NULL;
2833 prot->slab = NULL;
2834 }
2835 2831
2836 req_prot_cleanup(prot->rsk_prot); 2832 req_prot_cleanup(prot->rsk_prot);
2837 2833
diff --git a/net/dccp/ackvec.c b/net/dccp/ackvec.c
index bd9e718c2a20..3de0d0362d7f 100644
--- a/net/dccp/ackvec.c
+++ b/net/dccp/ackvec.c
@@ -398,12 +398,8 @@ out_err:
398 398
399void dccp_ackvec_exit(void) 399void dccp_ackvec_exit(void)
400{ 400{
401 if (dccp_ackvec_slab != NULL) { 401 kmem_cache_destroy(dccp_ackvec_slab);
402 kmem_cache_destroy(dccp_ackvec_slab); 402 dccp_ackvec_slab = NULL;
403 dccp_ackvec_slab = NULL; 403 kmem_cache_destroy(dccp_ackvec_record_slab);
404 } 404 dccp_ackvec_record_slab = NULL;
405 if (dccp_ackvec_record_slab != NULL) {
406 kmem_cache_destroy(dccp_ackvec_record_slab);
407 dccp_ackvec_record_slab = NULL;
408 }
409} 405}
diff --git a/net/dccp/ccid.c b/net/dccp/ccid.c
index 83498975165f..90f77d08cc37 100644
--- a/net/dccp/ccid.c
+++ b/net/dccp/ccid.c
@@ -95,8 +95,7 @@ static struct kmem_cache *ccid_kmem_cache_create(int obj_size, char *slab_name_f
95 95
96static void ccid_kmem_cache_destroy(struct kmem_cache *slab) 96static void ccid_kmem_cache_destroy(struct kmem_cache *slab)
97{ 97{
98 if (slab != NULL) 98 kmem_cache_destroy(slab);
99 kmem_cache_destroy(slab);
100} 99}
101 100
102static int __init ccid_activate(struct ccid_operations *ccid_ops) 101static int __init ccid_activate(struct ccid_operations *ccid_ops)
diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c
index 30addee2dd03..838f524cf11a 100644
--- a/net/dccp/minisocks.c
+++ b/net/dccp/minisocks.c
@@ -48,8 +48,6 @@ void dccp_time_wait(struct sock *sk, int state, int timeo)
48 tw->tw_ipv6only = sk->sk_ipv6only; 48 tw->tw_ipv6only = sk->sk_ipv6only;
49 } 49 }
50#endif 50#endif
51 /* Linkage updates. */
52 __inet_twsk_hashdance(tw, sk, &dccp_hashinfo);
53 51
54 /* Get the TIME_WAIT timeout firing. */ 52 /* Get the TIME_WAIT timeout firing. */
55 if (timeo < rto) 53 if (timeo < rto)
@@ -60,6 +58,8 @@ void dccp_time_wait(struct sock *sk, int state, int timeo)
60 timeo = DCCP_TIMEWAIT_LEN; 58 timeo = DCCP_TIMEWAIT_LEN;
61 59
62 inet_twsk_schedule(tw, timeo); 60 inet_twsk_schedule(tw, timeo);
61 /* Linkage updates. */
62 __inet_twsk_hashdance(tw, sk, &dccp_hashinfo);
63 inet_twsk_put(tw); 63 inet_twsk_put(tw);
64 } else { 64 } else {
65 /* Sorry, if we're out of memory, just CLOSE this 65 /* Sorry, if we're out of memory, just CLOSE this
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index 76e3800765f8..c59fa5d9c22c 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -634,6 +634,10 @@ static void dsa_of_free_platform_data(struct dsa_platform_data *pd)
634 port_index++; 634 port_index++;
635 } 635 }
636 kfree(pd->chip[i].rtable); 636 kfree(pd->chip[i].rtable);
637
638 /* Drop our reference to the MDIO bus device */
639 if (pd->chip[i].host_dev)
640 put_device(pd->chip[i].host_dev);
637 } 641 }
638 kfree(pd->chip); 642 kfree(pd->chip);
639} 643}
@@ -661,16 +665,22 @@ static int dsa_of_probe(struct device *dev)
661 return -EPROBE_DEFER; 665 return -EPROBE_DEFER;
662 666
663 ethernet = of_parse_phandle(np, "dsa,ethernet", 0); 667 ethernet = of_parse_phandle(np, "dsa,ethernet", 0);
664 if (!ethernet) 668 if (!ethernet) {
665 return -EINVAL; 669 ret = -EINVAL;
670 goto out_put_mdio;
671 }
666 672
667 ethernet_dev = of_find_net_device_by_node(ethernet); 673 ethernet_dev = of_find_net_device_by_node(ethernet);
668 if (!ethernet_dev) 674 if (!ethernet_dev) {
669 return -EPROBE_DEFER; 675 ret = -EPROBE_DEFER;
676 goto out_put_mdio;
677 }
670 678
671 pd = kzalloc(sizeof(*pd), GFP_KERNEL); 679 pd = kzalloc(sizeof(*pd), GFP_KERNEL);
672 if (!pd) 680 if (!pd) {
673 return -ENOMEM; 681 ret = -ENOMEM;
682 goto out_put_ethernet;
683 }
674 684
675 dev->platform_data = pd; 685 dev->platform_data = pd;
676 pd->of_netdev = ethernet_dev; 686 pd->of_netdev = ethernet_dev;
@@ -691,7 +701,9 @@ static int dsa_of_probe(struct device *dev)
691 cd = &pd->chip[chip_index]; 701 cd = &pd->chip[chip_index];
692 702
693 cd->of_node = child; 703 cd->of_node = child;
694 cd->host_dev = &mdio_bus->dev; 704
705 /* When assigning the host device, increment its refcount */
706 cd->host_dev = get_device(&mdio_bus->dev);
695 707
696 sw_addr = of_get_property(child, "reg", NULL); 708 sw_addr = of_get_property(child, "reg", NULL);
697 if (!sw_addr) 709 if (!sw_addr)
@@ -711,6 +723,12 @@ static int dsa_of_probe(struct device *dev)
711 ret = -EPROBE_DEFER; 723 ret = -EPROBE_DEFER;
712 goto out_free_chip; 724 goto out_free_chip;
713 } 725 }
726
727 /* Drop the mdio_bus device ref, replacing the host
728 * device with the mdio_bus_switch device, keeping
729 * the refcount from of_mdio_find_bus() above.
730 */
731 put_device(cd->host_dev);
714 cd->host_dev = &mdio_bus_switch->dev; 732 cd->host_dev = &mdio_bus_switch->dev;
715 } 733 }
716 734
@@ -744,6 +762,10 @@ static int dsa_of_probe(struct device *dev)
744 } 762 }
745 } 763 }
746 764
765 /* The individual chips hold their own refcount on the mdio bus,
766 * so drop ours */
767 put_device(&mdio_bus->dev);
768
747 return 0; 769 return 0;
748 770
749out_free_chip: 771out_free_chip:
@@ -751,6 +773,10 @@ out_free_chip:
751out_free: 773out_free:
752 kfree(pd); 774 kfree(pd);
753 dev->platform_data = NULL; 775 dev->platform_data = NULL;
776out_put_ethernet:
777 put_device(&ethernet_dev->dev);
778out_put_mdio:
779 put_device(&mdio_bus->dev);
754 return ret; 780 return ret;
755} 781}
756 782
@@ -762,6 +788,7 @@ static void dsa_of_remove(struct device *dev)
762 return; 788 return;
763 789
764 dsa_of_free_platform_data(pd); 790 dsa_of_free_platform_data(pd);
791 put_device(&pd->of_netdev->dev);
765 kfree(pd); 792 kfree(pd);
766} 793}
767#else 794#else
diff --git a/net/dsa/tag_trailer.c b/net/dsa/tag_trailer.c
index d25efc93d8f1..b6ca0890d018 100644
--- a/net/dsa/tag_trailer.c
+++ b/net/dsa/tag_trailer.c
@@ -78,7 +78,7 @@ static int trailer_rcv(struct sk_buff *skb, struct net_device *dev,
78 78
79 trailer = skb_tail_pointer(skb) - 4; 79 trailer = skb_tail_pointer(skb) - 4;
80 if (trailer[0] != 0x80 || (trailer[1] & 0xf8) != 0x00 || 80 if (trailer[0] != 0x80 || (trailer[1] & 0xf8) != 0x00 ||
81 (trailer[3] & 0xef) != 0x00 || trailer[3] != 0x00) 81 (trailer[2] & 0xef) != 0x00 || trailer[3] != 0x00)
82 goto out_drop; 82 goto out_drop;
83 83
84 source_port = trailer[1] & 7; 84 source_port = trailer[1] & 7;
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 61ff5ea31283..01308e6e6127 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -113,6 +113,8 @@
113#include <net/arp.h> 113#include <net/arp.h>
114#include <net/ax25.h> 114#include <net/ax25.h>
115#include <net/netrom.h> 115#include <net/netrom.h>
116#include <net/dst_metadata.h>
117#include <net/ip_tunnels.h>
116 118
117#include <linux/uaccess.h> 119#include <linux/uaccess.h>
118 120
@@ -296,7 +298,8 @@ static void arp_send_dst(int type, int ptype, __be32 dest_ip,
296 struct net_device *dev, __be32 src_ip, 298 struct net_device *dev, __be32 src_ip,
297 const unsigned char *dest_hw, 299 const unsigned char *dest_hw,
298 const unsigned char *src_hw, 300 const unsigned char *src_hw,
299 const unsigned char *target_hw, struct sk_buff *oskb) 301 const unsigned char *target_hw,
302 struct dst_entry *dst)
300{ 303{
301 struct sk_buff *skb; 304 struct sk_buff *skb;
302 305
@@ -309,9 +312,7 @@ static void arp_send_dst(int type, int ptype, __be32 dest_ip,
309 if (!skb) 312 if (!skb)
310 return; 313 return;
311 314
312 if (oskb) 315 skb_dst_set(skb, dst);
313 skb_dst_copy(skb, oskb);
314
315 arp_xmit(skb); 316 arp_xmit(skb);
316} 317}
317 318
@@ -333,6 +334,7 @@ static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb)
333 __be32 target = *(__be32 *)neigh->primary_key; 334 __be32 target = *(__be32 *)neigh->primary_key;
334 int probes = atomic_read(&neigh->probes); 335 int probes = atomic_read(&neigh->probes);
335 struct in_device *in_dev; 336 struct in_device *in_dev;
337 struct dst_entry *dst = NULL;
336 338
337 rcu_read_lock(); 339 rcu_read_lock();
338 in_dev = __in_dev_get_rcu(dev); 340 in_dev = __in_dev_get_rcu(dev);
@@ -381,9 +383,10 @@ static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb)
381 } 383 }
382 } 384 }
383 385
386 if (skb && !(dev->priv_flags & IFF_XMIT_DST_RELEASE))
387 dst = dst_clone(skb_dst(skb));
384 arp_send_dst(ARPOP_REQUEST, ETH_P_ARP, target, dev, saddr, 388 arp_send_dst(ARPOP_REQUEST, ETH_P_ARP, target, dev, saddr,
385 dst_hw, dev->dev_addr, NULL, 389 dst_hw, dev->dev_addr, NULL, dst);
386 dev->priv_flags & IFF_XMIT_DST_RELEASE ? NULL : skb);
387} 390}
388 391
389static int arp_ignore(struct in_device *in_dev, __be32 sip, __be32 tip) 392static int arp_ignore(struct in_device *in_dev, __be32 sip, __be32 tip)
@@ -654,6 +657,7 @@ static int arp_process(struct net *net, struct sock *sk, struct sk_buff *skb)
654 u16 dev_type = dev->type; 657 u16 dev_type = dev->type;
655 int addr_type; 658 int addr_type;
656 struct neighbour *n; 659 struct neighbour *n;
660 struct dst_entry *reply_dst = NULL;
657 bool is_garp = false; 661 bool is_garp = false;
658 662
659 /* arp_rcv below verifies the ARP header and verifies the device 663 /* arp_rcv below verifies the ARP header and verifies the device
@@ -754,13 +758,18 @@ static int arp_process(struct net *net, struct sock *sk, struct sk_buff *skb)
754 * cache. 758 * cache.
755 */ 759 */
756 760
761 if (arp->ar_op == htons(ARPOP_REQUEST) && skb_metadata_dst(skb))
762 reply_dst = (struct dst_entry *)
763 iptunnel_metadata_reply(skb_metadata_dst(skb),
764 GFP_ATOMIC);
765
757 /* Special case: IPv4 duplicate address detection packet (RFC2131) */ 766 /* Special case: IPv4 duplicate address detection packet (RFC2131) */
758 if (sip == 0) { 767 if (sip == 0) {
759 if (arp->ar_op == htons(ARPOP_REQUEST) && 768 if (arp->ar_op == htons(ARPOP_REQUEST) &&
760 inet_addr_type_dev_table(net, dev, tip) == RTN_LOCAL && 769 inet_addr_type_dev_table(net, dev, tip) == RTN_LOCAL &&
761 !arp_ignore(in_dev, sip, tip)) 770 !arp_ignore(in_dev, sip, tip))
762 arp_send(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip, sha, 771 arp_send_dst(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip,
763 dev->dev_addr, sha); 772 sha, dev->dev_addr, sha, reply_dst);
764 goto out; 773 goto out;
765 } 774 }
766 775
@@ -779,9 +788,10 @@ static int arp_process(struct net *net, struct sock *sk, struct sk_buff *skb)
779 if (!dont_send) { 788 if (!dont_send) {
780 n = neigh_event_ns(&arp_tbl, sha, &sip, dev); 789 n = neigh_event_ns(&arp_tbl, sha, &sip, dev);
781 if (n) { 790 if (n) {
782 arp_send(ARPOP_REPLY, ETH_P_ARP, sip, 791 arp_send_dst(ARPOP_REPLY, ETH_P_ARP,
783 dev, tip, sha, dev->dev_addr, 792 sip, dev, tip, sha,
784 sha); 793 dev->dev_addr, sha,
794 reply_dst);
785 neigh_release(n); 795 neigh_release(n);
786 } 796 }
787 } 797 }
@@ -799,9 +809,10 @@ static int arp_process(struct net *net, struct sock *sk, struct sk_buff *skb)
799 if (NEIGH_CB(skb)->flags & LOCALLY_ENQUEUED || 809 if (NEIGH_CB(skb)->flags & LOCALLY_ENQUEUED ||
800 skb->pkt_type == PACKET_HOST || 810 skb->pkt_type == PACKET_HOST ||
801 NEIGH_VAR(in_dev->arp_parms, PROXY_DELAY) == 0) { 811 NEIGH_VAR(in_dev->arp_parms, PROXY_DELAY) == 0) {
802 arp_send(ARPOP_REPLY, ETH_P_ARP, sip, 812 arp_send_dst(ARPOP_REPLY, ETH_P_ARP,
803 dev, tip, sha, dev->dev_addr, 813 sip, dev, tip, sha,
804 sha); 814 dev->dev_addr, sha,
815 reply_dst);
805 } else { 816 } else {
806 pneigh_enqueue(&arp_tbl, 817 pneigh_enqueue(&arp_tbl,
807 in_dev->arp_parms, skb); 818 in_dev->arp_parms, skb);
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 26d6ffb6d23c..6c2af797f2f9 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -1426,7 +1426,7 @@ found:
1426 nh->nh_flags & RTNH_F_LINKDOWN && 1426 nh->nh_flags & RTNH_F_LINKDOWN &&
1427 !(fib_flags & FIB_LOOKUP_IGNORE_LINKSTATE)) 1427 !(fib_flags & FIB_LOOKUP_IGNORE_LINKSTATE))
1428 continue; 1428 continue;
1429 if (!(flp->flowi4_flags & FLOWI_FLAG_VRFSRC)) { 1429 if (!(flp->flowi4_flags & FLOWI_FLAG_SKIP_NH_OIF)) {
1430 if (flp->flowi4_oif && 1430 if (flp->flowi4_oif &&
1431 flp->flowi4_oif != nh->nh_oif) 1431 flp->flowi4_oif != nh->nh_oif)
1432 continue; 1432 continue;
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 79fe05befcae..e5eb8ac4089d 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -427,7 +427,7 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
427 fl4.flowi4_mark = mark; 427 fl4.flowi4_mark = mark;
428 fl4.flowi4_tos = RT_TOS(ip_hdr(skb)->tos); 428 fl4.flowi4_tos = RT_TOS(ip_hdr(skb)->tos);
429 fl4.flowi4_proto = IPPROTO_ICMP; 429 fl4.flowi4_proto = IPPROTO_ICMP;
430 fl4.flowi4_oif = vrf_master_ifindex(skb->dev) ? : skb->dev->ifindex; 430 fl4.flowi4_oif = vrf_master_ifindex(skb->dev);
431 security_skb_classify_flow(skb, flowi4_to_flowi(&fl4)); 431 security_skb_classify_flow(skb, flowi4_to_flowi(&fl4));
432 rt = ip_route_output_key(net, &fl4); 432 rt = ip_route_output_key(net, &fl4);
433 if (IS_ERR(rt)) 433 if (IS_ERR(rt))
@@ -461,7 +461,7 @@ static struct rtable *icmp_route_lookup(struct net *net,
461 fl4->flowi4_proto = IPPROTO_ICMP; 461 fl4->flowi4_proto = IPPROTO_ICMP;
462 fl4->fl4_icmp_type = type; 462 fl4->fl4_icmp_type = type;
463 fl4->fl4_icmp_code = code; 463 fl4->fl4_icmp_code = code;
464 fl4->flowi4_oif = vrf_master_ifindex(skb_in->dev) ? : skb_in->dev->ifindex; 464 fl4->flowi4_oif = vrf_master_ifindex(skb_in->dev);
465 465
466 security_skb_classify_flow(skb_in, flowi4_to_flowi(fl4)); 466 security_skb_classify_flow(skb_in, flowi4_to_flowi(fl4));
467 rt = __ip_route_output_key(net, fl4); 467 rt = __ip_route_output_key(net, fl4);
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index bac205136e1c..ba2f90d90cb5 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -685,20 +685,20 @@ void reqsk_queue_hash_req(struct request_sock_queue *queue,
685 req->num_timeout = 0; 685 req->num_timeout = 0;
686 req->sk = NULL; 686 req->sk = NULL;
687 687
688 setup_timer(&req->rsk_timer, reqsk_timer_handler, (unsigned long)req);
689 mod_timer_pinned(&req->rsk_timer, jiffies + timeout);
690 req->rsk_hash = hash;
691
688 /* before letting lookups find us, make sure all req fields 692 /* before letting lookups find us, make sure all req fields
689 * are committed to memory and refcnt initialized. 693 * are committed to memory and refcnt initialized.
690 */ 694 */
691 smp_wmb(); 695 smp_wmb();
692 atomic_set(&req->rsk_refcnt, 2); 696 atomic_set(&req->rsk_refcnt, 2);
693 setup_timer(&req->rsk_timer, reqsk_timer_handler, (unsigned long)req);
694 req->rsk_hash = hash;
695 697
696 spin_lock(&queue->syn_wait_lock); 698 spin_lock(&queue->syn_wait_lock);
697 req->dl_next = lopt->syn_table[hash]; 699 req->dl_next = lopt->syn_table[hash];
698 lopt->syn_table[hash] = req; 700 lopt->syn_table[hash] = req;
699 spin_unlock(&queue->syn_wait_lock); 701 spin_unlock(&queue->syn_wait_lock);
700
701 mod_timer_pinned(&req->rsk_timer, jiffies + timeout);
702} 702}
703EXPORT_SYMBOL(reqsk_queue_hash_req); 703EXPORT_SYMBOL(reqsk_queue_hash_req);
704 704
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index ae22cc24fbe8..c67f9bd7699c 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -123,13 +123,15 @@ void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
123 /* 123 /*
124 * Step 2: Hash TW into tcp ehash chain. 124 * Step 2: Hash TW into tcp ehash chain.
125 * Notes : 125 * Notes :
126 * - tw_refcnt is set to 3 because : 126 * - tw_refcnt is set to 4 because :
127 * - We have one reference from bhash chain. 127 * - We have one reference from bhash chain.
128 * - We have one reference from ehash chain. 128 * - We have one reference from ehash chain.
129 * - We have one reference from timer.
130 * - One reference for ourself (our caller will release it).
129 * We can use atomic_set() because prior spin_lock()/spin_unlock() 131 * We can use atomic_set() because prior spin_lock()/spin_unlock()
130 * committed into memory all tw fields. 132 * committed into memory all tw fields.
131 */ 133 */
132 atomic_set(&tw->tw_refcnt, 1 + 1 + 1); 134 atomic_set(&tw->tw_refcnt, 4);
133 inet_twsk_add_node_rcu(tw, &ehead->chain); 135 inet_twsk_add_node_rcu(tw, &ehead->chain);
134 136
135 /* Step 3: Remove SK from hash chain */ 137 /* Step 3: Remove SK from hash chain */
@@ -217,7 +219,7 @@ void inet_twsk_deschedule_put(struct inet_timewait_sock *tw)
217} 219}
218EXPORT_SYMBOL(inet_twsk_deschedule_put); 220EXPORT_SYMBOL(inet_twsk_deschedule_put);
219 221
220void inet_twsk_schedule(struct inet_timewait_sock *tw, const int timeo) 222void __inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo, bool rearm)
221{ 223{
222 /* timeout := RTO * 3.5 224 /* timeout := RTO * 3.5
223 * 225 *
@@ -245,12 +247,14 @@ void inet_twsk_schedule(struct inet_timewait_sock *tw, const int timeo)
245 */ 247 */
246 248
247 tw->tw_kill = timeo <= 4*HZ; 249 tw->tw_kill = timeo <= 4*HZ;
248 if (!mod_timer_pinned(&tw->tw_timer, jiffies + timeo)) { 250 if (!rearm) {
249 atomic_inc(&tw->tw_refcnt); 251 BUG_ON(mod_timer_pinned(&tw->tw_timer, jiffies + timeo));
250 atomic_inc(&tw->tw_dr->tw_count); 252 atomic_inc(&tw->tw_dr->tw_count);
253 } else {
254 mod_timer_pending(&tw->tw_timer, jiffies + timeo);
251 } 255 }
252} 256}
253EXPORT_SYMBOL_GPL(inet_twsk_schedule); 257EXPORT_SYMBOL_GPL(__inet_twsk_schedule);
254 258
255void inet_twsk_purge(struct inet_hashinfo *hashinfo, 259void inet_twsk_purge(struct inet_hashinfo *hashinfo,
256 struct inet_timewait_death_row *twdr, int family) 260 struct inet_timewait_death_row *twdr, int family)
diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
index 29ed6c5a5185..84dce6a92f93 100644
--- a/net/ipv4/ip_tunnel_core.c
+++ b/net/ipv4/ip_tunnel_core.c
@@ -46,12 +46,13 @@
46#include <net/net_namespace.h> 46#include <net/net_namespace.h>
47#include <net/netns/generic.h> 47#include <net/netns/generic.h>
48#include <net/rtnetlink.h> 48#include <net/rtnetlink.h>
49#include <net/dst_metadata.h>
49 50
50int iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb, 51int iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
51 __be32 src, __be32 dst, __u8 proto, 52 __be32 src, __be32 dst, __u8 proto,
52 __u8 tos, __u8 ttl, __be16 df, bool xnet) 53 __u8 tos, __u8 ttl, __be16 df, bool xnet)
53{ 54{
54 int pkt_len = skb->len; 55 int pkt_len = skb->len - skb_inner_network_offset(skb);
55 struct iphdr *iph; 56 struct iphdr *iph;
56 int err; 57 int err;
57 58
@@ -119,6 +120,33 @@ int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto)
119} 120}
120EXPORT_SYMBOL_GPL(iptunnel_pull_header); 121EXPORT_SYMBOL_GPL(iptunnel_pull_header);
121 122
123struct metadata_dst *iptunnel_metadata_reply(struct metadata_dst *md,
124 gfp_t flags)
125{
126 struct metadata_dst *res;
127 struct ip_tunnel_info *dst, *src;
128
129 if (!md || md->u.tun_info.mode & IP_TUNNEL_INFO_TX)
130 return NULL;
131
132 res = metadata_dst_alloc(0, flags);
133 if (!res)
134 return NULL;
135
136 dst = &res->u.tun_info;
137 src = &md->u.tun_info;
138 dst->key.tun_id = src->key.tun_id;
139 if (src->mode & IP_TUNNEL_INFO_IPV6)
140 memcpy(&dst->key.u.ipv6.dst, &src->key.u.ipv6.src,
141 sizeof(struct in6_addr));
142 else
143 dst->key.u.ipv4.dst = src->key.u.ipv4.src;
144 dst->mode = src->mode | IP_TUNNEL_INFO_TX;
145
146 return res;
147}
148EXPORT_SYMBOL_GPL(iptunnel_metadata_reply);
149
122struct sk_buff *iptunnel_handle_offloads(struct sk_buff *skb, 150struct sk_buff *iptunnel_handle_offloads(struct sk_buff *skb,
123 bool csum_help, 151 bool csum_help,
124 int gso_type_mask) 152 int gso_type_mask)
@@ -198,8 +226,6 @@ static const struct nla_policy ip_tun_policy[LWTUNNEL_IP_MAX + 1] = {
198 [LWTUNNEL_IP_SRC] = { .type = NLA_U32 }, 226 [LWTUNNEL_IP_SRC] = { .type = NLA_U32 },
199 [LWTUNNEL_IP_TTL] = { .type = NLA_U8 }, 227 [LWTUNNEL_IP_TTL] = { .type = NLA_U8 },
200 [LWTUNNEL_IP_TOS] = { .type = NLA_U8 }, 228 [LWTUNNEL_IP_TOS] = { .type = NLA_U8 },
201 [LWTUNNEL_IP_SPORT] = { .type = NLA_U16 },
202 [LWTUNNEL_IP_DPORT] = { .type = NLA_U16 },
203 [LWTUNNEL_IP_FLAGS] = { .type = NLA_U16 }, 229 [LWTUNNEL_IP_FLAGS] = { .type = NLA_U16 },
204}; 230};
205 231
@@ -239,12 +265,6 @@ static int ip_tun_build_state(struct net_device *dev, struct nlattr *attr,
239 if (tb[LWTUNNEL_IP_TOS]) 265 if (tb[LWTUNNEL_IP_TOS])
240 tun_info->key.tos = nla_get_u8(tb[LWTUNNEL_IP_TOS]); 266 tun_info->key.tos = nla_get_u8(tb[LWTUNNEL_IP_TOS]);
241 267
242 if (tb[LWTUNNEL_IP_SPORT])
243 tun_info->key.tp_src = nla_get_be16(tb[LWTUNNEL_IP_SPORT]);
244
245 if (tb[LWTUNNEL_IP_DPORT])
246 tun_info->key.tp_dst = nla_get_be16(tb[LWTUNNEL_IP_DPORT]);
247
248 if (tb[LWTUNNEL_IP_FLAGS]) 268 if (tb[LWTUNNEL_IP_FLAGS])
249 tun_info->key.tun_flags = nla_get_u16(tb[LWTUNNEL_IP_FLAGS]); 269 tun_info->key.tun_flags = nla_get_u16(tb[LWTUNNEL_IP_FLAGS]);
250 270
@@ -266,8 +286,6 @@ static int ip_tun_fill_encap_info(struct sk_buff *skb,
266 nla_put_be32(skb, LWTUNNEL_IP_SRC, tun_info->key.u.ipv4.src) || 286 nla_put_be32(skb, LWTUNNEL_IP_SRC, tun_info->key.u.ipv4.src) ||
267 nla_put_u8(skb, LWTUNNEL_IP_TOS, tun_info->key.tos) || 287 nla_put_u8(skb, LWTUNNEL_IP_TOS, tun_info->key.tos) ||
268 nla_put_u8(skb, LWTUNNEL_IP_TTL, tun_info->key.ttl) || 288 nla_put_u8(skb, LWTUNNEL_IP_TTL, tun_info->key.ttl) ||
269 nla_put_u16(skb, LWTUNNEL_IP_SPORT, tun_info->key.tp_src) ||
270 nla_put_u16(skb, LWTUNNEL_IP_DPORT, tun_info->key.tp_dst) ||
271 nla_put_u16(skb, LWTUNNEL_IP_FLAGS, tun_info->key.tun_flags)) 289 nla_put_u16(skb, LWTUNNEL_IP_FLAGS, tun_info->key.tun_flags))
272 return -ENOMEM; 290 return -ENOMEM;
273 291
@@ -281,8 +299,6 @@ static int ip_tun_encap_nlsize(struct lwtunnel_state *lwtstate)
281 + nla_total_size(4) /* LWTUNNEL_IP_SRC */ 299 + nla_total_size(4) /* LWTUNNEL_IP_SRC */
282 + nla_total_size(1) /* LWTUNNEL_IP_TOS */ 300 + nla_total_size(1) /* LWTUNNEL_IP_TOS */
283 + nla_total_size(1) /* LWTUNNEL_IP_TTL */ 301 + nla_total_size(1) /* LWTUNNEL_IP_TTL */
284 + nla_total_size(2) /* LWTUNNEL_IP_SPORT */
285 + nla_total_size(2) /* LWTUNNEL_IP_DPORT */
286 + nla_total_size(2); /* LWTUNNEL_IP_FLAGS */ 302 + nla_total_size(2); /* LWTUNNEL_IP_FLAGS */
287} 303}
288 304
@@ -305,8 +321,6 @@ static const struct nla_policy ip6_tun_policy[LWTUNNEL_IP6_MAX + 1] = {
305 [LWTUNNEL_IP6_SRC] = { .len = sizeof(struct in6_addr) }, 321 [LWTUNNEL_IP6_SRC] = { .len = sizeof(struct in6_addr) },
306 [LWTUNNEL_IP6_HOPLIMIT] = { .type = NLA_U8 }, 322 [LWTUNNEL_IP6_HOPLIMIT] = { .type = NLA_U8 },
307 [LWTUNNEL_IP6_TC] = { .type = NLA_U8 }, 323 [LWTUNNEL_IP6_TC] = { .type = NLA_U8 },
308 [LWTUNNEL_IP6_SPORT] = { .type = NLA_U16 },
309 [LWTUNNEL_IP6_DPORT] = { .type = NLA_U16 },
310 [LWTUNNEL_IP6_FLAGS] = { .type = NLA_U16 }, 324 [LWTUNNEL_IP6_FLAGS] = { .type = NLA_U16 },
311}; 325};
312 326
@@ -346,12 +360,6 @@ static int ip6_tun_build_state(struct net_device *dev, struct nlattr *attr,
346 if (tb[LWTUNNEL_IP6_TC]) 360 if (tb[LWTUNNEL_IP6_TC])
347 tun_info->key.tos = nla_get_u8(tb[LWTUNNEL_IP6_TC]); 361 tun_info->key.tos = nla_get_u8(tb[LWTUNNEL_IP6_TC]);
348 362
349 if (tb[LWTUNNEL_IP6_SPORT])
350 tun_info->key.tp_src = nla_get_be16(tb[LWTUNNEL_IP6_SPORT]);
351
352 if (tb[LWTUNNEL_IP6_DPORT])
353 tun_info->key.tp_dst = nla_get_be16(tb[LWTUNNEL_IP6_DPORT]);
354
355 if (tb[LWTUNNEL_IP6_FLAGS]) 363 if (tb[LWTUNNEL_IP6_FLAGS])
356 tun_info->key.tun_flags = nla_get_u16(tb[LWTUNNEL_IP6_FLAGS]); 364 tun_info->key.tun_flags = nla_get_u16(tb[LWTUNNEL_IP6_FLAGS]);
357 365
@@ -373,8 +381,6 @@ static int ip6_tun_fill_encap_info(struct sk_buff *skb,
373 nla_put_in6_addr(skb, LWTUNNEL_IP6_SRC, &tun_info->key.u.ipv6.src) || 381 nla_put_in6_addr(skb, LWTUNNEL_IP6_SRC, &tun_info->key.u.ipv6.src) ||
374 nla_put_u8(skb, LWTUNNEL_IP6_HOPLIMIT, tun_info->key.tos) || 382 nla_put_u8(skb, LWTUNNEL_IP6_HOPLIMIT, tun_info->key.tos) ||
375 nla_put_u8(skb, LWTUNNEL_IP6_TC, tun_info->key.ttl) || 383 nla_put_u8(skb, LWTUNNEL_IP6_TC, tun_info->key.ttl) ||
376 nla_put_u16(skb, LWTUNNEL_IP6_SPORT, tun_info->key.tp_src) ||
377 nla_put_u16(skb, LWTUNNEL_IP6_DPORT, tun_info->key.tp_dst) ||
378 nla_put_u16(skb, LWTUNNEL_IP6_FLAGS, tun_info->key.tun_flags)) 384 nla_put_u16(skb, LWTUNNEL_IP6_FLAGS, tun_info->key.tun_flags))
379 return -ENOMEM; 385 return -ENOMEM;
380 386
@@ -388,8 +394,6 @@ static int ip6_tun_encap_nlsize(struct lwtunnel_state *lwtstate)
388 + nla_total_size(16) /* LWTUNNEL_IP6_SRC */ 394 + nla_total_size(16) /* LWTUNNEL_IP6_SRC */
389 + nla_total_size(1) /* LWTUNNEL_IP6_HOPLIMIT */ 395 + nla_total_size(1) /* LWTUNNEL_IP6_HOPLIMIT */
390 + nla_total_size(1) /* LWTUNNEL_IP6_TC */ 396 + nla_total_size(1) /* LWTUNNEL_IP6_TC */
391 + nla_total_size(2) /* LWTUNNEL_IP6_SPORT */
392 + nla_total_size(2) /* LWTUNNEL_IP6_DPORT */
393 + nla_total_size(2); /* LWTUNNEL_IP6_FLAGS */ 397 + nla_total_size(2); /* LWTUNNEL_IP6_FLAGS */
394} 398}
395 399
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index f7afcba8b1a1..6bab84503cd9 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -2036,6 +2036,7 @@ struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *fl4)
2036 struct fib_result res; 2036 struct fib_result res;
2037 struct rtable *rth; 2037 struct rtable *rth;
2038 int orig_oif; 2038 int orig_oif;
2039 int err = -ENETUNREACH;
2039 2040
2040 res.tclassid = 0; 2041 res.tclassid = 0;
2041 res.fi = NULL; 2042 res.fi = NULL;
@@ -2144,7 +2145,8 @@ struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *fl4)
2144 goto make_route; 2145 goto make_route;
2145 } 2146 }
2146 2147
2147 if (fib_lookup(net, fl4, &res, 0)) { 2148 err = fib_lookup(net, fl4, &res, 0);
2149 if (err) {
2148 res.fi = NULL; 2150 res.fi = NULL;
2149 res.table = NULL; 2151 res.table = NULL;
2150 if (fl4->flowi4_oif) { 2152 if (fl4->flowi4_oif) {
@@ -2172,7 +2174,7 @@ struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *fl4)
2172 res.type = RTN_UNICAST; 2174 res.type = RTN_UNICAST;
2173 goto make_route; 2175 goto make_route;
2174 } 2176 }
2175 rth = ERR_PTR(-ENETUNREACH); 2177 rth = ERR_PTR(err);
2176 goto out; 2178 goto out;
2177 } 2179 }
2178 2180
diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
index c6ded6b2a79f..448c2615fece 100644
--- a/net/ipv4/tcp_cubic.c
+++ b/net/ipv4/tcp_cubic.c
@@ -154,14 +154,20 @@ static void bictcp_init(struct sock *sk)
154static void bictcp_cwnd_event(struct sock *sk, enum tcp_ca_event event) 154static void bictcp_cwnd_event(struct sock *sk, enum tcp_ca_event event)
155{ 155{
156 if (event == CA_EVENT_TX_START) { 156 if (event == CA_EVENT_TX_START) {
157 s32 delta = tcp_time_stamp - tcp_sk(sk)->lsndtime;
158 struct bictcp *ca = inet_csk_ca(sk); 157 struct bictcp *ca = inet_csk_ca(sk);
158 u32 now = tcp_time_stamp;
159 s32 delta;
160
161 delta = now - tcp_sk(sk)->lsndtime;
159 162
160 /* We were application limited (idle) for a while. 163 /* We were application limited (idle) for a while.
161 * Shift epoch_start to keep cwnd growth to cubic curve. 164 * Shift epoch_start to keep cwnd growth to cubic curve.
162 */ 165 */
163 if (ca->epoch_start && delta > 0) 166 if (ca->epoch_start && delta > 0) {
164 ca->epoch_start += delta; 167 ca->epoch_start += delta;
168 if (after(ca->epoch_start, now))
169 ca->epoch_start = now;
170 }
165 return; 171 return;
166 } 172 }
167} 173}
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index e0a87c238882..e4fe62b6b106 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -162,9 +162,9 @@ kill_with_rst:
162 if (tcp_death_row.sysctl_tw_recycle && 162 if (tcp_death_row.sysctl_tw_recycle &&
163 tcptw->tw_ts_recent_stamp && 163 tcptw->tw_ts_recent_stamp &&
164 tcp_tw_remember_stamp(tw)) 164 tcp_tw_remember_stamp(tw))
165 inet_twsk_schedule(tw, tw->tw_timeout); 165 inet_twsk_reschedule(tw, tw->tw_timeout);
166 else 166 else
167 inet_twsk_schedule(tw, TCP_TIMEWAIT_LEN); 167 inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
168 return TCP_TW_ACK; 168 return TCP_TW_ACK;
169 } 169 }
170 170
@@ -201,7 +201,7 @@ kill:
201 return TCP_TW_SUCCESS; 201 return TCP_TW_SUCCESS;
202 } 202 }
203 } 203 }
204 inet_twsk_schedule(tw, TCP_TIMEWAIT_LEN); 204 inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
205 205
206 if (tmp_opt.saw_tstamp) { 206 if (tmp_opt.saw_tstamp) {
207 tcptw->tw_ts_recent = tmp_opt.rcv_tsval; 207 tcptw->tw_ts_recent = tmp_opt.rcv_tsval;
@@ -251,7 +251,7 @@ kill:
251 * Do not reschedule in the last case. 251 * Do not reschedule in the last case.
252 */ 252 */
253 if (paws_reject || th->ack) 253 if (paws_reject || th->ack)
254 inet_twsk_schedule(tw, TCP_TIMEWAIT_LEN); 254 inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
255 255
256 return tcp_timewait_check_oow_rate_limit( 256 return tcp_timewait_check_oow_rate_limit(
257 tw, skb, LINUX_MIB_TCPACKSKIPPEDTIMEWAIT); 257 tw, skb, LINUX_MIB_TCPACKSKIPPEDTIMEWAIT);
@@ -322,9 +322,6 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
322 } while (0); 322 } while (0);
323#endif 323#endif
324 324
325 /* Linkage updates. */
326 __inet_twsk_hashdance(tw, sk, &tcp_hashinfo);
327
328 /* Get the TIME_WAIT timeout firing. */ 325 /* Get the TIME_WAIT timeout firing. */
329 if (timeo < rto) 326 if (timeo < rto)
330 timeo = rto; 327 timeo = rto;
@@ -338,6 +335,8 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
338 } 335 }
339 336
340 inet_twsk_schedule(tw, timeo); 337 inet_twsk_schedule(tw, timeo);
338 /* Linkage updates. */
339 __inet_twsk_hashdance(tw, sk, &tcp_hashinfo);
341 inet_twsk_put(tw); 340 inet_twsk_put(tw);
342 } else { 341 } else {
343 /* Sorry, if we're out of memory, just CLOSE this 342 /* Sorry, if we're out of memory, just CLOSE this
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 53ce6cf55598..9e53dd9bfcad 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2893,6 +2893,7 @@ void tcp_send_active_reset(struct sock *sk, gfp_t priority)
2893 skb_reserve(skb, MAX_TCP_HEADER); 2893 skb_reserve(skb, MAX_TCP_HEADER);
2894 tcp_init_nondata_skb(skb, tcp_acceptable_seq(sk), 2894 tcp_init_nondata_skb(skb, tcp_acceptable_seq(sk),
2895 TCPHDR_ACK | TCPHDR_RST); 2895 TCPHDR_ACK | TCPHDR_RST);
2896 skb_mstamp_get(&skb->skb_mstamp);
2896 /* Send it off. */ 2897 /* Send it off. */
2897 if (tcp_transmit_skb(sk, skb, 0, priority)) 2898 if (tcp_transmit_skb(sk, skb, 0, priority))
2898 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED); 2899 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED);
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index c0a15e7f359f..f7d1d5e19e95 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1024,7 +1024,8 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
1024 if (netif_index_is_vrf(net, ipc.oif)) { 1024 if (netif_index_is_vrf(net, ipc.oif)) {
1025 flowi4_init_output(fl4, ipc.oif, sk->sk_mark, tos, 1025 flowi4_init_output(fl4, ipc.oif, sk->sk_mark, tos,
1026 RT_SCOPE_UNIVERSE, sk->sk_protocol, 1026 RT_SCOPE_UNIVERSE, sk->sk_protocol,
1027 (flow_flags | FLOWI_FLAG_VRFSRC), 1027 (flow_flags | FLOWI_FLAG_VRFSRC |
1028 FLOWI_FLAG_SKIP_NH_OIF),
1028 faddr, saddr, dport, 1029 faddr, saddr, dport,
1029 inet->inet_sport); 1030 inet->inet_sport);
1030 1031
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index 671011055ad5..0304d1680ca2 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -33,6 +33,8 @@ static struct dst_entry *__xfrm4_dst_lookup(struct net *net, struct flowi4 *fl4,
33 if (saddr) 33 if (saddr)
34 fl4->saddr = saddr->a4; 34 fl4->saddr = saddr->a4;
35 35
36 fl4->flowi4_flags = FLOWI_FLAG_SKIP_NH_OIF;
37
36 rt = __ip_route_output_key(net, fl4); 38 rt = __ip_route_output_key(net, fl4);
37 if (!IS_ERR(rt)) 39 if (!IS_ERR(rt))
38 return &rt->dst; 40 return &rt->dst;
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 759d28ad16b7..c8380f1876f1 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -5132,13 +5132,12 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
5132 5132
5133 rt = addrconf_get_prefix_route(&ifp->peer_addr, 128, 5133 rt = addrconf_get_prefix_route(&ifp->peer_addr, 128,
5134 ifp->idev->dev, 0, 0); 5134 ifp->idev->dev, 0, 0);
5135 if (rt && ip6_del_rt(rt)) 5135 if (rt)
5136 dst_free(&rt->dst); 5136 ip6_del_rt(rt);
5137 } 5137 }
5138 dst_hold(&ifp->rt->dst); 5138 dst_hold(&ifp->rt->dst);
5139 5139
5140 if (ip6_del_rt(ifp->rt)) 5140 ip6_del_rt(ifp->rt);
5141 dst_free(&ifp->rt->dst);
5142 5141
5143 rt_genid_bump_ipv6(net); 5142 rt_genid_bump_ipv6(net);
5144 break; 5143 break;
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 418d9823692b..7d2e0023c72d 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -155,6 +155,11 @@ static void node_free(struct fib6_node *fn)
155 kmem_cache_free(fib6_node_kmem, fn); 155 kmem_cache_free(fib6_node_kmem, fn);
156} 156}
157 157
158static void rt6_rcu_free(struct rt6_info *rt)
159{
160 call_rcu(&rt->dst.rcu_head, dst_rcu_free);
161}
162
158static void rt6_free_pcpu(struct rt6_info *non_pcpu_rt) 163static void rt6_free_pcpu(struct rt6_info *non_pcpu_rt)
159{ 164{
160 int cpu; 165 int cpu;
@@ -169,7 +174,7 @@ static void rt6_free_pcpu(struct rt6_info *non_pcpu_rt)
169 ppcpu_rt = per_cpu_ptr(non_pcpu_rt->rt6i_pcpu, cpu); 174 ppcpu_rt = per_cpu_ptr(non_pcpu_rt->rt6i_pcpu, cpu);
170 pcpu_rt = *ppcpu_rt; 175 pcpu_rt = *ppcpu_rt;
171 if (pcpu_rt) { 176 if (pcpu_rt) {
172 dst_free(&pcpu_rt->dst); 177 rt6_rcu_free(pcpu_rt);
173 *ppcpu_rt = NULL; 178 *ppcpu_rt = NULL;
174 } 179 }
175 } 180 }
@@ -181,7 +186,7 @@ static void rt6_release(struct rt6_info *rt)
181{ 186{
182 if (atomic_dec_and_test(&rt->rt6i_ref)) { 187 if (atomic_dec_and_test(&rt->rt6i_ref)) {
183 rt6_free_pcpu(rt); 188 rt6_free_pcpu(rt);
184 dst_free(&rt->dst); 189 rt6_rcu_free(rt);
185 } 190 }
186} 191}
187 192
@@ -846,7 +851,7 @@ add:
846 *ins = rt; 851 *ins = rt;
847 rt->rt6i_node = fn; 852 rt->rt6i_node = fn;
848 atomic_inc(&rt->rt6i_ref); 853 atomic_inc(&rt->rt6i_ref);
849 inet6_rt_notify(RTM_NEWROUTE, rt, info); 854 inet6_rt_notify(RTM_NEWROUTE, rt, info, 0);
850 info->nl_net->ipv6.rt6_stats->fib_rt_entries++; 855 info->nl_net->ipv6.rt6_stats->fib_rt_entries++;
851 856
852 if (!(fn->fn_flags & RTN_RTINFO)) { 857 if (!(fn->fn_flags & RTN_RTINFO)) {
@@ -872,7 +877,7 @@ add:
872 rt->rt6i_node = fn; 877 rt->rt6i_node = fn;
873 rt->dst.rt6_next = iter->dst.rt6_next; 878 rt->dst.rt6_next = iter->dst.rt6_next;
874 atomic_inc(&rt->rt6i_ref); 879 atomic_inc(&rt->rt6i_ref);
875 inet6_rt_notify(RTM_NEWROUTE, rt, info); 880 inet6_rt_notify(RTM_NEWROUTE, rt, info, NLM_F_REPLACE);
876 if (!(fn->fn_flags & RTN_RTINFO)) { 881 if (!(fn->fn_flags & RTN_RTINFO)) {
877 info->nl_net->ipv6.rt6_stats->fib_route_nodes++; 882 info->nl_net->ipv6.rt6_stats->fib_route_nodes++;
878 fn->fn_flags |= RTN_RTINFO; 883 fn->fn_flags |= RTN_RTINFO;
@@ -933,6 +938,10 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt,
933 int replace_required = 0; 938 int replace_required = 0;
934 int sernum = fib6_new_sernum(info->nl_net); 939 int sernum = fib6_new_sernum(info->nl_net);
935 940
941 if (WARN_ON_ONCE((rt->dst.flags & DST_NOCACHE) &&
942 !atomic_read(&rt->dst.__refcnt)))
943 return -EINVAL;
944
936 if (info->nlh) { 945 if (info->nlh) {
937 if (!(info->nlh->nlmsg_flags & NLM_F_CREATE)) 946 if (!(info->nlh->nlmsg_flags & NLM_F_CREATE))
938 allow_create = 0; 947 allow_create = 0;
@@ -1025,6 +1034,7 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt,
1025 fib6_start_gc(info->nl_net, rt); 1034 fib6_start_gc(info->nl_net, rt);
1026 if (!(rt->rt6i_flags & RTF_CACHE)) 1035 if (!(rt->rt6i_flags & RTF_CACHE))
1027 fib6_prune_clones(info->nl_net, pn); 1036 fib6_prune_clones(info->nl_net, pn);
1037 rt->dst.flags &= ~DST_NOCACHE;
1028 } 1038 }
1029 1039
1030out: 1040out:
@@ -1049,7 +1059,8 @@ out:
1049 atomic_inc(&pn->leaf->rt6i_ref); 1059 atomic_inc(&pn->leaf->rt6i_ref);
1050 } 1060 }
1051#endif 1061#endif
1052 dst_free(&rt->dst); 1062 if (!(rt->dst.flags & DST_NOCACHE))
1063 dst_free(&rt->dst);
1053 } 1064 }
1054 return err; 1065 return err;
1055 1066
@@ -1060,7 +1071,8 @@ out:
1060st_failure: 1071st_failure:
1061 if (fn && !(fn->fn_flags & (RTN_RTINFO|RTN_ROOT))) 1072 if (fn && !(fn->fn_flags & (RTN_RTINFO|RTN_ROOT)))
1062 fib6_repair_tree(info->nl_net, fn); 1073 fib6_repair_tree(info->nl_net, fn);
1063 dst_free(&rt->dst); 1074 if (!(rt->dst.flags & DST_NOCACHE))
1075 dst_free(&rt->dst);
1064 return err; 1076 return err;
1065#endif 1077#endif
1066} 1078}
@@ -1410,7 +1422,7 @@ static void fib6_del_route(struct fib6_node *fn, struct rt6_info **rtp,
1410 1422
1411 fib6_purge_rt(rt, fn, net); 1423 fib6_purge_rt(rt, fn, net);
1412 1424
1413 inet6_rt_notify(RTM_DELROUTE, rt, info); 1425 inet6_rt_notify(RTM_DELROUTE, rt, info, 0);
1414 rt6_release(rt); 1426 rt6_release(rt);
1415} 1427}
1416 1428
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 4038c694ec03..3c7b9310b33f 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -404,13 +404,13 @@ static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
404 struct ipv6_tlv_tnl_enc_lim *tel; 404 struct ipv6_tlv_tnl_enc_lim *tel;
405 __u32 mtu; 405 __u32 mtu;
406 case ICMPV6_DEST_UNREACH: 406 case ICMPV6_DEST_UNREACH:
407 net_warn_ratelimited("%s: Path to destination invalid or inactive!\n", 407 net_dbg_ratelimited("%s: Path to destination invalid or inactive!\n",
408 t->parms.name); 408 t->parms.name);
409 break; 409 break;
410 case ICMPV6_TIME_EXCEED: 410 case ICMPV6_TIME_EXCEED:
411 if (code == ICMPV6_EXC_HOPLIMIT) { 411 if (code == ICMPV6_EXC_HOPLIMIT) {
412 net_warn_ratelimited("%s: Too small hop limit or routing loop in tunnel!\n", 412 net_dbg_ratelimited("%s: Too small hop limit or routing loop in tunnel!\n",
413 t->parms.name); 413 t->parms.name);
414 } 414 }
415 break; 415 break;
416 case ICMPV6_PARAMPROB: 416 case ICMPV6_PARAMPROB:
@@ -421,12 +421,12 @@ static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
421 if (teli && teli == be32_to_cpu(info) - 2) { 421 if (teli && teli == be32_to_cpu(info) - 2) {
422 tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli]; 422 tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli];
423 if (tel->encap_limit == 0) { 423 if (tel->encap_limit == 0) {
424 net_warn_ratelimited("%s: Too small encapsulation limit or routing loop in tunnel!\n", 424 net_dbg_ratelimited("%s: Too small encapsulation limit or routing loop in tunnel!\n",
425 t->parms.name); 425 t->parms.name);
426 } 426 }
427 } else { 427 } else {
428 net_warn_ratelimited("%s: Recipient unable to parse tunneled packet!\n", 428 net_dbg_ratelimited("%s: Recipient unable to parse tunneled packet!\n",
429 t->parms.name); 429 t->parms.name);
430 } 430 }
431 break; 431 break;
432 case ICMPV6_PKT_TOOBIG: 432 case ICMPV6_PKT_TOOBIG:
@@ -634,20 +634,20 @@ static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb,
634 } 634 }
635 635
636 if (!fl6->flowi6_mark) 636 if (!fl6->flowi6_mark)
637 dst = ip6_tnl_dst_check(tunnel); 637 dst = ip6_tnl_dst_get(tunnel);
638 638
639 if (!dst) { 639 if (!dst) {
640 ndst = ip6_route_output(net, NULL, fl6); 640 dst = ip6_route_output(net, NULL, fl6);
641 641
642 if (ndst->error) 642 if (dst->error)
643 goto tx_err_link_failure; 643 goto tx_err_link_failure;
644 ndst = xfrm_lookup(net, ndst, flowi6_to_flowi(fl6), NULL, 0); 644 dst = xfrm_lookup(net, dst, flowi6_to_flowi(fl6), NULL, 0);
645 if (IS_ERR(ndst)) { 645 if (IS_ERR(dst)) {
646 err = PTR_ERR(ndst); 646 err = PTR_ERR(dst);
647 ndst = NULL; 647 dst = NULL;
648 goto tx_err_link_failure; 648 goto tx_err_link_failure;
649 } 649 }
650 dst = ndst; 650 ndst = dst;
651 } 651 }
652 652
653 tdev = dst->dev; 653 tdev = dst->dev;
@@ -702,12 +702,9 @@ static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb,
702 skb = new_skb; 702 skb = new_skb;
703 } 703 }
704 704
705 if (fl6->flowi6_mark) { 705 if (!fl6->flowi6_mark && ndst)
706 skb_dst_set(skb, dst); 706 ip6_tnl_dst_set(tunnel, ndst);
707 ndst = NULL; 707 skb_dst_set(skb, dst);
708 } else {
709 skb_dst_set_noref(skb, dst);
710 }
711 708
712 proto = NEXTHDR_GRE; 709 proto = NEXTHDR_GRE;
713 if (encap_limit >= 0) { 710 if (encap_limit >= 0) {
@@ -762,14 +759,12 @@ static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb,
762 skb_set_inner_protocol(skb, protocol); 759 skb_set_inner_protocol(skb, protocol);
763 760
764 ip6tunnel_xmit(NULL, skb, dev); 761 ip6tunnel_xmit(NULL, skb, dev);
765 if (ndst)
766 ip6_tnl_dst_store(tunnel, ndst);
767 return 0; 762 return 0;
768tx_err_link_failure: 763tx_err_link_failure:
769 stats->tx_carrier_errors++; 764 stats->tx_carrier_errors++;
770 dst_link_failure(skb); 765 dst_link_failure(skb);
771tx_err_dst_release: 766tx_err_dst_release:
772 dst_release(ndst); 767 dst_release(dst);
773 return err; 768 return err;
774} 769}
775 770
@@ -1223,6 +1218,9 @@ static const struct net_device_ops ip6gre_netdev_ops = {
1223 1218
1224static void ip6gre_dev_free(struct net_device *dev) 1219static void ip6gre_dev_free(struct net_device *dev)
1225{ 1220{
1221 struct ip6_tnl *t = netdev_priv(dev);
1222
1223 ip6_tnl_dst_destroy(t);
1226 free_percpu(dev->tstats); 1224 free_percpu(dev->tstats);
1227 free_netdev(dev); 1225 free_netdev(dev);
1228} 1226}
@@ -1245,9 +1243,10 @@ static void ip6gre_tunnel_setup(struct net_device *dev)
1245 netif_keep_dst(dev); 1243 netif_keep_dst(dev);
1246} 1244}
1247 1245
1248static int ip6gre_tunnel_init(struct net_device *dev) 1246static int ip6gre_tunnel_init_common(struct net_device *dev)
1249{ 1247{
1250 struct ip6_tnl *tunnel; 1248 struct ip6_tnl *tunnel;
1249 int ret;
1251 1250
1252 tunnel = netdev_priv(dev); 1251 tunnel = netdev_priv(dev);
1253 1252
@@ -1255,16 +1254,37 @@ static int ip6gre_tunnel_init(struct net_device *dev)
1255 tunnel->net = dev_net(dev); 1254 tunnel->net = dev_net(dev);
1256 strcpy(tunnel->parms.name, dev->name); 1255 strcpy(tunnel->parms.name, dev->name);
1257 1256
1257 dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
1258 if (!dev->tstats)
1259 return -ENOMEM;
1260
1261 ret = ip6_tnl_dst_init(tunnel);
1262 if (ret) {
1263 free_percpu(dev->tstats);
1264 dev->tstats = NULL;
1265 return ret;
1266 }
1267
1268 return 0;
1269}
1270
1271static int ip6gre_tunnel_init(struct net_device *dev)
1272{
1273 struct ip6_tnl *tunnel;
1274 int ret;
1275
1276 ret = ip6gre_tunnel_init_common(dev);
1277 if (ret)
1278 return ret;
1279
1280 tunnel = netdev_priv(dev);
1281
1258 memcpy(dev->dev_addr, &tunnel->parms.laddr, sizeof(struct in6_addr)); 1282 memcpy(dev->dev_addr, &tunnel->parms.laddr, sizeof(struct in6_addr));
1259 memcpy(dev->broadcast, &tunnel->parms.raddr, sizeof(struct in6_addr)); 1283 memcpy(dev->broadcast, &tunnel->parms.raddr, sizeof(struct in6_addr));
1260 1284
1261 if (ipv6_addr_any(&tunnel->parms.raddr)) 1285 if (ipv6_addr_any(&tunnel->parms.raddr))
1262 dev->header_ops = &ip6gre_header_ops; 1286 dev->header_ops = &ip6gre_header_ops;
1263 1287
1264 dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
1265 if (!dev->tstats)
1266 return -ENOMEM;
1267
1268 return 0; 1288 return 0;
1269} 1289}
1270 1290
@@ -1460,19 +1480,16 @@ static void ip6gre_netlink_parms(struct nlattr *data[],
1460static int ip6gre_tap_init(struct net_device *dev) 1480static int ip6gre_tap_init(struct net_device *dev)
1461{ 1481{
1462 struct ip6_tnl *tunnel; 1482 struct ip6_tnl *tunnel;
1483 int ret;
1463 1484
1464 tunnel = netdev_priv(dev); 1485 ret = ip6gre_tunnel_init_common(dev);
1486 if (ret)
1487 return ret;
1465 1488
1466 tunnel->dev = dev; 1489 tunnel = netdev_priv(dev);
1467 tunnel->net = dev_net(dev);
1468 strcpy(tunnel->parms.name, dev->name);
1469 1490
1470 ip6gre_tnl_link_config(tunnel, 1); 1491 ip6gre_tnl_link_config(tunnel, 1);
1471 1492
1472 dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
1473 if (!dev->tstats)
1474 return -ENOMEM;
1475
1476 return 0; 1493 return 0;
1477} 1494}
1478 1495
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 60c565309d0a..a598fe2c0849 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -600,20 +600,22 @@ int ip6_fragment(struct sock *sk, struct sk_buff *skb,
600 frag_id = ipv6_select_ident(net, &ipv6_hdr(skb)->daddr, 600 frag_id = ipv6_select_ident(net, &ipv6_hdr(skb)->daddr,
601 &ipv6_hdr(skb)->saddr); 601 &ipv6_hdr(skb)->saddr);
602 602
603 hroom = LL_RESERVED_SPACE(rt->dst.dev);
603 if (skb_has_frag_list(skb)) { 604 if (skb_has_frag_list(skb)) {
604 int first_len = skb_pagelen(skb); 605 int first_len = skb_pagelen(skb);
605 struct sk_buff *frag2; 606 struct sk_buff *frag2;
606 607
607 if (first_len - hlen > mtu || 608 if (first_len - hlen > mtu ||
608 ((first_len - hlen) & 7) || 609 ((first_len - hlen) & 7) ||
609 skb_cloned(skb)) 610 skb_cloned(skb) ||
611 skb_headroom(skb) < (hroom + sizeof(struct frag_hdr)))
610 goto slow_path; 612 goto slow_path;
611 613
612 skb_walk_frags(skb, frag) { 614 skb_walk_frags(skb, frag) {
613 /* Correct geometry. */ 615 /* Correct geometry. */
614 if (frag->len > mtu || 616 if (frag->len > mtu ||
615 ((frag->len & 7) && frag->next) || 617 ((frag->len & 7) && frag->next) ||
616 skb_headroom(frag) < hlen) 618 skb_headroom(frag) < (hlen + hroom + sizeof(struct frag_hdr)))
617 goto slow_path_clean; 619 goto slow_path_clean;
618 620
619 /* Partially cloned skb? */ 621 /* Partially cloned skb? */
@@ -630,8 +632,6 @@ int ip6_fragment(struct sock *sk, struct sk_buff *skb,
630 632
631 err = 0; 633 err = 0;
632 offset = 0; 634 offset = 0;
633 frag = skb_shinfo(skb)->frag_list;
634 skb_frag_list_init(skb);
635 /* BUILD HEADER */ 635 /* BUILD HEADER */
636 636
637 *prevhdr = NEXTHDR_FRAGMENT; 637 *prevhdr = NEXTHDR_FRAGMENT;
@@ -639,8 +639,11 @@ int ip6_fragment(struct sock *sk, struct sk_buff *skb,
639 if (!tmp_hdr) { 639 if (!tmp_hdr) {
640 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), 640 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
641 IPSTATS_MIB_FRAGFAILS); 641 IPSTATS_MIB_FRAGFAILS);
642 return -ENOMEM; 642 err = -ENOMEM;
643 goto fail;
643 } 644 }
645 frag = skb_shinfo(skb)->frag_list;
646 skb_frag_list_init(skb);
644 647
645 __skb_pull(skb, hlen); 648 __skb_pull(skb, hlen);
646 fh = (struct frag_hdr *)__skb_push(skb, sizeof(struct frag_hdr)); 649 fh = (struct frag_hdr *)__skb_push(skb, sizeof(struct frag_hdr));
@@ -737,7 +740,6 @@ slow_path:
737 */ 740 */
738 741
739 *prevhdr = NEXTHDR_FRAGMENT; 742 *prevhdr = NEXTHDR_FRAGMENT;
740 hroom = LL_RESERVED_SPACE(rt->dst.dev);
741 troom = rt->dst.dev->needed_tailroom; 743 troom = rt->dst.dev->needed_tailroom;
742 744
743 /* 745 /*
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index b0ab420612bc..eabffbb89795 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -126,36 +126,92 @@ static struct net_device_stats *ip6_get_stats(struct net_device *dev)
126 * Locking : hash tables are protected by RCU and RTNL 126 * Locking : hash tables are protected by RCU and RTNL
127 */ 127 */
128 128
129struct dst_entry *ip6_tnl_dst_check(struct ip6_tnl *t) 129static void ip6_tnl_per_cpu_dst_set(struct ip6_tnl_dst *idst,
130 struct dst_entry *dst)
130{ 131{
131 struct dst_entry *dst = t->dst_cache; 132 write_seqlock_bh(&idst->lock);
133 dst_release(rcu_dereference_protected(
134 idst->dst,
135 lockdep_is_held(&idst->lock.lock)));
136 if (dst) {
137 dst_hold(dst);
138 idst->cookie = rt6_get_cookie((struct rt6_info *)dst);
139 } else {
140 idst->cookie = 0;
141 }
142 rcu_assign_pointer(idst->dst, dst);
143 write_sequnlock_bh(&idst->lock);
144}
145
146struct dst_entry *ip6_tnl_dst_get(struct ip6_tnl *t)
147{
148 struct ip6_tnl_dst *idst;
149 struct dst_entry *dst;
150 unsigned int seq;
151 u32 cookie;
132 152
133 if (dst && dst->obsolete && 153 idst = raw_cpu_ptr(t->dst_cache);
134 !dst->ops->check(dst, t->dst_cookie)) { 154
135 t->dst_cache = NULL; 155 rcu_read_lock();
156 do {
157 seq = read_seqbegin(&idst->lock);
158 dst = rcu_dereference(idst->dst);
159 cookie = idst->cookie;
160 } while (read_seqretry(&idst->lock, seq));
161
162 if (dst && !atomic_inc_not_zero(&dst->__refcnt))
163 dst = NULL;
164 rcu_read_unlock();
165
166 if (dst && dst->obsolete && !dst->ops->check(dst, cookie)) {
167 ip6_tnl_per_cpu_dst_set(idst, NULL);
136 dst_release(dst); 168 dst_release(dst);
137 return NULL; 169 dst = NULL;
138 } 170 }
139
140 return dst; 171 return dst;
141} 172}
142EXPORT_SYMBOL_GPL(ip6_tnl_dst_check); 173EXPORT_SYMBOL_GPL(ip6_tnl_dst_get);
143 174
144void ip6_tnl_dst_reset(struct ip6_tnl *t) 175void ip6_tnl_dst_reset(struct ip6_tnl *t)
145{ 176{
146 dst_release(t->dst_cache); 177 int i;
147 t->dst_cache = NULL; 178
179 for_each_possible_cpu(i)
180 ip6_tnl_per_cpu_dst_set(raw_cpu_ptr(t->dst_cache), NULL);
148} 181}
149EXPORT_SYMBOL_GPL(ip6_tnl_dst_reset); 182EXPORT_SYMBOL_GPL(ip6_tnl_dst_reset);
150 183
151void ip6_tnl_dst_store(struct ip6_tnl *t, struct dst_entry *dst) 184void ip6_tnl_dst_set(struct ip6_tnl *t, struct dst_entry *dst)
185{
186 ip6_tnl_per_cpu_dst_set(raw_cpu_ptr(t->dst_cache), dst);
187
188}
189EXPORT_SYMBOL_GPL(ip6_tnl_dst_set);
190
191void ip6_tnl_dst_destroy(struct ip6_tnl *t)
152{ 192{
153 struct rt6_info *rt = (struct rt6_info *) dst; 193 if (!t->dst_cache)
154 t->dst_cookie = rt6_get_cookie(rt); 194 return;
155 dst_release(t->dst_cache); 195
156 t->dst_cache = dst; 196 ip6_tnl_dst_reset(t);
197 free_percpu(t->dst_cache);
157} 198}
158EXPORT_SYMBOL_GPL(ip6_tnl_dst_store); 199EXPORT_SYMBOL_GPL(ip6_tnl_dst_destroy);
200
201int ip6_tnl_dst_init(struct ip6_tnl *t)
202{
203 int i;
204
205 t->dst_cache = alloc_percpu(struct ip6_tnl_dst);
206 if (!t->dst_cache)
207 return -ENOMEM;
208
209 for_each_possible_cpu(i)
210 seqlock_init(&per_cpu_ptr(t->dst_cache, i)->lock);
211
212 return 0;
213}
214EXPORT_SYMBOL_GPL(ip6_tnl_dst_init);
159 215
160/** 216/**
161 * ip6_tnl_lookup - fetch tunnel matching the end-point addresses 217 * ip6_tnl_lookup - fetch tunnel matching the end-point addresses
@@ -271,6 +327,9 @@ ip6_tnl_unlink(struct ip6_tnl_net *ip6n, struct ip6_tnl *t)
271 327
272static void ip6_dev_free(struct net_device *dev) 328static void ip6_dev_free(struct net_device *dev)
273{ 329{
330 struct ip6_tnl *t = netdev_priv(dev);
331
332 ip6_tnl_dst_destroy(t);
274 free_percpu(dev->tstats); 333 free_percpu(dev->tstats);
275 free_netdev(dev); 334 free_netdev(dev);
276} 335}
@@ -510,14 +569,14 @@ ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt,
510 struct ipv6_tlv_tnl_enc_lim *tel; 569 struct ipv6_tlv_tnl_enc_lim *tel;
511 __u32 mtu; 570 __u32 mtu;
512 case ICMPV6_DEST_UNREACH: 571 case ICMPV6_DEST_UNREACH:
513 net_warn_ratelimited("%s: Path to destination invalid or inactive!\n", 572 net_dbg_ratelimited("%s: Path to destination invalid or inactive!\n",
514 t->parms.name); 573 t->parms.name);
515 rel_msg = 1; 574 rel_msg = 1;
516 break; 575 break;
517 case ICMPV6_TIME_EXCEED: 576 case ICMPV6_TIME_EXCEED:
518 if ((*code) == ICMPV6_EXC_HOPLIMIT) { 577 if ((*code) == ICMPV6_EXC_HOPLIMIT) {
519 net_warn_ratelimited("%s: Too small hop limit or routing loop in tunnel!\n", 578 net_dbg_ratelimited("%s: Too small hop limit or routing loop in tunnel!\n",
520 t->parms.name); 579 t->parms.name);
521 rel_msg = 1; 580 rel_msg = 1;
522 } 581 }
523 break; 582 break;
@@ -529,13 +588,13 @@ ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt,
529 if (teli && teli == *info - 2) { 588 if (teli && teli == *info - 2) {
530 tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli]; 589 tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli];
531 if (tel->encap_limit == 0) { 590 if (tel->encap_limit == 0) {
532 net_warn_ratelimited("%s: Too small encapsulation limit or routing loop in tunnel!\n", 591 net_dbg_ratelimited("%s: Too small encapsulation limit or routing loop in tunnel!\n",
533 t->parms.name); 592 t->parms.name);
534 rel_msg = 1; 593 rel_msg = 1;
535 } 594 }
536 } else { 595 } else {
537 net_warn_ratelimited("%s: Recipient unable to parse tunneled packet!\n", 596 net_dbg_ratelimited("%s: Recipient unable to parse tunneled packet!\n",
538 t->parms.name); 597 t->parms.name);
539 } 598 }
540 break; 599 break;
541 case ICMPV6_PKT_TOOBIG: 600 case ICMPV6_PKT_TOOBIG:
@@ -1010,23 +1069,23 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
1010 memcpy(&fl6->daddr, addr6, sizeof(fl6->daddr)); 1069 memcpy(&fl6->daddr, addr6, sizeof(fl6->daddr));
1011 neigh_release(neigh); 1070 neigh_release(neigh);
1012 } else if (!fl6->flowi6_mark) 1071 } else if (!fl6->flowi6_mark)
1013 dst = ip6_tnl_dst_check(t); 1072 dst = ip6_tnl_dst_get(t);
1014 1073
1015 if (!ip6_tnl_xmit_ctl(t, &fl6->saddr, &fl6->daddr)) 1074 if (!ip6_tnl_xmit_ctl(t, &fl6->saddr, &fl6->daddr))
1016 goto tx_err_link_failure; 1075 goto tx_err_link_failure;
1017 1076
1018 if (!dst) { 1077 if (!dst) {
1019 ndst = ip6_route_output(net, NULL, fl6); 1078 dst = ip6_route_output(net, NULL, fl6);
1020 1079
1021 if (ndst->error) 1080 if (dst->error)
1022 goto tx_err_link_failure; 1081 goto tx_err_link_failure;
1023 ndst = xfrm_lookup(net, ndst, flowi6_to_flowi(fl6), NULL, 0); 1082 dst = xfrm_lookup(net, dst, flowi6_to_flowi(fl6), NULL, 0);
1024 if (IS_ERR(ndst)) { 1083 if (IS_ERR(dst)) {
1025 err = PTR_ERR(ndst); 1084 err = PTR_ERR(dst);
1026 ndst = NULL; 1085 dst = NULL;
1027 goto tx_err_link_failure; 1086 goto tx_err_link_failure;
1028 } 1087 }
1029 dst = ndst; 1088 ndst = dst;
1030 } 1089 }
1031 1090
1032 tdev = dst->dev; 1091 tdev = dst->dev;
@@ -1072,12 +1131,11 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
1072 consume_skb(skb); 1131 consume_skb(skb);
1073 skb = new_skb; 1132 skb = new_skb;
1074 } 1133 }
1075 if (fl6->flowi6_mark) { 1134
1076 skb_dst_set(skb, dst); 1135 if (!fl6->flowi6_mark && ndst)
1077 ndst = NULL; 1136 ip6_tnl_dst_set(t, ndst);
1078 } else { 1137 skb_dst_set(skb, dst);
1079 skb_dst_set_noref(skb, dst); 1138
1080 }
1081 skb->transport_header = skb->network_header; 1139 skb->transport_header = skb->network_header;
1082 1140
1083 proto = fl6->flowi6_proto; 1141 proto = fl6->flowi6_proto;
@@ -1101,14 +1159,12 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
1101 ipv6h->saddr = fl6->saddr; 1159 ipv6h->saddr = fl6->saddr;
1102 ipv6h->daddr = fl6->daddr; 1160 ipv6h->daddr = fl6->daddr;
1103 ip6tunnel_xmit(NULL, skb, dev); 1161 ip6tunnel_xmit(NULL, skb, dev);
1104 if (ndst)
1105 ip6_tnl_dst_store(t, ndst);
1106 return 0; 1162 return 0;
1107tx_err_link_failure: 1163tx_err_link_failure:
1108 stats->tx_carrier_errors++; 1164 stats->tx_carrier_errors++;
1109 dst_link_failure(skb); 1165 dst_link_failure(skb);
1110tx_err_dst_release: 1166tx_err_dst_release:
1111 dst_release(ndst); 1167 dst_release(dst);
1112 return err; 1168 return err;
1113} 1169}
1114 1170
@@ -1573,12 +1629,21 @@ static inline int
1573ip6_tnl_dev_init_gen(struct net_device *dev) 1629ip6_tnl_dev_init_gen(struct net_device *dev)
1574{ 1630{
1575 struct ip6_tnl *t = netdev_priv(dev); 1631 struct ip6_tnl *t = netdev_priv(dev);
1632 int ret;
1576 1633
1577 t->dev = dev; 1634 t->dev = dev;
1578 t->net = dev_net(dev); 1635 t->net = dev_net(dev);
1579 dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); 1636 dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
1580 if (!dev->tstats) 1637 if (!dev->tstats)
1581 return -ENOMEM; 1638 return -ENOMEM;
1639
1640 ret = ip6_tnl_dst_init(t);
1641 if (ret) {
1642 free_percpu(dev->tstats);
1643 dev->tstats = NULL;
1644 return ret;
1645 }
1646
1582 return 0; 1647 return 0;
1583} 1648}
1584 1649
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 118f8fa1a809..6fbf6fdde7e7 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -1298,8 +1298,7 @@ static void ip6_link_failure(struct sk_buff *skb)
1298 if (rt) { 1298 if (rt) {
1299 if (rt->rt6i_flags & RTF_CACHE) { 1299 if (rt->rt6i_flags & RTF_CACHE) {
1300 dst_hold(&rt->dst); 1300 dst_hold(&rt->dst);
1301 if (ip6_del_rt(rt)) 1301 ip6_del_rt(rt);
1302 dst_free(&rt->dst);
1303 } else if (rt->rt6i_node && (rt->rt6i_flags & RTF_DEFAULT)) { 1302 } else if (rt->rt6i_node && (rt->rt6i_flags & RTF_DEFAULT)) {
1304 rt->rt6i_node->fn_sernum = -1; 1303 rt->rt6i_node->fn_sernum = -1;
1305 } 1304 }
@@ -1862,9 +1861,11 @@ int ip6_route_info_create(struct fib6_config *cfg, struct rt6_info **rt_ret)
1862 rt->dst.input = ip6_pkt_prohibit; 1861 rt->dst.input = ip6_pkt_prohibit;
1863 break; 1862 break;
1864 case RTN_THROW: 1863 case RTN_THROW:
1864 case RTN_UNREACHABLE:
1865 default: 1865 default:
1866 rt->dst.error = (cfg->fc_type == RTN_THROW) ? -EAGAIN 1866 rt->dst.error = (cfg->fc_type == RTN_THROW) ? -EAGAIN
1867 : -ENETUNREACH; 1867 : (cfg->fc_type == RTN_UNREACHABLE)
1868 ? -EHOSTUNREACH : -ENETUNREACH;
1868 rt->dst.output = ip6_pkt_discard_out; 1869 rt->dst.output = ip6_pkt_discard_out;
1869 rt->dst.input = ip6_pkt_discard; 1870 rt->dst.input = ip6_pkt_discard;
1870 break; 1871 break;
@@ -2004,7 +2005,8 @@ static int __ip6_del_rt(struct rt6_info *rt, struct nl_info *info)
2004 struct fib6_table *table; 2005 struct fib6_table *table;
2005 struct net *net = dev_net(rt->dst.dev); 2006 struct net *net = dev_net(rt->dst.dev);
2006 2007
2007 if (rt == net->ipv6.ip6_null_entry) { 2008 if (rt == net->ipv6.ip6_null_entry ||
2009 rt->dst.flags & DST_NOCACHE) {
2008 err = -ENOENT; 2010 err = -ENOENT;
2009 goto out; 2011 goto out;
2010 } 2012 }
@@ -2491,6 +2493,7 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
2491 rt->rt6i_dst.addr = *addr; 2493 rt->rt6i_dst.addr = *addr;
2492 rt->rt6i_dst.plen = 128; 2494 rt->rt6i_dst.plen = 128;
2493 rt->rt6i_table = fib6_get_table(net, RT6_TABLE_LOCAL); 2495 rt->rt6i_table = fib6_get_table(net, RT6_TABLE_LOCAL);
2496 rt->dst.flags |= DST_NOCACHE;
2494 2497
2495 atomic_set(&rt->dst.__refcnt, 1); 2498 atomic_set(&rt->dst.__refcnt, 1);
2496 2499
@@ -3279,7 +3282,8 @@ errout:
3279 return err; 3282 return err;
3280} 3283}
3281 3284
3282void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info) 3285void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info,
3286 unsigned int nlm_flags)
3283{ 3287{
3284 struct sk_buff *skb; 3288 struct sk_buff *skb;
3285 struct net *net = info->nl_net; 3289 struct net *net = info->nl_net;
@@ -3294,7 +3298,7 @@ void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info)
3294 goto errout; 3298 goto errout;
3295 3299
3296 err = rt6_fill_node(net, skb, rt, NULL, NULL, 0, 3300 err = rt6_fill_node(net, skb, rt, NULL, NULL, 0,
3297 event, info->portid, seq, 0, 0, 0); 3301 event, info->portid, seq, 0, 0, nlm_flags);
3298 if (err < 0) { 3302 if (err < 0) {
3299 /* -EMSGSIZE implies BUG in rt6_nlmsg_size() */ 3303 /* -EMSGSIZE implies BUG in rt6_nlmsg_size() */
3300 WARN_ON(err == -EMSGSIZE); 3304 WARN_ON(err == -EMSGSIZE);
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 17b1fe961c5d..7a77a1470f25 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -2474,6 +2474,7 @@ static int ieee80211_set_cqm_rssi_config(struct wiphy *wiphy,
2474 2474
2475 bss_conf->cqm_rssi_thold = rssi_thold; 2475 bss_conf->cqm_rssi_thold = rssi_thold;
2476 bss_conf->cqm_rssi_hyst = rssi_hyst; 2476 bss_conf->cqm_rssi_hyst = rssi_hyst;
2477 sdata->u.mgd.last_cqm_event_signal = 0;
2477 2478
2478 /* tell the driver upon association, unless already associated */ 2479 /* tell the driver upon association, unless already associated */
2479 if (sdata->u.mgd.associated && 2480 if (sdata->u.mgd.associated &&
@@ -2518,15 +2519,17 @@ static int ieee80211_set_bitrate_mask(struct wiphy *wiphy,
2518 continue; 2519 continue;
2519 2520
2520 for (j = 0; j < IEEE80211_HT_MCS_MASK_LEN; j++) { 2521 for (j = 0; j < IEEE80211_HT_MCS_MASK_LEN; j++) {
2521 if (~sdata->rc_rateidx_mcs_mask[i][j]) 2522 if (~sdata->rc_rateidx_mcs_mask[i][j]) {
2522 sdata->rc_has_mcs_mask[i] = true; 2523 sdata->rc_has_mcs_mask[i] = true;
2524 break;
2525 }
2526 }
2523 2527
2524 if (~sdata->rc_rateidx_vht_mcs_mask[i][j]) 2528 for (j = 0; j < NL80211_VHT_NSS_MAX; j++) {
2529 if (~sdata->rc_rateidx_vht_mcs_mask[i][j]) {
2525 sdata->rc_has_vht_mcs_mask[i] = true; 2530 sdata->rc_has_vht_mcs_mask[i] = true;
2526
2527 if (sdata->rc_has_mcs_mask[i] &&
2528 sdata->rc_has_vht_mcs_mask[i])
2529 break; 2531 break;
2532 }
2530 } 2533 }
2531 } 2534 }
2532 2535
diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
index 675d12c69e32..a5d41dfa9f05 100644
--- a/net/netfilter/nf_log.c
+++ b/net/netfilter/nf_log.c
@@ -107,12 +107,17 @@ EXPORT_SYMBOL(nf_log_register);
107 107
108void nf_log_unregister(struct nf_logger *logger) 108void nf_log_unregister(struct nf_logger *logger)
109{ 109{
110 const struct nf_logger *log;
110 int i; 111 int i;
111 112
112 mutex_lock(&nf_log_mutex); 113 mutex_lock(&nf_log_mutex);
113 for (i = 0; i < NFPROTO_NUMPROTO; i++) 114 for (i = 0; i < NFPROTO_NUMPROTO; i++) {
114 RCU_INIT_POINTER(loggers[i][logger->type], NULL); 115 log = nft_log_dereference(loggers[i][logger->type]);
116 if (log == logger)
117 RCU_INIT_POINTER(loggers[i][logger->type], NULL);
118 }
115 mutex_unlock(&nf_log_mutex); 119 mutex_unlock(&nf_log_mutex);
120 synchronize_rcu();
116} 121}
117EXPORT_SYMBOL(nf_log_unregister); 122EXPORT_SYMBOL(nf_log_unregister);
118 123
diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
index 66def315eb56..9c8fab00164b 100644
--- a/net/netfilter/nft_compat.c
+++ b/net/netfilter/nft_compat.c
@@ -619,6 +619,13 @@ struct nft_xt {
619 619
620static struct nft_expr_type nft_match_type; 620static struct nft_expr_type nft_match_type;
621 621
622static bool nft_match_cmp(const struct xt_match *match,
623 const char *name, u32 rev, u32 family)
624{
625 return strcmp(match->name, name) == 0 && match->revision == rev &&
626 (match->family == NFPROTO_UNSPEC || match->family == family);
627}
628
622static const struct nft_expr_ops * 629static const struct nft_expr_ops *
623nft_match_select_ops(const struct nft_ctx *ctx, 630nft_match_select_ops(const struct nft_ctx *ctx,
624 const struct nlattr * const tb[]) 631 const struct nlattr * const tb[])
@@ -626,7 +633,7 @@ nft_match_select_ops(const struct nft_ctx *ctx,
626 struct nft_xt *nft_match; 633 struct nft_xt *nft_match;
627 struct xt_match *match; 634 struct xt_match *match;
628 char *mt_name; 635 char *mt_name;
629 __u32 rev, family; 636 u32 rev, family;
630 637
631 if (tb[NFTA_MATCH_NAME] == NULL || 638 if (tb[NFTA_MATCH_NAME] == NULL ||
632 tb[NFTA_MATCH_REV] == NULL || 639 tb[NFTA_MATCH_REV] == NULL ||
@@ -641,8 +648,7 @@ nft_match_select_ops(const struct nft_ctx *ctx,
641 list_for_each_entry(nft_match, &nft_match_list, head) { 648 list_for_each_entry(nft_match, &nft_match_list, head) {
642 struct xt_match *match = nft_match->ops.data; 649 struct xt_match *match = nft_match->ops.data;
643 650
644 if (strcmp(match->name, mt_name) == 0 && 651 if (nft_match_cmp(match, mt_name, rev, family)) {
645 match->revision == rev && match->family == family) {
646 if (!try_module_get(match->me)) 652 if (!try_module_get(match->me))
647 return ERR_PTR(-ENOENT); 653 return ERR_PTR(-ENOENT);
648 654
@@ -693,6 +699,13 @@ static LIST_HEAD(nft_target_list);
693 699
694static struct nft_expr_type nft_target_type; 700static struct nft_expr_type nft_target_type;
695 701
702static bool nft_target_cmp(const struct xt_target *tg,
703 const char *name, u32 rev, u32 family)
704{
705 return strcmp(tg->name, name) == 0 && tg->revision == rev &&
706 (tg->family == NFPROTO_UNSPEC || tg->family == family);
707}
708
696static const struct nft_expr_ops * 709static const struct nft_expr_ops *
697nft_target_select_ops(const struct nft_ctx *ctx, 710nft_target_select_ops(const struct nft_ctx *ctx,
698 const struct nlattr * const tb[]) 711 const struct nlattr * const tb[])
@@ -700,7 +713,7 @@ nft_target_select_ops(const struct nft_ctx *ctx,
700 struct nft_xt *nft_target; 713 struct nft_xt *nft_target;
701 struct xt_target *target; 714 struct xt_target *target;
702 char *tg_name; 715 char *tg_name;
703 __u32 rev, family; 716 u32 rev, family;
704 717
705 if (tb[NFTA_TARGET_NAME] == NULL || 718 if (tb[NFTA_TARGET_NAME] == NULL ||
706 tb[NFTA_TARGET_REV] == NULL || 719 tb[NFTA_TARGET_REV] == NULL ||
@@ -715,8 +728,7 @@ nft_target_select_ops(const struct nft_ctx *ctx,
715 list_for_each_entry(nft_target, &nft_target_list, head) { 728 list_for_each_entry(nft_target, &nft_target_list, head) {
716 struct xt_target *target = nft_target->ops.data; 729 struct xt_target *target = nft_target->ops.data;
717 730
718 if (strcmp(target->name, tg_name) == 0 && 731 if (nft_target_cmp(target, tg_name, rev, family)) {
719 target->revision == rev && target->family == family) {
720 if (!try_module_get(target->me)) 732 if (!try_module_get(target->me))
721 return ERR_PTR(-ENOENT); 733 return ERR_PTR(-ENOENT);
722 734
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 7f86d3b55060..8f060d7f9a0e 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -125,6 +125,24 @@ static inline u32 netlink_group_mask(u32 group)
125 return group ? 1 << (group - 1) : 0; 125 return group ? 1 << (group - 1) : 0;
126} 126}
127 127
128static struct sk_buff *netlink_to_full_skb(const struct sk_buff *skb,
129 gfp_t gfp_mask)
130{
131 unsigned int len = skb_end_offset(skb);
132 struct sk_buff *new;
133
134 new = alloc_skb(len, gfp_mask);
135 if (new == NULL)
136 return NULL;
137
138 NETLINK_CB(new).portid = NETLINK_CB(skb).portid;
139 NETLINK_CB(new).dst_group = NETLINK_CB(skb).dst_group;
140 NETLINK_CB(new).creds = NETLINK_CB(skb).creds;
141
142 memcpy(skb_put(new, len), skb->data, len);
143 return new;
144}
145
128int netlink_add_tap(struct netlink_tap *nt) 146int netlink_add_tap(struct netlink_tap *nt)
129{ 147{
130 if (unlikely(nt->dev->type != ARPHRD_NETLINK)) 148 if (unlikely(nt->dev->type != ARPHRD_NETLINK))
@@ -206,7 +224,11 @@ static int __netlink_deliver_tap_skb(struct sk_buff *skb,
206 int ret = -ENOMEM; 224 int ret = -ENOMEM;
207 225
208 dev_hold(dev); 226 dev_hold(dev);
209 nskb = skb_clone(skb, GFP_ATOMIC); 227
228 if (netlink_skb_is_mmaped(skb) || is_vmalloc_addr(skb->head))
229 nskb = netlink_to_full_skb(skb, GFP_ATOMIC);
230 else
231 nskb = skb_clone(skb, GFP_ATOMIC);
210 if (nskb) { 232 if (nskb) {
211 nskb->dev = dev; 233 nskb->dev = dev;
212 nskb->protocol = htons((u16) sk->sk_protocol); 234 nskb->protocol = htons((u16) sk->sk_protocol);
@@ -279,11 +301,6 @@ static void netlink_rcv_wake(struct sock *sk)
279} 301}
280 302
281#ifdef CONFIG_NETLINK_MMAP 303#ifdef CONFIG_NETLINK_MMAP
282static bool netlink_skb_is_mmaped(const struct sk_buff *skb)
283{
284 return NETLINK_CB(skb).flags & NETLINK_SKB_MMAPED;
285}
286
287static bool netlink_rx_is_mmaped(struct sock *sk) 304static bool netlink_rx_is_mmaped(struct sock *sk)
288{ 305{
289 return nlk_sk(sk)->rx_ring.pg_vec != NULL; 306 return nlk_sk(sk)->rx_ring.pg_vec != NULL;
@@ -846,7 +863,6 @@ static void netlink_ring_set_copied(struct sock *sk, struct sk_buff *skb)
846} 863}
847 864
848#else /* CONFIG_NETLINK_MMAP */ 865#else /* CONFIG_NETLINK_MMAP */
849#define netlink_skb_is_mmaped(skb) false
850#define netlink_rx_is_mmaped(sk) false 866#define netlink_rx_is_mmaped(sk) false
851#define netlink_tx_is_mmaped(sk) false 867#define netlink_tx_is_mmaped(sk) false
852#define netlink_mmap sock_no_mmap 868#define netlink_mmap sock_no_mmap
@@ -1094,8 +1110,8 @@ static int netlink_insert(struct sock *sk, u32 portid)
1094 1110
1095 lock_sock(sk); 1111 lock_sock(sk);
1096 1112
1097 err = -EBUSY; 1113 err = nlk_sk(sk)->portid == portid ? 0 : -EBUSY;
1098 if (nlk_sk(sk)->portid) 1114 if (nlk_sk(sk)->bound)
1099 goto err; 1115 goto err;
1100 1116
1101 err = -ENOMEM; 1117 err = -ENOMEM;
@@ -1115,10 +1131,14 @@ static int netlink_insert(struct sock *sk, u32 portid)
1115 err = -EOVERFLOW; 1131 err = -EOVERFLOW;
1116 if (err == -EEXIST) 1132 if (err == -EEXIST)
1117 err = -EADDRINUSE; 1133 err = -EADDRINUSE;
1118 nlk_sk(sk)->portid = 0;
1119 sock_put(sk); 1134 sock_put(sk);
1135 goto err;
1120 } 1136 }
1121 1137
1138 /* We need to ensure that the socket is hashed and visible. */
1139 smp_wmb();
1140 nlk_sk(sk)->bound = portid;
1141
1122err: 1142err:
1123 release_sock(sk); 1143 release_sock(sk);
1124 return err; 1144 return err;
@@ -1503,6 +1523,7 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
1503 struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr; 1523 struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
1504 int err; 1524 int err;
1505 long unsigned int groups = nladdr->nl_groups; 1525 long unsigned int groups = nladdr->nl_groups;
1526 bool bound;
1506 1527
1507 if (addr_len < sizeof(struct sockaddr_nl)) 1528 if (addr_len < sizeof(struct sockaddr_nl))
1508 return -EINVAL; 1529 return -EINVAL;
@@ -1519,9 +1540,14 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
1519 return err; 1540 return err;
1520 } 1541 }
1521 1542
1522 if (nlk->portid) 1543 bound = nlk->bound;
1544 if (bound) {
1545 /* Ensure nlk->portid is up-to-date. */
1546 smp_rmb();
1547
1523 if (nladdr->nl_pid != nlk->portid) 1548 if (nladdr->nl_pid != nlk->portid)
1524 return -EINVAL; 1549 return -EINVAL;
1550 }
1525 1551
1526 if (nlk->netlink_bind && groups) { 1552 if (nlk->netlink_bind && groups) {
1527 int group; 1553 int group;
@@ -1537,7 +1563,10 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
1537 } 1563 }
1538 } 1564 }
1539 1565
1540 if (!nlk->portid) { 1566 /* No need for barriers here as we return to user-space without
1567 * using any of the bound attributes.
1568 */
1569 if (!bound) {
1541 err = nladdr->nl_pid ? 1570 err = nladdr->nl_pid ?
1542 netlink_insert(sk, nladdr->nl_pid) : 1571 netlink_insert(sk, nladdr->nl_pid) :
1543 netlink_autobind(sock); 1572 netlink_autobind(sock);
@@ -1585,7 +1614,10 @@ static int netlink_connect(struct socket *sock, struct sockaddr *addr,
1585 !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND)) 1614 !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND))
1586 return -EPERM; 1615 return -EPERM;
1587 1616
1588 if (!nlk->portid) 1617 /* No need for barriers here as we return to user-space without
1618 * using any of the bound attributes.
1619 */
1620 if (!nlk->bound)
1589 err = netlink_autobind(sock); 1621 err = netlink_autobind(sock);
1590 1622
1591 if (err == 0) { 1623 if (err == 0) {
@@ -2426,10 +2458,13 @@ static int netlink_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
2426 dst_group = nlk->dst_group; 2458 dst_group = nlk->dst_group;
2427 } 2459 }
2428 2460
2429 if (!nlk->portid) { 2461 if (!nlk->bound) {
2430 err = netlink_autobind(sock); 2462 err = netlink_autobind(sock);
2431 if (err) 2463 if (err)
2432 goto out; 2464 goto out;
2465 } else {
2466 /* Ensure nlk is hashed and visible. */
2467 smp_rmb();
2433 } 2468 }
2434 2469
2435 /* It's a really convoluted way for userland to ask for mmaped 2470 /* It's a really convoluted way for userland to ask for mmaped
diff --git a/net/netlink/af_netlink.h b/net/netlink/af_netlink.h
index 89008405d6b4..14437d9b1965 100644
--- a/net/netlink/af_netlink.h
+++ b/net/netlink/af_netlink.h
@@ -35,6 +35,7 @@ struct netlink_sock {
35 unsigned long state; 35 unsigned long state;
36 size_t max_recvmsg_len; 36 size_t max_recvmsg_len;
37 wait_queue_head_t wait; 37 wait_queue_head_t wait;
38 bool bound;
38 bool cb_running; 39 bool cb_running;
39 struct netlink_callback cb; 40 struct netlink_callback cb;
40 struct mutex *cb_mutex; 41 struct mutex *cb_mutex;
@@ -59,6 +60,15 @@ static inline struct netlink_sock *nlk_sk(struct sock *sk)
59 return container_of(sk, struct netlink_sock, sk); 60 return container_of(sk, struct netlink_sock, sk);
60} 61}
61 62
63static inline bool netlink_skb_is_mmaped(const struct sk_buff *skb)
64{
65#ifdef CONFIG_NETLINK_MMAP
66 return NETLINK_CB(skb).flags & NETLINK_SKB_MMAPED;
67#else
68 return false;
69#endif /* CONFIG_NETLINK_MMAP */
70}
71
62struct netlink_table { 72struct netlink_table {
63 struct rhashtable hash; 73 struct rhashtable hash;
64 struct hlist_head mc_list; 74 struct hlist_head mc_list;
diff --git a/net/openvswitch/Kconfig b/net/openvswitch/Kconfig
index 2a071f470d57..d143aa9f6654 100644
--- a/net/openvswitch/Kconfig
+++ b/net/openvswitch/Kconfig
@@ -5,7 +5,8 @@
5config OPENVSWITCH 5config OPENVSWITCH
6 tristate "Open vSwitch" 6 tristate "Open vSwitch"
7 depends on INET 7 depends on INET
8 depends on (!NF_CONNTRACK || NF_CONNTRACK) 8 depends on !NF_CONNTRACK || \
9 (NF_CONNTRACK && (!NF_DEFRAG_IPV6 || NF_DEFRAG_IPV6))
9 select LIBCRC32C 10 select LIBCRC32C
10 select MPLS 11 select MPLS
11 select NET_MPLS_GSO 12 select NET_MPLS_GSO
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
index aaf5cbd6d9ae..eb759e3a88ca 100644
--- a/net/openvswitch/conntrack.c
+++ b/net/openvswitch/conntrack.c
@@ -275,13 +275,15 @@ static int ovs_ct_helper(struct sk_buff *skb, u16 proto)
275 case NFPROTO_IPV6: { 275 case NFPROTO_IPV6: {
276 u8 nexthdr = ipv6_hdr(skb)->nexthdr; 276 u8 nexthdr = ipv6_hdr(skb)->nexthdr;
277 __be16 frag_off; 277 __be16 frag_off;
278 int ofs;
278 279
279 protoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), 280 ofs = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr,
280 &nexthdr, &frag_off); 281 &frag_off);
281 if (protoff < 0 || (frag_off & htons(~0x7)) != 0) { 282 if (ofs < 0 || (frag_off & htons(~0x7)) != 0) {
282 pr_debug("proto header not found\n"); 283 pr_debug("proto header not found\n");
283 return NF_ACCEPT; 284 return NF_ACCEPT;
284 } 285 }
286 protoff = ofs;
285 break; 287 break;
286 } 288 }
287 default: 289 default:
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index 2913594c5123..a75828091e21 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -951,7 +951,7 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
951 if (error) 951 if (error)
952 goto err_kfree_flow; 952 goto err_kfree_flow;
953 953
954 ovs_flow_mask_key(&new_flow->key, &key, &mask); 954 ovs_flow_mask_key(&new_flow->key, &key, true, &mask);
955 955
956 /* Extract flow identifier. */ 956 /* Extract flow identifier. */
957 error = ovs_nla_get_identifier(&new_flow->id, a[OVS_FLOW_ATTR_UFID], 957 error = ovs_nla_get_identifier(&new_flow->id, a[OVS_FLOW_ATTR_UFID],
@@ -1079,7 +1079,7 @@ static struct sw_flow_actions *get_flow_actions(struct net *net,
1079 struct sw_flow_key masked_key; 1079 struct sw_flow_key masked_key;
1080 int error; 1080 int error;
1081 1081
1082 ovs_flow_mask_key(&masked_key, key, mask); 1082 ovs_flow_mask_key(&masked_key, key, true, mask);
1083 error = ovs_nla_copy_actions(net, a, &masked_key, &acts, log); 1083 error = ovs_nla_copy_actions(net, a, &masked_key, &acts, log);
1084 if (error) { 1084 if (error) {
1085 OVS_NLERR(log, 1085 OVS_NLERR(log,
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
index c92d6a262bc5..5c030a4d7338 100644
--- a/net/openvswitch/flow_netlink.c
+++ b/net/openvswitch/flow_netlink.c
@@ -57,6 +57,7 @@ struct ovs_len_tbl {
57}; 57};
58 58
59#define OVS_ATTR_NESTED -1 59#define OVS_ATTR_NESTED -1
60#define OVS_ATTR_VARIABLE -2
60 61
61static void update_range(struct sw_flow_match *match, 62static void update_range(struct sw_flow_match *match,
62 size_t offset, size_t size, bool is_mask) 63 size_t offset, size_t size, bool is_mask)
@@ -304,6 +305,10 @@ size_t ovs_key_attr_size(void)
304 + nla_total_size(28); /* OVS_KEY_ATTR_ND */ 305 + nla_total_size(28); /* OVS_KEY_ATTR_ND */
305} 306}
306 307
308static const struct ovs_len_tbl ovs_vxlan_ext_key_lens[OVS_VXLAN_EXT_MAX + 1] = {
309 [OVS_VXLAN_EXT_GBP] = { .len = sizeof(u32) },
310};
311
307static const struct ovs_len_tbl ovs_tunnel_key_lens[OVS_TUNNEL_KEY_ATTR_MAX + 1] = { 312static const struct ovs_len_tbl ovs_tunnel_key_lens[OVS_TUNNEL_KEY_ATTR_MAX + 1] = {
308 [OVS_TUNNEL_KEY_ATTR_ID] = { .len = sizeof(u64) }, 313 [OVS_TUNNEL_KEY_ATTR_ID] = { .len = sizeof(u64) },
309 [OVS_TUNNEL_KEY_ATTR_IPV4_SRC] = { .len = sizeof(u32) }, 314 [OVS_TUNNEL_KEY_ATTR_IPV4_SRC] = { .len = sizeof(u32) },
@@ -315,8 +320,9 @@ static const struct ovs_len_tbl ovs_tunnel_key_lens[OVS_TUNNEL_KEY_ATTR_MAX + 1]
315 [OVS_TUNNEL_KEY_ATTR_TP_SRC] = { .len = sizeof(u16) }, 320 [OVS_TUNNEL_KEY_ATTR_TP_SRC] = { .len = sizeof(u16) },
316 [OVS_TUNNEL_KEY_ATTR_TP_DST] = { .len = sizeof(u16) }, 321 [OVS_TUNNEL_KEY_ATTR_TP_DST] = { .len = sizeof(u16) },
317 [OVS_TUNNEL_KEY_ATTR_OAM] = { .len = 0 }, 322 [OVS_TUNNEL_KEY_ATTR_OAM] = { .len = 0 },
318 [OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS] = { .len = OVS_ATTR_NESTED }, 323 [OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS] = { .len = OVS_ATTR_VARIABLE },
319 [OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS] = { .len = OVS_ATTR_NESTED }, 324 [OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS] = { .len = OVS_ATTR_NESTED,
325 .next = ovs_vxlan_ext_key_lens },
320}; 326};
321 327
322/* The size of the argument for each %OVS_KEY_ATTR_* Netlink attribute. */ 328/* The size of the argument for each %OVS_KEY_ATTR_* Netlink attribute. */
@@ -349,6 +355,13 @@ static const struct ovs_len_tbl ovs_key_lens[OVS_KEY_ATTR_MAX + 1] = {
349 [OVS_KEY_ATTR_CT_LABEL] = { .len = sizeof(struct ovs_key_ct_label) }, 355 [OVS_KEY_ATTR_CT_LABEL] = { .len = sizeof(struct ovs_key_ct_label) },
350}; 356};
351 357
358static bool check_attr_len(unsigned int attr_len, unsigned int expected_len)
359{
360 return expected_len == attr_len ||
361 expected_len == OVS_ATTR_NESTED ||
362 expected_len == OVS_ATTR_VARIABLE;
363}
364
352static bool is_all_zero(const u8 *fp, size_t size) 365static bool is_all_zero(const u8 *fp, size_t size)
353{ 366{
354 int i; 367 int i;
@@ -388,7 +401,7 @@ static int __parse_flow_nlattrs(const struct nlattr *attr,
388 } 401 }
389 402
390 expected_len = ovs_key_lens[type].len; 403 expected_len = ovs_key_lens[type].len;
391 if (nla_len(nla) != expected_len && expected_len != OVS_ATTR_NESTED) { 404 if (!check_attr_len(nla_len(nla), expected_len)) {
392 OVS_NLERR(log, "Key %d has unexpected len %d expected %d", 405 OVS_NLERR(log, "Key %d has unexpected len %d expected %d",
393 type, nla_len(nla), expected_len); 406 type, nla_len(nla), expected_len);
394 return -EINVAL; 407 return -EINVAL;
@@ -473,29 +486,50 @@ static int genev_tun_opt_from_nlattr(const struct nlattr *a,
473 return 0; 486 return 0;
474} 487}
475 488
476static const struct nla_policy vxlan_opt_policy[OVS_VXLAN_EXT_MAX + 1] = { 489static int vxlan_tun_opt_from_nlattr(const struct nlattr *attr,
477 [OVS_VXLAN_EXT_GBP] = { .type = NLA_U32 },
478};
479
480static int vxlan_tun_opt_from_nlattr(const struct nlattr *a,
481 struct sw_flow_match *match, bool is_mask, 490 struct sw_flow_match *match, bool is_mask,
482 bool log) 491 bool log)
483{ 492{
484 struct nlattr *tb[OVS_VXLAN_EXT_MAX+1]; 493 struct nlattr *a;
494 int rem;
485 unsigned long opt_key_offset; 495 unsigned long opt_key_offset;
486 struct vxlan_metadata opts; 496 struct vxlan_metadata opts;
487 int err;
488 497
489 BUILD_BUG_ON(sizeof(opts) > sizeof(match->key->tun_opts)); 498 BUILD_BUG_ON(sizeof(opts) > sizeof(match->key->tun_opts));
490 499
491 err = nla_parse_nested(tb, OVS_VXLAN_EXT_MAX, a, vxlan_opt_policy);
492 if (err < 0)
493 return err;
494
495 memset(&opts, 0, sizeof(opts)); 500 memset(&opts, 0, sizeof(opts));
501 nla_for_each_nested(a, attr, rem) {
502 int type = nla_type(a);
496 503
497 if (tb[OVS_VXLAN_EXT_GBP]) 504 if (type > OVS_VXLAN_EXT_MAX) {
498 opts.gbp = nla_get_u32(tb[OVS_VXLAN_EXT_GBP]); 505 OVS_NLERR(log, "VXLAN extension %d out of range max %d",
506 type, OVS_VXLAN_EXT_MAX);
507 return -EINVAL;
508 }
509
510 if (!check_attr_len(nla_len(a),
511 ovs_vxlan_ext_key_lens[type].len)) {
512 OVS_NLERR(log, "VXLAN extension %d has unexpected len %d expected %d",
513 type, nla_len(a),
514 ovs_vxlan_ext_key_lens[type].len);
515 return -EINVAL;
516 }
517
518 switch (type) {
519 case OVS_VXLAN_EXT_GBP:
520 opts.gbp = nla_get_u32(a);
521 break;
522 default:
523 OVS_NLERR(log, "Unknown VXLAN extension attribute %d",
524 type);
525 return -EINVAL;
526 }
527 }
528 if (rem) {
529 OVS_NLERR(log, "VXLAN extension message has %d unknown bytes.",
530 rem);
531 return -EINVAL;
532 }
499 533
500 if (!is_mask) 534 if (!is_mask)
501 SW_FLOW_KEY_PUT(match, tun_opts_len, sizeof(opts), false); 535 SW_FLOW_KEY_PUT(match, tun_opts_len, sizeof(opts), false);
@@ -528,8 +562,8 @@ static int ipv4_tun_from_nlattr(const struct nlattr *attr,
528 return -EINVAL; 562 return -EINVAL;
529 } 563 }
530 564
531 if (ovs_tunnel_key_lens[type].len != nla_len(a) && 565 if (!check_attr_len(nla_len(a),
532 ovs_tunnel_key_lens[type].len != OVS_ATTR_NESTED) { 566 ovs_tunnel_key_lens[type].len)) {
533 OVS_NLERR(log, "Tunnel attr %d has unexpected len %d expected %d", 567 OVS_NLERR(log, "Tunnel attr %d has unexpected len %d expected %d",
534 type, nla_len(a), ovs_tunnel_key_lens[type].len); 568 type, nla_len(a), ovs_tunnel_key_lens[type].len);
535 return -EINVAL; 569 return -EINVAL;
@@ -1052,10 +1086,13 @@ static void nlattr_set(struct nlattr *attr, u8 val,
1052 1086
1053 /* The nlattr stream should already have been validated */ 1087 /* The nlattr stream should already have been validated */
1054 nla_for_each_nested(nla, attr, rem) { 1088 nla_for_each_nested(nla, attr, rem) {
1055 if (tbl && tbl[nla_type(nla)].len == OVS_ATTR_NESTED) 1089 if (tbl[nla_type(nla)].len == OVS_ATTR_NESTED) {
1056 nlattr_set(nla, val, tbl[nla_type(nla)].next); 1090 if (tbl[nla_type(nla)].next)
1057 else 1091 tbl = tbl[nla_type(nla)].next;
1092 nlattr_set(nla, val, tbl);
1093 } else {
1058 memset(nla_data(nla), val, nla_len(nla)); 1094 memset(nla_data(nla), val, nla_len(nla));
1095 }
1059 } 1096 }
1060} 1097}
1061 1098
@@ -1922,8 +1959,7 @@ static int validate_set(const struct nlattr *a,
1922 key_len /= 2; 1959 key_len /= 2;
1923 1960
1924 if (key_type > OVS_KEY_ATTR_MAX || 1961 if (key_type > OVS_KEY_ATTR_MAX ||
1925 (ovs_key_lens[key_type].len != key_len && 1962 !check_attr_len(key_len, ovs_key_lens[key_type].len))
1926 ovs_key_lens[key_type].len != OVS_ATTR_NESTED))
1927 return -EINVAL; 1963 return -EINVAL;
1928 1964
1929 if (masked && !validate_masked(nla_data(ovs_key), key_len)) 1965 if (masked && !validate_masked(nla_data(ovs_key), key_len))
diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c
index d22d8e948d0f..f2ea83ba4763 100644
--- a/net/openvswitch/flow_table.c
+++ b/net/openvswitch/flow_table.c
@@ -57,20 +57,21 @@ static u16 range_n_bytes(const struct sw_flow_key_range *range)
57} 57}
58 58
59void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src, 59void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
60 const struct sw_flow_mask *mask) 60 bool full, const struct sw_flow_mask *mask)
61{ 61{
62 const long *m = (const long *)((const u8 *)&mask->key + 62 int start = full ? 0 : mask->range.start;
63 mask->range.start); 63 int len = full ? sizeof *dst : range_n_bytes(&mask->range);
64 const long *s = (const long *)((const u8 *)src + 64 const long *m = (const long *)((const u8 *)&mask->key + start);
65 mask->range.start); 65 const long *s = (const long *)((const u8 *)src + start);
66 long *d = (long *)((u8 *)dst + mask->range.start); 66 long *d = (long *)((u8 *)dst + start);
67 int i; 67 int i;
68 68
69 /* The memory outside of the 'mask->range' are not set since 69 /* If 'full' is true then all of 'dst' is fully initialized. Otherwise,
70 * further operations on 'dst' only uses contents within 70 * if 'full' is false the memory outside of the 'mask->range' is left
71 * 'mask->range'. 71 * uninitialized. This can be used as an optimization when further
72 * operations on 'dst' only use contents within 'mask->range'.
72 */ 73 */
73 for (i = 0; i < range_n_bytes(&mask->range); i += sizeof(long)) 74 for (i = 0; i < len; i += sizeof(long))
74 *d++ = *s++ & *m++; 75 *d++ = *s++ & *m++;
75} 76}
76 77
@@ -475,7 +476,7 @@ static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
475 u32 hash; 476 u32 hash;
476 struct sw_flow_key masked_key; 477 struct sw_flow_key masked_key;
477 478
478 ovs_flow_mask_key(&masked_key, unmasked, mask); 479 ovs_flow_mask_key(&masked_key, unmasked, false, mask);
479 hash = flow_hash(&masked_key, &mask->range); 480 hash = flow_hash(&masked_key, &mask->range);
480 head = find_bucket(ti, hash); 481 head = find_bucket(ti, hash);
481 hlist_for_each_entry_rcu(flow, head, flow_table.node[ti->node_ver]) { 482 hlist_for_each_entry_rcu(flow, head, flow_table.node[ti->node_ver]) {
diff --git a/net/openvswitch/flow_table.h b/net/openvswitch/flow_table.h
index 616eda10d955..2dd9900f533d 100644
--- a/net/openvswitch/flow_table.h
+++ b/net/openvswitch/flow_table.h
@@ -86,5 +86,5 @@ struct sw_flow *ovs_flow_tbl_lookup_ufid(struct flow_table *,
86bool ovs_flow_cmp(const struct sw_flow *, const struct sw_flow_match *); 86bool ovs_flow_cmp(const struct sw_flow *, const struct sw_flow_match *);
87 87
88void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src, 88void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
89 const struct sw_flow_mask *mask); 89 bool full, const struct sw_flow_mask *mask);
90#endif /* flow_table.h */ 90#endif /* flow_table.h */
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 7b8e39a22387..aa4b15c35884 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -230,6 +230,8 @@ struct packet_skb_cb {
230 } sa; 230 } sa;
231}; 231};
232 232
233#define vio_le() virtio_legacy_is_little_endian()
234
233#define PACKET_SKB_CB(__skb) ((struct packet_skb_cb *)((__skb)->cb)) 235#define PACKET_SKB_CB(__skb) ((struct packet_skb_cb *)((__skb)->cb))
234 236
235#define GET_PBDQC_FROM_RB(x) ((struct tpacket_kbdq_core *)(&(x)->prb_bdqc)) 237#define GET_PBDQC_FROM_RB(x) ((struct tpacket_kbdq_core *)(&(x)->prb_bdqc))
@@ -2680,15 +2682,15 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
2680 goto out_unlock; 2682 goto out_unlock;
2681 2683
2682 if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && 2684 if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
2683 (__virtio16_to_cpu(false, vnet_hdr.csum_start) + 2685 (__virtio16_to_cpu(vio_le(), vnet_hdr.csum_start) +
2684 __virtio16_to_cpu(false, vnet_hdr.csum_offset) + 2 > 2686 __virtio16_to_cpu(vio_le(), vnet_hdr.csum_offset) + 2 >
2685 __virtio16_to_cpu(false, vnet_hdr.hdr_len))) 2687 __virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len)))
2686 vnet_hdr.hdr_len = __cpu_to_virtio16(false, 2688 vnet_hdr.hdr_len = __cpu_to_virtio16(vio_le(),
2687 __virtio16_to_cpu(false, vnet_hdr.csum_start) + 2689 __virtio16_to_cpu(vio_le(), vnet_hdr.csum_start) +
2688 __virtio16_to_cpu(false, vnet_hdr.csum_offset) + 2); 2690 __virtio16_to_cpu(vio_le(), vnet_hdr.csum_offset) + 2);
2689 2691
2690 err = -EINVAL; 2692 err = -EINVAL;
2691 if (__virtio16_to_cpu(false, vnet_hdr.hdr_len) > len) 2693 if (__virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len) > len)
2692 goto out_unlock; 2694 goto out_unlock;
2693 2695
2694 if (vnet_hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) { 2696 if (vnet_hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
@@ -2731,7 +2733,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
2731 hlen = LL_RESERVED_SPACE(dev); 2733 hlen = LL_RESERVED_SPACE(dev);
2732 tlen = dev->needed_tailroom; 2734 tlen = dev->needed_tailroom;
2733 skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, 2735 skb = packet_alloc_skb(sk, hlen + tlen, hlen, len,
2734 __virtio16_to_cpu(false, vnet_hdr.hdr_len), 2736 __virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len),
2735 msg->msg_flags & MSG_DONTWAIT, &err); 2737 msg->msg_flags & MSG_DONTWAIT, &err);
2736 if (skb == NULL) 2738 if (skb == NULL)
2737 goto out_unlock; 2739 goto out_unlock;
@@ -2778,8 +2780,8 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
2778 2780
2779 if (po->has_vnet_hdr) { 2781 if (po->has_vnet_hdr) {
2780 if (vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) { 2782 if (vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
2781 u16 s = __virtio16_to_cpu(false, vnet_hdr.csum_start); 2783 u16 s = __virtio16_to_cpu(vio_le(), vnet_hdr.csum_start);
2782 u16 o = __virtio16_to_cpu(false, vnet_hdr.csum_offset); 2784 u16 o = __virtio16_to_cpu(vio_le(), vnet_hdr.csum_offset);
2783 if (!skb_partial_csum_set(skb, s, o)) { 2785 if (!skb_partial_csum_set(skb, s, o)) {
2784 err = -EINVAL; 2786 err = -EINVAL;
2785 goto out_free; 2787 goto out_free;
@@ -2787,7 +2789,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
2787 } 2789 }
2788 2790
2789 skb_shinfo(skb)->gso_size = 2791 skb_shinfo(skb)->gso_size =
2790 __virtio16_to_cpu(false, vnet_hdr.gso_size); 2792 __virtio16_to_cpu(vio_le(), vnet_hdr.gso_size);
2791 skb_shinfo(skb)->gso_type = gso_type; 2793 skb_shinfo(skb)->gso_type = gso_type;
2792 2794
2793 /* Header must be checked, and gso_segs computed. */ 2795 /* Header must be checked, and gso_segs computed. */
@@ -3161,9 +3163,9 @@ static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
3161 3163
3162 /* This is a hint as to how much should be linear. */ 3164 /* This is a hint as to how much should be linear. */
3163 vnet_hdr.hdr_len = 3165 vnet_hdr.hdr_len =
3164 __cpu_to_virtio16(false, skb_headlen(skb)); 3166 __cpu_to_virtio16(vio_le(), skb_headlen(skb));
3165 vnet_hdr.gso_size = 3167 vnet_hdr.gso_size =
3166 __cpu_to_virtio16(false, sinfo->gso_size); 3168 __cpu_to_virtio16(vio_le(), sinfo->gso_size);
3167 if (sinfo->gso_type & SKB_GSO_TCPV4) 3169 if (sinfo->gso_type & SKB_GSO_TCPV4)
3168 vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4; 3170 vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
3169 else if (sinfo->gso_type & SKB_GSO_TCPV6) 3171 else if (sinfo->gso_type & SKB_GSO_TCPV6)
@@ -3181,9 +3183,9 @@ static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
3181 3183
3182 if (skb->ip_summed == CHECKSUM_PARTIAL) { 3184 if (skb->ip_summed == CHECKSUM_PARTIAL) {
3183 vnet_hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM; 3185 vnet_hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
3184 vnet_hdr.csum_start = __cpu_to_virtio16(false, 3186 vnet_hdr.csum_start = __cpu_to_virtio16(vio_le(),
3185 skb_checksum_start_offset(skb)); 3187 skb_checksum_start_offset(skb));
3186 vnet_hdr.csum_offset = __cpu_to_virtio16(false, 3188 vnet_hdr.csum_offset = __cpu_to_virtio16(vio_le(),
3187 skb->csum_offset); 3189 skb->csum_offset);
3188 } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) { 3190 } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
3189 vnet_hdr.flags = VIRTIO_NET_HDR_F_DATA_VALID; 3191 vnet_hdr.flags = VIRTIO_NET_HDR_F_DATA_VALID;
diff --git a/net/sched/cls_fw.c b/net/sched/cls_fw.c
index 715e01e5910a..f23a3b68bba6 100644
--- a/net/sched/cls_fw.c
+++ b/net/sched/cls_fw.c
@@ -33,7 +33,6 @@
33 33
34struct fw_head { 34struct fw_head {
35 u32 mask; 35 u32 mask;
36 bool mask_set;
37 struct fw_filter __rcu *ht[HTSIZE]; 36 struct fw_filter __rcu *ht[HTSIZE];
38 struct rcu_head rcu; 37 struct rcu_head rcu;
39}; 38};
@@ -84,7 +83,7 @@ static int fw_classify(struct sk_buff *skb, const struct tcf_proto *tp,
84 } 83 }
85 } 84 }
86 } else { 85 } else {
87 /* old method */ 86 /* Old method: classify the packet using its skb mark. */
88 if (id && (TC_H_MAJ(id) == 0 || 87 if (id && (TC_H_MAJ(id) == 0 ||
89 !(TC_H_MAJ(id ^ tp->q->handle)))) { 88 !(TC_H_MAJ(id ^ tp->q->handle)))) {
90 res->classid = id; 89 res->classid = id;
@@ -114,14 +113,9 @@ static unsigned long fw_get(struct tcf_proto *tp, u32 handle)
114 113
115static int fw_init(struct tcf_proto *tp) 114static int fw_init(struct tcf_proto *tp)
116{ 115{
117 struct fw_head *head; 116 /* We don't allocate fw_head here, because in the old method
118 117 * we don't need it at all.
119 head = kzalloc(sizeof(struct fw_head), GFP_KERNEL); 118 */
120 if (head == NULL)
121 return -ENOBUFS;
122
123 head->mask_set = false;
124 rcu_assign_pointer(tp->root, head);
125 return 0; 119 return 0;
126} 120}
127 121
@@ -252,7 +246,7 @@ static int fw_change(struct net *net, struct sk_buff *in_skb,
252 int err; 246 int err;
253 247
254 if (!opt) 248 if (!opt)
255 return handle ? -EINVAL : 0; 249 return handle ? -EINVAL : 0; /* Succeed if it is old method. */
256 250
257 err = nla_parse_nested(tb, TCA_FW_MAX, opt, fw_policy); 251 err = nla_parse_nested(tb, TCA_FW_MAX, opt, fw_policy);
258 if (err < 0) 252 if (err < 0)
@@ -302,11 +296,17 @@ static int fw_change(struct net *net, struct sk_buff *in_skb,
302 if (!handle) 296 if (!handle)
303 return -EINVAL; 297 return -EINVAL;
304 298
305 if (!head->mask_set) { 299 if (!head) {
306 head->mask = 0xFFFFFFFF; 300 u32 mask = 0xFFFFFFFF;
307 if (tb[TCA_FW_MASK]) 301 if (tb[TCA_FW_MASK])
308 head->mask = nla_get_u32(tb[TCA_FW_MASK]); 302 mask = nla_get_u32(tb[TCA_FW_MASK]);
309 head->mask_set = true; 303
304 head = kzalloc(sizeof(*head), GFP_KERNEL);
305 if (!head)
306 return -ENOBUFS;
307 head->mask = mask;
308
309 rcu_assign_pointer(tp->root, head);
310 } 310 }
311 311
312 f = kzalloc(sizeof(struct fw_filter), GFP_KERNEL); 312 f = kzalloc(sizeof(struct fw_filter), GFP_KERNEL);
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index b7143337e4fa..3d9ea9a48289 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -1186,7 +1186,7 @@ static void sctp_v4_del_protocol(void)
1186 unregister_inetaddr_notifier(&sctp_inetaddr_notifier); 1186 unregister_inetaddr_notifier(&sctp_inetaddr_notifier);
1187} 1187}
1188 1188
1189static int __net_init sctp_net_init(struct net *net) 1189static int __net_init sctp_defaults_init(struct net *net)
1190{ 1190{
1191 int status; 1191 int status;
1192 1192
@@ -1279,12 +1279,6 @@ static int __net_init sctp_net_init(struct net *net)
1279 1279
1280 sctp_dbg_objcnt_init(net); 1280 sctp_dbg_objcnt_init(net);
1281 1281
1282 /* Initialize the control inode/socket for handling OOTB packets. */
1283 if ((status = sctp_ctl_sock_init(net))) {
1284 pr_err("Failed to initialize the SCTP control sock\n");
1285 goto err_ctl_sock_init;
1286 }
1287
1288 /* Initialize the local address list. */ 1282 /* Initialize the local address list. */
1289 INIT_LIST_HEAD(&net->sctp.local_addr_list); 1283 INIT_LIST_HEAD(&net->sctp.local_addr_list);
1290 spin_lock_init(&net->sctp.local_addr_lock); 1284 spin_lock_init(&net->sctp.local_addr_lock);
@@ -1300,9 +1294,6 @@ static int __net_init sctp_net_init(struct net *net)
1300 1294
1301 return 0; 1295 return 0;
1302 1296
1303err_ctl_sock_init:
1304 sctp_dbg_objcnt_exit(net);
1305 sctp_proc_exit(net);
1306err_init_proc: 1297err_init_proc:
1307 cleanup_sctp_mibs(net); 1298 cleanup_sctp_mibs(net);
1308err_init_mibs: 1299err_init_mibs:
@@ -1311,15 +1302,12 @@ err_sysctl_register:
1311 return status; 1302 return status;
1312} 1303}
1313 1304
1314static void __net_exit sctp_net_exit(struct net *net) 1305static void __net_exit sctp_defaults_exit(struct net *net)
1315{ 1306{
1316 /* Free the local address list */ 1307 /* Free the local address list */
1317 sctp_free_addr_wq(net); 1308 sctp_free_addr_wq(net);
1318 sctp_free_local_addr_list(net); 1309 sctp_free_local_addr_list(net);
1319 1310
1320 /* Free the control endpoint. */
1321 inet_ctl_sock_destroy(net->sctp.ctl_sock);
1322
1323 sctp_dbg_objcnt_exit(net); 1311 sctp_dbg_objcnt_exit(net);
1324 1312
1325 sctp_proc_exit(net); 1313 sctp_proc_exit(net);
@@ -1327,9 +1315,32 @@ static void __net_exit sctp_net_exit(struct net *net)
1327 sctp_sysctl_net_unregister(net); 1315 sctp_sysctl_net_unregister(net);
1328} 1316}
1329 1317
1330static struct pernet_operations sctp_net_ops = { 1318static struct pernet_operations sctp_defaults_ops = {
1331 .init = sctp_net_init, 1319 .init = sctp_defaults_init,
1332 .exit = sctp_net_exit, 1320 .exit = sctp_defaults_exit,
1321};
1322
1323static int __net_init sctp_ctrlsock_init(struct net *net)
1324{
1325 int status;
1326
1327 /* Initialize the control inode/socket for handling OOTB packets. */
1328 status = sctp_ctl_sock_init(net);
1329 if (status)
1330 pr_err("Failed to initialize the SCTP control sock\n");
1331
1332 return status;
1333}
1334
1335static void __net_init sctp_ctrlsock_exit(struct net *net)
1336{
1337 /* Free the control endpoint. */
1338 inet_ctl_sock_destroy(net->sctp.ctl_sock);
1339}
1340
1341static struct pernet_operations sctp_ctrlsock_ops = {
1342 .init = sctp_ctrlsock_init,
1343 .exit = sctp_ctrlsock_exit,
1333}; 1344};
1334 1345
1335/* Initialize the universe into something sensible. */ 1346/* Initialize the universe into something sensible. */
@@ -1462,8 +1473,11 @@ static __init int sctp_init(void)
1462 sctp_v4_pf_init(); 1473 sctp_v4_pf_init();
1463 sctp_v6_pf_init(); 1474 sctp_v6_pf_init();
1464 1475
1465 status = sctp_v4_protosw_init(); 1476 status = register_pernet_subsys(&sctp_defaults_ops);
1477 if (status)
1478 goto err_register_defaults;
1466 1479
1480 status = sctp_v4_protosw_init();
1467 if (status) 1481 if (status)
1468 goto err_protosw_init; 1482 goto err_protosw_init;
1469 1483
@@ -1471,9 +1485,9 @@ static __init int sctp_init(void)
1471 if (status) 1485 if (status)
1472 goto err_v6_protosw_init; 1486 goto err_v6_protosw_init;
1473 1487
1474 status = register_pernet_subsys(&sctp_net_ops); 1488 status = register_pernet_subsys(&sctp_ctrlsock_ops);
1475 if (status) 1489 if (status)
1476 goto err_register_pernet_subsys; 1490 goto err_register_ctrlsock;
1477 1491
1478 status = sctp_v4_add_protocol(); 1492 status = sctp_v4_add_protocol();
1479 if (status) 1493 if (status)
@@ -1489,12 +1503,14 @@ out:
1489err_v6_add_protocol: 1503err_v6_add_protocol:
1490 sctp_v4_del_protocol(); 1504 sctp_v4_del_protocol();
1491err_add_protocol: 1505err_add_protocol:
1492 unregister_pernet_subsys(&sctp_net_ops); 1506 unregister_pernet_subsys(&sctp_ctrlsock_ops);
1493err_register_pernet_subsys: 1507err_register_ctrlsock:
1494 sctp_v6_protosw_exit(); 1508 sctp_v6_protosw_exit();
1495err_v6_protosw_init: 1509err_v6_protosw_init:
1496 sctp_v4_protosw_exit(); 1510 sctp_v4_protosw_exit();
1497err_protosw_init: 1511err_protosw_init:
1512 unregister_pernet_subsys(&sctp_defaults_ops);
1513err_register_defaults:
1498 sctp_v4_pf_exit(); 1514 sctp_v4_pf_exit();
1499 sctp_v6_pf_exit(); 1515 sctp_v6_pf_exit();
1500 sctp_sysctl_unregister(); 1516 sctp_sysctl_unregister();
@@ -1527,12 +1543,14 @@ static __exit void sctp_exit(void)
1527 sctp_v6_del_protocol(); 1543 sctp_v6_del_protocol();
1528 sctp_v4_del_protocol(); 1544 sctp_v4_del_protocol();
1529 1545
1530 unregister_pernet_subsys(&sctp_net_ops); 1546 unregister_pernet_subsys(&sctp_ctrlsock_ops);
1531 1547
1532 /* Free protosw registrations */ 1548 /* Free protosw registrations */
1533 sctp_v6_protosw_exit(); 1549 sctp_v6_protosw_exit();
1534 sctp_v4_protosw_exit(); 1550 sctp_v4_protosw_exit();
1535 1551
1552 unregister_pernet_subsys(&sctp_defaults_ops);
1553
1536 /* Unregister with socket layer. */ 1554 /* Unregister with socket layer. */
1537 sctp_v6_pf_exit(); 1555 sctp_v6_pf_exit();
1538 sctp_v4_pf_exit(); 1556 sctp_v4_pf_exit();
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index b140c092d226..f14f24ee9983 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -297,7 +297,7 @@ static int rpc_complete_task(struct rpc_task *task)
297 clear_bit(RPC_TASK_ACTIVE, &task->tk_runstate); 297 clear_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
298 ret = atomic_dec_and_test(&task->tk_count); 298 ret = atomic_dec_and_test(&task->tk_count);
299 if (waitqueue_active(wq)) 299 if (waitqueue_active(wq))
300 __wake_up_locked_key(wq, TASK_NORMAL, 1, &k); 300 __wake_up_locked_key(wq, TASK_NORMAL, &k);
301 spin_unlock_irqrestore(&wq->lock, flags); 301 spin_unlock_irqrestore(&wq->lock, flags);
302 return ret; 302 return ret;
303} 303}
@@ -1092,14 +1092,10 @@ void
1092rpc_destroy_mempool(void) 1092rpc_destroy_mempool(void)
1093{ 1093{
1094 rpciod_stop(); 1094 rpciod_stop();
1095 if (rpc_buffer_mempool) 1095 mempool_destroy(rpc_buffer_mempool);
1096 mempool_destroy(rpc_buffer_mempool); 1096 mempool_destroy(rpc_task_mempool);
1097 if (rpc_task_mempool) 1097 kmem_cache_destroy(rpc_task_slabp);
1098 mempool_destroy(rpc_task_mempool); 1098 kmem_cache_destroy(rpc_buffer_slabp);
1099 if (rpc_task_slabp)
1100 kmem_cache_destroy(rpc_task_slabp);
1101 if (rpc_buffer_slabp)
1102 kmem_cache_destroy(rpc_buffer_slabp);
1103 rpc_destroy_wait_queue(&delay_queue); 1099 rpc_destroy_wait_queue(&delay_queue);
1104} 1100}
1105 1101
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index ab5dd621ae0c..2e98f4a243e5 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -614,6 +614,7 @@ static void xprt_autoclose(struct work_struct *work)
614 clear_bit(XPRT_CLOSE_WAIT, &xprt->state); 614 clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
615 xprt->ops->close(xprt); 615 xprt->ops->close(xprt);
616 xprt_release_write(xprt, NULL); 616 xprt_release_write(xprt, NULL);
617 wake_up_bit(&xprt->state, XPRT_LOCKED);
617} 618}
618 619
619/** 620/**
@@ -723,6 +724,7 @@ void xprt_unlock_connect(struct rpc_xprt *xprt, void *cookie)
723 xprt->ops->release_xprt(xprt, NULL); 724 xprt->ops->release_xprt(xprt, NULL);
724out: 725out:
725 spin_unlock_bh(&xprt->transport_lock); 726 spin_unlock_bh(&xprt->transport_lock);
727 wake_up_bit(&xprt->state, XPRT_LOCKED);
726} 728}
727 729
728/** 730/**
@@ -1394,6 +1396,10 @@ out:
1394static void xprt_destroy(struct rpc_xprt *xprt) 1396static void xprt_destroy(struct rpc_xprt *xprt)
1395{ 1397{
1396 dprintk("RPC: destroying transport %p\n", xprt); 1398 dprintk("RPC: destroying transport %p\n", xprt);
1399
1400 /* Exclude transport connect/disconnect handlers */
1401 wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_UNINTERRUPTIBLE);
1402
1397 del_timer_sync(&xprt->timer); 1403 del_timer_sync(&xprt->timer);
1398 1404
1399 rpc_xprt_debugfs_unregister(xprt); 1405 rpc_xprt_debugfs_unregister(xprt);
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 7be90bc1a7c2..1a85e0ed0b48 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -777,7 +777,6 @@ static void xs_sock_mark_closed(struct rpc_xprt *xprt)
777 xs_sock_reset_connection_flags(xprt); 777 xs_sock_reset_connection_flags(xprt);
778 /* Mark transport as closed and wake up all pending tasks */ 778 /* Mark transport as closed and wake up all pending tasks */
779 xprt_disconnect_done(xprt); 779 xprt_disconnect_done(xprt);
780 xprt_force_disconnect(xprt);
781} 780}
782 781
783/** 782/**
@@ -881,8 +880,11 @@ static void xs_xprt_free(struct rpc_xprt *xprt)
881 */ 880 */
882static void xs_destroy(struct rpc_xprt *xprt) 881static void xs_destroy(struct rpc_xprt *xprt)
883{ 882{
883 struct sock_xprt *transport = container_of(xprt,
884 struct sock_xprt, xprt);
884 dprintk("RPC: xs_destroy xprt %p\n", xprt); 885 dprintk("RPC: xs_destroy xprt %p\n", xprt);
885 886
887 cancel_delayed_work_sync(&transport->connect_worker);
886 xs_close(xprt); 888 xs_close(xprt);
887 xs_xprt_free(xprt); 889 xs_xprt_free(xprt);
888 module_put(THIS_MODULE); 890 module_put(THIS_MODULE);
@@ -1435,6 +1437,7 @@ out:
1435static void xs_tcp_state_change(struct sock *sk) 1437static void xs_tcp_state_change(struct sock *sk)
1436{ 1438{
1437 struct rpc_xprt *xprt; 1439 struct rpc_xprt *xprt;
1440 struct sock_xprt *transport;
1438 1441
1439 read_lock_bh(&sk->sk_callback_lock); 1442 read_lock_bh(&sk->sk_callback_lock);
1440 if (!(xprt = xprt_from_sock(sk))) 1443 if (!(xprt = xprt_from_sock(sk)))
@@ -1446,13 +1449,12 @@ static void xs_tcp_state_change(struct sock *sk)
1446 sock_flag(sk, SOCK_ZAPPED), 1449 sock_flag(sk, SOCK_ZAPPED),
1447 sk->sk_shutdown); 1450 sk->sk_shutdown);
1448 1451
1452 transport = container_of(xprt, struct sock_xprt, xprt);
1449 trace_rpc_socket_state_change(xprt, sk->sk_socket); 1453 trace_rpc_socket_state_change(xprt, sk->sk_socket);
1450 switch (sk->sk_state) { 1454 switch (sk->sk_state) {
1451 case TCP_ESTABLISHED: 1455 case TCP_ESTABLISHED:
1452 spin_lock(&xprt->transport_lock); 1456 spin_lock(&xprt->transport_lock);
1453 if (!xprt_test_and_set_connected(xprt)) { 1457 if (!xprt_test_and_set_connected(xprt)) {
1454 struct sock_xprt *transport = container_of(xprt,
1455 struct sock_xprt, xprt);
1456 1458
1457 /* Reset TCP record info */ 1459 /* Reset TCP record info */
1458 transport->tcp_offset = 0; 1460 transport->tcp_offset = 0;
@@ -1461,6 +1463,8 @@ static void xs_tcp_state_change(struct sock *sk)
1461 transport->tcp_flags = 1463 transport->tcp_flags =
1462 TCP_RCV_COPY_FRAGHDR | TCP_RCV_COPY_XID; 1464 TCP_RCV_COPY_FRAGHDR | TCP_RCV_COPY_XID;
1463 xprt->connect_cookie++; 1465 xprt->connect_cookie++;
1466 clear_bit(XPRT_SOCK_CONNECTING, &transport->sock_state);
1467 xprt_clear_connecting(xprt);
1464 1468
1465 xprt_wake_pending_tasks(xprt, -EAGAIN); 1469 xprt_wake_pending_tasks(xprt, -EAGAIN);
1466 } 1470 }
@@ -1496,6 +1500,9 @@ static void xs_tcp_state_change(struct sock *sk)
1496 smp_mb__after_atomic(); 1500 smp_mb__after_atomic();
1497 break; 1501 break;
1498 case TCP_CLOSE: 1502 case TCP_CLOSE:
1503 if (test_and_clear_bit(XPRT_SOCK_CONNECTING,
1504 &transport->sock_state))
1505 xprt_clear_connecting(xprt);
1499 xs_sock_mark_closed(xprt); 1506 xs_sock_mark_closed(xprt);
1500 } 1507 }
1501 out: 1508 out:
@@ -2179,6 +2186,7 @@ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
2179 /* Tell the socket layer to start connecting... */ 2186 /* Tell the socket layer to start connecting... */
2180 xprt->stat.connect_count++; 2187 xprt->stat.connect_count++;
2181 xprt->stat.connect_start = jiffies; 2188 xprt->stat.connect_start = jiffies;
2189 set_bit(XPRT_SOCK_CONNECTING, &transport->sock_state);
2182 ret = kernel_connect(sock, xs_addr(xprt), xprt->addrlen, O_NONBLOCK); 2190 ret = kernel_connect(sock, xs_addr(xprt), xprt->addrlen, O_NONBLOCK);
2183 switch (ret) { 2191 switch (ret) {
2184 case 0: 2192 case 0:
@@ -2240,7 +2248,6 @@ static void xs_tcp_setup_socket(struct work_struct *work)
2240 case -EINPROGRESS: 2248 case -EINPROGRESS:
2241 case -EALREADY: 2249 case -EALREADY:
2242 xprt_unlock_connect(xprt, transport); 2250 xprt_unlock_connect(xprt, transport);
2243 xprt_clear_connecting(xprt);
2244 return; 2251 return;
2245 case -EINVAL: 2252 case -EINVAL:
2246 /* Happens, for instance, if the user specified a link 2253 /* Happens, for instance, if the user specified a link
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
index 562c926a51cc..c5ac436235e0 100644
--- a/net/tipc/msg.c
+++ b/net/tipc/msg.c
@@ -539,6 +539,7 @@ bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, int *err)
539 *err = -TIPC_ERR_NO_NAME; 539 *err = -TIPC_ERR_NO_NAME;
540 if (skb_linearize(skb)) 540 if (skb_linearize(skb))
541 return false; 541 return false;
542 msg = buf_msg(skb);
542 if (msg_reroute_cnt(msg)) 543 if (msg_reroute_cnt(msg))
543 return false; 544 return false;
544 dnode = addr_domain(net, msg_lookup_scope(msg)); 545 dnode = addr_domain(net, msg_lookup_scope(msg));
diff --git a/scripts/extract-cert.c b/scripts/extract-cert.c
index fd0db015c65c..6ce5945a0b89 100644
--- a/scripts/extract-cert.c
+++ b/scripts/extract-cert.c
@@ -1,15 +1,15 @@
1/* Extract X.509 certificate in DER form from PKCS#11 or PEM. 1/* Extract X.509 certificate in DER form from PKCS#11 or PEM.
2 * 2 *
3 * Copyright © 2014 Red Hat, Inc. All Rights Reserved. 3 * Copyright © 2014-2015 Red Hat, Inc. All Rights Reserved.
4 * Copyright © 2015 Intel Corporation. 4 * Copyright © 2015 Intel Corporation.
5 * 5 *
6 * Authors: David Howells <dhowells@redhat.com> 6 * Authors: David Howells <dhowells@redhat.com>
7 * David Woodhouse <dwmw2@infradead.org> 7 * David Woodhouse <dwmw2@infradead.org>
8 * 8 *
9 * This program is free software; you can redistribute it and/or 9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public Licence 10 * modify it under the terms of the GNU Lesser General Public License
11 * as published by the Free Software Foundation; either version 11 * as published by the Free Software Foundation; either version 2.1
12 * 2 of the Licence, or (at your option) any later version. 12 * of the licence, or (at your option) any later version.
13 */ 13 */
14#define _GNU_SOURCE 14#define _GNU_SOURCE
15#include <stdio.h> 15#include <stdio.h>
@@ -86,7 +86,7 @@ static void write_cert(X509 *x509)
86 ERR(!wb, "%s", cert_dst); 86 ERR(!wb, "%s", cert_dst);
87 } 87 }
88 X509_NAME_oneline(X509_get_subject_name(x509), buf, sizeof(buf)); 88 X509_NAME_oneline(X509_get_subject_name(x509), buf, sizeof(buf));
89 ERR(!i2d_X509_bio(wb, x509), cert_dst); 89 ERR(!i2d_X509_bio(wb, x509), "%s", cert_dst);
90 if (kbuild_verbose) 90 if (kbuild_verbose)
91 fprintf(stderr, "Extracted cert: %s\n", buf); 91 fprintf(stderr, "Extracted cert: %s\n", buf);
92} 92}
diff --git a/scripts/sign-file.c b/scripts/sign-file.c
index 058bba3103e2..c3899ca4811c 100755
--- a/scripts/sign-file.c
+++ b/scripts/sign-file.c
@@ -1,12 +1,15 @@
1/* Sign a module file using the given key. 1/* Sign a module file using the given key.
2 * 2 *
3 * Copyright (C) 2014 Red Hat, Inc. All Rights Reserved. 3 * Copyright © 2014-2015 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com) 4 * Copyright © 2015 Intel Corporation.
5 *
6 * Authors: David Howells <dhowells@redhat.com>
7 * David Woodhouse <dwmw2@infradead.org>
5 * 8 *
6 * This program is free software; you can redistribute it and/or 9 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public Licence 10 * modify it under the terms of the GNU Lesser General Public License
8 * as published by the Free Software Foundation; either version 11 * as published by the Free Software Foundation; either version 2.1
9 * 2 of the Licence, or (at your option) any later version. 12 * of the licence, or (at your option) any later version.
10 */ 13 */
11#define _GNU_SOURCE 14#define _GNU_SOURCE
12#include <stdio.h> 15#include <stdio.h>
diff --git a/security/device_cgroup.c b/security/device_cgroup.c
index 73455089feef..03c1652c9a1f 100644
--- a/security/device_cgroup.c
+++ b/security/device_cgroup.c
@@ -401,7 +401,7 @@ static bool verify_new_ex(struct dev_cgroup *dev_cgroup,
401 bool match = false; 401 bool match = false;
402 402
403 RCU_LOCKDEP_WARN(!rcu_read_lock_held() && 403 RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&
404 lockdep_is_held(&devcgroup_mutex), 404 !lockdep_is_held(&devcgroup_mutex),
405 "device_cgroup:verify_new_ex called without proper synchronization"); 405 "device_cgroup:verify_new_ex called without proper synchronization");
406 406
407 if (dev_cgroup->behavior == DEVCG_DEFAULT_ALLOW) { 407 if (dev_cgroup->behavior == DEVCG_DEFAULT_ALLOW) {
diff --git a/sound/arm/Kconfig b/sound/arm/Kconfig
index 885683a3b0bd..e0406211716b 100644
--- a/sound/arm/Kconfig
+++ b/sound/arm/Kconfig
@@ -9,6 +9,14 @@ menuconfig SND_ARM
9 Drivers that are implemented on ASoC can be found in 9 Drivers that are implemented on ASoC can be found in
10 "ALSA for SoC audio support" section. 10 "ALSA for SoC audio support" section.
11 11
12config SND_PXA2XX_LIB
13 tristate
14 select SND_AC97_CODEC if SND_PXA2XX_LIB_AC97
15 select SND_DMAENGINE_PCM
16
17config SND_PXA2XX_LIB_AC97
18 bool
19
12if SND_ARM 20if SND_ARM
13 21
14config SND_ARMAACI 22config SND_ARMAACI
@@ -21,13 +29,6 @@ config SND_PXA2XX_PCM
21 tristate 29 tristate
22 select SND_PCM 30 select SND_PCM
23 31
24config SND_PXA2XX_LIB
25 tristate
26 select SND_AC97_CODEC if SND_PXA2XX_LIB_AC97
27
28config SND_PXA2XX_LIB_AC97
29 bool
30
31config SND_PXA2XX_AC97 32config SND_PXA2XX_AC97
32 tristate "AC97 driver for the Intel PXA2xx chip" 33 tristate "AC97 driver for the Intel PXA2xx chip"
33 depends on ARCH_PXA 34 depends on ARCH_PXA
diff --git a/sound/pci/hda/hda_tegra.c b/sound/pci/hda/hda_tegra.c
index 477742cb70a2..58c0aad37284 100644
--- a/sound/pci/hda/hda_tegra.c
+++ b/sound/pci/hda/hda_tegra.c
@@ -73,6 +73,7 @@ struct hda_tegra {
73 struct clk *hda2codec_2x_clk; 73 struct clk *hda2codec_2x_clk;
74 struct clk *hda2hdmi_clk; 74 struct clk *hda2hdmi_clk;
75 void __iomem *regs; 75 void __iomem *regs;
76 struct work_struct probe_work;
76}; 77};
77 78
78#ifdef CONFIG_PM 79#ifdef CONFIG_PM
@@ -294,7 +295,9 @@ static int hda_tegra_dev_disconnect(struct snd_device *device)
294static int hda_tegra_dev_free(struct snd_device *device) 295static int hda_tegra_dev_free(struct snd_device *device)
295{ 296{
296 struct azx *chip = device->device_data; 297 struct azx *chip = device->device_data;
298 struct hda_tegra *hda = container_of(chip, struct hda_tegra, chip);
297 299
300 cancel_work_sync(&hda->probe_work);
298 if (azx_bus(chip)->chip_init) { 301 if (azx_bus(chip)->chip_init) {
299 azx_stop_all_streams(chip); 302 azx_stop_all_streams(chip);
300 azx_stop_chip(chip); 303 azx_stop_chip(chip);
@@ -426,6 +429,9 @@ static int hda_tegra_first_init(struct azx *chip, struct platform_device *pdev)
426/* 429/*
427 * constructor 430 * constructor
428 */ 431 */
432
433static void hda_tegra_probe_work(struct work_struct *work);
434
429static int hda_tegra_create(struct snd_card *card, 435static int hda_tegra_create(struct snd_card *card,
430 unsigned int driver_caps, 436 unsigned int driver_caps,
431 struct hda_tegra *hda) 437 struct hda_tegra *hda)
@@ -452,6 +458,8 @@ static int hda_tegra_create(struct snd_card *card,
452 chip->single_cmd = false; 458 chip->single_cmd = false;
453 chip->snoop = true; 459 chip->snoop = true;
454 460
461 INIT_WORK(&hda->probe_work, hda_tegra_probe_work);
462
455 err = azx_bus_init(chip, NULL, &hda_tegra_io_ops); 463 err = azx_bus_init(chip, NULL, &hda_tegra_io_ops);
456 if (err < 0) 464 if (err < 0)
457 return err; 465 return err;
@@ -499,6 +507,21 @@ static int hda_tegra_probe(struct platform_device *pdev)
499 card->private_data = chip; 507 card->private_data = chip;
500 508
501 dev_set_drvdata(&pdev->dev, card); 509 dev_set_drvdata(&pdev->dev, card);
510 schedule_work(&hda->probe_work);
511
512 return 0;
513
514out_free:
515 snd_card_free(card);
516 return err;
517}
518
519static void hda_tegra_probe_work(struct work_struct *work)
520{
521 struct hda_tegra *hda = container_of(work, struct hda_tegra, probe_work);
522 struct azx *chip = &hda->chip;
523 struct platform_device *pdev = to_platform_device(hda->dev);
524 int err;
502 525
503 err = hda_tegra_first_init(chip, pdev); 526 err = hda_tegra_first_init(chip, pdev);
504 if (err < 0) 527 if (err < 0)
@@ -520,11 +543,8 @@ static int hda_tegra_probe(struct platform_device *pdev)
520 chip->running = 1; 543 chip->running = 1;
521 snd_hda_set_power_save(&chip->bus, power_save * 1000); 544 snd_hda_set_power_save(&chip->bus, power_save * 1000);
522 545
523 return 0; 546 out_free:
524 547 return; /* no error return from async probe */
525out_free:
526 snd_card_free(card);
527 return err;
528} 548}
529 549
530static int hda_tegra_remove(struct platform_device *pdev) 550static int hda_tegra_remove(struct platform_device *pdev)
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index a75b5611d1e4..afec6dc9f91f 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -4188,6 +4188,24 @@ static void alc_fixup_disable_aamix(struct hda_codec *codec,
4188 } 4188 }
4189} 4189}
4190 4190
4191/* fixup for Thinkpad docks: add dock pins, avoid HP parser fixup */
4192static void alc_fixup_tpt440_dock(struct hda_codec *codec,
4193 const struct hda_fixup *fix, int action)
4194{
4195 static const struct hda_pintbl pincfgs[] = {
4196 { 0x16, 0x21211010 }, /* dock headphone */
4197 { 0x19, 0x21a11010 }, /* dock mic */
4198 { }
4199 };
4200 struct alc_spec *spec = codec->spec;
4201
4202 if (action == HDA_FIXUP_ACT_PRE_PROBE) {
4203 spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP;
4204 codec->power_save_node = 0; /* avoid click noises */
4205 snd_hda_apply_pincfgs(codec, pincfgs);
4206 }
4207}
4208
4191static void alc_shutup_dell_xps13(struct hda_codec *codec) 4209static void alc_shutup_dell_xps13(struct hda_codec *codec)
4192{ 4210{
4193 struct alc_spec *spec = codec->spec; 4211 struct alc_spec *spec = codec->spec;
@@ -4562,7 +4580,6 @@ enum {
4562 ALC255_FIXUP_HEADSET_MODE_NO_HP_MIC, 4580 ALC255_FIXUP_HEADSET_MODE_NO_HP_MIC,
4563 ALC293_FIXUP_DELL1_MIC_NO_PRESENCE, 4581 ALC293_FIXUP_DELL1_MIC_NO_PRESENCE,
4564 ALC292_FIXUP_TPT440_DOCK, 4582 ALC292_FIXUP_TPT440_DOCK,
4565 ALC292_FIXUP_TPT440_DOCK2,
4566 ALC283_FIXUP_BXBT2807_MIC, 4583 ALC283_FIXUP_BXBT2807_MIC,
4567 ALC255_FIXUP_DELL_WMI_MIC_MUTE_LED, 4584 ALC255_FIXUP_DELL_WMI_MIC_MUTE_LED,
4568 ALC282_FIXUP_ASPIRE_V5_PINS, 4585 ALC282_FIXUP_ASPIRE_V5_PINS,
@@ -5029,17 +5046,7 @@ static const struct hda_fixup alc269_fixups[] = {
5029 }, 5046 },
5030 [ALC292_FIXUP_TPT440_DOCK] = { 5047 [ALC292_FIXUP_TPT440_DOCK] = {
5031 .type = HDA_FIXUP_FUNC, 5048 .type = HDA_FIXUP_FUNC,
5032 .v.func = alc269_fixup_pincfg_no_hp_to_lineout, 5049 .v.func = alc_fixup_tpt440_dock,
5033 .chained = true,
5034 .chain_id = ALC292_FIXUP_TPT440_DOCK2
5035 },
5036 [ALC292_FIXUP_TPT440_DOCK2] = {
5037 .type = HDA_FIXUP_PINS,
5038 .v.pins = (const struct hda_pintbl[]) {
5039 { 0x16, 0x21211010 }, /* dock headphone */
5040 { 0x19, 0x21a11010 }, /* dock mic */
5041 { }
5042 },
5043 .chained = true, 5050 .chained = true,
5044 .chain_id = ALC269_FIXUP_LIMIT_INT_MIC_BOOST 5051 .chain_id = ALC269_FIXUP_LIMIT_INT_MIC_BOOST
5045 }, 5052 },
diff --git a/sound/soc/au1x/psc-i2s.c b/sound/soc/au1x/psc-i2s.c
index 38e853add96e..0bf9d62b91a0 100644
--- a/sound/soc/au1x/psc-i2s.c
+++ b/sound/soc/au1x/psc-i2s.c
@@ -296,7 +296,6 @@ static int au1xpsc_i2s_drvprobe(struct platform_device *pdev)
296{ 296{
297 struct resource *iores, *dmares; 297 struct resource *iores, *dmares;
298 unsigned long sel; 298 unsigned long sel;
299 int ret;
300 struct au1xpsc_audio_data *wd; 299 struct au1xpsc_audio_data *wd;
301 300
302 wd = devm_kzalloc(&pdev->dev, sizeof(struct au1xpsc_audio_data), 301 wd = devm_kzalloc(&pdev->dev, sizeof(struct au1xpsc_audio_data),
diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c
index 4972bf3efa91..268a28bd1df4 100644
--- a/sound/soc/codecs/rt5645.c
+++ b/sound/soc/codecs/rt5645.c
@@ -732,14 +732,14 @@ static const struct snd_kcontrol_new rt5645_mono_adc_r_mix[] = {
732static const struct snd_kcontrol_new rt5645_dac_l_mix[] = { 732static const struct snd_kcontrol_new rt5645_dac_l_mix[] = {
733 SOC_DAPM_SINGLE("Stereo ADC Switch", RT5645_AD_DA_MIXER, 733 SOC_DAPM_SINGLE("Stereo ADC Switch", RT5645_AD_DA_MIXER,
734 RT5645_M_ADCMIX_L_SFT, 1, 1), 734 RT5645_M_ADCMIX_L_SFT, 1, 1),
735 SOC_DAPM_SINGLE("DAC1 Switch", RT5645_AD_DA_MIXER, 735 SOC_DAPM_SINGLE_AUTODISABLE("DAC1 Switch", RT5645_AD_DA_MIXER,
736 RT5645_M_DAC1_L_SFT, 1, 1), 736 RT5645_M_DAC1_L_SFT, 1, 1),
737}; 737};
738 738
739static const struct snd_kcontrol_new rt5645_dac_r_mix[] = { 739static const struct snd_kcontrol_new rt5645_dac_r_mix[] = {
740 SOC_DAPM_SINGLE("Stereo ADC Switch", RT5645_AD_DA_MIXER, 740 SOC_DAPM_SINGLE("Stereo ADC Switch", RT5645_AD_DA_MIXER,
741 RT5645_M_ADCMIX_R_SFT, 1, 1), 741 RT5645_M_ADCMIX_R_SFT, 1, 1),
742 SOC_DAPM_SINGLE("DAC1 Switch", RT5645_AD_DA_MIXER, 742 SOC_DAPM_SINGLE_AUTODISABLE("DAC1 Switch", RT5645_AD_DA_MIXER,
743 RT5645_M_DAC1_R_SFT, 1, 1), 743 RT5645_M_DAC1_R_SFT, 1, 1),
744}; 744};
745 745
@@ -1381,7 +1381,7 @@ static void hp_amp_power(struct snd_soc_codec *codec, int on)
1381 regmap_write(rt5645->regmap, RT5645_PR_BASE + 1381 regmap_write(rt5645->regmap, RT5645_PR_BASE +
1382 RT5645_MAMP_INT_REG2, 0xfc00); 1382 RT5645_MAMP_INT_REG2, 0xfc00);
1383 snd_soc_write(codec, RT5645_DEPOP_M2, 0x1140); 1383 snd_soc_write(codec, RT5645_DEPOP_M2, 0x1140);
1384 mdelay(5); 1384 msleep(40);
1385 rt5645->hp_on = true; 1385 rt5645->hp_on = true;
1386 } else { 1386 } else {
1387 /* depop parameters */ 1387 /* depop parameters */
@@ -2829,13 +2829,12 @@ static int rt5645_jack_detect(struct snd_soc_codec *codec, int jack_insert)
2829 snd_soc_dapm_sync(dapm); 2829 snd_soc_dapm_sync(dapm);
2830 rt5645->jack_type = SND_JACK_HEADPHONE; 2830 rt5645->jack_type = SND_JACK_HEADPHONE;
2831 } 2831 }
2832
2833 snd_soc_update_bits(codec, RT5645_CHARGE_PUMP, 0x0300, 0x0200);
2834 snd_soc_write(codec, RT5645_DEPOP_M1, 0x001d);
2835 snd_soc_write(codec, RT5645_DEPOP_M1, 0x0001);
2836 } else { /* jack out */ 2832 } else { /* jack out */
2837 rt5645->jack_type = 0; 2833 rt5645->jack_type = 0;
2838 2834
2835 regmap_update_bits(rt5645->regmap, RT5645_HP_VOL,
2836 RT5645_L_MUTE | RT5645_R_MUTE,
2837 RT5645_L_MUTE | RT5645_R_MUTE);
2839 regmap_update_bits(rt5645->regmap, RT5645_IN1_CTRL2, 2838 regmap_update_bits(rt5645->regmap, RT5645_IN1_CTRL2,
2840 RT5645_CBJ_MN_JD, RT5645_CBJ_MN_JD); 2839 RT5645_CBJ_MN_JD, RT5645_CBJ_MN_JD);
2841 regmap_update_bits(rt5645->regmap, RT5645_IN1_CTRL1, 2840 regmap_update_bits(rt5645->regmap, RT5645_IN1_CTRL1,
@@ -2880,8 +2879,6 @@ int rt5645_set_jack_detect(struct snd_soc_codec *codec,
2880 rt5645->en_button_func = true; 2879 rt5645->en_button_func = true;
2881 regmap_update_bits(rt5645->regmap, RT5645_GPIO_CTRL1, 2880 regmap_update_bits(rt5645->regmap, RT5645_GPIO_CTRL1,
2882 RT5645_GP1_PIN_IRQ, RT5645_GP1_PIN_IRQ); 2881 RT5645_GP1_PIN_IRQ, RT5645_GP1_PIN_IRQ);
2883 regmap_update_bits(rt5645->regmap, RT5645_DEPOP_M1,
2884 RT5645_HP_CB_MASK, RT5645_HP_CB_PU);
2885 regmap_update_bits(rt5645->regmap, RT5645_GEN_CTRL1, 2882 regmap_update_bits(rt5645->regmap, RT5645_GEN_CTRL1,
2886 RT5645_DIG_GATE_CTRL, RT5645_DIG_GATE_CTRL); 2883 RT5645_DIG_GATE_CTRL, RT5645_DIG_GATE_CTRL);
2887 } 2884 }
@@ -3205,6 +3202,13 @@ static const struct dmi_system_id dmi_platform_intel_braswell[] = {
3205 DMI_MATCH(DMI_PRODUCT_NAME, "Celes"), 3202 DMI_MATCH(DMI_PRODUCT_NAME, "Celes"),
3206 }, 3203 },
3207 }, 3204 },
3205 {
3206 .ident = "Google Ultima",
3207 .callback = strago_quirk_cb,
3208 .matches = {
3209 DMI_MATCH(DMI_PRODUCT_NAME, "Ultima"),
3210 },
3211 },
3208 { } 3212 { }
3209}; 3213};
3210 3214
diff --git a/sound/soc/codecs/wm0010.c b/sound/soc/codecs/wm0010.c
index f2c6ad4b8fde..581ec1502228 100644
--- a/sound/soc/codecs/wm0010.c
+++ b/sound/soc/codecs/wm0010.c
@@ -577,7 +577,6 @@ static int wm0010_boot(struct snd_soc_codec *codec)
577 struct wm0010_priv *wm0010 = snd_soc_codec_get_drvdata(codec); 577 struct wm0010_priv *wm0010 = snd_soc_codec_get_drvdata(codec);
578 unsigned long flags; 578 unsigned long flags;
579 int ret; 579 int ret;
580 const struct firmware *fw;
581 struct spi_message m; 580 struct spi_message m;
582 struct spi_transfer t; 581 struct spi_transfer t;
583 struct dfw_pllrec pll_rec; 582 struct dfw_pllrec pll_rec;
@@ -623,14 +622,6 @@ static int wm0010_boot(struct snd_soc_codec *codec)
623 wm0010->state = WM0010_OUT_OF_RESET; 622 wm0010->state = WM0010_OUT_OF_RESET;
624 spin_unlock_irqrestore(&wm0010->irq_lock, flags); 623 spin_unlock_irqrestore(&wm0010->irq_lock, flags);
625 624
626 /* First the bootloader */
627 ret = request_firmware(&fw, "wm0010_stage2.bin", codec->dev);
628 if (ret != 0) {
629 dev_err(codec->dev, "Failed to request stage2 loader: %d\n",
630 ret);
631 goto abort;
632 }
633
634 if (!wait_for_completion_timeout(&wm0010->boot_completion, 625 if (!wait_for_completion_timeout(&wm0010->boot_completion,
635 msecs_to_jiffies(20))) 626 msecs_to_jiffies(20)))
636 dev_err(codec->dev, "Failed to get interrupt from DSP\n"); 627 dev_err(codec->dev, "Failed to get interrupt from DSP\n");
@@ -673,7 +664,7 @@ static int wm0010_boot(struct snd_soc_codec *codec)
673 664
674 img_swap = kzalloc(len, GFP_KERNEL | GFP_DMA); 665 img_swap = kzalloc(len, GFP_KERNEL | GFP_DMA);
675 if (!img_swap) 666 if (!img_swap)
676 goto abort; 667 goto abort_out;
677 668
678 /* We need to re-order for 0010 */ 669 /* We need to re-order for 0010 */
679 byte_swap_64((u64 *)&pll_rec, img_swap, len); 670 byte_swap_64((u64 *)&pll_rec, img_swap, len);
@@ -688,16 +679,16 @@ static int wm0010_boot(struct snd_soc_codec *codec)
688 spi_message_add_tail(&t, &m); 679 spi_message_add_tail(&t, &m);
689 680
690 ret = spi_sync(spi, &m); 681 ret = spi_sync(spi, &m);
691 if (ret != 0) { 682 if (ret) {
692 dev_err(codec->dev, "First PLL write failed: %d\n", ret); 683 dev_err(codec->dev, "First PLL write failed: %d\n", ret);
693 goto abort; 684 goto abort_swap;
694 } 685 }
695 686
696 /* Use a second send of the message to get the return status */ 687 /* Use a second send of the message to get the return status */
697 ret = spi_sync(spi, &m); 688 ret = spi_sync(spi, &m);
698 if (ret != 0) { 689 if (ret) {
699 dev_err(codec->dev, "Second PLL write failed: %d\n", ret); 690 dev_err(codec->dev, "Second PLL write failed: %d\n", ret);
700 goto abort; 691 goto abort_swap;
701 } 692 }
702 693
703 p = (u32 *)out; 694 p = (u32 *)out;
@@ -730,6 +721,10 @@ static int wm0010_boot(struct snd_soc_codec *codec)
730 721
731 return 0; 722 return 0;
732 723
724abort_swap:
725 kfree(img_swap);
726abort_out:
727 kfree(out);
733abort: 728abort:
734 /* Put the chip back into reset */ 729 /* Put the chip back into reset */
735 wm0010_halt(codec); 730 wm0010_halt(codec);
diff --git a/sound/soc/codecs/wm8960.c b/sound/soc/codecs/wm8960.c
index e3b7d0c57411..dbd88408861a 100644
--- a/sound/soc/codecs/wm8960.c
+++ b/sound/soc/codecs/wm8960.c
@@ -211,28 +211,38 @@ static int wm8960_put_deemph(struct snd_kcontrol *kcontrol,
211 return wm8960_set_deemph(codec); 211 return wm8960_set_deemph(codec);
212} 212}
213 213
214static const DECLARE_TLV_DB_SCALE(adc_tlv, -9700, 50, 0); 214static const DECLARE_TLV_DB_SCALE(adc_tlv, -9750, 50, 1);
215static const DECLARE_TLV_DB_SCALE(dac_tlv, -12700, 50, 1); 215static const DECLARE_TLV_DB_SCALE(inpga_tlv, -1725, 75, 0);
216static const DECLARE_TLV_DB_SCALE(dac_tlv, -12750, 50, 1);
216static const DECLARE_TLV_DB_SCALE(bypass_tlv, -2100, 300, 0); 217static const DECLARE_TLV_DB_SCALE(bypass_tlv, -2100, 300, 0);
217static const DECLARE_TLV_DB_SCALE(out_tlv, -12100, 100, 1); 218static const DECLARE_TLV_DB_SCALE(out_tlv, -12100, 100, 1);
218static const DECLARE_TLV_DB_SCALE(boost_tlv, -1200, 300, 1); 219static const DECLARE_TLV_DB_SCALE(lineinboost_tlv, -1500, 300, 1);
220static const unsigned int micboost_tlv[] = {
221 TLV_DB_RANGE_HEAD(2),
222 0, 1, TLV_DB_SCALE_ITEM(0, 1300, 0),
223 2, 3, TLV_DB_SCALE_ITEM(2000, 900, 0),
224};
219 225
220static const struct snd_kcontrol_new wm8960_snd_controls[] = { 226static const struct snd_kcontrol_new wm8960_snd_controls[] = {
221SOC_DOUBLE_R_TLV("Capture Volume", WM8960_LINVOL, WM8960_RINVOL, 227SOC_DOUBLE_R_TLV("Capture Volume", WM8960_LINVOL, WM8960_RINVOL,
222 0, 63, 0, adc_tlv), 228 0, 63, 0, inpga_tlv),
223SOC_DOUBLE_R("Capture Volume ZC Switch", WM8960_LINVOL, WM8960_RINVOL, 229SOC_DOUBLE_R("Capture Volume ZC Switch", WM8960_LINVOL, WM8960_RINVOL,
224 6, 1, 0), 230 6, 1, 0),
225SOC_DOUBLE_R("Capture Switch", WM8960_LINVOL, WM8960_RINVOL, 231SOC_DOUBLE_R("Capture Switch", WM8960_LINVOL, WM8960_RINVOL,
226 7, 1, 0), 232 7, 1, 0),
227 233
228SOC_SINGLE_TLV("Right Input Boost Mixer RINPUT3 Volume", 234SOC_SINGLE_TLV("Right Input Boost Mixer RINPUT3 Volume",
229 WM8960_INBMIX1, 4, 7, 0, boost_tlv), 235 WM8960_INBMIX1, 4, 7, 0, lineinboost_tlv),
230SOC_SINGLE_TLV("Right Input Boost Mixer RINPUT2 Volume", 236SOC_SINGLE_TLV("Right Input Boost Mixer RINPUT2 Volume",
231 WM8960_INBMIX1, 1, 7, 0, boost_tlv), 237 WM8960_INBMIX1, 1, 7, 0, lineinboost_tlv),
232SOC_SINGLE_TLV("Left Input Boost Mixer LINPUT3 Volume", 238SOC_SINGLE_TLV("Left Input Boost Mixer LINPUT3 Volume",
233 WM8960_INBMIX2, 4, 7, 0, boost_tlv), 239 WM8960_INBMIX2, 4, 7, 0, lineinboost_tlv),
234SOC_SINGLE_TLV("Left Input Boost Mixer LINPUT2 Volume", 240SOC_SINGLE_TLV("Left Input Boost Mixer LINPUT2 Volume",
235 WM8960_INBMIX2, 1, 7, 0, boost_tlv), 241 WM8960_INBMIX2, 1, 7, 0, lineinboost_tlv),
242SOC_SINGLE_TLV("Right Input Boost Mixer RINPUT1 Volume",
243 WM8960_RINPATH, 4, 3, 0, micboost_tlv),
244SOC_SINGLE_TLV("Left Input Boost Mixer LINPUT1 Volume",
245 WM8960_LINPATH, 4, 3, 0, micboost_tlv),
236 246
237SOC_DOUBLE_R_TLV("Playback Volume", WM8960_LDAC, WM8960_RDAC, 247SOC_DOUBLE_R_TLV("Playback Volume", WM8960_LDAC, WM8960_RDAC,
238 0, 255, 0, dac_tlv), 248 0, 255, 0, dac_tlv),
diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c
index b4eb975da981..293e47a6ff59 100644
--- a/sound/soc/codecs/wm8962.c
+++ b/sound/soc/codecs/wm8962.c
@@ -2944,7 +2944,8 @@ static int wm8962_mute(struct snd_soc_dai *dai, int mute)
2944 WM8962_DAC_MUTE, val); 2944 WM8962_DAC_MUTE, val);
2945} 2945}
2946 2946
2947#define WM8962_RATES SNDRV_PCM_RATE_8000_96000 2947#define WM8962_RATES (SNDRV_PCM_RATE_8000_48000 |\
2948 SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000)
2948 2949
2949#define WM8962_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |\ 2950#define WM8962_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |\
2950 SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE) 2951 SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE)
diff --git a/sound/soc/davinci/davinci-mcasp.c b/sound/soc/davinci/davinci-mcasp.c
index add6bb99661d..7d45d98a861f 100644
--- a/sound/soc/davinci/davinci-mcasp.c
+++ b/sound/soc/davinci/davinci-mcasp.c
@@ -663,7 +663,7 @@ static int mcasp_common_hw_param(struct davinci_mcasp *mcasp, int stream,
663 u8 rx_ser = 0; 663 u8 rx_ser = 0;
664 u8 slots = mcasp->tdm_slots; 664 u8 slots = mcasp->tdm_slots;
665 u8 max_active_serializers = (channels + slots - 1) / slots; 665 u8 max_active_serializers = (channels + slots - 1) / slots;
666 int active_serializers, numevt, n; 666 int active_serializers, numevt;
667 u32 reg; 667 u32 reg;
668 /* Default configuration */ 668 /* Default configuration */
669 if (mcasp->version < MCASP_VERSION_3) 669 if (mcasp->version < MCASP_VERSION_3)
@@ -745,9 +745,8 @@ static int mcasp_common_hw_param(struct davinci_mcasp *mcasp, int stream,
745 * The number of words for numevt need to be in steps of active 745 * The number of words for numevt need to be in steps of active
746 * serializers. 746 * serializers.
747 */ 747 */
748 n = numevt % active_serializers; 748 numevt = (numevt / active_serializers) * active_serializers;
749 if (n) 749
750 numevt += (active_serializers - n);
751 while (period_words % numevt && numevt > 0) 750 while (period_words % numevt && numevt > 0)
752 numevt -= active_serializers; 751 numevt -= active_serializers;
753 if (numevt <= 0) 752 if (numevt <= 0)
@@ -1299,6 +1298,7 @@ static struct snd_soc_dai_driver davinci_mcasp_dai[] = {
1299 .ops = &davinci_mcasp_dai_ops, 1298 .ops = &davinci_mcasp_dai_ops,
1300 1299
1301 .symmetric_samplebits = 1, 1300 .symmetric_samplebits = 1,
1301 .symmetric_rates = 1,
1302 }, 1302 },
1303 { 1303 {
1304 .name = "davinci-mcasp.1", 1304 .name = "davinci-mcasp.1",
@@ -1685,7 +1685,7 @@ static int davinci_mcasp_probe(struct platform_device *pdev)
1685 1685
1686 irq = platform_get_irq_byname(pdev, "common"); 1686 irq = platform_get_irq_byname(pdev, "common");
1687 if (irq >= 0) { 1687 if (irq >= 0) {
1688 irq_name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s_common\n", 1688 irq_name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s_common",
1689 dev_name(&pdev->dev)); 1689 dev_name(&pdev->dev));
1690 ret = devm_request_threaded_irq(&pdev->dev, irq, NULL, 1690 ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
1691 davinci_mcasp_common_irq_handler, 1691 davinci_mcasp_common_irq_handler,
@@ -1702,7 +1702,7 @@ static int davinci_mcasp_probe(struct platform_device *pdev)
1702 1702
1703 irq = platform_get_irq_byname(pdev, "rx"); 1703 irq = platform_get_irq_byname(pdev, "rx");
1704 if (irq >= 0) { 1704 if (irq >= 0) {
1705 irq_name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s_rx\n", 1705 irq_name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s_rx",
1706 dev_name(&pdev->dev)); 1706 dev_name(&pdev->dev));
1707 ret = devm_request_threaded_irq(&pdev->dev, irq, NULL, 1707 ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
1708 davinci_mcasp_rx_irq_handler, 1708 davinci_mcasp_rx_irq_handler,
@@ -1717,7 +1717,7 @@ static int davinci_mcasp_probe(struct platform_device *pdev)
1717 1717
1718 irq = platform_get_irq_byname(pdev, "tx"); 1718 irq = platform_get_irq_byname(pdev, "tx");
1719 if (irq >= 0) { 1719 if (irq >= 0) {
1720 irq_name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s_tx\n", 1720 irq_name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s_tx",
1721 dev_name(&pdev->dev)); 1721 dev_name(&pdev->dev));
1722 ret = devm_request_threaded_irq(&pdev->dev, irq, NULL, 1722 ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
1723 davinci_mcasp_tx_irq_handler, 1723 davinci_mcasp_tx_irq_handler,
diff --git a/sound/soc/fsl/fsl-asoc-card.c b/sound/soc/fsl/fsl-asoc-card.c
index 5aeb6ed4827e..96f55ae75c71 100644
--- a/sound/soc/fsl/fsl-asoc-card.c
+++ b/sound/soc/fsl/fsl-asoc-card.c
@@ -488,7 +488,8 @@ static int fsl_asoc_card_probe(struct platform_device *pdev)
488 priv->dai_fmt |= SND_SOC_DAIFMT_CBM_CFM; 488 priv->dai_fmt |= SND_SOC_DAIFMT_CBM_CFM;
489 } else { 489 } else {
490 dev_err(&pdev->dev, "unknown Device Tree compatible\n"); 490 dev_err(&pdev->dev, "unknown Device Tree compatible\n");
491 return -EINVAL; 491 ret = -EINVAL;
492 goto asrc_fail;
492 } 493 }
493 494
494 /* Common settings for corresponding Freescale CPU DAI driver */ 495 /* Common settings for corresponding Freescale CPU DAI driver */
diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c
index 8ec6fb208ea0..37c5cd4d0e59 100644
--- a/sound/soc/fsl/fsl_ssi.c
+++ b/sound/soc/fsl/fsl_ssi.c
@@ -249,7 +249,8 @@ MODULE_DEVICE_TABLE(of, fsl_ssi_ids);
249 249
250static bool fsl_ssi_is_ac97(struct fsl_ssi_private *ssi_private) 250static bool fsl_ssi_is_ac97(struct fsl_ssi_private *ssi_private)
251{ 251{
252 return !!(ssi_private->dai_fmt & SND_SOC_DAIFMT_AC97); 252 return (ssi_private->dai_fmt & SND_SOC_DAIFMT_FORMAT_MASK) ==
253 SND_SOC_DAIFMT_AC97;
253} 254}
254 255
255static bool fsl_ssi_is_i2s_master(struct fsl_ssi_private *ssi_private) 256static bool fsl_ssi_is_i2s_master(struct fsl_ssi_private *ssi_private)
@@ -947,7 +948,7 @@ static int _fsl_ssi_set_dai_fmt(struct device *dev,
947 CCSR_SSI_SCR_TCH_EN); 948 CCSR_SSI_SCR_TCH_EN);
948 } 949 }
949 950
950 if (fmt & SND_SOC_DAIFMT_AC97) 951 if ((fmt & SND_SOC_DAIFMT_FORMAT_MASK) == SND_SOC_DAIFMT_AC97)
951 fsl_ssi_setup_ac97(ssi_private); 952 fsl_ssi_setup_ac97(ssi_private);
952 953
953 return 0; 954 return 0;
diff --git a/sound/soc/intel/haswell/sst-haswell-ipc.c b/sound/soc/intel/haswell/sst-haswell-ipc.c
index f6efa9d4acad..b27f25f70730 100644
--- a/sound/soc/intel/haswell/sst-haswell-ipc.c
+++ b/sound/soc/intel/haswell/sst-haswell-ipc.c
@@ -302,6 +302,10 @@ struct sst_hsw {
302 struct sst_hsw_ipc_dx_reply dx; 302 struct sst_hsw_ipc_dx_reply dx;
303 void *dx_context; 303 void *dx_context;
304 dma_addr_t dx_context_paddr; 304 dma_addr_t dx_context_paddr;
305 enum sst_hsw_device_id dx_dev;
306 enum sst_hsw_device_mclk dx_mclk;
307 enum sst_hsw_device_mode dx_mode;
308 u32 dx_clock_divider;
305 309
306 /* boot */ 310 /* boot */
307 wait_queue_head_t boot_wait; 311 wait_queue_head_t boot_wait;
@@ -1400,10 +1404,10 @@ int sst_hsw_device_set_config(struct sst_hsw *hsw,
1400 1404
1401 trace_ipc_request("set device config", dev); 1405 trace_ipc_request("set device config", dev);
1402 1406
1403 config.ssp_interface = dev; 1407 hsw->dx_dev = config.ssp_interface = dev;
1404 config.clock_frequency = mclk; 1408 hsw->dx_mclk = config.clock_frequency = mclk;
1405 config.mode = mode; 1409 hsw->dx_mode = config.mode = mode;
1406 config.clock_divider = clock_divider; 1410 hsw->dx_clock_divider = config.clock_divider = clock_divider;
1407 if (mode == SST_HSW_DEVICE_TDM_CLOCK_MASTER) 1411 if (mode == SST_HSW_DEVICE_TDM_CLOCK_MASTER)
1408 config.channels = 4; 1412 config.channels = 4;
1409 else 1413 else
@@ -1704,10 +1708,10 @@ int sst_hsw_dsp_runtime_resume(struct sst_hsw *hsw)
1704 return -EIO; 1708 return -EIO;
1705 } 1709 }
1706 1710
1707 /* Set ADSP SSP port settings */ 1711 /* Set ADSP SSP port settings - sadly the FW does not store SSP port
1708 ret = sst_hsw_device_set_config(hsw, SST_HSW_DEVICE_SSP_0, 1712 settings as part of the PM context. */
1709 SST_HSW_DEVICE_MCLK_FREQ_24_MHZ, 1713 ret = sst_hsw_device_set_config(hsw, hsw->dx_dev, hsw->dx_mclk,
1710 SST_HSW_DEVICE_CLOCK_MASTER, 9); 1714 hsw->dx_mode, hsw->dx_clock_divider);
1711 if (ret < 0) 1715 if (ret < 0)
1712 dev_err(dev, "error: SSP re-initialization failed\n"); 1716 dev_err(dev, "error: SSP re-initialization failed\n");
1713 1717
diff --git a/sound/soc/mediatek/mtk-afe-pcm.c b/sound/soc/mediatek/mtk-afe-pcm.c
index d190fe017559..f5baf3c38863 100644
--- a/sound/soc/mediatek/mtk-afe-pcm.c
+++ b/sound/soc/mediatek/mtk-afe-pcm.c
@@ -549,6 +549,23 @@ static int mtk_afe_dais_startup(struct snd_pcm_substream *substream,
549 memif->substream = substream; 549 memif->substream = substream;
550 550
551 snd_soc_set_runtime_hwparams(substream, &mtk_afe_hardware); 551 snd_soc_set_runtime_hwparams(substream, &mtk_afe_hardware);
552
553 /*
554 * Capture cannot use ping-pong buffer since hw_ptr at IRQ may be
555 * smaller than period_size due to AFE's internal buffer.
556 * This easily leads to overrun when avail_min is period_size.
557 * One more period can hold the possible unread buffer.
558 */
559 if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
560 ret = snd_pcm_hw_constraint_minmax(runtime,
561 SNDRV_PCM_HW_PARAM_PERIODS,
562 3,
563 mtk_afe_hardware.periods_max);
564 if (ret < 0) {
565 dev_err(afe->dev, "hw_constraint_minmax failed\n");
566 return ret;
567 }
568 }
552 ret = snd_pcm_hw_constraint_integer(runtime, 569 ret = snd_pcm_hw_constraint_integer(runtime,
553 SNDRV_PCM_HW_PARAM_PERIODS); 570 SNDRV_PCM_HW_PARAM_PERIODS);
554 if (ret < 0) 571 if (ret < 0)
diff --git a/sound/soc/pxa/Kconfig b/sound/soc/pxa/Kconfig
index 39cea80846c3..f2bf8661dd21 100644
--- a/sound/soc/pxa/Kconfig
+++ b/sound/soc/pxa/Kconfig
@@ -1,7 +1,6 @@
1config SND_PXA2XX_SOC 1config SND_PXA2XX_SOC
2 tristate "SoC Audio for the Intel PXA2xx chip" 2 tristate "SoC Audio for the Intel PXA2xx chip"
3 depends on ARCH_PXA 3 depends on ARCH_PXA
4 select SND_ARM
5 select SND_PXA2XX_LIB 4 select SND_PXA2XX_LIB
6 help 5 help
7 Say Y or M if you want to add support for codecs attached to 6 Say Y or M if you want to add support for codecs attached to
@@ -25,7 +24,6 @@ config SND_PXA2XX_AC97
25config SND_PXA2XX_SOC_AC97 24config SND_PXA2XX_SOC_AC97
26 tristate 25 tristate
27 select AC97_BUS 26 select AC97_BUS
28 select SND_ARM
29 select SND_PXA2XX_LIB_AC97 27 select SND_PXA2XX_LIB_AC97
30 select SND_SOC_AC97_BUS 28 select SND_SOC_AC97_BUS
31 29
diff --git a/sound/soc/pxa/pxa2xx-ac97.c b/sound/soc/pxa/pxa2xx-ac97.c
index 1f6054650991..9e4b04e0fbd1 100644
--- a/sound/soc/pxa/pxa2xx-ac97.c
+++ b/sound/soc/pxa/pxa2xx-ac97.c
@@ -49,7 +49,7 @@ static struct snd_ac97_bus_ops pxa2xx_ac97_ops = {
49 .reset = pxa2xx_ac97_cold_reset, 49 .reset = pxa2xx_ac97_cold_reset,
50}; 50};
51 51
52static unsigned long pxa2xx_ac97_pcm_stereo_in_req = 12; 52static unsigned long pxa2xx_ac97_pcm_stereo_in_req = 11;
53static struct snd_dmaengine_dai_dma_data pxa2xx_ac97_pcm_stereo_in = { 53static struct snd_dmaengine_dai_dma_data pxa2xx_ac97_pcm_stereo_in = {
54 .addr = __PREG(PCDR), 54 .addr = __PREG(PCDR),
55 .addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES, 55 .addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
@@ -57,7 +57,7 @@ static struct snd_dmaengine_dai_dma_data pxa2xx_ac97_pcm_stereo_in = {
57 .filter_data = &pxa2xx_ac97_pcm_stereo_in_req, 57 .filter_data = &pxa2xx_ac97_pcm_stereo_in_req,
58}; 58};
59 59
60static unsigned long pxa2xx_ac97_pcm_stereo_out_req = 11; 60static unsigned long pxa2xx_ac97_pcm_stereo_out_req = 12;
61static struct snd_dmaengine_dai_dma_data pxa2xx_ac97_pcm_stereo_out = { 61static struct snd_dmaengine_dai_dma_data pxa2xx_ac97_pcm_stereo_out = {
62 .addr = __PREG(PCDR), 62 .addr = __PREG(PCDR),
63 .addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES, 63 .addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index f4bf21a5539b..ff8bda471b25 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -3501,7 +3501,7 @@ static int snd_soc_dai_link_event(struct snd_soc_dapm_widget *w,
3501 3501
3502 default: 3502 default:
3503 WARN(1, "Unknown event %d\n", event); 3503 WARN(1, "Unknown event %d\n", event);
3504 return -EINVAL; 3504 ret = -EINVAL;
3505 } 3505 }
3506 3506
3507out: 3507out:
diff --git a/sound/soc/soc-utils.c b/sound/soc/soc-utils.c
index 362c69ac1d6c..53dd085d3ee2 100644
--- a/sound/soc/soc-utils.c
+++ b/sound/soc/soc-utils.c
@@ -101,6 +101,15 @@ static struct snd_soc_codec_driver dummy_codec;
101 SNDRV_PCM_FMTBIT_S32_LE | \ 101 SNDRV_PCM_FMTBIT_S32_LE | \
102 SNDRV_PCM_FMTBIT_U32_LE | \ 102 SNDRV_PCM_FMTBIT_U32_LE | \
103 SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_LE) 103 SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_LE)
104/*
105 * The dummy CODEC is only meant to be used in situations where there is no
106 * actual hardware.
107 *
108 * If there is actual hardware even if it does not have a control bus
109 * the hardware will still have constraints like supported samplerates, etc.
110 * which should be modelled. And the data flow graph also should be modelled
111 * using DAPM.
112 */
104static struct snd_soc_dai_driver dummy_dai = { 113static struct snd_soc_dai_driver dummy_dai = {
105 .name = "snd-soc-dummy-dai", 114 .name = "snd-soc-dummy-dai",
106 .playback = { 115 .playback = {
diff --git a/sound/soc/spear/Kconfig b/sound/soc/spear/Kconfig
index 0a53053495f3..4fb91412ebec 100644
--- a/sound/soc/spear/Kconfig
+++ b/sound/soc/spear/Kconfig
@@ -1,6 +1,6 @@
1config SND_SPEAR_SOC 1config SND_SPEAR_SOC
2 tristate 2 tristate
3 select SND_DMAENGINE_PCM 3 select SND_SOC_GENERIC_DMAENGINE_PCM
4 4
5config SND_SPEAR_SPDIF_OUT 5config SND_SPEAR_SPDIF_OUT
6 tristate 6 tristate
diff --git a/sound/soc/sti/uniperif_player.c b/sound/soc/sti/uniperif_player.c
index f6eefe1b8f8f..843f037a317d 100644
--- a/sound/soc/sti/uniperif_player.c
+++ b/sound/soc/sti/uniperif_player.c
@@ -989,8 +989,8 @@ static int uni_player_parse_dt(struct platform_device *pdev,
989 if (!info) 989 if (!info)
990 return -ENOMEM; 990 return -ENOMEM;
991 991
992 of_property_read_u32(pnode, "version", &player->ver); 992 if (of_property_read_u32(pnode, "version", &player->ver) ||
993 if (player->ver == SND_ST_UNIPERIF_VERSION_UNKNOWN) { 993 player->ver == SND_ST_UNIPERIF_VERSION_UNKNOWN) {
994 dev_err(dev, "Unknown uniperipheral version "); 994 dev_err(dev, "Unknown uniperipheral version ");
995 return -EINVAL; 995 return -EINVAL;
996 } 996 }
@@ -998,10 +998,16 @@ static int uni_player_parse_dt(struct platform_device *pdev,
998 if (player->ver >= SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0) 998 if (player->ver >= SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0)
999 info->underflow_enabled = 1; 999 info->underflow_enabled = 1;
1000 1000
1001 of_property_read_u32(pnode, "uniperiph-id", &info->id); 1001 if (of_property_read_u32(pnode, "uniperiph-id", &info->id)) {
1002 dev_err(dev, "uniperipheral id not defined");
1003 return -EINVAL;
1004 }
1002 1005
1003 /* Read the device mode property */ 1006 /* Read the device mode property */
1004 of_property_read_string(pnode, "mode", &mode); 1007 if (of_property_read_string(pnode, "mode", &mode)) {
1008 dev_err(dev, "uniperipheral mode not defined");
1009 return -EINVAL;
1010 }
1005 1011
1006 if (strcasecmp(mode, "hdmi") == 0) 1012 if (strcasecmp(mode, "hdmi") == 0)
1007 info->player_type = SND_ST_UNIPERIF_PLAYER_TYPE_HDMI; 1013 info->player_type = SND_ST_UNIPERIF_PLAYER_TYPE_HDMI;
diff --git a/sound/soc/sti/uniperif_reader.c b/sound/soc/sti/uniperif_reader.c
index c502626f339b..f791239a3087 100644
--- a/sound/soc/sti/uniperif_reader.c
+++ b/sound/soc/sti/uniperif_reader.c
@@ -316,7 +316,11 @@ static int uni_reader_parse_dt(struct platform_device *pdev,
316 if (!info) 316 if (!info)
317 return -ENOMEM; 317 return -ENOMEM;
318 318
319 of_property_read_u32(node, "version", &reader->ver); 319 if (of_property_read_u32(node, "version", &reader->ver) ||
320 reader->ver == SND_ST_UNIPERIF_VERSION_UNKNOWN) {
321 dev_err(&pdev->dev, "Unknown uniperipheral version ");
322 return -EINVAL;
323 }
320 324
321 /* Save the info structure */ 325 /* Save the info structure */
322 reader->info = info; 326 reader->info = info;
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index eb51325e8ad9..284a76e04628 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -768,8 +768,8 @@ static int process_exit_event(struct perf_tool *tool,
768 if (!evsel->attr.sample_id_all) { 768 if (!evsel->attr.sample_id_all) {
769 sample->cpu = 0; 769 sample->cpu = 0;
770 sample->time = 0; 770 sample->time = 0;
771 sample->tid = event->comm.tid; 771 sample->tid = event->fork.tid;
772 sample->pid = event->comm.pid; 772 sample->pid = event->fork.pid;
773 } 773 }
774 print_sample_start(sample, thread, evsel); 774 print_sample_start(sample, thread, evsel);
775 perf_event__fprintf(event, stdout); 775 perf_event__fprintf(event, stdout);
diff --git a/tools/perf/tests/sw-clock.c b/tools/perf/tests/sw-clock.c
index 1aa21c90731b..5b83f56a3b6f 100644
--- a/tools/perf/tests/sw-clock.c
+++ b/tools/perf/tests/sw-clock.c
@@ -34,6 +34,8 @@ static int __test__sw_clock_freq(enum perf_sw_ids clock_id)
34 .disabled = 1, 34 .disabled = 1,
35 .freq = 1, 35 .freq = 1,
36 }; 36 };
37 struct cpu_map *cpus;
38 struct thread_map *threads;
37 39
38 attr.sample_freq = 500; 40 attr.sample_freq = 500;
39 41
@@ -50,14 +52,19 @@ static int __test__sw_clock_freq(enum perf_sw_ids clock_id)
50 } 52 }
51 perf_evlist__add(evlist, evsel); 53 perf_evlist__add(evlist, evsel);
52 54
53 evlist->cpus = cpu_map__dummy_new(); 55 cpus = cpu_map__dummy_new();
54 evlist->threads = thread_map__new_by_tid(getpid()); 56 threads = thread_map__new_by_tid(getpid());
55 if (!evlist->cpus || !evlist->threads) { 57 if (!cpus || !threads) {
56 err = -ENOMEM; 58 err = -ENOMEM;
57 pr_debug("Not enough memory to create thread/cpu maps\n"); 59 pr_debug("Not enough memory to create thread/cpu maps\n");
58 goto out_delete_evlist; 60 goto out_free_maps;
59 } 61 }
60 62
63 perf_evlist__set_maps(evlist, cpus, threads);
64
65 cpus = NULL;
66 threads = NULL;
67
61 if (perf_evlist__open(evlist)) { 68 if (perf_evlist__open(evlist)) {
62 const char *knob = "/proc/sys/kernel/perf_event_max_sample_rate"; 69 const char *knob = "/proc/sys/kernel/perf_event_max_sample_rate";
63 70
@@ -107,6 +114,9 @@ next_event:
107 err = -1; 114 err = -1;
108 } 115 }
109 116
117out_free_maps:
118 cpu_map__put(cpus);
119 thread_map__put(threads);
110out_delete_evlist: 120out_delete_evlist:
111 perf_evlist__delete(evlist); 121 perf_evlist__delete(evlist);
112 return err; 122 return err;
diff --git a/tools/perf/tests/task-exit.c b/tools/perf/tests/task-exit.c
index 3a8fedef83bc..add16385f13e 100644
--- a/tools/perf/tests/task-exit.c
+++ b/tools/perf/tests/task-exit.c
@@ -43,6 +43,8 @@ int test__task_exit(void)
43 }; 43 };
44 const char *argv[] = { "true", NULL }; 44 const char *argv[] = { "true", NULL };
45 char sbuf[STRERR_BUFSIZE]; 45 char sbuf[STRERR_BUFSIZE];
46 struct cpu_map *cpus;
47 struct thread_map *threads;
46 48
47 signal(SIGCHLD, sig_handler); 49 signal(SIGCHLD, sig_handler);
48 50
@@ -58,14 +60,19 @@ int test__task_exit(void)
58 * perf_evlist__prepare_workload we'll fill in the only thread 60 * perf_evlist__prepare_workload we'll fill in the only thread
59 * we're monitoring, the one forked there. 61 * we're monitoring, the one forked there.
60 */ 62 */
61 evlist->cpus = cpu_map__dummy_new(); 63 cpus = cpu_map__dummy_new();
62 evlist->threads = thread_map__new_by_tid(-1); 64 threads = thread_map__new_by_tid(-1);
63 if (!evlist->cpus || !evlist->threads) { 65 if (!cpus || !threads) {
64 err = -ENOMEM; 66 err = -ENOMEM;
65 pr_debug("Not enough memory to create thread/cpu maps\n"); 67 pr_debug("Not enough memory to create thread/cpu maps\n");
66 goto out_delete_evlist; 68 goto out_free_maps;
67 } 69 }
68 70
71 perf_evlist__set_maps(evlist, cpus, threads);
72
73 cpus = NULL;
74 threads = NULL;
75
69 err = perf_evlist__prepare_workload(evlist, &target, argv, false, 76 err = perf_evlist__prepare_workload(evlist, &target, argv, false,
70 workload_exec_failed_signal); 77 workload_exec_failed_signal);
71 if (err < 0) { 78 if (err < 0) {
@@ -114,6 +121,9 @@ retry:
114 err = -1; 121 err = -1;
115 } 122 }
116 123
124out_free_maps:
125 cpu_map__put(cpus);
126 thread_map__put(threads);
117out_delete_evlist: 127out_delete_evlist:
118 perf_evlist__delete(evlist); 128 perf_evlist__delete(evlist);
119 return err; 129 return err;
diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
index cf86f2d3a5e7..c04c60d4863c 100644
--- a/tools/perf/ui/browsers/hists.c
+++ b/tools/perf/ui/browsers/hists.c
@@ -1968,7 +1968,8 @@ skip_annotation:
1968 &options[nr_options], dso); 1968 &options[nr_options], dso);
1969 nr_options += add_map_opt(browser, &actions[nr_options], 1969 nr_options += add_map_opt(browser, &actions[nr_options],
1970 &options[nr_options], 1970 &options[nr_options],
1971 browser->selection->map); 1971 browser->selection ?
1972 browser->selection->map : NULL);
1972 1973
1973 /* perf script support */ 1974 /* perf script support */
1974 if (browser->he_selection) { 1975 if (browser->he_selection) {
@@ -1976,6 +1977,15 @@ skip_annotation:
1976 &actions[nr_options], 1977 &actions[nr_options],
1977 &options[nr_options], 1978 &options[nr_options],
1978 thread, NULL); 1979 thread, NULL);
1980 /*
1981 * Note that browser->selection != NULL
1982 * when browser->he_selection is not NULL,
1983 * so we don't need to check browser->selection
1984 * before fetching browser->selection->sym like what
1985 * we do before fetching browser->selection->map.
1986 *
1987 * See hist_browser__show_entry.
1988 */
1979 nr_options += add_script_opt(browser, 1989 nr_options += add_script_opt(browser,
1980 &actions[nr_options], 1990 &actions[nr_options],
1981 &options[nr_options], 1991 &options[nr_options],
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index d51a5200c8af..c8fc8a258f42 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -124,6 +124,33 @@ void perf_evlist__delete(struct perf_evlist *evlist)
124 free(evlist); 124 free(evlist);
125} 125}
126 126
127static void __perf_evlist__propagate_maps(struct perf_evlist *evlist,
128 struct perf_evsel *evsel)
129{
130 /*
131 * We already have cpus for evsel (via PMU sysfs) so
132 * keep it, if there's no target cpu list defined.
133 */
134 if (!evsel->own_cpus || evlist->has_user_cpus) {
135 cpu_map__put(evsel->cpus);
136 evsel->cpus = cpu_map__get(evlist->cpus);
137 } else if (evsel->cpus != evsel->own_cpus) {
138 cpu_map__put(evsel->cpus);
139 evsel->cpus = cpu_map__get(evsel->own_cpus);
140 }
141
142 thread_map__put(evsel->threads);
143 evsel->threads = thread_map__get(evlist->threads);
144}
145
146static void perf_evlist__propagate_maps(struct perf_evlist *evlist)
147{
148 struct perf_evsel *evsel;
149
150 evlist__for_each(evlist, evsel)
151 __perf_evlist__propagate_maps(evlist, evsel);
152}
153
127void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry) 154void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
128{ 155{
129 entry->evlist = evlist; 156 entry->evlist = evlist;
@@ -133,18 +160,19 @@ void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
133 160
134 if (!evlist->nr_entries++) 161 if (!evlist->nr_entries++)
135 perf_evlist__set_id_pos(evlist); 162 perf_evlist__set_id_pos(evlist);
163
164 __perf_evlist__propagate_maps(evlist, entry);
136} 165}
137 166
138void perf_evlist__splice_list_tail(struct perf_evlist *evlist, 167void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
139 struct list_head *list, 168 struct list_head *list)
140 int nr_entries)
141{ 169{
142 bool set_id_pos = !evlist->nr_entries; 170 struct perf_evsel *evsel, *temp;
143 171
144 list_splice_tail(list, &evlist->entries); 172 __evlist__for_each_safe(list, temp, evsel) {
145 evlist->nr_entries += nr_entries; 173 list_del_init(&evsel->node);
146 if (set_id_pos) 174 perf_evlist__add(evlist, evsel);
147 perf_evlist__set_id_pos(evlist); 175 }
148} 176}
149 177
150void __perf_evlist__set_leader(struct list_head *list) 178void __perf_evlist__set_leader(struct list_head *list)
@@ -210,7 +238,7 @@ static int perf_evlist__add_attrs(struct perf_evlist *evlist,
210 list_add_tail(&evsel->node, &head); 238 list_add_tail(&evsel->node, &head);
211 } 239 }
212 240
213 perf_evlist__splice_list_tail(evlist, &head, nr_attrs); 241 perf_evlist__splice_list_tail(evlist, &head);
214 242
215 return 0; 243 return 0;
216 244
@@ -1103,71 +1131,56 @@ int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
1103 return perf_evlist__mmap_ex(evlist, pages, overwrite, 0, false); 1131 return perf_evlist__mmap_ex(evlist, pages, overwrite, 0, false);
1104} 1132}
1105 1133
1106static int perf_evlist__propagate_maps(struct perf_evlist *evlist,
1107 bool has_user_cpus)
1108{
1109 struct perf_evsel *evsel;
1110
1111 evlist__for_each(evlist, evsel) {
1112 /*
1113 * We already have cpus for evsel (via PMU sysfs) so
1114 * keep it, if there's no target cpu list defined.
1115 */
1116 if (evsel->cpus && has_user_cpus)
1117 cpu_map__put(evsel->cpus);
1118
1119 if (!evsel->cpus || has_user_cpus)
1120 evsel->cpus = cpu_map__get(evlist->cpus);
1121
1122 evsel->threads = thread_map__get(evlist->threads);
1123
1124 if ((evlist->cpus && !evsel->cpus) ||
1125 (evlist->threads && !evsel->threads))
1126 return -ENOMEM;
1127 }
1128
1129 return 0;
1130}
1131
1132int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target) 1134int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target)
1133{ 1135{
1134 evlist->threads = thread_map__new_str(target->pid, target->tid, 1136 struct cpu_map *cpus;
1135 target->uid); 1137 struct thread_map *threads;
1138
1139 threads = thread_map__new_str(target->pid, target->tid, target->uid);
1136 1140
1137 if (evlist->threads == NULL) 1141 if (!threads)
1138 return -1; 1142 return -1;
1139 1143
1140 if (target__uses_dummy_map(target)) 1144 if (target__uses_dummy_map(target))
1141 evlist->cpus = cpu_map__dummy_new(); 1145 cpus = cpu_map__dummy_new();
1142 else 1146 else
1143 evlist->cpus = cpu_map__new(target->cpu_list); 1147 cpus = cpu_map__new(target->cpu_list);
1144 1148
1145 if (evlist->cpus == NULL) 1149 if (!cpus)
1146 goto out_delete_threads; 1150 goto out_delete_threads;
1147 1151
1148 return perf_evlist__propagate_maps(evlist, !!target->cpu_list); 1152 evlist->has_user_cpus = !!target->cpu_list;
1153
1154 perf_evlist__set_maps(evlist, cpus, threads);
1155
1156 return 0;
1149 1157
1150out_delete_threads: 1158out_delete_threads:
1151 thread_map__put(evlist->threads); 1159 thread_map__put(threads);
1152 evlist->threads = NULL;
1153 return -1; 1160 return -1;
1154} 1161}
1155 1162
1156int perf_evlist__set_maps(struct perf_evlist *evlist, 1163void perf_evlist__set_maps(struct perf_evlist *evlist, struct cpu_map *cpus,
1157 struct cpu_map *cpus, 1164 struct thread_map *threads)
1158 struct thread_map *threads)
1159{ 1165{
1160 if (evlist->cpus) 1166 /*
1167 * Allow for the possibility that one or another of the maps isn't being
1168 * changed i.e. don't put it. Note we are assuming the maps that are
1169 * being applied are brand new and evlist is taking ownership of the
1170 * original reference count of 1. If that is not the case it is up to
1171 * the caller to increase the reference count.
1172 */
1173 if (cpus != evlist->cpus) {
1161 cpu_map__put(evlist->cpus); 1174 cpu_map__put(evlist->cpus);
1175 evlist->cpus = cpus;
1176 }
1162 1177
1163 evlist->cpus = cpus; 1178 if (threads != evlist->threads) {
1164
1165 if (evlist->threads)
1166 thread_map__put(evlist->threads); 1179 thread_map__put(evlist->threads);
1180 evlist->threads = threads;
1181 }
1167 1182
1168 evlist->threads = threads; 1183 perf_evlist__propagate_maps(evlist);
1169
1170 return perf_evlist__propagate_maps(evlist, false);
1171} 1184}
1172 1185
1173int perf_evlist__apply_filters(struct perf_evlist *evlist, struct perf_evsel **err_evsel) 1186int perf_evlist__apply_filters(struct perf_evlist *evlist, struct perf_evsel **err_evsel)
@@ -1387,6 +1400,8 @@ void perf_evlist__close(struct perf_evlist *evlist)
1387 1400
1388static int perf_evlist__create_syswide_maps(struct perf_evlist *evlist) 1401static int perf_evlist__create_syswide_maps(struct perf_evlist *evlist)
1389{ 1402{
1403 struct cpu_map *cpus;
1404 struct thread_map *threads;
1390 int err = -ENOMEM; 1405 int err = -ENOMEM;
1391 1406
1392 /* 1407 /*
@@ -1398,20 +1413,19 @@ static int perf_evlist__create_syswide_maps(struct perf_evlist *evlist)
1398 * error, and we may not want to do that fallback to a 1413 * error, and we may not want to do that fallback to a
1399 * default cpu identity map :-\ 1414 * default cpu identity map :-\
1400 */ 1415 */
1401 evlist->cpus = cpu_map__new(NULL); 1416 cpus = cpu_map__new(NULL);
1402 if (evlist->cpus == NULL) 1417 if (!cpus)
1403 goto out; 1418 goto out;
1404 1419
1405 evlist->threads = thread_map__new_dummy(); 1420 threads = thread_map__new_dummy();
1406 if (evlist->threads == NULL) 1421 if (!threads)
1407 goto out_free_cpus; 1422 goto out_put;
1408 1423
1409 err = 0; 1424 perf_evlist__set_maps(evlist, cpus, threads);
1410out: 1425out:
1411 return err; 1426 return err;
1412out_free_cpus: 1427out_put:
1413 cpu_map__put(evlist->cpus); 1428 cpu_map__put(cpus);
1414 evlist->cpus = NULL;
1415 goto out; 1429 goto out;
1416} 1430}
1417 1431
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index b39a6198f4ac..115d8b53c601 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -42,6 +42,7 @@ struct perf_evlist {
42 int nr_mmaps; 42 int nr_mmaps;
43 bool overwrite; 43 bool overwrite;
44 bool enabled; 44 bool enabled;
45 bool has_user_cpus;
45 size_t mmap_len; 46 size_t mmap_len;
46 int id_pos; 47 int id_pos;
47 int is_pos; 48 int is_pos;
@@ -155,9 +156,8 @@ int perf_evlist__enable_event_idx(struct perf_evlist *evlist,
155void perf_evlist__set_selected(struct perf_evlist *evlist, 156void perf_evlist__set_selected(struct perf_evlist *evlist,
156 struct perf_evsel *evsel); 157 struct perf_evsel *evsel);
157 158
158int perf_evlist__set_maps(struct perf_evlist *evlist, 159void perf_evlist__set_maps(struct perf_evlist *evlist, struct cpu_map *cpus,
159 struct cpu_map *cpus, 160 struct thread_map *threads);
160 struct thread_map *threads);
161int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target); 161int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target);
162int perf_evlist__apply_filters(struct perf_evlist *evlist, struct perf_evsel **err_evsel); 162int perf_evlist__apply_filters(struct perf_evlist *evlist, struct perf_evsel **err_evsel);
163 163
@@ -179,8 +179,7 @@ bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist);
179bool perf_evlist__valid_read_format(struct perf_evlist *evlist); 179bool perf_evlist__valid_read_format(struct perf_evlist *evlist);
180 180
181void perf_evlist__splice_list_tail(struct perf_evlist *evlist, 181void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
182 struct list_head *list, 182 struct list_head *list);
183 int nr_entries);
184 183
185static inline struct perf_evsel *perf_evlist__first(struct perf_evlist *evlist) 184static inline struct perf_evsel *perf_evlist__first(struct perf_evlist *evlist)
186{ 185{
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index c53f79123b37..5410483d5219 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -1033,6 +1033,7 @@ void perf_evsel__exit(struct perf_evsel *evsel)
1033 perf_evsel__free_config_terms(evsel); 1033 perf_evsel__free_config_terms(evsel);
1034 close_cgroup(evsel->cgrp); 1034 close_cgroup(evsel->cgrp);
1035 cpu_map__put(evsel->cpus); 1035 cpu_map__put(evsel->cpus);
1036 cpu_map__put(evsel->own_cpus);
1036 thread_map__put(evsel->threads); 1037 thread_map__put(evsel->threads);
1037 zfree(&evsel->group_name); 1038 zfree(&evsel->group_name);
1038 zfree(&evsel->name); 1039 zfree(&evsel->name);
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index 298e6bbca200..ef8925f7211a 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -98,6 +98,7 @@ struct perf_evsel {
98 struct cgroup_sel *cgrp; 98 struct cgroup_sel *cgrp;
99 void *handler; 99 void *handler;
100 struct cpu_map *cpus; 100 struct cpu_map *cpus;
101 struct cpu_map *own_cpus;
101 struct thread_map *threads; 102 struct thread_map *threads;
102 unsigned int sample_size; 103 unsigned int sample_size;
103 int id_pos; 104 int id_pos;
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 41814547da15..fce6634aebe2 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -1438,7 +1438,7 @@ static int process_nrcpus(struct perf_file_section *section __maybe_unused,
1438 if (ph->needs_swap) 1438 if (ph->needs_swap)
1439 nr = bswap_32(nr); 1439 nr = bswap_32(nr);
1440 1440
1441 ph->env.nr_cpus_online = nr; 1441 ph->env.nr_cpus_avail = nr;
1442 1442
1443 ret = readn(fd, &nr, sizeof(nr)); 1443 ret = readn(fd, &nr, sizeof(nr));
1444 if (ret != sizeof(nr)) 1444 if (ret != sizeof(nr))
@@ -1447,7 +1447,7 @@ static int process_nrcpus(struct perf_file_section *section __maybe_unused,
1447 if (ph->needs_swap) 1447 if (ph->needs_swap)
1448 nr = bswap_32(nr); 1448 nr = bswap_32(nr);
1449 1449
1450 ph->env.nr_cpus_avail = nr; 1450 ph->env.nr_cpus_online = nr;
1451 return 0; 1451 return 0;
1452} 1452}
1453 1453
diff --git a/tools/perf/util/intel-bts.c b/tools/perf/util/intel-bts.c
index ea768625ab5b..eb0e7f8bf515 100644
--- a/tools/perf/util/intel-bts.c
+++ b/tools/perf/util/intel-bts.c
@@ -623,7 +623,7 @@ static int intel_bts_process_event(struct perf_session *session,
623 if (err) 623 if (err)
624 return err; 624 return err;
625 if (event->header.type == PERF_RECORD_EXIT) { 625 if (event->header.type == PERF_RECORD_EXIT) {
626 err = intel_bts_process_tid_exit(bts, event->comm.tid); 626 err = intel_bts_process_tid_exit(bts, event->fork.tid);
627 if (err) 627 if (err)
628 return err; 628 return err;
629 } 629 }
diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
index bb41c20e6005..535d86f8e4d1 100644
--- a/tools/perf/util/intel-pt.c
+++ b/tools/perf/util/intel-pt.c
@@ -1494,7 +1494,7 @@ static int intel_pt_process_event(struct perf_session *session,
1494 if (pt->timeless_decoding) { 1494 if (pt->timeless_decoding) {
1495 if (event->header.type == PERF_RECORD_EXIT) { 1495 if (event->header.type == PERF_RECORD_EXIT) {
1496 err = intel_pt_process_timeless_queues(pt, 1496 err = intel_pt_process_timeless_queues(pt,
1497 event->comm.tid, 1497 event->fork.tid,
1498 sample->time); 1498 sample->time);
1499 } 1499 }
1500 } else if (timestamp) { 1500 } else if (timestamp) {
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index d826e6f515db..21ed6ee63da9 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -287,8 +287,8 @@ __add_event(struct list_head *list, int *idx,
287 if (!evsel) 287 if (!evsel)
288 return NULL; 288 return NULL;
289 289
290 if (cpus) 290 evsel->cpus = cpu_map__get(cpus);
291 evsel->cpus = cpu_map__get(cpus); 291 evsel->own_cpus = cpu_map__get(cpus);
292 292
293 if (name) 293 if (name)
294 evsel->name = strdup(name); 294 evsel->name = strdup(name);
@@ -1140,10 +1140,9 @@ int parse_events(struct perf_evlist *evlist, const char *str,
1140 ret = parse_events__scanner(str, &data, PE_START_EVENTS); 1140 ret = parse_events__scanner(str, &data, PE_START_EVENTS);
1141 perf_pmu__parse_cleanup(); 1141 perf_pmu__parse_cleanup();
1142 if (!ret) { 1142 if (!ret) {
1143 int entries = data.idx - evlist->nr_entries;
1144 struct perf_evsel *last; 1143 struct perf_evsel *last;
1145 1144
1146 perf_evlist__splice_list_tail(evlist, &data.list, entries); 1145 perf_evlist__splice_list_tail(evlist, &data.list);
1147 evlist->nr_groups += data.nr_groups; 1146 evlist->nr_groups += data.nr_groups;
1148 last = perf_evlist__last(evlist); 1147 last = perf_evlist__last(evlist);
1149 last->cmdline_group_boundary = true; 1148 last->cmdline_group_boundary = true;
diff --git a/tools/perf/util/parse-events.y b/tools/perf/util/parse-events.y
index 591905a02b92..9cd70819c795 100644
--- a/tools/perf/util/parse-events.y
+++ b/tools/perf/util/parse-events.y
@@ -255,7 +255,7 @@ PE_PMU_EVENT_PRE '-' PE_PMU_EVENT_SUF sep_dc
255 list_add_tail(&term->list, head); 255 list_add_tail(&term->list, head);
256 256
257 ALLOC_LIST(list); 257 ALLOC_LIST(list);
258 ABORT_ON(parse_events_add_pmu(list, &data->idx, "cpu", head)); 258 ABORT_ON(parse_events_add_pmu(data, list, "cpu", head));
259 parse_events__free_terms(head); 259 parse_events__free_terms(head);
260 $$ = list; 260 $$ = list;
261} 261}
diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
index 050151144596..cfe121353eec 100644
--- a/tools/testing/selftests/Makefile
+++ b/tools/testing/selftests/Makefile
@@ -6,6 +6,7 @@ TARGETS += firmware
6TARGETS += ftrace 6TARGETS += ftrace
7TARGETS += futex 7TARGETS += futex
8TARGETS += kcmp 8TARGETS += kcmp
9TARGETS += membarrier
9TARGETS += memfd 10TARGETS += memfd
10TARGETS += memory-hotplug 11TARGETS += memory-hotplug
11TARGETS += mount 12TARGETS += mount
@@ -15,12 +16,12 @@ TARGETS += powerpc
15TARGETS += ptrace 16TARGETS += ptrace
16TARGETS += seccomp 17TARGETS += seccomp
17TARGETS += size 18TARGETS += size
19TARGETS += static_keys
18TARGETS += sysctl 20TARGETS += sysctl
19ifneq (1, $(quicktest)) 21ifneq (1, $(quicktest))
20TARGETS += timers 22TARGETS += timers
21endif 23endif
22TARGETS += user 24TARGETS += user
23TARGETS += jumplabel
24TARGETS += vm 25TARGETS += vm
25TARGETS += x86 26TARGETS += x86
26TARGETS += zram 27TARGETS += zram
diff --git a/tools/testing/selftests/exec/Makefile b/tools/testing/selftests/exec/Makefile
index 6b76bfdc847e..4e400eb83657 100644
--- a/tools/testing/selftests/exec/Makefile
+++ b/tools/testing/selftests/exec/Makefile
@@ -1,6 +1,6 @@
1CFLAGS = -Wall 1CFLAGS = -Wall
2BINARIES = execveat 2BINARIES = execveat
3DEPS = execveat.symlink execveat.denatured script 3DEPS = execveat.symlink execveat.denatured script subdir
4all: $(BINARIES) $(DEPS) 4all: $(BINARIES) $(DEPS)
5 5
6subdir: 6subdir:
@@ -22,7 +22,5 @@ TEST_FILES := $(DEPS)
22 22
23include ../lib.mk 23include ../lib.mk
24 24
25override EMIT_TESTS := echo "mkdir -p subdir; (./execveat && echo \"selftests: execveat [PASS]\") || echo \"selftests: execveat [FAIL]\""
26
27clean: 25clean:
28 rm -rf $(BINARIES) $(DEPS) subdir.moved execveat.moved xxxxx* 26 rm -rf $(BINARIES) $(DEPS) subdir.moved execveat.moved xxxxx*
diff --git a/tools/testing/selftests/ftrace/Makefile b/tools/testing/selftests/ftrace/Makefile
index 0acbeca47225..4e6ed13e7f66 100644
--- a/tools/testing/selftests/ftrace/Makefile
+++ b/tools/testing/selftests/ftrace/Makefile
@@ -1,7 +1,7 @@
1all: 1all:
2 2
3TEST_PROGS := ftracetest 3TEST_PROGS := ftracetest
4TEST_DIRS := test.d/ 4TEST_DIRS := test.d
5 5
6include ../lib.mk 6include ../lib.mk
7 7
diff --git a/tools/testing/selftests/lib.mk b/tools/testing/selftests/lib.mk
index 97f1c6742066..50a93f5f13d6 100644
--- a/tools/testing/selftests/lib.mk
+++ b/tools/testing/selftests/lib.mk
@@ -12,13 +12,10 @@ run_tests: all
12 $(RUN_TESTS) 12 $(RUN_TESTS)
13 13
14define INSTALL_RULE 14define INSTALL_RULE
15 @if [ "X$(TEST_PROGS)$(TEST_PROGS_EXTENDED)$(TEST_FILES)" != "X" ]; then \ 15 @if [ "X$(TEST_PROGS)$(TEST_PROGS_EXTENDED)$(TEST_FILES)" != "X" ]; then \
16 mkdir -p $(INSTALL_PATH); \ 16 mkdir -p ${INSTALL_PATH}; \
17 for TEST_DIR in $(TEST_DIRS); do \ 17 echo "rsync -a $(TEST_DIRS) $(TEST_PROGS) $(TEST_PROGS_EXTENDED) $(TEST_FILES) $(INSTALL_PATH)/"; \
18 cp -r $$TEST_DIR $(INSTALL_PATH); \ 18 rsync -a $(TEST_DIRS) $(TEST_PROGS) $(TEST_PROGS_EXTENDED) $(TEST_FILES) $(INSTALL_PATH)/; \
19 done; \
20 echo "install -t $(INSTALL_PATH) $(TEST_PROGS) $(TEST_PROGS_EXTENDED) $(TEST_FILES)"; \
21 install -t $(INSTALL_PATH) $(TEST_PROGS) $(TEST_PROGS_EXTENDED) $(TEST_FILES); \
22 fi 19 fi
23endef 20endef
24 21
diff --git a/tools/testing/selftests/membarrier/.gitignore b/tools/testing/selftests/membarrier/.gitignore
new file mode 100644
index 000000000000..020c44f49a9e
--- /dev/null
+++ b/tools/testing/selftests/membarrier/.gitignore
@@ -0,0 +1 @@
membarrier_test
diff --git a/tools/testing/selftests/membarrier/Makefile b/tools/testing/selftests/membarrier/Makefile
new file mode 100644
index 000000000000..a1a97085847d
--- /dev/null
+++ b/tools/testing/selftests/membarrier/Makefile
@@ -0,0 +1,10 @@
1CFLAGS += -g -I../../../../usr/include/
2
3TEST_PROGS := membarrier_test
4
5all: $(TEST_PROGS)
6
7include ../lib.mk
8
9clean:
10 $(RM) $(TEST_PROGS)
diff --git a/tools/testing/selftests/membarrier/membarrier_test.c b/tools/testing/selftests/membarrier/membarrier_test.c
new file mode 100644
index 000000000000..535f0fef4d0b
--- /dev/null
+++ b/tools/testing/selftests/membarrier/membarrier_test.c
@@ -0,0 +1,118 @@
1#define _GNU_SOURCE
2#include <linux/membarrier.h>
3#include <syscall.h>
4#include <stdio.h>
5#include <errno.h>
6#include <string.h>
7
8#include "../kselftest.h"
9
10enum test_membarrier_status {
11 TEST_MEMBARRIER_PASS = 0,
12 TEST_MEMBARRIER_FAIL,
13 TEST_MEMBARRIER_SKIP,
14};
15
16static int sys_membarrier(int cmd, int flags)
17{
18 return syscall(__NR_membarrier, cmd, flags);
19}
20
21static enum test_membarrier_status test_membarrier_cmd_fail(void)
22{
23 int cmd = -1, flags = 0;
24
25 if (sys_membarrier(cmd, flags) != -1) {
26 printf("membarrier: Wrong command should fail but passed.\n");
27 return TEST_MEMBARRIER_FAIL;
28 }
29 return TEST_MEMBARRIER_PASS;
30}
31
32static enum test_membarrier_status test_membarrier_flags_fail(void)
33{
34 int cmd = MEMBARRIER_CMD_QUERY, flags = 1;
35
36 if (sys_membarrier(cmd, flags) != -1) {
37 printf("membarrier: Wrong flags should fail but passed.\n");
38 return TEST_MEMBARRIER_FAIL;
39 }
40 return TEST_MEMBARRIER_PASS;
41}
42
43static enum test_membarrier_status test_membarrier_success(void)
44{
45 int cmd = MEMBARRIER_CMD_SHARED, flags = 0;
46
47 if (sys_membarrier(cmd, flags) != 0) {
48 printf("membarrier: Executing MEMBARRIER_CMD_SHARED failed. %s.\n",
49 strerror(errno));
50 return TEST_MEMBARRIER_FAIL;
51 }
52
53 printf("membarrier: MEMBARRIER_CMD_SHARED success.\n");
54 return TEST_MEMBARRIER_PASS;
55}
56
57static enum test_membarrier_status test_membarrier(void)
58{
59 enum test_membarrier_status status;
60
61 status = test_membarrier_cmd_fail();
62 if (status)
63 return status;
64 status = test_membarrier_flags_fail();
65 if (status)
66 return status;
67 status = test_membarrier_success();
68 if (status)
69 return status;
70 return TEST_MEMBARRIER_PASS;
71}
72
73static enum test_membarrier_status test_membarrier_query(void)
74{
75 int flags = 0, ret;
76
77 printf("membarrier MEMBARRIER_CMD_QUERY ");
78 ret = sys_membarrier(MEMBARRIER_CMD_QUERY, flags);
79 if (ret < 0) {
80 printf("failed. %s.\n", strerror(errno));
81 switch (errno) {
82 case ENOSYS:
83 /*
84 * It is valid to build a kernel with
85 * CONFIG_MEMBARRIER=n. However, this skips the tests.
86 */
87 return TEST_MEMBARRIER_SKIP;
88 case EINVAL:
89 default:
90 return TEST_MEMBARRIER_FAIL;
91 }
92 }
93 if (!(ret & MEMBARRIER_CMD_SHARED)) {
94 printf("command MEMBARRIER_CMD_SHARED is not supported.\n");
95 return TEST_MEMBARRIER_FAIL;
96 }
97 printf("syscall available.\n");
98 return TEST_MEMBARRIER_PASS;
99}
100
101int main(int argc, char **argv)
102{
103 switch (test_membarrier_query()) {
104 case TEST_MEMBARRIER_FAIL:
105 return ksft_exit_fail();
106 case TEST_MEMBARRIER_SKIP:
107 return ksft_exit_skip();
108 }
109 switch (test_membarrier()) {
110 case TEST_MEMBARRIER_FAIL:
111 return ksft_exit_fail();
112 case TEST_MEMBARRIER_SKIP:
113 return ksft_exit_skip();
114 }
115
116 printf("membarrier: tests done!\n");
117 return ksft_exit_pass();
118}
diff --git a/tools/testing/selftests/mqueue/Makefile b/tools/testing/selftests/mqueue/Makefile
index 0e3b41eb85cd..eebac29acbd9 100644
--- a/tools/testing/selftests/mqueue/Makefile
+++ b/tools/testing/selftests/mqueue/Makefile
@@ -1,8 +1,8 @@
1CFLAGS = -O2 1CFLAGS += -O2
2LDLIBS = -lrt -lpthread -lpopt
3TEST_PROGS := mq_open_tests mq_perf_tests
2 4
3all: 5all: $(TEST_PROGS)
4 $(CC) $(CFLAGS) mq_open_tests.c -o mq_open_tests -lrt
5 $(CC) $(CFLAGS) -o mq_perf_tests mq_perf_tests.c -lrt -lpthread -lpopt
6 6
7include ../lib.mk 7include ../lib.mk
8 8
@@ -11,8 +11,6 @@ override define RUN_TESTS
11 @./mq_perf_tests || echo "selftests: mq_perf_tests [FAIL]" 11 @./mq_perf_tests || echo "selftests: mq_perf_tests [FAIL]"
12endef 12endef
13 13
14TEST_PROGS := mq_open_tests mq_perf_tests
15
16override define EMIT_TESTS 14override define EMIT_TESTS
17 echo "./mq_open_tests /test1 || echo \"selftests: mq_open_tests [FAIL]\"" 15 echo "./mq_open_tests /test1 || echo \"selftests: mq_open_tests [FAIL]\""
18 echo "./mq_perf_tests || echo \"selftests: mq_perf_tests [FAIL]\"" 16 echo "./mq_perf_tests || echo \"selftests: mq_perf_tests [FAIL]\""
diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c
index a004b4cce99e..770f47adf295 100644
--- a/tools/testing/selftests/seccomp/seccomp_bpf.c
+++ b/tools/testing/selftests/seccomp/seccomp_bpf.c
@@ -1210,6 +1210,10 @@ TEST_F(TRACE_poke, getpid_runs_normally)
1210# define ARCH_REGS struct pt_regs 1210# define ARCH_REGS struct pt_regs
1211# define SYSCALL_NUM gpr[0] 1211# define SYSCALL_NUM gpr[0]
1212# define SYSCALL_RET gpr[3] 1212# define SYSCALL_RET gpr[3]
1213#elif defined(__s390__)
1214# define ARCH_REGS s390_regs
1215# define SYSCALL_NUM gprs[2]
1216# define SYSCALL_RET gprs[2]
1213#else 1217#else
1214# error "Do not know how to find your architecture's registers and syscalls" 1218# error "Do not know how to find your architecture's registers and syscalls"
1215#endif 1219#endif
@@ -1243,7 +1247,8 @@ void change_syscall(struct __test_metadata *_metadata,
1243 ret = ptrace(PTRACE_GETREGSET, tracee, NT_PRSTATUS, &iov); 1247 ret = ptrace(PTRACE_GETREGSET, tracee, NT_PRSTATUS, &iov);
1244 EXPECT_EQ(0, ret); 1248 EXPECT_EQ(0, ret);
1245 1249
1246#if defined(__x86_64__) || defined(__i386__) || defined(__aarch64__) || defined(__powerpc__) 1250#if defined(__x86_64__) || defined(__i386__) || defined(__aarch64__) || \
1251 defined(__powerpc__) || defined(__s390__)
1247 { 1252 {
1248 regs.SYSCALL_NUM = syscall; 1253 regs.SYSCALL_NUM = syscall;
1249 } 1254 }
@@ -1281,17 +1286,21 @@ void tracer_syscall(struct __test_metadata *_metadata, pid_t tracee,
1281 ret = ptrace(PTRACE_GETEVENTMSG, tracee, NULL, &msg); 1286 ret = ptrace(PTRACE_GETEVENTMSG, tracee, NULL, &msg);
1282 EXPECT_EQ(0, ret); 1287 EXPECT_EQ(0, ret);
1283 1288
1289 /* Validate and take action on expected syscalls. */
1284 switch (msg) { 1290 switch (msg) {
1285 case 0x1002: 1291 case 0x1002:
1286 /* change getpid to getppid. */ 1292 /* change getpid to getppid. */
1293 EXPECT_EQ(__NR_getpid, get_syscall(_metadata, tracee));
1287 change_syscall(_metadata, tracee, __NR_getppid); 1294 change_syscall(_metadata, tracee, __NR_getppid);
1288 break; 1295 break;
1289 case 0x1003: 1296 case 0x1003:
1290 /* skip gettid. */ 1297 /* skip gettid. */
1298 EXPECT_EQ(__NR_gettid, get_syscall(_metadata, tracee));
1291 change_syscall(_metadata, tracee, -1); 1299 change_syscall(_metadata, tracee, -1);
1292 break; 1300 break;
1293 case 0x1004: 1301 case 0x1004:
1294 /* do nothing (allow getppid) */ 1302 /* do nothing (allow getppid) */
1303 EXPECT_EQ(__NR_getppid, get_syscall(_metadata, tracee));
1295 break; 1304 break;
1296 default: 1305 default:
1297 EXPECT_EQ(0, msg) { 1306 EXPECT_EQ(0, msg) {
@@ -1409,6 +1418,8 @@ TEST_F(TRACE_syscall, syscall_dropped)
1409# define __NR_seccomp 277 1418# define __NR_seccomp 277
1410# elif defined(__powerpc__) 1419# elif defined(__powerpc__)
1411# define __NR_seccomp 358 1420# define __NR_seccomp 358
1421# elif defined(__s390__)
1422# define __NR_seccomp 348
1412# else 1423# else
1413# warning "seccomp syscall number unknown for this architecture" 1424# warning "seccomp syscall number unknown for this architecture"
1414# define __NR_seccomp 0xffff 1425# define __NR_seccomp 0xffff
@@ -1453,6 +1464,9 @@ TEST(seccomp_syscall)
1453 1464
1454 /* Reject insane operation. */ 1465 /* Reject insane operation. */
1455 ret = seccomp(-1, 0, &prog); 1466 ret = seccomp(-1, 0, &prog);
1467 ASSERT_NE(ENOSYS, errno) {
1468 TH_LOG("Kernel does not support seccomp syscall!");
1469 }
1456 EXPECT_EQ(EINVAL, errno) { 1470 EXPECT_EQ(EINVAL, errno) {
1457 TH_LOG("Did not reject crazy op value!"); 1471 TH_LOG("Did not reject crazy op value!");
1458 } 1472 }
@@ -1501,6 +1515,9 @@ TEST(seccomp_syscall_mode_lock)
1501 } 1515 }
1502 1516
1503 ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog); 1517 ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog);
1518 ASSERT_NE(ENOSYS, errno) {
1519 TH_LOG("Kernel does not support seccomp syscall!");
1520 }
1504 EXPECT_EQ(0, ret) { 1521 EXPECT_EQ(0, ret) {
1505 TH_LOG("Could not install filter!"); 1522 TH_LOG("Could not install filter!");
1506 } 1523 }
@@ -1535,6 +1552,9 @@ TEST(TSYNC_first)
1535 1552
1536 ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FLAG_FILTER_TSYNC, 1553 ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FLAG_FILTER_TSYNC,
1537 &prog); 1554 &prog);
1555 ASSERT_NE(ENOSYS, errno) {
1556 TH_LOG("Kernel does not support seccomp syscall!");
1557 }
1538 EXPECT_EQ(0, ret) { 1558 EXPECT_EQ(0, ret) {
1539 TH_LOG("Could not install initial filter with TSYNC!"); 1559 TH_LOG("Could not install initial filter with TSYNC!");
1540 } 1560 }
@@ -1694,6 +1714,9 @@ TEST_F(TSYNC, siblings_fail_prctl)
1694 1714
1695 /* Check prctl failure detection by requesting sib 0 diverge. */ 1715 /* Check prctl failure detection by requesting sib 0 diverge. */
1696 ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog); 1716 ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog);
1717 ASSERT_NE(ENOSYS, errno) {
1718 TH_LOG("Kernel does not support seccomp syscall!");
1719 }
1697 ASSERT_EQ(0, ret) { 1720 ASSERT_EQ(0, ret) {
1698 TH_LOG("setting filter failed"); 1721 TH_LOG("setting filter failed");
1699 } 1722 }
@@ -1731,6 +1754,9 @@ TEST_F(TSYNC, two_siblings_with_ancestor)
1731 } 1754 }
1732 1755
1733 ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &self->root_prog); 1756 ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &self->root_prog);
1757 ASSERT_NE(ENOSYS, errno) {
1758 TH_LOG("Kernel does not support seccomp syscall!");
1759 }
1734 ASSERT_EQ(0, ret) { 1760 ASSERT_EQ(0, ret) {
1735 TH_LOG("Kernel does not support SECCOMP_SET_MODE_FILTER!"); 1761 TH_LOG("Kernel does not support SECCOMP_SET_MODE_FILTER!");
1736 } 1762 }
@@ -1805,6 +1831,9 @@ TEST_F(TSYNC, two_siblings_with_no_filter)
1805 1831
1806 ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FLAG_FILTER_TSYNC, 1832 ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FLAG_FILTER_TSYNC,
1807 &self->apply_prog); 1833 &self->apply_prog);
1834 ASSERT_NE(ENOSYS, errno) {
1835 TH_LOG("Kernel does not support seccomp syscall!");
1836 }
1808 ASSERT_EQ(0, ret) { 1837 ASSERT_EQ(0, ret) {
1809 TH_LOG("Could install filter on all threads!"); 1838 TH_LOG("Could install filter on all threads!");
1810 } 1839 }
@@ -1833,6 +1862,9 @@ TEST_F(TSYNC, two_siblings_with_one_divergence)
1833 } 1862 }
1834 1863
1835 ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &self->root_prog); 1864 ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &self->root_prog);
1865 ASSERT_NE(ENOSYS, errno) {
1866 TH_LOG("Kernel does not support seccomp syscall!");
1867 }
1836 ASSERT_EQ(0, ret) { 1868 ASSERT_EQ(0, ret) {
1837 TH_LOG("Kernel does not support SECCOMP_SET_MODE_FILTER!"); 1869 TH_LOG("Kernel does not support SECCOMP_SET_MODE_FILTER!");
1838 } 1870 }
@@ -1890,6 +1922,9 @@ TEST_F(TSYNC, two_siblings_not_under_filter)
1890 } 1922 }
1891 1923
1892 ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &self->root_prog); 1924 ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &self->root_prog);
1925 ASSERT_NE(ENOSYS, errno) {
1926 TH_LOG("Kernel does not support seccomp syscall!");
1927 }
1893 ASSERT_EQ(0, ret) { 1928 ASSERT_EQ(0, ret) {
1894 TH_LOG("Kernel does not support SECCOMP_SET_MODE_FILTER!"); 1929 TH_LOG("Kernel does not support SECCOMP_SET_MODE_FILTER!");
1895 } 1930 }
diff --git a/tools/testing/selftests/seccomp/test_harness.h b/tools/testing/selftests/seccomp/test_harness.h
index 977a6afc4489..fb2841601f2f 100644
--- a/tools/testing/selftests/seccomp/test_harness.h
+++ b/tools/testing/selftests/seccomp/test_harness.h
@@ -370,11 +370,8 @@
370 __typeof__(_expected) __exp = (_expected); \ 370 __typeof__(_expected) __exp = (_expected); \
371 __typeof__(_seen) __seen = (_seen); \ 371 __typeof__(_seen) __seen = (_seen); \
372 if (!(__exp _t __seen)) { \ 372 if (!(__exp _t __seen)) { \
373 unsigned long long __exp_print = 0; \ 373 unsigned long long __exp_print = (unsigned long long)__exp; \
374 unsigned long long __seen_print = 0; \ 374 unsigned long long __seen_print = (unsigned long long)__seen; \
375 /* Avoid casting complaints the scariest way we can. */ \
376 memcpy(&__exp_print, &__exp, sizeof(__exp)); \
377 memcpy(&__seen_print, &__seen, sizeof(__seen)); \
378 __TH_LOG("Expected %s (%llu) %s %s (%llu)", \ 375 __TH_LOG("Expected %s (%llu) %s %s (%llu)", \
379 #_expected, __exp_print, #_t, \ 376 #_expected, __exp_print, #_t, \
380 #_seen, __seen_print); \ 377 #_seen, __seen_print); \
diff --git a/tools/testing/selftests/vm/Makefile b/tools/testing/selftests/vm/Makefile
index d36fab7d8ebd..3c53cac15de1 100644
--- a/tools/testing/selftests/vm/Makefile
+++ b/tools/testing/selftests/vm/Makefile
@@ -1,6 +1,6 @@
1# Makefile for vm selftests 1# Makefile for vm selftests
2 2
3CFLAGS = -Wall 3CFLAGS = -Wall -I ../../../../usr/include $(EXTRA_CFLAGS)
4BINARIES = compaction_test 4BINARIES = compaction_test
5BINARIES += hugepage-mmap 5BINARIES += hugepage-mmap
6BINARIES += hugepage-shm 6BINARIES += hugepage-shm
@@ -12,8 +12,11 @@ BINARIES += userfaultfd
12all: $(BINARIES) 12all: $(BINARIES)
13%: %.c 13%: %.c
14 $(CC) $(CFLAGS) -o $@ $^ -lrt 14 $(CC) $(CFLAGS) -o $@ $^ -lrt
15userfaultfd: userfaultfd.c 15userfaultfd: userfaultfd.c ../../../../usr/include/linux/kernel.h
16 $(CC) $(CFLAGS) -O2 -o $@ $^ -lpthread 16 $(CC) $(CFLAGS) -O2 -o $@ $< -lpthread
17
18../../../../usr/include/linux/kernel.h:
19 make -C ../../../.. headers_install
17 20
18TEST_PROGS := run_vmtests 21TEST_PROGS := run_vmtests
19TEST_FILES := $(BINARIES) 22TEST_FILES := $(BINARIES)
diff --git a/tools/testing/selftests/vm/userfaultfd.c b/tools/testing/selftests/vm/userfaultfd.c
index 2c7cca6f26a4..d77ed41b2094 100644
--- a/tools/testing/selftests/vm/userfaultfd.c
+++ b/tools/testing/selftests/vm/userfaultfd.c
@@ -64,17 +64,9 @@
64#include <sys/syscall.h> 64#include <sys/syscall.h>
65#include <sys/ioctl.h> 65#include <sys/ioctl.h>
66#include <pthread.h> 66#include <pthread.h>
67#include "../../../../include/uapi/linux/userfaultfd.h" 67#include <linux/userfaultfd.h>
68 68
69#ifdef __x86_64__ 69#ifdef __NR_userfaultfd
70#define __NR_userfaultfd 323
71#elif defined(__i386__)
72#define __NR_userfaultfd 374
73#elif defined(__powewrpc__)
74#define __NR_userfaultfd 364
75#else
76#error "missing __NR_userfaultfd definition"
77#endif
78 70
79static unsigned long nr_cpus, nr_pages, nr_pages_per_cpu, page_size; 71static unsigned long nr_cpus, nr_pages, nr_pages_per_cpu, page_size;
80 72
@@ -430,7 +422,7 @@ static int userfaultfd_stress(void)
430 struct uffdio_register uffdio_register; 422 struct uffdio_register uffdio_register;
431 struct uffdio_api uffdio_api; 423 struct uffdio_api uffdio_api;
432 unsigned long cpu; 424 unsigned long cpu;
433 int uffd_flags; 425 int uffd_flags, err;
434 unsigned long userfaults[nr_cpus]; 426 unsigned long userfaults[nr_cpus];
435 427
436 if (posix_memalign(&area, page_size, nr_pages * page_size)) { 428 if (posix_memalign(&area, page_size, nr_pages * page_size)) {
@@ -473,6 +465,14 @@ static int userfaultfd_stress(void)
473 *area_mutex(area_src, nr) = (pthread_mutex_t) 465 *area_mutex(area_src, nr) = (pthread_mutex_t)
474 PTHREAD_MUTEX_INITIALIZER; 466 PTHREAD_MUTEX_INITIALIZER;
475 count_verify[nr] = *area_count(area_src, nr) = 1; 467 count_verify[nr] = *area_count(area_src, nr) = 1;
468 /*
469 * In the transition between 255 to 256, powerpc will
470 * read out of order in my_bcmp and see both bytes as
471 * zero, so leave a placeholder below always non-zero
472 * after the count, to avoid my_bcmp to trigger false
473 * positives.
474 */
475 *(area_count(area_src, nr) + 1) = 1;
476 } 476 }
477 477
478 pipefd = malloc(sizeof(int) * nr_cpus * 2); 478 pipefd = malloc(sizeof(int) * nr_cpus * 2);
@@ -499,6 +499,7 @@ static int userfaultfd_stress(void)
499 pthread_attr_init(&attr); 499 pthread_attr_init(&attr);
500 pthread_attr_setstacksize(&attr, 16*1024*1024); 500 pthread_attr_setstacksize(&attr, 16*1024*1024);
501 501
502 err = 0;
502 while (bounces--) { 503 while (bounces--) {
503 unsigned long expected_ioctls; 504 unsigned long expected_ioctls;
504 505
@@ -579,20 +580,13 @@ static int userfaultfd_stress(void)
579 /* verification */ 580 /* verification */
580 if (bounces & BOUNCE_VERIFY) { 581 if (bounces & BOUNCE_VERIFY) {
581 for (nr = 0; nr < nr_pages; nr++) { 582 for (nr = 0; nr < nr_pages; nr++) {
582 if (my_bcmp(area_dst,
583 area_dst + nr * page_size,
584 sizeof(pthread_mutex_t))) {
585 fprintf(stderr,
586 "error mutex 2 %lu\n",
587 nr);
588 bounces = 0;
589 }
590 if (*area_count(area_dst, nr) != count_verify[nr]) { 583 if (*area_count(area_dst, nr) != count_verify[nr]) {
591 fprintf(stderr, 584 fprintf(stderr,
592 "error area_count %Lu %Lu %lu\n", 585 "error area_count %Lu %Lu %lu\n",
593 *area_count(area_src, nr), 586 *area_count(area_src, nr),
594 count_verify[nr], 587 count_verify[nr],
595 nr); 588 nr);
589 err = 1;
596 bounces = 0; 590 bounces = 0;
597 } 591 }
598 } 592 }
@@ -609,7 +603,7 @@ static int userfaultfd_stress(void)
609 printf("\n"); 603 printf("\n");
610 } 604 }
611 605
612 return 0; 606 return err;
613} 607}
614 608
615int main(int argc, char **argv) 609int main(int argc, char **argv)
@@ -618,8 +612,8 @@ int main(int argc, char **argv)
618 fprintf(stderr, "Usage: <MiB> <bounces>\n"), exit(1); 612 fprintf(stderr, "Usage: <MiB> <bounces>\n"), exit(1);
619 nr_cpus = sysconf(_SC_NPROCESSORS_ONLN); 613 nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
620 page_size = sysconf(_SC_PAGE_SIZE); 614 page_size = sysconf(_SC_PAGE_SIZE);
621 if ((unsigned long) area_count(NULL, 0) + sizeof(unsigned long long) > 615 if ((unsigned long) area_count(NULL, 0) + sizeof(unsigned long long) * 2
622 page_size) 616 > page_size)
623 fprintf(stderr, "Impossible to run this test\n"), exit(2); 617 fprintf(stderr, "Impossible to run this test\n"), exit(2);
624 nr_pages_per_cpu = atol(argv[1]) * 1024*1024 / page_size / 618 nr_pages_per_cpu = atol(argv[1]) * 1024*1024 / page_size /
625 nr_cpus; 619 nr_cpus;
@@ -637,3 +631,15 @@ int main(int argc, char **argv)
637 nr_pages, nr_pages_per_cpu); 631 nr_pages, nr_pages_per_cpu);
638 return userfaultfd_stress(); 632 return userfaultfd_stress();
639} 633}
634
635#else /* __NR_userfaultfd */
636
637#warning "missing __NR_userfaultfd definition"
638
639int main(void)
640{
641 printf("skip: Skipping userfaultfd test (missing __NR_userfaultfd)\n");
642 return 0;
643}
644
645#endif /* __NR_userfaultfd */
diff --git a/tools/testing/selftests/x86/entry_from_vm86.c b/tools/testing/selftests/x86/entry_from_vm86.c
index 9a43a59a9bb4..421c607a8856 100644
--- a/tools/testing/selftests/x86/entry_from_vm86.c
+++ b/tools/testing/selftests/x86/entry_from_vm86.c
@@ -116,8 +116,9 @@ static bool do_test(struct vm86plus_struct *v86, unsigned long eip,
116 v86->regs.eip = eip; 116 v86->regs.eip = eip;
117 ret = vm86(VM86_ENTER, v86); 117 ret = vm86(VM86_ENTER, v86);
118 118
119 if (ret == -1 && errno == ENOSYS) { 119 if (ret == -1 && (errno == ENOSYS || errno == EPERM)) {
120 printf("[SKIP]\tvm86 not supported\n"); 120 printf("[SKIP]\tvm86 %s\n",
121 errno == ENOSYS ? "not supported" : "not allowed");
121 return false; 122 return false;
122 } 123 }
123 124
diff --git a/tools/testing/selftests/zram/zram.sh b/tools/testing/selftests/zram/zram.sh
index 20de9a761269..683a292e3290 100755
--- a/tools/testing/selftests/zram/zram.sh
+++ b/tools/testing/selftests/zram/zram.sh
@@ -1,15 +1,7 @@
1#!/bin/bash 1#!/bin/bash
2TCID="zram.sh" 2TCID="zram.sh"
3 3
4check_prereqs() 4. ./zram_lib.sh
5{
6 local msg="skip all tests:"
7
8 if [ $UID != 0 ]; then
9 echo $msg must be run as root >&2
10 exit 0
11 fi
12}
13 5
14run_zram () { 6run_zram () {
15echo "--------------------" 7echo "--------------------"
diff --git a/tools/testing/selftests/zram/zram_lib.sh b/tools/testing/selftests/zram/zram_lib.sh
index 424e68ed1487..f6a9c73e7a44 100755
--- a/tools/testing/selftests/zram/zram_lib.sh
+++ b/tools/testing/selftests/zram/zram_lib.sh
@@ -23,8 +23,9 @@ trap INT
23check_prereqs() 23check_prereqs()
24{ 24{
25 local msg="skip all tests:" 25 local msg="skip all tests:"
26 local uid=$(id -u)
26 27
27 if [ $UID != 0 ]; then 28 if [ $uid -ne 0 ]; then
28 echo $msg must be run as root >&2 29 echo $msg must be run as root >&2
29 exit 0 30 exit 0
30 fi 31 fi
diff --git a/tools/virtio/Makefile b/tools/virtio/Makefile
index 505ad51b3b51..39c89a5ea990 100644
--- a/tools/virtio/Makefile
+++ b/tools/virtio/Makefile
@@ -6,7 +6,7 @@ vringh_test: vringh_test.o vringh.o virtio_ring.o
6CFLAGS += -g -O2 -Werror -Wall -I. -I../include/ -I ../../usr/include/ -Wno-pointer-sign -fno-strict-overflow -fno-strict-aliasing -fno-common -MMD -U_FORTIFY_SOURCE 6CFLAGS += -g -O2 -Werror -Wall -I. -I../include/ -I ../../usr/include/ -Wno-pointer-sign -fno-strict-overflow -fno-strict-aliasing -fno-common -MMD -U_FORTIFY_SOURCE
7vpath %.c ../../drivers/virtio ../../drivers/vhost 7vpath %.c ../../drivers/virtio ../../drivers/vhost
8mod: 8mod:
9 ${MAKE} -C `pwd`/../.. M=`pwd`/vhost_test 9 ${MAKE} -C `pwd`/../.. M=`pwd`/vhost_test V=${V}
10.PHONY: all test mod clean 10.PHONY: all test mod clean
11clean: 11clean:
12 ${RM} *.o vringh_test virtio_test vhost_test/*.o vhost_test/.*.cmd \ 12 ${RM} *.o vringh_test virtio_test vhost_test/*.o vhost_test/.*.cmd \
diff --git a/tools/virtio/asm/barrier.h b/tools/virtio/asm/barrier.h
index aff61e13306c..26b7926bda88 100644
--- a/tools/virtio/asm/barrier.h
+++ b/tools/virtio/asm/barrier.h
@@ -3,6 +3,8 @@
3#define mb() __sync_synchronize() 3#define mb() __sync_synchronize()
4 4
5#define smp_mb() mb() 5#define smp_mb() mb()
6# define dma_rmb() barrier()
7# define dma_wmb() barrier()
6# define smp_rmb() barrier() 8# define smp_rmb() barrier()
7# define smp_wmb() barrier() 9# define smp_wmb() barrier()
8/* Weak barriers should be used. If not - it's a bug */ 10/* Weak barriers should be used. If not - it's a bug */
diff --git a/tools/virtio/linux/export.h b/tools/virtio/linux/export.h
new file mode 100644
index 000000000000..416875e29254
--- /dev/null
+++ b/tools/virtio/linux/export.h
@@ -0,0 +1,3 @@
1#define EXPORT_SYMBOL_GPL(sym) extern typeof(sym) sym
2#define EXPORT_SYMBOL(sym) extern typeof(sym) sym
3
diff --git a/tools/virtio/linux/kernel.h b/tools/virtio/linux/kernel.h
index 1e8ce6979c1e..0a3da64638ce 100644
--- a/tools/virtio/linux/kernel.h
+++ b/tools/virtio/linux/kernel.h
@@ -22,6 +22,7 @@
22 22
23typedef unsigned long long dma_addr_t; 23typedef unsigned long long dma_addr_t;
24typedef size_t __kernel_size_t; 24typedef size_t __kernel_size_t;
25typedef unsigned int __wsum;
25 26
26struct page { 27struct page {
27 unsigned long long dummy; 28 unsigned long long dummy;
@@ -47,6 +48,13 @@ static inline void *kmalloc(size_t s, gfp_t gfp)
47 return __kmalloc_fake; 48 return __kmalloc_fake;
48 return malloc(s); 49 return malloc(s);
49} 50}
51static inline void *kzalloc(size_t s, gfp_t gfp)
52{
53 void *p = kmalloc(s, gfp);
54
55 memset(p, 0, s);
56 return p;
57}
50 58
51static inline void kfree(void *p) 59static inline void kfree(void *p)
52{ 60{
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
index 76e38d231e99..48c6e1ac6827 100644
--- a/virt/kvm/arm/arch_timer.c
+++ b/virt/kvm/arm/arch_timer.c
@@ -200,6 +200,14 @@ int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu,
200 timer->irq = irq; 200 timer->irq = irq;
201 201
202 /* 202 /*
203 * The bits in CNTV_CTL are architecturally reset to UNKNOWN for ARMv8
204 * and to 0 for ARMv7. We provide an implementation that always
205 * resets the timer to be disabled and unmasked and is compliant with
206 * the ARMv7 architecture.
207 */
208 timer->cntv_ctl = 0;
209
210 /*
203 * Tell the VGIC that the virtual interrupt is tied to a 211 * Tell the VGIC that the virtual interrupt is tied to a
204 * physical interrupt. We do that once per VCPU. 212 * physical interrupt. We do that once per VCPU.
205 */ 213 */
diff --git a/virt/kvm/arm/vgic-v3.c b/virt/kvm/arm/vgic-v3.c
index afbf925b00f4..7dd5d62f10a1 100644
--- a/virt/kvm/arm/vgic-v3.c
+++ b/virt/kvm/arm/vgic-v3.c
@@ -288,7 +288,7 @@ int vgic_v3_probe(struct device_node *vgic_node,
288 288
289 vgic->vctrl_base = NULL; 289 vgic->vctrl_base = NULL;
290 vgic->type = VGIC_V3; 290 vgic->type = VGIC_V3;
291 vgic->max_gic_vcpus = KVM_MAX_VCPUS; 291 vgic->max_gic_vcpus = VGIC_V3_MAX_CPUS;
292 292
293 kvm_info("%s@%llx IRQ%d\n", vgic_node->name, 293 kvm_info("%s@%llx IRQ%d\n", vgic_node->name,
294 vcpu_res.start, vgic->maint_irq); 294 vcpu_res.start, vgic->maint_irq);
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index 9eb489a2c94c..6bd1c9bf7ae7 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -1144,26 +1144,11 @@ static void vgic_queue_irq_to_lr(struct kvm_vcpu *vcpu, int irq,
1144 struct irq_phys_map *map; 1144 struct irq_phys_map *map;
1145 map = vgic_irq_map_search(vcpu, irq); 1145 map = vgic_irq_map_search(vcpu, irq);
1146 1146
1147 /*
1148 * If we have a mapping, and the virtual interrupt is
1149 * being injected, then we must set the state to
1150 * active in the physical world. Otherwise the
1151 * physical interrupt will fire and the guest will
1152 * exit before processing the virtual interrupt.
1153 */
1154 if (map) { 1147 if (map) {
1155 int ret;
1156
1157 BUG_ON(!map->active);
1158 vlr.hwirq = map->phys_irq; 1148 vlr.hwirq = map->phys_irq;
1159 vlr.state |= LR_HW; 1149 vlr.state |= LR_HW;
1160 vlr.state &= ~LR_EOI_INT; 1150 vlr.state &= ~LR_EOI_INT;
1161 1151
1162 ret = irq_set_irqchip_state(map->irq,
1163 IRQCHIP_STATE_ACTIVE,
1164 true);
1165 WARN_ON(ret);
1166
1167 /* 1152 /*
1168 * Make sure we're not going to sample this 1153 * Make sure we're not going to sample this
1169 * again, as a HW-backed interrupt cannot be 1154 * again, as a HW-backed interrupt cannot be
@@ -1255,7 +1240,7 @@ static void __kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
1255 struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; 1240 struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
1256 struct vgic_dist *dist = &vcpu->kvm->arch.vgic; 1241 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
1257 unsigned long *pa_percpu, *pa_shared; 1242 unsigned long *pa_percpu, *pa_shared;
1258 int i, vcpu_id; 1243 int i, vcpu_id, lr, ret;
1259 int overflow = 0; 1244 int overflow = 0;
1260 int nr_shared = vgic_nr_shared_irqs(dist); 1245 int nr_shared = vgic_nr_shared_irqs(dist);
1261 1246
@@ -1310,6 +1295,31 @@ epilog:
1310 */ 1295 */
1311 clear_bit(vcpu_id, dist->irq_pending_on_cpu); 1296 clear_bit(vcpu_id, dist->irq_pending_on_cpu);
1312 } 1297 }
1298
1299 for (lr = 0; lr < vgic->nr_lr; lr++) {
1300 struct vgic_lr vlr;
1301
1302 if (!test_bit(lr, vgic_cpu->lr_used))
1303 continue;
1304
1305 vlr = vgic_get_lr(vcpu, lr);
1306
1307 /*
1308 * If we have a mapping, and the virtual interrupt is
1309 * presented to the guest (as pending or active), then we must
1310 * set the state to active in the physical world. See
1311 * Documentation/virtual/kvm/arm/vgic-mapped-irqs.txt.
1312 */
1313 if (vlr.state & LR_HW) {
1314 struct irq_phys_map *map;
1315 map = vgic_irq_map_search(vcpu, vlr.irq);
1316
1317 ret = irq_set_irqchip_state(map->irq,
1318 IRQCHIP_STATE_ACTIVE,
1319 true);
1320 WARN_ON(ret);
1321 }
1322 }
1313} 1323}
1314 1324
1315static bool vgic_process_maintenance(struct kvm_vcpu *vcpu) 1325static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
diff --git a/virt/kvm/coalesced_mmio.h b/virt/kvm/coalesced_mmio.h
index 5cbf190d238c..6bca74ca5331 100644
--- a/virt/kvm/coalesced_mmio.h
+++ b/virt/kvm/coalesced_mmio.h
@@ -24,9 +24,9 @@ struct kvm_coalesced_mmio_dev {
24int kvm_coalesced_mmio_init(struct kvm *kvm); 24int kvm_coalesced_mmio_init(struct kvm *kvm);
25void kvm_coalesced_mmio_free(struct kvm *kvm); 25void kvm_coalesced_mmio_free(struct kvm *kvm);
26int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm, 26int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm,
27 struct kvm_coalesced_mmio_zone *zone); 27 struct kvm_coalesced_mmio_zone *zone);
28int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm, 28int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
29 struct kvm_coalesced_mmio_zone *zone); 29 struct kvm_coalesced_mmio_zone *zone);
30 30
31#else 31#else
32 32
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
index 9ff4193dfa49..79db45336e3a 100644
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -771,40 +771,14 @@ static enum kvm_bus ioeventfd_bus_from_flags(__u32 flags)
771 return KVM_MMIO_BUS; 771 return KVM_MMIO_BUS;
772} 772}
773 773
774static int 774static int kvm_assign_ioeventfd_idx(struct kvm *kvm,
775kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args) 775 enum kvm_bus bus_idx,
776 struct kvm_ioeventfd *args)
776{ 777{
777 enum kvm_bus bus_idx;
778 struct _ioeventfd *p;
779 struct eventfd_ctx *eventfd;
780 int ret;
781
782 bus_idx = ioeventfd_bus_from_flags(args->flags);
783 /* must be natural-word sized, or 0 to ignore length */
784 switch (args->len) {
785 case 0:
786 case 1:
787 case 2:
788 case 4:
789 case 8:
790 break;
791 default:
792 return -EINVAL;
793 }
794
795 /* check for range overflow */
796 if (args->addr + args->len < args->addr)
797 return -EINVAL;
798 778
799 /* check for extra flags that we don't understand */ 779 struct eventfd_ctx *eventfd;
800 if (args->flags & ~KVM_IOEVENTFD_VALID_FLAG_MASK) 780 struct _ioeventfd *p;
801 return -EINVAL; 781 int ret;
802
803 /* ioeventfd with no length can't be combined with DATAMATCH */
804 if (!args->len &&
805 args->flags & (KVM_IOEVENTFD_FLAG_PIO |
806 KVM_IOEVENTFD_FLAG_DATAMATCH))
807 return -EINVAL;
808 782
809 eventfd = eventfd_ctx_fdget(args->fd); 783 eventfd = eventfd_ctx_fdget(args->fd);
810 if (IS_ERR(eventfd)) 784 if (IS_ERR(eventfd))
@@ -843,16 +817,6 @@ kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
843 if (ret < 0) 817 if (ret < 0)
844 goto unlock_fail; 818 goto unlock_fail;
845 819
846 /* When length is ignored, MMIO is also put on a separate bus, for
847 * faster lookups.
848 */
849 if (!args->len && !(args->flags & KVM_IOEVENTFD_FLAG_PIO)) {
850 ret = kvm_io_bus_register_dev(kvm, KVM_FAST_MMIO_BUS,
851 p->addr, 0, &p->dev);
852 if (ret < 0)
853 goto register_fail;
854 }
855
856 kvm->buses[bus_idx]->ioeventfd_count++; 820 kvm->buses[bus_idx]->ioeventfd_count++;
857 list_add_tail(&p->list, &kvm->ioeventfds); 821 list_add_tail(&p->list, &kvm->ioeventfds);
858 822
@@ -860,8 +824,6 @@ kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
860 824
861 return 0; 825 return 0;
862 826
863register_fail:
864 kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev);
865unlock_fail: 827unlock_fail:
866 mutex_unlock(&kvm->slots_lock); 828 mutex_unlock(&kvm->slots_lock);
867 829
@@ -873,14 +835,13 @@ fail:
873} 835}
874 836
875static int 837static int
876kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args) 838kvm_deassign_ioeventfd_idx(struct kvm *kvm, enum kvm_bus bus_idx,
839 struct kvm_ioeventfd *args)
877{ 840{
878 enum kvm_bus bus_idx;
879 struct _ioeventfd *p, *tmp; 841 struct _ioeventfd *p, *tmp;
880 struct eventfd_ctx *eventfd; 842 struct eventfd_ctx *eventfd;
881 int ret = -ENOENT; 843 int ret = -ENOENT;
882 844
883 bus_idx = ioeventfd_bus_from_flags(args->flags);
884 eventfd = eventfd_ctx_fdget(args->fd); 845 eventfd = eventfd_ctx_fdget(args->fd);
885 if (IS_ERR(eventfd)) 846 if (IS_ERR(eventfd))
886 return PTR_ERR(eventfd); 847 return PTR_ERR(eventfd);
@@ -901,10 +862,6 @@ kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
901 continue; 862 continue;
902 863
903 kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev); 864 kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev);
904 if (!p->length) {
905 kvm_io_bus_unregister_dev(kvm, KVM_FAST_MMIO_BUS,
906 &p->dev);
907 }
908 kvm->buses[bus_idx]->ioeventfd_count--; 865 kvm->buses[bus_idx]->ioeventfd_count--;
909 ioeventfd_release(p); 866 ioeventfd_release(p);
910 ret = 0; 867 ret = 0;
@@ -918,6 +875,71 @@ kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
918 return ret; 875 return ret;
919} 876}
920 877
878static int kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
879{
880 enum kvm_bus bus_idx = ioeventfd_bus_from_flags(args->flags);
881 int ret = kvm_deassign_ioeventfd_idx(kvm, bus_idx, args);
882
883 if (!args->len && bus_idx == KVM_MMIO_BUS)
884 kvm_deassign_ioeventfd_idx(kvm, KVM_FAST_MMIO_BUS, args);
885
886 return ret;
887}
888
889static int
890kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
891{
892 enum kvm_bus bus_idx;
893 int ret;
894
895 bus_idx = ioeventfd_bus_from_flags(args->flags);
896 /* must be natural-word sized, or 0 to ignore length */
897 switch (args->len) {
898 case 0:
899 case 1:
900 case 2:
901 case 4:
902 case 8:
903 break;
904 default:
905 return -EINVAL;
906 }
907
908 /* check for range overflow */
909 if (args->addr + args->len < args->addr)
910 return -EINVAL;
911
912 /* check for extra flags that we don't understand */
913 if (args->flags & ~KVM_IOEVENTFD_VALID_FLAG_MASK)
914 return -EINVAL;
915
916 /* ioeventfd with no length can't be combined with DATAMATCH */
917 if (!args->len &&
918 args->flags & (KVM_IOEVENTFD_FLAG_PIO |
919 KVM_IOEVENTFD_FLAG_DATAMATCH))
920 return -EINVAL;
921
922 ret = kvm_assign_ioeventfd_idx(kvm, bus_idx, args);
923 if (ret)
924 goto fail;
925
926 /* When length is ignored, MMIO is also put on a separate bus, for
927 * faster lookups.
928 */
929 if (!args->len && bus_idx == KVM_MMIO_BUS) {
930 ret = kvm_assign_ioeventfd_idx(kvm, KVM_FAST_MMIO_BUS, args);
931 if (ret < 0)
932 goto fast_fail;
933 }
934
935 return 0;
936
937fast_fail:
938 kvm_deassign_ioeventfd_idx(kvm, bus_idx, args);
939fail:
940 return ret;
941}
942
921int 943int
922kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args) 944kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
923{ 945{
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index a25a73147f71..8db1d9361993 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -66,8 +66,8 @@
66MODULE_AUTHOR("Qumranet"); 66MODULE_AUTHOR("Qumranet");
67MODULE_LICENSE("GPL"); 67MODULE_LICENSE("GPL");
68 68
69/* halt polling only reduces halt latency by 5-7 us, 500us is enough */ 69/* Architectures should define their poll value according to the halt latency */
70static unsigned int halt_poll_ns = 500000; 70static unsigned int halt_poll_ns = KVM_HALT_POLL_NS_DEFAULT;
71module_param(halt_poll_ns, uint, S_IRUGO | S_IWUSR); 71module_param(halt_poll_ns, uint, S_IRUGO | S_IWUSR);
72 72
73/* Default doubles per-vcpu halt_poll_ns. */ 73/* Default doubles per-vcpu halt_poll_ns. */
@@ -2004,6 +2004,7 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu)
2004 if (vcpu->halt_poll_ns) { 2004 if (vcpu->halt_poll_ns) {
2005 ktime_t stop = ktime_add_ns(ktime_get(), vcpu->halt_poll_ns); 2005 ktime_t stop = ktime_add_ns(ktime_get(), vcpu->halt_poll_ns);
2006 2006
2007 ++vcpu->stat.halt_attempted_poll;
2007 do { 2008 do {
2008 /* 2009 /*
2009 * This sets KVM_REQ_UNHALT if an interrupt 2010 * This sets KVM_REQ_UNHALT if an interrupt
@@ -2043,7 +2044,8 @@ out:
2043 else if (vcpu->halt_poll_ns < halt_poll_ns && 2044 else if (vcpu->halt_poll_ns < halt_poll_ns &&
2044 block_ns < halt_poll_ns) 2045 block_ns < halt_poll_ns)
2045 grow_halt_poll_ns(vcpu); 2046 grow_halt_poll_ns(vcpu);
2046 } 2047 } else
2048 vcpu->halt_poll_ns = 0;
2047 2049
2048 trace_kvm_vcpu_wakeup(block_ns, waited); 2050 trace_kvm_vcpu_wakeup(block_ns, waited);
2049} 2051}
@@ -3156,10 +3158,25 @@ static void kvm_io_bus_destroy(struct kvm_io_bus *bus)
3156static inline int kvm_io_bus_cmp(const struct kvm_io_range *r1, 3158static inline int kvm_io_bus_cmp(const struct kvm_io_range *r1,
3157 const struct kvm_io_range *r2) 3159 const struct kvm_io_range *r2)
3158{ 3160{
3159 if (r1->addr < r2->addr) 3161 gpa_t addr1 = r1->addr;
3162 gpa_t addr2 = r2->addr;
3163
3164 if (addr1 < addr2)
3160 return -1; 3165 return -1;
3161 if (r1->addr + r1->len > r2->addr + r2->len) 3166
3167 /* If r2->len == 0, match the exact address. If r2->len != 0,
3168 * accept any overlapping write. Any order is acceptable for
3169 * overlapping ranges, because kvm_io_bus_get_first_dev ensures
3170 * we process all of them.
3171 */
3172 if (r2->len) {
3173 addr1 += r1->len;
3174 addr2 += r2->len;
3175 }
3176
3177 if (addr1 > addr2)
3162 return 1; 3178 return 1;
3179
3163 return 0; 3180 return 0;
3164} 3181}
3165 3182