aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorPaolo Bonzini <pbonzini@redhat.com>2017-02-09 10:01:23 -0500
committerPaolo Bonzini <pbonzini@redhat.com>2017-02-09 10:01:23 -0500
commit2e751dfb5ffd20d1a31837dbc9718741df69bffe (patch)
tree8b7e0e47cf0ef4efda0f1090f0ccbbeb17e5a085
parenta0e136d436ded817c0aade72efdefa56a00b4e5e (diff)
parent7b6b46311a8562fb3a9e035ed6ffab6d49c28886 (diff)
Merge tag 'kvmarm-for-4.11' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into HEAD
kvmarm updates for 4.11 - GICv3 save restore - Cache flushing fixes - MSI injection fix for GICv3 ITS - Physical timer emulation support
-rw-r--r--Documentation/devicetree/bindings/mtd/tango-nand.txt6
-rw-r--r--Documentation/devicetree/bindings/net/ti,dp83867.txt6
-rw-r--r--Documentation/devicetree/bindings/spi/sh-msiof.txt19
-rw-r--r--Documentation/virtual/kvm/devices/arm-vgic-v3.txt11
-rw-r--r--MAINTAINERS11
-rw-r--r--Makefile4
-rw-r--r--arch/arc/Kconfig2
-rw-r--r--arch/arc/include/asm/cache.h9
-rw-r--r--arch/arc/include/asm/entry-arcv2.h2
-rw-r--r--arch/arc/include/asm/module.h4
-rw-r--r--arch/arc/include/asm/ptrace.h2
-rw-r--r--arch/arc/include/asm/setup.h1
-rw-r--r--arch/arc/kernel/intc-arcv2.c6
-rw-r--r--arch/arc/kernel/intc-compact.c4
-rw-r--r--arch/arc/kernel/mcip.c4
-rw-r--r--arch/arc/kernel/module.c4
-rw-r--r--arch/arc/mm/cache.c155
-rw-r--r--arch/arc/mm/init.c5
-rw-r--r--arch/arm/boot/dts/Makefile1
-rw-r--r--arch/arm/boot/dts/am335x-icev2.dts1
-rw-r--r--arch/arm/boot/dts/bcm-nsp.dtsi2
-rw-r--r--arch/arm/boot/dts/da850-evm.dts1
-rw-r--r--arch/arm/boot/dts/dra7.dtsi1
-rw-r--r--arch/arm/boot/dts/dra72-evm-revc.dts2
-rw-r--r--arch/arm/boot/dts/imx6qdl-nitrogen6_max.dtsi4
-rw-r--r--arch/arm/boot/dts/imx6qdl-nitrogen6_som2.dtsi4
-rw-r--r--arch/arm/boot/dts/logicpd-som-lv-37xx-devkit.dts11
-rw-r--r--arch/arm/boot/dts/omap5.dtsi1
-rw-r--r--arch/arm/boot/dts/qcom-mdm9615.dtsi2
-rw-r--r--arch/arm/boot/dts/sun6i-a31-hummingbird.dts4
-rw-r--r--arch/arm/boot/dts/sun6i-a31.dtsi1
-rw-r--r--arch/arm/boot/dts/sun7i-a20-olinuxino-lime2-emmc.dts2
-rw-r--r--arch/arm/configs/multi_v7_defconfig4
-rw-r--r--arch/arm/configs/s3c2410_defconfig6
-rw-r--r--arch/arm/include/asm/cputype.h3
-rw-r--r--arch/arm/include/asm/ftrace.h18
-rw-r--r--arch/arm/include/asm/kvm_host.h3
-rw-r--r--arch/arm/include/asm/kvm_mmu.h12
-rw-r--r--arch/arm/include/asm/virt.h5
-rw-r--r--arch/arm/include/uapi/asm/kvm.h13
-rw-r--r--arch/arm/include/uapi/asm/types.h (renamed from arch/arm/include/asm/types.h)6
-rw-r--r--arch/arm/kernel/hw_breakpoint.c16
-rw-r--r--arch/arm/kernel/smp_tlb.c7
-rw-r--r--arch/arm/kvm/Makefile5
-rw-r--r--arch/arm/kvm/arm.c7
-rw-r--r--arch/arm/kvm/mmu.c20
-rw-r--r--arch/arm/kvm/reset.c9
-rw-r--r--arch/arm/kvm/vgic-v3-coproc.c35
-rw-r--r--arch/arm/mach-omap1/dma.c16
-rw-r--r--arch/arm/mach-omap2/pdata-quirks.c2
-rw-r--r--arch/arm/mach-ux500/pm.c4
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts4
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi2
-rw-r--r--arch/arm64/boot/dts/exynos/exynos5433.dtsi2
-rw-r--r--arch/arm64/boot/dts/xilinx/zynqmp-ep108.dts2
-rw-r--r--arch/arm64/boot/dts/xilinx/zynqmp.dtsi6
-rw-r--r--arch/arm64/include/asm/kvm_host.h3
-rw-r--r--arch/arm64/include/asm/kvm_mmu.h6
-rw-r--r--arch/arm64/include/asm/memory.h2
-rw-r--r--arch/arm64/include/asm/virt.h9
-rw-r--r--arch/arm64/include/uapi/asm/kvm.h13
-rw-r--r--arch/arm64/include/uapi/asm/ptrace.h1
-rw-r--r--arch/arm64/kernel/entry.S2
-rw-r--r--arch/arm64/kernel/ptrace.c16
-rw-r--r--arch/arm64/kernel/traps.c28
-rw-r--r--arch/arm64/kvm/Makefile4
-rw-r--r--arch/arm64/kvm/reset.c9
-rw-r--r--arch/arm64/kvm/sys_regs.c92
-rw-r--r--arch/arm64/kvm/sys_regs.h4
-rw-r--r--arch/arm64/kvm/vgic-sys-reg-v3.c346
-rw-r--r--arch/arm64/mm/init.c2
-rw-r--r--arch/powerpc/include/asm/book3s/64/hash-4k.h5
-rw-r--r--arch/powerpc/include/asm/book3s/64/hash.h4
-rw-r--r--arch/powerpc/include/asm/hugetlb.h14
-rw-r--r--arch/powerpc/include/asm/nohash/pgtable.h5
-rw-r--r--arch/powerpc/include/asm/page.h3
-rw-r--r--arch/powerpc/include/asm/perf_event_server.h1
-rw-r--r--arch/powerpc/include/asm/pgtable-be-types.h8
-rw-r--r--arch/powerpc/include/asm/pgtable-types.h7
-rw-r--r--arch/powerpc/include/asm/ppc-opcode.h10
-rw-r--r--arch/powerpc/kernel/eeh.c10
-rw-r--r--arch/powerpc/kernel/ptrace.c14
-rw-r--r--arch/powerpc/mm/hash_utils_64.c4
-rw-r--r--arch/powerpc/mm/hugetlbpage-hash64.c5
-rw-r--r--arch/powerpc/mm/hugetlbpage.c31
-rw-r--r--arch/powerpc/mm/init-common.c13
-rw-r--r--arch/powerpc/mm/pgtable-book3s64.c18
-rw-r--r--arch/powerpc/perf/core-book3s.c2
-rw-r--r--arch/powerpc/perf/power9-events-list.h2
-rw-r--r--arch/powerpc/perf/power9-pmu.c2
-rw-r--r--arch/powerpc/sysdev/xics/icp-opal.c31
-rw-r--r--arch/s390/configs/default_defconfig27
-rw-r--r--arch/s390/configs/gcov_defconfig50
-rw-r--r--arch/s390/configs/performance_defconfig33
-rw-r--r--arch/s390/defconfig5
-rw-r--r--arch/s390/include/asm/ctl_reg.h4
-rw-r--r--arch/x86/events/amd/ibs.c2
-rw-r--r--arch/x86/events/intel/core.c7
-rw-r--r--arch/x86/kernel/apic/io_apic.c2
-rw-r--r--arch/x86/kvm/x86.c3
-rw-r--r--arch/x86/pci/acpi.c10
-rw-r--r--block/blk-mq.c1
-rw-r--r--drivers/block/nbd.c6
-rw-r--r--drivers/char/virtio_console.c2
-rw-r--r--drivers/clk/samsung/clk-exynos5420.c14
-rw-r--r--drivers/clocksource/exynos_mct.c1
-rw-r--r--drivers/hid/hid-corsair.c60
-rw-r--r--drivers/hid/wacom_sys.c16
-rw-r--r--drivers/hid/wacom_wac.c10
-rw-r--r--drivers/mmc/core/mmc_ops.c25
-rw-r--r--drivers/mmc/host/meson-gx-mmc.c8
-rw-r--r--drivers/mmc/host/mxs-mmc.c6
-rw-r--r--drivers/mmc/host/sdhci-acpi.c3
-rw-r--r--drivers/mtd/nand/Kconfig3
-rw-r--r--drivers/mtd/nand/lpc32xx_mlc.c2
-rw-r--r--drivers/mtd/nand/tango_nand.c4
-rw-r--r--drivers/mtd/nand/xway_nand.c5
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c25
-rw-r--r--drivers/net/ethernet/cavium/thunder/thunder_bgx.c11
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c2
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c18
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cq.c38
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/eq.c23
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/resource_tracker.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci_hw.h8
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/switchx2.c1
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac-phy.c7
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac.c6
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c21
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c19
-rw-r--r--drivers/net/ethernet/ti/cpmac.c2
-rw-r--r--drivers/net/hyperv/netvsc_drv.c3
-rw-r--r--drivers/net/ieee802154/at86rf230.c4
-rw-r--r--drivers/net/ieee802154/atusb.c59
-rw-r--r--drivers/net/phy/dp83867.c8
-rw-r--r--drivers/net/usb/r8152.c7
-rw-r--r--drivers/net/vxlan.c13
-rw-r--r--drivers/nvdimm/namespace_devs.c23
-rw-r--r--drivers/nvdimm/pmem.c4
-rw-r--r--drivers/pci/host/pci-xgene-msi.c2
-rw-r--r--drivers/pci/host/pcie-designware.c10
-rw-r--r--drivers/pci/probe.c12
-rw-r--r--drivers/s390/virtio/virtio_ccw.c29
-rw-r--r--drivers/scsi/bfa/bfad_bsg.c2
-rw-r--r--drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c7
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c7
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.h12
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c40
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c21
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h6
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c92
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c37
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.c5
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.h3
-rw-r--r--drivers/scsi/qla2xxx/qla_nx2.c17
-rw-r--r--drivers/scsi/qla2xxx/qla_nx2.h17
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c18
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c57
-rw-r--r--drivers/scsi/qla2xxx/qla_target.h22
-rw-r--r--drivers/scsi/qla2xxx/qla_tmpl.c24
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.c4
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.h1
-rw-r--r--drivers/scsi/sd.c23
-rw-r--r--drivers/scsi/ses.c2
-rw-r--r--drivers/soc/ti/wkup_m3_ipc.c1
-rw-r--r--drivers/spi/Kconfig1
-rw-r--r--drivers/spi/spi-armada-3700.c11
-rw-r--r--drivers/spi/spi-axi-spi-engine.c3
-rw-r--r--drivers/spi/spi-davinci.c4
-rw-r--r--drivers/spi/spi-dw-mid.c4
-rw-r--r--drivers/spi/spi-dw.c5
-rw-r--r--drivers/spi/spi-pxa2xx.c1
-rw-r--r--drivers/spi/spi-sh-msiof.c4
-rw-r--r--drivers/thermal/rockchip_thermal.c153
-rw-r--r--drivers/thermal/thermal_core.c10
-rw-r--r--drivers/thermal/thermal_hwmon.c20
-rw-r--r--drivers/usb/dwc2/core.h4
-rw-r--r--drivers/usb/dwc2/gadget.c18
-rw-r--r--drivers/usb/dwc2/hcd.c7
-rw-r--r--drivers/usb/dwc2/params.c10
-rw-r--r--drivers/usb/dwc3/dwc3-exynos.c4
-rw-r--r--drivers/usb/gadget/composite.c2
-rw-r--r--drivers/usb/gadget/function/f_fs.c12
-rw-r--r--drivers/usb/gadget/udc/atmel_usba_udc.c3
-rw-r--r--drivers/usb/gadget/udc/atmel_usba_udc.h1
-rw-r--r--drivers/usb/host/xhci-plat.c2
-rw-r--r--drivers/vhost/scsi.c4
-rw-r--r--drivers/xen/platform-pci.c71
-rw-r--r--fs/ceph/caps.c7
-rw-r--r--fs/ceph/dir.c5
-rw-r--r--fs/ceph/inode.c3
-rw-r--r--fs/ceph/mds_client.c9
-rw-r--r--fs/fuse/dev.c3
-rw-r--r--fs/fuse/dir.c2
-rw-r--r--fs/nfs/nfs4proc.c29
-rw-r--r--fs/nfs/nfs4state.c1
-rw-r--r--fs/nfsd/nfs4xdr.c4
-rw-r--r--fs/overlayfs/namei.c27
-rw-r--r--fs/ubifs/Kconfig2
-rw-r--r--fs/ubifs/dir.c58
-rw-r--r--fs/ubifs/ioctl.c3
-rw-r--r--fs/ubifs/journal.c2
-rw-r--r--fs/ubifs/tnc.c25
-rw-r--r--fs/xfs/libxfs/xfs_dir2.c39
-rw-r--r--fs/xfs/libxfs/xfs_dir2.h8
-rw-r--r--fs/xfs/libxfs/xfs_inode_buf.c10
-rw-r--r--fs/xfs/xfs_dquot.c4
-rw-r--r--fs/xfs/xfs_iops.c50
-rw-r--r--fs/xfs/xfs_linux.h6
-rw-r--r--include/kvm/arm_arch_timer.h40
-rw-r--r--include/kvm/arm_vgic.h18
-rw-r--r--include/linux/bpf.h2
-rw-r--r--include/linux/cpuhotplug.h2
-rw-r--r--include/linux/filter.h6
-rw-r--r--include/linux/irqchip/arm-gic-v3.h45
-rw-r--r--include/linux/kernel.h4
-rw-r--r--include/linux/kvm_host.h1
-rw-r--r--include/linux/rcupdate.h4
-rw-r--r--include/linux/sunrpc/svc_xprt.h1
-rw-r--r--include/linux/tcp.h7
-rw-r--r--include/scsi/libfc.h6
-rw-r--r--include/uapi/linux/nl80211.h4
-rw-r--r--include/uapi/linux/pkt_cls.h2
-rw-r--r--include/uapi/linux/tc_act/tc_bpf.h2
-rw-r--r--kernel/bpf/core.c14
-rw-r--r--kernel/bpf/syscall.c8
-rw-r--r--kernel/bpf/verifier.c2
-rw-r--r--kernel/cpu.c24
-rw-r--r--kernel/module.c2
-rw-r--r--kernel/panic.c2
-rw-r--r--kernel/rcu/rcu.h1
-rw-r--r--kernel/rcu/tiny.c4
-rw-r--r--kernel/rcu/tiny_plugin.h9
-rw-r--r--kernel/rcu/tree.c33
-rw-r--r--kernel/rcu/tree_exp.h52
-rw-r--r--kernel/rcu/tree_plugin.h2
-rw-r--r--kernel/rcu/update.c38
-rw-r--r--lib/swiotlb.c6
-rw-r--r--net/ax25/ax25_subr.c2
-rw-r--r--net/ceph/crypto.c2
-rw-r--r--net/ipv4/fib_semantics.c11
-rw-r--r--net/ipv4/route.c2
-rw-r--r--net/ipv4/tcp_fastopen.c2
-rw-r--r--net/ipv6/ip6_tunnel.c4
-rw-r--r--net/ipv6/mcast.c51
-rw-r--r--net/ipv6/route.c3
-rw-r--r--net/ipv6/seg6_hmac.c2
-rw-r--r--net/ipv6/seg6_iptunnel.c4
-rw-r--r--net/mac80211/chan.c3
-rw-r--r--net/mac80211/iface.c21
-rw-r--r--net/mac80211/main.c13
-rw-r--r--net/mac80211/rate.c2
-rw-r--r--net/mac80211/rx.c38
-rw-r--r--net/mac80211/sta_info.c4
-rw-r--r--net/mac80211/tx.c17
-rw-r--r--net/mac80211/vht.c4
-rw-r--r--net/openvswitch/conntrack.c6
-rw-r--r--net/sched/act_api.c5
-rw-r--r--net/sched/act_bpf.c5
-rw-r--r--net/sched/cls_bpf.c4
-rw-r--r--net/sunrpc/auth_gss/svcauth_gss.c2
-rw-r--r--net/sunrpc/svc_xprt.c10
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_recvfrom.c2
-rw-r--r--net/tipc/discover.c4
-rw-r--r--net/tipc/link.c2
-rw-r--r--net/tipc/msg.c16
-rw-r--r--net/tipc/msg.h2
-rw-r--r--net/tipc/name_distr.c2
-rw-r--r--net/wireless/nl80211.c15
-rw-r--r--tools/perf/util/probe-event.c95
-rw-r--r--tools/perf/util/probe-finder.c15
-rw-r--r--tools/perf/util/probe-finder.h3
-rw-r--r--tools/testing/selftests/powerpc/pmu/ebb/pmc56_overflow_test.c2
-rw-r--r--tools/virtio/ringtest/main.h12
-rwxr-xr-xtools/virtio/ringtest/run-on-all.sh5
-rw-r--r--virt/kvm/arm/arch_timer.c221
-rw-r--r--virt/kvm/arm/hyp/timer-sr.c46
-rw-r--r--virt/kvm/arm/vgic/vgic-debug.c283
-rw-r--r--virt/kvm/arm/vgic/vgic-init.c22
-rw-r--r--virt/kvm/arm/vgic/vgic-irqfd.c3
-rw-r--r--virt/kvm/arm/vgic/vgic-its.c6
-rw-r--r--virt/kvm/arm/vgic/vgic-kvm-device.c231
-rw-r--r--virt/kvm/arm/vgic/vgic-mmio-v2.c87
-rw-r--r--virt/kvm/arm/vgic/vgic-mmio-v3.c203
-rw-r--r--virt/kvm/arm/vgic/vgic-mmio.c167
-rw-r--r--virt/kvm/arm/vgic/vgic-mmio.h24
-rw-r--r--virt/kvm/arm/vgic/vgic-v2.c14
-rw-r--r--virt/kvm/arm/vgic/vgic-v3.c42
-rw-r--r--virt/kvm/arm/vgic/vgic.c66
-rw-r--r--virt/kvm/arm/vgic/vgic.h83
295 files changed, 3818 insertions, 1399 deletions
diff --git a/Documentation/devicetree/bindings/mtd/tango-nand.txt b/Documentation/devicetree/bindings/mtd/tango-nand.txt
index ad5a02f2ac8c..cd1bf2ac9055 100644
--- a/Documentation/devicetree/bindings/mtd/tango-nand.txt
+++ b/Documentation/devicetree/bindings/mtd/tango-nand.txt
@@ -5,7 +5,7 @@ Required properties:
5- compatible: "sigma,smp8758-nand" 5- compatible: "sigma,smp8758-nand"
6- reg: address/size of nfc_reg, nfc_mem, and pbus_reg 6- reg: address/size of nfc_reg, nfc_mem, and pbus_reg
7- dmas: reference to the DMA channel used by the controller 7- dmas: reference to the DMA channel used by the controller
8- dma-names: "nfc_sbox" 8- dma-names: "rxtx"
9- clocks: reference to the system clock 9- clocks: reference to the system clock
10- #address-cells: <1> 10- #address-cells: <1>
11- #size-cells: <0> 11- #size-cells: <0>
@@ -17,9 +17,9 @@ Example:
17 17
18 nandc: nand-controller@2c000 { 18 nandc: nand-controller@2c000 {
19 compatible = "sigma,smp8758-nand"; 19 compatible = "sigma,smp8758-nand";
20 reg = <0x2c000 0x30 0x2d000 0x800 0x20000 0x1000>; 20 reg = <0x2c000 0x30>, <0x2d000 0x800>, <0x20000 0x1000>;
21 dmas = <&dma0 3>; 21 dmas = <&dma0 3>;
22 dma-names = "nfc_sbox"; 22 dma-names = "rxtx";
23 clocks = <&clkgen SYS_CLK>; 23 clocks = <&clkgen SYS_CLK>;
24 #address-cells = <1>; 24 #address-cells = <1>;
25 #size-cells = <0>; 25 #size-cells = <0>;
diff --git a/Documentation/devicetree/bindings/net/ti,dp83867.txt b/Documentation/devicetree/bindings/net/ti,dp83867.txt
index 85bf945b898f..afe9630a5e7d 100644
--- a/Documentation/devicetree/bindings/net/ti,dp83867.txt
+++ b/Documentation/devicetree/bindings/net/ti,dp83867.txt
@@ -3,9 +3,11 @@
3Required properties: 3Required properties:
4 - reg - The ID number for the phy, usually a small integer 4 - reg - The ID number for the phy, usually a small integer
5 - ti,rx-internal-delay - RGMII Receive Clock Delay - see dt-bindings/net/ti-dp83867.h 5 - ti,rx-internal-delay - RGMII Receive Clock Delay - see dt-bindings/net/ti-dp83867.h
6 for applicable values 6 for applicable values. Required only if interface type is
7 PHY_INTERFACE_MODE_RGMII_ID or PHY_INTERFACE_MODE_RGMII_RXID
7 - ti,tx-internal-delay - RGMII Transmit Clock Delay - see dt-bindings/net/ti-dp83867.h 8 - ti,tx-internal-delay - RGMII Transmit Clock Delay - see dt-bindings/net/ti-dp83867.h
8 for applicable values 9 for applicable values. Required only if interface type is
10 PHY_INTERFACE_MODE_RGMII_ID or PHY_INTERFACE_MODE_RGMII_TXID
9 - ti,fifo-depth - Transmitt FIFO depth- see dt-bindings/net/ti-dp83867.h 11 - ti,fifo-depth - Transmitt FIFO depth- see dt-bindings/net/ti-dp83867.h
10 for applicable values 12 for applicable values
11 13
diff --git a/Documentation/devicetree/bindings/spi/sh-msiof.txt b/Documentation/devicetree/bindings/spi/sh-msiof.txt
index da6614c63796..dc975064fa27 100644
--- a/Documentation/devicetree/bindings/spi/sh-msiof.txt
+++ b/Documentation/devicetree/bindings/spi/sh-msiof.txt
@@ -1,17 +1,23 @@
1Renesas MSIOF spi controller 1Renesas MSIOF spi controller
2 2
3Required properties: 3Required properties:
4- compatible : "renesas,msiof-<soctype>" for SoCs, 4- compatible : "renesas,msiof-r8a7790" (R-Car H2)
5 "renesas,sh-msiof" for SuperH, or
6 "renesas,sh-mobile-msiof" for SH Mobile series.
7 Examples with soctypes are:
8 "renesas,msiof-r8a7790" (R-Car H2)
9 "renesas,msiof-r8a7791" (R-Car M2-W) 5 "renesas,msiof-r8a7791" (R-Car M2-W)
10 "renesas,msiof-r8a7792" (R-Car V2H) 6 "renesas,msiof-r8a7792" (R-Car V2H)
11 "renesas,msiof-r8a7793" (R-Car M2-N) 7 "renesas,msiof-r8a7793" (R-Car M2-N)
12 "renesas,msiof-r8a7794" (R-Car E2) 8 "renesas,msiof-r8a7794" (R-Car E2)
13 "renesas,msiof-r8a7796" (R-Car M3-W) 9 "renesas,msiof-r8a7796" (R-Car M3-W)
14 "renesas,msiof-sh73a0" (SH-Mobile AG5) 10 "renesas,msiof-sh73a0" (SH-Mobile AG5)
11 "renesas,sh-mobile-msiof" (generic SH-Mobile compatibile device)
12 "renesas,rcar-gen2-msiof" (generic R-Car Gen2 compatible device)
13 "renesas,rcar-gen3-msiof" (generic R-Car Gen3 compatible device)
14 "renesas,sh-msiof" (deprecated)
15
16 When compatible with the generic version, nodes
17 must list the SoC-specific version corresponding
18 to the platform first followed by the generic
19 version.
20
15- reg : A list of offsets and lengths of the register sets for 21- reg : A list of offsets and lengths of the register sets for
16 the device. 22 the device.
17 If only one register set is present, it is to be used 23 If only one register set is present, it is to be used
@@ -61,7 +67,8 @@ Documentation/devicetree/bindings/pinctrl/renesas,*.
61Example: 67Example:
62 68
63 msiof0: spi@e6e20000 { 69 msiof0: spi@e6e20000 {
64 compatible = "renesas,msiof-r8a7791"; 70 compatible = "renesas,msiof-r8a7791",
71 "renesas,rcar-gen2-msiof";
65 reg = <0 0xe6e20000 0 0x0064>; 72 reg = <0 0xe6e20000 0 0x0064>;
66 interrupts = <0 156 IRQ_TYPE_LEVEL_HIGH>; 73 interrupts = <0 156 IRQ_TYPE_LEVEL_HIGH>;
67 clocks = <&mstp0_clks R8A7791_CLK_MSIOF0>; 74 clocks = <&mstp0_clks R8A7791_CLK_MSIOF0>;
diff --git a/Documentation/virtual/kvm/devices/arm-vgic-v3.txt b/Documentation/virtual/kvm/devices/arm-vgic-v3.txt
index 9348b3caccd7..c1a24612c198 100644
--- a/Documentation/virtual/kvm/devices/arm-vgic-v3.txt
+++ b/Documentation/virtual/kvm/devices/arm-vgic-v3.txt
@@ -118,7 +118,7 @@ Groups:
118 -EBUSY: One or more VCPUs are running 118 -EBUSY: One or more VCPUs are running
119 119
120 120
121 KVM_DEV_ARM_VGIC_CPU_SYSREGS 121 KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS
122 Attributes: 122 Attributes:
123 The attr field of kvm_device_attr encodes two values: 123 The attr field of kvm_device_attr encodes two values:
124 bits: | 63 .... 32 | 31 .... 16 | 15 .... 0 | 124 bits: | 63 .... 32 | 31 .... 16 | 15 .... 0 |
@@ -139,13 +139,15 @@ Groups:
139 All system regs accessed through this API are (rw, 64-bit) and 139 All system regs accessed through this API are (rw, 64-bit) and
140 kvm_device_attr.addr points to a __u64 value. 140 kvm_device_attr.addr points to a __u64 value.
141 141
142 KVM_DEV_ARM_VGIC_CPU_SYSREGS accesses the CPU interface registers for the 142 KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS accesses the CPU interface registers for the
143 CPU specified by the mpidr field. 143 CPU specified by the mpidr field.
144 144
145 CPU interface registers access is not implemented for AArch32 mode.
146 Error -ENXIO is returned when accessed in AArch32 mode.
145 Errors: 147 Errors:
146 -ENXIO: Getting or setting this register is not yet supported 148 -ENXIO: Getting or setting this register is not yet supported
147 -EBUSY: VCPU is running 149 -EBUSY: VCPU is running
148 -EINVAL: Invalid mpidr supplied 150 -EINVAL: Invalid mpidr or register value supplied
149 151
150 152
151 KVM_DEV_ARM_VGIC_GRP_NR_IRQS 153 KVM_DEV_ARM_VGIC_GRP_NR_IRQS
@@ -204,3 +206,6 @@ Groups:
204 architecture defined MPIDR, and the field is encoded as follows: 206 architecture defined MPIDR, and the field is encoded as follows:
205 | 63 .... 56 | 55 .... 48 | 47 .... 40 | 39 .... 32 | 207 | 63 .... 56 | 55 .... 48 | 47 .... 40 | 39 .... 32 |
206 | Aff3 | Aff2 | Aff1 | Aff0 | 208 | Aff3 | Aff2 | Aff1 | Aff0 |
209 Errors:
210 -EINVAL: vINTID is not multiple of 32 or
211 info field is not VGIC_LEVEL_INFO_LINE_LEVEL
diff --git a/MAINTAINERS b/MAINTAINERS
index c36976d3bd1a..26edd832c64e 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -976,6 +976,7 @@ M: Russell King <linux@armlinux.org.uk>
976L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 976L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
977W: http://www.armlinux.org.uk/ 977W: http://www.armlinux.org.uk/
978S: Maintained 978S: Maintained
979T: git git://git.armlinux.org.uk/~rmk/linux-arm.git
979F: arch/arm/ 980F: arch/arm/
980 981
981ARM SUB-ARCHITECTURES 982ARM SUB-ARCHITECTURES
@@ -1153,6 +1154,7 @@ ARM/CLKDEV SUPPORT
1153M: Russell King <linux@armlinux.org.uk> 1154M: Russell King <linux@armlinux.org.uk>
1154L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 1155L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
1155S: Maintained 1156S: Maintained
1157T: git git://git.armlinux.org.uk/~rmk/linux-arm.git clkdev
1156F: arch/arm/include/asm/clkdev.h 1158F: arch/arm/include/asm/clkdev.h
1157F: drivers/clk/clkdev.c 1159F: drivers/clk/clkdev.c
1158 1160
@@ -1688,6 +1690,7 @@ M: Krzysztof Kozlowski <krzk@kernel.org>
1688R: Javier Martinez Canillas <javier@osg.samsung.com> 1690R: Javier Martinez Canillas <javier@osg.samsung.com>
1689L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 1691L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
1690L: linux-samsung-soc@vger.kernel.org (moderated for non-subscribers) 1692L: linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
1693Q: https://patchwork.kernel.org/project/linux-samsung-soc/list/
1691S: Maintained 1694S: Maintained
1692F: arch/arm/boot/dts/s3c* 1695F: arch/arm/boot/dts/s3c*
1693F: arch/arm/boot/dts/s5p* 1696F: arch/arm/boot/dts/s5p*
@@ -7697,8 +7700,10 @@ F: drivers/net/dsa/mv88e6xxx/
7697F: Documentation/devicetree/bindings/net/dsa/marvell.txt 7700F: Documentation/devicetree/bindings/net/dsa/marvell.txt
7698 7701
7699MARVELL ARMADA DRM SUPPORT 7702MARVELL ARMADA DRM SUPPORT
7700M: Russell King <rmk+kernel@armlinux.org.uk> 7703M: Russell King <linux@armlinux.org.uk>
7701S: Maintained 7704S: Maintained
7705T: git git://git.armlinux.org.uk/~rmk/linux-arm.git drm-armada-devel
7706T: git git://git.armlinux.org.uk/~rmk/linux-arm.git drm-armada-fixes
7702F: drivers/gpu/drm/armada/ 7707F: drivers/gpu/drm/armada/
7703F: include/uapi/drm/armada_drm.h 7708F: include/uapi/drm/armada_drm.h
7704F: Documentation/devicetree/bindings/display/armada/ 7709F: Documentation/devicetree/bindings/display/armada/
@@ -8903,8 +8908,10 @@ S: Supported
8903F: drivers/nfc/nxp-nci 8908F: drivers/nfc/nxp-nci
8904 8909
8905NXP TDA998X DRM DRIVER 8910NXP TDA998X DRM DRIVER
8906M: Russell King <rmk+kernel@armlinux.org.uk> 8911M: Russell King <linux@armlinux.org.uk>
8907S: Supported 8912S: Supported
8913T: git git://git.armlinux.org.uk/~rmk/linux-arm.git drm-tda998x-devel
8914T: git git://git.armlinux.org.uk/~rmk/linux-arm.git drm-tda998x-fixes
8908F: drivers/gpu/drm/i2c/tda998x_drv.c 8915F: drivers/gpu/drm/i2c/tda998x_drv.c
8909F: include/drm/i2c/tda998x.h 8916F: include/drm/i2c/tda998x.h
8910 8917
diff --git a/Makefile b/Makefile
index 96e2352d10a8..098840012b9b 100644
--- a/Makefile
+++ b/Makefile
@@ -1,8 +1,8 @@
1VERSION = 4 1VERSION = 4
2PATCHLEVEL = 10 2PATCHLEVEL = 10
3SUBLEVEL = 0 3SUBLEVEL = 0
4EXTRAVERSION = -rc4 4EXTRAVERSION = -rc5
5NAME = Roaring Lionus 5NAME = Anniversary Edition
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
8# To see a list of typical targets execute "make help" 8# To see a list of typical targets execute "make help"
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index c75d29077e4a..283099c9560a 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -29,7 +29,7 @@ config ARC
29 select HAVE_KPROBES 29 select HAVE_KPROBES
30 select HAVE_KRETPROBES 30 select HAVE_KRETPROBES
31 select HAVE_MEMBLOCK 31 select HAVE_MEMBLOCK
32 select HAVE_MOD_ARCH_SPECIFIC if ARC_DW2_UNWIND 32 select HAVE_MOD_ARCH_SPECIFIC
33 select HAVE_OPROFILE 33 select HAVE_OPROFILE
34 select HAVE_PERF_EVENTS 34 select HAVE_PERF_EVENTS
35 select HANDLE_DOMAIN_IRQ 35 select HANDLE_DOMAIN_IRQ
diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h
index b3410ff6a62d..5008021fba98 100644
--- a/arch/arc/include/asm/cache.h
+++ b/arch/arc/include/asm/cache.h
@@ -67,7 +67,7 @@ extern unsigned long perip_base, perip_end;
67#define ARC_REG_IC_PTAG_HI 0x1F 67#define ARC_REG_IC_PTAG_HI 0x1F
68 68
69/* Bit val in IC_CTRL */ 69/* Bit val in IC_CTRL */
70#define IC_CTRL_CACHE_DISABLE 0x1 70#define IC_CTRL_DIS 0x1
71 71
72/* Data cache related Auxiliary registers */ 72/* Data cache related Auxiliary registers */
73#define ARC_REG_DC_BCR 0x72 /* Build Config reg */ 73#define ARC_REG_DC_BCR 0x72 /* Build Config reg */
@@ -80,8 +80,9 @@ extern unsigned long perip_base, perip_end;
80#define ARC_REG_DC_PTAG_HI 0x5F 80#define ARC_REG_DC_PTAG_HI 0x5F
81 81
82/* Bit val in DC_CTRL */ 82/* Bit val in DC_CTRL */
83#define DC_CTRL_INV_MODE_FLUSH 0x40 83#define DC_CTRL_DIS 0x001
84#define DC_CTRL_FLUSH_STATUS 0x100 84#define DC_CTRL_INV_MODE_FLUSH 0x040
85#define DC_CTRL_FLUSH_STATUS 0x100
85 86
86/*System-level cache (L2 cache) related Auxiliary registers */ 87/*System-level cache (L2 cache) related Auxiliary registers */
87#define ARC_REG_SLC_CFG 0x901 88#define ARC_REG_SLC_CFG 0x901
@@ -92,8 +93,8 @@ extern unsigned long perip_base, perip_end;
92#define ARC_REG_SLC_RGN_END 0x916 93#define ARC_REG_SLC_RGN_END 0x916
93 94
94/* Bit val in SLC_CONTROL */ 95/* Bit val in SLC_CONTROL */
96#define SLC_CTRL_DIS 0x001
95#define SLC_CTRL_IM 0x040 97#define SLC_CTRL_IM 0x040
96#define SLC_CTRL_DISABLE 0x001
97#define SLC_CTRL_BUSY 0x100 98#define SLC_CTRL_BUSY 0x100
98#define SLC_CTRL_RGN_OP_INV 0x200 99#define SLC_CTRL_RGN_OP_INV 0x200
99 100
diff --git a/arch/arc/include/asm/entry-arcv2.h b/arch/arc/include/asm/entry-arcv2.h
index b5ff87e6f4b7..aee1a77934cf 100644
--- a/arch/arc/include/asm/entry-arcv2.h
+++ b/arch/arc/include/asm/entry-arcv2.h
@@ -16,6 +16,7 @@
16 ; 16 ;
17 ; Now manually save: r12, sp, fp, gp, r25 17 ; Now manually save: r12, sp, fp, gp, r25
18 18
19 PUSH r30
19 PUSH r12 20 PUSH r12
20 21
21 ; Saving pt_regs->sp correctly requires some extra work due to the way 22 ; Saving pt_regs->sp correctly requires some extra work due to the way
@@ -72,6 +73,7 @@
72 POPAX AUX_USER_SP 73 POPAX AUX_USER_SP
731: 741:
74 POP r12 75 POP r12
76 POP r30
75 77
76.endm 78.endm
77 79
diff --git a/arch/arc/include/asm/module.h b/arch/arc/include/asm/module.h
index 6e91d8b339c3..567590ea8f6c 100644
--- a/arch/arc/include/asm/module.h
+++ b/arch/arc/include/asm/module.h
@@ -14,13 +14,13 @@
14 14
15#include <asm-generic/module.h> 15#include <asm-generic/module.h>
16 16
17#ifdef CONFIG_ARC_DW2_UNWIND
18struct mod_arch_specific { 17struct mod_arch_specific {
18#ifdef CONFIG_ARC_DW2_UNWIND
19 void *unw_info; 19 void *unw_info;
20 int unw_sec_idx; 20 int unw_sec_idx;
21#endif
21 const char *secstr; 22 const char *secstr;
22}; 23};
23#endif
24 24
25#define MODULE_PROC_FAMILY "ARC700" 25#define MODULE_PROC_FAMILY "ARC700"
26 26
diff --git a/arch/arc/include/asm/ptrace.h b/arch/arc/include/asm/ptrace.h
index 69095da1fcfd..47111d565a95 100644
--- a/arch/arc/include/asm/ptrace.h
+++ b/arch/arc/include/asm/ptrace.h
@@ -84,7 +84,7 @@ struct pt_regs {
84 unsigned long fp; 84 unsigned long fp;
85 unsigned long sp; /* user/kernel sp depending on where we came from */ 85 unsigned long sp; /* user/kernel sp depending on where we came from */
86 86
87 unsigned long r12; 87 unsigned long r12, r30;
88 88
89 /*------- Below list auto saved by h/w -----------*/ 89 /*------- Below list auto saved by h/w -----------*/
90 unsigned long r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11; 90 unsigned long r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11;
diff --git a/arch/arc/include/asm/setup.h b/arch/arc/include/asm/setup.h
index cb954cdab070..c568a9df82b1 100644
--- a/arch/arc/include/asm/setup.h
+++ b/arch/arc/include/asm/setup.h
@@ -31,6 +31,7 @@ extern int root_mountflags, end_mem;
31 31
32void setup_processor(void); 32void setup_processor(void);
33void __init setup_arch_memory(void); 33void __init setup_arch_memory(void);
34long __init arc_get_mem_sz(void);
34 35
35/* Helpers used in arc_*_mumbojumbo routines */ 36/* Helpers used in arc_*_mumbojumbo routines */
36#define IS_AVAIL1(v, s) ((v) ? s : "") 37#define IS_AVAIL1(v, s) ((v) ? s : "")
diff --git a/arch/arc/kernel/intc-arcv2.c b/arch/arc/kernel/intc-arcv2.c
index 994dca7014db..ecef0fb0b66c 100644
--- a/arch/arc/kernel/intc-arcv2.c
+++ b/arch/arc/kernel/intc-arcv2.c
@@ -77,20 +77,20 @@ void arc_init_IRQ(void)
77 77
78static void arcv2_irq_mask(struct irq_data *data) 78static void arcv2_irq_mask(struct irq_data *data)
79{ 79{
80 write_aux_reg(AUX_IRQ_SELECT, data->irq); 80 write_aux_reg(AUX_IRQ_SELECT, data->hwirq);
81 write_aux_reg(AUX_IRQ_ENABLE, 0); 81 write_aux_reg(AUX_IRQ_ENABLE, 0);
82} 82}
83 83
84static void arcv2_irq_unmask(struct irq_data *data) 84static void arcv2_irq_unmask(struct irq_data *data)
85{ 85{
86 write_aux_reg(AUX_IRQ_SELECT, data->irq); 86 write_aux_reg(AUX_IRQ_SELECT, data->hwirq);
87 write_aux_reg(AUX_IRQ_ENABLE, 1); 87 write_aux_reg(AUX_IRQ_ENABLE, 1);
88} 88}
89 89
90void arcv2_irq_enable(struct irq_data *data) 90void arcv2_irq_enable(struct irq_data *data)
91{ 91{
92 /* set default priority */ 92 /* set default priority */
93 write_aux_reg(AUX_IRQ_SELECT, data->irq); 93 write_aux_reg(AUX_IRQ_SELECT, data->hwirq);
94 write_aux_reg(AUX_IRQ_PRIORITY, ARCV2_IRQ_DEF_PRIO); 94 write_aux_reg(AUX_IRQ_PRIORITY, ARCV2_IRQ_DEF_PRIO);
95 95
96 /* 96 /*
diff --git a/arch/arc/kernel/intc-compact.c b/arch/arc/kernel/intc-compact.c
index ce9deb953ca9..8c1fd5c00782 100644
--- a/arch/arc/kernel/intc-compact.c
+++ b/arch/arc/kernel/intc-compact.c
@@ -57,7 +57,7 @@ static void arc_irq_mask(struct irq_data *data)
57 unsigned int ienb; 57 unsigned int ienb;
58 58
59 ienb = read_aux_reg(AUX_IENABLE); 59 ienb = read_aux_reg(AUX_IENABLE);
60 ienb &= ~(1 << data->irq); 60 ienb &= ~(1 << data->hwirq);
61 write_aux_reg(AUX_IENABLE, ienb); 61 write_aux_reg(AUX_IENABLE, ienb);
62} 62}
63 63
@@ -66,7 +66,7 @@ static void arc_irq_unmask(struct irq_data *data)
66 unsigned int ienb; 66 unsigned int ienb;
67 67
68 ienb = read_aux_reg(AUX_IENABLE); 68 ienb = read_aux_reg(AUX_IENABLE);
69 ienb |= (1 << data->irq); 69 ienb |= (1 << data->hwirq);
70 write_aux_reg(AUX_IENABLE, ienb); 70 write_aux_reg(AUX_IENABLE, ienb);
71} 71}
72 72
diff --git a/arch/arc/kernel/mcip.c b/arch/arc/kernel/mcip.c
index 560c4afc2af4..9274f8ade8c7 100644
--- a/arch/arc/kernel/mcip.c
+++ b/arch/arc/kernel/mcip.c
@@ -10,6 +10,7 @@
10 10
11#include <linux/smp.h> 11#include <linux/smp.h>
12#include <linux/irq.h> 12#include <linux/irq.h>
13#include <linux/irqchip/chained_irq.h>
13#include <linux/spinlock.h> 14#include <linux/spinlock.h>
14#include <soc/arc/mcip.h> 15#include <soc/arc/mcip.h>
15#include <asm/irqflags-arcv2.h> 16#include <asm/irqflags-arcv2.h>
@@ -221,10 +222,13 @@ static irq_hw_number_t idu_first_hwirq;
221static void idu_cascade_isr(struct irq_desc *desc) 222static void idu_cascade_isr(struct irq_desc *desc)
222{ 223{
223 struct irq_domain *idu_domain = irq_desc_get_handler_data(desc); 224 struct irq_domain *idu_domain = irq_desc_get_handler_data(desc);
225 struct irq_chip *core_chip = irq_desc_get_chip(desc);
224 irq_hw_number_t core_hwirq = irqd_to_hwirq(irq_desc_get_irq_data(desc)); 226 irq_hw_number_t core_hwirq = irqd_to_hwirq(irq_desc_get_irq_data(desc));
225 irq_hw_number_t idu_hwirq = core_hwirq - idu_first_hwirq; 227 irq_hw_number_t idu_hwirq = core_hwirq - idu_first_hwirq;
226 228
229 chained_irq_enter(core_chip, desc);
227 generic_handle_irq(irq_find_mapping(idu_domain, idu_hwirq)); 230 generic_handle_irq(irq_find_mapping(idu_domain, idu_hwirq));
231 chained_irq_exit(core_chip, desc);
228} 232}
229 233
230static int idu_irq_map(struct irq_domain *d, unsigned int virq, irq_hw_number_t hwirq) 234static int idu_irq_map(struct irq_domain *d, unsigned int virq, irq_hw_number_t hwirq)
diff --git a/arch/arc/kernel/module.c b/arch/arc/kernel/module.c
index 42e964db2967..3d99a6091332 100644
--- a/arch/arc/kernel/module.c
+++ b/arch/arc/kernel/module.c
@@ -32,8 +32,8 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
32#ifdef CONFIG_ARC_DW2_UNWIND 32#ifdef CONFIG_ARC_DW2_UNWIND
33 mod->arch.unw_sec_idx = 0; 33 mod->arch.unw_sec_idx = 0;
34 mod->arch.unw_info = NULL; 34 mod->arch.unw_info = NULL;
35 mod->arch.secstr = secstr;
36#endif 35#endif
36 mod->arch.secstr = secstr;
37 return 0; 37 return 0;
38} 38}
39 39
@@ -113,8 +113,10 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
113 113
114 } 114 }
115 115
116#ifdef CONFIG_ARC_DW2_UNWIND
116 if (strcmp(module->arch.secstr+sechdrs[tgtsec].sh_name, ".eh_frame") == 0) 117 if (strcmp(module->arch.secstr+sechdrs[tgtsec].sh_name, ".eh_frame") == 0)
117 module->arch.unw_sec_idx = tgtsec; 118 module->arch.unw_sec_idx = tgtsec;
119#endif
118 120
119 return 0; 121 return 0;
120 122
diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c
index ec86ac0e3321..d408fa21a07c 100644
--- a/arch/arc/mm/cache.c
+++ b/arch/arc/mm/cache.c
@@ -23,7 +23,7 @@
23 23
24static int l2_line_sz; 24static int l2_line_sz;
25static int ioc_exists; 25static int ioc_exists;
26int slc_enable = 1, ioc_enable = 0; 26int slc_enable = 1, ioc_enable = 1;
27unsigned long perip_base = ARC_UNCACHED_ADDR_SPACE; /* legacy value for boot */ 27unsigned long perip_base = ARC_UNCACHED_ADDR_SPACE; /* legacy value for boot */
28unsigned long perip_end = 0xFFFFFFFF; /* legacy value */ 28unsigned long perip_end = 0xFFFFFFFF; /* legacy value */
29 29
@@ -271,7 +271,11 @@ void __cache_line_loop_v2(phys_addr_t paddr, unsigned long vaddr,
271 271
272/* 272/*
273 * For ARC700 MMUv3 I-cache and D-cache flushes 273 * For ARC700 MMUv3 I-cache and D-cache flushes
274 * Also reused for HS38 aliasing I-cache configuration 274 * - ARC700 programming model requires paddr and vaddr be passed in separate
275 * AUX registers (*_IV*L and *_PTAG respectively) irrespective of whether the
276 * caches actually alias or not.
277 * - For HS38, only the aliasing I-cache configuration uses the PTAG reg
278 * (non aliasing I-cache version doesn't; while D-cache can't possibly alias)
275 */ 279 */
276static inline 280static inline
277void __cache_line_loop_v3(phys_addr_t paddr, unsigned long vaddr, 281void __cache_line_loop_v3(phys_addr_t paddr, unsigned long vaddr,
@@ -458,6 +462,21 @@ static inline void __dc_entire_op(const int op)
458 __after_dc_op(op); 462 __after_dc_op(op);
459} 463}
460 464
465static inline void __dc_disable(void)
466{
467 const int r = ARC_REG_DC_CTRL;
468
469 __dc_entire_op(OP_FLUSH_N_INV);
470 write_aux_reg(r, read_aux_reg(r) | DC_CTRL_DIS);
471}
472
473static void __dc_enable(void)
474{
475 const int r = ARC_REG_DC_CTRL;
476
477 write_aux_reg(r, read_aux_reg(r) & ~DC_CTRL_DIS);
478}
479
461/* For kernel mappings cache operation: index is same as paddr */ 480/* For kernel mappings cache operation: index is same as paddr */
462#define __dc_line_op_k(p, sz, op) __dc_line_op(p, p, sz, op) 481#define __dc_line_op_k(p, sz, op) __dc_line_op(p, p, sz, op)
463 482
@@ -483,6 +502,8 @@ static inline void __dc_line_op(phys_addr_t paddr, unsigned long vaddr,
483#else 502#else
484 503
485#define __dc_entire_op(op) 504#define __dc_entire_op(op)
505#define __dc_disable()
506#define __dc_enable()
486#define __dc_line_op(paddr, vaddr, sz, op) 507#define __dc_line_op(paddr, vaddr, sz, op)
487#define __dc_line_op_k(paddr, sz, op) 508#define __dc_line_op_k(paddr, sz, op)
488 509
@@ -597,6 +618,40 @@ noinline void slc_op(phys_addr_t paddr, unsigned long sz, const int op)
597#endif 618#endif
598} 619}
599 620
621noinline static void slc_entire_op(const int op)
622{
623 unsigned int ctrl, r = ARC_REG_SLC_CTRL;
624
625 ctrl = read_aux_reg(r);
626
627 if (!(op & OP_FLUSH)) /* i.e. OP_INV */
628 ctrl &= ~SLC_CTRL_IM; /* clear IM: Disable flush before Inv */
629 else
630 ctrl |= SLC_CTRL_IM;
631
632 write_aux_reg(r, ctrl);
633
634 write_aux_reg(ARC_REG_SLC_INVALIDATE, 1);
635
636 /* Important to wait for flush to complete */
637 while (read_aux_reg(r) & SLC_CTRL_BUSY);
638}
639
640static inline void arc_slc_disable(void)
641{
642 const int r = ARC_REG_SLC_CTRL;
643
644 slc_entire_op(OP_FLUSH_N_INV);
645 write_aux_reg(r, read_aux_reg(r) | SLC_CTRL_DIS);
646}
647
648static inline void arc_slc_enable(void)
649{
650 const int r = ARC_REG_SLC_CTRL;
651
652 write_aux_reg(r, read_aux_reg(r) & ~SLC_CTRL_DIS);
653}
654
600/*********************************************************** 655/***********************************************************
601 * Exported APIs 656 * Exported APIs
602 */ 657 */
@@ -923,21 +978,54 @@ SYSCALL_DEFINE3(cacheflush, uint32_t, start, uint32_t, sz, uint32_t, flags)
923 return 0; 978 return 0;
924} 979}
925 980
926void arc_cache_init(void) 981/*
982 * IO-Coherency (IOC) setup rules:
983 *
984 * 1. Needs to be at system level, so only once by Master core
985 * Non-Masters need not be accessing caches at that time
986 * - They are either HALT_ON_RESET and kick started much later or
987 * - if run on reset, need to ensure that arc_platform_smp_wait_to_boot()
988 * doesn't perturb caches or coherency unit
989 *
990 * 2. caches (L1 and SLC) need to be purged (flush+inv) before setting up IOC,
991 * otherwise any straggler data might behave strangely post IOC enabling
992 *
993 * 3. All Caches need to be disabled when setting up IOC to elide any in-flight
994 * Coherency transactions
995 */
996noinline void __init arc_ioc_setup(void)
927{ 997{
928 unsigned int __maybe_unused cpu = smp_processor_id(); 998 unsigned int ap_sz;
929 char str[256];
930 999
931 printk(arc_cache_mumbojumbo(0, str, sizeof(str))); 1000 /* Flush + invalidate + disable L1 dcache */
1001 __dc_disable();
1002
1003 /* Flush + invalidate SLC */
1004 if (read_aux_reg(ARC_REG_SLC_BCR))
1005 slc_entire_op(OP_FLUSH_N_INV);
1006
1007 /* IOC Aperture start: TBD: handle non default CONFIG_LINUX_LINK_BASE */
1008 write_aux_reg(ARC_REG_IO_COH_AP0_BASE, 0x80000);
932 1009
933 /* 1010 /*
934 * Only master CPU needs to execute rest of function: 1011 * IOC Aperture size:
935 * - Assume SMP so all cores will have same cache config so 1012 * decoded as 2 ^ (SIZE + 2) KB: so setting 0x11 implies 512M
936 * any geometry checks will be same for all 1013 * TBD: fix for PGU + 1GB of low mem
937 * - IOC setup / dma callbacks only need to be setup once 1014 * TBD: fix for PAE
938 */ 1015 */
939 if (cpu) 1016 ap_sz = order_base_2(arc_get_mem_sz()/1024) - 2;
940 return; 1017 write_aux_reg(ARC_REG_IO_COH_AP0_SIZE, ap_sz);
1018
1019 write_aux_reg(ARC_REG_IO_COH_PARTIAL, 1);
1020 write_aux_reg(ARC_REG_IO_COH_ENABLE, 1);
1021
1022 /* Re-enable L1 dcache */
1023 __dc_enable();
1024}
1025
1026void __init arc_cache_init_master(void)
1027{
1028 unsigned int __maybe_unused cpu = smp_processor_id();
941 1029
942 if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE)) { 1030 if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE)) {
943 struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache; 1031 struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache;
@@ -985,30 +1073,14 @@ void arc_cache_init(void)
985 } 1073 }
986 } 1074 }
987 1075
988 if (is_isa_arcv2() && l2_line_sz && !slc_enable) { 1076 /* Note that SLC disable not formally supported till HS 3.0 */
989 1077 if (is_isa_arcv2() && l2_line_sz && !slc_enable)
990 /* IM set : flush before invalidate */ 1078 arc_slc_disable();
991 write_aux_reg(ARC_REG_SLC_CTRL,
992 read_aux_reg(ARC_REG_SLC_CTRL) | SLC_CTRL_IM);
993 1079
994 write_aux_reg(ARC_REG_SLC_INVALIDATE, 1); 1080 if (is_isa_arcv2() && ioc_enable)
995 1081 arc_ioc_setup();
996 /* Important to wait for flush to complete */
997 while (read_aux_reg(ARC_REG_SLC_CTRL) & SLC_CTRL_BUSY);
998 write_aux_reg(ARC_REG_SLC_CTRL,
999 read_aux_reg(ARC_REG_SLC_CTRL) | SLC_CTRL_DISABLE);
1000 }
1001 1082
1002 if (is_isa_arcv2() && ioc_enable) { 1083 if (is_isa_arcv2() && ioc_enable) {
1003 /* IO coherency base - 0x8z */
1004 write_aux_reg(ARC_REG_IO_COH_AP0_BASE, 0x80000);
1005 /* IO coherency aperture size - 512Mb: 0x8z-0xAz */
1006 write_aux_reg(ARC_REG_IO_COH_AP0_SIZE, 0x11);
1007 /* Enable partial writes */
1008 write_aux_reg(ARC_REG_IO_COH_PARTIAL, 1);
1009 /* Enable IO coherency */
1010 write_aux_reg(ARC_REG_IO_COH_ENABLE, 1);
1011
1012 __dma_cache_wback_inv = __dma_cache_wback_inv_ioc; 1084 __dma_cache_wback_inv = __dma_cache_wback_inv_ioc;
1013 __dma_cache_inv = __dma_cache_inv_ioc; 1085 __dma_cache_inv = __dma_cache_inv_ioc;
1014 __dma_cache_wback = __dma_cache_wback_ioc; 1086 __dma_cache_wback = __dma_cache_wback_ioc;
@@ -1022,3 +1094,20 @@ void arc_cache_init(void)
1022 __dma_cache_wback = __dma_cache_wback_l1; 1094 __dma_cache_wback = __dma_cache_wback_l1;
1023 } 1095 }
1024} 1096}
1097
1098void __ref arc_cache_init(void)
1099{
1100 unsigned int __maybe_unused cpu = smp_processor_id();
1101 char str[256];
1102
1103 printk(arc_cache_mumbojumbo(0, str, sizeof(str)));
1104
1105 /*
1106 * Only master CPU needs to execute rest of function:
1107 * - Assume SMP so all cores will have same cache config so
1108 * any geometry checks will be same for all
1109 * - IOC setup / dma callbacks only need to be setup once
1110 */
1111 if (!cpu)
1112 arc_cache_init_master();
1113}
diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c
index 399e2f223d25..8c9415ed6280 100644
--- a/arch/arc/mm/init.c
+++ b/arch/arc/mm/init.c
@@ -40,6 +40,11 @@ struct pglist_data node_data[MAX_NUMNODES] __read_mostly;
40EXPORT_SYMBOL(node_data); 40EXPORT_SYMBOL(node_data);
41#endif 41#endif
42 42
43long __init arc_get_mem_sz(void)
44{
45 return low_mem_sz;
46}
47
43/* User can over-ride above with "mem=nnn[KkMm]" in cmdline */ 48/* User can over-ride above with "mem=nnn[KkMm]" in cmdline */
44static int __init setup_mem_sz(char *str) 49static int __init setup_mem_sz(char *str)
45{ 50{
diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile
index 7327250f0bb6..f10fe8526239 100644
--- a/arch/arm/boot/dts/Makefile
+++ b/arch/arm/boot/dts/Makefile
@@ -846,6 +846,7 @@ dtb-$(CONFIG_MACH_SUN8I) += \
846 sun8i-a83t-allwinner-h8homlet-v2.dtb \ 846 sun8i-a83t-allwinner-h8homlet-v2.dtb \
847 sun8i-a83t-cubietruck-plus.dtb \ 847 sun8i-a83t-cubietruck-plus.dtb \
848 sun8i-h3-bananapi-m2-plus.dtb \ 848 sun8i-h3-bananapi-m2-plus.dtb \
849 sun8i-h3-nanopi-m1.dtb \
849 sun8i-h3-nanopi-neo.dtb \ 850 sun8i-h3-nanopi-neo.dtb \
850 sun8i-h3-orangepi-2.dtb \ 851 sun8i-h3-orangepi-2.dtb \
851 sun8i-h3-orangepi-lite.dtb \ 852 sun8i-h3-orangepi-lite.dtb \
diff --git a/arch/arm/boot/dts/am335x-icev2.dts b/arch/arm/boot/dts/am335x-icev2.dts
index 1463df3b5b19..8ed46f9d79b7 100644
--- a/arch/arm/boot/dts/am335x-icev2.dts
+++ b/arch/arm/boot/dts/am335x-icev2.dts
@@ -170,7 +170,6 @@
170 AM33XX_IOPAD(0x8fc, PIN_INPUT_PULLUP | MUX_MODE0) /* (G16) mmc0_dat0.mmc0_dat0 */ 170 AM33XX_IOPAD(0x8fc, PIN_INPUT_PULLUP | MUX_MODE0) /* (G16) mmc0_dat0.mmc0_dat0 */
171 AM33XX_IOPAD(0x900, PIN_INPUT_PULLUP | MUX_MODE0) /* (G17) mmc0_clk.mmc0_clk */ 171 AM33XX_IOPAD(0x900, PIN_INPUT_PULLUP | MUX_MODE0) /* (G17) mmc0_clk.mmc0_clk */
172 AM33XX_IOPAD(0x904, PIN_INPUT_PULLUP | MUX_MODE0) /* (G18) mmc0_cmd.mmc0_cmd */ 172 AM33XX_IOPAD(0x904, PIN_INPUT_PULLUP | MUX_MODE0) /* (G18) mmc0_cmd.mmc0_cmd */
173 AM33XX_IOPAD(0x960, PIN_INPUT_PULLUP | MUX_MODE5) /* (C15) spi0_cs1.mmc0_sdcd */
174 >; 173 >;
175 }; 174 };
176 175
diff --git a/arch/arm/boot/dts/bcm-nsp.dtsi b/arch/arm/boot/dts/bcm-nsp.dtsi
index b6142bda661e..15f07f9af3b3 100644
--- a/arch/arm/boot/dts/bcm-nsp.dtsi
+++ b/arch/arm/boot/dts/bcm-nsp.dtsi
@@ -160,7 +160,7 @@
160 160
161 axi { 161 axi {
162 compatible = "simple-bus"; 162 compatible = "simple-bus";
163 ranges = <0x00000000 0x18000000 0x0011c40a>; 163 ranges = <0x00000000 0x18000000 0x0011c40c>;
164 #address-cells = <1>; 164 #address-cells = <1>;
165 #size-cells = <1>; 165 #size-cells = <1>;
166 166
diff --git a/arch/arm/boot/dts/da850-evm.dts b/arch/arm/boot/dts/da850-evm.dts
index 41de15fe15a2..78492a0bbbab 100644
--- a/arch/arm/boot/dts/da850-evm.dts
+++ b/arch/arm/boot/dts/da850-evm.dts
@@ -99,6 +99,7 @@
99 #size-cells = <1>; 99 #size-cells = <1>;
100 compatible = "m25p64"; 100 compatible = "m25p64";
101 spi-max-frequency = <30000000>; 101 spi-max-frequency = <30000000>;
102 m25p,fast-read;
102 reg = <0>; 103 reg = <0>;
103 partition@0 { 104 partition@0 {
104 label = "U-Boot-SPL"; 105 label = "U-Boot-SPL";
diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
index 1faf24acd521..5ba161679e01 100644
--- a/arch/arm/boot/dts/dra7.dtsi
+++ b/arch/arm/boot/dts/dra7.dtsi
@@ -1378,6 +1378,7 @@
1378 phy-names = "sata-phy"; 1378 phy-names = "sata-phy";
1379 clocks = <&sata_ref_clk>; 1379 clocks = <&sata_ref_clk>;
1380 ti,hwmods = "sata"; 1380 ti,hwmods = "sata";
1381 ports-implemented = <0x1>;
1381 }; 1382 };
1382 1383
1383 rtc: rtc@48838000 { 1384 rtc: rtc@48838000 {
diff --git a/arch/arm/boot/dts/dra72-evm-revc.dts b/arch/arm/boot/dts/dra72-evm-revc.dts
index c3d939c9666c..3f808a47df03 100644
--- a/arch/arm/boot/dts/dra72-evm-revc.dts
+++ b/arch/arm/boot/dts/dra72-evm-revc.dts
@@ -75,6 +75,6 @@
75 ti,rx-internal-delay = <DP83867_RGMIIDCTL_2_25_NS>; 75 ti,rx-internal-delay = <DP83867_RGMIIDCTL_2_25_NS>;
76 ti,tx-internal-delay = <DP83867_RGMIIDCTL_250_PS>; 76 ti,tx-internal-delay = <DP83867_RGMIIDCTL_250_PS>;
77 ti,fifo-depth = <DP83867_PHYCR_FIFO_DEPTH_8_B_NIB>; 77 ti,fifo-depth = <DP83867_PHYCR_FIFO_DEPTH_8_B_NIB>;
78 ti,min-output-imepdance; 78 ti,min-output-impedance;
79 }; 79 };
80}; 80};
diff --git a/arch/arm/boot/dts/imx6qdl-nitrogen6_max.dtsi b/arch/arm/boot/dts/imx6qdl-nitrogen6_max.dtsi
index 34887a10c5f1..47ba97229a48 100644
--- a/arch/arm/boot/dts/imx6qdl-nitrogen6_max.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-nitrogen6_max.dtsi
@@ -319,8 +319,6 @@
319 compatible = "fsl,imx6q-nitrogen6_max-sgtl5000", 319 compatible = "fsl,imx6q-nitrogen6_max-sgtl5000",
320 "fsl,imx-audio-sgtl5000"; 320 "fsl,imx-audio-sgtl5000";
321 model = "imx6q-nitrogen6_max-sgtl5000"; 321 model = "imx6q-nitrogen6_max-sgtl5000";
322 pinctrl-names = "default";
323 pinctrl-0 = <&pinctrl_sgtl5000>;
324 ssi-controller = <&ssi1>; 322 ssi-controller = <&ssi1>;
325 audio-codec = <&codec>; 323 audio-codec = <&codec>;
326 audio-routing = 324 audio-routing =
@@ -402,6 +400,8 @@
402 400
403 codec: sgtl5000@0a { 401 codec: sgtl5000@0a {
404 compatible = "fsl,sgtl5000"; 402 compatible = "fsl,sgtl5000";
403 pinctrl-names = "default";
404 pinctrl-0 = <&pinctrl_sgtl5000>;
405 reg = <0x0a>; 405 reg = <0x0a>;
406 clocks = <&clks IMX6QDL_CLK_CKO>; 406 clocks = <&clks IMX6QDL_CLK_CKO>;
407 VDDA-supply = <&reg_2p5v>; 407 VDDA-supply = <&reg_2p5v>;
diff --git a/arch/arm/boot/dts/imx6qdl-nitrogen6_som2.dtsi b/arch/arm/boot/dts/imx6qdl-nitrogen6_som2.dtsi
index d80f21abea62..31d4cc62dbc7 100644
--- a/arch/arm/boot/dts/imx6qdl-nitrogen6_som2.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-nitrogen6_som2.dtsi
@@ -250,8 +250,6 @@
250 compatible = "fsl,imx6q-nitrogen6_som2-sgtl5000", 250 compatible = "fsl,imx6q-nitrogen6_som2-sgtl5000",
251 "fsl,imx-audio-sgtl5000"; 251 "fsl,imx-audio-sgtl5000";
252 model = "imx6q-nitrogen6_som2-sgtl5000"; 252 model = "imx6q-nitrogen6_som2-sgtl5000";
253 pinctrl-names = "default";
254 pinctrl-0 = <&pinctrl_sgtl5000>;
255 ssi-controller = <&ssi1>; 253 ssi-controller = <&ssi1>;
256 audio-codec = <&codec>; 254 audio-codec = <&codec>;
257 audio-routing = 255 audio-routing =
@@ -320,6 +318,8 @@
320 318
321 codec: sgtl5000@0a { 319 codec: sgtl5000@0a {
322 compatible = "fsl,sgtl5000"; 320 compatible = "fsl,sgtl5000";
321 pinctrl-names = "default";
322 pinctrl-0 = <&pinctrl_sgtl5000>;
323 reg = <0x0a>; 323 reg = <0x0a>;
324 clocks = <&clks IMX6QDL_CLK_CKO>; 324 clocks = <&clks IMX6QDL_CLK_CKO>;
325 VDDA-supply = <&reg_2p5v>; 325 VDDA-supply = <&reg_2p5v>;
diff --git a/arch/arm/boot/dts/logicpd-som-lv-37xx-devkit.dts b/arch/arm/boot/dts/logicpd-som-lv-37xx-devkit.dts
index da8598402ab8..38faa90007d7 100644
--- a/arch/arm/boot/dts/logicpd-som-lv-37xx-devkit.dts
+++ b/arch/arm/boot/dts/logicpd-som-lv-37xx-devkit.dts
@@ -158,7 +158,7 @@
158&mmc1 { 158&mmc1 {
159 interrupts-extended = <&intc 83 &omap3_pmx_core 0x11a>; 159 interrupts-extended = <&intc 83 &omap3_pmx_core 0x11a>;
160 pinctrl-names = "default"; 160 pinctrl-names = "default";
161 pinctrl-0 = <&mmc1_pins &mmc1_cd>; 161 pinctrl-0 = <&mmc1_pins>;
162 wp-gpios = <&gpio4 30 GPIO_ACTIVE_HIGH>; /* gpio_126 */ 162 wp-gpios = <&gpio4 30 GPIO_ACTIVE_HIGH>; /* gpio_126 */
163 cd-gpios = <&gpio4 14 IRQ_TYPE_LEVEL_LOW>; /* gpio_110 */ 163 cd-gpios = <&gpio4 14 IRQ_TYPE_LEVEL_LOW>; /* gpio_110 */
164 vmmc-supply = <&vmmc1>; 164 vmmc-supply = <&vmmc1>;
@@ -193,7 +193,8 @@
193 OMAP3_CORE1_IOPAD(0x214a, PIN_INPUT | MUX_MODE0) /* sdmmc1_dat1.sdmmc1_dat1 */ 193 OMAP3_CORE1_IOPAD(0x214a, PIN_INPUT | MUX_MODE0) /* sdmmc1_dat1.sdmmc1_dat1 */
194 OMAP3_CORE1_IOPAD(0x214c, PIN_INPUT | MUX_MODE0) /* sdmmc1_dat2.sdmmc1_dat2 */ 194 OMAP3_CORE1_IOPAD(0x214c, PIN_INPUT | MUX_MODE0) /* sdmmc1_dat2.sdmmc1_dat2 */
195 OMAP3_CORE1_IOPAD(0x214e, PIN_INPUT | MUX_MODE0) /* sdmmc1_dat3.sdmmc1_dat3 */ 195 OMAP3_CORE1_IOPAD(0x214e, PIN_INPUT | MUX_MODE0) /* sdmmc1_dat3.sdmmc1_dat3 */
196 OMAP3_CORE1_IOPAD(0x2132, PIN_INPUT_PULLUP | MUX_MODE4) /* cam_strobe.gpio_126 sdmmc1_wp*/ 196 OMAP3_CORE1_IOPAD(0x2132, PIN_INPUT_PULLUP | MUX_MODE4) /* cam_strobe.gpio_126 */
197 OMAP3_CORE1_IOPAD(0x212c, PIN_INPUT_PULLUP | MUX_MODE4) /* cam_d11.gpio_110 */
197 >; 198 >;
198 }; 199 };
199 200
@@ -242,12 +243,6 @@
242 OMAP3_WKUP_IOPAD(0x2a16, PIN_OUTPUT | PIN_OFF_OUTPUT_LOW | MUX_MODE4) /* sys_boot6.gpio_8 */ 243 OMAP3_WKUP_IOPAD(0x2a16, PIN_OUTPUT | PIN_OFF_OUTPUT_LOW | MUX_MODE4) /* sys_boot6.gpio_8 */
243 >; 244 >;
244 }; 245 };
245
246 mmc1_cd: pinmux_mmc1_cd {
247 pinctrl-single,pins = <
248 OMAP3_WKUP_IOPAD(0x212c, PIN_INPUT_PULLUP | MUX_MODE4) /* cam_d11.gpio_110 */
249 >;
250 };
251}; 246};
252 247
253 248
diff --git a/arch/arm/boot/dts/omap5.dtsi b/arch/arm/boot/dts/omap5.dtsi
index 7cd92babc41a..0844737b72b2 100644
--- a/arch/arm/boot/dts/omap5.dtsi
+++ b/arch/arm/boot/dts/omap5.dtsi
@@ -988,6 +988,7 @@
988 phy-names = "sata-phy"; 988 phy-names = "sata-phy";
989 clocks = <&sata_ref_clk>; 989 clocks = <&sata_ref_clk>;
990 ti,hwmods = "sata"; 990 ti,hwmods = "sata";
991 ports-implemented = <0x1>;
991 }; 992 };
992 993
993 dss: dss@58000000 { 994 dss: dss@58000000 {
diff --git a/arch/arm/boot/dts/qcom-mdm9615.dtsi b/arch/arm/boot/dts/qcom-mdm9615.dtsi
index 5ae4ec59e6ea..c852b69229c9 100644
--- a/arch/arm/boot/dts/qcom-mdm9615.dtsi
+++ b/arch/arm/boot/dts/qcom-mdm9615.dtsi
@@ -357,7 +357,7 @@
357 }; 357 };
358 358
359 amba { 359 amba {
360 compatible = "arm,amba-bus"; 360 compatible = "simple-bus";
361 #address-cells = <1>; 361 #address-cells = <1>;
362 #size-cells = <1>; 362 #size-cells = <1>;
363 ranges; 363 ranges;
diff --git a/arch/arm/boot/dts/sun6i-a31-hummingbird.dts b/arch/arm/boot/dts/sun6i-a31-hummingbird.dts
index 735914f6ae44..7cae328398b1 100644
--- a/arch/arm/boot/dts/sun6i-a31-hummingbird.dts
+++ b/arch/arm/boot/dts/sun6i-a31-hummingbird.dts
@@ -140,6 +140,10 @@
140 cpu-supply = <&reg_dcdc3>; 140 cpu-supply = <&reg_dcdc3>;
141}; 141};
142 142
143&de {
144 status = "okay";
145};
146
143&ehci0 { 147&ehci0 {
144 status = "okay"; 148 status = "okay";
145}; 149};
diff --git a/arch/arm/boot/dts/sun6i-a31.dtsi b/arch/arm/boot/dts/sun6i-a31.dtsi
index 2b26175d55d1..e78faaf9243c 100644
--- a/arch/arm/boot/dts/sun6i-a31.dtsi
+++ b/arch/arm/boot/dts/sun6i-a31.dtsi
@@ -234,6 +234,7 @@
234 de: display-engine { 234 de: display-engine {
235 compatible = "allwinner,sun6i-a31-display-engine"; 235 compatible = "allwinner,sun6i-a31-display-engine";
236 allwinner,pipelines = <&fe0>; 236 allwinner,pipelines = <&fe0>;
237 status = "disabled";
237 }; 238 };
238 239
239 soc@01c00000 { 240 soc@01c00000 {
diff --git a/arch/arm/boot/dts/sun7i-a20-olinuxino-lime2-emmc.dts b/arch/arm/boot/dts/sun7i-a20-olinuxino-lime2-emmc.dts
index 5ea4915f6d75..10d307408f23 100644
--- a/arch/arm/boot/dts/sun7i-a20-olinuxino-lime2-emmc.dts
+++ b/arch/arm/boot/dts/sun7i-a20-olinuxino-lime2-emmc.dts
@@ -56,7 +56,7 @@
56}; 56};
57 57
58&pio { 58&pio {
59 mmc2_pins_nrst: mmc2@0 { 59 mmc2_pins_nrst: mmc2-rst-pin {
60 allwinner,pins = "PC16"; 60 allwinner,pins = "PC16";
61 allwinner,function = "gpio_out"; 61 allwinner,function = "gpio_out";
62 allwinner,drive = <SUN4I_PINCTRL_10_MA>; 62 allwinner,drive = <SUN4I_PINCTRL_10_MA>;
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index b01a43851294..028d2b70e3b5 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -471,7 +471,7 @@ CONFIG_MESON_WATCHDOG=y
471CONFIG_DW_WATCHDOG=y 471CONFIG_DW_WATCHDOG=y
472CONFIG_DIGICOLOR_WATCHDOG=y 472CONFIG_DIGICOLOR_WATCHDOG=y
473CONFIG_BCM2835_WDT=y 473CONFIG_BCM2835_WDT=y
474CONFIG_BCM47XX_WATCHDOG=y 474CONFIG_BCM47XX_WDT=y
475CONFIG_BCM7038_WDT=m 475CONFIG_BCM7038_WDT=m
476CONFIG_BCM_KONA_WDT=y 476CONFIG_BCM_KONA_WDT=y
477CONFIG_MFD_ACT8945A=y 477CONFIG_MFD_ACT8945A=y
@@ -893,7 +893,7 @@ CONFIG_BCM2835_MBOX=y
893CONFIG_RASPBERRYPI_FIRMWARE=y 893CONFIG_RASPBERRYPI_FIRMWARE=y
894CONFIG_EFI_VARS=m 894CONFIG_EFI_VARS=m
895CONFIG_EFI_CAPSULE_LOADER=m 895CONFIG_EFI_CAPSULE_LOADER=m
896CONFIG_CONFIG_BCM47XX_NVRAM=y 896CONFIG_BCM47XX_NVRAM=y
897CONFIG_BCM47XX_SPROM=y 897CONFIG_BCM47XX_SPROM=y
898CONFIG_EXT4_FS=y 898CONFIG_EXT4_FS=y
899CONFIG_AUTOFS4_FS=y 899CONFIG_AUTOFS4_FS=y
diff --git a/arch/arm/configs/s3c2410_defconfig b/arch/arm/configs/s3c2410_defconfig
index 4364040ed696..1e6c48dd7b11 100644
--- a/arch/arm/configs/s3c2410_defconfig
+++ b/arch/arm/configs/s3c2410_defconfig
@@ -86,9 +86,9 @@ CONFIG_IPV6_TUNNEL=m
86CONFIG_NETFILTER=y 86CONFIG_NETFILTER=y
87CONFIG_NF_CONNTRACK=m 87CONFIG_NF_CONNTRACK=m
88CONFIG_NF_CONNTRACK_EVENTS=y 88CONFIG_NF_CONNTRACK_EVENTS=y
89CONFIG_NF_CT_PROTO_DCCP=m 89CONFIG_NF_CT_PROTO_DCCP=y
90CONFIG_NF_CT_PROTO_SCTP=m 90CONFIG_NF_CT_PROTO_SCTP=y
91CONFIG_NF_CT_PROTO_UDPLITE=m 91CONFIG_NF_CT_PROTO_UDPLITE=y
92CONFIG_NF_CONNTRACK_AMANDA=m 92CONFIG_NF_CONNTRACK_AMANDA=m
93CONFIG_NF_CONNTRACK_FTP=m 93CONFIG_NF_CONNTRACK_FTP=m
94CONFIG_NF_CONNTRACK_H323=m 94CONFIG_NF_CONNTRACK_H323=m
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
index 522b5feb4eaa..b62eaeb147aa 100644
--- a/arch/arm/include/asm/cputype.h
+++ b/arch/arm/include/asm/cputype.h
@@ -94,6 +94,9 @@
94#define ARM_CPU_XSCALE_ARCH_V2 0x4000 94#define ARM_CPU_XSCALE_ARCH_V2 0x4000
95#define ARM_CPU_XSCALE_ARCH_V3 0x6000 95#define ARM_CPU_XSCALE_ARCH_V3 0x6000
96 96
97/* Qualcomm implemented cores */
98#define ARM_CPU_PART_SCORPION 0x510002d0
99
97extern unsigned int processor_id; 100extern unsigned int processor_id;
98 101
99#ifdef CONFIG_CPU_CP15 102#ifdef CONFIG_CPU_CP15
diff --git a/arch/arm/include/asm/ftrace.h b/arch/arm/include/asm/ftrace.h
index bfe2a2f5a644..22b73112b75f 100644
--- a/arch/arm/include/asm/ftrace.h
+++ b/arch/arm/include/asm/ftrace.h
@@ -54,6 +54,24 @@ static inline void *return_address(unsigned int level)
54 54
55#define ftrace_return_address(n) return_address(n) 55#define ftrace_return_address(n) return_address(n)
56 56
57#define ARCH_HAS_SYSCALL_MATCH_SYM_NAME
58
59static inline bool arch_syscall_match_sym_name(const char *sym,
60 const char *name)
61{
62 if (!strcmp(sym, "sys_mmap2"))
63 sym = "sys_mmap_pgoff";
64 else if (!strcmp(sym, "sys_statfs64_wrapper"))
65 sym = "sys_statfs64";
66 else if (!strcmp(sym, "sys_fstatfs64_wrapper"))
67 sym = "sys_fstatfs64";
68 else if (!strcmp(sym, "sys_arm_fadvise64_64"))
69 sym = "sys_fadvise64_64";
70
71 /* Ignore case since sym may start with "SyS" instead of "sys" */
72 return !strcasecmp(sym, name);
73}
74
57#endif /* ifndef __ASSEMBLY__ */ 75#endif /* ifndef __ASSEMBLY__ */
58 76
59#endif /* _ASM_ARM_FTRACE */ 77#endif /* _ASM_ARM_FTRACE */
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index d5423ab15ed5..cc495d799c67 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -60,9 +60,6 @@ struct kvm_arch {
60 /* The last vcpu id that ran on each physical CPU */ 60 /* The last vcpu id that ran on each physical CPU */
61 int __percpu *last_vcpu_ran; 61 int __percpu *last_vcpu_ran;
62 62
63 /* Timer */
64 struct arch_timer_kvm timer;
65
66 /* 63 /*
67 * Anything that is not used directly from assembly code goes 64 * Anything that is not used directly from assembly code goes
68 * here. 65 * here.
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index 74a44727f8e1..95f38dcd611d 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -129,8 +129,7 @@ static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
129 129
130static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu, 130static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu,
131 kvm_pfn_t pfn, 131 kvm_pfn_t pfn,
132 unsigned long size, 132 unsigned long size)
133 bool ipa_uncached)
134{ 133{
135 /* 134 /*
136 * If we are going to insert an instruction page and the icache is 135 * If we are going to insert an instruction page and the icache is
@@ -150,18 +149,12 @@ static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu,
150 * and iterate over the range. 149 * and iterate over the range.
151 */ 150 */
152 151
153 bool need_flush = !vcpu_has_cache_enabled(vcpu) || ipa_uncached;
154
155 VM_BUG_ON(size & ~PAGE_MASK); 152 VM_BUG_ON(size & ~PAGE_MASK);
156 153
157 if (!need_flush && !icache_is_pipt())
158 goto vipt_cache;
159
160 while (size) { 154 while (size) {
161 void *va = kmap_atomic_pfn(pfn); 155 void *va = kmap_atomic_pfn(pfn);
162 156
163 if (need_flush) 157 kvm_flush_dcache_to_poc(va, PAGE_SIZE);
164 kvm_flush_dcache_to_poc(va, PAGE_SIZE);
165 158
166 if (icache_is_pipt()) 159 if (icache_is_pipt())
167 __cpuc_coherent_user_range((unsigned long)va, 160 __cpuc_coherent_user_range((unsigned long)va,
@@ -173,7 +166,6 @@ static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu,
173 kunmap_atomic(va); 166 kunmap_atomic(va);
174 } 167 }
175 168
176vipt_cache:
177 if (!icache_is_pipt() && !icache_is_vivt_asid_tagged()) { 169 if (!icache_is_pipt() && !icache_is_vivt_asid_tagged()) {
178 /* any kind of VIPT cache */ 170 /* any kind of VIPT cache */
179 __flush_icache_all(); 171 __flush_icache_all();
diff --git a/arch/arm/include/asm/virt.h b/arch/arm/include/asm/virt.h
index a2e75b84e2ae..6dae1956c74d 100644
--- a/arch/arm/include/asm/virt.h
+++ b/arch/arm/include/asm/virt.h
@@ -80,6 +80,11 @@ static inline bool is_kernel_in_hyp_mode(void)
80 return false; 80 return false;
81} 81}
82 82
83static inline bool has_vhe(void)
84{
85 return false;
86}
87
83/* The section containing the hypervisor idmap text */ 88/* The section containing the hypervisor idmap text */
84extern char __hyp_idmap_text_start[]; 89extern char __hyp_idmap_text_start[];
85extern char __hyp_idmap_text_end[]; 90extern char __hyp_idmap_text_end[];
diff --git a/arch/arm/include/uapi/asm/kvm.h b/arch/arm/include/uapi/asm/kvm.h
index af05f8e0903e..6ebd3e6a1fd1 100644
--- a/arch/arm/include/uapi/asm/kvm.h
+++ b/arch/arm/include/uapi/asm/kvm.h
@@ -181,10 +181,23 @@ struct kvm_arch_memory_slot {
181#define KVM_DEV_ARM_VGIC_GRP_CPU_REGS 2 181#define KVM_DEV_ARM_VGIC_GRP_CPU_REGS 2
182#define KVM_DEV_ARM_VGIC_CPUID_SHIFT 32 182#define KVM_DEV_ARM_VGIC_CPUID_SHIFT 32
183#define KVM_DEV_ARM_VGIC_CPUID_MASK (0xffULL << KVM_DEV_ARM_VGIC_CPUID_SHIFT) 183#define KVM_DEV_ARM_VGIC_CPUID_MASK (0xffULL << KVM_DEV_ARM_VGIC_CPUID_SHIFT)
184#define KVM_DEV_ARM_VGIC_V3_MPIDR_SHIFT 32
185#define KVM_DEV_ARM_VGIC_V3_MPIDR_MASK \
186 (0xffffffffULL << KVM_DEV_ARM_VGIC_V3_MPIDR_SHIFT)
184#define KVM_DEV_ARM_VGIC_OFFSET_SHIFT 0 187#define KVM_DEV_ARM_VGIC_OFFSET_SHIFT 0
185#define KVM_DEV_ARM_VGIC_OFFSET_MASK (0xffffffffULL << KVM_DEV_ARM_VGIC_OFFSET_SHIFT) 188#define KVM_DEV_ARM_VGIC_OFFSET_MASK (0xffffffffULL << KVM_DEV_ARM_VGIC_OFFSET_SHIFT)
189#define KVM_DEV_ARM_VGIC_SYSREG_INSTR_MASK (0xffff)
186#define KVM_DEV_ARM_VGIC_GRP_NR_IRQS 3 190#define KVM_DEV_ARM_VGIC_GRP_NR_IRQS 3
187#define KVM_DEV_ARM_VGIC_GRP_CTRL 4 191#define KVM_DEV_ARM_VGIC_GRP_CTRL 4
192#define KVM_DEV_ARM_VGIC_GRP_REDIST_REGS 5
193#define KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS 6
194#define KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO 7
195#define KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT 10
196#define KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_MASK \
197 (0x3fffffULL << KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT)
198#define KVM_DEV_ARM_VGIC_LINE_LEVEL_INTID_MASK 0x3ff
199#define VGIC_LEVEL_INFO_LINE_LEVEL 0
200
188#define KVM_DEV_ARM_VGIC_CTRL_INIT 0 201#define KVM_DEV_ARM_VGIC_CTRL_INIT 0
189 202
190/* KVM_IRQ_LINE irq field index values */ 203/* KVM_IRQ_LINE irq field index values */
diff --git a/arch/arm/include/asm/types.h b/arch/arm/include/uapi/asm/types.h
index a53cdb8f068c..9435a42f575e 100644
--- a/arch/arm/include/asm/types.h
+++ b/arch/arm/include/uapi/asm/types.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_TYPES_H 1#ifndef _UAPI_ASM_TYPES_H
2#define _ASM_TYPES_H 2#define _UAPI_ASM_TYPES_H
3 3
4#include <asm-generic/int-ll64.h> 4#include <asm-generic/int-ll64.h>
5 5
@@ -37,4 +37,4 @@
37#define __UINTPTR_TYPE__ unsigned long 37#define __UINTPTR_TYPE__ unsigned long
38#endif 38#endif
39 39
40#endif /* _ASM_TYPES_H */ 40#endif /* _UAPI_ASM_TYPES_H */
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index 188180b5523d..be3b3fbd382f 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -1063,6 +1063,22 @@ static int __init arch_hw_breakpoint_init(void)
1063 return 0; 1063 return 0;
1064 } 1064 }
1065 1065
1066 /*
1067 * Scorpion CPUs (at least those in APQ8060) seem to set DBGPRSR.SPD
1068 * whenever a WFI is issued, even if the core is not powered down, in
1069 * violation of the architecture. When DBGPRSR.SPD is set, accesses to
1070 * breakpoint and watchpoint registers are treated as undefined, so
1071 * this results in boot time and runtime failures when these are
1072 * accessed and we unexpectedly take a trap.
1073 *
1074 * It's not clear if/how this can be worked around, so we blacklist
1075 * Scorpion CPUs to avoid these issues.
1076 */
1077 if (read_cpuid_part() == ARM_CPU_PART_SCORPION) {
1078 pr_info("Scorpion CPU detected. Hardware breakpoints and watchpoints disabled\n");
1079 return 0;
1080 }
1081
1066 has_ossr = core_has_os_save_restore(); 1082 has_ossr = core_has_os_save_restore();
1067 1083
1068 /* Determine how many BRPs/WRPs are available. */ 1084 /* Determine how many BRPs/WRPs are available. */
diff --git a/arch/arm/kernel/smp_tlb.c b/arch/arm/kernel/smp_tlb.c
index 22313cb53362..9af0701f7094 100644
--- a/arch/arm/kernel/smp_tlb.c
+++ b/arch/arm/kernel/smp_tlb.c
@@ -9,6 +9,7 @@
9 */ 9 */
10#include <linux/preempt.h> 10#include <linux/preempt.h>
11#include <linux/smp.h> 11#include <linux/smp.h>
12#include <linux/uaccess.h>
12 13
13#include <asm/smp_plat.h> 14#include <asm/smp_plat.h>
14#include <asm/tlbflush.h> 15#include <asm/tlbflush.h>
@@ -40,8 +41,11 @@ static inline void ipi_flush_tlb_mm(void *arg)
40static inline void ipi_flush_tlb_page(void *arg) 41static inline void ipi_flush_tlb_page(void *arg)
41{ 42{
42 struct tlb_args *ta = (struct tlb_args *)arg; 43 struct tlb_args *ta = (struct tlb_args *)arg;
44 unsigned int __ua_flags = uaccess_save_and_enable();
43 45
44 local_flush_tlb_page(ta->ta_vma, ta->ta_start); 46 local_flush_tlb_page(ta->ta_vma, ta->ta_start);
47
48 uaccess_restore(__ua_flags);
45} 49}
46 50
47static inline void ipi_flush_tlb_kernel_page(void *arg) 51static inline void ipi_flush_tlb_kernel_page(void *arg)
@@ -54,8 +58,11 @@ static inline void ipi_flush_tlb_kernel_page(void *arg)
54static inline void ipi_flush_tlb_range(void *arg) 58static inline void ipi_flush_tlb_range(void *arg)
55{ 59{
56 struct tlb_args *ta = (struct tlb_args *)arg; 60 struct tlb_args *ta = (struct tlb_args *)arg;
61 unsigned int __ua_flags = uaccess_save_and_enable();
57 62
58 local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end); 63 local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
64
65 uaccess_restore(__ua_flags);
59} 66}
60 67
61static inline void ipi_flush_tlb_kernel_range(void *arg) 68static inline void ipi_flush_tlb_kernel_range(void *arg)
diff --git a/arch/arm/kvm/Makefile b/arch/arm/kvm/Makefile
index d571243ab4d1..7b3670c2ae7b 100644
--- a/arch/arm/kvm/Makefile
+++ b/arch/arm/kvm/Makefile
@@ -7,7 +7,7 @@ ifeq ($(plus_virt),+virt)
7 plus_virt_def := -DREQUIRES_VIRT=1 7 plus_virt_def := -DREQUIRES_VIRT=1
8endif 8endif
9 9
10ccflags-y += -Iarch/arm/kvm 10ccflags-y += -Iarch/arm/kvm -Ivirt/kvm/arm/vgic
11CFLAGS_arm.o := -I. $(plus_virt_def) 11CFLAGS_arm.o := -I. $(plus_virt_def)
12CFLAGS_mmu.o := -I. 12CFLAGS_mmu.o := -I.
13 13
@@ -20,7 +20,7 @@ kvm-arm-y = $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o $(KVM)/eventfd.o $(KVM)/vf
20obj-$(CONFIG_KVM_ARM_HOST) += hyp/ 20obj-$(CONFIG_KVM_ARM_HOST) += hyp/
21obj-y += kvm-arm.o init.o interrupts.o 21obj-y += kvm-arm.o init.o interrupts.o
22obj-y += arm.o handle_exit.o guest.o mmu.o emulate.o reset.o 22obj-y += arm.o handle_exit.o guest.o mmu.o emulate.o reset.o
23obj-y += coproc.o coproc_a15.o coproc_a7.o mmio.o psci.o perf.o 23obj-y += coproc.o coproc_a15.o coproc_a7.o mmio.o psci.o perf.o vgic-v3-coproc.o
24obj-y += $(KVM)/arm/aarch32.o 24obj-y += $(KVM)/arm/aarch32.o
25 25
26obj-y += $(KVM)/arm/vgic/vgic.o 26obj-y += $(KVM)/arm/vgic/vgic.o
@@ -33,5 +33,6 @@ obj-y += $(KVM)/arm/vgic/vgic-mmio-v2.o
33obj-y += $(KVM)/arm/vgic/vgic-mmio-v3.o 33obj-y += $(KVM)/arm/vgic/vgic-mmio-v3.o
34obj-y += $(KVM)/arm/vgic/vgic-kvm-device.o 34obj-y += $(KVM)/arm/vgic/vgic-kvm-device.o
35obj-y += $(KVM)/arm/vgic/vgic-its.o 35obj-y += $(KVM)/arm/vgic/vgic-its.o
36obj-y += $(KVM)/arm/vgic/vgic-debug.o
36obj-y += $(KVM)/irqchip.o 37obj-y += $(KVM)/irqchip.o
37obj-y += $(KVM)/arm/arch_timer.o 38obj-y += $(KVM)/arm/arch_timer.o
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 11676787ad49..21c493a9e5c9 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -135,7 +135,6 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
135 goto out_free_stage2_pgd; 135 goto out_free_stage2_pgd;
136 136
137 kvm_vgic_early_init(kvm); 137 kvm_vgic_early_init(kvm);
138 kvm_timer_init(kvm);
139 138
140 /* Mark the initial VMID generation invalid */ 139 /* Mark the initial VMID generation invalid */
141 kvm->arch.vmid_gen = 0; 140 kvm->arch.vmid_gen = 0;
@@ -301,7 +300,8 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
301 300
302int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) 301int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
303{ 302{
304 return kvm_timer_should_fire(vcpu); 303 return kvm_timer_should_fire(vcpu_vtimer(vcpu)) ||
304 kvm_timer_should_fire(vcpu_ptimer(vcpu));
305} 305}
306 306
307void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) 307void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
@@ -1099,6 +1099,9 @@ static void cpu_init_hyp_mode(void *dummy)
1099 __cpu_init_hyp_mode(pgd_ptr, hyp_stack_ptr, vector_ptr); 1099 __cpu_init_hyp_mode(pgd_ptr, hyp_stack_ptr, vector_ptr);
1100 __cpu_init_stage2(); 1100 __cpu_init_stage2();
1101 1101
1102 if (is_kernel_in_hyp_mode())
1103 kvm_timer_init_vhe();
1104
1102 kvm_arm_init_debug(); 1105 kvm_arm_init_debug();
1103} 1106}
1104 1107
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index a5265edbeeab..962616fd4ddd 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -1232,9 +1232,9 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
1232} 1232}
1233 1233
1234static void coherent_cache_guest_page(struct kvm_vcpu *vcpu, kvm_pfn_t pfn, 1234static void coherent_cache_guest_page(struct kvm_vcpu *vcpu, kvm_pfn_t pfn,
1235 unsigned long size, bool uncached) 1235 unsigned long size)
1236{ 1236{
1237 __coherent_cache_guest_page(vcpu, pfn, size, uncached); 1237 __coherent_cache_guest_page(vcpu, pfn, size);
1238} 1238}
1239 1239
1240static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, 1240static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
@@ -1250,7 +1250,6 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
1250 struct vm_area_struct *vma; 1250 struct vm_area_struct *vma;
1251 kvm_pfn_t pfn; 1251 kvm_pfn_t pfn;
1252 pgprot_t mem_type = PAGE_S2; 1252 pgprot_t mem_type = PAGE_S2;
1253 bool fault_ipa_uncached;
1254 bool logging_active = memslot_is_logging(memslot); 1253 bool logging_active = memslot_is_logging(memslot);
1255 unsigned long flags = 0; 1254 unsigned long flags = 0;
1256 1255
@@ -1337,8 +1336,6 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
1337 if (!hugetlb && !force_pte) 1336 if (!hugetlb && !force_pte)
1338 hugetlb = transparent_hugepage_adjust(&pfn, &fault_ipa); 1337 hugetlb = transparent_hugepage_adjust(&pfn, &fault_ipa);
1339 1338
1340 fault_ipa_uncached = memslot->flags & KVM_MEMSLOT_INCOHERENT;
1341
1342 if (hugetlb) { 1339 if (hugetlb) {
1343 pmd_t new_pmd = pfn_pmd(pfn, mem_type); 1340 pmd_t new_pmd = pfn_pmd(pfn, mem_type);
1344 new_pmd = pmd_mkhuge(new_pmd); 1341 new_pmd = pmd_mkhuge(new_pmd);
@@ -1346,7 +1343,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
1346 new_pmd = kvm_s2pmd_mkwrite(new_pmd); 1343 new_pmd = kvm_s2pmd_mkwrite(new_pmd);
1347 kvm_set_pfn_dirty(pfn); 1344 kvm_set_pfn_dirty(pfn);
1348 } 1345 }
1349 coherent_cache_guest_page(vcpu, pfn, PMD_SIZE, fault_ipa_uncached); 1346 coherent_cache_guest_page(vcpu, pfn, PMD_SIZE);
1350 ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd); 1347 ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd);
1351 } else { 1348 } else {
1352 pte_t new_pte = pfn_pte(pfn, mem_type); 1349 pte_t new_pte = pfn_pte(pfn, mem_type);
@@ -1356,7 +1353,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
1356 kvm_set_pfn_dirty(pfn); 1353 kvm_set_pfn_dirty(pfn);
1357 mark_page_dirty(kvm, gfn); 1354 mark_page_dirty(kvm, gfn);
1358 } 1355 }
1359 coherent_cache_guest_page(vcpu, pfn, PAGE_SIZE, fault_ipa_uncached); 1356 coherent_cache_guest_page(vcpu, pfn, PAGE_SIZE);
1360 ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte, flags); 1357 ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte, flags);
1361 } 1358 }
1362 1359
@@ -1879,15 +1876,6 @@ void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
1879int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot, 1876int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
1880 unsigned long npages) 1877 unsigned long npages)
1881{ 1878{
1882 /*
1883 * Readonly memslots are not incoherent with the caches by definition,
1884 * but in practice, they are used mostly to emulate ROMs or NOR flashes
1885 * that the guest may consider devices and hence map as uncached.
1886 * To prevent incoherency issues in these cases, tag all readonly
1887 * regions as incoherent.
1888 */
1889 if (slot->flags & KVM_MEM_READONLY)
1890 slot->flags |= KVM_MEMSLOT_INCOHERENT;
1891 return 0; 1879 return 0;
1892} 1880}
1893 1881
diff --git a/arch/arm/kvm/reset.c b/arch/arm/kvm/reset.c
index 4b5e802e57d1..1da8b2d14550 100644
--- a/arch/arm/kvm/reset.c
+++ b/arch/arm/kvm/reset.c
@@ -37,6 +37,11 @@ static struct kvm_regs cortexa_regs_reset = {
37 .usr_regs.ARM_cpsr = SVC_MODE | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT, 37 .usr_regs.ARM_cpsr = SVC_MODE | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT,
38}; 38};
39 39
40static const struct kvm_irq_level cortexa_ptimer_irq = {
41 { .irq = 30 },
42 .level = 1,
43};
44
40static const struct kvm_irq_level cortexa_vtimer_irq = { 45static const struct kvm_irq_level cortexa_vtimer_irq = {
41 { .irq = 27 }, 46 { .irq = 27 },
42 .level = 1, 47 .level = 1,
@@ -58,6 +63,7 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
58{ 63{
59 struct kvm_regs *reset_regs; 64 struct kvm_regs *reset_regs;
60 const struct kvm_irq_level *cpu_vtimer_irq; 65 const struct kvm_irq_level *cpu_vtimer_irq;
66 const struct kvm_irq_level *cpu_ptimer_irq;
61 67
62 switch (vcpu->arch.target) { 68 switch (vcpu->arch.target) {
63 case KVM_ARM_TARGET_CORTEX_A7: 69 case KVM_ARM_TARGET_CORTEX_A7:
@@ -65,6 +71,7 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
65 reset_regs = &cortexa_regs_reset; 71 reset_regs = &cortexa_regs_reset;
66 vcpu->arch.midr = read_cpuid_id(); 72 vcpu->arch.midr = read_cpuid_id();
67 cpu_vtimer_irq = &cortexa_vtimer_irq; 73 cpu_vtimer_irq = &cortexa_vtimer_irq;
74 cpu_ptimer_irq = &cortexa_ptimer_irq;
68 break; 75 break;
69 default: 76 default:
70 return -ENODEV; 77 return -ENODEV;
@@ -77,5 +84,5 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
77 kvm_reset_coprocs(vcpu); 84 kvm_reset_coprocs(vcpu);
78 85
79 /* Reset arch_timer context */ 86 /* Reset arch_timer context */
80 return kvm_timer_vcpu_reset(vcpu, cpu_vtimer_irq); 87 return kvm_timer_vcpu_reset(vcpu, cpu_vtimer_irq, cpu_ptimer_irq);
81} 88}
diff --git a/arch/arm/kvm/vgic-v3-coproc.c b/arch/arm/kvm/vgic-v3-coproc.c
new file mode 100644
index 000000000000..f41abf76366f
--- /dev/null
+++ b/arch/arm/kvm/vgic-v3-coproc.c
@@ -0,0 +1,35 @@
1/*
2 * VGIC system registers handling functions for AArch32 mode
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#include <linux/kvm.h>
15#include <linux/kvm_host.h>
16#include <asm/kvm_emulate.h>
17#include "vgic.h"
18
19int vgic_v3_has_cpu_sysregs_attr(struct kvm_vcpu *vcpu, bool is_write, u64 id,
20 u64 *reg)
21{
22 /*
23 * TODO: Implement for AArch32
24 */
25 return -ENXIO;
26}
27
28int vgic_v3_cpu_sysregs_uaccess(struct kvm_vcpu *vcpu, bool is_write, u64 id,
29 u64 *reg)
30{
31 /*
32 * TODO: Implement for AArch32
33 */
34 return -ENXIO;
35}
diff --git a/arch/arm/mach-omap1/dma.c b/arch/arm/mach-omap1/dma.c
index f6ba589cd312..c821c1d5610e 100644
--- a/arch/arm/mach-omap1/dma.c
+++ b/arch/arm/mach-omap1/dma.c
@@ -32,7 +32,6 @@
32#include "soc.h" 32#include "soc.h"
33 33
34#define OMAP1_DMA_BASE (0xfffed800) 34#define OMAP1_DMA_BASE (0xfffed800)
35#define OMAP1_LOGICAL_DMA_CH_COUNT 17
36 35
37static u32 enable_1510_mode; 36static u32 enable_1510_mode;
38 37
@@ -348,8 +347,6 @@ static int __init omap1_system_dma_init(void)
348 goto exit_iounmap; 347 goto exit_iounmap;
349 } 348 }
350 349
351 d->lch_count = OMAP1_LOGICAL_DMA_CH_COUNT;
352
353 /* Valid attributes for omap1 plus processors */ 350 /* Valid attributes for omap1 plus processors */
354 if (cpu_is_omap15xx()) 351 if (cpu_is_omap15xx())
355 d->dev_caps = ENABLE_1510_MODE; 352 d->dev_caps = ENABLE_1510_MODE;
@@ -366,13 +363,14 @@ static int __init omap1_system_dma_init(void)
366 d->dev_caps |= CLEAR_CSR_ON_READ; 363 d->dev_caps |= CLEAR_CSR_ON_READ;
367 d->dev_caps |= IS_WORD_16; 364 d->dev_caps |= IS_WORD_16;
368 365
369 if (cpu_is_omap15xx()) 366 /* available logical channels */
370 d->chan_count = 9; 367 if (cpu_is_omap15xx()) {
371 else if (cpu_is_omap16xx() || cpu_is_omap7xx()) { 368 d->lch_count = 9;
372 if (!(d->dev_caps & ENABLE_1510_MODE)) 369 } else {
373 d->chan_count = 16; 370 if (d->dev_caps & ENABLE_1510_MODE)
371 d->lch_count = 9;
374 else 372 else
375 d->chan_count = 9; 373 d->lch_count = 16;
376 } 374 }
377 375
378 p = dma_plat_info; 376 p = dma_plat_info;
diff --git a/arch/arm/mach-omap2/pdata-quirks.c b/arch/arm/mach-omap2/pdata-quirks.c
index 477910a48448..70c004794880 100644
--- a/arch/arm/mach-omap2/pdata-quirks.c
+++ b/arch/arm/mach-omap2/pdata-quirks.c
@@ -161,7 +161,7 @@ static struct ti_st_plat_data wilink7_pdata = {
161 .nshutdown_gpio = 162, 161 .nshutdown_gpio = 162,
162 .dev_name = "/dev/ttyO1", 162 .dev_name = "/dev/ttyO1",
163 .flow_cntrl = 1, 163 .flow_cntrl = 1,
164 .baud_rate = 300000, 164 .baud_rate = 3000000,
165}; 165};
166 166
167static struct platform_device wl128x_device = { 167static struct platform_device wl128x_device = {
diff --git a/arch/arm/mach-ux500/pm.c b/arch/arm/mach-ux500/pm.c
index 8538910db202..a970e7fcba9e 100644
--- a/arch/arm/mach-ux500/pm.c
+++ b/arch/arm/mach-ux500/pm.c
@@ -134,8 +134,8 @@ bool prcmu_pending_irq(void)
134 */ 134 */
135bool prcmu_is_cpu_in_wfi(int cpu) 135bool prcmu_is_cpu_in_wfi(int cpu)
136{ 136{
137 return readl(PRCM_ARM_WFI_STANDBY) & cpu ? PRCM_ARM_WFI_STANDBY_WFI1 : 137 return readl(PRCM_ARM_WFI_STANDBY) &
138 PRCM_ARM_WFI_STANDBY_WFI0; 138 (cpu ? PRCM_ARM_WFI_STANDBY_WFI1 : PRCM_ARM_WFI_STANDBY_WFI0);
139} 139}
140 140
141/* 141/*
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts
index 238fbeacd330..5d28e1cdc998 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts
@@ -137,6 +137,10 @@
137 }; 137 };
138}; 138};
139 139
140&scpi_clocks {
141 status = "disabled";
142};
143
140&uart_AO { 144&uart_AO {
141 status = "okay"; 145 status = "okay";
142 pinctrl-0 = <&uart_ao_a_pins>; 146 pinctrl-0 = <&uart_ao_a_pins>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi
index 596240c38a9c..b35307321b63 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi
@@ -55,7 +55,7 @@
55 mboxes = <&mailbox 1 &mailbox 2>; 55 mboxes = <&mailbox 1 &mailbox 2>;
56 shmem = <&cpu_scp_lpri &cpu_scp_hpri>; 56 shmem = <&cpu_scp_lpri &cpu_scp_hpri>;
57 57
58 clocks { 58 scpi_clocks: clocks {
59 compatible = "arm,scpi-clocks"; 59 compatible = "arm,scpi-clocks";
60 60
61 scpi_dvfs: scpi_clocks@0 { 61 scpi_dvfs: scpi_clocks@0 {
diff --git a/arch/arm64/boot/dts/exynos/exynos5433.dtsi b/arch/arm64/boot/dts/exynos/exynos5433.dtsi
index 64226d5ae471..135890cd8a85 100644
--- a/arch/arm64/boot/dts/exynos/exynos5433.dtsi
+++ b/arch/arm64/boot/dts/exynos/exynos5433.dtsi
@@ -1367,7 +1367,7 @@
1367 }; 1367 };
1368 1368
1369 amba { 1369 amba {
1370 compatible = "arm,amba-bus"; 1370 compatible = "simple-bus";
1371 #address-cells = <1>; 1371 #address-cells = <1>;
1372 #size-cells = <1>; 1372 #size-cells = <1>;
1373 ranges; 1373 ranges;
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-ep108.dts b/arch/arm64/boot/dts/xilinx/zynqmp-ep108.dts
index 358089687a69..ef1b9e573af0 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp-ep108.dts
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-ep108.dts
@@ -27,7 +27,7 @@
27 stdout-path = "serial0:115200n8"; 27 stdout-path = "serial0:115200n8";
28 }; 28 };
29 29
30 memory { 30 memory@0 {
31 device_type = "memory"; 31 device_type = "memory";
32 reg = <0x0 0x0 0x0 0x40000000>; 32 reg = <0x0 0x0 0x0 0x40000000>;
33 }; 33 };
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
index 68a908334c7b..54dc28351c8c 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
+++ b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
@@ -72,7 +72,7 @@
72 <1 10 0xf08>; 72 <1 10 0xf08>;
73 }; 73 };
74 74
75 amba_apu { 75 amba_apu: amba_apu@0 {
76 compatible = "simple-bus"; 76 compatible = "simple-bus";
77 #address-cells = <2>; 77 #address-cells = <2>;
78 #size-cells = <1>; 78 #size-cells = <1>;
@@ -175,7 +175,7 @@
175 }; 175 };
176 176
177 i2c0: i2c@ff020000 { 177 i2c0: i2c@ff020000 {
178 compatible = "cdns,i2c-r1p10"; 178 compatible = "cdns,i2c-r1p14", "cdns,i2c-r1p10";
179 status = "disabled"; 179 status = "disabled";
180 interrupt-parent = <&gic>; 180 interrupt-parent = <&gic>;
181 interrupts = <0 17 4>; 181 interrupts = <0 17 4>;
@@ -185,7 +185,7 @@
185 }; 185 };
186 186
187 i2c1: i2c@ff030000 { 187 i2c1: i2c@ff030000 {
188 compatible = "cdns,i2c-r1p10"; 188 compatible = "cdns,i2c-r1p14", "cdns,i2c-r1p10";
189 status = "disabled"; 189 status = "disabled";
190 interrupt-parent = <&gic>; 190 interrupt-parent = <&gic>;
191 interrupts = <0 18 4>; 191 interrupts = <0 18 4>;
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index e5050388e062..4a758cba1262 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -70,9 +70,6 @@ struct kvm_arch {
70 70
71 /* Interrupt controller */ 71 /* Interrupt controller */
72 struct vgic_dist vgic; 72 struct vgic_dist vgic;
73
74 /* Timer */
75 struct arch_timer_kvm timer;
76}; 73};
77 74
78#define KVM_NR_MEM_OBJS 40 75#define KVM_NR_MEM_OBJS 40
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 6f72fe8b0e3e..aa1e6db15a2d 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -236,13 +236,11 @@ static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
236 236
237static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu, 237static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu,
238 kvm_pfn_t pfn, 238 kvm_pfn_t pfn,
239 unsigned long size, 239 unsigned long size)
240 bool ipa_uncached)
241{ 240{
242 void *va = page_address(pfn_to_page(pfn)); 241 void *va = page_address(pfn_to_page(pfn));
243 242
244 if (!vcpu_has_cache_enabled(vcpu) || ipa_uncached) 243 kvm_flush_dcache_to_poc(va, size);
245 kvm_flush_dcache_to_poc(va, size);
246 244
247 if (!icache_is_aliasing()) { /* PIPT */ 245 if (!icache_is_aliasing()) { /* PIPT */
248 flush_icache_range((unsigned long)va, 246 flush_icache_range((unsigned long)va,
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index bfe632808d77..90c39a662379 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -222,7 +222,7 @@ static inline void *phys_to_virt(phys_addr_t x)
222#define _virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) 222#define _virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
223#else 223#else
224#define __virt_to_pgoff(kaddr) (((u64)(kaddr) & ~PAGE_OFFSET) / PAGE_SIZE * sizeof(struct page)) 224#define __virt_to_pgoff(kaddr) (((u64)(kaddr) & ~PAGE_OFFSET) / PAGE_SIZE * sizeof(struct page))
225#define __page_to_voff(page) (((u64)(page) & ~VMEMMAP_START) * PAGE_SIZE / sizeof(struct page)) 225#define __page_to_voff(kaddr) (((u64)(kaddr) & ~VMEMMAP_START) * PAGE_SIZE / sizeof(struct page))
226 226
227#define page_to_virt(page) ((void *)((__page_to_voff(page)) | PAGE_OFFSET)) 227#define page_to_virt(page) ((void *)((__page_to_voff(page)) | PAGE_OFFSET))
228#define virt_to_page(vaddr) ((struct page *)((__virt_to_pgoff(vaddr)) | VMEMMAP_START)) 228#define virt_to_page(vaddr) ((struct page *)((__virt_to_pgoff(vaddr)) | VMEMMAP_START))
diff --git a/arch/arm64/include/asm/virt.h b/arch/arm64/include/asm/virt.h
index fea10736b11f..439f6b5d31f6 100644
--- a/arch/arm64/include/asm/virt.h
+++ b/arch/arm64/include/asm/virt.h
@@ -47,6 +47,7 @@
47#include <asm/ptrace.h> 47#include <asm/ptrace.h>
48#include <asm/sections.h> 48#include <asm/sections.h>
49#include <asm/sysreg.h> 49#include <asm/sysreg.h>
50#include <asm/cpufeature.h>
50 51
51/* 52/*
52 * __boot_cpu_mode records what mode CPUs were booted in. 53 * __boot_cpu_mode records what mode CPUs were booted in.
@@ -80,6 +81,14 @@ static inline bool is_kernel_in_hyp_mode(void)
80 return read_sysreg(CurrentEL) == CurrentEL_EL2; 81 return read_sysreg(CurrentEL) == CurrentEL_EL2;
81} 82}
82 83
84static inline bool has_vhe(void)
85{
86 if (cpus_have_const_cap(ARM64_HAS_VIRT_HOST_EXTN))
87 return true;
88
89 return false;
90}
91
83#ifdef CONFIG_ARM64_VHE 92#ifdef CONFIG_ARM64_VHE
84extern void verify_cpu_run_el(void); 93extern void verify_cpu_run_el(void);
85#else 94#else
diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h
index 3051f86a9b5f..c2860358ae3e 100644
--- a/arch/arm64/include/uapi/asm/kvm.h
+++ b/arch/arm64/include/uapi/asm/kvm.h
@@ -201,10 +201,23 @@ struct kvm_arch_memory_slot {
201#define KVM_DEV_ARM_VGIC_GRP_CPU_REGS 2 201#define KVM_DEV_ARM_VGIC_GRP_CPU_REGS 2
202#define KVM_DEV_ARM_VGIC_CPUID_SHIFT 32 202#define KVM_DEV_ARM_VGIC_CPUID_SHIFT 32
203#define KVM_DEV_ARM_VGIC_CPUID_MASK (0xffULL << KVM_DEV_ARM_VGIC_CPUID_SHIFT) 203#define KVM_DEV_ARM_VGIC_CPUID_MASK (0xffULL << KVM_DEV_ARM_VGIC_CPUID_SHIFT)
204#define KVM_DEV_ARM_VGIC_V3_MPIDR_SHIFT 32
205#define KVM_DEV_ARM_VGIC_V3_MPIDR_MASK \
206 (0xffffffffULL << KVM_DEV_ARM_VGIC_V3_MPIDR_SHIFT)
204#define KVM_DEV_ARM_VGIC_OFFSET_SHIFT 0 207#define KVM_DEV_ARM_VGIC_OFFSET_SHIFT 0
205#define KVM_DEV_ARM_VGIC_OFFSET_MASK (0xffffffffULL << KVM_DEV_ARM_VGIC_OFFSET_SHIFT) 208#define KVM_DEV_ARM_VGIC_OFFSET_MASK (0xffffffffULL << KVM_DEV_ARM_VGIC_OFFSET_SHIFT)
209#define KVM_DEV_ARM_VGIC_SYSREG_INSTR_MASK (0xffff)
206#define KVM_DEV_ARM_VGIC_GRP_NR_IRQS 3 210#define KVM_DEV_ARM_VGIC_GRP_NR_IRQS 3
207#define KVM_DEV_ARM_VGIC_GRP_CTRL 4 211#define KVM_DEV_ARM_VGIC_GRP_CTRL 4
212#define KVM_DEV_ARM_VGIC_GRP_REDIST_REGS 5
213#define KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS 6
214#define KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO 7
215#define KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT 10
216#define KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_MASK \
217 (0x3fffffULL << KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT)
218#define KVM_DEV_ARM_VGIC_LINE_LEVEL_INTID_MASK 0x3ff
219#define VGIC_LEVEL_INFO_LINE_LEVEL 0
220
208#define KVM_DEV_ARM_VGIC_CTRL_INIT 0 221#define KVM_DEV_ARM_VGIC_CTRL_INIT 0
209 222
210/* Device Control API on vcpu fd */ 223/* Device Control API on vcpu fd */
diff --git a/arch/arm64/include/uapi/asm/ptrace.h b/arch/arm64/include/uapi/asm/ptrace.h
index b5c3933ed441..d1ff83dfe5de 100644
--- a/arch/arm64/include/uapi/asm/ptrace.h
+++ b/arch/arm64/include/uapi/asm/ptrace.h
@@ -77,6 +77,7 @@ struct user_fpsimd_state {
77 __uint128_t vregs[32]; 77 __uint128_t vregs[32];
78 __u32 fpsr; 78 __u32 fpsr;
79 __u32 fpcr; 79 __u32 fpcr;
80 __u32 __reserved[2];
80}; 81};
81 82
82struct user_hwdebug_state { 83struct user_hwdebug_state {
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 923841ffe4a9..43512d4d7df2 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -683,7 +683,7 @@ el0_inv:
683 mov x0, sp 683 mov x0, sp
684 mov x1, #BAD_SYNC 684 mov x1, #BAD_SYNC
685 mov x2, x25 685 mov x2, x25
686 bl bad_mode 686 bl bad_el0_sync
687 b ret_to_user 687 b ret_to_user
688ENDPROC(el0_sync) 688ENDPROC(el0_sync)
689 689
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index fc35e06ccaac..a22161ccf447 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -551,6 +551,8 @@ static int hw_break_set(struct task_struct *target,
551 /* (address, ctrl) registers */ 551 /* (address, ctrl) registers */
552 limit = regset->n * regset->size; 552 limit = regset->n * regset->size;
553 while (count && offset < limit) { 553 while (count && offset < limit) {
554 if (count < PTRACE_HBP_ADDR_SZ)
555 return -EINVAL;
554 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &addr, 556 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &addr,
555 offset, offset + PTRACE_HBP_ADDR_SZ); 557 offset, offset + PTRACE_HBP_ADDR_SZ);
556 if (ret) 558 if (ret)
@@ -560,6 +562,8 @@ static int hw_break_set(struct task_struct *target,
560 return ret; 562 return ret;
561 offset += PTRACE_HBP_ADDR_SZ; 563 offset += PTRACE_HBP_ADDR_SZ;
562 564
565 if (!count)
566 break;
563 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl, 567 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl,
564 offset, offset + PTRACE_HBP_CTRL_SZ); 568 offset, offset + PTRACE_HBP_CTRL_SZ);
565 if (ret) 569 if (ret)
@@ -596,7 +600,7 @@ static int gpr_set(struct task_struct *target, const struct user_regset *regset,
596 const void *kbuf, const void __user *ubuf) 600 const void *kbuf, const void __user *ubuf)
597{ 601{
598 int ret; 602 int ret;
599 struct user_pt_regs newregs; 603 struct user_pt_regs newregs = task_pt_regs(target)->user_regs;
600 604
601 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newregs, 0, -1); 605 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newregs, 0, -1);
602 if (ret) 606 if (ret)
@@ -626,7 +630,8 @@ static int fpr_set(struct task_struct *target, const struct user_regset *regset,
626 const void *kbuf, const void __user *ubuf) 630 const void *kbuf, const void __user *ubuf)
627{ 631{
628 int ret; 632 int ret;
629 struct user_fpsimd_state newstate; 633 struct user_fpsimd_state newstate =
634 target->thread.fpsimd_state.user_fpsimd;
630 635
631 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate, 0, -1); 636 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate, 0, -1);
632 if (ret) 637 if (ret)
@@ -650,7 +655,7 @@ static int tls_set(struct task_struct *target, const struct user_regset *regset,
650 const void *kbuf, const void __user *ubuf) 655 const void *kbuf, const void __user *ubuf)
651{ 656{
652 int ret; 657 int ret;
653 unsigned long tls; 658 unsigned long tls = target->thread.tp_value;
654 659
655 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1); 660 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
656 if (ret) 661 if (ret)
@@ -676,7 +681,8 @@ static int system_call_set(struct task_struct *target,
676 unsigned int pos, unsigned int count, 681 unsigned int pos, unsigned int count,
677 const void *kbuf, const void __user *ubuf) 682 const void *kbuf, const void __user *ubuf)
678{ 683{
679 int syscallno, ret; 684 int syscallno = task_pt_regs(target)->syscallno;
685 int ret;
680 686
681 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &syscallno, 0, -1); 687 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &syscallno, 0, -1);
682 if (ret) 688 if (ret)
@@ -948,7 +954,7 @@ static int compat_tls_set(struct task_struct *target,
948 const void __user *ubuf) 954 const void __user *ubuf)
949{ 955{
950 int ret; 956 int ret;
951 compat_ulong_t tls; 957 compat_ulong_t tls = target->thread.tp_value;
952 958
953 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1); 959 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
954 if (ret) 960 if (ret)
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index 5b830be79c01..659b2e6b6cf7 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -604,17 +604,34 @@ const char *esr_get_class_string(u32 esr)
604} 604}
605 605
606/* 606/*
607 * bad_mode handles the impossible case in the exception vector. 607 * bad_mode handles the impossible case in the exception vector. This is always
608 * fatal.
608 */ 609 */
609asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr) 610asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
610{ 611{
611 siginfo_t info;
612 void __user *pc = (void __user *)instruction_pointer(regs);
613 console_verbose(); 612 console_verbose();
614 613
615 pr_crit("Bad mode in %s handler detected on CPU%d, code 0x%08x -- %s\n", 614 pr_crit("Bad mode in %s handler detected on CPU%d, code 0x%08x -- %s\n",
616 handler[reason], smp_processor_id(), esr, 615 handler[reason], smp_processor_id(), esr,
617 esr_get_class_string(esr)); 616 esr_get_class_string(esr));
617
618 die("Oops - bad mode", regs, 0);
619 local_irq_disable();
620 panic("bad mode");
621}
622
623/*
624 * bad_el0_sync handles unexpected, but potentially recoverable synchronous
625 * exceptions taken from EL0. Unlike bad_mode, this returns.
626 */
627asmlinkage void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr)
628{
629 siginfo_t info;
630 void __user *pc = (void __user *)instruction_pointer(regs);
631 console_verbose();
632
633 pr_crit("Bad EL0 synchronous exception detected on CPU%d, code 0x%08x -- %s\n",
634 smp_processor_id(), esr, esr_get_class_string(esr));
618 __show_regs(regs); 635 __show_regs(regs);
619 636
620 info.si_signo = SIGILL; 637 info.si_signo = SIGILL;
@@ -622,7 +639,10 @@ asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
622 info.si_code = ILL_ILLOPC; 639 info.si_code = ILL_ILLOPC;
623 info.si_addr = pc; 640 info.si_addr = pc;
624 641
625 arm64_notify_die("Oops - bad mode", regs, &info, 0); 642 current->thread.fault_address = 0;
643 current->thread.fault_code = 0;
644
645 force_sig_info(info.si_signo, &info, current);
626} 646}
627 647
628void __pte_error(const char *file, int line, unsigned long val) 648void __pte_error(const char *file, int line, unsigned long val)
diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile
index d50a82a16ff6..afd51bebb9c5 100644
--- a/arch/arm64/kvm/Makefile
+++ b/arch/arm64/kvm/Makefile
@@ -2,7 +2,7 @@
2# Makefile for Kernel-based Virtual Machine module 2# Makefile for Kernel-based Virtual Machine module
3# 3#
4 4
5ccflags-y += -Iarch/arm64/kvm 5ccflags-y += -Iarch/arm64/kvm -Ivirt/kvm/arm/vgic
6CFLAGS_arm.o := -I. 6CFLAGS_arm.o := -I.
7CFLAGS_mmu.o := -I. 7CFLAGS_mmu.o := -I.
8 8
@@ -19,6 +19,7 @@ kvm-$(CONFIG_KVM_ARM_HOST) += $(ARM)/psci.o $(ARM)/perf.o
19kvm-$(CONFIG_KVM_ARM_HOST) += inject_fault.o regmap.o 19kvm-$(CONFIG_KVM_ARM_HOST) += inject_fault.o regmap.o
20kvm-$(CONFIG_KVM_ARM_HOST) += hyp.o hyp-init.o handle_exit.o 20kvm-$(CONFIG_KVM_ARM_HOST) += hyp.o hyp-init.o handle_exit.o
21kvm-$(CONFIG_KVM_ARM_HOST) += guest.o debug.o reset.o sys_regs.o sys_regs_generic_v8.o 21kvm-$(CONFIG_KVM_ARM_HOST) += guest.o debug.o reset.o sys_regs.o sys_regs_generic_v8.o
22kvm-$(CONFIG_KVM_ARM_HOST) += vgic-sys-reg-v3.o
22kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/aarch32.o 23kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/aarch32.o
23 24
24kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic.o 25kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic.o
@@ -31,6 +32,7 @@ kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-mmio-v2.o
31kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-mmio-v3.o 32kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-mmio-v3.o
32kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-kvm-device.o 33kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-kvm-device.o
33kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-its.o 34kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-its.o
35kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-debug.o
34kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/irqchip.o 36kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/irqchip.o
35kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/arch_timer.o 37kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/arch_timer.o
36kvm-$(CONFIG_KVM_ARM_PMU) += $(KVM)/arm/pmu.o 38kvm-$(CONFIG_KVM_ARM_PMU) += $(KVM)/arm/pmu.o
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
index e95d4f68bf54..d9e9697de1b2 100644
--- a/arch/arm64/kvm/reset.c
+++ b/arch/arm64/kvm/reset.c
@@ -46,6 +46,11 @@ static const struct kvm_regs default_regs_reset32 = {
46 COMPAT_PSR_I_BIT | COMPAT_PSR_F_BIT), 46 COMPAT_PSR_I_BIT | COMPAT_PSR_F_BIT),
47}; 47};
48 48
49static const struct kvm_irq_level default_ptimer_irq = {
50 .irq = 30,
51 .level = 1,
52};
53
49static const struct kvm_irq_level default_vtimer_irq = { 54static const struct kvm_irq_level default_vtimer_irq = {
50 .irq = 27, 55 .irq = 27,
51 .level = 1, 56 .level = 1,
@@ -104,6 +109,7 @@ int kvm_arch_dev_ioctl_check_extension(struct kvm *kvm, long ext)
104int kvm_reset_vcpu(struct kvm_vcpu *vcpu) 109int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
105{ 110{
106 const struct kvm_irq_level *cpu_vtimer_irq; 111 const struct kvm_irq_level *cpu_vtimer_irq;
112 const struct kvm_irq_level *cpu_ptimer_irq;
107 const struct kvm_regs *cpu_reset; 113 const struct kvm_regs *cpu_reset;
108 114
109 switch (vcpu->arch.target) { 115 switch (vcpu->arch.target) {
@@ -117,6 +123,7 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
117 } 123 }
118 124
119 cpu_vtimer_irq = &default_vtimer_irq; 125 cpu_vtimer_irq = &default_vtimer_irq;
126 cpu_ptimer_irq = &default_ptimer_irq;
120 break; 127 break;
121 } 128 }
122 129
@@ -130,5 +137,5 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
130 kvm_pmu_vcpu_reset(vcpu); 137 kvm_pmu_vcpu_reset(vcpu);
131 138
132 /* Reset timer */ 139 /* Reset timer */
133 return kvm_timer_vcpu_reset(vcpu, cpu_vtimer_irq); 140 return kvm_timer_vcpu_reset(vcpu, cpu_vtimer_irq, cpu_ptimer_irq);
134} 141}
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 87e7e6608cd8..0e26f8c2b56f 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -820,6 +820,61 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
820 CRm((0b1100 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)), \ 820 CRm((0b1100 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)), \
821 access_pmu_evtyper, reset_unknown, (PMEVTYPER0_EL0 + n), } 821 access_pmu_evtyper, reset_unknown, (PMEVTYPER0_EL0 + n), }
822 822
823static bool access_cntp_tval(struct kvm_vcpu *vcpu,
824 struct sys_reg_params *p,
825 const struct sys_reg_desc *r)
826{
827 struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
828 u64 now = kvm_phys_timer_read();
829
830 if (p->is_write)
831 ptimer->cnt_cval = p->regval + now;
832 else
833 p->regval = ptimer->cnt_cval - now;
834
835 return true;
836}
837
838static bool access_cntp_ctl(struct kvm_vcpu *vcpu,
839 struct sys_reg_params *p,
840 const struct sys_reg_desc *r)
841{
842 struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
843
844 if (p->is_write) {
845 /* ISTATUS bit is read-only */
846 ptimer->cnt_ctl = p->regval & ~ARCH_TIMER_CTRL_IT_STAT;
847 } else {
848 u64 now = kvm_phys_timer_read();
849
850 p->regval = ptimer->cnt_ctl;
851 /*
852 * Set ISTATUS bit if it's expired.
853 * Note that according to ARMv8 ARM Issue A.k, ISTATUS bit is
854 * UNKNOWN when ENABLE bit is 0, so we chose to set ISTATUS bit
855 * regardless of ENABLE bit for our implementation convenience.
856 */
857 if (ptimer->cnt_cval <= now)
858 p->regval |= ARCH_TIMER_CTRL_IT_STAT;
859 }
860
861 return true;
862}
863
864static bool access_cntp_cval(struct kvm_vcpu *vcpu,
865 struct sys_reg_params *p,
866 const struct sys_reg_desc *r)
867{
868 struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
869
870 if (p->is_write)
871 ptimer->cnt_cval = p->regval;
872 else
873 p->regval = ptimer->cnt_cval;
874
875 return true;
876}
877
823/* 878/*
824 * Architected system registers. 879 * Architected system registers.
825 * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2 880 * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
@@ -1029,6 +1084,16 @@ static const struct sys_reg_desc sys_reg_descs[] = {
1029 { Op0(0b11), Op1(0b011), CRn(0b1101), CRm(0b0000), Op2(0b011), 1084 { Op0(0b11), Op1(0b011), CRn(0b1101), CRm(0b0000), Op2(0b011),
1030 NULL, reset_unknown, TPIDRRO_EL0 }, 1085 NULL, reset_unknown, TPIDRRO_EL0 },
1031 1086
1087 /* CNTP_TVAL_EL0 */
1088 { Op0(0b11), Op1(0b011), CRn(0b1110), CRm(0b0010), Op2(0b000),
1089 access_cntp_tval },
1090 /* CNTP_CTL_EL0 */
1091 { Op0(0b11), Op1(0b011), CRn(0b1110), CRm(0b0010), Op2(0b001),
1092 access_cntp_ctl },
1093 /* CNTP_CVAL_EL0 */
1094 { Op0(0b11), Op1(0b011), CRn(0b1110), CRm(0b0010), Op2(0b010),
1095 access_cntp_cval },
1096
1032 /* PMEVCNTRn_EL0 */ 1097 /* PMEVCNTRn_EL0 */
1033 PMU_PMEVCNTR_EL0(0), 1098 PMU_PMEVCNTR_EL0(0),
1034 PMU_PMEVCNTR_EL0(1), 1099 PMU_PMEVCNTR_EL0(1),
@@ -1795,6 +1860,17 @@ static bool index_to_params(u64 id, struct sys_reg_params *params)
1795 } 1860 }
1796} 1861}
1797 1862
1863const struct sys_reg_desc *find_reg_by_id(u64 id,
1864 struct sys_reg_params *params,
1865 const struct sys_reg_desc table[],
1866 unsigned int num)
1867{
1868 if (!index_to_params(id, params))
1869 return NULL;
1870
1871 return find_reg(params, table, num);
1872}
1873
1798/* Decode an index value, and find the sys_reg_desc entry. */ 1874/* Decode an index value, and find the sys_reg_desc entry. */
1799static const struct sys_reg_desc *index_to_sys_reg_desc(struct kvm_vcpu *vcpu, 1875static const struct sys_reg_desc *index_to_sys_reg_desc(struct kvm_vcpu *vcpu,
1800 u64 id) 1876 u64 id)
@@ -1807,11 +1883,8 @@ static const struct sys_reg_desc *index_to_sys_reg_desc(struct kvm_vcpu *vcpu,
1807 if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM64_SYSREG) 1883 if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM64_SYSREG)
1808 return NULL; 1884 return NULL;
1809 1885
1810 if (!index_to_params(id, &params))
1811 return NULL;
1812
1813 table = get_target_table(vcpu->arch.target, true, &num); 1886 table = get_target_table(vcpu->arch.target, true, &num);
1814 r = find_reg(&params, table, num); 1887 r = find_reg_by_id(id, &params, table, num);
1815 if (!r) 1888 if (!r)
1816 r = find_reg(&params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs)); 1889 r = find_reg(&params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
1817 1890
@@ -1918,10 +1991,8 @@ static int get_invariant_sys_reg(u64 id, void __user *uaddr)
1918 struct sys_reg_params params; 1991 struct sys_reg_params params;
1919 const struct sys_reg_desc *r; 1992 const struct sys_reg_desc *r;
1920 1993
1921 if (!index_to_params(id, &params)) 1994 r = find_reg_by_id(id, &params, invariant_sys_regs,
1922 return -ENOENT; 1995 ARRAY_SIZE(invariant_sys_regs));
1923
1924 r = find_reg(&params, invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs));
1925 if (!r) 1996 if (!r)
1926 return -ENOENT; 1997 return -ENOENT;
1927 1998
@@ -1935,9 +2006,8 @@ static int set_invariant_sys_reg(u64 id, void __user *uaddr)
1935 int err; 2006 int err;
1936 u64 val = 0; /* Make sure high bits are 0 for 32-bit regs */ 2007 u64 val = 0; /* Make sure high bits are 0 for 32-bit regs */
1937 2008
1938 if (!index_to_params(id, &params)) 2009 r = find_reg_by_id(id, &params, invariant_sys_regs,
1939 return -ENOENT; 2010 ARRAY_SIZE(invariant_sys_regs));
1940 r = find_reg(&params, invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs));
1941 if (!r) 2011 if (!r)
1942 return -ENOENT; 2012 return -ENOENT;
1943 2013
diff --git a/arch/arm64/kvm/sys_regs.h b/arch/arm64/kvm/sys_regs.h
index dbbb01cfbee9..9c6ffd0f0196 100644
--- a/arch/arm64/kvm/sys_regs.h
+++ b/arch/arm64/kvm/sys_regs.h
@@ -136,6 +136,10 @@ static inline int cmp_sys_reg(const struct sys_reg_desc *i1,
136 return i1->Op2 - i2->Op2; 136 return i1->Op2 - i2->Op2;
137} 137}
138 138
139const struct sys_reg_desc *find_reg_by_id(u64 id,
140 struct sys_reg_params *params,
141 const struct sys_reg_desc table[],
142 unsigned int num);
139 143
140#define Op0(_x) .Op0 = _x 144#define Op0(_x) .Op0 = _x
141#define Op1(_x) .Op1 = _x 145#define Op1(_x) .Op1 = _x
diff --git a/arch/arm64/kvm/vgic-sys-reg-v3.c b/arch/arm64/kvm/vgic-sys-reg-v3.c
new file mode 100644
index 000000000000..79f37e37d367
--- /dev/null
+++ b/arch/arm64/kvm/vgic-sys-reg-v3.c
@@ -0,0 +1,346 @@
1/*
2 * VGIC system registers handling functions for AArch64 mode
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#include <linux/irqchip/arm-gic-v3.h>
15#include <linux/kvm.h>
16#include <linux/kvm_host.h>
17#include <asm/kvm_emulate.h>
18#include "vgic.h"
19#include "sys_regs.h"
20
21static bool access_gic_ctlr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
22 const struct sys_reg_desc *r)
23{
24 u32 host_pri_bits, host_id_bits, host_seis, host_a3v, seis, a3v;
25 struct vgic_cpu *vgic_v3_cpu = &vcpu->arch.vgic_cpu;
26 struct vgic_vmcr vmcr;
27 u64 val;
28
29 vgic_get_vmcr(vcpu, &vmcr);
30 if (p->is_write) {
31 val = p->regval;
32
33 /*
34 * Disallow restoring VM state if not supported by this
35 * hardware.
36 */
37 host_pri_bits = ((val & ICC_CTLR_EL1_PRI_BITS_MASK) >>
38 ICC_CTLR_EL1_PRI_BITS_SHIFT) + 1;
39 if (host_pri_bits > vgic_v3_cpu->num_pri_bits)
40 return false;
41
42 vgic_v3_cpu->num_pri_bits = host_pri_bits;
43
44 host_id_bits = (val & ICC_CTLR_EL1_ID_BITS_MASK) >>
45 ICC_CTLR_EL1_ID_BITS_SHIFT;
46 if (host_id_bits > vgic_v3_cpu->num_id_bits)
47 return false;
48
49 vgic_v3_cpu->num_id_bits = host_id_bits;
50
51 host_seis = ((kvm_vgic_global_state.ich_vtr_el2 &
52 ICH_VTR_SEIS_MASK) >> ICH_VTR_SEIS_SHIFT);
53 seis = (val & ICC_CTLR_EL1_SEIS_MASK) >>
54 ICC_CTLR_EL1_SEIS_SHIFT;
55 if (host_seis != seis)
56 return false;
57
58 host_a3v = ((kvm_vgic_global_state.ich_vtr_el2 &
59 ICH_VTR_A3V_MASK) >> ICH_VTR_A3V_SHIFT);
60 a3v = (val & ICC_CTLR_EL1_A3V_MASK) >> ICC_CTLR_EL1_A3V_SHIFT;
61 if (host_a3v != a3v)
62 return false;
63
64 /*
65 * Here set VMCR.CTLR in ICC_CTLR_EL1 layout.
66 * The vgic_set_vmcr() will convert to ICH_VMCR layout.
67 */
68 vmcr.ctlr = val & ICC_CTLR_EL1_CBPR_MASK;
69 vmcr.ctlr |= val & ICC_CTLR_EL1_EOImode_MASK;
70 vgic_set_vmcr(vcpu, &vmcr);
71 } else {
72 val = 0;
73 val |= (vgic_v3_cpu->num_pri_bits - 1) <<
74 ICC_CTLR_EL1_PRI_BITS_SHIFT;
75 val |= vgic_v3_cpu->num_id_bits << ICC_CTLR_EL1_ID_BITS_SHIFT;
76 val |= ((kvm_vgic_global_state.ich_vtr_el2 &
77 ICH_VTR_SEIS_MASK) >> ICH_VTR_SEIS_SHIFT) <<
78 ICC_CTLR_EL1_SEIS_SHIFT;
79 val |= ((kvm_vgic_global_state.ich_vtr_el2 &
80 ICH_VTR_A3V_MASK) >> ICH_VTR_A3V_SHIFT) <<
81 ICC_CTLR_EL1_A3V_SHIFT;
82 /*
83 * The VMCR.CTLR value is in ICC_CTLR_EL1 layout.
84 * Extract it directly using ICC_CTLR_EL1 reg definitions.
85 */
86 val |= vmcr.ctlr & ICC_CTLR_EL1_CBPR_MASK;
87 val |= vmcr.ctlr & ICC_CTLR_EL1_EOImode_MASK;
88
89 p->regval = val;
90 }
91
92 return true;
93}
94
95static bool access_gic_pmr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
96 const struct sys_reg_desc *r)
97{
98 struct vgic_vmcr vmcr;
99
100 vgic_get_vmcr(vcpu, &vmcr);
101 if (p->is_write) {
102 vmcr.pmr = (p->regval & ICC_PMR_EL1_MASK) >> ICC_PMR_EL1_SHIFT;
103 vgic_set_vmcr(vcpu, &vmcr);
104 } else {
105 p->regval = (vmcr.pmr << ICC_PMR_EL1_SHIFT) & ICC_PMR_EL1_MASK;
106 }
107
108 return true;
109}
110
111static bool access_gic_bpr0(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
112 const struct sys_reg_desc *r)
113{
114 struct vgic_vmcr vmcr;
115
116 vgic_get_vmcr(vcpu, &vmcr);
117 if (p->is_write) {
118 vmcr.bpr = (p->regval & ICC_BPR0_EL1_MASK) >>
119 ICC_BPR0_EL1_SHIFT;
120 vgic_set_vmcr(vcpu, &vmcr);
121 } else {
122 p->regval = (vmcr.bpr << ICC_BPR0_EL1_SHIFT) &
123 ICC_BPR0_EL1_MASK;
124 }
125
126 return true;
127}
128
129static bool access_gic_bpr1(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
130 const struct sys_reg_desc *r)
131{
132 struct vgic_vmcr vmcr;
133
134 if (!p->is_write)
135 p->regval = 0;
136
137 vgic_get_vmcr(vcpu, &vmcr);
138 if (!((vmcr.ctlr & ICH_VMCR_CBPR_MASK) >> ICH_VMCR_CBPR_SHIFT)) {
139 if (p->is_write) {
140 vmcr.abpr = (p->regval & ICC_BPR1_EL1_MASK) >>
141 ICC_BPR1_EL1_SHIFT;
142 vgic_set_vmcr(vcpu, &vmcr);
143 } else {
144 p->regval = (vmcr.abpr << ICC_BPR1_EL1_SHIFT) &
145 ICC_BPR1_EL1_MASK;
146 }
147 } else {
148 if (!p->is_write)
149 p->regval = min((vmcr.bpr + 1), 7U);
150 }
151
152 return true;
153}
154
155static bool access_gic_grpen0(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
156 const struct sys_reg_desc *r)
157{
158 struct vgic_vmcr vmcr;
159
160 vgic_get_vmcr(vcpu, &vmcr);
161 if (p->is_write) {
162 vmcr.grpen0 = (p->regval & ICC_IGRPEN0_EL1_MASK) >>
163 ICC_IGRPEN0_EL1_SHIFT;
164 vgic_set_vmcr(vcpu, &vmcr);
165 } else {
166 p->regval = (vmcr.grpen0 << ICC_IGRPEN0_EL1_SHIFT) &
167 ICC_IGRPEN0_EL1_MASK;
168 }
169
170 return true;
171}
172
173static bool access_gic_grpen1(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
174 const struct sys_reg_desc *r)
175{
176 struct vgic_vmcr vmcr;
177
178 vgic_get_vmcr(vcpu, &vmcr);
179 if (p->is_write) {
180 vmcr.grpen1 = (p->regval & ICC_IGRPEN1_EL1_MASK) >>
181 ICC_IGRPEN1_EL1_SHIFT;
182 vgic_set_vmcr(vcpu, &vmcr);
183 } else {
184 p->regval = (vmcr.grpen1 << ICC_IGRPEN1_EL1_SHIFT) &
185 ICC_IGRPEN1_EL1_MASK;
186 }
187
188 return true;
189}
190
191static void vgic_v3_access_apr_reg(struct kvm_vcpu *vcpu,
192 struct sys_reg_params *p, u8 apr, u8 idx)
193{
194 struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;
195 uint32_t *ap_reg;
196
197 if (apr)
198 ap_reg = &vgicv3->vgic_ap1r[idx];
199 else
200 ap_reg = &vgicv3->vgic_ap0r[idx];
201
202 if (p->is_write)
203 *ap_reg = p->regval;
204 else
205 p->regval = *ap_reg;
206}
207
208static bool access_gic_aprn(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
209 const struct sys_reg_desc *r, u8 apr)
210{
211 struct vgic_cpu *vgic_v3_cpu = &vcpu->arch.vgic_cpu;
212 u8 idx = r->Op2 & 3;
213
214 /*
215 * num_pri_bits are initialized with HW supported values.
216 * We can rely safely on num_pri_bits even if VM has not
217 * restored ICC_CTLR_EL1 before restoring APnR registers.
218 */
219 switch (vgic_v3_cpu->num_pri_bits) {
220 case 7:
221 vgic_v3_access_apr_reg(vcpu, p, apr, idx);
222 break;
223 case 6:
224 if (idx > 1)
225 goto err;
226 vgic_v3_access_apr_reg(vcpu, p, apr, idx);
227 break;
228 default:
229 if (idx > 0)
230 goto err;
231 vgic_v3_access_apr_reg(vcpu, p, apr, idx);
232 }
233
234 return true;
235err:
236 if (!p->is_write)
237 p->regval = 0;
238
239 return false;
240}
241
242static bool access_gic_ap0r(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
243 const struct sys_reg_desc *r)
244
245{
246 return access_gic_aprn(vcpu, p, r, 0);
247}
248
249static bool access_gic_ap1r(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
250 const struct sys_reg_desc *r)
251{
252 return access_gic_aprn(vcpu, p, r, 1);
253}
254
255static bool access_gic_sre(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
256 const struct sys_reg_desc *r)
257{
258 struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;
259
260 /* Validate SRE bit */
261 if (p->is_write) {
262 if (!(p->regval & ICC_SRE_EL1_SRE))
263 return false;
264 } else {
265 p->regval = vgicv3->vgic_sre;
266 }
267
268 return true;
269}
270static const struct sys_reg_desc gic_v3_icc_reg_descs[] = {
271 /* ICC_PMR_EL1 */
272 { Op0(3), Op1(0), CRn(4), CRm(6), Op2(0), access_gic_pmr },
273 /* ICC_BPR0_EL1 */
274 { Op0(3), Op1(0), CRn(12), CRm(8), Op2(3), access_gic_bpr0 },
275 /* ICC_AP0R0_EL1 */
276 { Op0(3), Op1(0), CRn(12), CRm(8), Op2(4), access_gic_ap0r },
277 /* ICC_AP0R1_EL1 */
278 { Op0(3), Op1(0), CRn(12), CRm(8), Op2(5), access_gic_ap0r },
279 /* ICC_AP0R2_EL1 */
280 { Op0(3), Op1(0), CRn(12), CRm(8), Op2(6), access_gic_ap0r },
281 /* ICC_AP0R3_EL1 */
282 { Op0(3), Op1(0), CRn(12), CRm(8), Op2(7), access_gic_ap0r },
283 /* ICC_AP1R0_EL1 */
284 { Op0(3), Op1(0), CRn(12), CRm(9), Op2(0), access_gic_ap1r },
285 /* ICC_AP1R1_EL1 */
286 { Op0(3), Op1(0), CRn(12), CRm(9), Op2(1), access_gic_ap1r },
287 /* ICC_AP1R2_EL1 */
288 { Op0(3), Op1(0), CRn(12), CRm(9), Op2(2), access_gic_ap1r },
289 /* ICC_AP1R3_EL1 */
290 { Op0(3), Op1(0), CRn(12), CRm(9), Op2(3), access_gic_ap1r },
291 /* ICC_BPR1_EL1 */
292 { Op0(3), Op1(0), CRn(12), CRm(12), Op2(3), access_gic_bpr1 },
293 /* ICC_CTLR_EL1 */
294 { Op0(3), Op1(0), CRn(12), CRm(12), Op2(4), access_gic_ctlr },
295 /* ICC_SRE_EL1 */
296 { Op0(3), Op1(0), CRn(12), CRm(12), Op2(5), access_gic_sre },
297 /* ICC_IGRPEN0_EL1 */
298 { Op0(3), Op1(0), CRn(12), CRm(12), Op2(6), access_gic_grpen0 },
299 /* ICC_GRPEN1_EL1 */
300 { Op0(3), Op1(0), CRn(12), CRm(12), Op2(7), access_gic_grpen1 },
301};
302
303int vgic_v3_has_cpu_sysregs_attr(struct kvm_vcpu *vcpu, bool is_write, u64 id,
304 u64 *reg)
305{
306 struct sys_reg_params params;
307 u64 sysreg = (id & KVM_DEV_ARM_VGIC_SYSREG_MASK) | KVM_REG_SIZE_U64;
308
309 params.regval = *reg;
310 params.is_write = is_write;
311 params.is_aarch32 = false;
312 params.is_32bit = false;
313
314 if (find_reg_by_id(sysreg, &params, gic_v3_icc_reg_descs,
315 ARRAY_SIZE(gic_v3_icc_reg_descs)))
316 return 0;
317
318 return -ENXIO;
319}
320
321int vgic_v3_cpu_sysregs_uaccess(struct kvm_vcpu *vcpu, bool is_write, u64 id,
322 u64 *reg)
323{
324 struct sys_reg_params params;
325 const struct sys_reg_desc *r;
326 u64 sysreg = (id & KVM_DEV_ARM_VGIC_SYSREG_MASK) | KVM_REG_SIZE_U64;
327
328 if (is_write)
329 params.regval = *reg;
330 params.is_write = is_write;
331 params.is_aarch32 = false;
332 params.is_32bit = false;
333
334 r = find_reg_by_id(sysreg, &params, gic_v3_icc_reg_descs,
335 ARRAY_SIZE(gic_v3_icc_reg_descs));
336 if (!r)
337 return -ENXIO;
338
339 if (!r->access(vcpu, &params, r))
340 return -EINVAL;
341
342 if (!is_write)
343 *reg = params.regval;
344
345 return 0;
346}
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 716d1226ba69..380ebe705093 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -404,6 +404,8 @@ void __init mem_init(void)
404 if (swiotlb_force == SWIOTLB_FORCE || 404 if (swiotlb_force == SWIOTLB_FORCE ||
405 max_pfn > (arm64_dma_phys_limit >> PAGE_SHIFT)) 405 max_pfn > (arm64_dma_phys_limit >> PAGE_SHIFT))
406 swiotlb_init(1); 406 swiotlb_init(1);
407 else
408 swiotlb_force = SWIOTLB_NO_FORCE;
407 409
408 set_max_mapnr(pfn_to_page(max_pfn) - mem_map); 410 set_max_mapnr(pfn_to_page(max_pfn) - mem_map);
409 411
diff --git a/arch/powerpc/include/asm/book3s/64/hash-4k.h b/arch/powerpc/include/asm/book3s/64/hash-4k.h
index 1c64bc6330bc..0c4e470571ca 100644
--- a/arch/powerpc/include/asm/book3s/64/hash-4k.h
+++ b/arch/powerpc/include/asm/book3s/64/hash-4k.h
@@ -36,12 +36,13 @@
36#ifdef CONFIG_HUGETLB_PAGE 36#ifdef CONFIG_HUGETLB_PAGE
37static inline int hash__hugepd_ok(hugepd_t hpd) 37static inline int hash__hugepd_ok(hugepd_t hpd)
38{ 38{
39 unsigned long hpdval = hpd_val(hpd);
39 /* 40 /*
40 * if it is not a pte and have hugepd shift mask 41 * if it is not a pte and have hugepd shift mask
41 * set, then it is a hugepd directory pointer 42 * set, then it is a hugepd directory pointer
42 */ 43 */
43 if (!(hpd.pd & _PAGE_PTE) && 44 if (!(hpdval & _PAGE_PTE) &&
44 ((hpd.pd & HUGEPD_SHIFT_MASK) != 0)) 45 ((hpdval & HUGEPD_SHIFT_MASK) != 0))
45 return true; 46 return true;
46 return false; 47 return false;
47} 48}
diff --git a/arch/powerpc/include/asm/book3s/64/hash.h b/arch/powerpc/include/asm/book3s/64/hash.h
index f61cad3de4e6..4c935f7504f7 100644
--- a/arch/powerpc/include/asm/book3s/64/hash.h
+++ b/arch/powerpc/include/asm/book3s/64/hash.h
@@ -201,6 +201,10 @@ extern int __meminit hash__vmemmap_create_mapping(unsigned long start,
201 unsigned long phys); 201 unsigned long phys);
202extern void hash__vmemmap_remove_mapping(unsigned long start, 202extern void hash__vmemmap_remove_mapping(unsigned long start,
203 unsigned long page_size); 203 unsigned long page_size);
204
205int hash__create_section_mapping(unsigned long start, unsigned long end);
206int hash__remove_section_mapping(unsigned long start, unsigned long end);
207
204#endif /* !__ASSEMBLY__ */ 208#endif /* !__ASSEMBLY__ */
205#endif /* __KERNEL__ */ 209#endif /* __KERNEL__ */
206#endif /* _ASM_POWERPC_BOOK3S_64_HASH_H */ 210#endif /* _ASM_POWERPC_BOOK3S_64_HASH_H */
diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h
index ede215167d1a..7f4025a6c69e 100644
--- a/arch/powerpc/include/asm/hugetlb.h
+++ b/arch/powerpc/include/asm/hugetlb.h
@@ -21,12 +21,12 @@ static inline pte_t *hugepd_page(hugepd_t hpd)
21 * We have only four bits to encode, MMU page size 21 * We have only four bits to encode, MMU page size
22 */ 22 */
23 BUILD_BUG_ON((MMU_PAGE_COUNT - 1) > 0xf); 23 BUILD_BUG_ON((MMU_PAGE_COUNT - 1) > 0xf);
24 return __va(hpd.pd & HUGEPD_ADDR_MASK); 24 return __va(hpd_val(hpd) & HUGEPD_ADDR_MASK);
25} 25}
26 26
27static inline unsigned int hugepd_mmu_psize(hugepd_t hpd) 27static inline unsigned int hugepd_mmu_psize(hugepd_t hpd)
28{ 28{
29 return (hpd.pd & HUGEPD_SHIFT_MASK) >> 2; 29 return (hpd_val(hpd) & HUGEPD_SHIFT_MASK) >> 2;
30} 30}
31 31
32static inline unsigned int hugepd_shift(hugepd_t hpd) 32static inline unsigned int hugepd_shift(hugepd_t hpd)
@@ -52,18 +52,20 @@ static inline pte_t *hugepd_page(hugepd_t hpd)
52{ 52{
53 BUG_ON(!hugepd_ok(hpd)); 53 BUG_ON(!hugepd_ok(hpd));
54#ifdef CONFIG_PPC_8xx 54#ifdef CONFIG_PPC_8xx
55 return (pte_t *)__va(hpd.pd & ~(_PMD_PAGE_MASK | _PMD_PRESENT_MASK)); 55 return (pte_t *)__va(hpd_val(hpd) &
56 ~(_PMD_PAGE_MASK | _PMD_PRESENT_MASK));
56#else 57#else
57 return (pte_t *)((hpd.pd & ~HUGEPD_SHIFT_MASK) | PD_HUGE); 58 return (pte_t *)((hpd_val(hpd) &
59 ~HUGEPD_SHIFT_MASK) | PD_HUGE);
58#endif 60#endif
59} 61}
60 62
61static inline unsigned int hugepd_shift(hugepd_t hpd) 63static inline unsigned int hugepd_shift(hugepd_t hpd)
62{ 64{
63#ifdef CONFIG_PPC_8xx 65#ifdef CONFIG_PPC_8xx
64 return ((hpd.pd & _PMD_PAGE_MASK) >> 1) + 17; 66 return ((hpd_val(hpd) & _PMD_PAGE_MASK) >> 1) + 17;
65#else 67#else
66 return hpd.pd & HUGEPD_SHIFT_MASK; 68 return hpd_val(hpd) & HUGEPD_SHIFT_MASK;
67#endif 69#endif
68} 70}
69 71
diff --git a/arch/powerpc/include/asm/nohash/pgtable.h b/arch/powerpc/include/asm/nohash/pgtable.h
index 172849727054..0cd8a3852763 100644
--- a/arch/powerpc/include/asm/nohash/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/pgtable.h
@@ -227,9 +227,10 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
227static inline int hugepd_ok(hugepd_t hpd) 227static inline int hugepd_ok(hugepd_t hpd)
228{ 228{
229#ifdef CONFIG_PPC_8xx 229#ifdef CONFIG_PPC_8xx
230 return ((hpd.pd & 0x4) != 0); 230 return ((hpd_val(hpd) & 0x4) != 0);
231#else 231#else
232 return (hpd.pd > 0); 232 /* We clear the top bit to indicate hugepd */
233 return ((hpd_val(hpd) & PD_HUGE) == 0);
233#endif 234#endif
234} 235}
235 236
diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
index 56398e7e6100..47120bf2670c 100644
--- a/arch/powerpc/include/asm/page.h
+++ b/arch/powerpc/include/asm/page.h
@@ -294,15 +294,12 @@ extern long long virt_phys_offset;
294#include <asm/pgtable-types.h> 294#include <asm/pgtable-types.h>
295#endif 295#endif
296 296
297typedef struct { signed long pd; } hugepd_t;
298 297
299#ifndef CONFIG_HUGETLB_PAGE 298#ifndef CONFIG_HUGETLB_PAGE
300#define is_hugepd(pdep) (0) 299#define is_hugepd(pdep) (0)
301#define pgd_huge(pgd) (0) 300#define pgd_huge(pgd) (0)
302#endif /* CONFIG_HUGETLB_PAGE */ 301#endif /* CONFIG_HUGETLB_PAGE */
303 302
304#define __hugepd(x) ((hugepd_t) { (x) })
305
306struct page; 303struct page;
307extern void clear_user_page(void *page, unsigned long vaddr, struct page *pg); 304extern void clear_user_page(void *page, unsigned long vaddr, struct page *pg);
308extern void copy_user_page(void *to, void *from, unsigned long vaddr, 305extern void copy_user_page(void *to, void *from, unsigned long vaddr,
diff --git a/arch/powerpc/include/asm/perf_event_server.h b/arch/powerpc/include/asm/perf_event_server.h
index e157489ee7a1..ae0a23091a9b 100644
--- a/arch/powerpc/include/asm/perf_event_server.h
+++ b/arch/powerpc/include/asm/perf_event_server.h
@@ -65,6 +65,7 @@ struct power_pmu {
65#define PPMU_HAS_SSLOT 0x00000020 /* Has sampled slot in MMCRA */ 65#define PPMU_HAS_SSLOT 0x00000020 /* Has sampled slot in MMCRA */
66#define PPMU_HAS_SIER 0x00000040 /* Has SIER */ 66#define PPMU_HAS_SIER 0x00000040 /* Has SIER */
67#define PPMU_ARCH_207S 0x00000080 /* PMC is architecture v2.07S */ 67#define PPMU_ARCH_207S 0x00000080 /* PMC is architecture v2.07S */
68#define PPMU_NO_SIAR 0x00000100 /* Do not use SIAR */
68 69
69/* 70/*
70 * Values for flags to get_alternatives() 71 * Values for flags to get_alternatives()
diff --git a/arch/powerpc/include/asm/pgtable-be-types.h b/arch/powerpc/include/asm/pgtable-be-types.h
index 49c0a5a80efa..9c0f5db5cf46 100644
--- a/arch/powerpc/include/asm/pgtable-be-types.h
+++ b/arch/powerpc/include/asm/pgtable-be-types.h
@@ -104,4 +104,12 @@ static inline bool pmd_xchg(pmd_t *pmdp, pmd_t old, pmd_t new)
104 return pmd_raw(old) == prev; 104 return pmd_raw(old) == prev;
105} 105}
106 106
107typedef struct { __be64 pdbe; } hugepd_t;
108#define __hugepd(x) ((hugepd_t) { cpu_to_be64(x) })
109
110static inline unsigned long hpd_val(hugepd_t x)
111{
112 return be64_to_cpu(x.pdbe);
113}
114
107#endif /* _ASM_POWERPC_PGTABLE_BE_TYPES_H */ 115#endif /* _ASM_POWERPC_PGTABLE_BE_TYPES_H */
diff --git a/arch/powerpc/include/asm/pgtable-types.h b/arch/powerpc/include/asm/pgtable-types.h
index e7f4f3e0fcde..8bd3b13fe2fb 100644
--- a/arch/powerpc/include/asm/pgtable-types.h
+++ b/arch/powerpc/include/asm/pgtable-types.h
@@ -66,4 +66,11 @@ static inline bool pte_xchg(pte_t *ptep, pte_t old, pte_t new)
66} 66}
67#endif 67#endif
68 68
69typedef struct { unsigned long pd; } hugepd_t;
70#define __hugepd(x) ((hugepd_t) { (x) })
71static inline unsigned long hpd_val(hugepd_t x)
72{
73 return x.pd;
74}
75
69#endif /* _ASM_POWERPC_PGTABLE_TYPES_H */ 76#endif /* _ASM_POWERPC_PGTABLE_TYPES_H */
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index c56ea8c84abb..c4ced1d01d57 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -157,7 +157,7 @@
157#define PPC_INST_MCRXR 0x7c000400 157#define PPC_INST_MCRXR 0x7c000400
158#define PPC_INST_MCRXR_MASK 0xfc0007fe 158#define PPC_INST_MCRXR_MASK 0xfc0007fe
159#define PPC_INST_MFSPR_PVR 0x7c1f42a6 159#define PPC_INST_MFSPR_PVR 0x7c1f42a6
160#define PPC_INST_MFSPR_PVR_MASK 0xfc1fffff 160#define PPC_INST_MFSPR_PVR_MASK 0xfc1ffffe
161#define PPC_INST_MFTMR 0x7c0002dc 161#define PPC_INST_MFTMR 0x7c0002dc
162#define PPC_INST_MSGSND 0x7c00019c 162#define PPC_INST_MSGSND 0x7c00019c
163#define PPC_INST_MSGCLR 0x7c0001dc 163#define PPC_INST_MSGCLR 0x7c0001dc
@@ -174,13 +174,13 @@
174#define PPC_INST_RFDI 0x4c00004e 174#define PPC_INST_RFDI 0x4c00004e
175#define PPC_INST_RFMCI 0x4c00004c 175#define PPC_INST_RFMCI 0x4c00004c
176#define PPC_INST_MFSPR_DSCR 0x7c1102a6 176#define PPC_INST_MFSPR_DSCR 0x7c1102a6
177#define PPC_INST_MFSPR_DSCR_MASK 0xfc1fffff 177#define PPC_INST_MFSPR_DSCR_MASK 0xfc1ffffe
178#define PPC_INST_MTSPR_DSCR 0x7c1103a6 178#define PPC_INST_MTSPR_DSCR 0x7c1103a6
179#define PPC_INST_MTSPR_DSCR_MASK 0xfc1fffff 179#define PPC_INST_MTSPR_DSCR_MASK 0xfc1ffffe
180#define PPC_INST_MFSPR_DSCR_USER 0x7c0302a6 180#define PPC_INST_MFSPR_DSCR_USER 0x7c0302a6
181#define PPC_INST_MFSPR_DSCR_USER_MASK 0xfc1fffff 181#define PPC_INST_MFSPR_DSCR_USER_MASK 0xfc1ffffe
182#define PPC_INST_MTSPR_DSCR_USER 0x7c0303a6 182#define PPC_INST_MTSPR_DSCR_USER 0x7c0303a6
183#define PPC_INST_MTSPR_DSCR_USER_MASK 0xfc1fffff 183#define PPC_INST_MTSPR_DSCR_USER_MASK 0xfc1ffffe
184#define PPC_INST_MFVSRD 0x7c000066 184#define PPC_INST_MFVSRD 0x7c000066
185#define PPC_INST_MTVSRD 0x7c000166 185#define PPC_INST_MTVSRD 0x7c000166
186#define PPC_INST_SLBFEE 0x7c0007a7 186#define PPC_INST_SLBFEE 0x7c0007a7
diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
index 8180bfd7ab93..9de7f79e702b 100644
--- a/arch/powerpc/kernel/eeh.c
+++ b/arch/powerpc/kernel/eeh.c
@@ -298,9 +298,17 @@ void eeh_slot_error_detail(struct eeh_pe *pe, int severity)
298 * 298 *
299 * For pHyp, we have to enable IO for log retrieval. Otherwise, 299 * For pHyp, we have to enable IO for log retrieval. Otherwise,
300 * 0xFF's is always returned from PCI config space. 300 * 0xFF's is always returned from PCI config space.
301 *
302 * When the @severity is EEH_LOG_PERM, the PE is going to be
303 * removed. Prior to that, the drivers for devices included in
304 * the PE will be closed. The drivers rely on working IO path
305 * to bring the devices to quiet state. Otherwise, PCI traffic
306 * from those devices after they are removed is like to cause
307 * another unexpected EEH error.
301 */ 308 */
302 if (!(pe->type & EEH_PE_PHB)) { 309 if (!(pe->type & EEH_PE_PHB)) {
303 if (eeh_has_flag(EEH_ENABLE_IO_FOR_LOG)) 310 if (eeh_has_flag(EEH_ENABLE_IO_FOR_LOG) ||
311 severity == EEH_LOG_PERM)
304 eeh_pci_enable(pe, EEH_OPT_THAW_MMIO); 312 eeh_pci_enable(pe, EEH_OPT_THAW_MMIO);
305 313
306 /* 314 /*
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index e4744ff38a17..925a4ef90559 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -463,6 +463,10 @@ static int fpr_set(struct task_struct *target, const struct user_regset *regset,
463 463
464 flush_fp_to_thread(target); 464 flush_fp_to_thread(target);
465 465
466 for (i = 0; i < 32 ; i++)
467 buf[i] = target->thread.TS_FPR(i);
468 buf[32] = target->thread.fp_state.fpscr;
469
466 /* copy to local buffer then write that out */ 470 /* copy to local buffer then write that out */
467 i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1); 471 i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
468 if (i) 472 if (i)
@@ -672,6 +676,9 @@ static int vsr_set(struct task_struct *target, const struct user_regset *regset,
672 flush_altivec_to_thread(target); 676 flush_altivec_to_thread(target);
673 flush_vsx_to_thread(target); 677 flush_vsx_to_thread(target);
674 678
679 for (i = 0; i < 32 ; i++)
680 buf[i] = target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET];
681
675 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, 682 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
676 buf, 0, 32 * sizeof(double)); 683 buf, 0, 32 * sizeof(double));
677 if (!ret) 684 if (!ret)
@@ -1019,6 +1026,10 @@ static int tm_cfpr_set(struct task_struct *target,
1019 flush_fp_to_thread(target); 1026 flush_fp_to_thread(target);
1020 flush_altivec_to_thread(target); 1027 flush_altivec_to_thread(target);
1021 1028
1029 for (i = 0; i < 32; i++)
1030 buf[i] = target->thread.TS_CKFPR(i);
1031 buf[32] = target->thread.ckfp_state.fpscr;
1032
1022 /* copy to local buffer then write that out */ 1033 /* copy to local buffer then write that out */
1023 i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1); 1034 i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
1024 if (i) 1035 if (i)
@@ -1283,6 +1294,9 @@ static int tm_cvsx_set(struct task_struct *target,
1283 flush_altivec_to_thread(target); 1294 flush_altivec_to_thread(target);
1284 flush_vsx_to_thread(target); 1295 flush_vsx_to_thread(target);
1285 1296
1297 for (i = 0; i < 32 ; i++)
1298 buf[i] = target->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET];
1299
1286 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, 1300 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1287 buf, 0, 32 * sizeof(double)); 1301 buf, 0, 32 * sizeof(double));
1288 if (!ret) 1302 if (!ret)
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 80334937e14f..67e19a0821be 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -747,7 +747,7 @@ static unsigned long __init htab_get_table_size(void)
747} 747}
748 748
749#ifdef CONFIG_MEMORY_HOTPLUG 749#ifdef CONFIG_MEMORY_HOTPLUG
750int create_section_mapping(unsigned long start, unsigned long end) 750int hash__create_section_mapping(unsigned long start, unsigned long end)
751{ 751{
752 int rc = htab_bolt_mapping(start, end, __pa(start), 752 int rc = htab_bolt_mapping(start, end, __pa(start),
753 pgprot_val(PAGE_KERNEL), mmu_linear_psize, 753 pgprot_val(PAGE_KERNEL), mmu_linear_psize,
@@ -761,7 +761,7 @@ int create_section_mapping(unsigned long start, unsigned long end)
761 return rc; 761 return rc;
762} 762}
763 763
764int remove_section_mapping(unsigned long start, unsigned long end) 764int hash__remove_section_mapping(unsigned long start, unsigned long end)
765{ 765{
766 int rc = htab_remove_mapping(start, end, mmu_linear_psize, 766 int rc = htab_remove_mapping(start, end, mmu_linear_psize,
767 mmu_kernel_ssize); 767 mmu_kernel_ssize);
diff --git a/arch/powerpc/mm/hugetlbpage-hash64.c b/arch/powerpc/mm/hugetlbpage-hash64.c
index d5026f3800b6..37b5f91e381b 100644
--- a/arch/powerpc/mm/hugetlbpage-hash64.c
+++ b/arch/powerpc/mm/hugetlbpage-hash64.c
@@ -125,11 +125,14 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
125int hugepd_ok(hugepd_t hpd) 125int hugepd_ok(hugepd_t hpd)
126{ 126{
127 bool is_hugepd; 127 bool is_hugepd;
128 unsigned long hpdval;
129
130 hpdval = hpd_val(hpd);
128 131
129 /* 132 /*
130 * We should not find this format in page directory, warn otherwise. 133 * We should not find this format in page directory, warn otherwise.
131 */ 134 */
132 is_hugepd = (((hpd.pd & 0x3) == 0x0) && ((hpd.pd & HUGEPD_SHIFT_MASK) != 0)); 135 is_hugepd = (((hpdval & 0x3) == 0x0) && ((hpdval & HUGEPD_SHIFT_MASK) != 0));
133 WARN(is_hugepd, "Found wrong page directory format\n"); 136 WARN(is_hugepd, "Found wrong page directory format\n");
134 return 0; 137 return 0;
135} 138}
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 289df38fb7e0..8c3389cbcd12 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -53,7 +53,7 @@ static u64 gpage_freearray[MAX_NUMBER_GPAGES];
53static unsigned nr_gpages; 53static unsigned nr_gpages;
54#endif 54#endif
55 55
56#define hugepd_none(hpd) ((hpd).pd == 0) 56#define hugepd_none(hpd) (hpd_val(hpd) == 0)
57 57
58pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr) 58pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
59{ 59{
@@ -103,24 +103,24 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
103 for (i = 0; i < num_hugepd; i++, hpdp++) { 103 for (i = 0; i < num_hugepd; i++, hpdp++) {
104 if (unlikely(!hugepd_none(*hpdp))) 104 if (unlikely(!hugepd_none(*hpdp)))
105 break; 105 break;
106 else 106 else {
107#ifdef CONFIG_PPC_BOOK3S_64 107#ifdef CONFIG_PPC_BOOK3S_64
108 hpdp->pd = __pa(new) | 108 *hpdp = __hugepd(__pa(new) |
109 (shift_to_mmu_psize(pshift) << 2); 109 (shift_to_mmu_psize(pshift) << 2));
110#elif defined(CONFIG_PPC_8xx) 110#elif defined(CONFIG_PPC_8xx)
111 hpdp->pd = __pa(new) | 111 *hpdp = __hugepd(__pa(new) |
112 (pshift == PAGE_SHIFT_8M ? _PMD_PAGE_8M : 112 (pshift == PAGE_SHIFT_8M ? _PMD_PAGE_8M :
113 _PMD_PAGE_512K) | 113 _PMD_PAGE_512K) | _PMD_PRESENT);
114 _PMD_PRESENT;
115#else 114#else
116 /* We use the old format for PPC_FSL_BOOK3E */ 115 /* We use the old format for PPC_FSL_BOOK3E */
117 hpdp->pd = ((unsigned long)new & ~PD_HUGE) | pshift; 116 *hpdp = __hugepd(((unsigned long)new & ~PD_HUGE) | pshift);
118#endif 117#endif
118 }
119 } 119 }
120 /* If we bailed from the for loop early, an error occurred, clean up */ 120 /* If we bailed from the for loop early, an error occurred, clean up */
121 if (i < num_hugepd) { 121 if (i < num_hugepd) {
122 for (i = i - 1 ; i >= 0; i--, hpdp--) 122 for (i = i - 1 ; i >= 0; i--, hpdp--)
123 hpdp->pd = 0; 123 *hpdp = __hugepd(0);
124 kmem_cache_free(cachep, new); 124 kmem_cache_free(cachep, new);
125 } 125 }
126 spin_unlock(&mm->page_table_lock); 126 spin_unlock(&mm->page_table_lock);
@@ -454,7 +454,7 @@ static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshif
454 return; 454 return;
455 455
456 for (i = 0; i < num_hugepd; i++, hpdp++) 456 for (i = 0; i < num_hugepd; i++, hpdp++)
457 hpdp->pd = 0; 457 *hpdp = __hugepd(0);
458 458
459 if (shift >= pdshift) 459 if (shift >= pdshift)
460 hugepd_free(tlb, hugepte); 460 hugepd_free(tlb, hugepte);
@@ -810,12 +810,8 @@ static int __init hugetlbpage_init(void)
810 * if we have pdshift and shift value same, we don't 810 * if we have pdshift and shift value same, we don't
811 * use pgt cache for hugepd. 811 * use pgt cache for hugepd.
812 */ 812 */
813 if (pdshift > shift) { 813 if (pdshift > shift)
814 pgtable_cache_add(pdshift - shift, NULL); 814 pgtable_cache_add(pdshift - shift, NULL);
815 if (!PGT_CACHE(pdshift - shift))
816 panic("hugetlbpage_init(): could not create "
817 "pgtable cache for %d bit pagesize\n", shift);
818 }
819#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_8xx) 815#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_8xx)
820 else if (!hugepte_cache) { 816 else if (!hugepte_cache) {
821 /* 817 /*
@@ -852,9 +848,6 @@ static int __init hugetlbpage_init(void)
852 else if (mmu_psize_defs[MMU_PAGE_2M].shift) 848 else if (mmu_psize_defs[MMU_PAGE_2M].shift)
853 HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_2M].shift; 849 HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_2M].shift;
854#endif 850#endif
855 else
856 panic("%s: Unable to set default huge page size\n", __func__);
857
858 return 0; 851 return 0;
859} 852}
860 853
diff --git a/arch/powerpc/mm/init-common.c b/arch/powerpc/mm/init-common.c
index 2be5dc242832..eb8c6c8c4851 100644
--- a/arch/powerpc/mm/init-common.c
+++ b/arch/powerpc/mm/init-common.c
@@ -79,8 +79,12 @@ void pgtable_cache_add(unsigned shift, void (*ctor)(void *))
79 align = max_t(unsigned long, align, minalign); 79 align = max_t(unsigned long, align, minalign);
80 name = kasprintf(GFP_KERNEL, "pgtable-2^%d", shift); 80 name = kasprintf(GFP_KERNEL, "pgtable-2^%d", shift);
81 new = kmem_cache_create(name, table_size, align, 0, ctor); 81 new = kmem_cache_create(name, table_size, align, 0, ctor);
82 if (!new)
83 panic("Could not allocate pgtable cache for order %d", shift);
84
82 kfree(name); 85 kfree(name);
83 pgtable_cache[shift - 1] = new; 86 pgtable_cache[shift - 1] = new;
87
84 pr_debug("Allocated pgtable cache for order %d\n", shift); 88 pr_debug("Allocated pgtable cache for order %d\n", shift);
85} 89}
86EXPORT_SYMBOL_GPL(pgtable_cache_add); /* used by kvm_hv module */ 90EXPORT_SYMBOL_GPL(pgtable_cache_add); /* used by kvm_hv module */
@@ -89,7 +93,7 @@ void pgtable_cache_init(void)
89{ 93{
90 pgtable_cache_add(PGD_INDEX_SIZE, pgd_ctor); 94 pgtable_cache_add(PGD_INDEX_SIZE, pgd_ctor);
91 95
92 if (PMD_INDEX_SIZE && !PGT_CACHE(PMD_INDEX_SIZE)) 96 if (PMD_CACHE_INDEX && !PGT_CACHE(PMD_CACHE_INDEX))
93 pgtable_cache_add(PMD_CACHE_INDEX, pmd_ctor); 97 pgtable_cache_add(PMD_CACHE_INDEX, pmd_ctor);
94 /* 98 /*
95 * In all current configs, when the PUD index exists it's the 99 * In all current configs, when the PUD index exists it's the
@@ -98,11 +102,4 @@ void pgtable_cache_init(void)
98 */ 102 */
99 if (PUD_INDEX_SIZE && !PGT_CACHE(PUD_INDEX_SIZE)) 103 if (PUD_INDEX_SIZE && !PGT_CACHE(PUD_INDEX_SIZE))
100 pgtable_cache_add(PUD_INDEX_SIZE, pud_ctor); 104 pgtable_cache_add(PUD_INDEX_SIZE, pud_ctor);
101
102 if (!PGT_CACHE(PGD_INDEX_SIZE))
103 panic("Couldn't allocate pgd cache");
104 if (PMD_INDEX_SIZE && !PGT_CACHE(PMD_INDEX_SIZE))
105 panic("Couldn't allocate pmd pgtable caches");
106 if (PUD_INDEX_SIZE && !PGT_CACHE(PUD_INDEX_SIZE))
107 panic("Couldn't allocate pud pgtable caches");
108} 105}
diff --git a/arch/powerpc/mm/pgtable-book3s64.c b/arch/powerpc/mm/pgtable-book3s64.c
index ebf9782bacf9..653ff6c74ebe 100644
--- a/arch/powerpc/mm/pgtable-book3s64.c
+++ b/arch/powerpc/mm/pgtable-book3s64.c
@@ -126,3 +126,21 @@ void mmu_cleanup_all(void)
126 else if (mmu_hash_ops.hpte_clear_all) 126 else if (mmu_hash_ops.hpte_clear_all)
127 mmu_hash_ops.hpte_clear_all(); 127 mmu_hash_ops.hpte_clear_all();
128} 128}
129
130#ifdef CONFIG_MEMORY_HOTPLUG
131int create_section_mapping(unsigned long start, unsigned long end)
132{
133 if (radix_enabled())
134 return -ENODEV;
135
136 return hash__create_section_mapping(start, end);
137}
138
139int remove_section_mapping(unsigned long start, unsigned long end)
140{
141 if (radix_enabled())
142 return -ENODEV;
143
144 return hash__remove_section_mapping(start, end);
145}
146#endif /* CONFIG_MEMORY_HOTPLUG */
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index fd3e4034c04d..270eb9b74e2e 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -295,6 +295,8 @@ static inline void perf_read_regs(struct pt_regs *regs)
295 */ 295 */
296 if (TRAP(regs) != 0xf00) 296 if (TRAP(regs) != 0xf00)
297 use_siar = 0; 297 use_siar = 0;
298 else if ((ppmu->flags & PPMU_NO_SIAR))
299 use_siar = 0;
298 else if (marked) 300 else if (marked)
299 use_siar = 1; 301 use_siar = 1;
300 else if ((ppmu->flags & PPMU_NO_CONT_SAMPLING)) 302 else if ((ppmu->flags & PPMU_NO_CONT_SAMPLING))
diff --git a/arch/powerpc/perf/power9-events-list.h b/arch/powerpc/perf/power9-events-list.h
index 6447dc1c3d89..929b56d47ad9 100644
--- a/arch/powerpc/perf/power9-events-list.h
+++ b/arch/powerpc/perf/power9-events-list.h
@@ -16,7 +16,7 @@ EVENT(PM_CYC, 0x0001e)
16EVENT(PM_ICT_NOSLOT_CYC, 0x100f8) 16EVENT(PM_ICT_NOSLOT_CYC, 0x100f8)
17EVENT(PM_CMPLU_STALL, 0x1e054) 17EVENT(PM_CMPLU_STALL, 0x1e054)
18EVENT(PM_INST_CMPL, 0x00002) 18EVENT(PM_INST_CMPL, 0x00002)
19EVENT(PM_BRU_CMPL, 0x40060) 19EVENT(PM_BRU_CMPL, 0x10012)
20EVENT(PM_BR_MPRED_CMPL, 0x400f6) 20EVENT(PM_BR_MPRED_CMPL, 0x400f6)
21 21
22/* All L1 D cache load references counted at finish, gated by reject */ 22/* All L1 D cache load references counted at finish, gated by reject */
diff --git a/arch/powerpc/perf/power9-pmu.c b/arch/powerpc/perf/power9-pmu.c
index 346010e8d463..7332634e18c9 100644
--- a/arch/powerpc/perf/power9-pmu.c
+++ b/arch/powerpc/perf/power9-pmu.c
@@ -384,7 +384,7 @@ static struct power_pmu power9_isa207_pmu = {
384 .bhrb_filter_map = power9_bhrb_filter_map, 384 .bhrb_filter_map = power9_bhrb_filter_map,
385 .get_constraint = isa207_get_constraint, 385 .get_constraint = isa207_get_constraint,
386 .disable_pmc = isa207_disable_pmc, 386 .disable_pmc = isa207_disable_pmc,
387 .flags = PPMU_HAS_SIER | PPMU_ARCH_207S, 387 .flags = PPMU_NO_SIAR | PPMU_ARCH_207S,
388 .n_generic = ARRAY_SIZE(power9_generic_events), 388 .n_generic = ARRAY_SIZE(power9_generic_events),
389 .generic_events = power9_generic_events, 389 .generic_events = power9_generic_events,
390 .cache_events = &power9_cache_events, 390 .cache_events = &power9_cache_events,
diff --git a/arch/powerpc/sysdev/xics/icp-opal.c b/arch/powerpc/sysdev/xics/icp-opal.c
index d38e86fd5720..60c57657c772 100644
--- a/arch/powerpc/sysdev/xics/icp-opal.c
+++ b/arch/powerpc/sysdev/xics/icp-opal.c
@@ -20,6 +20,7 @@
20#include <asm/xics.h> 20#include <asm/xics.h>
21#include <asm/io.h> 21#include <asm/io.h>
22#include <asm/opal.h> 22#include <asm/opal.h>
23#include <asm/kvm_ppc.h>
23 24
24static void icp_opal_teardown_cpu(void) 25static void icp_opal_teardown_cpu(void)
25{ 26{
@@ -39,7 +40,26 @@ static void icp_opal_flush_ipi(void)
39 * Should we be flagging idle loop instead? 40 * Should we be flagging idle loop instead?
40 * Or creating some task to be scheduled? 41 * Or creating some task to be scheduled?
41 */ 42 */
42 opal_int_eoi((0x00 << 24) | XICS_IPI); 43 if (opal_int_eoi((0x00 << 24) | XICS_IPI) > 0)
44 force_external_irq_replay();
45}
46
47static unsigned int icp_opal_get_xirr(void)
48{
49 unsigned int kvm_xirr;
50 __be32 hw_xirr;
51 int64_t rc;
52
53 /* Handle an interrupt latched by KVM first */
54 kvm_xirr = kvmppc_get_xics_latch();
55 if (kvm_xirr)
56 return kvm_xirr;
57
58 /* Then ask OPAL */
59 rc = opal_int_get_xirr(&hw_xirr, false);
60 if (rc < 0)
61 return 0;
62 return be32_to_cpu(hw_xirr);
43} 63}
44 64
45static unsigned int icp_opal_get_irq(void) 65static unsigned int icp_opal_get_irq(void)
@@ -47,12 +67,8 @@ static unsigned int icp_opal_get_irq(void)
47 unsigned int xirr; 67 unsigned int xirr;
48 unsigned int vec; 68 unsigned int vec;
49 unsigned int irq; 69 unsigned int irq;
50 int64_t rc;
51 70
52 rc = opal_int_get_xirr(&xirr, false); 71 xirr = icp_opal_get_xirr();
53 if (rc < 0)
54 return 0;
55 xirr = be32_to_cpu(xirr);
56 vec = xirr & 0x00ffffff; 72 vec = xirr & 0x00ffffff;
57 if (vec == XICS_IRQ_SPURIOUS) 73 if (vec == XICS_IRQ_SPURIOUS)
58 return 0; 74 return 0;
@@ -67,7 +83,8 @@ static unsigned int icp_opal_get_irq(void)
67 xics_mask_unknown_vec(vec); 83 xics_mask_unknown_vec(vec);
68 84
69 /* We might learn about it later, so EOI it */ 85 /* We might learn about it later, so EOI it */
70 opal_int_eoi(xirr); 86 if (opal_int_eoi(xirr) > 0)
87 force_external_irq_replay();
71 88
72 return 0; 89 return 0;
73} 90}
diff --git a/arch/s390/configs/default_defconfig b/arch/s390/configs/default_defconfig
index e659daffe368..e00975361fec 100644
--- a/arch/s390/configs/default_defconfig
+++ b/arch/s390/configs/default_defconfig
@@ -69,7 +69,7 @@ CONFIG_CMA=y
69CONFIG_CMA_DEBUG=y 69CONFIG_CMA_DEBUG=y
70CONFIG_CMA_DEBUGFS=y 70CONFIG_CMA_DEBUGFS=y
71CONFIG_MEM_SOFT_DIRTY=y 71CONFIG_MEM_SOFT_DIRTY=y
72CONFIG_ZPOOL=m 72CONFIG_ZSWAP=y
73CONFIG_ZBUD=m 73CONFIG_ZBUD=m
74CONFIG_ZSMALLOC=m 74CONFIG_ZSMALLOC=m
75CONFIG_ZSMALLOC_STAT=y 75CONFIG_ZSMALLOC_STAT=y
@@ -141,8 +141,6 @@ CONFIG_NF_CONNTRACK_SECMARK=y
141CONFIG_NF_CONNTRACK_EVENTS=y 141CONFIG_NF_CONNTRACK_EVENTS=y
142CONFIG_NF_CONNTRACK_TIMEOUT=y 142CONFIG_NF_CONNTRACK_TIMEOUT=y
143CONFIG_NF_CONNTRACK_TIMESTAMP=y 143CONFIG_NF_CONNTRACK_TIMESTAMP=y
144CONFIG_NF_CT_PROTO_DCCP=m
145CONFIG_NF_CT_PROTO_UDPLITE=m
146CONFIG_NF_CONNTRACK_AMANDA=m 144CONFIG_NF_CONNTRACK_AMANDA=m
147CONFIG_NF_CONNTRACK_FTP=m 145CONFIG_NF_CONNTRACK_FTP=m
148CONFIG_NF_CONNTRACK_H323=m 146CONFIG_NF_CONNTRACK_H323=m
@@ -159,13 +157,12 @@ CONFIG_NF_TABLES=m
159CONFIG_NFT_EXTHDR=m 157CONFIG_NFT_EXTHDR=m
160CONFIG_NFT_META=m 158CONFIG_NFT_META=m
161CONFIG_NFT_CT=m 159CONFIG_NFT_CT=m
162CONFIG_NFT_RBTREE=m
163CONFIG_NFT_HASH=m
164CONFIG_NFT_COUNTER=m 160CONFIG_NFT_COUNTER=m
165CONFIG_NFT_LOG=m 161CONFIG_NFT_LOG=m
166CONFIG_NFT_LIMIT=m 162CONFIG_NFT_LIMIT=m
167CONFIG_NFT_NAT=m 163CONFIG_NFT_NAT=m
168CONFIG_NFT_COMPAT=m 164CONFIG_NFT_COMPAT=m
165CONFIG_NFT_HASH=m
169CONFIG_NETFILTER_XT_SET=m 166CONFIG_NETFILTER_XT_SET=m
170CONFIG_NETFILTER_XT_TARGET_AUDIT=m 167CONFIG_NETFILTER_XT_TARGET_AUDIT=m
171CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m 168CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
@@ -219,7 +216,6 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m
219CONFIG_NETFILTER_XT_MATCH_RATEEST=m 216CONFIG_NETFILTER_XT_MATCH_RATEEST=m
220CONFIG_NETFILTER_XT_MATCH_REALM=m 217CONFIG_NETFILTER_XT_MATCH_REALM=m
221CONFIG_NETFILTER_XT_MATCH_RECENT=m 218CONFIG_NETFILTER_XT_MATCH_RECENT=m
222CONFIG_NETFILTER_XT_MATCH_SOCKET=m
223CONFIG_NETFILTER_XT_MATCH_STATE=m 219CONFIG_NETFILTER_XT_MATCH_STATE=m
224CONFIG_NETFILTER_XT_MATCH_STATISTIC=m 220CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
225CONFIG_NETFILTER_XT_MATCH_STRING=m 221CONFIG_NETFILTER_XT_MATCH_STRING=m
@@ -258,7 +254,6 @@ CONFIG_IP_VS_NQ=m
258CONFIG_IP_VS_FTP=m 254CONFIG_IP_VS_FTP=m
259CONFIG_IP_VS_PE_SIP=m 255CONFIG_IP_VS_PE_SIP=m
260CONFIG_NF_CONNTRACK_IPV4=m 256CONFIG_NF_CONNTRACK_IPV4=m
261# CONFIG_NF_CONNTRACK_PROC_COMPAT is not set
262CONFIG_NF_TABLES_IPV4=m 257CONFIG_NF_TABLES_IPV4=m
263CONFIG_NFT_CHAIN_ROUTE_IPV4=m 258CONFIG_NFT_CHAIN_ROUTE_IPV4=m
264CONFIG_NF_TABLES_ARP=m 259CONFIG_NF_TABLES_ARP=m
@@ -436,7 +431,6 @@ CONFIG_EQUALIZER=m
436CONFIG_IFB=m 431CONFIG_IFB=m
437CONFIG_MACVLAN=m 432CONFIG_MACVLAN=m
438CONFIG_MACVTAP=m 433CONFIG_MACVTAP=m
439CONFIG_IPVLAN=m
440CONFIG_VXLAN=m 434CONFIG_VXLAN=m
441CONFIG_TUN=m 435CONFIG_TUN=m
442CONFIG_VETH=m 436CONFIG_VETH=m
@@ -480,6 +474,7 @@ CONFIG_VIRTIO_BALLOON=m
480CONFIG_EXT4_FS=y 474CONFIG_EXT4_FS=y
481CONFIG_EXT4_FS_POSIX_ACL=y 475CONFIG_EXT4_FS_POSIX_ACL=y
482CONFIG_EXT4_FS_SECURITY=y 476CONFIG_EXT4_FS_SECURITY=y
477CONFIG_EXT4_ENCRYPTION=y
483CONFIG_JBD2_DEBUG=y 478CONFIG_JBD2_DEBUG=y
484CONFIG_JFS_FS=m 479CONFIG_JFS_FS=m
485CONFIG_JFS_POSIX_ACL=y 480CONFIG_JFS_POSIX_ACL=y
@@ -592,14 +587,12 @@ CONFIG_LOCK_STAT=y
592CONFIG_DEBUG_LOCKDEP=y 587CONFIG_DEBUG_LOCKDEP=y
593CONFIG_DEBUG_ATOMIC_SLEEP=y 588CONFIG_DEBUG_ATOMIC_SLEEP=y
594CONFIG_DEBUG_LOCKING_API_SELFTESTS=y 589CONFIG_DEBUG_LOCKING_API_SELFTESTS=y
595CONFIG_DEBUG_LIST=y
596CONFIG_DEBUG_SG=y 590CONFIG_DEBUG_SG=y
597CONFIG_DEBUG_NOTIFIERS=y 591CONFIG_DEBUG_NOTIFIERS=y
598CONFIG_DEBUG_CREDENTIALS=y 592CONFIG_DEBUG_CREDENTIALS=y
599CONFIG_RCU_TORTURE_TEST=m 593CONFIG_RCU_TORTURE_TEST=m
600CONFIG_RCU_CPU_STALL_TIMEOUT=300 594CONFIG_RCU_CPU_STALL_TIMEOUT=300
601CONFIG_NOTIFIER_ERROR_INJECTION=m 595CONFIG_NOTIFIER_ERROR_INJECTION=m
602CONFIG_CPU_NOTIFIER_ERROR_INJECT=m
603CONFIG_PM_NOTIFIER_ERROR_INJECT=m 596CONFIG_PM_NOTIFIER_ERROR_INJECT=m
604CONFIG_FAULT_INJECTION=y 597CONFIG_FAULT_INJECTION=y
605CONFIG_FAILSLAB=y 598CONFIG_FAILSLAB=y
@@ -618,6 +611,7 @@ CONFIG_STACK_TRACER=y
618CONFIG_BLK_DEV_IO_TRACE=y 611CONFIG_BLK_DEV_IO_TRACE=y
619CONFIG_UPROBE_EVENT=y 612CONFIG_UPROBE_EVENT=y
620CONFIG_FUNCTION_PROFILER=y 613CONFIG_FUNCTION_PROFILER=y
614CONFIG_HIST_TRIGGERS=y
621CONFIG_TRACE_ENUM_MAP_FILE=y 615CONFIG_TRACE_ENUM_MAP_FILE=y
622CONFIG_LKDTM=m 616CONFIG_LKDTM=m
623CONFIG_TEST_LIST_SORT=y 617CONFIG_TEST_LIST_SORT=y
@@ -630,6 +624,7 @@ CONFIG_TEST_STRING_HELPERS=y
630CONFIG_TEST_KSTRTOX=y 624CONFIG_TEST_KSTRTOX=y
631CONFIG_DMA_API_DEBUG=y 625CONFIG_DMA_API_DEBUG=y
632CONFIG_TEST_BPF=m 626CONFIG_TEST_BPF=m
627CONFIG_BUG_ON_DATA_CORRUPTION=y
633CONFIG_S390_PTDUMP=y 628CONFIG_S390_PTDUMP=y
634CONFIG_ENCRYPTED_KEYS=m 629CONFIG_ENCRYPTED_KEYS=m
635CONFIG_SECURITY=y 630CONFIG_SECURITY=y
@@ -640,16 +635,18 @@ CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
640CONFIG_SECURITY_SELINUX_DISABLE=y 635CONFIG_SECURITY_SELINUX_DISABLE=y
641CONFIG_IMA=y 636CONFIG_IMA=y
642CONFIG_IMA_APPRAISE=y 637CONFIG_IMA_APPRAISE=y
638CONFIG_CRYPTO_RSA=m
639CONFIG_CRYPTO_DH=m
640CONFIG_CRYPTO_ECDH=m
643CONFIG_CRYPTO_USER=m 641CONFIG_CRYPTO_USER=m
644# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
645CONFIG_CRYPTO_CRYPTD=m 642CONFIG_CRYPTO_CRYPTD=m
646CONFIG_CRYPTO_TEST=m 643CONFIG_CRYPTO_TEST=m
647CONFIG_CRYPTO_CCM=m 644CONFIG_CRYPTO_CCM=m
648CONFIG_CRYPTO_GCM=m 645CONFIG_CRYPTO_GCM=m
649CONFIG_CRYPTO_CTS=m 646CONFIG_CRYPTO_CHACHA20POLY1305=m
650CONFIG_CRYPTO_LRW=m 647CONFIG_CRYPTO_LRW=m
651CONFIG_CRYPTO_PCBC=m 648CONFIG_CRYPTO_PCBC=m
652CONFIG_CRYPTO_XTS=m 649CONFIG_CRYPTO_KEYWRAP=m
653CONFIG_CRYPTO_XCBC=m 650CONFIG_CRYPTO_XCBC=m
654CONFIG_CRYPTO_VMAC=m 651CONFIG_CRYPTO_VMAC=m
655CONFIG_CRYPTO_CRC32=m 652CONFIG_CRYPTO_CRC32=m
@@ -673,11 +670,13 @@ CONFIG_CRYPTO_SEED=m
673CONFIG_CRYPTO_SERPENT=m 670CONFIG_CRYPTO_SERPENT=m
674CONFIG_CRYPTO_TEA=m 671CONFIG_CRYPTO_TEA=m
675CONFIG_CRYPTO_TWOFISH=m 672CONFIG_CRYPTO_TWOFISH=m
676CONFIG_CRYPTO_LZO=m 673CONFIG_CRYPTO_842=m
677CONFIG_CRYPTO_LZ4=m 674CONFIG_CRYPTO_LZ4=m
678CONFIG_CRYPTO_LZ4HC=m 675CONFIG_CRYPTO_LZ4HC=m
679CONFIG_CRYPTO_USER_API_HASH=m 676CONFIG_CRYPTO_USER_API_HASH=m
680CONFIG_CRYPTO_USER_API_SKCIPHER=m 677CONFIG_CRYPTO_USER_API_SKCIPHER=m
678CONFIG_CRYPTO_USER_API_RNG=m
679CONFIG_CRYPTO_USER_API_AEAD=m
681CONFIG_ZCRYPT=m 680CONFIG_ZCRYPT=m
682CONFIG_CRYPTO_SHA1_S390=m 681CONFIG_CRYPTO_SHA1_S390=m
683CONFIG_CRYPTO_SHA256_S390=m 682CONFIG_CRYPTO_SHA256_S390=m
diff --git a/arch/s390/configs/gcov_defconfig b/arch/s390/configs/gcov_defconfig
index 95ceac50bc65..f05d2d6e1087 100644
--- a/arch/s390/configs/gcov_defconfig
+++ b/arch/s390/configs/gcov_defconfig
@@ -12,6 +12,7 @@ CONFIG_TASK_IO_ACCOUNTING=y
12CONFIG_IKCONFIG=y 12CONFIG_IKCONFIG=y
13CONFIG_IKCONFIG_PROC=y 13CONFIG_IKCONFIG_PROC=y
14CONFIG_NUMA_BALANCING=y 14CONFIG_NUMA_BALANCING=y
15# CONFIG_NUMA_BALANCING_DEFAULT_ENABLED is not set
15CONFIG_MEMCG=y 16CONFIG_MEMCG=y
16CONFIG_MEMCG_SWAP=y 17CONFIG_MEMCG_SWAP=y
17CONFIG_BLK_CGROUP=y 18CONFIG_BLK_CGROUP=y
@@ -54,8 +55,9 @@ CONFIG_SOLARIS_X86_PARTITION=y
54CONFIG_UNIXWARE_DISKLABEL=y 55CONFIG_UNIXWARE_DISKLABEL=y
55CONFIG_CFQ_GROUP_IOSCHED=y 56CONFIG_CFQ_GROUP_IOSCHED=y
56CONFIG_DEFAULT_DEADLINE=y 57CONFIG_DEFAULT_DEADLINE=y
58CONFIG_LIVEPATCH=y
57CONFIG_TUNE_ZEC12=y 59CONFIG_TUNE_ZEC12=y
58CONFIG_NR_CPUS=256 60CONFIG_NR_CPUS=512
59CONFIG_NUMA=y 61CONFIG_NUMA=y
60CONFIG_HZ_100=y 62CONFIG_HZ_100=y
61CONFIG_MEMORY_HOTPLUG=y 63CONFIG_MEMORY_HOTPLUG=y
@@ -65,6 +67,7 @@ CONFIG_TRANSPARENT_HUGEPAGE=y
65CONFIG_CLEANCACHE=y 67CONFIG_CLEANCACHE=y
66CONFIG_FRONTSWAP=y 68CONFIG_FRONTSWAP=y
67CONFIG_CMA=y 69CONFIG_CMA=y
70CONFIG_MEM_SOFT_DIRTY=y
68CONFIG_ZSWAP=y 71CONFIG_ZSWAP=y
69CONFIG_ZBUD=m 72CONFIG_ZBUD=m
70CONFIG_ZSMALLOC=m 73CONFIG_ZSMALLOC=m
@@ -136,8 +139,6 @@ CONFIG_NF_CONNTRACK_SECMARK=y
136CONFIG_NF_CONNTRACK_EVENTS=y 139CONFIG_NF_CONNTRACK_EVENTS=y
137CONFIG_NF_CONNTRACK_TIMEOUT=y 140CONFIG_NF_CONNTRACK_TIMEOUT=y
138CONFIG_NF_CONNTRACK_TIMESTAMP=y 141CONFIG_NF_CONNTRACK_TIMESTAMP=y
139CONFIG_NF_CT_PROTO_DCCP=m
140CONFIG_NF_CT_PROTO_UDPLITE=m
141CONFIG_NF_CONNTRACK_AMANDA=m 142CONFIG_NF_CONNTRACK_AMANDA=m
142CONFIG_NF_CONNTRACK_FTP=m 143CONFIG_NF_CONNTRACK_FTP=m
143CONFIG_NF_CONNTRACK_H323=m 144CONFIG_NF_CONNTRACK_H323=m
@@ -154,13 +155,12 @@ CONFIG_NF_TABLES=m
154CONFIG_NFT_EXTHDR=m 155CONFIG_NFT_EXTHDR=m
155CONFIG_NFT_META=m 156CONFIG_NFT_META=m
156CONFIG_NFT_CT=m 157CONFIG_NFT_CT=m
157CONFIG_NFT_RBTREE=m
158CONFIG_NFT_HASH=m
159CONFIG_NFT_COUNTER=m 158CONFIG_NFT_COUNTER=m
160CONFIG_NFT_LOG=m 159CONFIG_NFT_LOG=m
161CONFIG_NFT_LIMIT=m 160CONFIG_NFT_LIMIT=m
162CONFIG_NFT_NAT=m 161CONFIG_NFT_NAT=m
163CONFIG_NFT_COMPAT=m 162CONFIG_NFT_COMPAT=m
163CONFIG_NFT_HASH=m
164CONFIG_NETFILTER_XT_SET=m 164CONFIG_NETFILTER_XT_SET=m
165CONFIG_NETFILTER_XT_TARGET_AUDIT=m 165CONFIG_NETFILTER_XT_TARGET_AUDIT=m
166CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m 166CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
@@ -214,7 +214,6 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m
214CONFIG_NETFILTER_XT_MATCH_RATEEST=m 214CONFIG_NETFILTER_XT_MATCH_RATEEST=m
215CONFIG_NETFILTER_XT_MATCH_REALM=m 215CONFIG_NETFILTER_XT_MATCH_REALM=m
216CONFIG_NETFILTER_XT_MATCH_RECENT=m 216CONFIG_NETFILTER_XT_MATCH_RECENT=m
217CONFIG_NETFILTER_XT_MATCH_SOCKET=m
218CONFIG_NETFILTER_XT_MATCH_STATE=m 217CONFIG_NETFILTER_XT_MATCH_STATE=m
219CONFIG_NETFILTER_XT_MATCH_STATISTIC=m 218CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
220CONFIG_NETFILTER_XT_MATCH_STRING=m 219CONFIG_NETFILTER_XT_MATCH_STRING=m
@@ -253,7 +252,6 @@ CONFIG_IP_VS_NQ=m
253CONFIG_IP_VS_FTP=m 252CONFIG_IP_VS_FTP=m
254CONFIG_IP_VS_PE_SIP=m 253CONFIG_IP_VS_PE_SIP=m
255CONFIG_NF_CONNTRACK_IPV4=m 254CONFIG_NF_CONNTRACK_IPV4=m
256# CONFIG_NF_CONNTRACK_PROC_COMPAT is not set
257CONFIG_NF_TABLES_IPV4=m 255CONFIG_NF_TABLES_IPV4=m
258CONFIG_NFT_CHAIN_ROUTE_IPV4=m 256CONFIG_NFT_CHAIN_ROUTE_IPV4=m
259CONFIG_NF_TABLES_ARP=m 257CONFIG_NF_TABLES_ARP=m
@@ -430,7 +428,6 @@ CONFIG_EQUALIZER=m
430CONFIG_IFB=m 428CONFIG_IFB=m
431CONFIG_MACVLAN=m 429CONFIG_MACVLAN=m
432CONFIG_MACVTAP=m 430CONFIG_MACVTAP=m
433CONFIG_IPVLAN=m
434CONFIG_VXLAN=m 431CONFIG_VXLAN=m
435CONFIG_TUN=m 432CONFIG_TUN=m
436CONFIG_VETH=m 433CONFIG_VETH=m
@@ -460,6 +457,7 @@ CONFIG_HW_RANDOM_VIRTIO=m
460CONFIG_RAW_DRIVER=m 457CONFIG_RAW_DRIVER=m
461CONFIG_HANGCHECK_TIMER=m 458CONFIG_HANGCHECK_TIMER=m
462CONFIG_TN3270_FS=y 459CONFIG_TN3270_FS=y
460# CONFIG_HWMON is not set
463CONFIG_WATCHDOG=y 461CONFIG_WATCHDOG=y
464CONFIG_WATCHDOG_NOWAYOUT=y 462CONFIG_WATCHDOG_NOWAYOUT=y
465CONFIG_SOFT_WATCHDOG=m 463CONFIG_SOFT_WATCHDOG=m
@@ -473,6 +471,7 @@ CONFIG_VIRTIO_BALLOON=m
473CONFIG_EXT4_FS=y 471CONFIG_EXT4_FS=y
474CONFIG_EXT4_FS_POSIX_ACL=y 472CONFIG_EXT4_FS_POSIX_ACL=y
475CONFIG_EXT4_FS_SECURITY=y 473CONFIG_EXT4_FS_SECURITY=y
474CONFIG_EXT4_ENCRYPTION=y
476CONFIG_JBD2_DEBUG=y 475CONFIG_JBD2_DEBUG=y
477CONFIG_JFS_FS=m 476CONFIG_JFS_FS=m
478CONFIG_JFS_POSIX_ACL=y 477CONFIG_JFS_POSIX_ACL=y
@@ -495,6 +494,7 @@ CONFIG_AUTOFS4_FS=m
495CONFIG_FUSE_FS=y 494CONFIG_FUSE_FS=y
496CONFIG_CUSE=m 495CONFIG_CUSE=m
497CONFIG_OVERLAY_FS=m 496CONFIG_OVERLAY_FS=m
497CONFIG_OVERLAY_FS_REDIRECT_DIR=y
498CONFIG_FSCACHE=m 498CONFIG_FSCACHE=m
499CONFIG_CACHEFILES=m 499CONFIG_CACHEFILES=m
500CONFIG_ISO9660_FS=y 500CONFIG_ISO9660_FS=y
@@ -551,25 +551,27 @@ CONFIG_FRAME_WARN=1024
551CONFIG_UNUSED_SYMBOLS=y 551CONFIG_UNUSED_SYMBOLS=y
552CONFIG_MAGIC_SYSRQ=y 552CONFIG_MAGIC_SYSRQ=y
553CONFIG_DEBUG_MEMORY_INIT=y 553CONFIG_DEBUG_MEMORY_INIT=y
554CONFIG_MEMORY_NOTIFIER_ERROR_INJECT=m
555CONFIG_PANIC_ON_OOPS=y 554CONFIG_PANIC_ON_OOPS=y
556CONFIG_TIMER_STATS=y 555CONFIG_TIMER_STATS=y
557CONFIG_RCU_TORTURE_TEST=m 556CONFIG_RCU_TORTURE_TEST=m
558CONFIG_RCU_CPU_STALL_TIMEOUT=60 557CONFIG_RCU_CPU_STALL_TIMEOUT=60
559CONFIG_NOTIFIER_ERROR_INJECTION=m
560CONFIG_CPU_NOTIFIER_ERROR_INJECT=m
561CONFIG_PM_NOTIFIER_ERROR_INJECT=m
562CONFIG_LATENCYTOP=y 558CONFIG_LATENCYTOP=y
559CONFIG_SCHED_TRACER=y
560CONFIG_FTRACE_SYSCALLS=y
561CONFIG_STACK_TRACER=y
563CONFIG_BLK_DEV_IO_TRACE=y 562CONFIG_BLK_DEV_IO_TRACE=y
564# CONFIG_KPROBE_EVENT is not set 563CONFIG_UPROBE_EVENT=y
564CONFIG_FUNCTION_PROFILER=y
565CONFIG_HIST_TRIGGERS=y
565CONFIG_TRACE_ENUM_MAP_FILE=y 566CONFIG_TRACE_ENUM_MAP_FILE=y
566CONFIG_LKDTM=m 567CONFIG_LKDTM=m
567CONFIG_RBTREE_TEST=m
568CONFIG_INTERVAL_TREE_TEST=m
569CONFIG_PERCPU_TEST=m 568CONFIG_PERCPU_TEST=m
570CONFIG_ATOMIC64_SELFTEST=y 569CONFIG_ATOMIC64_SELFTEST=y
571CONFIG_TEST_BPF=m 570CONFIG_TEST_BPF=m
571CONFIG_BUG_ON_DATA_CORRUPTION=y
572CONFIG_S390_PTDUMP=y 572CONFIG_S390_PTDUMP=y
573CONFIG_PERSISTENT_KEYRINGS=y
574CONFIG_BIG_KEYS=y
573CONFIG_ENCRYPTED_KEYS=m 575CONFIG_ENCRYPTED_KEYS=m
574CONFIG_SECURITY=y 576CONFIG_SECURITY=y
575CONFIG_SECURITY_NETWORK=y 577CONFIG_SECURITY_NETWORK=y
@@ -577,18 +579,25 @@ CONFIG_SECURITY_SELINUX=y
577CONFIG_SECURITY_SELINUX_BOOTPARAM=y 579CONFIG_SECURITY_SELINUX_BOOTPARAM=y
578CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0 580CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
579CONFIG_SECURITY_SELINUX_DISABLE=y 581CONFIG_SECURITY_SELINUX_DISABLE=y
582CONFIG_INTEGRITY_SIGNATURE=y
583CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y
580CONFIG_IMA=y 584CONFIG_IMA=y
585CONFIG_IMA_WRITE_POLICY=y
581CONFIG_IMA_APPRAISE=y 586CONFIG_IMA_APPRAISE=y
587CONFIG_CRYPTO_DH=m
588CONFIG_CRYPTO_ECDH=m
582CONFIG_CRYPTO_USER=m 589CONFIG_CRYPTO_USER=m
583# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set 590# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
591CONFIG_CRYPTO_PCRYPT=m
584CONFIG_CRYPTO_CRYPTD=m 592CONFIG_CRYPTO_CRYPTD=m
593CONFIG_CRYPTO_MCRYPTD=m
585CONFIG_CRYPTO_TEST=m 594CONFIG_CRYPTO_TEST=m
586CONFIG_CRYPTO_CCM=m 595CONFIG_CRYPTO_CCM=m
587CONFIG_CRYPTO_GCM=m 596CONFIG_CRYPTO_GCM=m
588CONFIG_CRYPTO_CTS=m 597CONFIG_CRYPTO_CHACHA20POLY1305=m
589CONFIG_CRYPTO_LRW=m 598CONFIG_CRYPTO_LRW=m
590CONFIG_CRYPTO_PCBC=m 599CONFIG_CRYPTO_PCBC=m
591CONFIG_CRYPTO_XTS=m 600CONFIG_CRYPTO_KEYWRAP=m
592CONFIG_CRYPTO_XCBC=m 601CONFIG_CRYPTO_XCBC=m
593CONFIG_CRYPTO_VMAC=m 602CONFIG_CRYPTO_VMAC=m
594CONFIG_CRYPTO_CRC32=m 603CONFIG_CRYPTO_CRC32=m
@@ -598,6 +607,7 @@ CONFIG_CRYPTO_RMD160=m
598CONFIG_CRYPTO_RMD256=m 607CONFIG_CRYPTO_RMD256=m
599CONFIG_CRYPTO_RMD320=m 608CONFIG_CRYPTO_RMD320=m
600CONFIG_CRYPTO_SHA512=m 609CONFIG_CRYPTO_SHA512=m
610CONFIG_CRYPTO_SHA3=m
601CONFIG_CRYPTO_TGR192=m 611CONFIG_CRYPTO_TGR192=m
602CONFIG_CRYPTO_WP512=m 612CONFIG_CRYPTO_WP512=m
603CONFIG_CRYPTO_ANUBIS=m 613CONFIG_CRYPTO_ANUBIS=m
@@ -612,10 +622,13 @@ CONFIG_CRYPTO_SEED=m
612CONFIG_CRYPTO_SERPENT=m 622CONFIG_CRYPTO_SERPENT=m
613CONFIG_CRYPTO_TEA=m 623CONFIG_CRYPTO_TEA=m
614CONFIG_CRYPTO_TWOFISH=m 624CONFIG_CRYPTO_TWOFISH=m
625CONFIG_CRYPTO_842=m
615CONFIG_CRYPTO_LZ4=m 626CONFIG_CRYPTO_LZ4=m
616CONFIG_CRYPTO_LZ4HC=m 627CONFIG_CRYPTO_LZ4HC=m
617CONFIG_CRYPTO_USER_API_HASH=m 628CONFIG_CRYPTO_USER_API_HASH=m
618CONFIG_CRYPTO_USER_API_SKCIPHER=m 629CONFIG_CRYPTO_USER_API_SKCIPHER=m
630CONFIG_CRYPTO_USER_API_RNG=m
631CONFIG_CRYPTO_USER_API_AEAD=m
619CONFIG_ZCRYPT=m 632CONFIG_ZCRYPT=m
620CONFIG_CRYPTO_SHA1_S390=m 633CONFIG_CRYPTO_SHA1_S390=m
621CONFIG_CRYPTO_SHA256_S390=m 634CONFIG_CRYPTO_SHA256_S390=m
@@ -624,9 +637,6 @@ CONFIG_CRYPTO_DES_S390=m
624CONFIG_CRYPTO_AES_S390=m 637CONFIG_CRYPTO_AES_S390=m
625CONFIG_CRYPTO_GHASH_S390=m 638CONFIG_CRYPTO_GHASH_S390=m
626CONFIG_CRYPTO_CRC32_S390=y 639CONFIG_CRYPTO_CRC32_S390=y
627CONFIG_ASYMMETRIC_KEY_TYPE=y
628CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m
629CONFIG_X509_CERTIFICATE_PARSER=m
630CONFIG_CRC7=m 640CONFIG_CRC7=m
631CONFIG_CRC8=m 641CONFIG_CRC8=m
632CONFIG_CORDIC=m 642CONFIG_CORDIC=m
diff --git a/arch/s390/configs/performance_defconfig b/arch/s390/configs/performance_defconfig
index bc7b176f5795..2cf87343b590 100644
--- a/arch/s390/configs/performance_defconfig
+++ b/arch/s390/configs/performance_defconfig
@@ -65,6 +65,7 @@ CONFIG_TRANSPARENT_HUGEPAGE=y
65CONFIG_CLEANCACHE=y 65CONFIG_CLEANCACHE=y
66CONFIG_FRONTSWAP=y 66CONFIG_FRONTSWAP=y
67CONFIG_CMA=y 67CONFIG_CMA=y
68CONFIG_MEM_SOFT_DIRTY=y
68CONFIG_ZSWAP=y 69CONFIG_ZSWAP=y
69CONFIG_ZBUD=m 70CONFIG_ZBUD=m
70CONFIG_ZSMALLOC=m 71CONFIG_ZSMALLOC=m
@@ -136,8 +137,6 @@ CONFIG_NF_CONNTRACK_SECMARK=y
136CONFIG_NF_CONNTRACK_EVENTS=y 137CONFIG_NF_CONNTRACK_EVENTS=y
137CONFIG_NF_CONNTRACK_TIMEOUT=y 138CONFIG_NF_CONNTRACK_TIMEOUT=y
138CONFIG_NF_CONNTRACK_TIMESTAMP=y 139CONFIG_NF_CONNTRACK_TIMESTAMP=y
139CONFIG_NF_CT_PROTO_DCCP=m
140CONFIG_NF_CT_PROTO_UDPLITE=m
141CONFIG_NF_CONNTRACK_AMANDA=m 140CONFIG_NF_CONNTRACK_AMANDA=m
142CONFIG_NF_CONNTRACK_FTP=m 141CONFIG_NF_CONNTRACK_FTP=m
143CONFIG_NF_CONNTRACK_H323=m 142CONFIG_NF_CONNTRACK_H323=m
@@ -154,13 +153,12 @@ CONFIG_NF_TABLES=m
154CONFIG_NFT_EXTHDR=m 153CONFIG_NFT_EXTHDR=m
155CONFIG_NFT_META=m 154CONFIG_NFT_META=m
156CONFIG_NFT_CT=m 155CONFIG_NFT_CT=m
157CONFIG_NFT_RBTREE=m
158CONFIG_NFT_HASH=m
159CONFIG_NFT_COUNTER=m 156CONFIG_NFT_COUNTER=m
160CONFIG_NFT_LOG=m 157CONFIG_NFT_LOG=m
161CONFIG_NFT_LIMIT=m 158CONFIG_NFT_LIMIT=m
162CONFIG_NFT_NAT=m 159CONFIG_NFT_NAT=m
163CONFIG_NFT_COMPAT=m 160CONFIG_NFT_COMPAT=m
161CONFIG_NFT_HASH=m
164CONFIG_NETFILTER_XT_SET=m 162CONFIG_NETFILTER_XT_SET=m
165CONFIG_NETFILTER_XT_TARGET_AUDIT=m 163CONFIG_NETFILTER_XT_TARGET_AUDIT=m
166CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m 164CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
@@ -214,7 +212,6 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m
214CONFIG_NETFILTER_XT_MATCH_RATEEST=m 212CONFIG_NETFILTER_XT_MATCH_RATEEST=m
215CONFIG_NETFILTER_XT_MATCH_REALM=m 213CONFIG_NETFILTER_XT_MATCH_REALM=m
216CONFIG_NETFILTER_XT_MATCH_RECENT=m 214CONFIG_NETFILTER_XT_MATCH_RECENT=m
217CONFIG_NETFILTER_XT_MATCH_SOCKET=m
218CONFIG_NETFILTER_XT_MATCH_STATE=m 215CONFIG_NETFILTER_XT_MATCH_STATE=m
219CONFIG_NETFILTER_XT_MATCH_STATISTIC=m 216CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
220CONFIG_NETFILTER_XT_MATCH_STRING=m 217CONFIG_NETFILTER_XT_MATCH_STRING=m
@@ -253,7 +250,6 @@ CONFIG_IP_VS_NQ=m
253CONFIG_IP_VS_FTP=m 250CONFIG_IP_VS_FTP=m
254CONFIG_IP_VS_PE_SIP=m 251CONFIG_IP_VS_PE_SIP=m
255CONFIG_NF_CONNTRACK_IPV4=m 252CONFIG_NF_CONNTRACK_IPV4=m
256# CONFIG_NF_CONNTRACK_PROC_COMPAT is not set
257CONFIG_NF_TABLES_IPV4=m 253CONFIG_NF_TABLES_IPV4=m
258CONFIG_NFT_CHAIN_ROUTE_IPV4=m 254CONFIG_NFT_CHAIN_ROUTE_IPV4=m
259CONFIG_NF_TABLES_ARP=m 255CONFIG_NF_TABLES_ARP=m
@@ -430,7 +426,6 @@ CONFIG_EQUALIZER=m
430CONFIG_IFB=m 426CONFIG_IFB=m
431CONFIG_MACVLAN=m 427CONFIG_MACVLAN=m
432CONFIG_MACVTAP=m 428CONFIG_MACVTAP=m
433CONFIG_IPVLAN=m
434CONFIG_VXLAN=m 429CONFIG_VXLAN=m
435CONFIG_TUN=m 430CONFIG_TUN=m
436CONFIG_VETH=m 431CONFIG_VETH=m
@@ -474,6 +469,7 @@ CONFIG_VIRTIO_BALLOON=m
474CONFIG_EXT4_FS=y 469CONFIG_EXT4_FS=y
475CONFIG_EXT4_FS_POSIX_ACL=y 470CONFIG_EXT4_FS_POSIX_ACL=y
476CONFIG_EXT4_FS_SECURITY=y 471CONFIG_EXT4_FS_SECURITY=y
472CONFIG_EXT4_ENCRYPTION=y
477CONFIG_JBD2_DEBUG=y 473CONFIG_JBD2_DEBUG=y
478CONFIG_JFS_FS=m 474CONFIG_JFS_FS=m
479CONFIG_JFS_POSIX_ACL=y 475CONFIG_JFS_POSIX_ACL=y
@@ -496,6 +492,7 @@ CONFIG_AUTOFS4_FS=m
496CONFIG_FUSE_FS=y 492CONFIG_FUSE_FS=y
497CONFIG_CUSE=m 493CONFIG_CUSE=m
498CONFIG_OVERLAY_FS=m 494CONFIG_OVERLAY_FS=m
495CONFIG_OVERLAY_FS_REDIRECT_DIR=y
499CONFIG_FSCACHE=m 496CONFIG_FSCACHE=m
500CONFIG_CACHEFILES=m 497CONFIG_CACHEFILES=m
501CONFIG_ISO9660_FS=y 498CONFIG_ISO9660_FS=y
@@ -563,12 +560,16 @@ CONFIG_STACK_TRACER=y
563CONFIG_BLK_DEV_IO_TRACE=y 560CONFIG_BLK_DEV_IO_TRACE=y
564CONFIG_UPROBE_EVENT=y 561CONFIG_UPROBE_EVENT=y
565CONFIG_FUNCTION_PROFILER=y 562CONFIG_FUNCTION_PROFILER=y
563CONFIG_HIST_TRIGGERS=y
566CONFIG_TRACE_ENUM_MAP_FILE=y 564CONFIG_TRACE_ENUM_MAP_FILE=y
567CONFIG_LKDTM=m 565CONFIG_LKDTM=m
568CONFIG_PERCPU_TEST=m 566CONFIG_PERCPU_TEST=m
569CONFIG_ATOMIC64_SELFTEST=y 567CONFIG_ATOMIC64_SELFTEST=y
570CONFIG_TEST_BPF=m 568CONFIG_TEST_BPF=m
569CONFIG_BUG_ON_DATA_CORRUPTION=y
571CONFIG_S390_PTDUMP=y 570CONFIG_S390_PTDUMP=y
571CONFIG_PERSISTENT_KEYRINGS=y
572CONFIG_BIG_KEYS=y
572CONFIG_ENCRYPTED_KEYS=m 573CONFIG_ENCRYPTED_KEYS=m
573CONFIG_SECURITY=y 574CONFIG_SECURITY=y
574CONFIG_SECURITY_NETWORK=y 575CONFIG_SECURITY_NETWORK=y
@@ -576,18 +577,25 @@ CONFIG_SECURITY_SELINUX=y
576CONFIG_SECURITY_SELINUX_BOOTPARAM=y 577CONFIG_SECURITY_SELINUX_BOOTPARAM=y
577CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0 578CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
578CONFIG_SECURITY_SELINUX_DISABLE=y 579CONFIG_SECURITY_SELINUX_DISABLE=y
580CONFIG_INTEGRITY_SIGNATURE=y
581CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y
579CONFIG_IMA=y 582CONFIG_IMA=y
583CONFIG_IMA_WRITE_POLICY=y
580CONFIG_IMA_APPRAISE=y 584CONFIG_IMA_APPRAISE=y
585CONFIG_CRYPTO_DH=m
586CONFIG_CRYPTO_ECDH=m
581CONFIG_CRYPTO_USER=m 587CONFIG_CRYPTO_USER=m
582# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set 588# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
589CONFIG_CRYPTO_PCRYPT=m
583CONFIG_CRYPTO_CRYPTD=m 590CONFIG_CRYPTO_CRYPTD=m
591CONFIG_CRYPTO_MCRYPTD=m
584CONFIG_CRYPTO_TEST=m 592CONFIG_CRYPTO_TEST=m
585CONFIG_CRYPTO_CCM=m 593CONFIG_CRYPTO_CCM=m
586CONFIG_CRYPTO_GCM=m 594CONFIG_CRYPTO_GCM=m
587CONFIG_CRYPTO_CTS=m 595CONFIG_CRYPTO_CHACHA20POLY1305=m
588CONFIG_CRYPTO_LRW=m 596CONFIG_CRYPTO_LRW=m
589CONFIG_CRYPTO_PCBC=m 597CONFIG_CRYPTO_PCBC=m
590CONFIG_CRYPTO_XTS=m 598CONFIG_CRYPTO_KEYWRAP=m
591CONFIG_CRYPTO_XCBC=m 599CONFIG_CRYPTO_XCBC=m
592CONFIG_CRYPTO_VMAC=m 600CONFIG_CRYPTO_VMAC=m
593CONFIG_CRYPTO_CRC32=m 601CONFIG_CRYPTO_CRC32=m
@@ -597,6 +605,7 @@ CONFIG_CRYPTO_RMD160=m
597CONFIG_CRYPTO_RMD256=m 605CONFIG_CRYPTO_RMD256=m
598CONFIG_CRYPTO_RMD320=m 606CONFIG_CRYPTO_RMD320=m
599CONFIG_CRYPTO_SHA512=m 607CONFIG_CRYPTO_SHA512=m
608CONFIG_CRYPTO_SHA3=m
600CONFIG_CRYPTO_TGR192=m 609CONFIG_CRYPTO_TGR192=m
601CONFIG_CRYPTO_WP512=m 610CONFIG_CRYPTO_WP512=m
602CONFIG_CRYPTO_ANUBIS=m 611CONFIG_CRYPTO_ANUBIS=m
@@ -611,10 +620,13 @@ CONFIG_CRYPTO_SEED=m
611CONFIG_CRYPTO_SERPENT=m 620CONFIG_CRYPTO_SERPENT=m
612CONFIG_CRYPTO_TEA=m 621CONFIG_CRYPTO_TEA=m
613CONFIG_CRYPTO_TWOFISH=m 622CONFIG_CRYPTO_TWOFISH=m
623CONFIG_CRYPTO_842=m
614CONFIG_CRYPTO_LZ4=m 624CONFIG_CRYPTO_LZ4=m
615CONFIG_CRYPTO_LZ4HC=m 625CONFIG_CRYPTO_LZ4HC=m
616CONFIG_CRYPTO_USER_API_HASH=m 626CONFIG_CRYPTO_USER_API_HASH=m
617CONFIG_CRYPTO_USER_API_SKCIPHER=m 627CONFIG_CRYPTO_USER_API_SKCIPHER=m
628CONFIG_CRYPTO_USER_API_RNG=m
629CONFIG_CRYPTO_USER_API_AEAD=m
618CONFIG_ZCRYPT=m 630CONFIG_ZCRYPT=m
619CONFIG_CRYPTO_SHA1_S390=m 631CONFIG_CRYPTO_SHA1_S390=m
620CONFIG_CRYPTO_SHA256_S390=m 632CONFIG_CRYPTO_SHA256_S390=m
@@ -623,9 +635,6 @@ CONFIG_CRYPTO_DES_S390=m
623CONFIG_CRYPTO_AES_S390=m 635CONFIG_CRYPTO_AES_S390=m
624CONFIG_CRYPTO_GHASH_S390=m 636CONFIG_CRYPTO_GHASH_S390=m
625CONFIG_CRYPTO_CRC32_S390=y 637CONFIG_CRYPTO_CRC32_S390=y
626CONFIG_ASYMMETRIC_KEY_TYPE=y
627CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m
628CONFIG_X509_CERTIFICATE_PARSER=m
629CONFIG_CRC7=m 638CONFIG_CRC7=m
630CONFIG_CRC8=m 639CONFIG_CRC8=m
631CONFIG_CORDIC=m 640CONFIG_CORDIC=m
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index 2d40ef0a6295..d00e368fb5e6 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -38,7 +38,6 @@ CONFIG_JUMP_LABEL=y
38CONFIG_STATIC_KEYS_SELFTEST=y 38CONFIG_STATIC_KEYS_SELFTEST=y
39CONFIG_MODULES=y 39CONFIG_MODULES=y
40CONFIG_MODULE_UNLOAD=y 40CONFIG_MODULE_UNLOAD=y
41CONFIG_MODVERSIONS=y
42CONFIG_BLK_DEV_INTEGRITY=y 41CONFIG_BLK_DEV_INTEGRITY=y
43CONFIG_PARTITION_ADVANCED=y 42CONFIG_PARTITION_ADVANCED=y
44CONFIG_IBM_PARTITION=y 43CONFIG_IBM_PARTITION=y
@@ -130,8 +129,11 @@ CONFIG_DUMMY=m
130CONFIG_EQUALIZER=m 129CONFIG_EQUALIZER=m
131CONFIG_TUN=m 130CONFIG_TUN=m
132CONFIG_VIRTIO_NET=y 131CONFIG_VIRTIO_NET=y
132# CONFIG_NET_VENDOR_ALACRITECH is not set
133# CONFIG_NET_VENDOR_SOLARFLARE is not set
133# CONFIG_INPUT is not set 134# CONFIG_INPUT is not set
134# CONFIG_SERIO is not set 135# CONFIG_SERIO is not set
136CONFIG_DEVKMEM=y
135CONFIG_RAW_DRIVER=m 137CONFIG_RAW_DRIVER=m
136CONFIG_VIRTIO_BALLOON=y 138CONFIG_VIRTIO_BALLOON=y
137CONFIG_EXT4_FS=y 139CONFIG_EXT4_FS=y
@@ -183,7 +185,6 @@ CONFIG_TRACE_ENUM_MAP_FILE=y
183CONFIG_KPROBES_SANITY_TEST=y 185CONFIG_KPROBES_SANITY_TEST=y
184CONFIG_S390_PTDUMP=y 186CONFIG_S390_PTDUMP=y
185CONFIG_CRYPTO_CRYPTD=m 187CONFIG_CRYPTO_CRYPTD=m
186CONFIG_CRYPTO_AUTHENC=m
187CONFIG_CRYPTO_TEST=m 188CONFIG_CRYPTO_TEST=m
188CONFIG_CRYPTO_CCM=m 189CONFIG_CRYPTO_CCM=m
189CONFIG_CRYPTO_GCM=m 190CONFIG_CRYPTO_GCM=m
diff --git a/arch/s390/include/asm/ctl_reg.h b/arch/s390/include/asm/ctl_reg.h
index d7697ab802f6..8e136b88cdf4 100644
--- a/arch/s390/include/asm/ctl_reg.h
+++ b/arch/s390/include/asm/ctl_reg.h
@@ -15,7 +15,9 @@
15 BUILD_BUG_ON(sizeof(addrtype) != (high - low + 1) * sizeof(long));\ 15 BUILD_BUG_ON(sizeof(addrtype) != (high - low + 1) * sizeof(long));\
16 asm volatile( \ 16 asm volatile( \
17 " lctlg %1,%2,%0\n" \ 17 " lctlg %1,%2,%0\n" \
18 : : "Q" (*(addrtype *)(&array)), "i" (low), "i" (high));\ 18 : \
19 : "Q" (*(addrtype *)(&array)), "i" (low), "i" (high) \
20 : "memory"); \
19} 21}
20 22
21#define __ctl_store(array, low, high) { \ 23#define __ctl_store(array, low, high) { \
diff --git a/arch/x86/events/amd/ibs.c b/arch/x86/events/amd/ibs.c
index 05612a2529c8..496e60391fac 100644
--- a/arch/x86/events/amd/ibs.c
+++ b/arch/x86/events/amd/ibs.c
@@ -1010,7 +1010,7 @@ static __init int amd_ibs_init(void)
1010 * all online cpus. 1010 * all online cpus.
1011 */ 1011 */
1012 cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_IBS_STARTING, 1012 cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_IBS_STARTING,
1013 "perf/x86/amd/ibs:STARTING", 1013 "perf/x86/amd/ibs:starting",
1014 x86_pmu_amd_ibs_starting_cpu, 1014 x86_pmu_amd_ibs_starting_cpu,
1015 x86_pmu_amd_ibs_dying_cpu); 1015 x86_pmu_amd_ibs_dying_cpu);
1016 1016
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index d611cab214a6..eb1484c86bb4 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -3176,13 +3176,16 @@ static void intel_pmu_cpu_starting(int cpu)
3176 3176
3177 if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) { 3177 if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
3178 for_each_cpu(i, topology_sibling_cpumask(cpu)) { 3178 for_each_cpu(i, topology_sibling_cpumask(cpu)) {
3179 struct cpu_hw_events *sibling;
3179 struct intel_excl_cntrs *c; 3180 struct intel_excl_cntrs *c;
3180 3181
3181 c = per_cpu(cpu_hw_events, i).excl_cntrs; 3182 sibling = &per_cpu(cpu_hw_events, i);
3183 c = sibling->excl_cntrs;
3182 if (c && c->core_id == core_id) { 3184 if (c && c->core_id == core_id) {
3183 cpuc->kfree_on_online[1] = cpuc->excl_cntrs; 3185 cpuc->kfree_on_online[1] = cpuc->excl_cntrs;
3184 cpuc->excl_cntrs = c; 3186 cpuc->excl_cntrs = c;
3185 cpuc->excl_thread_id = 1; 3187 if (!sibling->excl_thread_id)
3188 cpuc->excl_thread_id = 1;
3186 break; 3189 break;
3187 } 3190 }
3188 } 3191 }
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 945e512a112a..1e35dd06b090 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -1875,6 +1875,7 @@ static struct irq_chip ioapic_chip __read_mostly = {
1875 .irq_ack = irq_chip_ack_parent, 1875 .irq_ack = irq_chip_ack_parent,
1876 .irq_eoi = ioapic_ack_level, 1876 .irq_eoi = ioapic_ack_level,
1877 .irq_set_affinity = ioapic_set_affinity, 1877 .irq_set_affinity = ioapic_set_affinity,
1878 .irq_retrigger = irq_chip_retrigger_hierarchy,
1878 .flags = IRQCHIP_SKIP_SET_WAKE, 1879 .flags = IRQCHIP_SKIP_SET_WAKE,
1879}; 1880};
1880 1881
@@ -1886,6 +1887,7 @@ static struct irq_chip ioapic_ir_chip __read_mostly = {
1886 .irq_ack = irq_chip_ack_parent, 1887 .irq_ack = irq_chip_ack_parent,
1887 .irq_eoi = ioapic_ir_ack_level, 1888 .irq_eoi = ioapic_ir_ack_level,
1888 .irq_set_affinity = ioapic_set_affinity, 1889 .irq_set_affinity = ioapic_set_affinity,
1890 .irq_retrigger = irq_chip_retrigger_hierarchy,
1889 .flags = IRQCHIP_SKIP_SET_WAKE, 1891 .flags = IRQCHIP_SKIP_SET_WAKE,
1890}; 1892};
1891 1893
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 96dd7dd13ee6..500008f800dc 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -6263,7 +6263,8 @@ static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt)
6263 6263
6264 kvm_x86_ops->patch_hypercall(vcpu, instruction); 6264 kvm_x86_ops->patch_hypercall(vcpu, instruction);
6265 6265
6266 return emulator_write_emulated(ctxt, rip, instruction, 3, NULL); 6266 return emulator_write_emulated(ctxt, rip, instruction, 3,
6267 &ctxt->exception);
6267} 6268}
6268 6269
6269static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu) 6270static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
index 3cd69832d7f4..3961103e9176 100644
--- a/arch/x86/pci/acpi.c
+++ b/arch/x86/pci/acpi.c
@@ -114,6 +114,16 @@ static const struct dmi_system_id pci_crs_quirks[] __initconst = {
114 DMI_MATCH(DMI_BIOS_VERSION, "6JET85WW (1.43 )"), 114 DMI_MATCH(DMI_BIOS_VERSION, "6JET85WW (1.43 )"),
115 }, 115 },
116 }, 116 },
117 /* https://bugzilla.kernel.org/show_bug.cgi?id=42606 */
118 {
119 .callback = set_nouse_crs,
120 .ident = "Supermicro X8DTH",
121 .matches = {
122 DMI_MATCH(DMI_SYS_VENDOR, "Supermicro"),
123 DMI_MATCH(DMI_PRODUCT_NAME, "X8DTH-i/6/iF/6F"),
124 DMI_MATCH(DMI_BIOS_VERSION, "2.0a"),
125 },
126 },
117 127
118 /* https://bugzilla.kernel.org/show_bug.cgi?id=15362 */ 128 /* https://bugzilla.kernel.org/show_bug.cgi?id=15362 */
119 { 129 {
diff --git a/block/blk-mq.c b/block/blk-mq.c
index a8e67a155d04..c3400b5444a7 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -912,7 +912,6 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list)
912static void blk_mq_process_rq_list(struct blk_mq_hw_ctx *hctx) 912static void blk_mq_process_rq_list(struct blk_mq_hw_ctx *hctx)
913{ 913{
914 LIST_HEAD(rq_list); 914 LIST_HEAD(rq_list);
915 LIST_HEAD(driver_list);
916 915
917 if (unlikely(blk_mq_hctx_stopped(hctx))) 916 if (unlikely(blk_mq_hctx_stopped(hctx)))
918 return; 917 return;
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 50a2020b5b72..9fd06eeb1a17 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -271,7 +271,7 @@ static inline int sock_send_bvec(struct nbd_device *nbd, int index,
271static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index) 271static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
272{ 272{
273 struct request *req = blk_mq_rq_from_pdu(cmd); 273 struct request *req = blk_mq_rq_from_pdu(cmd);
274 int result, flags; 274 int result;
275 struct nbd_request request; 275 struct nbd_request request;
276 unsigned long size = blk_rq_bytes(req); 276 unsigned long size = blk_rq_bytes(req);
277 struct bio *bio; 277 struct bio *bio;
@@ -310,7 +310,6 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
310 if (type != NBD_CMD_WRITE) 310 if (type != NBD_CMD_WRITE)
311 return 0; 311 return 0;
312 312
313 flags = 0;
314 bio = req->bio; 313 bio = req->bio;
315 while (bio) { 314 while (bio) {
316 struct bio *next = bio->bi_next; 315 struct bio *next = bio->bi_next;
@@ -319,9 +318,8 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
319 318
320 bio_for_each_segment(bvec, bio, iter) { 319 bio_for_each_segment(bvec, bio, iter) {
321 bool is_last = !next && bio_iter_last(bvec, iter); 320 bool is_last = !next && bio_iter_last(bvec, iter);
321 int flags = is_last ? 0 : MSG_MORE;
322 322
323 if (is_last)
324 flags = MSG_MORE;
325 dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n", 323 dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n",
326 cmd, bvec.bv_len); 324 cmd, bvec.bv_len);
327 result = sock_send_bvec(nbd, index, &bvec, flags); 325 result = sock_send_bvec(nbd, index, &bvec, flags);
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 8b00e79c2683..17857beb4892 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -1862,7 +1862,7 @@ static void config_work_handler(struct work_struct *work)
1862{ 1862{
1863 struct ports_device *portdev; 1863 struct ports_device *portdev;
1864 1864
1865 portdev = container_of(work, struct ports_device, control_work); 1865 portdev = container_of(work, struct ports_device, config_work);
1866 if (!use_multiport(portdev)) { 1866 if (!use_multiport(portdev)) {
1867 struct virtio_device *vdev; 1867 struct virtio_device *vdev;
1868 struct port *port; 1868 struct port *port;
diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c
index 8c8b495cbf0d..cdc092a1d9ef 100644
--- a/drivers/clk/samsung/clk-exynos5420.c
+++ b/drivers/clk/samsung/clk-exynos5420.c
@@ -586,7 +586,7 @@ static const struct samsung_gate_clock exynos5800_gate_clks[] __initconst = {
586 GATE(CLK_ACLK550_CAM, "aclk550_cam", "mout_user_aclk550_cam", 586 GATE(CLK_ACLK550_CAM, "aclk550_cam", "mout_user_aclk550_cam",
587 GATE_BUS_TOP, 24, 0, 0), 587 GATE_BUS_TOP, 24, 0, 0),
588 GATE(CLK_ACLK432_SCALER, "aclk432_scaler", "mout_user_aclk432_scaler", 588 GATE(CLK_ACLK432_SCALER, "aclk432_scaler", "mout_user_aclk432_scaler",
589 GATE_BUS_TOP, 27, 0, 0), 589 GATE_BUS_TOP, 27, CLK_IS_CRITICAL, 0),
590}; 590};
591 591
592static const struct samsung_mux_clock exynos5420_mux_clks[] __initconst = { 592static const struct samsung_mux_clock exynos5420_mux_clks[] __initconst = {
@@ -956,20 +956,20 @@ static const struct samsung_gate_clock exynos5x_gate_clks[] __initconst = {
956 GATE(CLK_SMMU_G2D, "smmu_g2d", "aclk333_g2d", GATE_IP_G2D, 7, 0, 0), 956 GATE(CLK_SMMU_G2D, "smmu_g2d", "aclk333_g2d", GATE_IP_G2D, 7, 0, 0),
957 957
958 GATE(0, "aclk200_fsys", "mout_user_aclk200_fsys", 958 GATE(0, "aclk200_fsys", "mout_user_aclk200_fsys",
959 GATE_BUS_FSYS0, 9, CLK_IGNORE_UNUSED, 0), 959 GATE_BUS_FSYS0, 9, CLK_IS_CRITICAL, 0),
960 GATE(0, "aclk200_fsys2", "mout_user_aclk200_fsys2", 960 GATE(0, "aclk200_fsys2", "mout_user_aclk200_fsys2",
961 GATE_BUS_FSYS0, 10, CLK_IGNORE_UNUSED, 0), 961 GATE_BUS_FSYS0, 10, CLK_IGNORE_UNUSED, 0),
962 962
963 GATE(0, "aclk333_g2d", "mout_user_aclk333_g2d", 963 GATE(0, "aclk333_g2d", "mout_user_aclk333_g2d",
964 GATE_BUS_TOP, 0, CLK_IGNORE_UNUSED, 0), 964 GATE_BUS_TOP, 0, CLK_IGNORE_UNUSED, 0),
965 GATE(0, "aclk266_g2d", "mout_user_aclk266_g2d", 965 GATE(0, "aclk266_g2d", "mout_user_aclk266_g2d",
966 GATE_BUS_TOP, 1, CLK_IGNORE_UNUSED, 0), 966 GATE_BUS_TOP, 1, CLK_IS_CRITICAL, 0),
967 GATE(0, "aclk300_jpeg", "mout_user_aclk300_jpeg", 967 GATE(0, "aclk300_jpeg", "mout_user_aclk300_jpeg",
968 GATE_BUS_TOP, 4, CLK_IGNORE_UNUSED, 0), 968 GATE_BUS_TOP, 4, CLK_IGNORE_UNUSED, 0),
969 GATE(0, "aclk333_432_isp0", "mout_user_aclk333_432_isp0", 969 GATE(0, "aclk333_432_isp0", "mout_user_aclk333_432_isp0",
970 GATE_BUS_TOP, 5, 0, 0), 970 GATE_BUS_TOP, 5, 0, 0),
971 GATE(0, "aclk300_gscl", "mout_user_aclk300_gscl", 971 GATE(0, "aclk300_gscl", "mout_user_aclk300_gscl",
972 GATE_BUS_TOP, 6, CLK_IGNORE_UNUSED, 0), 972 GATE_BUS_TOP, 6, CLK_IS_CRITICAL, 0),
973 GATE(0, "aclk333_432_gscl", "mout_user_aclk333_432_gscl", 973 GATE(0, "aclk333_432_gscl", "mout_user_aclk333_432_gscl",
974 GATE_BUS_TOP, 7, CLK_IGNORE_UNUSED, 0), 974 GATE_BUS_TOP, 7, CLK_IGNORE_UNUSED, 0),
975 GATE(0, "aclk333_432_isp", "mout_user_aclk333_432_isp", 975 GATE(0, "aclk333_432_isp", "mout_user_aclk333_432_isp",
@@ -983,20 +983,20 @@ static const struct samsung_gate_clock exynos5x_gate_clks[] __initconst = {
983 GATE(0, "aclk166", "mout_user_aclk166", 983 GATE(0, "aclk166", "mout_user_aclk166",
984 GATE_BUS_TOP, 14, CLK_IGNORE_UNUSED, 0), 984 GATE_BUS_TOP, 14, CLK_IGNORE_UNUSED, 0),
985 GATE(CLK_ACLK333, "aclk333", "mout_user_aclk333", 985 GATE(CLK_ACLK333, "aclk333", "mout_user_aclk333",
986 GATE_BUS_TOP, 15, CLK_IGNORE_UNUSED, 0), 986 GATE_BUS_TOP, 15, CLK_IS_CRITICAL, 0),
987 GATE(0, "aclk400_isp", "mout_user_aclk400_isp", 987 GATE(0, "aclk400_isp", "mout_user_aclk400_isp",
988 GATE_BUS_TOP, 16, 0, 0), 988 GATE_BUS_TOP, 16, 0, 0),
989 GATE(0, "aclk400_mscl", "mout_user_aclk400_mscl", 989 GATE(0, "aclk400_mscl", "mout_user_aclk400_mscl",
990 GATE_BUS_TOP, 17, 0, 0), 990 GATE_BUS_TOP, 17, 0, 0),
991 GATE(0, "aclk200_disp1", "mout_user_aclk200_disp1", 991 GATE(0, "aclk200_disp1", "mout_user_aclk200_disp1",
992 GATE_BUS_TOP, 18, 0, 0), 992 GATE_BUS_TOP, 18, CLK_IS_CRITICAL, 0),
993 GATE(CLK_SCLK_MPHY_IXTAL24, "sclk_mphy_ixtal24", "mphy_refclk_ixtal24", 993 GATE(CLK_SCLK_MPHY_IXTAL24, "sclk_mphy_ixtal24", "mphy_refclk_ixtal24",
994 GATE_BUS_TOP, 28, 0, 0), 994 GATE_BUS_TOP, 28, 0, 0),
995 GATE(CLK_SCLK_HSIC_12M, "sclk_hsic_12m", "ff_hsic_12m", 995 GATE(CLK_SCLK_HSIC_12M, "sclk_hsic_12m", "ff_hsic_12m",
996 GATE_BUS_TOP, 29, 0, 0), 996 GATE_BUS_TOP, 29, 0, 0),
997 997
998 GATE(0, "aclk300_disp1", "mout_user_aclk300_disp1", 998 GATE(0, "aclk300_disp1", "mout_user_aclk300_disp1",
999 SRC_MASK_TOP2, 24, 0, 0), 999 SRC_MASK_TOP2, 24, CLK_IS_CRITICAL, 0),
1000 1000
1001 GATE(CLK_MAU_EPLL, "mau_epll", "mout_mau_epll_clk", 1001 GATE(CLK_MAU_EPLL, "mau_epll", "mout_mau_epll_clk",
1002 SRC_MASK_TOP7, 20, 0, 0), 1002 SRC_MASK_TOP7, 20, 0, 0),
diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
index 4da1dc2278bd..670ff0f25b67 100644
--- a/drivers/clocksource/exynos_mct.c
+++ b/drivers/clocksource/exynos_mct.c
@@ -495,6 +495,7 @@ static int exynos4_mct_dying_cpu(unsigned int cpu)
495 if (mct_int_type == MCT_INT_SPI) { 495 if (mct_int_type == MCT_INT_SPI) {
496 if (evt->irq != -1) 496 if (evt->irq != -1)
497 disable_irq_nosync(evt->irq); 497 disable_irq_nosync(evt->irq);
498 exynos4_mct_write(0x1, mevt->base + MCT_L_INT_CSTAT_OFFSET);
498 } else { 499 } else {
499 disable_percpu_irq(mct_irqs[MCT_L0_IRQ]); 500 disable_percpu_irq(mct_irqs[MCT_L0_IRQ]);
500 } 501 }
diff --git a/drivers/hid/hid-corsair.c b/drivers/hid/hid-corsair.c
index 717704e9ae07..c0303f61c26a 100644
--- a/drivers/hid/hid-corsair.c
+++ b/drivers/hid/hid-corsair.c
@@ -148,26 +148,36 @@ static enum led_brightness k90_backlight_get(struct led_classdev *led_cdev)
148 struct usb_interface *usbif = to_usb_interface(dev->parent); 148 struct usb_interface *usbif = to_usb_interface(dev->parent);
149 struct usb_device *usbdev = interface_to_usbdev(usbif); 149 struct usb_device *usbdev = interface_to_usbdev(usbif);
150 int brightness; 150 int brightness;
151 char data[8]; 151 char *data;
152
153 data = kmalloc(8, GFP_KERNEL);
154 if (!data)
155 return -ENOMEM;
152 156
153 ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0), 157 ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0),
154 K90_REQUEST_STATUS, 158 K90_REQUEST_STATUS,
155 USB_DIR_IN | USB_TYPE_VENDOR | 159 USB_DIR_IN | USB_TYPE_VENDOR |
156 USB_RECIP_DEVICE, 0, 0, data, 8, 160 USB_RECIP_DEVICE, 0, 0, data, 8,
157 USB_CTRL_SET_TIMEOUT); 161 USB_CTRL_SET_TIMEOUT);
158 if (ret < 0) { 162 if (ret < 5) {
159 dev_warn(dev, "Failed to get K90 initial state (error %d).\n", 163 dev_warn(dev, "Failed to get K90 initial state (error %d).\n",
160 ret); 164 ret);
161 return -EIO; 165 ret = -EIO;
166 goto out;
162 } 167 }
163 brightness = data[4]; 168 brightness = data[4];
164 if (brightness < 0 || brightness > 3) { 169 if (brightness < 0 || brightness > 3) {
165 dev_warn(dev, 170 dev_warn(dev,
166 "Read invalid backlight brightness: %02hhx.\n", 171 "Read invalid backlight brightness: %02hhx.\n",
167 data[4]); 172 data[4]);
168 return -EIO; 173 ret = -EIO;
174 goto out;
169 } 175 }
170 return brightness; 176 ret = brightness;
177out:
178 kfree(data);
179
180 return ret;
171} 181}
172 182
173static enum led_brightness k90_record_led_get(struct led_classdev *led_cdev) 183static enum led_brightness k90_record_led_get(struct led_classdev *led_cdev)
@@ -253,17 +263,22 @@ static ssize_t k90_show_macro_mode(struct device *dev,
253 struct usb_interface *usbif = to_usb_interface(dev->parent); 263 struct usb_interface *usbif = to_usb_interface(dev->parent);
254 struct usb_device *usbdev = interface_to_usbdev(usbif); 264 struct usb_device *usbdev = interface_to_usbdev(usbif);
255 const char *macro_mode; 265 const char *macro_mode;
256 char data[8]; 266 char *data;
267
268 data = kmalloc(2, GFP_KERNEL);
269 if (!data)
270 return -ENOMEM;
257 271
258 ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0), 272 ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0),
259 K90_REQUEST_GET_MODE, 273 K90_REQUEST_GET_MODE,
260 USB_DIR_IN | USB_TYPE_VENDOR | 274 USB_DIR_IN | USB_TYPE_VENDOR |
261 USB_RECIP_DEVICE, 0, 0, data, 2, 275 USB_RECIP_DEVICE, 0, 0, data, 2,
262 USB_CTRL_SET_TIMEOUT); 276 USB_CTRL_SET_TIMEOUT);
263 if (ret < 0) { 277 if (ret < 1) {
264 dev_warn(dev, "Failed to get K90 initial mode (error %d).\n", 278 dev_warn(dev, "Failed to get K90 initial mode (error %d).\n",
265 ret); 279 ret);
266 return -EIO; 280 ret = -EIO;
281 goto out;
267 } 282 }
268 283
269 switch (data[0]) { 284 switch (data[0]) {
@@ -277,10 +292,15 @@ static ssize_t k90_show_macro_mode(struct device *dev,
277 default: 292 default:
278 dev_warn(dev, "K90 in unknown mode: %02hhx.\n", 293 dev_warn(dev, "K90 in unknown mode: %02hhx.\n",
279 data[0]); 294 data[0]);
280 return -EIO; 295 ret = -EIO;
296 goto out;
281 } 297 }
282 298
283 return snprintf(buf, PAGE_SIZE, "%s\n", macro_mode); 299 ret = snprintf(buf, PAGE_SIZE, "%s\n", macro_mode);
300out:
301 kfree(data);
302
303 return ret;
284} 304}
285 305
286static ssize_t k90_store_macro_mode(struct device *dev, 306static ssize_t k90_store_macro_mode(struct device *dev,
@@ -320,26 +340,36 @@ static ssize_t k90_show_current_profile(struct device *dev,
320 struct usb_interface *usbif = to_usb_interface(dev->parent); 340 struct usb_interface *usbif = to_usb_interface(dev->parent);
321 struct usb_device *usbdev = interface_to_usbdev(usbif); 341 struct usb_device *usbdev = interface_to_usbdev(usbif);
322 int current_profile; 342 int current_profile;
323 char data[8]; 343 char *data;
344
345 data = kmalloc(8, GFP_KERNEL);
346 if (!data)
347 return -ENOMEM;
324 348
325 ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0), 349 ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0),
326 K90_REQUEST_STATUS, 350 K90_REQUEST_STATUS,
327 USB_DIR_IN | USB_TYPE_VENDOR | 351 USB_DIR_IN | USB_TYPE_VENDOR |
328 USB_RECIP_DEVICE, 0, 0, data, 8, 352 USB_RECIP_DEVICE, 0, 0, data, 8,
329 USB_CTRL_SET_TIMEOUT); 353 USB_CTRL_SET_TIMEOUT);
330 if (ret < 0) { 354 if (ret < 8) {
331 dev_warn(dev, "Failed to get K90 initial state (error %d).\n", 355 dev_warn(dev, "Failed to get K90 initial state (error %d).\n",
332 ret); 356 ret);
333 return -EIO; 357 ret = -EIO;
358 goto out;
334 } 359 }
335 current_profile = data[7]; 360 current_profile = data[7];
336 if (current_profile < 1 || current_profile > 3) { 361 if (current_profile < 1 || current_profile > 3) {
337 dev_warn(dev, "Read invalid current profile: %02hhx.\n", 362 dev_warn(dev, "Read invalid current profile: %02hhx.\n",
338 data[7]); 363 data[7]);
339 return -EIO; 364 ret = -EIO;
365 goto out;
340 } 366 }
341 367
342 return snprintf(buf, PAGE_SIZE, "%d\n", current_profile); 368 ret = snprintf(buf, PAGE_SIZE, "%d\n", current_profile);
369out:
370 kfree(data);
371
372 return ret;
343} 373}
344 374
345static ssize_t k90_store_current_profile(struct device *dev, 375static ssize_t k90_store_current_profile(struct device *dev,
diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
index b9779bcbd140..8aeca038cc73 100644
--- a/drivers/hid/wacom_sys.c
+++ b/drivers/hid/wacom_sys.c
@@ -740,6 +740,11 @@ static int wacom_add_shared_data(struct hid_device *hdev)
740 return retval; 740 return retval;
741 } 741 }
742 742
743 if (wacom_wac->features.device_type & WACOM_DEVICETYPE_TOUCH)
744 wacom_wac->shared->touch = hdev;
745 else if (wacom_wac->features.device_type & WACOM_DEVICETYPE_PEN)
746 wacom_wac->shared->pen = hdev;
747
743out: 748out:
744 mutex_unlock(&wacom_udev_list_lock); 749 mutex_unlock(&wacom_udev_list_lock);
745 return retval; 750 return retval;
@@ -2036,10 +2041,6 @@ static int wacom_parse_and_register(struct wacom *wacom, bool wireless)
2036 if (error) 2041 if (error)
2037 goto fail; 2042 goto fail;
2038 2043
2039 error = wacom_add_shared_data(hdev);
2040 if (error)
2041 goto fail;
2042
2043 /* 2044 /*
2044 * Bamboo Pad has a generic hid handling for the Pen, and we switch it 2045 * Bamboo Pad has a generic hid handling for the Pen, and we switch it
2045 * into debug mode for the touch part. 2046 * into debug mode for the touch part.
@@ -2080,10 +2081,9 @@ static int wacom_parse_and_register(struct wacom *wacom, bool wireless)
2080 2081
2081 wacom_update_name(wacom, wireless ? " (WL)" : ""); 2082 wacom_update_name(wacom, wireless ? " (WL)" : "");
2082 2083
2083 if (wacom_wac->features.device_type & WACOM_DEVICETYPE_TOUCH) 2084 error = wacom_add_shared_data(hdev);
2084 wacom_wac->shared->touch = hdev; 2085 if (error)
2085 else if (wacom_wac->features.device_type & WACOM_DEVICETYPE_PEN) 2086 goto fail;
2086 wacom_wac->shared->pen = hdev;
2087 2087
2088 if (!(features->device_type & WACOM_DEVICETYPE_WL_MONITOR) && 2088 if (!(features->device_type & WACOM_DEVICETYPE_WL_MONITOR) &&
2089 (features->quirks & WACOM_QUIRK_BATTERY)) { 2089 (features->quirks & WACOM_QUIRK_BATTERY)) {
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index b1a9a3ca6d56..0884dc9554fd 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -2187,6 +2187,16 @@ void wacom_wac_report(struct hid_device *hdev, struct hid_report *report)
2187 2187
2188 wacom_report_events(hdev, report); 2188 wacom_report_events(hdev, report);
2189 2189
2190 /*
2191 * Non-input reports may be sent prior to the device being
2192 * completely initialized. Since only their events need
2193 * to be processed, exit after 'wacom_report_events' has
2194 * been called to prevent potential crashes in the report-
2195 * processing functions.
2196 */
2197 if (report->type != HID_INPUT_REPORT)
2198 return;
2199
2190 if (WACOM_PAD_FIELD(field)) { 2200 if (WACOM_PAD_FIELD(field)) {
2191 wacom_wac_pad_battery_report(hdev, report); 2201 wacom_wac_pad_battery_report(hdev, report);
2192 if (wacom->wacom_wac.pad_input) 2202 if (wacom->wacom_wac.pad_input)
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
index b11c3455b040..e6ea8503f40c 100644
--- a/drivers/mmc/core/mmc_ops.c
+++ b/drivers/mmc/core/mmc_ops.c
@@ -506,9 +506,6 @@ static int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
506 } 506 }
507 } while (busy); 507 } while (busy);
508 508
509 if (host->ops->card_busy && send_status)
510 return mmc_switch_status(card);
511
512 return 0; 509 return 0;
513} 510}
514 511
@@ -577,24 +574,26 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
577 if (!use_busy_signal) 574 if (!use_busy_signal)
578 goto out; 575 goto out;
579 576
580 /* Switch to new timing before poll and check switch status. */
581 if (timing)
582 mmc_set_timing(host, timing);
583
584 /*If SPI or used HW busy detection above, then we don't need to poll. */ 577 /*If SPI or used HW busy detection above, then we don't need to poll. */
585 if (((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp) || 578 if (((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp) ||
586 mmc_host_is_spi(host)) { 579 mmc_host_is_spi(host))
587 if (send_status)
588 err = mmc_switch_status(card);
589 goto out_tim; 580 goto out_tim;
590 }
591 581
592 /* Let's try to poll to find out when the command is completed. */ 582 /* Let's try to poll to find out when the command is completed. */
593 err = mmc_poll_for_busy(card, timeout_ms, send_status, retry_crc_err); 583 err = mmc_poll_for_busy(card, timeout_ms, send_status, retry_crc_err);
584 if (err)
585 goto out;
594 586
595out_tim: 587out_tim:
596 if (err && timing) 588 /* Switch to new timing before check switch status. */
597 mmc_set_timing(host, old_timing); 589 if (timing)
590 mmc_set_timing(host, timing);
591
592 if (send_status) {
593 err = mmc_switch_status(card);
594 if (err && timing)
595 mmc_set_timing(host, old_timing);
596 }
598out: 597out:
599 mmc_retune_release(host); 598 mmc_retune_release(host);
600 599
diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c
index b352760c041e..09739352834c 100644
--- a/drivers/mmc/host/meson-gx-mmc.c
+++ b/drivers/mmc/host/meson-gx-mmc.c
@@ -578,13 +578,15 @@ static irqreturn_t meson_mmc_irq(int irq, void *dev_id)
578{ 578{
579 struct meson_host *host = dev_id; 579 struct meson_host *host = dev_id;
580 struct mmc_request *mrq; 580 struct mmc_request *mrq;
581 struct mmc_command *cmd = host->cmd; 581 struct mmc_command *cmd;
582 u32 irq_en, status, raw_status; 582 u32 irq_en, status, raw_status;
583 irqreturn_t ret = IRQ_HANDLED; 583 irqreturn_t ret = IRQ_HANDLED;
584 584
585 if (WARN_ON(!host)) 585 if (WARN_ON(!host))
586 return IRQ_NONE; 586 return IRQ_NONE;
587 587
588 cmd = host->cmd;
589
588 mrq = host->mrq; 590 mrq = host->mrq;
589 591
590 if (WARN_ON(!mrq)) 592 if (WARN_ON(!mrq))
@@ -670,10 +672,10 @@ static irqreturn_t meson_mmc_irq_thread(int irq, void *dev_id)
670 int ret = IRQ_HANDLED; 672 int ret = IRQ_HANDLED;
671 673
672 if (WARN_ON(!mrq)) 674 if (WARN_ON(!mrq))
673 ret = IRQ_NONE; 675 return IRQ_NONE;
674 676
675 if (WARN_ON(!cmd)) 677 if (WARN_ON(!cmd))
676 ret = IRQ_NONE; 678 return IRQ_NONE;
677 679
678 data = cmd->data; 680 data = cmd->data;
679 if (data) { 681 if (data) {
diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c
index 44ecebd1ea8c..c8b8ac66ff7e 100644
--- a/drivers/mmc/host/mxs-mmc.c
+++ b/drivers/mmc/host/mxs-mmc.c
@@ -309,6 +309,9 @@ static void mxs_mmc_ac(struct mxs_mmc_host *host)
309 cmd0 = BF_SSP(cmd->opcode, CMD0_CMD); 309 cmd0 = BF_SSP(cmd->opcode, CMD0_CMD);
310 cmd1 = cmd->arg; 310 cmd1 = cmd->arg;
311 311
312 if (cmd->opcode == MMC_STOP_TRANSMISSION)
313 cmd0 |= BM_SSP_CMD0_APPEND_8CYC;
314
312 if (host->sdio_irq_en) { 315 if (host->sdio_irq_en) {
313 ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK; 316 ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK;
314 cmd0 |= BM_SSP_CMD0_CONT_CLKING_EN | BM_SSP_CMD0_SLOW_CLKING_EN; 317 cmd0 |= BM_SSP_CMD0_CONT_CLKING_EN | BM_SSP_CMD0_SLOW_CLKING_EN;
@@ -417,8 +420,7 @@ static void mxs_mmc_adtc(struct mxs_mmc_host *host)
417 ssp->base + HW_SSP_BLOCK_SIZE); 420 ssp->base + HW_SSP_BLOCK_SIZE);
418 } 421 }
419 422
420 if ((cmd->opcode == MMC_STOP_TRANSMISSION) || 423 if (cmd->opcode == SD_IO_RW_EXTENDED)
421 (cmd->opcode == SD_IO_RW_EXTENDED))
422 cmd0 |= BM_SSP_CMD0_APPEND_8CYC; 424 cmd0 |= BM_SSP_CMD0_APPEND_8CYC;
423 425
424 cmd1 = cmd->arg; 426 cmd1 = cmd->arg;
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
index 160f695cc09c..278a5a435ab7 100644
--- a/drivers/mmc/host/sdhci-acpi.c
+++ b/drivers/mmc/host/sdhci-acpi.c
@@ -395,7 +395,8 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
395 /* Power on the SDHCI controller and its children */ 395 /* Power on the SDHCI controller and its children */
396 acpi_device_fix_up_power(device); 396 acpi_device_fix_up_power(device);
397 list_for_each_entry(child, &device->children, node) 397 list_for_each_entry(child, &device->children, node)
398 acpi_device_fix_up_power(child); 398 if (child->status.present && child->status.enabled)
399 acpi_device_fix_up_power(child);
399 400
400 if (acpi_bus_get_status(device) || !device->status.present) 401 if (acpi_bus_get_status(device) || !device->status.present)
401 return -ENODEV; 402 return -ENODEV;
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 353a9ddf6b97..9ce5dcb4abd0 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -426,6 +426,7 @@ config MTD_NAND_ORION
426 426
427config MTD_NAND_OXNAS 427config MTD_NAND_OXNAS
428 tristate "NAND Flash support for Oxford Semiconductor SoC" 428 tristate "NAND Flash support for Oxford Semiconductor SoC"
429 depends on HAS_IOMEM
429 help 430 help
430 This enables the NAND flash controller on Oxford Semiconductor SoCs. 431 This enables the NAND flash controller on Oxford Semiconductor SoCs.
431 432
@@ -540,7 +541,7 @@ config MTD_NAND_FSMC
540 Flexible Static Memory Controller (FSMC) 541 Flexible Static Memory Controller (FSMC)
541 542
542config MTD_NAND_XWAY 543config MTD_NAND_XWAY
543 tristate "Support for NAND on Lantiq XWAY SoC" 544 bool "Support for NAND on Lantiq XWAY SoC"
544 depends on LANTIQ && SOC_TYPE_XWAY 545 depends on LANTIQ && SOC_TYPE_XWAY
545 help 546 help
546 Enables support for NAND Flash chips on Lantiq XWAY SoCs. NAND is attached 547 Enables support for NAND Flash chips on Lantiq XWAY SoCs. NAND is attached
diff --git a/drivers/mtd/nand/lpc32xx_mlc.c b/drivers/mtd/nand/lpc32xx_mlc.c
index 5553a5d9efd1..846a66c1b133 100644
--- a/drivers/mtd/nand/lpc32xx_mlc.c
+++ b/drivers/mtd/nand/lpc32xx_mlc.c
@@ -775,7 +775,7 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
775 init_completion(&host->comp_controller); 775 init_completion(&host->comp_controller);
776 776
777 host->irq = platform_get_irq(pdev, 0); 777 host->irq = platform_get_irq(pdev, 0);
778 if ((host->irq < 0) || (host->irq >= NR_IRQS)) { 778 if (host->irq < 0) {
779 dev_err(&pdev->dev, "failed to get platform irq\n"); 779 dev_err(&pdev->dev, "failed to get platform irq\n");
780 res = -EINVAL; 780 res = -EINVAL;
781 goto err_exit3; 781 goto err_exit3;
diff --git a/drivers/mtd/nand/tango_nand.c b/drivers/mtd/nand/tango_nand.c
index 28c7f474be77..4a5e948c62df 100644
--- a/drivers/mtd/nand/tango_nand.c
+++ b/drivers/mtd/nand/tango_nand.c
@@ -632,11 +632,13 @@ static int tango_nand_probe(struct platform_device *pdev)
632 if (IS_ERR(nfc->pbus_base)) 632 if (IS_ERR(nfc->pbus_base))
633 return PTR_ERR(nfc->pbus_base); 633 return PTR_ERR(nfc->pbus_base);
634 634
635 writel_relaxed(MODE_RAW, nfc->pbus_base + PBUS_PAD_MODE);
636
635 clk = clk_get(&pdev->dev, NULL); 637 clk = clk_get(&pdev->dev, NULL);
636 if (IS_ERR(clk)) 638 if (IS_ERR(clk))
637 return PTR_ERR(clk); 639 return PTR_ERR(clk);
638 640
639 nfc->chan = dma_request_chan(&pdev->dev, "nfc_sbox"); 641 nfc->chan = dma_request_chan(&pdev->dev, "rxtx");
640 if (IS_ERR(nfc->chan)) 642 if (IS_ERR(nfc->chan))
641 return PTR_ERR(nfc->chan); 643 return PTR_ERR(nfc->chan);
642 644
diff --git a/drivers/mtd/nand/xway_nand.c b/drivers/mtd/nand/xway_nand.c
index 1f2948c0c458..895101a5e686 100644
--- a/drivers/mtd/nand/xway_nand.c
+++ b/drivers/mtd/nand/xway_nand.c
@@ -232,7 +232,6 @@ static const struct of_device_id xway_nand_match[] = {
232 { .compatible = "lantiq,nand-xway" }, 232 { .compatible = "lantiq,nand-xway" },
233 {}, 233 {},
234}; 234};
235MODULE_DEVICE_TABLE(of, xway_nand_match);
236 235
237static struct platform_driver xway_nand_driver = { 236static struct platform_driver xway_nand_driver = {
238 .probe = xway_nand_probe, 237 .probe = xway_nand_probe,
@@ -243,6 +242,4 @@ static struct platform_driver xway_nand_driver = {
243 }, 242 },
244}; 243};
245 244
246module_platform_driver(xway_nand_driver); 245builtin_platform_driver(xway_nand_driver);
247
248MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 7e8cf213fd81..744ed6ddaf37 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -710,11 +710,8 @@ static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
710 unsigned int c_index, last_c_index, last_tx_cn, num_tx_cbs; 710 unsigned int c_index, last_c_index, last_tx_cn, num_tx_cbs;
711 unsigned int pkts_compl = 0, bytes_compl = 0; 711 unsigned int pkts_compl = 0, bytes_compl = 0;
712 struct bcm_sysport_cb *cb; 712 struct bcm_sysport_cb *cb;
713 struct netdev_queue *txq;
714 u32 hw_ind; 713 u32 hw_ind;
715 714
716 txq = netdev_get_tx_queue(ndev, ring->index);
717
718 /* Compute how many descriptors have been processed since last call */ 715 /* Compute how many descriptors have been processed since last call */
719 hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index)); 716 hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index));
720 c_index = (hw_ind >> RING_CONS_INDEX_SHIFT) & RING_CONS_INDEX_MASK; 717 c_index = (hw_ind >> RING_CONS_INDEX_SHIFT) & RING_CONS_INDEX_MASK;
@@ -745,9 +742,6 @@ static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
745 742
746 ring->c_index = c_index; 743 ring->c_index = c_index;
747 744
748 if (netif_tx_queue_stopped(txq) && pkts_compl)
749 netif_tx_wake_queue(txq);
750
751 netif_dbg(priv, tx_done, ndev, 745 netif_dbg(priv, tx_done, ndev,
752 "ring=%d c_index=%d pkts_compl=%d, bytes_compl=%d\n", 746 "ring=%d c_index=%d pkts_compl=%d, bytes_compl=%d\n",
753 ring->index, ring->c_index, pkts_compl, bytes_compl); 747 ring->index, ring->c_index, pkts_compl, bytes_compl);
@@ -759,16 +753,33 @@ static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
759static unsigned int bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv, 753static unsigned int bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
760 struct bcm_sysport_tx_ring *ring) 754 struct bcm_sysport_tx_ring *ring)
761{ 755{
756 struct netdev_queue *txq;
762 unsigned int released; 757 unsigned int released;
763 unsigned long flags; 758 unsigned long flags;
764 759
760 txq = netdev_get_tx_queue(priv->netdev, ring->index);
761
765 spin_lock_irqsave(&ring->lock, flags); 762 spin_lock_irqsave(&ring->lock, flags);
766 released = __bcm_sysport_tx_reclaim(priv, ring); 763 released = __bcm_sysport_tx_reclaim(priv, ring);
764 if (released)
765 netif_tx_wake_queue(txq);
766
767 spin_unlock_irqrestore(&ring->lock, flags); 767 spin_unlock_irqrestore(&ring->lock, flags);
768 768
769 return released; 769 return released;
770} 770}
771 771
772/* Locked version of the per-ring TX reclaim, but does not wake the queue */
773static void bcm_sysport_tx_clean(struct bcm_sysport_priv *priv,
774 struct bcm_sysport_tx_ring *ring)
775{
776 unsigned long flags;
777
778 spin_lock_irqsave(&ring->lock, flags);
779 __bcm_sysport_tx_reclaim(priv, ring);
780 spin_unlock_irqrestore(&ring->lock, flags);
781}
782
772static int bcm_sysport_tx_poll(struct napi_struct *napi, int budget) 783static int bcm_sysport_tx_poll(struct napi_struct *napi, int budget)
773{ 784{
774 struct bcm_sysport_tx_ring *ring = 785 struct bcm_sysport_tx_ring *ring =
@@ -1252,7 +1263,7 @@ static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv,
1252 napi_disable(&ring->napi); 1263 napi_disable(&ring->napi);
1253 netif_napi_del(&ring->napi); 1264 netif_napi_del(&ring->napi);
1254 1265
1255 bcm_sysport_tx_reclaim(priv, ring); 1266 bcm_sysport_tx_clean(priv, ring);
1256 1267
1257 kfree(ring->cbs); 1268 kfree(ring->cbs);
1258 ring->cbs = NULL; 1269 ring->cbs = NULL;
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index 9211c750e064..2f85b64f01fa 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -47,8 +47,9 @@ struct lmac {
47struct bgx { 47struct bgx {
48 u8 bgx_id; 48 u8 bgx_id;
49 struct lmac lmac[MAX_LMAC_PER_BGX]; 49 struct lmac lmac[MAX_LMAC_PER_BGX];
50 int lmac_count; 50 u8 lmac_count;
51 u8 max_lmac; 51 u8 max_lmac;
52 u8 acpi_lmac_idx;
52 void __iomem *reg_base; 53 void __iomem *reg_base;
53 struct pci_dev *pdev; 54 struct pci_dev *pdev;
54 bool is_dlm; 55 bool is_dlm;
@@ -1143,13 +1144,13 @@ static acpi_status bgx_acpi_register_phy(acpi_handle handle,
1143 if (acpi_bus_get_device(handle, &adev)) 1144 if (acpi_bus_get_device(handle, &adev))
1144 goto out; 1145 goto out;
1145 1146
1146 acpi_get_mac_address(dev, adev, bgx->lmac[bgx->lmac_count].mac); 1147 acpi_get_mac_address(dev, adev, bgx->lmac[bgx->acpi_lmac_idx].mac);
1147 1148
1148 SET_NETDEV_DEV(&bgx->lmac[bgx->lmac_count].netdev, dev); 1149 SET_NETDEV_DEV(&bgx->lmac[bgx->acpi_lmac_idx].netdev, dev);
1149 1150
1150 bgx->lmac[bgx->lmac_count].lmacid = bgx->lmac_count; 1151 bgx->lmac[bgx->acpi_lmac_idx].lmacid = bgx->acpi_lmac_idx;
1152 bgx->acpi_lmac_idx++; /* move to next LMAC */
1151out: 1153out:
1152 bgx->lmac_count++;
1153 return AE_OK; 1154 return AE_OK;
1154} 1155}
1155 1156
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 0e74529a4209..30e855004c57 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -1118,7 +1118,7 @@ int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
1118err: 1118err:
1119 mutex_unlock(&adapter->mcc_lock); 1119 mutex_unlock(&adapter->mcc_lock);
1120 1120
1121 if (status == MCC_STATUS_UNAUTHORIZED_REQUEST) 1121 if (base_status(status) == MCC_STATUS_UNAUTHORIZED_REQUEST)
1122 status = -EPERM; 1122 status = -EPERM;
1123 1123
1124 return status; 1124 return status;
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index ec010ced6c99..1a7f8ad7b9c6 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -318,6 +318,13 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
318 if (ether_addr_equal(addr->sa_data, adapter->dev_mac)) 318 if (ether_addr_equal(addr->sa_data, adapter->dev_mac))
319 return 0; 319 return 0;
320 320
321 /* BE3 VFs without FILTMGMT privilege are not allowed to set its MAC
322 * address
323 */
324 if (BEx_chip(adapter) && be_virtfn(adapter) &&
325 !check_privilege(adapter, BE_PRIV_FILTMGMT))
326 return -EPERM;
327
321 /* if device is not running, copy MAC to netdev->dev_addr */ 328 /* if device is not running, copy MAC to netdev->dev_addr */
322 if (!netif_running(netdev)) 329 if (!netif_running(netdev))
323 goto done; 330 goto done;
@@ -3609,7 +3616,11 @@ static void be_rx_qs_destroy(struct be_adapter *adapter)
3609 3616
3610static void be_disable_if_filters(struct be_adapter *adapter) 3617static void be_disable_if_filters(struct be_adapter *adapter)
3611{ 3618{
3612 be_dev_mac_del(adapter, adapter->pmac_id[0]); 3619 /* Don't delete MAC on BE3 VFs without FILTMGMT privilege */
3620 if (!BEx_chip(adapter) || !be_virtfn(adapter) ||
3621 check_privilege(adapter, BE_PRIV_FILTMGMT))
3622 be_dev_mac_del(adapter, adapter->pmac_id[0]);
3623
3613 be_clear_uc_list(adapter); 3624 be_clear_uc_list(adapter);
3614 be_clear_mc_list(adapter); 3625 be_clear_mc_list(adapter);
3615 3626
@@ -3762,8 +3773,9 @@ static int be_enable_if_filters(struct be_adapter *adapter)
3762 if (status) 3773 if (status)
3763 return status; 3774 return status;
3764 3775
3765 /* For BE3 VFs, the PF programs the initial MAC address */ 3776 /* Don't add MAC on BE3 VFs without FILTMGMT privilege */
3766 if (!(BEx_chip(adapter) && be_virtfn(adapter))) { 3777 if (!BEx_chip(adapter) || !be_virtfn(adapter) ||
3778 check_privilege(adapter, BE_PRIV_FILTMGMT)) {
3767 status = be_dev_mac_add(adapter, adapter->netdev->dev_addr); 3779 status = be_dev_mac_add(adapter, adapter->netdev->dev_addr);
3768 if (status) 3780 if (status)
3769 return status; 3781 return status;
diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c
index a849da92f857..6b8635378f1f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cq.c
@@ -101,13 +101,19 @@ void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn)
101{ 101{
102 struct mlx4_cq *cq; 102 struct mlx4_cq *cq;
103 103
104 rcu_read_lock();
104 cq = radix_tree_lookup(&mlx4_priv(dev)->cq_table.tree, 105 cq = radix_tree_lookup(&mlx4_priv(dev)->cq_table.tree,
105 cqn & (dev->caps.num_cqs - 1)); 106 cqn & (dev->caps.num_cqs - 1));
107 rcu_read_unlock();
108
106 if (!cq) { 109 if (!cq) {
107 mlx4_dbg(dev, "Completion event for bogus CQ %08x\n", cqn); 110 mlx4_dbg(dev, "Completion event for bogus CQ %08x\n", cqn);
108 return; 111 return;
109 } 112 }
110 113
114 /* Acessing the CQ outside of rcu_read_lock is safe, because
115 * the CQ is freed only after interrupt handling is completed.
116 */
111 ++cq->arm_sn; 117 ++cq->arm_sn;
112 118
113 cq->comp(cq); 119 cq->comp(cq);
@@ -118,23 +124,19 @@ void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type)
118 struct mlx4_cq_table *cq_table = &mlx4_priv(dev)->cq_table; 124 struct mlx4_cq_table *cq_table = &mlx4_priv(dev)->cq_table;
119 struct mlx4_cq *cq; 125 struct mlx4_cq *cq;
120 126
121 spin_lock(&cq_table->lock); 127 rcu_read_lock();
122
123 cq = radix_tree_lookup(&cq_table->tree, cqn & (dev->caps.num_cqs - 1)); 128 cq = radix_tree_lookup(&cq_table->tree, cqn & (dev->caps.num_cqs - 1));
124 if (cq) 129 rcu_read_unlock();
125 atomic_inc(&cq->refcount);
126
127 spin_unlock(&cq_table->lock);
128 130
129 if (!cq) { 131 if (!cq) {
130 mlx4_warn(dev, "Async event for bogus CQ %08x\n", cqn); 132 mlx4_dbg(dev, "Async event for bogus CQ %08x\n", cqn);
131 return; 133 return;
132 } 134 }
133 135
136 /* Acessing the CQ outside of rcu_read_lock is safe, because
137 * the CQ is freed only after interrupt handling is completed.
138 */
134 cq->event(cq, event_type); 139 cq->event(cq, event_type);
135
136 if (atomic_dec_and_test(&cq->refcount))
137 complete(&cq->free);
138} 140}
139 141
140static int mlx4_SW2HW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, 142static int mlx4_SW2HW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
@@ -301,9 +303,9 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
301 if (err) 303 if (err)
302 return err; 304 return err;
303 305
304 spin_lock_irq(&cq_table->lock); 306 spin_lock(&cq_table->lock);
305 err = radix_tree_insert(&cq_table->tree, cq->cqn, cq); 307 err = radix_tree_insert(&cq_table->tree, cq->cqn, cq);
306 spin_unlock_irq(&cq_table->lock); 308 spin_unlock(&cq_table->lock);
307 if (err) 309 if (err)
308 goto err_icm; 310 goto err_icm;
309 311
@@ -349,9 +351,9 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
349 return 0; 351 return 0;
350 352
351err_radix: 353err_radix:
352 spin_lock_irq(&cq_table->lock); 354 spin_lock(&cq_table->lock);
353 radix_tree_delete(&cq_table->tree, cq->cqn); 355 radix_tree_delete(&cq_table->tree, cq->cqn);
354 spin_unlock_irq(&cq_table->lock); 356 spin_unlock(&cq_table->lock);
355 357
356err_icm: 358err_icm:
357 mlx4_cq_free_icm(dev, cq->cqn); 359 mlx4_cq_free_icm(dev, cq->cqn);
@@ -370,15 +372,15 @@ void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq)
370 if (err) 372 if (err)
371 mlx4_warn(dev, "HW2SW_CQ failed (%d) for CQN %06x\n", err, cq->cqn); 373 mlx4_warn(dev, "HW2SW_CQ failed (%d) for CQN %06x\n", err, cq->cqn);
372 374
375 spin_lock(&cq_table->lock);
376 radix_tree_delete(&cq_table->tree, cq->cqn);
377 spin_unlock(&cq_table->lock);
378
373 synchronize_irq(priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq->vector)].irq); 379 synchronize_irq(priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq->vector)].irq);
374 if (priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq->vector)].irq != 380 if (priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq->vector)].irq !=
375 priv->eq_table.eq[MLX4_EQ_ASYNC].irq) 381 priv->eq_table.eq[MLX4_EQ_ASYNC].irq)
376 synchronize_irq(priv->eq_table.eq[MLX4_EQ_ASYNC].irq); 382 synchronize_irq(priv->eq_table.eq[MLX4_EQ_ASYNC].irq);
377 383
378 spin_lock_irq(&cq_table->lock);
379 radix_tree_delete(&cq_table->tree, cq->cqn);
380 spin_unlock_irq(&cq_table->lock);
381
382 if (atomic_dec_and_test(&cq->refcount)) 384 if (atomic_dec_and_test(&cq->refcount))
383 complete(&cq->free); 385 complete(&cq->free);
384 wait_for_completion(&cq->free); 386 wait_for_completion(&cq->free);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 4910d9af1933..761f8b12399c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -1748,8 +1748,11 @@ int mlx4_en_start_port(struct net_device *dev)
1748 /* Process all completions if exist to prevent 1748 /* Process all completions if exist to prevent
1749 * the queues freezing if they are full 1749 * the queues freezing if they are full
1750 */ 1750 */
1751 for (i = 0; i < priv->rx_ring_num; i++) 1751 for (i = 0; i < priv->rx_ring_num; i++) {
1752 local_bh_disable();
1752 napi_schedule(&priv->rx_cq[i]->napi); 1753 napi_schedule(&priv->rx_cq[i]->napi);
1754 local_bh_enable();
1755 }
1753 1756
1754 netif_tx_start_all_queues(dev); 1757 netif_tx_start_all_queues(dev);
1755 netif_device_attach(dev); 1758 netif_device_attach(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index cd3638e6fe25..0509996957d9 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -554,8 +554,9 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
554 break; 554 break;
555 555
556 case MLX4_EVENT_TYPE_SRQ_LIMIT: 556 case MLX4_EVENT_TYPE_SRQ_LIMIT:
557 mlx4_dbg(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT\n", 557 mlx4_dbg(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT. srq_no=0x%x, eq 0x%x\n",
558 __func__); 558 __func__, be32_to_cpu(eqe->event.srq.srqn),
559 eq->eqn);
559 case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR: 560 case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR:
560 if (mlx4_is_master(dev)) { 561 if (mlx4_is_master(dev)) {
561 /* forward only to slave owning the SRQ */ 562 /* forward only to slave owning the SRQ */
@@ -570,15 +571,19 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
570 eq->eqn, eq->cons_index, ret); 571 eq->eqn, eq->cons_index, ret);
571 break; 572 break;
572 } 573 }
573 mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x, event: %02x(%02x)\n", 574 if (eqe->type ==
574 __func__, slave, 575 MLX4_EVENT_TYPE_SRQ_CATAS_ERROR)
575 be32_to_cpu(eqe->event.srq.srqn), 576 mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x, event: %02x(%02x)\n",
576 eqe->type, eqe->subtype); 577 __func__, slave,
578 be32_to_cpu(eqe->event.srq.srqn),
579 eqe->type, eqe->subtype);
577 580
578 if (!ret && slave != dev->caps.function) { 581 if (!ret && slave != dev->caps.function) {
579 mlx4_warn(dev, "%s: sending event %02x(%02x) to slave:%d\n", 582 if (eqe->type ==
580 __func__, eqe->type, 583 MLX4_EVENT_TYPE_SRQ_CATAS_ERROR)
581 eqe->subtype, slave); 584 mlx4_warn(dev, "%s: sending event %02x(%02x) to slave:%d\n",
585 __func__, eqe->type,
586 eqe->subtype, slave);
582 mlx4_slave_event(dev, slave, eqe); 587 mlx4_slave_event(dev, slave, eqe);
583 break; 588 break;
584 } 589 }
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 56185a0b827d..1822382212ee 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -2980,6 +2980,9 @@ int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
2980 put_res(dev, slave, srqn, RES_SRQ); 2980 put_res(dev, slave, srqn, RES_SRQ);
2981 qp->srq = srq; 2981 qp->srq = srq;
2982 } 2982 }
2983
2984 /* Save param3 for dynamic changes from VST back to VGT */
2985 qp->param3 = qpc->param3;
2983 put_res(dev, slave, rcqn, RES_CQ); 2986 put_res(dev, slave, rcqn, RES_CQ);
2984 put_res(dev, slave, mtt_base, RES_MTT); 2987 put_res(dev, slave, mtt_base, RES_MTT);
2985 res_end_move(dev, slave, RES_QP, qpn); 2988 res_end_move(dev, slave, RES_QP, qpn);
@@ -3772,7 +3775,6 @@ int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
3772 int qpn = vhcr->in_modifier & 0x7fffff; 3775 int qpn = vhcr->in_modifier & 0x7fffff;
3773 struct res_qp *qp; 3776 struct res_qp *qp;
3774 u8 orig_sched_queue; 3777 u8 orig_sched_queue;
3775 __be32 orig_param3 = qpc->param3;
3776 u8 orig_vlan_control = qpc->pri_path.vlan_control; 3778 u8 orig_vlan_control = qpc->pri_path.vlan_control;
3777 u8 orig_fvl_rx = qpc->pri_path.fvl_rx; 3779 u8 orig_fvl_rx = qpc->pri_path.fvl_rx;
3778 u8 orig_pri_path_fl = qpc->pri_path.fl; 3780 u8 orig_pri_path_fl = qpc->pri_path.fl;
@@ -3814,7 +3816,6 @@ out:
3814 */ 3816 */
3815 if (!err) { 3817 if (!err) {
3816 qp->sched_queue = orig_sched_queue; 3818 qp->sched_queue = orig_sched_queue;
3817 qp->param3 = orig_param3;
3818 qp->vlan_control = orig_vlan_control; 3819 qp->vlan_control = orig_vlan_control;
3819 qp->fvl_rx = orig_fvl_rx; 3820 qp->fvl_rx = orig_fvl_rx;
3820 qp->pri_path_fl = orig_pri_path_fl; 3821 qp->pri_path_fl = orig_pri_path_fl;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 118cea5e5489..46bef6a26a8c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -668,9 +668,12 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
668 int ttl; 668 int ttl;
669 669
670#if IS_ENABLED(CONFIG_INET) 670#if IS_ENABLED(CONFIG_INET)
671 int ret;
672
671 rt = ip_route_output_key(dev_net(mirred_dev), fl4); 673 rt = ip_route_output_key(dev_net(mirred_dev), fl4);
672 if (IS_ERR(rt)) 674 ret = PTR_ERR_OR_ZERO(rt);
673 return PTR_ERR(rt); 675 if (ret)
676 return ret;
674#else 677#else
675 return -EOPNOTSUPP; 678 return -EOPNOTSUPP;
676#endif 679#endif
@@ -741,8 +744,8 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
741 struct flowi4 fl4 = {}; 744 struct flowi4 fl4 = {};
742 char *encap_header; 745 char *encap_header;
743 int encap_size; 746 int encap_size;
744 __be32 saddr = 0; 747 __be32 saddr;
745 int ttl = 0; 748 int ttl;
746 int err; 749 int err;
747 750
748 encap_header = kzalloc(max_encap_size, GFP_KERNEL); 751 encap_header = kzalloc(max_encap_size, GFP_KERNEL);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
index d147ddd97997..0af3338bfcb4 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
@@ -209,21 +209,21 @@ MLXSW_ITEM32(pci, eqe, owner, 0x0C, 0, 1);
209/* pci_eqe_cmd_token 209/* pci_eqe_cmd_token
210 * Command completion event - token 210 * Command completion event - token
211 */ 211 */
212MLXSW_ITEM32(pci, eqe, cmd_token, 0x08, 16, 16); 212MLXSW_ITEM32(pci, eqe, cmd_token, 0x00, 16, 16);
213 213
214/* pci_eqe_cmd_status 214/* pci_eqe_cmd_status
215 * Command completion event - status 215 * Command completion event - status
216 */ 216 */
217MLXSW_ITEM32(pci, eqe, cmd_status, 0x08, 0, 8); 217MLXSW_ITEM32(pci, eqe, cmd_status, 0x00, 0, 8);
218 218
219/* pci_eqe_cmd_out_param_h 219/* pci_eqe_cmd_out_param_h
220 * Command completion event - output parameter - higher part 220 * Command completion event - output parameter - higher part
221 */ 221 */
222MLXSW_ITEM32(pci, eqe, cmd_out_param_h, 0x0C, 0, 32); 222MLXSW_ITEM32(pci, eqe, cmd_out_param_h, 0x04, 0, 32);
223 223
224/* pci_eqe_cmd_out_param_l 224/* pci_eqe_cmd_out_param_l
225 * Command completion event - output parameter - lower part 225 * Command completion event - output parameter - lower part
226 */ 226 */
227MLXSW_ITEM32(pci, eqe, cmd_out_param_l, 0x10, 0, 32); 227MLXSW_ITEM32(pci, eqe, cmd_out_param_l, 0x08, 0, 32);
228 228
229#endif 229#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index d768c7b6c6d6..003093abb170 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -684,6 +684,7 @@ static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
684 dev_kfree_skb_any(skb_orig); 684 dev_kfree_skb_any(skb_orig);
685 return NETDEV_TX_OK; 685 return NETDEV_TX_OK;
686 } 686 }
687 dev_consume_skb_any(skb_orig);
687 } 688 }
688 689
689 if (eth_skb_pad(skb)) { 690 if (eth_skb_pad(skb)) {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
index 150ccf5192a9..2e88115e8735 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
@@ -345,6 +345,7 @@ static netdev_tx_t mlxsw_sx_port_xmit(struct sk_buff *skb,
345 dev_kfree_skb_any(skb_orig); 345 dev_kfree_skb_any(skb_orig);
346 return NETDEV_TX_OK; 346 return NETDEV_TX_OK;
347 } 347 }
348 dev_consume_skb_any(skb_orig);
348 } 349 }
349 mlxsw_sx_txhdr_construct(skb, &tx_info); 350 mlxsw_sx_txhdr_construct(skb, &tx_info);
350 /* TX header is consumed by HW on the way so we shouldn't count its 351 /* TX header is consumed by HW on the way so we shouldn't count its
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-phy.c b/drivers/net/ethernet/qualcomm/emac/emac-phy.c
index 99a14df28b96..2851b4c56570 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-phy.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-phy.c
@@ -201,6 +201,13 @@ int emac_phy_config(struct platform_device *pdev, struct emac_adapter *adpt)
201 else 201 else
202 adpt->phydev = mdiobus_get_phy(mii_bus, phy_addr); 202 adpt->phydev = mdiobus_get_phy(mii_bus, phy_addr);
203 203
204 /* of_phy_find_device() claims a reference to the phydev,
205 * so we do that here manually as well. When the driver
206 * later unloads, it can unilaterally drop the reference
207 * without worrying about ACPI vs DT.
208 */
209 if (adpt->phydev)
210 get_device(&adpt->phydev->mdio.dev);
204 } else { 211 } else {
205 struct device_node *phy_np; 212 struct device_node *phy_np;
206 213
diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c
index 422289c232bc..f46d300bd585 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac.c
@@ -719,8 +719,7 @@ static int emac_probe(struct platform_device *pdev)
719err_undo_napi: 719err_undo_napi:
720 netif_napi_del(&adpt->rx_q.napi); 720 netif_napi_del(&adpt->rx_q.napi);
721err_undo_mdiobus: 721err_undo_mdiobus:
722 if (!has_acpi_companion(&pdev->dev)) 722 put_device(&adpt->phydev->mdio.dev);
723 put_device(&adpt->phydev->mdio.dev);
724 mdiobus_unregister(adpt->mii_bus); 723 mdiobus_unregister(adpt->mii_bus);
725err_undo_clocks: 724err_undo_clocks:
726 emac_clks_teardown(adpt); 725 emac_clks_teardown(adpt);
@@ -740,8 +739,7 @@ static int emac_remove(struct platform_device *pdev)
740 739
741 emac_clks_teardown(adpt); 740 emac_clks_teardown(adpt);
742 741
743 if (!has_acpi_companion(&pdev->dev)) 742 put_device(&adpt->phydev->mdio.dev);
744 put_device(&adpt->phydev->mdio.dev);
745 mdiobus_unregister(adpt->mii_bus); 743 mdiobus_unregister(adpt->mii_bus);
746 free_netdev(netdev); 744 free_netdev(netdev);
747 745
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 92d7692c840d..89ac1e3f6175 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -926,14 +926,10 @@ static int ravb_poll(struct napi_struct *napi, int budget)
926 /* Receive error message handling */ 926 /* Receive error message handling */
927 priv->rx_over_errors = priv->stats[RAVB_BE].rx_over_errors; 927 priv->rx_over_errors = priv->stats[RAVB_BE].rx_over_errors;
928 priv->rx_over_errors += priv->stats[RAVB_NC].rx_over_errors; 928 priv->rx_over_errors += priv->stats[RAVB_NC].rx_over_errors;
929 if (priv->rx_over_errors != ndev->stats.rx_over_errors) { 929 if (priv->rx_over_errors != ndev->stats.rx_over_errors)
930 ndev->stats.rx_over_errors = priv->rx_over_errors; 930 ndev->stats.rx_over_errors = priv->rx_over_errors;
931 netif_err(priv, rx_err, ndev, "Receive Descriptor Empty\n"); 931 if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors)
932 }
933 if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors) {
934 ndev->stats.rx_fifo_errors = priv->rx_fifo_errors; 932 ndev->stats.rx_fifo_errors = priv->rx_fifo_errors;
935 netif_err(priv, rx_err, ndev, "Receive FIFO Overflow\n");
936 }
937out: 933out:
938 return budget - quota; 934 return budget - quota;
939} 935}
@@ -1508,6 +1504,19 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1508 buffer = PTR_ALIGN(priv->tx_align[q], DPTR_ALIGN) + 1504 buffer = PTR_ALIGN(priv->tx_align[q], DPTR_ALIGN) +
1509 entry / NUM_TX_DESC * DPTR_ALIGN; 1505 entry / NUM_TX_DESC * DPTR_ALIGN;
1510 len = PTR_ALIGN(skb->data, DPTR_ALIGN) - skb->data; 1506 len = PTR_ALIGN(skb->data, DPTR_ALIGN) - skb->data;
1507 /* Zero length DMA descriptors are problematic as they seem to
1508 * terminate DMA transfers. Avoid them by simply using a length of
1509 * DPTR_ALIGN (4) when skb data is aligned to DPTR_ALIGN.
1510 *
1511 * As skb is guaranteed to have at least ETH_ZLEN (60) bytes of
1512 * data by the call to skb_put_padto() above this is safe with
1513 * respect to both the length of the first DMA descriptor (len)
1514 * overflowing the available data and the length of the second DMA
1515 * descriptor (skb->len - len) being negative.
1516 */
1517 if (len == 0)
1518 len = DPTR_ALIGN;
1519
1511 memcpy(buffer, skb->data, len); 1520 memcpy(buffer, skb->data, len);
1512 dma_addr = dma_map_single(ndev->dev.parent, buffer, len, DMA_TO_DEVICE); 1521 dma_addr = dma_map_single(ndev->dev.parent, buffer, len, DMA_TO_DEVICE);
1513 if (dma_mapping_error(ndev->dev.parent, dma_addr)) 1522 if (dma_mapping_error(ndev->dev.parent, dma_addr))
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index a276a32d57f2..e3f6389e1b01 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -3326,9 +3326,9 @@ int stmmac_dvr_probe(struct device *device,
3326 (priv->plat->maxmtu >= ndev->min_mtu)) 3326 (priv->plat->maxmtu >= ndev->min_mtu))
3327 ndev->max_mtu = priv->plat->maxmtu; 3327 ndev->max_mtu = priv->plat->maxmtu;
3328 else if (priv->plat->maxmtu < ndev->min_mtu) 3328 else if (priv->plat->maxmtu < ndev->min_mtu)
3329 netdev_warn(priv->dev, 3329 dev_warn(priv->device,
3330 "%s: warning: maxmtu having invalid value (%d)\n", 3330 "%s: warning: maxmtu having invalid value (%d)\n",
3331 __func__, priv->plat->maxmtu); 3331 __func__, priv->plat->maxmtu);
3332 3332
3333 if (flow_ctrl) 3333 if (flow_ctrl)
3334 priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */ 3334 priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */
@@ -3340,7 +3340,8 @@ int stmmac_dvr_probe(struct device *device,
3340 */ 3340 */
3341 if ((priv->synopsys_id >= DWMAC_CORE_3_50) && (!priv->plat->riwt_off)) { 3341 if ((priv->synopsys_id >= DWMAC_CORE_3_50) && (!priv->plat->riwt_off)) {
3342 priv->use_riwt = 1; 3342 priv->use_riwt = 1;
3343 netdev_info(priv->dev, "Enable RX Mitigation via HW Watchdog Timer\n"); 3343 dev_info(priv->device,
3344 "Enable RX Mitigation via HW Watchdog Timer\n");
3344 } 3345 }
3345 3346
3346 netif_napi_add(ndev, &priv->napi, stmmac_poll, 64); 3347 netif_napi_add(ndev, &priv->napi, stmmac_poll, 64);
@@ -3366,17 +3367,17 @@ int stmmac_dvr_probe(struct device *device,
3366 /* MDIO bus Registration */ 3367 /* MDIO bus Registration */
3367 ret = stmmac_mdio_register(ndev); 3368 ret = stmmac_mdio_register(ndev);
3368 if (ret < 0) { 3369 if (ret < 0) {
3369 netdev_err(priv->dev, 3370 dev_err(priv->device,
3370 "%s: MDIO bus (id: %d) registration failed", 3371 "%s: MDIO bus (id: %d) registration failed",
3371 __func__, priv->plat->bus_id); 3372 __func__, priv->plat->bus_id);
3372 goto error_mdio_register; 3373 goto error_mdio_register;
3373 } 3374 }
3374 } 3375 }
3375 3376
3376 ret = register_netdev(ndev); 3377 ret = register_netdev(ndev);
3377 if (ret) { 3378 if (ret) {
3378 netdev_err(priv->dev, "%s: ERROR %i registering the device\n", 3379 dev_err(priv->device, "%s: ERROR %i registering the device\n",
3379 __func__, ret); 3380 __func__, ret);
3380 goto error_netdev_register; 3381 goto error_netdev_register;
3381 } 3382 }
3382 3383
diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c
index 77c88fcf2b86..9b8a30bf939b 100644
--- a/drivers/net/ethernet/ti/cpmac.c
+++ b/drivers/net/ethernet/ti/cpmac.c
@@ -1210,7 +1210,7 @@ int cpmac_init(void)
1210 goto fail_alloc; 1210 goto fail_alloc;
1211 } 1211 }
1212 1212
1213#warning FIXME: unhardcode gpio&reset bits 1213 /* FIXME: unhardcode gpio&reset bits */
1214 ar7_gpio_disable(26); 1214 ar7_gpio_disable(26);
1215 ar7_gpio_disable(27); 1215 ar7_gpio_disable(27);
1216 ar7_device_reset(AR7_RESET_BIT_CPMAC_LO); 1216 ar7_device_reset(AR7_RESET_BIT_CPMAC_LO);
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index c9414c054852..fcab8019dda0 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -659,6 +659,7 @@ int netvsc_recv_callback(struct hv_device *device_obj,
659 * policy filters on the host). Deliver these via the VF 659 * policy filters on the host). Deliver these via the VF
660 * interface in the guest. 660 * interface in the guest.
661 */ 661 */
662 rcu_read_lock();
662 vf_netdev = rcu_dereference(net_device_ctx->vf_netdev); 663 vf_netdev = rcu_dereference(net_device_ctx->vf_netdev);
663 if (vf_netdev && (vf_netdev->flags & IFF_UP)) 664 if (vf_netdev && (vf_netdev->flags & IFF_UP))
664 net = vf_netdev; 665 net = vf_netdev;
@@ -667,6 +668,7 @@ int netvsc_recv_callback(struct hv_device *device_obj,
667 skb = netvsc_alloc_recv_skb(net, packet, csum_info, *data, vlan_tci); 668 skb = netvsc_alloc_recv_skb(net, packet, csum_info, *data, vlan_tci);
668 if (unlikely(!skb)) { 669 if (unlikely(!skb)) {
669 ++net->stats.rx_dropped; 670 ++net->stats.rx_dropped;
671 rcu_read_unlock();
670 return NVSP_STAT_FAIL; 672 return NVSP_STAT_FAIL;
671 } 673 }
672 674
@@ -696,6 +698,7 @@ int netvsc_recv_callback(struct hv_device *device_obj,
696 * TODO - use NAPI? 698 * TODO - use NAPI?
697 */ 699 */
698 netif_rx(skb); 700 netif_rx(skb);
701 rcu_read_unlock();
699 702
700 return 0; 703 return 0;
701} 704}
diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c
index 46d53a6c8cf8..76ba7ecfe142 100644
--- a/drivers/net/ieee802154/at86rf230.c
+++ b/drivers/net/ieee802154/at86rf230.c
@@ -1715,9 +1715,9 @@ static int at86rf230_probe(struct spi_device *spi)
1715 /* Reset */ 1715 /* Reset */
1716 if (gpio_is_valid(rstn)) { 1716 if (gpio_is_valid(rstn)) {
1717 udelay(1); 1717 udelay(1);
1718 gpio_set_value(rstn, 0); 1718 gpio_set_value_cansleep(rstn, 0);
1719 udelay(1); 1719 udelay(1);
1720 gpio_set_value(rstn, 1); 1720 gpio_set_value_cansleep(rstn, 1);
1721 usleep_range(120, 240); 1721 usleep_range(120, 240);
1722 } 1722 }
1723 1723
diff --git a/drivers/net/ieee802154/atusb.c b/drivers/net/ieee802154/atusb.c
index 1253f864737a..ef688518ad77 100644
--- a/drivers/net/ieee802154/atusb.c
+++ b/drivers/net/ieee802154/atusb.c
@@ -117,13 +117,26 @@ static int atusb_read_reg(struct atusb *atusb, uint8_t reg)
117{ 117{
118 struct usb_device *usb_dev = atusb->usb_dev; 118 struct usb_device *usb_dev = atusb->usb_dev;
119 int ret; 119 int ret;
120 uint8_t *buffer;
120 uint8_t value; 121 uint8_t value;
121 122
123 buffer = kmalloc(1, GFP_KERNEL);
124 if (!buffer)
125 return -ENOMEM;
126
122 dev_dbg(&usb_dev->dev, "atusb: reg = 0x%x\n", reg); 127 dev_dbg(&usb_dev->dev, "atusb: reg = 0x%x\n", reg);
123 ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0), 128 ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0),
124 ATUSB_REG_READ, ATUSB_REQ_FROM_DEV, 129 ATUSB_REG_READ, ATUSB_REQ_FROM_DEV,
125 0, reg, &value, 1, 1000); 130 0, reg, buffer, 1, 1000);
126 return ret >= 0 ? value : ret; 131
132 if (ret >= 0) {
133 value = buffer[0];
134 kfree(buffer);
135 return value;
136 } else {
137 kfree(buffer);
138 return ret;
139 }
127} 140}
128 141
129static int atusb_write_subreg(struct atusb *atusb, uint8_t reg, uint8_t mask, 142static int atusb_write_subreg(struct atusb *atusb, uint8_t reg, uint8_t mask,
@@ -549,13 +562,6 @@ static int
549atusb_set_frame_retries(struct ieee802154_hw *hw, s8 retries) 562atusb_set_frame_retries(struct ieee802154_hw *hw, s8 retries)
550{ 563{
551 struct atusb *atusb = hw->priv; 564 struct atusb *atusb = hw->priv;
552 struct device *dev = &atusb->usb_dev->dev;
553
554 if (atusb->fw_ver_maj == 0 && atusb->fw_ver_min < 3) {
555 dev_info(dev, "Automatic frame retransmission is only available from "
556 "firmware version 0.3. Please update if you want this feature.");
557 return -EINVAL;
558 }
559 565
560 return atusb_write_subreg(atusb, SR_MAX_FRAME_RETRIES, retries); 566 return atusb_write_subreg(atusb, SR_MAX_FRAME_RETRIES, retries);
561} 567}
@@ -608,9 +614,13 @@ static const struct ieee802154_ops atusb_ops = {
608static int atusb_get_and_show_revision(struct atusb *atusb) 614static int atusb_get_and_show_revision(struct atusb *atusb)
609{ 615{
610 struct usb_device *usb_dev = atusb->usb_dev; 616 struct usb_device *usb_dev = atusb->usb_dev;
611 unsigned char buffer[3]; 617 unsigned char *buffer;
612 int ret; 618 int ret;
613 619
620 buffer = kmalloc(3, GFP_KERNEL);
621 if (!buffer)
622 return -ENOMEM;
623
614 /* Get a couple of the ATMega Firmware values */ 624 /* Get a couple of the ATMega Firmware values */
615 ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0), 625 ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0),
616 ATUSB_ID, ATUSB_REQ_FROM_DEV, 0, 0, 626 ATUSB_ID, ATUSB_REQ_FROM_DEV, 0, 0,
@@ -631,15 +641,20 @@ static int atusb_get_and_show_revision(struct atusb *atusb)
631 dev_info(&usb_dev->dev, "Please update to version 0.2 or newer"); 641 dev_info(&usb_dev->dev, "Please update to version 0.2 or newer");
632 } 642 }
633 643
644 kfree(buffer);
634 return ret; 645 return ret;
635} 646}
636 647
637static int atusb_get_and_show_build(struct atusb *atusb) 648static int atusb_get_and_show_build(struct atusb *atusb)
638{ 649{
639 struct usb_device *usb_dev = atusb->usb_dev; 650 struct usb_device *usb_dev = atusb->usb_dev;
640 char build[ATUSB_BUILD_SIZE + 1]; 651 char *build;
641 int ret; 652 int ret;
642 653
654 build = kmalloc(ATUSB_BUILD_SIZE + 1, GFP_KERNEL);
655 if (!build)
656 return -ENOMEM;
657
643 ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0), 658 ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0),
644 ATUSB_BUILD, ATUSB_REQ_FROM_DEV, 0, 0, 659 ATUSB_BUILD, ATUSB_REQ_FROM_DEV, 0, 0,
645 build, ATUSB_BUILD_SIZE, 1000); 660 build, ATUSB_BUILD_SIZE, 1000);
@@ -648,6 +663,7 @@ static int atusb_get_and_show_build(struct atusb *atusb)
648 dev_info(&usb_dev->dev, "Firmware: build %s\n", build); 663 dev_info(&usb_dev->dev, "Firmware: build %s\n", build);
649 } 664 }
650 665
666 kfree(build);
651 return ret; 667 return ret;
652} 668}
653 669
@@ -698,7 +714,7 @@ fail:
698static int atusb_set_extended_addr(struct atusb *atusb) 714static int atusb_set_extended_addr(struct atusb *atusb)
699{ 715{
700 struct usb_device *usb_dev = atusb->usb_dev; 716 struct usb_device *usb_dev = atusb->usb_dev;
701 unsigned char buffer[IEEE802154_EXTENDED_ADDR_LEN]; 717 unsigned char *buffer;
702 __le64 extended_addr; 718 __le64 extended_addr;
703 u64 addr; 719 u64 addr;
704 int ret; 720 int ret;
@@ -710,12 +726,20 @@ static int atusb_set_extended_addr(struct atusb *atusb)
710 return 0; 726 return 0;
711 } 727 }
712 728
729 buffer = kmalloc(IEEE802154_EXTENDED_ADDR_LEN, GFP_KERNEL);
730 if (!buffer)
731 return -ENOMEM;
732
713 /* Firmware is new enough so we fetch the address from EEPROM */ 733 /* Firmware is new enough so we fetch the address from EEPROM */
714 ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0), 734 ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0),
715 ATUSB_EUI64_READ, ATUSB_REQ_FROM_DEV, 0, 0, 735 ATUSB_EUI64_READ, ATUSB_REQ_FROM_DEV, 0, 0,
716 buffer, IEEE802154_EXTENDED_ADDR_LEN, 1000); 736 buffer, IEEE802154_EXTENDED_ADDR_LEN, 1000);
717 if (ret < 0) 737 if (ret < 0) {
718 dev_err(&usb_dev->dev, "failed to fetch extended address\n"); 738 dev_err(&usb_dev->dev, "failed to fetch extended address, random address set\n");
739 ieee802154_random_extended_addr(&atusb->hw->phy->perm_extended_addr);
740 kfree(buffer);
741 return ret;
742 }
719 743
720 memcpy(&extended_addr, buffer, IEEE802154_EXTENDED_ADDR_LEN); 744 memcpy(&extended_addr, buffer, IEEE802154_EXTENDED_ADDR_LEN);
721 /* Check if read address is not empty and the unicast bit is set correctly */ 745 /* Check if read address is not empty and the unicast bit is set correctly */
@@ -729,6 +753,7 @@ static int atusb_set_extended_addr(struct atusb *atusb)
729 &addr); 753 &addr);
730 } 754 }
731 755
756 kfree(buffer);
732 return ret; 757 return ret;
733} 758}
734 759
@@ -770,8 +795,7 @@ static int atusb_probe(struct usb_interface *interface,
770 795
771 hw->parent = &usb_dev->dev; 796 hw->parent = &usb_dev->dev;
772 hw->flags = IEEE802154_HW_TX_OMIT_CKSUM | IEEE802154_HW_AFILT | 797 hw->flags = IEEE802154_HW_TX_OMIT_CKSUM | IEEE802154_HW_AFILT |
773 IEEE802154_HW_PROMISCUOUS | IEEE802154_HW_CSMA_PARAMS | 798 IEEE802154_HW_PROMISCUOUS | IEEE802154_HW_CSMA_PARAMS;
774 IEEE802154_HW_FRAME_RETRIES;
775 799
776 hw->phy->flags = WPAN_PHY_FLAG_TXPOWER | WPAN_PHY_FLAG_CCA_ED_LEVEL | 800 hw->phy->flags = WPAN_PHY_FLAG_TXPOWER | WPAN_PHY_FLAG_CCA_ED_LEVEL |
777 WPAN_PHY_FLAG_CCA_MODE; 801 WPAN_PHY_FLAG_CCA_MODE;
@@ -800,6 +824,9 @@ static int atusb_probe(struct usb_interface *interface,
800 atusb_get_and_show_build(atusb); 824 atusb_get_and_show_build(atusb);
801 atusb_set_extended_addr(atusb); 825 atusb_set_extended_addr(atusb);
802 826
827 if (atusb->fw_ver_maj >= 0 && atusb->fw_ver_min >= 3)
828 hw->flags |= IEEE802154_HW_FRAME_RETRIES;
829
803 ret = atusb_get_and_clear_error(atusb); 830 ret = atusb_get_and_clear_error(atusb);
804 if (ret) { 831 if (ret) {
805 dev_err(&atusb->usb_dev->dev, 832 dev_err(&atusb->usb_dev->dev,
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
index e84ae084e259..ca1b462bf7b2 100644
--- a/drivers/net/phy/dp83867.c
+++ b/drivers/net/phy/dp83867.c
@@ -132,12 +132,16 @@ static int dp83867_of_init(struct phy_device *phydev)
132 132
133 ret = of_property_read_u32(of_node, "ti,rx-internal-delay", 133 ret = of_property_read_u32(of_node, "ti,rx-internal-delay",
134 &dp83867->rx_id_delay); 134 &dp83867->rx_id_delay);
135 if (ret) 135 if (ret &&
136 (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
137 phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID))
136 return ret; 138 return ret;
137 139
138 ret = of_property_read_u32(of_node, "ti,tx-internal-delay", 140 ret = of_property_read_u32(of_node, "ti,tx-internal-delay",
139 &dp83867->tx_id_delay); 141 &dp83867->tx_id_delay);
140 if (ret) 142 if (ret &&
143 (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
144 phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID))
141 return ret; 145 return ret;
142 146
143 return of_property_read_u32(of_node, "ti,fifo-depth", 147 return of_property_read_u32(of_node, "ti,fifo-depth",
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index be418563cb18..f3b48ad90865 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -1730,7 +1730,7 @@ static u8 r8152_rx_csum(struct r8152 *tp, struct rx_desc *rx_desc)
1730 u8 checksum = CHECKSUM_NONE; 1730 u8 checksum = CHECKSUM_NONE;
1731 u32 opts2, opts3; 1731 u32 opts2, opts3;
1732 1732
1733 if (tp->version == RTL_VER_01 || tp->version == RTL_VER_02) 1733 if (!(tp->netdev->features & NETIF_F_RXCSUM))
1734 goto return_result; 1734 goto return_result;
1735 1735
1736 opts2 = le32_to_cpu(rx_desc->opts2); 1736 opts2 = le32_to_cpu(rx_desc->opts2);
@@ -4356,6 +4356,11 @@ static int rtl8152_probe(struct usb_interface *intf,
4356 NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | 4356 NETIF_F_HIGHDMA | NETIF_F_FRAGLIST |
4357 NETIF_F_IPV6_CSUM | NETIF_F_TSO6; 4357 NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
4358 4358
4359 if (tp->version == RTL_VER_01) {
4360 netdev->features &= ~NETIF_F_RXCSUM;
4361 netdev->hw_features &= ~NETIF_F_RXCSUM;
4362 }
4363
4359 netdev->ethtool_ops = &ops; 4364 netdev->ethtool_ops = &ops;
4360 netif_set_gso_max_size(netdev, RTL_LIMITED_TSO_SIZE); 4365 netif_set_gso_max_size(netdev, RTL_LIMITED_TSO_SIZE);
4361 4366
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index bb70dd5723b5..ca7196c40060 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -1798,7 +1798,7 @@ static int vxlan_build_skb(struct sk_buff *skb, struct dst_entry *dst,
1798static struct rtable *vxlan_get_route(struct vxlan_dev *vxlan, struct net_device *dev, 1798static struct rtable *vxlan_get_route(struct vxlan_dev *vxlan, struct net_device *dev,
1799 struct vxlan_sock *sock4, 1799 struct vxlan_sock *sock4,
1800 struct sk_buff *skb, int oif, u8 tos, 1800 struct sk_buff *skb, int oif, u8 tos,
1801 __be32 daddr, __be32 *saddr, 1801 __be32 daddr, __be32 *saddr, __be16 dport, __be16 sport,
1802 struct dst_cache *dst_cache, 1802 struct dst_cache *dst_cache,
1803 const struct ip_tunnel_info *info) 1803 const struct ip_tunnel_info *info)
1804{ 1804{
@@ -1824,6 +1824,8 @@ static struct rtable *vxlan_get_route(struct vxlan_dev *vxlan, struct net_device
1824 fl4.flowi4_proto = IPPROTO_UDP; 1824 fl4.flowi4_proto = IPPROTO_UDP;
1825 fl4.daddr = daddr; 1825 fl4.daddr = daddr;
1826 fl4.saddr = *saddr; 1826 fl4.saddr = *saddr;
1827 fl4.fl4_dport = dport;
1828 fl4.fl4_sport = sport;
1827 1829
1828 rt = ip_route_output_key(vxlan->net, &fl4); 1830 rt = ip_route_output_key(vxlan->net, &fl4);
1829 if (likely(!IS_ERR(rt))) { 1831 if (likely(!IS_ERR(rt))) {
@@ -1851,6 +1853,7 @@ static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan,
1851 __be32 label, 1853 __be32 label,
1852 const struct in6_addr *daddr, 1854 const struct in6_addr *daddr,
1853 struct in6_addr *saddr, 1855 struct in6_addr *saddr,
1856 __be16 dport, __be16 sport,
1854 struct dst_cache *dst_cache, 1857 struct dst_cache *dst_cache,
1855 const struct ip_tunnel_info *info) 1858 const struct ip_tunnel_info *info)
1856{ 1859{
@@ -1877,6 +1880,8 @@ static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan,
1877 fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tos), label); 1880 fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tos), label);
1878 fl6.flowi6_mark = skb->mark; 1881 fl6.flowi6_mark = skb->mark;
1879 fl6.flowi6_proto = IPPROTO_UDP; 1882 fl6.flowi6_proto = IPPROTO_UDP;
1883 fl6.fl6_dport = dport;
1884 fl6.fl6_sport = sport;
1880 1885
1881 err = ipv6_stub->ipv6_dst_lookup(vxlan->net, 1886 err = ipv6_stub->ipv6_dst_lookup(vxlan->net,
1882 sock6->sock->sk, 1887 sock6->sock->sk,
@@ -2068,6 +2073,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
2068 rdst ? rdst->remote_ifindex : 0, tos, 2073 rdst ? rdst->remote_ifindex : 0, tos,
2069 dst->sin.sin_addr.s_addr, 2074 dst->sin.sin_addr.s_addr,
2070 &src->sin.sin_addr.s_addr, 2075 &src->sin.sin_addr.s_addr,
2076 dst_port, src_port,
2071 dst_cache, info); 2077 dst_cache, info);
2072 if (IS_ERR(rt)) { 2078 if (IS_ERR(rt)) {
2073 err = PTR_ERR(rt); 2079 err = PTR_ERR(rt);
@@ -2104,6 +2110,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
2104 rdst ? rdst->remote_ifindex : 0, tos, 2110 rdst ? rdst->remote_ifindex : 0, tos,
2105 label, &dst->sin6.sin6_addr, 2111 label, &dst->sin6.sin6_addr,
2106 &src->sin6.sin6_addr, 2112 &src->sin6.sin6_addr,
2113 dst_port, src_port,
2107 dst_cache, info); 2114 dst_cache, info);
2108 if (IS_ERR(ndst)) { 2115 if (IS_ERR(ndst)) {
2109 err = PTR_ERR(ndst); 2116 err = PTR_ERR(ndst);
@@ -2430,7 +2437,7 @@ static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
2430 2437
2431 rt = vxlan_get_route(vxlan, dev, sock4, skb, 0, info->key.tos, 2438 rt = vxlan_get_route(vxlan, dev, sock4, skb, 0, info->key.tos,
2432 info->key.u.ipv4.dst, 2439 info->key.u.ipv4.dst,
2433 &info->key.u.ipv4.src, NULL, info); 2440 &info->key.u.ipv4.src, dport, sport, NULL, info);
2434 if (IS_ERR(rt)) 2441 if (IS_ERR(rt))
2435 return PTR_ERR(rt); 2442 return PTR_ERR(rt);
2436 ip_rt_put(rt); 2443 ip_rt_put(rt);
@@ -2441,7 +2448,7 @@ static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
2441 2448
2442 ndst = vxlan6_get_route(vxlan, dev, sock6, skb, 0, info->key.tos, 2449 ndst = vxlan6_get_route(vxlan, dev, sock6, skb, 0, info->key.tos,
2443 info->key.label, &info->key.u.ipv6.dst, 2450 info->key.label, &info->key.u.ipv6.dst,
2444 &info->key.u.ipv6.src, NULL, info); 2451 &info->key.u.ipv6.src, dport, sport, NULL, info);
2445 if (IS_ERR(ndst)) 2452 if (IS_ERR(ndst))
2446 return PTR_ERR(ndst); 2453 return PTR_ERR(ndst);
2447 dst_release(ndst); 2454 dst_release(ndst);
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
index 6307088b375f..a518cb1b59d4 100644
--- a/drivers/nvdimm/namespace_devs.c
+++ b/drivers/nvdimm/namespace_devs.c
@@ -957,6 +957,7 @@ static ssize_t __size_store(struct device *dev, unsigned long long val)
957{ 957{
958 resource_size_t allocated = 0, available = 0; 958 resource_size_t allocated = 0, available = 0;
959 struct nd_region *nd_region = to_nd_region(dev->parent); 959 struct nd_region *nd_region = to_nd_region(dev->parent);
960 struct nd_namespace_common *ndns = to_ndns(dev);
960 struct nd_mapping *nd_mapping; 961 struct nd_mapping *nd_mapping;
961 struct nvdimm_drvdata *ndd; 962 struct nvdimm_drvdata *ndd;
962 struct nd_label_id label_id; 963 struct nd_label_id label_id;
@@ -964,7 +965,7 @@ static ssize_t __size_store(struct device *dev, unsigned long long val)
964 u8 *uuid = NULL; 965 u8 *uuid = NULL;
965 int rc, i; 966 int rc, i;
966 967
967 if (dev->driver || to_ndns(dev)->claim) 968 if (dev->driver || ndns->claim)
968 return -EBUSY; 969 return -EBUSY;
969 970
970 if (is_namespace_pmem(dev)) { 971 if (is_namespace_pmem(dev)) {
@@ -1034,20 +1035,16 @@ static ssize_t __size_store(struct device *dev, unsigned long long val)
1034 1035
1035 nd_namespace_pmem_set_resource(nd_region, nspm, 1036 nd_namespace_pmem_set_resource(nd_region, nspm,
1036 val * nd_region->ndr_mappings); 1037 val * nd_region->ndr_mappings);
1037 } else if (is_namespace_blk(dev)) {
1038 struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
1039
1040 /*
1041 * Try to delete the namespace if we deleted all of its
1042 * allocation, this is not the seed device for the
1043 * region, and it is not actively claimed by a btt
1044 * instance.
1045 */
1046 if (val == 0 && nd_region->ns_seed != dev
1047 && !nsblk->common.claim)
1048 nd_device_unregister(dev, ND_ASYNC);
1049 } 1038 }
1050 1039
1040 /*
1041 * Try to delete the namespace if we deleted all of its
1042 * allocation, this is not the seed device for the region, and
1043 * it is not actively claimed by a btt instance.
1044 */
1045 if (val == 0 && nd_region->ns_seed != dev && !ndns->claim)
1046 nd_device_unregister(dev, ND_ASYNC);
1047
1051 return rc; 1048 return rc;
1052} 1049}
1053 1050
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 7282d7495bf1..5b536be5a12e 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -90,7 +90,9 @@ static int read_pmem(struct page *page, unsigned int off,
90 90
91 rc = memcpy_from_pmem(mem + off, pmem_addr, len); 91 rc = memcpy_from_pmem(mem + off, pmem_addr, len);
92 kunmap_atomic(mem); 92 kunmap_atomic(mem);
93 return rc; 93 if (rc)
94 return -EIO;
95 return 0;
94} 96}
95 97
96static int pmem_do_bvec(struct pmem_device *pmem, struct page *page, 98static int pmem_do_bvec(struct pmem_device *pmem, struct page *page,
diff --git a/drivers/pci/host/pci-xgene-msi.c b/drivers/pci/host/pci-xgene-msi.c
index 1f38d0836751..f1b633bce525 100644
--- a/drivers/pci/host/pci-xgene-msi.c
+++ b/drivers/pci/host/pci-xgene-msi.c
@@ -517,7 +517,7 @@ static int xgene_msi_probe(struct platform_device *pdev)
517 517
518 rc = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "pci/xgene:online", 518 rc = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "pci/xgene:online",
519 xgene_msi_hwirq_alloc, NULL); 519 xgene_msi_hwirq_alloc, NULL);
520 if (rc) 520 if (rc < 0)
521 goto err_cpuhp; 521 goto err_cpuhp;
522 pci_xgene_online = rc; 522 pci_xgene_online = rc;
523 rc = cpuhp_setup_state(CPUHP_PCI_XGENE_DEAD, "pci/xgene:dead", NULL, 523 rc = cpuhp_setup_state(CPUHP_PCI_XGENE_DEAD, "pci/xgene:dead", NULL,
diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c
index bed19994c1e9..af8f6e92e885 100644
--- a/drivers/pci/host/pcie-designware.c
+++ b/drivers/pci/host/pcie-designware.c
@@ -807,11 +807,6 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
807{ 807{
808 u32 val; 808 u32 val;
809 809
810 /* get iATU unroll support */
811 pp->iatu_unroll_enabled = dw_pcie_iatu_unroll_enabled(pp);
812 dev_dbg(pp->dev, "iATU unroll: %s\n",
813 pp->iatu_unroll_enabled ? "enabled" : "disabled");
814
815 /* set the number of lanes */ 810 /* set the number of lanes */
816 val = dw_pcie_readl_rc(pp, PCIE_PORT_LINK_CONTROL); 811 val = dw_pcie_readl_rc(pp, PCIE_PORT_LINK_CONTROL);
817 val &= ~PORT_LINK_MODE_MASK; 812 val &= ~PORT_LINK_MODE_MASK;
@@ -882,6 +877,11 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
882 * we should not program the ATU here. 877 * we should not program the ATU here.
883 */ 878 */
884 if (!pp->ops->rd_other_conf) { 879 if (!pp->ops->rd_other_conf) {
880 /* get iATU unroll support */
881 pp->iatu_unroll_enabled = dw_pcie_iatu_unroll_enabled(pp);
882 dev_dbg(pp->dev, "iATU unroll: %s\n",
883 pp->iatu_unroll_enabled ? "enabled" : "disabled");
884
885 dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX0, 885 dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX0,
886 PCIE_ATU_TYPE_MEM, pp->mem_base, 886 PCIE_ATU_TYPE_MEM, pp->mem_base,
887 pp->mem_bus_addr, pp->mem_size); 887 pp->mem_bus_addr, pp->mem_size);
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index e164b5c9f0f0..204960e70333 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -1169,6 +1169,7 @@ void set_pcie_port_type(struct pci_dev *pdev)
1169 pos = pci_find_capability(pdev, PCI_CAP_ID_EXP); 1169 pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
1170 if (!pos) 1170 if (!pos)
1171 return; 1171 return;
1172
1172 pdev->pcie_cap = pos; 1173 pdev->pcie_cap = pos;
1173 pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16); 1174 pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
1174 pdev->pcie_flags_reg = reg16; 1175 pdev->pcie_flags_reg = reg16;
@@ -1176,13 +1177,14 @@ void set_pcie_port_type(struct pci_dev *pdev)
1176 pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD; 1177 pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD;
1177 1178
1178 /* 1179 /*
1179 * A Root Port is always the upstream end of a Link. No PCIe 1180 * A Root Port or a PCI-to-PCIe bridge is always the upstream end
1180 * component has two Links. Two Links are connected by a Switch 1181 * of a Link. No PCIe component has two Links. Two Links are
1181 * that has a Port on each Link and internal logic to connect the 1182 * connected by a Switch that has a Port on each Link and internal
1182 * two Ports. 1183 * logic to connect the two Ports.
1183 */ 1184 */
1184 type = pci_pcie_type(pdev); 1185 type = pci_pcie_type(pdev);
1185 if (type == PCI_EXP_TYPE_ROOT_PORT) 1186 if (type == PCI_EXP_TYPE_ROOT_PORT ||
1187 type == PCI_EXP_TYPE_PCIE_BRIDGE)
1186 pdev->has_secondary_link = 1; 1188 pdev->has_secondary_link = 1;
1187 else if (type == PCI_EXP_TYPE_UPSTREAM || 1189 else if (type == PCI_EXP_TYPE_UPSTREAM ||
1188 type == PCI_EXP_TYPE_DOWNSTREAM) { 1190 type == PCI_EXP_TYPE_DOWNSTREAM) {
diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c
index 639ed4e6afd1..070c4da95f48 100644
--- a/drivers/s390/virtio/virtio_ccw.c
+++ b/drivers/s390/virtio/virtio_ccw.c
@@ -145,6 +145,7 @@ static struct airq_info *airq_areas[MAX_AIRQ_AREAS];
145#define CCW_CMD_WRITE_CONF 0x21 145#define CCW_CMD_WRITE_CONF 0x21
146#define CCW_CMD_WRITE_STATUS 0x31 146#define CCW_CMD_WRITE_STATUS 0x31
147#define CCW_CMD_READ_VQ_CONF 0x32 147#define CCW_CMD_READ_VQ_CONF 0x32
148#define CCW_CMD_READ_STATUS 0x72
148#define CCW_CMD_SET_IND_ADAPTER 0x73 149#define CCW_CMD_SET_IND_ADAPTER 0x73
149#define CCW_CMD_SET_VIRTIO_REV 0x83 150#define CCW_CMD_SET_VIRTIO_REV 0x83
150 151
@@ -160,6 +161,7 @@ static struct airq_info *airq_areas[MAX_AIRQ_AREAS];
160#define VIRTIO_CCW_DOING_SET_CONF_IND 0x04000000 161#define VIRTIO_CCW_DOING_SET_CONF_IND 0x04000000
161#define VIRTIO_CCW_DOING_SET_IND_ADAPTER 0x08000000 162#define VIRTIO_CCW_DOING_SET_IND_ADAPTER 0x08000000
162#define VIRTIO_CCW_DOING_SET_VIRTIO_REV 0x10000000 163#define VIRTIO_CCW_DOING_SET_VIRTIO_REV 0x10000000
164#define VIRTIO_CCW_DOING_READ_STATUS 0x20000000
163#define VIRTIO_CCW_INTPARM_MASK 0xffff0000 165#define VIRTIO_CCW_INTPARM_MASK 0xffff0000
164 166
165static struct virtio_ccw_device *to_vc_device(struct virtio_device *vdev) 167static struct virtio_ccw_device *to_vc_device(struct virtio_device *vdev)
@@ -452,7 +454,7 @@ static void virtio_ccw_del_vq(struct virtqueue *vq, struct ccw1 *ccw)
452 * This may happen on device detach. 454 * This may happen on device detach.
453 */ 455 */
454 if (ret && (ret != -ENODEV)) 456 if (ret && (ret != -ENODEV))
455 dev_warn(&vq->vdev->dev, "Error %d while deleting queue %d", 457 dev_warn(&vq->vdev->dev, "Error %d while deleting queue %d\n",
456 ret, index); 458 ret, index);
457 459
458 vring_del_virtqueue(vq); 460 vring_del_virtqueue(vq);
@@ -892,6 +894,28 @@ out_free:
892static u8 virtio_ccw_get_status(struct virtio_device *vdev) 894static u8 virtio_ccw_get_status(struct virtio_device *vdev)
893{ 895{
894 struct virtio_ccw_device *vcdev = to_vc_device(vdev); 896 struct virtio_ccw_device *vcdev = to_vc_device(vdev);
897 u8 old_status = *vcdev->status;
898 struct ccw1 *ccw;
899
900 if (vcdev->revision < 1)
901 return *vcdev->status;
902
903 ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
904 if (!ccw)
905 return old_status;
906
907 ccw->cmd_code = CCW_CMD_READ_STATUS;
908 ccw->flags = 0;
909 ccw->count = sizeof(*vcdev->status);
910 ccw->cda = (__u32)(unsigned long)vcdev->status;
911 ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_STATUS);
912/*
913 * If the channel program failed (should only happen if the device
914 * was hotunplugged, and then we clean up via the machine check
915 * handler anyway), vcdev->status was not overwritten and we just
916 * return the old status, which is fine.
917*/
918 kfree(ccw);
895 919
896 return *vcdev->status; 920 return *vcdev->status;
897} 921}
@@ -920,7 +944,7 @@ static void virtio_ccw_set_status(struct virtio_device *vdev, u8 status)
920 kfree(ccw); 944 kfree(ccw);
921} 945}
922 946
923static struct virtio_config_ops virtio_ccw_config_ops = { 947static const struct virtio_config_ops virtio_ccw_config_ops = {
924 .get_features = virtio_ccw_get_features, 948 .get_features = virtio_ccw_get_features,
925 .finalize_features = virtio_ccw_finalize_features, 949 .finalize_features = virtio_ccw_finalize_features,
926 .get = virtio_ccw_get_config, 950 .get = virtio_ccw_get_config,
@@ -987,6 +1011,7 @@ static void virtio_ccw_check_activity(struct virtio_ccw_device *vcdev,
987 case VIRTIO_CCW_DOING_READ_CONFIG: 1011 case VIRTIO_CCW_DOING_READ_CONFIG:
988 case VIRTIO_CCW_DOING_WRITE_CONFIG: 1012 case VIRTIO_CCW_DOING_WRITE_CONFIG:
989 case VIRTIO_CCW_DOING_WRITE_STATUS: 1013 case VIRTIO_CCW_DOING_WRITE_STATUS:
1014 case VIRTIO_CCW_DOING_READ_STATUS:
990 case VIRTIO_CCW_DOING_SET_VQ: 1015 case VIRTIO_CCW_DOING_SET_VQ:
991 case VIRTIO_CCW_DOING_SET_IND: 1016 case VIRTIO_CCW_DOING_SET_IND:
992 case VIRTIO_CCW_DOING_SET_CONF_IND: 1017 case VIRTIO_CCW_DOING_SET_CONF_IND:
diff --git a/drivers/scsi/bfa/bfad_bsg.c b/drivers/scsi/bfa/bfad_bsg.c
index a9a00169ad91..b2e8c0dfc79c 100644
--- a/drivers/scsi/bfa/bfad_bsg.c
+++ b/drivers/scsi/bfa/bfad_bsg.c
@@ -3363,7 +3363,7 @@ bfad_im_bsg_els_ct_request(struct bsg_job *job)
3363 struct bfad_fcxp *drv_fcxp; 3363 struct bfad_fcxp *drv_fcxp;
3364 struct bfa_fcs_lport_s *fcs_port; 3364 struct bfa_fcs_lport_s *fcs_port;
3365 struct bfa_fcs_rport_s *fcs_rport; 3365 struct bfa_fcs_rport_s *fcs_rport;
3366 struct fc_bsg_request *bsg_request = bsg_request; 3366 struct fc_bsg_request *bsg_request = job->request;
3367 struct fc_bsg_reply *bsg_reply = job->reply; 3367 struct fc_bsg_reply *bsg_reply = job->reply;
3368 uint32_t command_type = bsg_request->msgcode; 3368 uint32_t command_type = bsg_request->msgcode;
3369 unsigned long flags; 3369 unsigned long flags;
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
index 8fb5c54c7dd3..99b747cedbeb 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
@@ -46,6 +46,7 @@
46 46
47#define INITIAL_SRP_LIMIT 800 47#define INITIAL_SRP_LIMIT 800
48#define DEFAULT_MAX_SECTORS 256 48#define DEFAULT_MAX_SECTORS 256
49#define MAX_TXU 1024 * 1024
49 50
50static uint max_vdma_size = MAX_H_COPY_RDMA; 51static uint max_vdma_size = MAX_H_COPY_RDMA;
51 52
@@ -1391,7 +1392,7 @@ static long ibmvscsis_adapter_info(struct scsi_info *vscsi,
1391 } 1392 }
1392 1393
1393 info = dma_alloc_coherent(&vscsi->dma_dev->dev, sizeof(*info), &token, 1394 info = dma_alloc_coherent(&vscsi->dma_dev->dev, sizeof(*info), &token,
1394 GFP_KERNEL); 1395 GFP_ATOMIC);
1395 if (!info) { 1396 if (!info) {
1396 dev_err(&vscsi->dev, "bad dma_alloc_coherent %p\n", 1397 dev_err(&vscsi->dev, "bad dma_alloc_coherent %p\n",
1397 iue->target); 1398 iue->target);
@@ -1443,7 +1444,7 @@ static long ibmvscsis_adapter_info(struct scsi_info *vscsi,
1443 info->mad_version = cpu_to_be32(MAD_VERSION_1); 1444 info->mad_version = cpu_to_be32(MAD_VERSION_1);
1444 info->os_type = cpu_to_be32(LINUX); 1445 info->os_type = cpu_to_be32(LINUX);
1445 memset(&info->port_max_txu[0], 0, sizeof(info->port_max_txu)); 1446 memset(&info->port_max_txu[0], 0, sizeof(info->port_max_txu));
1446 info->port_max_txu[0] = cpu_to_be32(128 * PAGE_SIZE); 1447 info->port_max_txu[0] = cpu_to_be32(MAX_TXU);
1447 1448
1448 dma_wmb(); 1449 dma_wmb();
1449 rc = h_copy_rdma(sizeof(*info), vscsi->dds.window[LOCAL].liobn, 1450 rc = h_copy_rdma(sizeof(*info), vscsi->dds.window[LOCAL].liobn,
@@ -1509,7 +1510,7 @@ static int ibmvscsis_cap_mad(struct scsi_info *vscsi, struct iu_entry *iue)
1509 } 1510 }
1510 1511
1511 cap = dma_alloc_coherent(&vscsi->dma_dev->dev, olen, &token, 1512 cap = dma_alloc_coherent(&vscsi->dma_dev->dev, olen, &token,
1512 GFP_KERNEL); 1513 GFP_ATOMIC);
1513 if (!cap) { 1514 if (!cap) {
1514 dev_err(&vscsi->dev, "bad dma_alloc_coherent %p\n", 1515 dev_err(&vscsi->dev, "bad dma_alloc_coherent %p\n",
1515 iue->target); 1516 iue->target);
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 236e4e51d161..7b6bd8ed0d0b 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -3590,12 +3590,14 @@ lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb)
3590 } else { 3590 } else {
3591 buf_ptr1 = (struct lpfc_dmabuf *) elsiocb->context2; 3591 buf_ptr1 = (struct lpfc_dmabuf *) elsiocb->context2;
3592 lpfc_els_free_data(phba, buf_ptr1); 3592 lpfc_els_free_data(phba, buf_ptr1);
3593 elsiocb->context2 = NULL;
3593 } 3594 }
3594 } 3595 }
3595 3596
3596 if (elsiocb->context3) { 3597 if (elsiocb->context3) {
3597 buf_ptr = (struct lpfc_dmabuf *) elsiocb->context3; 3598 buf_ptr = (struct lpfc_dmabuf *) elsiocb->context3;
3598 lpfc_els_free_bpl(phba, buf_ptr); 3599 lpfc_els_free_bpl(phba, buf_ptr);
3600 elsiocb->context3 = NULL;
3599 } 3601 }
3600 lpfc_sli_release_iocbq(phba, elsiocb); 3602 lpfc_sli_release_iocbq(phba, elsiocb);
3601 return 0; 3603 return 0;
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 4faa7672fc1d..a78a3df68f67 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -5954,18 +5954,25 @@ lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
5954 5954
5955 free_vfi_bmask: 5955 free_vfi_bmask:
5956 kfree(phba->sli4_hba.vfi_bmask); 5956 kfree(phba->sli4_hba.vfi_bmask);
5957 phba->sli4_hba.vfi_bmask = NULL;
5957 free_xri_ids: 5958 free_xri_ids:
5958 kfree(phba->sli4_hba.xri_ids); 5959 kfree(phba->sli4_hba.xri_ids);
5960 phba->sli4_hba.xri_ids = NULL;
5959 free_xri_bmask: 5961 free_xri_bmask:
5960 kfree(phba->sli4_hba.xri_bmask); 5962 kfree(phba->sli4_hba.xri_bmask);
5963 phba->sli4_hba.xri_bmask = NULL;
5961 free_vpi_ids: 5964 free_vpi_ids:
5962 kfree(phba->vpi_ids); 5965 kfree(phba->vpi_ids);
5966 phba->vpi_ids = NULL;
5963 free_vpi_bmask: 5967 free_vpi_bmask:
5964 kfree(phba->vpi_bmask); 5968 kfree(phba->vpi_bmask);
5969 phba->vpi_bmask = NULL;
5965 free_rpi_ids: 5970 free_rpi_ids:
5966 kfree(phba->sli4_hba.rpi_ids); 5971 kfree(phba->sli4_hba.rpi_ids);
5972 phba->sli4_hba.rpi_ids = NULL;
5967 free_rpi_bmask: 5973 free_rpi_bmask:
5968 kfree(phba->sli4_hba.rpi_bmask); 5974 kfree(phba->sli4_hba.rpi_bmask);
5975 phba->sli4_hba.rpi_bmask = NULL;
5969 err_exit: 5976 err_exit:
5970 return rc; 5977 return rc;
5971} 5978}
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index 394fe1338d09..dcb33f4fa687 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -393,6 +393,7 @@ struct MPT3SAS_TARGET {
393 * @eedp_enable: eedp support enable bit 393 * @eedp_enable: eedp support enable bit
394 * @eedp_type: 0(type_1), 1(type_2), 2(type_3) 394 * @eedp_type: 0(type_1), 1(type_2), 2(type_3)
395 * @eedp_block_length: block size 395 * @eedp_block_length: block size
396 * @ata_command_pending: SATL passthrough outstanding for device
396 */ 397 */
397struct MPT3SAS_DEVICE { 398struct MPT3SAS_DEVICE {
398 struct MPT3SAS_TARGET *sas_target; 399 struct MPT3SAS_TARGET *sas_target;
@@ -404,6 +405,17 @@ struct MPT3SAS_DEVICE {
404 u8 ignore_delay_remove; 405 u8 ignore_delay_remove;
405 /* Iopriority Command Handling */ 406 /* Iopriority Command Handling */
406 u8 ncq_prio_enable; 407 u8 ncq_prio_enable;
408 /*
409 * Bug workaround for SATL handling: the mpt2/3sas firmware
410 * doesn't return BUSY or TASK_SET_FULL for subsequent
411 * commands while a SATL pass through is in operation as the
412 * spec requires, it simply does nothing with them until the
413 * pass through completes, causing them possibly to timeout if
414 * the passthrough is a long executing command (like format or
415 * secure erase). This variable allows us to do the right
416 * thing while a SATL command is pending.
417 */
418 unsigned long ata_command_pending;
407 419
408}; 420};
409 421
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index b5c966e319d3..75f3fce1c867 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -3899,9 +3899,18 @@ _scsih_temp_threshold_events(struct MPT3SAS_ADAPTER *ioc,
3899 } 3899 }
3900} 3900}
3901 3901
3902static inline bool ata_12_16_cmd(struct scsi_cmnd *scmd) 3902static int _scsih_set_satl_pending(struct scsi_cmnd *scmd, bool pending)
3903{ 3903{
3904 return (scmd->cmnd[0] == ATA_12 || scmd->cmnd[0] == ATA_16); 3904 struct MPT3SAS_DEVICE *priv = scmd->device->hostdata;
3905
3906 if (scmd->cmnd[0] != ATA_12 && scmd->cmnd[0] != ATA_16)
3907 return 0;
3908
3909 if (pending)
3910 return test_and_set_bit(0, &priv->ata_command_pending);
3911
3912 clear_bit(0, &priv->ata_command_pending);
3913 return 0;
3905} 3914}
3906 3915
3907/** 3916/**
@@ -3925,9 +3934,7 @@ _scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc)
3925 if (!scmd) 3934 if (!scmd)
3926 continue; 3935 continue;
3927 count++; 3936 count++;
3928 if (ata_12_16_cmd(scmd)) 3937 _scsih_set_satl_pending(scmd, false);
3929 scsi_internal_device_unblock(scmd->device,
3930 SDEV_RUNNING);
3931 mpt3sas_base_free_smid(ioc, smid); 3938 mpt3sas_base_free_smid(ioc, smid);
3932 scsi_dma_unmap(scmd); 3939 scsi_dma_unmap(scmd);
3933 if (ioc->pci_error_recovery) 3940 if (ioc->pci_error_recovery)
@@ -4063,13 +4070,6 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
4063 if (ioc->logging_level & MPT_DEBUG_SCSI) 4070 if (ioc->logging_level & MPT_DEBUG_SCSI)
4064 scsi_print_command(scmd); 4071 scsi_print_command(scmd);
4065 4072
4066 /*
4067 * Lock the device for any subsequent command until command is
4068 * done.
4069 */
4070 if (ata_12_16_cmd(scmd))
4071 scsi_internal_device_block(scmd->device);
4072
4073 sas_device_priv_data = scmd->device->hostdata; 4073 sas_device_priv_data = scmd->device->hostdata;
4074 if (!sas_device_priv_data || !sas_device_priv_data->sas_target) { 4074 if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
4075 scmd->result = DID_NO_CONNECT << 16; 4075 scmd->result = DID_NO_CONNECT << 16;
@@ -4083,6 +4083,19 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
4083 return 0; 4083 return 0;
4084 } 4084 }
4085 4085
4086 /*
4087 * Bug work around for firmware SATL handling. The loop
4088 * is based on atomic operations and ensures consistency
4089 * since we're lockless at this point
4090 */
4091 do {
4092 if (test_bit(0, &sas_device_priv_data->ata_command_pending)) {
4093 scmd->result = SAM_STAT_BUSY;
4094 scmd->scsi_done(scmd);
4095 return 0;
4096 }
4097 } while (_scsih_set_satl_pending(scmd, true));
4098
4086 sas_target_priv_data = sas_device_priv_data->sas_target; 4099 sas_target_priv_data = sas_device_priv_data->sas_target;
4087 4100
4088 /* invalid device handle */ 4101 /* invalid device handle */
@@ -4650,8 +4663,7 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
4650 if (scmd == NULL) 4663 if (scmd == NULL)
4651 return 1; 4664 return 1;
4652 4665
4653 if (ata_12_16_cmd(scmd)) 4666 _scsih_set_satl_pending(scmd, false);
4654 scsi_internal_device_unblock(scmd->device, SDEV_RUNNING);
4655 4667
4656 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); 4668 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
4657 4669
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 47eb4d545d13..f201f4099620 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -243,12 +243,15 @@ qla2x00_sysfs_read_optrom(struct file *filp, struct kobject *kobj,
243 struct qla_hw_data *ha = vha->hw; 243 struct qla_hw_data *ha = vha->hw;
244 ssize_t rval = 0; 244 ssize_t rval = 0;
245 245
246 mutex_lock(&ha->optrom_mutex);
247
246 if (ha->optrom_state != QLA_SREADING) 248 if (ha->optrom_state != QLA_SREADING)
247 return 0; 249 goto out;
248 250
249 mutex_lock(&ha->optrom_mutex);
250 rval = memory_read_from_buffer(buf, count, &off, ha->optrom_buffer, 251 rval = memory_read_from_buffer(buf, count, &off, ha->optrom_buffer,
251 ha->optrom_region_size); 252 ha->optrom_region_size);
253
254out:
252 mutex_unlock(&ha->optrom_mutex); 255 mutex_unlock(&ha->optrom_mutex);
253 256
254 return rval; 257 return rval;
@@ -263,14 +266,19 @@ qla2x00_sysfs_write_optrom(struct file *filp, struct kobject *kobj,
263 struct device, kobj))); 266 struct device, kobj)));
264 struct qla_hw_data *ha = vha->hw; 267 struct qla_hw_data *ha = vha->hw;
265 268
266 if (ha->optrom_state != QLA_SWRITING) 269 mutex_lock(&ha->optrom_mutex);
270
271 if (ha->optrom_state != QLA_SWRITING) {
272 mutex_unlock(&ha->optrom_mutex);
267 return -EINVAL; 273 return -EINVAL;
268 if (off > ha->optrom_region_size) 274 }
275 if (off > ha->optrom_region_size) {
276 mutex_unlock(&ha->optrom_mutex);
269 return -ERANGE; 277 return -ERANGE;
278 }
270 if (off + count > ha->optrom_region_size) 279 if (off + count > ha->optrom_region_size)
271 count = ha->optrom_region_size - off; 280 count = ha->optrom_region_size - off;
272 281
273 mutex_lock(&ha->optrom_mutex);
274 memcpy(&ha->optrom_buffer[off], buf, count); 282 memcpy(&ha->optrom_buffer[off], buf, count);
275 mutex_unlock(&ha->optrom_mutex); 283 mutex_unlock(&ha->optrom_mutex);
276 284
@@ -753,7 +761,6 @@ qla2x00_issue_logo(struct file *filp, struct kobject *kobj,
753 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj, 761 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
754 struct device, kobj))); 762 struct device, kobj)));
755 int type; 763 int type;
756 int rval = 0;
757 port_id_t did; 764 port_id_t did;
758 765
759 type = simple_strtol(buf, NULL, 10); 766 type = simple_strtol(buf, NULL, 10);
@@ -767,7 +774,7 @@ qla2x00_issue_logo(struct file *filp, struct kobject *kobj,
767 774
768 ql_log(ql_log_info, vha, 0x70e4, "%s: %d\n", __func__, type); 775 ql_log(ql_log_info, vha, 0x70e4, "%s: %d\n", __func__, type);
769 776
770 rval = qla24xx_els_dcmd_iocb(vha, ELS_DCMD_LOGO, did); 777 qla24xx_els_dcmd_iocb(vha, ELS_DCMD_LOGO, did);
771 return count; 778 return count;
772} 779}
773 780
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index f7df01b76714..5b1287a63c49 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -1556,7 +1556,8 @@ typedef struct {
1556struct atio { 1556struct atio {
1557 uint8_t entry_type; /* Entry type. */ 1557 uint8_t entry_type; /* Entry type. */
1558 uint8_t entry_count; /* Entry count. */ 1558 uint8_t entry_count; /* Entry count. */
1559 uint8_t data[58]; 1559 __le16 attr_n_length;
1560 uint8_t data[56];
1560 uint32_t signature; 1561 uint32_t signature;
1561#define ATIO_PROCESSED 0xDEADDEAD /* Signature */ 1562#define ATIO_PROCESSED 0xDEADDEAD /* Signature */
1562}; 1563};
@@ -2732,7 +2733,7 @@ struct isp_operations {
2732#define QLA_MSIX_FW_MODE(m) (((m) & (BIT_7|BIT_8|BIT_9)) >> 7) 2733#define QLA_MSIX_FW_MODE(m) (((m) & (BIT_7|BIT_8|BIT_9)) >> 7)
2733#define QLA_MSIX_FW_MODE_1(m) (QLA_MSIX_FW_MODE(m) == 1) 2734#define QLA_MSIX_FW_MODE_1(m) (QLA_MSIX_FW_MODE(m) == 1)
2734 2735
2735#define QLA_MSIX_DEFAULT 0x00 2736#define QLA_BASE_VECTORS 2 /* default + RSP */
2736#define QLA_MSIX_RSP_Q 0x01 2737#define QLA_MSIX_RSP_Q 0x01
2737#define QLA_ATIO_VECTOR 0x02 2738#define QLA_ATIO_VECTOR 0x02
2738#define QLA_MSIX_QPAIR_MULTIQ_RSP_Q 0x03 2739#define QLA_MSIX_QPAIR_MULTIQ_RSP_Q 0x03
@@ -2754,7 +2755,6 @@ struct qla_msix_entry {
2754 uint16_t entry; 2755 uint16_t entry;
2755 char name[30]; 2756 char name[30];
2756 void *handle; 2757 void *handle;
2757 struct irq_affinity_notify irq_notify;
2758 int cpuid; 2758 int cpuid;
2759}; 2759};
2760 2760
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 632d5f30386a..7b6317c8c2e9 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -1191,7 +1191,7 @@ qla24xx_reset_risc(scsi_qla_host_t *vha)
1191 1191
1192 /* Wait for soft-reset to complete. */ 1192 /* Wait for soft-reset to complete. */
1193 RD_REG_DWORD(&reg->ctrl_status); 1193 RD_REG_DWORD(&reg->ctrl_status);
1194 for (cnt = 0; cnt < 6000000; cnt++) { 1194 for (cnt = 0; cnt < 60; cnt++) {
1195 barrier(); 1195 barrier();
1196 if ((RD_REG_DWORD(&reg->ctrl_status) & 1196 if ((RD_REG_DWORD(&reg->ctrl_status) &
1197 CSRX_ISP_SOFT_RESET) == 0) 1197 CSRX_ISP_SOFT_RESET) == 0)
@@ -1234,7 +1234,7 @@ qla24xx_reset_risc(scsi_qla_host_t *vha)
1234 RD_REG_DWORD(&reg->hccr); 1234 RD_REG_DWORD(&reg->hccr);
1235 1235
1236 RD_REG_WORD(&reg->mailbox0); 1236 RD_REG_WORD(&reg->mailbox0);
1237 for (cnt = 6000000; RD_REG_WORD(&reg->mailbox0) != 0 && 1237 for (cnt = 60; RD_REG_WORD(&reg->mailbox0) != 0 &&
1238 rval == QLA_SUCCESS; cnt--) { 1238 rval == QLA_SUCCESS; cnt--) {
1239 barrier(); 1239 barrier();
1240 if (cnt) 1240 if (cnt)
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 5093ca9b02ec..dc88a09f9043 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -19,10 +19,6 @@ static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
19static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *); 19static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *);
20static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *, 20static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
21 sts_entry_t *); 21 sts_entry_t *);
22static void qla_irq_affinity_notify(struct irq_affinity_notify *,
23 const cpumask_t *);
24static void qla_irq_affinity_release(struct kref *);
25
26 22
27/** 23/**
28 * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200. 24 * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
@@ -2496,6 +2492,10 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
2496 if (pkt->entry_status & RF_BUSY) 2492 if (pkt->entry_status & RF_BUSY)
2497 res = DID_BUS_BUSY << 16; 2493 res = DID_BUS_BUSY << 16;
2498 2494
2495 if (pkt->entry_type == NOTIFY_ACK_TYPE &&
2496 pkt->handle == QLA_TGT_SKIP_HANDLE)
2497 return;
2498
2499 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); 2499 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
2500 if (sp) { 2500 if (sp) {
2501 sp->done(ha, sp, res); 2501 sp->done(ha, sp, res);
@@ -2572,14 +2572,6 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
2572 if (!vha->flags.online) 2572 if (!vha->flags.online)
2573 return; 2573 return;
2574 2574
2575 if (rsp->msix && rsp->msix->cpuid != smp_processor_id()) {
2576 /* if kernel does not notify qla of IRQ's CPU change,
2577 * then set it here.
2578 */
2579 rsp->msix->cpuid = smp_processor_id();
2580 ha->tgt.rspq_vector_cpuid = rsp->msix->cpuid;
2581 }
2582
2583 while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) { 2575 while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
2584 pkt = (struct sts_entry_24xx *)rsp->ring_ptr; 2576 pkt = (struct sts_entry_24xx *)rsp->ring_ptr;
2585 2577
@@ -3018,13 +3010,20 @@ static struct qla_init_msix_entry qla82xx_msix_entries[] = {
3018static int 3010static int
3019qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp) 3011qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
3020{ 3012{
3021#define MIN_MSIX_COUNT 2
3022 int i, ret; 3013 int i, ret;
3023 struct qla_msix_entry *qentry; 3014 struct qla_msix_entry *qentry;
3024 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); 3015 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
3016 struct irq_affinity desc = {
3017 .pre_vectors = QLA_BASE_VECTORS,
3018 };
3019
3020 if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha))
3021 desc.pre_vectors++;
3022
3023 ret = pci_alloc_irq_vectors_affinity(ha->pdev, QLA_BASE_VECTORS,
3024 ha->msix_count, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
3025 &desc);
3025 3026
3026 ret = pci_alloc_irq_vectors(ha->pdev, MIN_MSIX_COUNT, ha->msix_count,
3027 PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
3028 if (ret < 0) { 3027 if (ret < 0) {
3029 ql_log(ql_log_fatal, vha, 0x00c7, 3028 ql_log(ql_log_fatal, vha, 0x00c7,
3030 "MSI-X: Failed to enable support, " 3029 "MSI-X: Failed to enable support, "
@@ -3069,13 +3068,10 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
3069 qentry->have_irq = 0; 3068 qentry->have_irq = 0;
3070 qentry->in_use = 0; 3069 qentry->in_use = 0;
3071 qentry->handle = NULL; 3070 qentry->handle = NULL;
3072 qentry->irq_notify.notify = qla_irq_affinity_notify;
3073 qentry->irq_notify.release = qla_irq_affinity_release;
3074 qentry->cpuid = -1;
3075 } 3071 }
3076 3072
3077 /* Enable MSI-X vectors for the base queue */ 3073 /* Enable MSI-X vectors for the base queue */
3078 for (i = 0; i < (QLA_MSIX_RSP_Q + 1); i++) { 3074 for (i = 0; i < QLA_BASE_VECTORS; i++) {
3079 qentry = &ha->msix_entries[i]; 3075 qentry = &ha->msix_entries[i];
3080 qentry->handle = rsp; 3076 qentry->handle = rsp;
3081 rsp->msix = qentry; 3077 rsp->msix = qentry;
@@ -3093,18 +3089,6 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
3093 goto msix_register_fail; 3089 goto msix_register_fail;
3094 qentry->have_irq = 1; 3090 qentry->have_irq = 1;
3095 qentry->in_use = 1; 3091 qentry->in_use = 1;
3096
3097 /* Register for CPU affinity notification. */
3098 irq_set_affinity_notifier(qentry->vector, &qentry->irq_notify);
3099
3100 /* Schedule work (ie. trigger a notification) to read cpu
3101 * mask for this specific irq.
3102 * kref_get is required because
3103 * irq_affinity_notify() will do
3104 * kref_put().
3105 */
3106 kref_get(&qentry->irq_notify.kref);
3107 schedule_work(&qentry->irq_notify.work);
3108 } 3092 }
3109 3093
3110 /* 3094 /*
@@ -3301,49 +3285,3 @@ int qla25xx_request_irq(struct qla_hw_data *ha, struct qla_qpair *qpair,
3301 msix->handle = qpair; 3285 msix->handle = qpair;
3302 return ret; 3286 return ret;
3303} 3287}
3304
3305
3306/* irq_set_affinity/irqbalance will trigger notification of cpu mask update */
3307static void qla_irq_affinity_notify(struct irq_affinity_notify *notify,
3308 const cpumask_t *mask)
3309{
3310 struct qla_msix_entry *e =
3311 container_of(notify, struct qla_msix_entry, irq_notify);
3312 struct qla_hw_data *ha;
3313 struct scsi_qla_host *base_vha;
3314 struct rsp_que *rsp = e->handle;
3315
3316 /* user is recommended to set mask to just 1 cpu */
3317 e->cpuid = cpumask_first(mask);
3318
3319 ha = rsp->hw;
3320 base_vha = pci_get_drvdata(ha->pdev);
3321
3322 ql_dbg(ql_dbg_init, base_vha, 0xffff,
3323 "%s: host %ld : vector %d cpu %d \n", __func__,
3324 base_vha->host_no, e->vector, e->cpuid);
3325
3326 if (e->have_irq) {
3327 if ((IS_QLA83XX(ha) || IS_QLA27XX(ha)) &&
3328 (e->entry == QLA83XX_RSPQ_MSIX_ENTRY_NUMBER)) {
3329 ha->tgt.rspq_vector_cpuid = e->cpuid;
3330 ql_dbg(ql_dbg_init, base_vha, 0xffff,
3331 "%s: host%ld: rspq vector %d cpu %d runtime change\n",
3332 __func__, base_vha->host_no, e->vector, e->cpuid);
3333 }
3334 }
3335}
3336
3337static void qla_irq_affinity_release(struct kref *ref)
3338{
3339 struct irq_affinity_notify *notify =
3340 container_of(ref, struct irq_affinity_notify, kref);
3341 struct qla_msix_entry *e =
3342 container_of(notify, struct qla_msix_entry, irq_notify);
3343 struct rsp_que *rsp = e->handle;
3344 struct scsi_qla_host *base_vha = pci_get_drvdata(rsp->hw->pdev);
3345
3346 ql_dbg(ql_dbg_init, base_vha, 0xffff,
3347 "%s: host%ld: vector %d cpu %d\n", __func__,
3348 base_vha->host_no, e->vector, e->cpuid);
3349}
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 2819ceb96041..67f64db390b0 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -10,7 +10,7 @@
10#include <linux/delay.h> 10#include <linux/delay.h>
11#include <linux/gfp.h> 11#include <linux/gfp.h>
12 12
13struct rom_cmd { 13static struct rom_cmd {
14 uint16_t cmd; 14 uint16_t cmd;
15} rom_cmds[] = { 15} rom_cmds[] = {
16 { MBC_LOAD_RAM }, 16 { MBC_LOAD_RAM },
@@ -101,12 +101,12 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
101 return QLA_FUNCTION_TIMEOUT; 101 return QLA_FUNCTION_TIMEOUT;
102 } 102 }
103 103
104 /* if PCI error, then avoid mbx processing.*/ 104 /* if PCI error, then avoid mbx processing.*/
105 if (test_bit(PCI_ERR, &base_vha->dpc_flags)) { 105 if (test_bit(PCI_ERR, &base_vha->dpc_flags)) {
106 ql_log(ql_log_warn, vha, 0x1191, 106 ql_log(ql_log_warn, vha, 0x1191,
107 "PCI error, exiting.\n"); 107 "PCI error, exiting.\n");
108 return QLA_FUNCTION_TIMEOUT; 108 return QLA_FUNCTION_TIMEOUT;
109 } 109 }
110 110
111 reg = ha->iobase; 111 reg = ha->iobase;
112 io_lock_on = base_vha->flags.init_done; 112 io_lock_on = base_vha->flags.init_done;
@@ -323,20 +323,33 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
323 } 323 }
324 } else { 324 } else {
325 325
326 uint16_t mb0; 326 uint16_t mb[8];
327 uint32_t ictrl; 327 uint32_t ictrl, host_status, hccr;
328 uint16_t w; 328 uint16_t w;
329 329
330 if (IS_FWI2_CAPABLE(ha)) { 330 if (IS_FWI2_CAPABLE(ha)) {
331 mb0 = RD_REG_WORD(&reg->isp24.mailbox0); 331 mb[0] = RD_REG_WORD(&reg->isp24.mailbox0);
332 mb[1] = RD_REG_WORD(&reg->isp24.mailbox1);
333 mb[2] = RD_REG_WORD(&reg->isp24.mailbox2);
334 mb[3] = RD_REG_WORD(&reg->isp24.mailbox3);
335 mb[7] = RD_REG_WORD(&reg->isp24.mailbox7);
332 ictrl = RD_REG_DWORD(&reg->isp24.ictrl); 336 ictrl = RD_REG_DWORD(&reg->isp24.ictrl);
337 host_status = RD_REG_DWORD(&reg->isp24.host_status);
338 hccr = RD_REG_DWORD(&reg->isp24.hccr);
339
340 ql_log(ql_log_warn, vha, 0x1119,
341 "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
342 "mb[0-3]=[0x%x 0x%x 0x%x 0x%x] mb7 0x%x host_status 0x%x hccr 0x%x\n",
343 command, ictrl, jiffies, mb[0], mb[1], mb[2], mb[3],
344 mb[7], host_status, hccr);
345
333 } else { 346 } else {
334 mb0 = RD_MAILBOX_REG(ha, &reg->isp, 0); 347 mb[0] = RD_MAILBOX_REG(ha, &reg->isp, 0);
335 ictrl = RD_REG_WORD(&reg->isp.ictrl); 348 ictrl = RD_REG_WORD(&reg->isp.ictrl);
349 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1119,
350 "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
351 "mb[0]=0x%x\n", command, ictrl, jiffies, mb[0]);
336 } 352 }
337 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1119,
338 "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
339 "mb[0]=0x%x\n", command, ictrl, jiffies, mb0);
340 ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1019); 353 ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1019);
341 354
342 /* Capture FW dump only, if PCI device active */ 355 /* Capture FW dump only, if PCI device active */
@@ -684,7 +697,6 @@ qla_set_exlogin_mem_cfg(scsi_qla_host_t *vha, dma_addr_t phys_addr)
684 mbx_cmd_t mc; 697 mbx_cmd_t mc;
685 mbx_cmd_t *mcp = &mc; 698 mbx_cmd_t *mcp = &mc;
686 struct qla_hw_data *ha = vha->hw; 699 struct qla_hw_data *ha = vha->hw;
687 int configured_count;
688 700
689 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111a, 701 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111a,
690 "Entered %s.\n", __func__); 702 "Entered %s.\n", __func__);
@@ -707,7 +719,6 @@ qla_set_exlogin_mem_cfg(scsi_qla_host_t *vha, dma_addr_t phys_addr)
707 /*EMPTY*/ 719 /*EMPTY*/
708 ql_dbg(ql_dbg_mbx, vha, 0x111b, "Failed=%x.\n", rval); 720 ql_dbg(ql_dbg_mbx, vha, 0x111b, "Failed=%x.\n", rval);
709 } else { 721 } else {
710 configured_count = mcp->mb[11];
711 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118c, 722 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118c,
712 "Done %s.\n", __func__); 723 "Done %s.\n", __func__);
713 } 724 }
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index 54380b434b30..0a1723cc08cf 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -42,6 +42,11 @@ static int qla82xx_crb_table_initialized;
42 (crb_addr_xform[QLA82XX_HW_PX_MAP_CRB_##name] = \ 42 (crb_addr_xform[QLA82XX_HW_PX_MAP_CRB_##name] = \
43 QLA82XX_HW_CRB_HUB_AGT_ADR_##name << 20) 43 QLA82XX_HW_CRB_HUB_AGT_ADR_##name << 20)
44 44
45const int MD_MIU_TEST_AGT_RDDATA[] = {
46 0x410000A8, 0x410000AC,
47 0x410000B8, 0x410000BC
48};
49
45static void qla82xx_crb_addr_transform_setup(void) 50static void qla82xx_crb_addr_transform_setup(void)
46{ 51{
47 qla82xx_crb_addr_transform(XDMA); 52 qla82xx_crb_addr_transform(XDMA);
diff --git a/drivers/scsi/qla2xxx/qla_nx.h b/drivers/scsi/qla2xxx/qla_nx.h
index 6201dce3553b..77624eac95a4 100644
--- a/drivers/scsi/qla2xxx/qla_nx.h
+++ b/drivers/scsi/qla2xxx/qla_nx.h
@@ -1176,8 +1176,7 @@ struct qla82xx_md_entry_queue {
1176#define MD_MIU_TEST_AGT_ADDR_LO 0x41000094 1176#define MD_MIU_TEST_AGT_ADDR_LO 0x41000094
1177#define MD_MIU_TEST_AGT_ADDR_HI 0x41000098 1177#define MD_MIU_TEST_AGT_ADDR_HI 0x41000098
1178 1178
1179static const int MD_MIU_TEST_AGT_RDDATA[] = { 0x410000A8, 0x410000AC, 1179extern const int MD_MIU_TEST_AGT_RDDATA[4];
1180 0x410000B8, 0x410000BC };
1181 1180
1182#define CRB_NIU_XG_PAUSE_CTL_P0 0x1 1181#define CRB_NIU_XG_PAUSE_CTL_P0 0x1
1183#define CRB_NIU_XG_PAUSE_CTL_P1 0x8 1182#define CRB_NIU_XG_PAUSE_CTL_P1 0x8
diff --git a/drivers/scsi/qla2xxx/qla_nx2.c b/drivers/scsi/qla2xxx/qla_nx2.c
index 007192d7bad8..dc1ec9b61027 100644
--- a/drivers/scsi/qla2xxx/qla_nx2.c
+++ b/drivers/scsi/qla2xxx/qla_nx2.c
@@ -15,6 +15,23 @@
15 15
16#define TIMEOUT_100_MS 100 16#define TIMEOUT_100_MS 100
17 17
18static const uint32_t qla8044_reg_tbl[] = {
19 QLA8044_PEG_HALT_STATUS1,
20 QLA8044_PEG_HALT_STATUS2,
21 QLA8044_PEG_ALIVE_COUNTER,
22 QLA8044_CRB_DRV_ACTIVE,
23 QLA8044_CRB_DEV_STATE,
24 QLA8044_CRB_DRV_STATE,
25 QLA8044_CRB_DRV_SCRATCH,
26 QLA8044_CRB_DEV_PART_INFO1,
27 QLA8044_CRB_IDC_VER_MAJOR,
28 QLA8044_FW_VER_MAJOR,
29 QLA8044_FW_VER_MINOR,
30 QLA8044_FW_VER_SUB,
31 QLA8044_CMDPEG_STATE,
32 QLA8044_ASIC_TEMP,
33};
34
18/* 8044 Flash Read/Write functions */ 35/* 8044 Flash Read/Write functions */
19uint32_t 36uint32_t
20qla8044_rd_reg(struct qla_hw_data *ha, ulong addr) 37qla8044_rd_reg(struct qla_hw_data *ha, ulong addr)
diff --git a/drivers/scsi/qla2xxx/qla_nx2.h b/drivers/scsi/qla2xxx/qla_nx2.h
index 02fe3c4cdf55..83c1b7e17c80 100644
--- a/drivers/scsi/qla2xxx/qla_nx2.h
+++ b/drivers/scsi/qla2xxx/qla_nx2.h
@@ -535,23 +535,6 @@ enum qla_regs {
535#define CRB_CMDPEG_CHECK_RETRY_COUNT 60 535#define CRB_CMDPEG_CHECK_RETRY_COUNT 60
536#define CRB_CMDPEG_CHECK_DELAY 500 536#define CRB_CMDPEG_CHECK_DELAY 500
537 537
538static const uint32_t qla8044_reg_tbl[] = {
539 QLA8044_PEG_HALT_STATUS1,
540 QLA8044_PEG_HALT_STATUS2,
541 QLA8044_PEG_ALIVE_COUNTER,
542 QLA8044_CRB_DRV_ACTIVE,
543 QLA8044_CRB_DEV_STATE,
544 QLA8044_CRB_DRV_STATE,
545 QLA8044_CRB_DRV_SCRATCH,
546 QLA8044_CRB_DEV_PART_INFO1,
547 QLA8044_CRB_IDC_VER_MAJOR,
548 QLA8044_FW_VER_MAJOR,
549 QLA8044_FW_VER_MINOR,
550 QLA8044_FW_VER_SUB,
551 QLA8044_CMDPEG_STATE,
552 QLA8044_ASIC_TEMP,
553};
554
555/* MiniDump Structures */ 538/* MiniDump Structures */
556 539
557/* Driver_code is for driver to write some info about the entry 540/* Driver_code is for driver to write some info about the entry
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 8521cfe302e9..0a000ecf0881 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -466,7 +466,7 @@ static void qla2x00_free_queues(struct qla_hw_data *ha)
466 continue; 466 continue;
467 467
468 rsp = ha->rsp_q_map[cnt]; 468 rsp = ha->rsp_q_map[cnt];
469 clear_bit(cnt, ha->req_qid_map); 469 clear_bit(cnt, ha->rsp_qid_map);
470 ha->rsp_q_map[cnt] = NULL; 470 ha->rsp_q_map[cnt] = NULL;
471 spin_unlock_irqrestore(&ha->hardware_lock, flags); 471 spin_unlock_irqrestore(&ha->hardware_lock, flags);
472 qla2x00_free_rsp_que(ha, rsp); 472 qla2x00_free_rsp_que(ha, rsp);
@@ -3662,7 +3662,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
3662 sizeof(struct ct6_dsd), 0, 3662 sizeof(struct ct6_dsd), 0,
3663 SLAB_HWCACHE_ALIGN, NULL); 3663 SLAB_HWCACHE_ALIGN, NULL);
3664 if (!ctx_cachep) 3664 if (!ctx_cachep)
3665 goto fail_free_gid_list; 3665 goto fail_free_srb_mempool;
3666 } 3666 }
3667 ha->ctx_mempool = mempool_create_slab_pool(SRB_MIN_REQ, 3667 ha->ctx_mempool = mempool_create_slab_pool(SRB_MIN_REQ,
3668 ctx_cachep); 3668 ctx_cachep);
@@ -3815,7 +3815,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
3815 ha->loop_id_map = kzalloc(BITS_TO_LONGS(LOOPID_MAP_SIZE) * sizeof(long), 3815 ha->loop_id_map = kzalloc(BITS_TO_LONGS(LOOPID_MAP_SIZE) * sizeof(long),
3816 GFP_KERNEL); 3816 GFP_KERNEL);
3817 if (!ha->loop_id_map) 3817 if (!ha->loop_id_map)
3818 goto fail_async_pd; 3818 goto fail_loop_id_map;
3819 else { 3819 else {
3820 qla2x00_set_reserved_loop_ids(ha); 3820 qla2x00_set_reserved_loop_ids(ha);
3821 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0123, 3821 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0123,
@@ -3824,6 +3824,8 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
3824 3824
3825 return 0; 3825 return 0;
3826 3826
3827fail_loop_id_map:
3828 dma_pool_free(ha->s_dma_pool, ha->async_pd, ha->async_pd_dma);
3827fail_async_pd: 3829fail_async_pd:
3828 dma_pool_free(ha->s_dma_pool, ha->ex_init_cb, ha->ex_init_cb_dma); 3830 dma_pool_free(ha->s_dma_pool, ha->ex_init_cb, ha->ex_init_cb_dma);
3829fail_ex_init_cb: 3831fail_ex_init_cb:
@@ -3851,6 +3853,10 @@ fail_free_ms_iocb:
3851 dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma); 3853 dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma);
3852 ha->ms_iocb = NULL; 3854 ha->ms_iocb = NULL;
3853 ha->ms_iocb_dma = 0; 3855 ha->ms_iocb_dma = 0;
3856
3857 if (ha->sns_cmd)
3858 dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt),
3859 ha->sns_cmd, ha->sns_cmd_dma);
3854fail_dma_pool: 3860fail_dma_pool:
3855 if (IS_QLA82XX(ha) || ql2xenabledif) { 3861 if (IS_QLA82XX(ha) || ql2xenabledif) {
3856 dma_pool_destroy(ha->fcp_cmnd_dma_pool); 3862 dma_pool_destroy(ha->fcp_cmnd_dma_pool);
@@ -3868,10 +3874,12 @@ fail_free_nvram:
3868 kfree(ha->nvram); 3874 kfree(ha->nvram);
3869 ha->nvram = NULL; 3875 ha->nvram = NULL;
3870fail_free_ctx_mempool: 3876fail_free_ctx_mempool:
3871 mempool_destroy(ha->ctx_mempool); 3877 if (ha->ctx_mempool)
3878 mempool_destroy(ha->ctx_mempool);
3872 ha->ctx_mempool = NULL; 3879 ha->ctx_mempool = NULL;
3873fail_free_srb_mempool: 3880fail_free_srb_mempool:
3874 mempool_destroy(ha->srb_mempool); 3881 if (ha->srb_mempool)
3882 mempool_destroy(ha->srb_mempool);
3875 ha->srb_mempool = NULL; 3883 ha->srb_mempool = NULL;
3876fail_free_gid_list: 3884fail_free_gid_list:
3877 dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha), 3885 dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index bff9689f5ca9..e4fda84b959e 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -668,11 +668,9 @@ static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
668{ 668{
669 struct qla_hw_data *ha = vha->hw; 669 struct qla_hw_data *ha = vha->hw;
670 struct qla_tgt_sess *sess = NULL; 670 struct qla_tgt_sess *sess = NULL;
671 uint32_t unpacked_lun, lun = 0;
672 uint16_t loop_id; 671 uint16_t loop_id;
673 int res = 0; 672 int res = 0;
674 struct imm_ntfy_from_isp *n = (struct imm_ntfy_from_isp *)iocb; 673 struct imm_ntfy_from_isp *n = (struct imm_ntfy_from_isp *)iocb;
675 struct atio_from_isp *a = (struct atio_from_isp *)iocb;
676 unsigned long flags; 674 unsigned long flags;
677 675
678 loop_id = le16_to_cpu(n->u.isp24.nport_handle); 676 loop_id = le16_to_cpu(n->u.isp24.nport_handle);
@@ -725,11 +723,7 @@ static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
725 "loop_id %d)\n", vha->host_no, sess, sess->port_name, 723 "loop_id %d)\n", vha->host_no, sess, sess->port_name,
726 mcmd, loop_id); 724 mcmd, loop_id);
727 725
728 lun = a->u.isp24.fcp_cmnd.lun; 726 return qlt_issue_task_mgmt(sess, 0, mcmd, iocb, QLA24XX_MGMT_SEND_NACK);
729 unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
730
731 return qlt_issue_task_mgmt(sess, unpacked_lun, mcmd,
732 iocb, QLA24XX_MGMT_SEND_NACK);
733} 727}
734 728
735/* ha->tgt.sess_lock supposed to be held on entry */ 729/* ha->tgt.sess_lock supposed to be held on entry */
@@ -3067,7 +3061,7 @@ static int __qlt_send_term_imm_notif(struct scsi_qla_host *vha,
3067 3061
3068 pkt->entry_type = NOTIFY_ACK_TYPE; 3062 pkt->entry_type = NOTIFY_ACK_TYPE;
3069 pkt->entry_count = 1; 3063 pkt->entry_count = 1;
3070 pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK; 3064 pkt->handle = QLA_TGT_SKIP_HANDLE;
3071 3065
3072 nack = (struct nack_to_isp *)pkt; 3066 nack = (struct nack_to_isp *)pkt;
3073 nack->ox_id = ntfy->ox_id; 3067 nack->ox_id = ntfy->ox_id;
@@ -3110,6 +3104,9 @@ static void qlt_send_term_imm_notif(struct scsi_qla_host *vha,
3110#if 0 /* Todo */ 3104#if 0 /* Todo */
3111 if (rc == -ENOMEM) 3105 if (rc == -ENOMEM)
3112 qlt_alloc_qfull_cmd(vha, imm, 0, 0); 3106 qlt_alloc_qfull_cmd(vha, imm, 0, 0);
3107#else
3108 if (rc) {
3109 }
3113#endif 3110#endif
3114 goto done; 3111 goto done;
3115 } 3112 }
@@ -6457,12 +6454,29 @@ qlt_24xx_process_atio_queue(struct scsi_qla_host *vha, uint8_t ha_locked)
6457 if (!vha->flags.online) 6454 if (!vha->flags.online)
6458 return; 6455 return;
6459 6456
6460 while (ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) { 6457 while ((ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) ||
6458 fcpcmd_is_corrupted(ha->tgt.atio_ring_ptr)) {
6461 pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr; 6459 pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
6462 cnt = pkt->u.raw.entry_count; 6460 cnt = pkt->u.raw.entry_count;
6463 6461
6464 qlt_24xx_atio_pkt_all_vps(vha, (struct atio_from_isp *)pkt, 6462 if (unlikely(fcpcmd_is_corrupted(ha->tgt.atio_ring_ptr))) {
6465 ha_locked); 6463 /*
6464 * This packet is corrupted. The header + payload
6465 * can not be trusted. There is no point in passing
6466 * it further up.
6467 */
6468 ql_log(ql_log_warn, vha, 0xffff,
6469 "corrupted fcp frame SID[%3phN] OXID[%04x] EXCG[%x] %64phN\n",
6470 pkt->u.isp24.fcp_hdr.s_id,
6471 be16_to_cpu(pkt->u.isp24.fcp_hdr.ox_id),
6472 le32_to_cpu(pkt->u.isp24.exchange_addr), pkt);
6473
6474 adjust_corrupted_atio(pkt);
6475 qlt_send_term_exchange(vha, NULL, pkt, ha_locked, 0);
6476 } else {
6477 qlt_24xx_atio_pkt_all_vps(vha,
6478 (struct atio_from_isp *)pkt, ha_locked);
6479 }
6466 6480
6467 for (i = 0; i < cnt; i++) { 6481 for (i = 0; i < cnt; i++) {
6468 ha->tgt.atio_ring_index++; 6482 ha->tgt.atio_ring_index++;
@@ -6545,6 +6559,13 @@ qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv)
6545 6559
6546 /* Disable Full Login after LIP */ 6560 /* Disable Full Login after LIP */
6547 nv->host_p &= cpu_to_le32(~BIT_10); 6561 nv->host_p &= cpu_to_le32(~BIT_10);
6562
6563 /*
6564 * clear BIT 15 explicitly as we have seen at least
6565 * a couple of instances where this was set and this
6566 * was causing the firmware to not be initialized.
6567 */
6568 nv->firmware_options_1 &= cpu_to_le32(~BIT_15);
6548 /* Enable target PRLI control */ 6569 /* Enable target PRLI control */
6549 nv->firmware_options_2 |= cpu_to_le32(BIT_14); 6570 nv->firmware_options_2 |= cpu_to_le32(BIT_14);
6550 } else { 6571 } else {
@@ -6560,9 +6581,6 @@ qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv)
6560 return; 6581 return;
6561 } 6582 }
6562 6583
6563 /* out-of-order frames reassembly */
6564 nv->firmware_options_3 |= BIT_6|BIT_9;
6565
6566 if (ha->tgt.enable_class_2) { 6584 if (ha->tgt.enable_class_2) {
6567 if (vha->flags.init_done) 6585 if (vha->flags.init_done)
6568 fc_host_supported_classes(vha->host) = 6586 fc_host_supported_classes(vha->host) =
@@ -6629,11 +6647,17 @@ qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv)
6629 /* Disable ini mode, if requested */ 6647 /* Disable ini mode, if requested */
6630 if (!qla_ini_mode_enabled(vha)) 6648 if (!qla_ini_mode_enabled(vha))
6631 nv->firmware_options_1 |= cpu_to_le32(BIT_5); 6649 nv->firmware_options_1 |= cpu_to_le32(BIT_5);
6632
6633 /* Disable Full Login after LIP */ 6650 /* Disable Full Login after LIP */
6634 nv->firmware_options_1 &= cpu_to_le32(~BIT_13); 6651 nv->firmware_options_1 &= cpu_to_le32(~BIT_13);
6635 /* Enable initial LIP */ 6652 /* Enable initial LIP */
6636 nv->firmware_options_1 &= cpu_to_le32(~BIT_9); 6653 nv->firmware_options_1 &= cpu_to_le32(~BIT_9);
6654 /*
6655 * clear BIT 15 explicitly as we have seen at
6656 * least a couple of instances where this was set
6657 * and this was causing the firmware to not be
6658 * initialized.
6659 */
6660 nv->firmware_options_1 &= cpu_to_le32(~BIT_15);
6637 if (ql2xtgt_tape_enable) 6661 if (ql2xtgt_tape_enable)
6638 /* Enable FC tape support */ 6662 /* Enable FC tape support */
6639 nv->firmware_options_2 |= cpu_to_le32(BIT_12); 6663 nv->firmware_options_2 |= cpu_to_le32(BIT_12);
@@ -6658,9 +6682,6 @@ qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv)
6658 return; 6682 return;
6659 } 6683 }
6660 6684
6661 /* out-of-order frames reassembly */
6662 nv->firmware_options_3 |= BIT_6|BIT_9;
6663
6664 if (ha->tgt.enable_class_2) { 6685 if (ha->tgt.enable_class_2) {
6665 if (vha->flags.init_done) 6686 if (vha->flags.init_done)
6666 fc_host_supported_classes(vha->host) = 6687 fc_host_supported_classes(vha->host) =
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index f26c5f60eedd..0824a8164a24 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -427,13 +427,33 @@ struct atio_from_isp {
427 struct { 427 struct {
428 uint8_t entry_type; /* Entry type. */ 428 uint8_t entry_type; /* Entry type. */
429 uint8_t entry_count; /* Entry count. */ 429 uint8_t entry_count; /* Entry count. */
430 uint8_t data[58]; 430 __le16 attr_n_length;
431#define FCP_CMD_LENGTH_MASK 0x0fff
432#define FCP_CMD_LENGTH_MIN 0x38
433 uint8_t data[56];
431 uint32_t signature; 434 uint32_t signature;
432#define ATIO_PROCESSED 0xDEADDEAD /* Signature */ 435#define ATIO_PROCESSED 0xDEADDEAD /* Signature */
433 } raw; 436 } raw;
434 } u; 437 } u;
435} __packed; 438} __packed;
436 439
440static inline int fcpcmd_is_corrupted(struct atio *atio)
441{
442 if (atio->entry_type == ATIO_TYPE7 &&
443 (le16_to_cpu(atio->attr_n_length & FCP_CMD_LENGTH_MASK) <
444 FCP_CMD_LENGTH_MIN))
445 return 1;
446 else
447 return 0;
448}
449
450/* adjust corrupted atio so we won't trip over the same entry again. */
451static inline void adjust_corrupted_atio(struct atio_from_isp *atio)
452{
453 atio->u.raw.attr_n_length = cpu_to_le16(FCP_CMD_LENGTH_MIN);
454 atio->u.isp24.fcp_cmnd.add_cdb_len = 0;
455}
456
437#define CTIO_TYPE7 0x12 /* Continue target I/O entry (for 24xx) */ 457#define CTIO_TYPE7 0x12 /* Continue target I/O entry (for 24xx) */
438 458
439/* 459/*
diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c
index 36935c9ed669..8a58ef3adab4 100644
--- a/drivers/scsi/qla2xxx/qla_tmpl.c
+++ b/drivers/scsi/qla2xxx/qla_tmpl.c
@@ -433,6 +433,18 @@ qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha,
433 count++; 433 count++;
434 } 434 }
435 } 435 }
436 } else if (QLA_TGT_MODE_ENABLED() &&
437 ent->t263.queue_type == T263_QUEUE_TYPE_ATIO) {
438 struct qla_hw_data *ha = vha->hw;
439 struct atio *atr = ha->tgt.atio_ring;
440
441 if (atr || !buf) {
442 length = ha->tgt.atio_q_length;
443 qla27xx_insert16(0, buf, len);
444 qla27xx_insert16(length, buf, len);
445 qla27xx_insertbuf(atr, length * sizeof(*atr), buf, len);
446 count++;
447 }
436 } else { 448 } else {
437 ql_dbg(ql_dbg_misc, vha, 0xd026, 449 ql_dbg(ql_dbg_misc, vha, 0xd026,
438 "%s: unknown queue %x\n", __func__, ent->t263.queue_type); 450 "%s: unknown queue %x\n", __func__, ent->t263.queue_type);
@@ -676,6 +688,18 @@ qla27xx_fwdt_entry_t274(struct scsi_qla_host *vha,
676 count++; 688 count++;
677 } 689 }
678 } 690 }
691 } else if (QLA_TGT_MODE_ENABLED() &&
692 ent->t274.queue_type == T274_QUEUE_TYPE_ATIO_SHAD) {
693 struct qla_hw_data *ha = vha->hw;
694 struct atio *atr = ha->tgt.atio_ring_ptr;
695
696 if (atr || !buf) {
697 qla27xx_insert16(0, buf, len);
698 qla27xx_insert16(1, buf, len);
699 qla27xx_insert32(ha->tgt.atio_q_in ?
700 readl(ha->tgt.atio_q_in) : 0, buf, len);
701 count++;
702 }
679 } else { 703 } else {
680 ql_dbg(ql_dbg_misc, vha, 0xd02f, 704 ql_dbg(ql_dbg_misc, vha, 0xd02f,
681 "%s: unknown queue %x\n", __func__, ent->t274.queue_type); 705 "%s: unknown queue %x\n", __func__, ent->t274.queue_type);
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index 6643f6fc7795..d925910be761 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -1800,7 +1800,7 @@ static ssize_t tcm_qla2xxx_wwn_version_show(struct config_item *item,
1800{ 1800{
1801 return sprintf(page, 1801 return sprintf(page,
1802 "TCM QLOGIC QLA2XXX NPIV capable fabric module %s on %s/%s on " 1802 "TCM QLOGIC QLA2XXX NPIV capable fabric module %s on %s/%s on "
1803 UTS_RELEASE"\n", TCM_QLA2XXX_VERSION, utsname()->sysname, 1803 UTS_RELEASE"\n", QLA2XXX_VERSION, utsname()->sysname,
1804 utsname()->machine); 1804 utsname()->machine);
1805} 1805}
1806 1806
@@ -1906,7 +1906,7 @@ static int tcm_qla2xxx_register_configfs(void)
1906 int ret; 1906 int ret;
1907 1907
1908 pr_debug("TCM QLOGIC QLA2XXX fabric module %s on %s/%s on " 1908 pr_debug("TCM QLOGIC QLA2XXX fabric module %s on %s/%s on "
1909 UTS_RELEASE"\n", TCM_QLA2XXX_VERSION, utsname()->sysname, 1909 UTS_RELEASE"\n", QLA2XXX_VERSION, utsname()->sysname,
1910 utsname()->machine); 1910 utsname()->machine);
1911 1911
1912 ret = target_register_template(&tcm_qla2xxx_ops); 1912 ret = target_register_template(&tcm_qla2xxx_ops);
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.h b/drivers/scsi/qla2xxx/tcm_qla2xxx.h
index 37e026a4823d..cf8430be183b 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.h
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.h
@@ -1,7 +1,6 @@
1#include <target/target_core_base.h> 1#include <target/target_core_base.h>
2#include <linux/btree.h> 2#include <linux/btree.h>
3 3
4#define TCM_QLA2XXX_VERSION "v0.1"
5/* length of ASCII WWPNs including pad */ 4/* length of ASCII WWPNs including pad */
6#define TCM_QLA2XXX_NAMELEN 32 5#define TCM_QLA2XXX_NAMELEN 32
7/* 6/*
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 1fbb1ecf49f2..0b09638fa39b 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -2585,7 +2585,8 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
2585 if (sdp->broken_fua) { 2585 if (sdp->broken_fua) {
2586 sd_first_printk(KERN_NOTICE, sdkp, "Disabling FUA\n"); 2586 sd_first_printk(KERN_NOTICE, sdkp, "Disabling FUA\n");
2587 sdkp->DPOFUA = 0; 2587 sdkp->DPOFUA = 0;
2588 } else if (sdkp->DPOFUA && !sdkp->device->use_10_for_rw) { 2588 } else if (sdkp->DPOFUA && !sdkp->device->use_10_for_rw &&
2589 !sdkp->device->use_16_for_rw) {
2589 sd_first_printk(KERN_NOTICE, sdkp, 2590 sd_first_printk(KERN_NOTICE, sdkp,
2590 "Uses READ/WRITE(6), disabling FUA\n"); 2591 "Uses READ/WRITE(6), disabling FUA\n");
2591 sdkp->DPOFUA = 0; 2592 sdkp->DPOFUA = 0;
@@ -2768,13 +2769,21 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp)
2768 queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q); 2769 queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
2769 } 2770 }
2770 2771
2771 sdkp->zoned = (buffer[8] >> 4) & 3; 2772 if (sdkp->device->type == TYPE_ZBC) {
2772 if (sdkp->zoned == 1) 2773 /* Host-managed */
2773 q->limits.zoned = BLK_ZONED_HA;
2774 else if (sdkp->device->type == TYPE_ZBC)
2775 q->limits.zoned = BLK_ZONED_HM; 2774 q->limits.zoned = BLK_ZONED_HM;
2776 else 2775 } else {
2777 q->limits.zoned = BLK_ZONED_NONE; 2776 sdkp->zoned = (buffer[8] >> 4) & 3;
2777 if (sdkp->zoned == 1)
2778 /* Host-aware */
2779 q->limits.zoned = BLK_ZONED_HA;
2780 else
2781 /*
2782 * Treat drive-managed devices as
2783 * regular block devices.
2784 */
2785 q->limits.zoned = BLK_ZONED_NONE;
2786 }
2778 if (blk_queue_is_zoned(q) && sdkp->first_scan) 2787 if (blk_queue_is_zoned(q) && sdkp->first_scan)
2779 sd_printk(KERN_NOTICE, sdkp, "Host-%s zoned block device\n", 2788 sd_printk(KERN_NOTICE, sdkp, "Host-%s zoned block device\n",
2780 q->limits.zoned == BLK_ZONED_HM ? "managed" : "aware"); 2789 q->limits.zoned == BLK_ZONED_HM ? "managed" : "aware");
diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c
index 8c9a35c91705..50adabbb5808 100644
--- a/drivers/scsi/ses.c
+++ b/drivers/scsi/ses.c
@@ -587,7 +587,7 @@ static void ses_match_to_enclosure(struct enclosure_device *edev,
587 587
588 ses_enclosure_data_process(edev, to_scsi_device(edev->edev.parent), 0); 588 ses_enclosure_data_process(edev, to_scsi_device(edev->edev.parent), 0);
589 589
590 if (scsi_is_sas_rphy(&sdev->sdev_gendev)) 590 if (scsi_is_sas_rphy(sdev->sdev_target->dev.parent))
591 efd.addr = sas_get_address(sdev); 591 efd.addr = sas_get_address(sdev);
592 592
593 if (efd.addr) { 593 if (efd.addr) {
diff --git a/drivers/soc/ti/wkup_m3_ipc.c b/drivers/soc/ti/wkup_m3_ipc.c
index 8823cc81ae45..5bb376009d98 100644
--- a/drivers/soc/ti/wkup_m3_ipc.c
+++ b/drivers/soc/ti/wkup_m3_ipc.c
@@ -459,6 +459,7 @@ static int wkup_m3_ipc_probe(struct platform_device *pdev)
459 459
460 if (IS_ERR(task)) { 460 if (IS_ERR(task)) {
461 dev_err(dev, "can't create rproc_boot thread\n"); 461 dev_err(dev, "can't create rproc_boot thread\n");
462 ret = PTR_ERR(task);
462 goto err_put_rproc; 463 goto err_put_rproc;
463 } 464 }
464 465
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index ec4aa252d6e8..2922a9908302 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -378,6 +378,7 @@ config SPI_FSL_SPI
378config SPI_FSL_DSPI 378config SPI_FSL_DSPI
379 tristate "Freescale DSPI controller" 379 tristate "Freescale DSPI controller"
380 select REGMAP_MMIO 380 select REGMAP_MMIO
381 depends on HAS_DMA
381 depends on SOC_VF610 || SOC_LS1021A || ARCH_LAYERSCAPE || COMPILE_TEST 382 depends on SOC_VF610 || SOC_LS1021A || ARCH_LAYERSCAPE || COMPILE_TEST
382 help 383 help
383 This enables support for the Freescale DSPI controller in master 384 This enables support for the Freescale DSPI controller in master
diff --git a/drivers/spi/spi-armada-3700.c b/drivers/spi/spi-armada-3700.c
index e89da0af45d2..0314c6b9e044 100644
--- a/drivers/spi/spi-armada-3700.c
+++ b/drivers/spi/spi-armada-3700.c
@@ -800,7 +800,7 @@ static int a3700_spi_probe(struct platform_device *pdev)
800 struct spi_master *master; 800 struct spi_master *master;
801 struct a3700_spi *spi; 801 struct a3700_spi *spi;
802 u32 num_cs = 0; 802 u32 num_cs = 0;
803 int ret = 0; 803 int irq, ret = 0;
804 804
805 master = spi_alloc_master(dev, sizeof(*spi)); 805 master = spi_alloc_master(dev, sizeof(*spi));
806 if (!master) { 806 if (!master) {
@@ -825,7 +825,7 @@ static int a3700_spi_probe(struct platform_device *pdev)
825 master->unprepare_message = a3700_spi_unprepare_message; 825 master->unprepare_message = a3700_spi_unprepare_message;
826 master->set_cs = a3700_spi_set_cs; 826 master->set_cs = a3700_spi_set_cs;
827 master->flags = SPI_MASTER_HALF_DUPLEX; 827 master->flags = SPI_MASTER_HALF_DUPLEX;
828 master->mode_bits |= (SPI_RX_DUAL | SPI_RX_DUAL | 828 master->mode_bits |= (SPI_RX_DUAL | SPI_TX_DUAL |
829 SPI_RX_QUAD | SPI_TX_QUAD); 829 SPI_RX_QUAD | SPI_TX_QUAD);
830 830
831 platform_set_drvdata(pdev, master); 831 platform_set_drvdata(pdev, master);
@@ -846,12 +846,13 @@ static int a3700_spi_probe(struct platform_device *pdev)
846 goto error; 846 goto error;
847 } 847 }
848 848
849 spi->irq = platform_get_irq(pdev, 0); 849 irq = platform_get_irq(pdev, 0);
850 if (spi->irq < 0) { 850 if (irq < 0) {
851 dev_err(dev, "could not get irq: %d\n", spi->irq); 851 dev_err(dev, "could not get irq: %d\n", irq);
852 ret = -ENXIO; 852 ret = -ENXIO;
853 goto error; 853 goto error;
854 } 854 }
855 spi->irq = irq;
855 856
856 init_completion(&spi->done); 857 init_completion(&spi->done);
857 858
diff --git a/drivers/spi/spi-axi-spi-engine.c b/drivers/spi/spi-axi-spi-engine.c
index 319225d7e761..6ab4c7700228 100644
--- a/drivers/spi/spi-axi-spi-engine.c
+++ b/drivers/spi/spi-axi-spi-engine.c
@@ -494,7 +494,8 @@ static int spi_engine_probe(struct platform_device *pdev)
494 SPI_ENGINE_VERSION_MAJOR(version), 494 SPI_ENGINE_VERSION_MAJOR(version),
495 SPI_ENGINE_VERSION_MINOR(version), 495 SPI_ENGINE_VERSION_MINOR(version),
496 SPI_ENGINE_VERSION_PATCH(version)); 496 SPI_ENGINE_VERSION_PATCH(version));
497 return -ENODEV; 497 ret = -ENODEV;
498 goto err_put_master;
498 } 499 }
499 500
500 spi_engine->clk = devm_clk_get(&pdev->dev, "s_axi_aclk"); 501 spi_engine->clk = devm_clk_get(&pdev->dev, "s_axi_aclk");
diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c
index d36c11b73a35..02fb96797ac8 100644
--- a/drivers/spi/spi-davinci.c
+++ b/drivers/spi/spi-davinci.c
@@ -646,7 +646,7 @@ static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t)
646 buf = t->rx_buf; 646 buf = t->rx_buf;
647 t->rx_dma = dma_map_single(&spi->dev, buf, 647 t->rx_dma = dma_map_single(&spi->dev, buf,
648 t->len, DMA_FROM_DEVICE); 648 t->len, DMA_FROM_DEVICE);
649 if (!t->rx_dma) { 649 if (dma_mapping_error(&spi->dev, !t->rx_dma)) {
650 ret = -EFAULT; 650 ret = -EFAULT;
651 goto err_rx_map; 651 goto err_rx_map;
652 } 652 }
@@ -660,7 +660,7 @@ static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t)
660 buf = (void *)t->tx_buf; 660 buf = (void *)t->tx_buf;
661 t->tx_dma = dma_map_single(&spi->dev, buf, 661 t->tx_dma = dma_map_single(&spi->dev, buf,
662 t->len, DMA_TO_DEVICE); 662 t->len, DMA_TO_DEVICE);
663 if (!t->tx_dma) { 663 if (dma_mapping_error(&spi->dev, t->tx_dma)) {
664 ret = -EFAULT; 664 ret = -EFAULT;
665 goto err_tx_map; 665 goto err_tx_map;
666 } 666 }
diff --git a/drivers/spi/spi-dw-mid.c b/drivers/spi/spi-dw-mid.c
index e31971f91475..837cb8d0bac6 100644
--- a/drivers/spi/spi-dw-mid.c
+++ b/drivers/spi/spi-dw-mid.c
@@ -274,11 +274,11 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, struct spi_transfer *xfer)
274static void mid_spi_dma_stop(struct dw_spi *dws) 274static void mid_spi_dma_stop(struct dw_spi *dws)
275{ 275{
276 if (test_bit(TX_BUSY, &dws->dma_chan_busy)) { 276 if (test_bit(TX_BUSY, &dws->dma_chan_busy)) {
277 dmaengine_terminate_all(dws->txchan); 277 dmaengine_terminate_sync(dws->txchan);
278 clear_bit(TX_BUSY, &dws->dma_chan_busy); 278 clear_bit(TX_BUSY, &dws->dma_chan_busy);
279 } 279 }
280 if (test_bit(RX_BUSY, &dws->dma_chan_busy)) { 280 if (test_bit(RX_BUSY, &dws->dma_chan_busy)) {
281 dmaengine_terminate_all(dws->rxchan); 281 dmaengine_terminate_sync(dws->rxchan);
282 clear_bit(RX_BUSY, &dws->dma_chan_busy); 282 clear_bit(RX_BUSY, &dws->dma_chan_busy);
283 } 283 }
284} 284}
diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c
index b715a26a9148..054012f87567 100644
--- a/drivers/spi/spi-dw.c
+++ b/drivers/spi/spi-dw.c
@@ -107,7 +107,10 @@ static const struct file_operations dw_spi_regs_ops = {
107 107
108static int dw_spi_debugfs_init(struct dw_spi *dws) 108static int dw_spi_debugfs_init(struct dw_spi *dws)
109{ 109{
110 dws->debugfs = debugfs_create_dir("dw_spi", NULL); 110 char name[128];
111
112 snprintf(name, 128, "dw_spi-%s", dev_name(&dws->master->dev));
113 dws->debugfs = debugfs_create_dir(name, NULL);
111 if (!dws->debugfs) 114 if (!dws->debugfs)
112 return -ENOMEM; 115 return -ENOMEM;
113 116
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index dd7b5b47291d..d6239fa718be 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -1690,6 +1690,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
1690 pxa2xx_spi_write(drv_data, SSCR1, tmp); 1690 pxa2xx_spi_write(drv_data, SSCR1, tmp);
1691 tmp = SSCR0_SCR(2) | SSCR0_Motorola | SSCR0_DataSize(8); 1691 tmp = SSCR0_SCR(2) | SSCR0_Motorola | SSCR0_DataSize(8);
1692 pxa2xx_spi_write(drv_data, SSCR0, tmp); 1692 pxa2xx_spi_write(drv_data, SSCR0, tmp);
1693 break;
1693 default: 1694 default:
1694 tmp = SSCR1_RxTresh(RX_THRESH_DFLT) | 1695 tmp = SSCR1_RxTresh(RX_THRESH_DFLT) |
1695 SSCR1_TxTresh(TX_THRESH_DFLT); 1696 SSCR1_TxTresh(TX_THRESH_DFLT);
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
index 0012ad02e569..1f00eeb0b5a3 100644
--- a/drivers/spi/spi-sh-msiof.c
+++ b/drivers/spi/spi-sh-msiof.c
@@ -973,14 +973,16 @@ static const struct sh_msiof_chipdata r8a779x_data = {
973}; 973};
974 974
975static const struct of_device_id sh_msiof_match[] = { 975static const struct of_device_id sh_msiof_match[] = {
976 { .compatible = "renesas,sh-msiof", .data = &sh_data },
977 { .compatible = "renesas,sh-mobile-msiof", .data = &sh_data }, 976 { .compatible = "renesas,sh-mobile-msiof", .data = &sh_data },
978 { .compatible = "renesas,msiof-r8a7790", .data = &r8a779x_data }, 977 { .compatible = "renesas,msiof-r8a7790", .data = &r8a779x_data },
979 { .compatible = "renesas,msiof-r8a7791", .data = &r8a779x_data }, 978 { .compatible = "renesas,msiof-r8a7791", .data = &r8a779x_data },
980 { .compatible = "renesas,msiof-r8a7792", .data = &r8a779x_data }, 979 { .compatible = "renesas,msiof-r8a7792", .data = &r8a779x_data },
981 { .compatible = "renesas,msiof-r8a7793", .data = &r8a779x_data }, 980 { .compatible = "renesas,msiof-r8a7793", .data = &r8a779x_data },
982 { .compatible = "renesas,msiof-r8a7794", .data = &r8a779x_data }, 981 { .compatible = "renesas,msiof-r8a7794", .data = &r8a779x_data },
982 { .compatible = "renesas,rcar-gen2-msiof", .data = &r8a779x_data },
983 { .compatible = "renesas,msiof-r8a7796", .data = &r8a779x_data }, 983 { .compatible = "renesas,msiof-r8a7796", .data = &r8a779x_data },
984 { .compatible = "renesas,rcar-gen3-msiof", .data = &r8a779x_data },
985 { .compatible = "renesas,sh-msiof", .data = &sh_data }, /* Deprecated */
984 {}, 986 {},
985}; 987};
986MODULE_DEVICE_TABLE(of, sh_msiof_match); 988MODULE_DEVICE_TABLE(of, sh_msiof_match);
diff --git a/drivers/thermal/rockchip_thermal.c b/drivers/thermal/rockchip_thermal.c
index b811b0fb61b1..4c7796512453 100644
--- a/drivers/thermal/rockchip_thermal.c
+++ b/drivers/thermal/rockchip_thermal.c
@@ -118,12 +118,12 @@ struct rockchip_tsadc_chip {
118 void (*control)(void __iomem *reg, bool on); 118 void (*control)(void __iomem *reg, bool on);
119 119
120 /* Per-sensor methods */ 120 /* Per-sensor methods */
121 int (*get_temp)(struct chip_tsadc_table table, 121 int (*get_temp)(const struct chip_tsadc_table *table,
122 int chn, void __iomem *reg, int *temp); 122 int chn, void __iomem *reg, int *temp);
123 void (*set_alarm_temp)(struct chip_tsadc_table table, 123 int (*set_alarm_temp)(const struct chip_tsadc_table *table,
124 int chn, void __iomem *reg, int temp); 124 int chn, void __iomem *reg, int temp);
125 void (*set_tshut_temp)(struct chip_tsadc_table table, 125 int (*set_tshut_temp)(const struct chip_tsadc_table *table,
126 int chn, void __iomem *reg, int temp); 126 int chn, void __iomem *reg, int temp);
127 void (*set_tshut_mode)(int chn, void __iomem *reg, enum tshut_mode m); 127 void (*set_tshut_mode)(int chn, void __iomem *reg, enum tshut_mode m);
128 128
129 /* Per-table methods */ 129 /* Per-table methods */
@@ -317,6 +317,7 @@ static const struct tsadc_table rk3288_code_table[] = {
317 {3452, 115000}, 317 {3452, 115000},
318 {3437, 120000}, 318 {3437, 120000},
319 {3421, 125000}, 319 {3421, 125000},
320 {0, 125000},
320}; 321};
321 322
322static const struct tsadc_table rk3368_code_table[] = { 323static const struct tsadc_table rk3368_code_table[] = {
@@ -397,59 +398,80 @@ static const struct tsadc_table rk3399_code_table[] = {
397 {TSADCV3_DATA_MASK, 125000}, 398 {TSADCV3_DATA_MASK, 125000},
398}; 399};
399 400
400static u32 rk_tsadcv2_temp_to_code(struct chip_tsadc_table table, 401static u32 rk_tsadcv2_temp_to_code(const struct chip_tsadc_table *table,
401 int temp) 402 int temp)
402{ 403{
403 int high, low, mid; 404 int high, low, mid;
404 u32 error = 0; 405 unsigned long num;
406 unsigned int denom;
407 u32 error = table->data_mask;
405 408
406 low = 0; 409 low = 0;
407 high = table.length - 1; 410 high = (table->length - 1) - 1; /* ignore the last check for table */
408 mid = (high + low) / 2; 411 mid = (high + low) / 2;
409 412
410 /* Return mask code data when the temp is over table range */ 413 /* Return mask code data when the temp is over table range */
411 if (temp < table.id[low].temp || temp > table.id[high].temp) { 414 if (temp < table->id[low].temp || temp > table->id[high].temp)
412 error = table.data_mask;
413 goto exit; 415 goto exit;
414 }
415 416
416 while (low <= high) { 417 while (low <= high) {
417 if (temp == table.id[mid].temp) 418 if (temp == table->id[mid].temp)
418 return table.id[mid].code; 419 return table->id[mid].code;
419 else if (temp < table.id[mid].temp) 420 else if (temp < table->id[mid].temp)
420 high = mid - 1; 421 high = mid - 1;
421 else 422 else
422 low = mid + 1; 423 low = mid + 1;
423 mid = (low + high) / 2; 424 mid = (low + high) / 2;
424 } 425 }
425 426
427 /*
428 * The conversion code granularity provided by the table. Let's
429 * assume that the relationship between temperature and
430 * analog value between 2 table entries is linear and interpolate
431 * to produce less granular result.
432 */
433 num = abs(table->id[mid + 1].code - table->id[mid].code);
434 num *= temp - table->id[mid].temp;
435 denom = table->id[mid + 1].temp - table->id[mid].temp;
436
437 switch (table->mode) {
438 case ADC_DECREMENT:
439 return table->id[mid].code - (num / denom);
440 case ADC_INCREMENT:
441 return table->id[mid].code + (num / denom);
442 default:
443 pr_err("%s: unknown table mode: %d\n", __func__, table->mode);
444 return error;
445 }
446
426exit: 447exit:
427 pr_err("Invalid the conversion, error=%d\n", error); 448 pr_err("%s: invalid temperature, temp=%d error=%d\n",
449 __func__, temp, error);
428 return error; 450 return error;
429} 451}
430 452
431static int rk_tsadcv2_code_to_temp(struct chip_tsadc_table table, u32 code, 453static int rk_tsadcv2_code_to_temp(const struct chip_tsadc_table *table,
432 int *temp) 454 u32 code, int *temp)
433{ 455{
434 unsigned int low = 1; 456 unsigned int low = 1;
435 unsigned int high = table.length - 1; 457 unsigned int high = table->length - 1;
436 unsigned int mid = (low + high) / 2; 458 unsigned int mid = (low + high) / 2;
437 unsigned int num; 459 unsigned int num;
438 unsigned long denom; 460 unsigned long denom;
439 461
440 WARN_ON(table.length < 2); 462 WARN_ON(table->length < 2);
441 463
442 switch (table.mode) { 464 switch (table->mode) {
443 case ADC_DECREMENT: 465 case ADC_DECREMENT:
444 code &= table.data_mask; 466 code &= table->data_mask;
445 if (code < table.id[high].code) 467 if (code <= table->id[high].code)
446 return -EAGAIN; /* Incorrect reading */ 468 return -EAGAIN; /* Incorrect reading */
447 469
448 while (low <= high) { 470 while (low <= high) {
449 if (code >= table.id[mid].code && 471 if (code >= table->id[mid].code &&
450 code < table.id[mid - 1].code) 472 code < table->id[mid - 1].code)
451 break; 473 break;
452 else if (code < table.id[mid].code) 474 else if (code < table->id[mid].code)
453 low = mid + 1; 475 low = mid + 1;
454 else 476 else
455 high = mid - 1; 477 high = mid - 1;
@@ -458,15 +480,15 @@ static int rk_tsadcv2_code_to_temp(struct chip_tsadc_table table, u32 code,
458 } 480 }
459 break; 481 break;
460 case ADC_INCREMENT: 482 case ADC_INCREMENT:
461 code &= table.data_mask; 483 code &= table->data_mask;
462 if (code < table.id[low].code) 484 if (code < table->id[low].code)
463 return -EAGAIN; /* Incorrect reading */ 485 return -EAGAIN; /* Incorrect reading */
464 486
465 while (low <= high) { 487 while (low <= high) {
466 if (code <= table.id[mid].code && 488 if (code <= table->id[mid].code &&
467 code > table.id[mid - 1].code) 489 code > table->id[mid - 1].code)
468 break; 490 break;
469 else if (code > table.id[mid].code) 491 else if (code > table->id[mid].code)
470 low = mid + 1; 492 low = mid + 1;
471 else 493 else
472 high = mid - 1; 494 high = mid - 1;
@@ -475,7 +497,8 @@ static int rk_tsadcv2_code_to_temp(struct chip_tsadc_table table, u32 code,
475 } 497 }
476 break; 498 break;
477 default: 499 default:
478 pr_err("Invalid the conversion table\n"); 500 pr_err("%s: unknown table mode: %d\n", __func__, table->mode);
501 return -EINVAL;
479 } 502 }
480 503
481 /* 504 /*
@@ -484,10 +507,10 @@ static int rk_tsadcv2_code_to_temp(struct chip_tsadc_table table, u32 code,
484 * temperature between 2 table entries is linear and interpolate 507 * temperature between 2 table entries is linear and interpolate
485 * to produce less granular result. 508 * to produce less granular result.
486 */ 509 */
487 num = table.id[mid].temp - table.id[mid - 1].temp; 510 num = table->id[mid].temp - table->id[mid - 1].temp;
488 num *= abs(table.id[mid - 1].code - code); 511 num *= abs(table->id[mid - 1].code - code);
489 denom = abs(table.id[mid - 1].code - table.id[mid].code); 512 denom = abs(table->id[mid - 1].code - table->id[mid].code);
490 *temp = table.id[mid - 1].temp + (num / denom); 513 *temp = table->id[mid - 1].temp + (num / denom);
491 514
492 return 0; 515 return 0;
493} 516}
@@ -638,7 +661,7 @@ static void rk_tsadcv3_control(void __iomem *regs, bool enable)
638 writel_relaxed(val, regs + TSADCV2_AUTO_CON); 661 writel_relaxed(val, regs + TSADCV2_AUTO_CON);
639} 662}
640 663
641static int rk_tsadcv2_get_temp(struct chip_tsadc_table table, 664static int rk_tsadcv2_get_temp(const struct chip_tsadc_table *table,
642 int chn, void __iomem *regs, int *temp) 665 int chn, void __iomem *regs, int *temp)
643{ 666{
644 u32 val; 667 u32 val;
@@ -648,39 +671,57 @@ static int rk_tsadcv2_get_temp(struct chip_tsadc_table table,
648 return rk_tsadcv2_code_to_temp(table, val, temp); 671 return rk_tsadcv2_code_to_temp(table, val, temp);
649} 672}
650 673
651static void rk_tsadcv2_alarm_temp(struct chip_tsadc_table table, 674static int rk_tsadcv2_alarm_temp(const struct chip_tsadc_table *table,
652 int chn, void __iomem *regs, int temp) 675 int chn, void __iomem *regs, int temp)
653{ 676{
654 u32 alarm_value, int_en; 677 u32 alarm_value;
678 u32 int_en, int_clr;
679
680 /*
681 * In some cases, some sensors didn't need the trip points, the
682 * set_trips will pass {-INT_MAX, INT_MAX} to trigger tsadc alarm
683 * in the end, ignore this case and disable the high temperature
684 * interrupt.
685 */
686 if (temp == INT_MAX) {
687 int_clr = readl_relaxed(regs + TSADCV2_INT_EN);
688 int_clr &= ~TSADCV2_INT_SRC_EN(chn);
689 writel_relaxed(int_clr, regs + TSADCV2_INT_EN);
690 return 0;
691 }
655 692
656 /* Make sure the value is valid */ 693 /* Make sure the value is valid */
657 alarm_value = rk_tsadcv2_temp_to_code(table, temp); 694 alarm_value = rk_tsadcv2_temp_to_code(table, temp);
658 if (alarm_value == table.data_mask) 695 if (alarm_value == table->data_mask)
659 return; 696 return -ERANGE;
660 697
661 writel_relaxed(alarm_value & table.data_mask, 698 writel_relaxed(alarm_value & table->data_mask,
662 regs + TSADCV2_COMP_INT(chn)); 699 regs + TSADCV2_COMP_INT(chn));
663 700
664 int_en = readl_relaxed(regs + TSADCV2_INT_EN); 701 int_en = readl_relaxed(regs + TSADCV2_INT_EN);
665 int_en |= TSADCV2_INT_SRC_EN(chn); 702 int_en |= TSADCV2_INT_SRC_EN(chn);
666 writel_relaxed(int_en, regs + TSADCV2_INT_EN); 703 writel_relaxed(int_en, regs + TSADCV2_INT_EN);
704
705 return 0;
667} 706}
668 707
669static void rk_tsadcv2_tshut_temp(struct chip_tsadc_table table, 708static int rk_tsadcv2_tshut_temp(const struct chip_tsadc_table *table,
670 int chn, void __iomem *regs, int temp) 709 int chn, void __iomem *regs, int temp)
671{ 710{
672 u32 tshut_value, val; 711 u32 tshut_value, val;
673 712
674 /* Make sure the value is valid */ 713 /* Make sure the value is valid */
675 tshut_value = rk_tsadcv2_temp_to_code(table, temp); 714 tshut_value = rk_tsadcv2_temp_to_code(table, temp);
676 if (tshut_value == table.data_mask) 715 if (tshut_value == table->data_mask)
677 return; 716 return -ERANGE;
678 717
679 writel_relaxed(tshut_value, regs + TSADCV2_COMP_SHUT(chn)); 718 writel_relaxed(tshut_value, regs + TSADCV2_COMP_SHUT(chn));
680 719
681 /* TSHUT will be valid */ 720 /* TSHUT will be valid */
682 val = readl_relaxed(regs + TSADCV2_AUTO_CON); 721 val = readl_relaxed(regs + TSADCV2_AUTO_CON);
683 writel_relaxed(val | TSADCV2_AUTO_SRC_EN(chn), regs + TSADCV2_AUTO_CON); 722 writel_relaxed(val | TSADCV2_AUTO_SRC_EN(chn), regs + TSADCV2_AUTO_CON);
723
724 return 0;
684} 725}
685 726
686static void rk_tsadcv2_tshut_mode(int chn, void __iomem *regs, 727static void rk_tsadcv2_tshut_mode(int chn, void __iomem *regs,
@@ -883,10 +924,8 @@ static int rockchip_thermal_set_trips(void *_sensor, int low, int high)
883 dev_dbg(&thermal->pdev->dev, "%s: sensor %d: low: %d, high %d\n", 924 dev_dbg(&thermal->pdev->dev, "%s: sensor %d: low: %d, high %d\n",
884 __func__, sensor->id, low, high); 925 __func__, sensor->id, low, high);
885 926
886 tsadc->set_alarm_temp(tsadc->table, 927 return tsadc->set_alarm_temp(&tsadc->table,
887 sensor->id, thermal->regs, high); 928 sensor->id, thermal->regs, high);
888
889 return 0;
890} 929}
891 930
892static int rockchip_thermal_get_temp(void *_sensor, int *out_temp) 931static int rockchip_thermal_get_temp(void *_sensor, int *out_temp)
@@ -896,7 +935,7 @@ static int rockchip_thermal_get_temp(void *_sensor, int *out_temp)
896 const struct rockchip_tsadc_chip *tsadc = sensor->thermal->chip; 935 const struct rockchip_tsadc_chip *tsadc = sensor->thermal->chip;
897 int retval; 936 int retval;
898 937
899 retval = tsadc->get_temp(tsadc->table, 938 retval = tsadc->get_temp(&tsadc->table,
900 sensor->id, thermal->regs, out_temp); 939 sensor->id, thermal->regs, out_temp);
901 dev_dbg(&thermal->pdev->dev, "sensor %d - temp: %d, retval: %d\n", 940 dev_dbg(&thermal->pdev->dev, "sensor %d - temp: %d, retval: %d\n",
902 sensor->id, *out_temp, retval); 941 sensor->id, *out_temp, retval);
@@ -982,8 +1021,12 @@ rockchip_thermal_register_sensor(struct platform_device *pdev,
982 int error; 1021 int error;
983 1022
984 tsadc->set_tshut_mode(id, thermal->regs, thermal->tshut_mode); 1023 tsadc->set_tshut_mode(id, thermal->regs, thermal->tshut_mode);
985 tsadc->set_tshut_temp(tsadc->table, id, thermal->regs, 1024
1025 error = tsadc->set_tshut_temp(&tsadc->table, id, thermal->regs,
986 thermal->tshut_temp); 1026 thermal->tshut_temp);
1027 if (error)
1028 dev_err(&pdev->dev, "%s: invalid tshut=%d, error=%d\n",
1029 __func__, thermal->tshut_temp, error);
987 1030
988 sensor->thermal = thermal; 1031 sensor->thermal = thermal;
989 sensor->id = id; 1032 sensor->id = id;
@@ -1196,9 +1239,13 @@ static int __maybe_unused rockchip_thermal_resume(struct device *dev)
1196 1239
1197 thermal->chip->set_tshut_mode(id, thermal->regs, 1240 thermal->chip->set_tshut_mode(id, thermal->regs,
1198 thermal->tshut_mode); 1241 thermal->tshut_mode);
1199 thermal->chip->set_tshut_temp(thermal->chip->table, 1242
1243 error = thermal->chip->set_tshut_temp(&thermal->chip->table,
1200 id, thermal->regs, 1244 id, thermal->regs,
1201 thermal->tshut_temp); 1245 thermal->tshut_temp);
1246 if (error)
1247 dev_err(&pdev->dev, "%s: invalid tshut=%d, error=%d\n",
1248 __func__, thermal->tshut_temp, error);
1202 } 1249 }
1203 1250
1204 thermal->chip->control(thermal->regs, true); 1251 thermal->chip->control(thermal->regs, true);
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 641faab6e24b..655591316a88 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -799,6 +799,11 @@ static void thermal_release(struct device *dev)
799 if (!strncmp(dev_name(dev), "thermal_zone", 799 if (!strncmp(dev_name(dev), "thermal_zone",
800 sizeof("thermal_zone") - 1)) { 800 sizeof("thermal_zone") - 1)) {
801 tz = to_thermal_zone(dev); 801 tz = to_thermal_zone(dev);
802 kfree(tz->trip_type_attrs);
803 kfree(tz->trip_temp_attrs);
804 kfree(tz->trip_hyst_attrs);
805 kfree(tz->trips_attribute_group.attrs);
806 kfree(tz->device.groups);
802 kfree(tz); 807 kfree(tz);
803 } else if (!strncmp(dev_name(dev), "cooling_device", 808 } else if (!strncmp(dev_name(dev), "cooling_device",
804 sizeof("cooling_device") - 1)) { 809 sizeof("cooling_device") - 1)) {
@@ -1305,10 +1310,6 @@ void thermal_zone_device_unregister(struct thermal_zone_device *tz)
1305 1310
1306 thermal_zone_device_set_polling(tz, 0); 1311 thermal_zone_device_set_polling(tz, 0);
1307 1312
1308 kfree(tz->trip_type_attrs);
1309 kfree(tz->trip_temp_attrs);
1310 kfree(tz->trip_hyst_attrs);
1311 kfree(tz->trips_attribute_group.attrs);
1312 thermal_set_governor(tz, NULL); 1313 thermal_set_governor(tz, NULL);
1313 1314
1314 thermal_remove_hwmon_sysfs(tz); 1315 thermal_remove_hwmon_sysfs(tz);
@@ -1316,7 +1317,6 @@ void thermal_zone_device_unregister(struct thermal_zone_device *tz)
1316 idr_destroy(&tz->idr); 1317 idr_destroy(&tz->idr);
1317 mutex_destroy(&tz->lock); 1318 mutex_destroy(&tz->lock);
1318 device_unregister(&tz->device); 1319 device_unregister(&tz->device);
1319 kfree(tz->device.groups);
1320} 1320}
1321EXPORT_SYMBOL_GPL(thermal_zone_device_unregister); 1321EXPORT_SYMBOL_GPL(thermal_zone_device_unregister);
1322 1322
diff --git a/drivers/thermal/thermal_hwmon.c b/drivers/thermal/thermal_hwmon.c
index 541af5946203..c4a508a124dc 100644
--- a/drivers/thermal/thermal_hwmon.c
+++ b/drivers/thermal/thermal_hwmon.c
@@ -59,14 +59,6 @@ static LIST_HEAD(thermal_hwmon_list);
59static DEFINE_MUTEX(thermal_hwmon_list_lock); 59static DEFINE_MUTEX(thermal_hwmon_list_lock);
60 60
61static ssize_t 61static ssize_t
62name_show(struct device *dev, struct device_attribute *attr, char *buf)
63{
64 struct thermal_hwmon_device *hwmon = dev_get_drvdata(dev);
65 return sprintf(buf, "%s\n", hwmon->type);
66}
67static DEVICE_ATTR_RO(name);
68
69static ssize_t
70temp_input_show(struct device *dev, struct device_attribute *attr, char *buf) 62temp_input_show(struct device *dev, struct device_attribute *attr, char *buf)
71{ 63{
72 int temperature; 64 int temperature;
@@ -165,15 +157,12 @@ int thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
165 157
166 INIT_LIST_HEAD(&hwmon->tz_list); 158 INIT_LIST_HEAD(&hwmon->tz_list);
167 strlcpy(hwmon->type, tz->type, THERMAL_NAME_LENGTH); 159 strlcpy(hwmon->type, tz->type, THERMAL_NAME_LENGTH);
168 hwmon->device = hwmon_device_register(NULL); 160 hwmon->device = hwmon_device_register_with_info(NULL, hwmon->type,
161 hwmon, NULL, NULL);
169 if (IS_ERR(hwmon->device)) { 162 if (IS_ERR(hwmon->device)) {
170 result = PTR_ERR(hwmon->device); 163 result = PTR_ERR(hwmon->device);
171 goto free_mem; 164 goto free_mem;
172 } 165 }
173 dev_set_drvdata(hwmon->device, hwmon);
174 result = device_create_file(hwmon->device, &dev_attr_name);
175 if (result)
176 goto free_mem;
177 166
178 register_sys_interface: 167 register_sys_interface:
179 temp = kzalloc(sizeof(*temp), GFP_KERNEL); 168 temp = kzalloc(sizeof(*temp), GFP_KERNEL);
@@ -222,10 +211,8 @@ int thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
222 free_temp_mem: 211 free_temp_mem:
223 kfree(temp); 212 kfree(temp);
224 unregister_name: 213 unregister_name:
225 if (new_hwmon_device) { 214 if (new_hwmon_device)
226 device_remove_file(hwmon->device, &dev_attr_name);
227 hwmon_device_unregister(hwmon->device); 215 hwmon_device_unregister(hwmon->device);
228 }
229 free_mem: 216 free_mem:
230 if (new_hwmon_device) 217 if (new_hwmon_device)
231 kfree(hwmon); 218 kfree(hwmon);
@@ -267,7 +254,6 @@ void thermal_remove_hwmon_sysfs(struct thermal_zone_device *tz)
267 list_del(&hwmon->node); 254 list_del(&hwmon->node);
268 mutex_unlock(&thermal_hwmon_list_lock); 255 mutex_unlock(&thermal_hwmon_list_lock);
269 256
270 device_remove_file(hwmon->device, &dev_attr_name);
271 hwmon_device_unregister(hwmon->device); 257 hwmon_device_unregister(hwmon->device);
272 kfree(hwmon); 258 kfree(hwmon);
273} 259}
diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h
index 9548d3e03453..302b8f5f7d27 100644
--- a/drivers/usb/dwc2/core.h
+++ b/drivers/usb/dwc2/core.h
@@ -513,8 +513,8 @@ struct dwc2_core_params {
513 /* Gadget parameters */ 513 /* Gadget parameters */
514 bool g_dma; 514 bool g_dma;
515 bool g_dma_desc; 515 bool g_dma_desc;
516 u16 g_rx_fifo_size; 516 u32 g_rx_fifo_size;
517 u16 g_np_tx_fifo_size; 517 u32 g_np_tx_fifo_size;
518 u32 g_tx_fifo_size[MAX_EPS_CHANNELS]; 518 u32 g_tx_fifo_size[MAX_EPS_CHANNELS];
519}; 519};
520 520
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index c55db4aa54d6..77c5fcf3a5bf 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -3169,7 +3169,7 @@ void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg,
3169 /* keep other bits untouched (so e.g. forced modes are not lost) */ 3169 /* keep other bits untouched (so e.g. forced modes are not lost) */
3170 usbcfg = dwc2_readl(hsotg->regs + GUSBCFG); 3170 usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
3171 usbcfg &= ~(GUSBCFG_TOUTCAL_MASK | GUSBCFG_PHYIF16 | GUSBCFG_SRPCAP | 3171 usbcfg &= ~(GUSBCFG_TOUTCAL_MASK | GUSBCFG_PHYIF16 | GUSBCFG_SRPCAP |
3172 GUSBCFG_HNPCAP); 3172 GUSBCFG_HNPCAP | GUSBCFG_USBTRDTIM_MASK);
3173 3173
3174 if (hsotg->params.phy_type == DWC2_PHY_TYPE_PARAM_FS && 3174 if (hsotg->params.phy_type == DWC2_PHY_TYPE_PARAM_FS &&
3175 (hsotg->params.speed == DWC2_SPEED_PARAM_FULL || 3175 (hsotg->params.speed == DWC2_SPEED_PARAM_FULL ||
@@ -3749,8 +3749,8 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep,
3749 __func__, epctrl, epctrl_reg); 3749 __func__, epctrl, epctrl_reg);
3750 3750
3751 /* Allocate DMA descriptor chain for non-ctrl endpoints */ 3751 /* Allocate DMA descriptor chain for non-ctrl endpoints */
3752 if (using_desc_dma(hsotg)) { 3752 if (using_desc_dma(hsotg) && !hs_ep->desc_list) {
3753 hs_ep->desc_list = dma_alloc_coherent(hsotg->dev, 3753 hs_ep->desc_list = dmam_alloc_coherent(hsotg->dev,
3754 MAX_DMA_DESC_NUM_GENERIC * 3754 MAX_DMA_DESC_NUM_GENERIC *
3755 sizeof(struct dwc2_dma_desc), 3755 sizeof(struct dwc2_dma_desc),
3756 &hs_ep->desc_list_dma, GFP_ATOMIC); 3756 &hs_ep->desc_list_dma, GFP_ATOMIC);
@@ -3872,7 +3872,7 @@ error1:
3872 3872
3873error2: 3873error2:
3874 if (ret && using_desc_dma(hsotg) && hs_ep->desc_list) { 3874 if (ret && using_desc_dma(hsotg) && hs_ep->desc_list) {
3875 dma_free_coherent(hsotg->dev, MAX_DMA_DESC_NUM_GENERIC * 3875 dmam_free_coherent(hsotg->dev, MAX_DMA_DESC_NUM_GENERIC *
3876 sizeof(struct dwc2_dma_desc), 3876 sizeof(struct dwc2_dma_desc),
3877 hs_ep->desc_list, hs_ep->desc_list_dma); 3877 hs_ep->desc_list, hs_ep->desc_list_dma);
3878 hs_ep->desc_list = NULL; 3878 hs_ep->desc_list = NULL;
@@ -3902,14 +3902,6 @@ static int dwc2_hsotg_ep_disable(struct usb_ep *ep)
3902 return -EINVAL; 3902 return -EINVAL;
3903 } 3903 }
3904 3904
3905 /* Remove DMA memory allocated for non-control Endpoints */
3906 if (using_desc_dma(hsotg)) {
3907 dma_free_coherent(hsotg->dev, MAX_DMA_DESC_NUM_GENERIC *
3908 sizeof(struct dwc2_dma_desc),
3909 hs_ep->desc_list, hs_ep->desc_list_dma);
3910 hs_ep->desc_list = NULL;
3911 }
3912
3913 epctrl_reg = dir_in ? DIEPCTL(index) : DOEPCTL(index); 3905 epctrl_reg = dir_in ? DIEPCTL(index) : DOEPCTL(index);
3914 3906
3915 spin_lock_irqsave(&hsotg->lock, flags); 3907 spin_lock_irqsave(&hsotg->lock, flags);
@@ -4131,7 +4123,7 @@ static void dwc2_hsotg_init(struct dwc2_hsotg *hsotg)
4131 /* keep other bits untouched (so e.g. forced modes are not lost) */ 4123 /* keep other bits untouched (so e.g. forced modes are not lost) */
4132 usbcfg = dwc2_readl(hsotg->regs + GUSBCFG); 4124 usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
4133 usbcfg &= ~(GUSBCFG_TOUTCAL_MASK | GUSBCFG_PHYIF16 | GUSBCFG_SRPCAP | 4125 usbcfg &= ~(GUSBCFG_TOUTCAL_MASK | GUSBCFG_PHYIF16 | GUSBCFG_SRPCAP |
4134 GUSBCFG_HNPCAP); 4126 GUSBCFG_HNPCAP | GUSBCFG_USBTRDTIM_MASK);
4135 4127
4136 /* set the PLL on, remove the HNP/SRP and set the PHY */ 4128 /* set the PLL on, remove the HNP/SRP and set the PHY */
4137 trdtim = (hsotg->phyif == GUSBCFG_PHYIF8) ? 9 : 5; 4129 trdtim = (hsotg->phyif == GUSBCFG_PHYIF8) ? 9 : 5;
diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
index 911c3b36ac06..46d0ad5105e4 100644
--- a/drivers/usb/dwc2/hcd.c
+++ b/drivers/usb/dwc2/hcd.c
@@ -4367,6 +4367,9 @@ static int _dwc2_hcd_suspend(struct usb_hcd *hcd)
4367 if (!HCD_HW_ACCESSIBLE(hcd)) 4367 if (!HCD_HW_ACCESSIBLE(hcd))
4368 goto unlock; 4368 goto unlock;
4369 4369
4370 if (hsotg->op_state == OTG_STATE_B_PERIPHERAL)
4371 goto unlock;
4372
4370 if (!hsotg->params.hibernation) 4373 if (!hsotg->params.hibernation)
4371 goto skip_power_saving; 4374 goto skip_power_saving;
4372 4375
@@ -4489,8 +4492,8 @@ static void dwc2_dump_urb_info(struct usb_hcd *hcd, struct urb *urb,
4489{ 4492{
4490#ifdef VERBOSE_DEBUG 4493#ifdef VERBOSE_DEBUG
4491 struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd); 4494 struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
4492 char *pipetype; 4495 char *pipetype = NULL;
4493 char *speed; 4496 char *speed = NULL;
4494 4497
4495 dev_vdbg(hsotg->dev, "%s, urb %p\n", fn_name, urb); 4498 dev_vdbg(hsotg->dev, "%s, urb %p\n", fn_name, urb);
4496 dev_vdbg(hsotg->dev, " Device address: %d\n", 4499 dev_vdbg(hsotg->dev, " Device address: %d\n",
diff --git a/drivers/usb/dwc2/params.c b/drivers/usb/dwc2/params.c
index 11fe68a4627b..bcd1e19b4076 100644
--- a/drivers/usb/dwc2/params.c
+++ b/drivers/usb/dwc2/params.c
@@ -385,16 +385,16 @@ static void dwc2_set_param(struct dwc2_hsotg *hsotg, void *param,
385} 385}
386 386
387/** 387/**
388 * dwc2_set_param_u16() - Set a u16 parameter 388 * dwc2_set_param_u32() - Set a u32 parameter
389 * 389 *
390 * See dwc2_set_param(). 390 * See dwc2_set_param().
391 */ 391 */
392static void dwc2_set_param_u16(struct dwc2_hsotg *hsotg, u16 *param, 392static void dwc2_set_param_u32(struct dwc2_hsotg *hsotg, u32 *param,
393 bool lookup, char *property, u16 legacy, 393 bool lookup, char *property, u16 legacy,
394 u16 def, u16 min, u16 max) 394 u16 def, u16 min, u16 max)
395{ 395{
396 dwc2_set_param(hsotg, param, lookup, property, 396 dwc2_set_param(hsotg, param, lookup, property,
397 legacy, def, min, max, 2); 397 legacy, def, min, max, 4);
398} 398}
399 399
400/** 400/**
@@ -1178,12 +1178,12 @@ static void dwc2_set_parameters(struct dwc2_hsotg *hsotg,
1178 * auto-detect if the hardware does not support the 1178 * auto-detect if the hardware does not support the
1179 * default. 1179 * default.
1180 */ 1180 */
1181 dwc2_set_param_u16(hsotg, &p->g_rx_fifo_size, 1181 dwc2_set_param_u32(hsotg, &p->g_rx_fifo_size,
1182 true, "g-rx-fifo-size", 2048, 1182 true, "g-rx-fifo-size", 2048,
1183 hw->rx_fifo_size, 1183 hw->rx_fifo_size,
1184 16, hw->rx_fifo_size); 1184 16, hw->rx_fifo_size);
1185 1185
1186 dwc2_set_param_u16(hsotg, &p->g_np_tx_fifo_size, 1186 dwc2_set_param_u32(hsotg, &p->g_np_tx_fifo_size,
1187 true, "g-np-tx-fifo-size", 1024, 1187 true, "g-np-tx-fifo-size", 1024,
1188 hw->dev_nperio_tx_fifo_size, 1188 hw->dev_nperio_tx_fifo_size,
1189 16, hw->dev_nperio_tx_fifo_size); 1189 16, hw->dev_nperio_tx_fifo_size);
diff --git a/drivers/usb/dwc3/dwc3-exynos.c b/drivers/usb/dwc3/dwc3-exynos.c
index e27899bb5706..e956306d9b0f 100644
--- a/drivers/usb/dwc3/dwc3-exynos.c
+++ b/drivers/usb/dwc3/dwc3-exynos.c
@@ -138,7 +138,8 @@ static int dwc3_exynos_probe(struct platform_device *pdev)
138 exynos->axius_clk = devm_clk_get(dev, "usbdrd30_axius_clk"); 138 exynos->axius_clk = devm_clk_get(dev, "usbdrd30_axius_clk");
139 if (IS_ERR(exynos->axius_clk)) { 139 if (IS_ERR(exynos->axius_clk)) {
140 dev_err(dev, "no AXI UpScaler clk specified\n"); 140 dev_err(dev, "no AXI UpScaler clk specified\n");
141 return -ENODEV; 141 ret = -ENODEV;
142 goto axius_clk_err;
142 } 143 }
143 clk_prepare_enable(exynos->axius_clk); 144 clk_prepare_enable(exynos->axius_clk);
144 } else { 145 } else {
@@ -196,6 +197,7 @@ err3:
196 regulator_disable(exynos->vdd33); 197 regulator_disable(exynos->vdd33);
197err2: 198err2:
198 clk_disable_unprepare(exynos->axius_clk); 199 clk_disable_unprepare(exynos->axius_clk);
200axius_clk_err:
199 clk_disable_unprepare(exynos->susp_clk); 201 clk_disable_unprepare(exynos->susp_clk);
200 clk_disable_unprepare(exynos->clk); 202 clk_disable_unprepare(exynos->clk);
201 return ret; 203 return ret;
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index 002822d98fda..49d685ad0da9 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -2147,7 +2147,7 @@ int composite_os_desc_req_prepare(struct usb_composite_dev *cdev,
2147 cdev->os_desc_req->buf = kmalloc(4096, GFP_KERNEL); 2147 cdev->os_desc_req->buf = kmalloc(4096, GFP_KERNEL);
2148 if (!cdev->os_desc_req->buf) { 2148 if (!cdev->os_desc_req->buf) {
2149 ret = -ENOMEM; 2149 ret = -ENOMEM;
2150 kfree(cdev->os_desc_req); 2150 usb_ep_free_request(ep0, cdev->os_desc_req);
2151 goto end; 2151 goto end;
2152 } 2152 }
2153 cdev->os_desc_req->context = cdev; 2153 cdev->os_desc_req->context = cdev;
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 5e746adc8a2d..5490fc51638e 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -1806,7 +1806,7 @@ static void ffs_func_eps_disable(struct ffs_function *func)
1806 unsigned long flags; 1806 unsigned long flags;
1807 1807
1808 spin_lock_irqsave(&func->ffs->eps_lock, flags); 1808 spin_lock_irqsave(&func->ffs->eps_lock, flags);
1809 do { 1809 while (count--) {
1810 /* pending requests get nuked */ 1810 /* pending requests get nuked */
1811 if (likely(ep->ep)) 1811 if (likely(ep->ep))
1812 usb_ep_disable(ep->ep); 1812 usb_ep_disable(ep->ep);
@@ -1817,7 +1817,7 @@ static void ffs_func_eps_disable(struct ffs_function *func)
1817 __ffs_epfile_read_buffer_free(epfile); 1817 __ffs_epfile_read_buffer_free(epfile);
1818 ++epfile; 1818 ++epfile;
1819 } 1819 }
1820 } while (--count); 1820 }
1821 spin_unlock_irqrestore(&func->ffs->eps_lock, flags); 1821 spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
1822} 1822}
1823 1823
@@ -1831,7 +1831,7 @@ static int ffs_func_eps_enable(struct ffs_function *func)
1831 int ret = 0; 1831 int ret = 0;
1832 1832
1833 spin_lock_irqsave(&func->ffs->eps_lock, flags); 1833 spin_lock_irqsave(&func->ffs->eps_lock, flags);
1834 do { 1834 while(count--) {
1835 struct usb_endpoint_descriptor *ds; 1835 struct usb_endpoint_descriptor *ds;
1836 int desc_idx; 1836 int desc_idx;
1837 1837
@@ -1867,7 +1867,7 @@ static int ffs_func_eps_enable(struct ffs_function *func)
1867 1867
1868 ++ep; 1868 ++ep;
1869 ++epfile; 1869 ++epfile;
1870 } while (--count); 1870 }
1871 spin_unlock_irqrestore(&func->ffs->eps_lock, flags); 1871 spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
1872 1872
1873 return ret; 1873 return ret;
@@ -3448,12 +3448,12 @@ static void ffs_func_unbind(struct usb_configuration *c,
3448 3448
3449 /* cleanup after autoconfig */ 3449 /* cleanup after autoconfig */
3450 spin_lock_irqsave(&func->ffs->eps_lock, flags); 3450 spin_lock_irqsave(&func->ffs->eps_lock, flags);
3451 do { 3451 while (count--) {
3452 if (ep->ep && ep->req) 3452 if (ep->ep && ep->req)
3453 usb_ep_free_request(ep->ep, ep->req); 3453 usb_ep_free_request(ep->ep, ep->req);
3454 ep->req = NULL; 3454 ep->req = NULL;
3455 ++ep; 3455 ++ep;
3456 } while (--count); 3456 }
3457 spin_unlock_irqrestore(&func->ffs->eps_lock, flags); 3457 spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
3458 kfree(func->eps); 3458 kfree(func->eps);
3459 func->eps = NULL; 3459 func->eps = NULL;
diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c
index f3212db9bc37..12c7687216e6 100644
--- a/drivers/usb/gadget/udc/atmel_usba_udc.c
+++ b/drivers/usb/gadget/udc/atmel_usba_udc.c
@@ -1978,7 +1978,8 @@ static struct usba_ep * atmel_udc_of_init(struct platform_device *pdev,
1978 dev_err(&pdev->dev, "of_probe: name error(%d)\n", ret); 1978 dev_err(&pdev->dev, "of_probe: name error(%d)\n", ret);
1979 goto err; 1979 goto err;
1980 } 1980 }
1981 ep->ep.name = kasprintf(GFP_KERNEL, "ep%d", ep->index); 1981 sprintf(ep->name, "ep%d", ep->index);
1982 ep->ep.name = ep->name;
1982 1983
1983 ep->ep_regs = udc->regs + USBA_EPT_BASE(i); 1984 ep->ep_regs = udc->regs + USBA_EPT_BASE(i);
1984 ep->dma_regs = udc->regs + USBA_DMA_BASE(i); 1985 ep->dma_regs = udc->regs + USBA_DMA_BASE(i);
diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.h b/drivers/usb/gadget/udc/atmel_usba_udc.h
index 3e1c9d589dfa..b03b2ebfc53a 100644
--- a/drivers/usb/gadget/udc/atmel_usba_udc.h
+++ b/drivers/usb/gadget/udc/atmel_usba_udc.h
@@ -280,6 +280,7 @@ struct usba_ep {
280 void __iomem *ep_regs; 280 void __iomem *ep_regs;
281 void __iomem *dma_regs; 281 void __iomem *dma_regs;
282 void __iomem *fifo; 282 void __iomem *fifo;
283 char name[8];
283 struct usb_ep ep; 284 struct usb_ep ep;
284 struct usba_udc *udc; 285 struct usba_udc *udc;
285 286
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index ddfab301e366..e5834dd9bcde 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -165,7 +165,7 @@ static int xhci_plat_probe(struct platform_device *pdev)
165 return -ENODEV; 165 return -ENODEV;
166 166
167 /* Try to set 64-bit DMA first */ 167 /* Try to set 64-bit DMA first */
168 if (WARN_ON(!pdev->dev.dma_mask)) 168 if (!pdev->dev.dma_mask)
169 /* Platform did not initialize dma_mask */ 169 /* Platform did not initialize dma_mask */
170 ret = dma_coerce_mask_and_coherent(&pdev->dev, 170 ret = dma_coerce_mask_and_coherent(&pdev->dev,
171 DMA_BIT_MASK(64)); 171 DMA_BIT_MASK(64));
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 253310cdaaca..fd6c8b66f06f 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -843,7 +843,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
843 struct iov_iter out_iter, in_iter, prot_iter, data_iter; 843 struct iov_iter out_iter, in_iter, prot_iter, data_iter;
844 u64 tag; 844 u64 tag;
845 u32 exp_data_len, data_direction; 845 u32 exp_data_len, data_direction;
846 unsigned out, in; 846 unsigned int out = 0, in = 0;
847 int head, ret, prot_bytes; 847 int head, ret, prot_bytes;
848 size_t req_size, rsp_size = sizeof(struct virtio_scsi_cmd_resp); 848 size_t req_size, rsp_size = sizeof(struct virtio_scsi_cmd_resp);
849 size_t out_size, in_size; 849 size_t out_size, in_size;
@@ -2087,7 +2087,7 @@ static struct configfs_attribute *vhost_scsi_wwn_attrs[] = {
2087 NULL, 2087 NULL,
2088}; 2088};
2089 2089
2090static struct target_core_fabric_ops vhost_scsi_ops = { 2090static const struct target_core_fabric_ops vhost_scsi_ops = {
2091 .module = THIS_MODULE, 2091 .module = THIS_MODULE,
2092 .name = "vhost", 2092 .name = "vhost",
2093 .get_fabric_name = vhost_scsi_get_fabric_name, 2093 .get_fabric_name = vhost_scsi_get_fabric_name,
diff --git a/drivers/xen/platform-pci.c b/drivers/xen/platform-pci.c
index 112ce422dc22..2a165cc8a43c 100644
--- a/drivers/xen/platform-pci.c
+++ b/drivers/xen/platform-pci.c
@@ -42,6 +42,7 @@
42static unsigned long platform_mmio; 42static unsigned long platform_mmio;
43static unsigned long platform_mmio_alloc; 43static unsigned long platform_mmio_alloc;
44static unsigned long platform_mmiolen; 44static unsigned long platform_mmiolen;
45static uint64_t callback_via;
45 46
46static unsigned long alloc_xen_mmio(unsigned long len) 47static unsigned long alloc_xen_mmio(unsigned long len)
47{ 48{
@@ -54,6 +55,51 @@ static unsigned long alloc_xen_mmio(unsigned long len)
54 return addr; 55 return addr;
55} 56}
56 57
58static uint64_t get_callback_via(struct pci_dev *pdev)
59{
60 u8 pin;
61 int irq;
62
63 irq = pdev->irq;
64 if (irq < 16)
65 return irq; /* ISA IRQ */
66
67 pin = pdev->pin;
68
69 /* We don't know the GSI. Specify the PCI INTx line instead. */
70 return ((uint64_t)0x01 << HVM_CALLBACK_VIA_TYPE_SHIFT) | /* PCI INTx identifier */
71 ((uint64_t)pci_domain_nr(pdev->bus) << 32) |
72 ((uint64_t)pdev->bus->number << 16) |
73 ((uint64_t)(pdev->devfn & 0xff) << 8) |
74 ((uint64_t)(pin - 1) & 3);
75}
76
77static irqreturn_t do_hvm_evtchn_intr(int irq, void *dev_id)
78{
79 xen_hvm_evtchn_do_upcall();
80 return IRQ_HANDLED;
81}
82
83static int xen_allocate_irq(struct pci_dev *pdev)
84{
85 return request_irq(pdev->irq, do_hvm_evtchn_intr,
86 IRQF_NOBALANCING | IRQF_TRIGGER_RISING,
87 "xen-platform-pci", pdev);
88}
89
90static int platform_pci_resume(struct pci_dev *pdev)
91{
92 int err;
93 if (!xen_pv_domain())
94 return 0;
95 err = xen_set_callback_via(callback_via);
96 if (err) {
97 dev_err(&pdev->dev, "platform_pci_resume failure!\n");
98 return err;
99 }
100 return 0;
101}
102
57static int platform_pci_probe(struct pci_dev *pdev, 103static int platform_pci_probe(struct pci_dev *pdev,
58 const struct pci_device_id *ent) 104 const struct pci_device_id *ent)
59{ 105{
@@ -92,6 +138,28 @@ static int platform_pci_probe(struct pci_dev *pdev,
92 platform_mmio = mmio_addr; 138 platform_mmio = mmio_addr;
93 platform_mmiolen = mmio_len; 139 platform_mmiolen = mmio_len;
94 140
141 /*
142 * Xen HVM guests always use the vector callback mechanism.
143 * L1 Dom0 in a nested Xen environment is a PV guest inside in an
144 * HVM environment. It needs the platform-pci driver to get
145 * notifications from L0 Xen, but it cannot use the vector callback
146 * as it is not exported by L1 Xen.
147 */
148 if (xen_pv_domain()) {
149 ret = xen_allocate_irq(pdev);
150 if (ret) {
151 dev_warn(&pdev->dev, "request_irq failed err=%d\n", ret);
152 goto out;
153 }
154 callback_via = get_callback_via(pdev);
155 ret = xen_set_callback_via(callback_via);
156 if (ret) {
157 dev_warn(&pdev->dev, "Unable to set the evtchn callback "
158 "err=%d\n", ret);
159 goto out;
160 }
161 }
162
95 max_nr_gframes = gnttab_max_grant_frames(); 163 max_nr_gframes = gnttab_max_grant_frames();
96 grant_frames = alloc_xen_mmio(PAGE_SIZE * max_nr_gframes); 164 grant_frames = alloc_xen_mmio(PAGE_SIZE * max_nr_gframes);
97 ret = gnttab_setup_auto_xlat_frames(grant_frames); 165 ret = gnttab_setup_auto_xlat_frames(grant_frames);
@@ -123,6 +191,9 @@ static struct pci_driver platform_driver = {
123 .name = DRV_NAME, 191 .name = DRV_NAME,
124 .probe = platform_pci_probe, 192 .probe = platform_pci_probe,
125 .id_table = platform_pci_tbl, 193 .id_table = platform_pci_tbl,
194#ifdef CONFIG_PM
195 .resume_early = platform_pci_resume,
196#endif
126}; 197};
127 198
128builtin_pci_driver(platform_driver); 199builtin_pci_driver(platform_driver);
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index baea866a6751..94fd76d04683 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -2591,8 +2591,13 @@ int ceph_get_caps(struct ceph_inode_info *ci, int need, int want,
2591 add_wait_queue(&ci->i_cap_wq, &wait); 2591 add_wait_queue(&ci->i_cap_wq, &wait);
2592 2592
2593 while (!try_get_cap_refs(ci, need, want, endoff, 2593 while (!try_get_cap_refs(ci, need, want, endoff,
2594 true, &_got, &err)) 2594 true, &_got, &err)) {
2595 if (signal_pending(current)) {
2596 ret = -ERESTARTSYS;
2597 break;
2598 }
2595 wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); 2599 wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
2600 }
2596 2601
2597 remove_wait_queue(&ci->i_cap_wq, &wait); 2602 remove_wait_queue(&ci->i_cap_wq, &wait);
2598 2603
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index d7a93696663b..8ab1fdf0bd49 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -1230,7 +1230,8 @@ static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags)
1230 struct ceph_mds_client *mdsc = 1230 struct ceph_mds_client *mdsc =
1231 ceph_sb_to_client(dir->i_sb)->mdsc; 1231 ceph_sb_to_client(dir->i_sb)->mdsc;
1232 struct ceph_mds_request *req; 1232 struct ceph_mds_request *req;
1233 int op, mask, err; 1233 int op, err;
1234 u32 mask;
1234 1235
1235 if (flags & LOOKUP_RCU) 1236 if (flags & LOOKUP_RCU)
1236 return -ECHILD; 1237 return -ECHILD;
@@ -1245,7 +1246,7 @@ static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags)
1245 mask = CEPH_STAT_CAP_INODE | CEPH_CAP_AUTH_SHARED; 1246 mask = CEPH_STAT_CAP_INODE | CEPH_CAP_AUTH_SHARED;
1246 if (ceph_security_xattr_wanted(dir)) 1247 if (ceph_security_xattr_wanted(dir))
1247 mask |= CEPH_CAP_XATTR_SHARED; 1248 mask |= CEPH_CAP_XATTR_SHARED;
1248 req->r_args.getattr.mask = mask; 1249 req->r_args.getattr.mask = cpu_to_le32(mask);
1249 1250
1250 err = ceph_mdsc_do_request(mdsc, NULL, req); 1251 err = ceph_mdsc_do_request(mdsc, NULL, req);
1251 switch (err) { 1252 switch (err) {
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index 398e5328b309..5e659d054b40 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -305,7 +305,8 @@ static int frag_tree_split_cmp(const void *l, const void *r)
305{ 305{
306 struct ceph_frag_tree_split *ls = (struct ceph_frag_tree_split*)l; 306 struct ceph_frag_tree_split *ls = (struct ceph_frag_tree_split*)l;
307 struct ceph_frag_tree_split *rs = (struct ceph_frag_tree_split*)r; 307 struct ceph_frag_tree_split *rs = (struct ceph_frag_tree_split*)r;
308 return ceph_frag_compare(ls->frag, rs->frag); 308 return ceph_frag_compare(le32_to_cpu(ls->frag),
309 le32_to_cpu(rs->frag));
309} 310}
310 311
311static bool is_frag_child(u32 f, struct ceph_inode_frag *frag) 312static bool is_frag_child(u32 f, struct ceph_inode_frag *frag)
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index ec6b35e9f966..c9d2e553a6c4 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -288,12 +288,13 @@ static int parse_reply_info_extra(void **p, void *end,
288 struct ceph_mds_reply_info_parsed *info, 288 struct ceph_mds_reply_info_parsed *info,
289 u64 features) 289 u64 features)
290{ 290{
291 if (info->head->op == CEPH_MDS_OP_GETFILELOCK) 291 u32 op = le32_to_cpu(info->head->op);
292
293 if (op == CEPH_MDS_OP_GETFILELOCK)
292 return parse_reply_info_filelock(p, end, info, features); 294 return parse_reply_info_filelock(p, end, info, features);
293 else if (info->head->op == CEPH_MDS_OP_READDIR || 295 else if (op == CEPH_MDS_OP_READDIR || op == CEPH_MDS_OP_LSSNAP)
294 info->head->op == CEPH_MDS_OP_LSSNAP)
295 return parse_reply_info_dir(p, end, info, features); 296 return parse_reply_info_dir(p, end, info, features);
296 else if (info->head->op == CEPH_MDS_OP_CREATE) 297 else if (op == CEPH_MDS_OP_CREATE)
297 return parse_reply_info_create(p, end, info, features); 298 return parse_reply_info_create(p, end, info, features);
298 else 299 else
299 return -EIO; 300 return -EIO;
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 70ea57c7b6bb..4e06a27ed7f8 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -2025,7 +2025,6 @@ static void end_requests(struct fuse_conn *fc, struct list_head *head)
2025 struct fuse_req *req; 2025 struct fuse_req *req;
2026 req = list_entry(head->next, struct fuse_req, list); 2026 req = list_entry(head->next, struct fuse_req, list);
2027 req->out.h.error = -ECONNABORTED; 2027 req->out.h.error = -ECONNABORTED;
2028 clear_bit(FR_PENDING, &req->flags);
2029 clear_bit(FR_SENT, &req->flags); 2028 clear_bit(FR_SENT, &req->flags);
2030 list_del_init(&req->list); 2029 list_del_init(&req->list);
2031 request_end(fc, req); 2030 request_end(fc, req);
@@ -2103,6 +2102,8 @@ void fuse_abort_conn(struct fuse_conn *fc)
2103 spin_lock(&fiq->waitq.lock); 2102 spin_lock(&fiq->waitq.lock);
2104 fiq->connected = 0; 2103 fiq->connected = 0;
2105 list_splice_init(&fiq->pending, &to_end2); 2104 list_splice_init(&fiq->pending, &to_end2);
2105 list_for_each_entry(req, &to_end2, list)
2106 clear_bit(FR_PENDING, &req->flags);
2106 while (forget_pending(fiq)) 2107 while (forget_pending(fiq))
2107 kfree(dequeue_forget(fiq, 1, NULL)); 2108 kfree(dequeue_forget(fiq, 1, NULL));
2108 wake_up_all_locked(&fiq->waitq); 2109 wake_up_all_locked(&fiq->waitq);
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index 1f7c732f32b0..811fd8929a18 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -68,7 +68,7 @@ static u64 time_to_jiffies(u64 sec, u32 nsec)
68 if (sec || nsec) { 68 if (sec || nsec) {
69 struct timespec64 ts = { 69 struct timespec64 ts = {
70 sec, 70 sec,
71 max_t(u32, nsec, NSEC_PER_SEC - 1) 71 min_t(u32, nsec, NSEC_PER_SEC - 1)
72 }; 72 };
73 73
74 return get_jiffies_64() + timespec64_to_jiffies(&ts); 74 return get_jiffies_64() + timespec64_to_jiffies(&ts);
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 6dcbc5defb7a..ecc151697fd4 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -38,7 +38,6 @@
38#include <linux/mm.h> 38#include <linux/mm.h>
39#include <linux/delay.h> 39#include <linux/delay.h>
40#include <linux/errno.h> 40#include <linux/errno.h>
41#include <linux/file.h>
42#include <linux/string.h> 41#include <linux/string.h>
43#include <linux/ratelimit.h> 42#include <linux/ratelimit.h>
44#include <linux/printk.h> 43#include <linux/printk.h>
@@ -1083,7 +1082,8 @@ int nfs4_call_sync(struct rpc_clnt *clnt,
1083 return nfs4_call_sync_sequence(clnt, server, msg, args, res); 1082 return nfs4_call_sync_sequence(clnt, server, msg, args, res);
1084} 1083}
1085 1084
1086static void update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo) 1085static void update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo,
1086 unsigned long timestamp)
1087{ 1087{
1088 struct nfs_inode *nfsi = NFS_I(dir); 1088 struct nfs_inode *nfsi = NFS_I(dir);
1089 1089
@@ -1099,6 +1099,7 @@ static void update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo)
1099 NFS_INO_INVALID_ACL; 1099 NFS_INO_INVALID_ACL;
1100 } 1100 }
1101 dir->i_version = cinfo->after; 1101 dir->i_version = cinfo->after;
1102 nfsi->read_cache_jiffies = timestamp;
1102 nfsi->attr_gencount = nfs_inc_attr_generation_counter(); 1103 nfsi->attr_gencount = nfs_inc_attr_generation_counter();
1103 nfs_fscache_invalidate(dir); 1104 nfs_fscache_invalidate(dir);
1104 spin_unlock(&dir->i_lock); 1105 spin_unlock(&dir->i_lock);
@@ -2391,11 +2392,13 @@ static int _nfs4_proc_open(struct nfs4_opendata *data)
2391 nfs_fattr_map_and_free_names(server, &data->f_attr); 2392 nfs_fattr_map_and_free_names(server, &data->f_attr);
2392 2393
2393 if (o_arg->open_flags & O_CREAT) { 2394 if (o_arg->open_flags & O_CREAT) {
2394 update_changeattr(dir, &o_res->cinfo);
2395 if (o_arg->open_flags & O_EXCL) 2395 if (o_arg->open_flags & O_EXCL)
2396 data->file_created = 1; 2396 data->file_created = 1;
2397 else if (o_res->cinfo.before != o_res->cinfo.after) 2397 else if (o_res->cinfo.before != o_res->cinfo.after)
2398 data->file_created = 1; 2398 data->file_created = 1;
2399 if (data->file_created || dir->i_version != o_res->cinfo.after)
2400 update_changeattr(dir, &o_res->cinfo,
2401 o_res->f_attr->time_start);
2399 } 2402 }
2400 if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0) 2403 if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0)
2401 server->caps &= ~NFS_CAP_POSIX_LOCK; 2404 server->caps &= ~NFS_CAP_POSIX_LOCK;
@@ -4073,11 +4076,12 @@ static int _nfs4_proc_remove(struct inode *dir, const struct qstr *name)
4073 .rpc_argp = &args, 4076 .rpc_argp = &args,
4074 .rpc_resp = &res, 4077 .rpc_resp = &res,
4075 }; 4078 };
4079 unsigned long timestamp = jiffies;
4076 int status; 4080 int status;
4077 4081
4078 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 1); 4082 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 1);
4079 if (status == 0) 4083 if (status == 0)
4080 update_changeattr(dir, &res.cinfo); 4084 update_changeattr(dir, &res.cinfo, timestamp);
4081 return status; 4085 return status;
4082} 4086}
4083 4087
@@ -4125,7 +4129,8 @@ static int nfs4_proc_unlink_done(struct rpc_task *task, struct inode *dir)
4125 if (nfs4_async_handle_error(task, res->server, NULL, 4129 if (nfs4_async_handle_error(task, res->server, NULL,
4126 &data->timeout) == -EAGAIN) 4130 &data->timeout) == -EAGAIN)
4127 return 0; 4131 return 0;
4128 update_changeattr(dir, &res->cinfo); 4132 if (task->tk_status == 0)
4133 update_changeattr(dir, &res->cinfo, res->dir_attr->time_start);
4129 return 1; 4134 return 1;
4130} 4135}
4131 4136
@@ -4159,8 +4164,11 @@ static int nfs4_proc_rename_done(struct rpc_task *task, struct inode *old_dir,
4159 if (nfs4_async_handle_error(task, res->server, NULL, &data->timeout) == -EAGAIN) 4164 if (nfs4_async_handle_error(task, res->server, NULL, &data->timeout) == -EAGAIN)
4160 return 0; 4165 return 0;
4161 4166
4162 update_changeattr(old_dir, &res->old_cinfo); 4167 if (task->tk_status == 0) {
4163 update_changeattr(new_dir, &res->new_cinfo); 4168 update_changeattr(old_dir, &res->old_cinfo, res->old_fattr->time_start);
4169 if (new_dir != old_dir)
4170 update_changeattr(new_dir, &res->new_cinfo, res->new_fattr->time_start);
4171 }
4164 return 1; 4172 return 1;
4165} 4173}
4166 4174
@@ -4197,7 +4205,7 @@ static int _nfs4_proc_link(struct inode *inode, struct inode *dir, const struct
4197 4205
4198 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); 4206 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
4199 if (!status) { 4207 if (!status) {
4200 update_changeattr(dir, &res.cinfo); 4208 update_changeattr(dir, &res.cinfo, res.fattr->time_start);
4201 status = nfs_post_op_update_inode(inode, res.fattr); 4209 status = nfs_post_op_update_inode(inode, res.fattr);
4202 if (!status) 4210 if (!status)
4203 nfs_setsecurity(inode, res.fattr, res.label); 4211 nfs_setsecurity(inode, res.fattr, res.label);
@@ -4272,7 +4280,8 @@ static int nfs4_do_create(struct inode *dir, struct dentry *dentry, struct nfs4_
4272 int status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &data->msg, 4280 int status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &data->msg,
4273 &data->arg.seq_args, &data->res.seq_res, 1); 4281 &data->arg.seq_args, &data->res.seq_res, 1);
4274 if (status == 0) { 4282 if (status == 0) {
4275 update_changeattr(dir, &data->res.dir_cinfo); 4283 update_changeattr(dir, &data->res.dir_cinfo,
4284 data->res.fattr->time_start);
4276 status = nfs_instantiate(dentry, data->res.fh, data->res.fattr, data->res.label); 4285 status = nfs_instantiate(dentry, data->res.fh, data->res.fattr, data->res.label);
4277 } 4286 }
4278 return status; 4287 return status;
@@ -6127,7 +6136,6 @@ static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl,
6127 p->server = server; 6136 p->server = server;
6128 atomic_inc(&lsp->ls_count); 6137 atomic_inc(&lsp->ls_count);
6129 p->ctx = get_nfs_open_context(ctx); 6138 p->ctx = get_nfs_open_context(ctx);
6130 get_file(fl->fl_file);
6131 memcpy(&p->fl, fl, sizeof(p->fl)); 6139 memcpy(&p->fl, fl, sizeof(p->fl));
6132 return p; 6140 return p;
6133out_free_seqid: 6141out_free_seqid:
@@ -6240,7 +6248,6 @@ static void nfs4_lock_release(void *calldata)
6240 nfs_free_seqid(data->arg.lock_seqid); 6248 nfs_free_seqid(data->arg.lock_seqid);
6241 nfs4_put_lock_state(data->lsp); 6249 nfs4_put_lock_state(data->lsp);
6242 put_nfs_open_context(data->ctx); 6250 put_nfs_open_context(data->ctx);
6243 fput(data->fl.fl_file);
6244 kfree(data); 6251 kfree(data);
6245 dprintk("%s: done!\n", __func__); 6252 dprintk("%s: done!\n", __func__);
6246} 6253}
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 1d152f4470cd..90e6193ce6be 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -1729,7 +1729,6 @@ static int nfs4_recovery_handle_error(struct nfs_client *clp, int error)
1729 break; 1729 break;
1730 case -NFS4ERR_STALE_CLIENTID: 1730 case -NFS4ERR_STALE_CLIENTID:
1731 set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state); 1731 set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
1732 nfs4_state_clear_reclaim_reboot(clp);
1733 nfs4_state_start_reclaim_reboot(clp); 1732 nfs4_state_start_reclaim_reboot(clp);
1734 break; 1733 break;
1735 case -NFS4ERR_EXPIRED: 1734 case -NFS4ERR_EXPIRED:
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index 7ecf16be4a44..8fae53ce21d1 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -2440,7 +2440,9 @@ nfsd4_encode_fattr(struct xdr_stream *xdr, struct svc_fh *fhp,
2440 p++; /* to be backfilled later */ 2440 p++; /* to be backfilled later */
2441 2441
2442 if (bmval0 & FATTR4_WORD0_SUPPORTED_ATTRS) { 2442 if (bmval0 & FATTR4_WORD0_SUPPORTED_ATTRS) {
2443 u32 *supp = nfsd_suppattrs[minorversion]; 2443 u32 supp[3];
2444
2445 memcpy(supp, nfsd_suppattrs[minorversion], sizeof(supp));
2444 2446
2445 if (!IS_POSIXACL(dentry->d_inode)) 2447 if (!IS_POSIXACL(dentry->d_inode))
2446 supp[0] &= ~FATTR4_WORD0_ACL; 2448 supp[0] &= ~FATTR4_WORD0_ACL;
diff --git a/fs/overlayfs/namei.c b/fs/overlayfs/namei.c
index 9ad48d9202a9..023bb0b03352 100644
--- a/fs/overlayfs/namei.c
+++ b/fs/overlayfs/namei.c
@@ -154,29 +154,38 @@ out_err:
154static int ovl_lookup_layer(struct dentry *base, struct ovl_lookup_data *d, 154static int ovl_lookup_layer(struct dentry *base, struct ovl_lookup_data *d,
155 struct dentry **ret) 155 struct dentry **ret)
156{ 156{
157 const char *s = d->name.name; 157 /* Counting down from the end, since the prefix can change */
158 size_t rem = d->name.len - 1;
158 struct dentry *dentry = NULL; 159 struct dentry *dentry = NULL;
159 int err; 160 int err;
160 161
161 if (*s != '/') 162 if (d->name.name[0] != '/')
162 return ovl_lookup_single(base, d, d->name.name, d->name.len, 163 return ovl_lookup_single(base, d, d->name.name, d->name.len,
163 0, "", ret); 164 0, "", ret);
164 165
165 while (*s++ == '/' && !IS_ERR_OR_NULL(base) && d_can_lookup(base)) { 166 while (!IS_ERR_OR_NULL(base) && d_can_lookup(base)) {
167 const char *s = d->name.name + d->name.len - rem;
166 const char *next = strchrnul(s, '/'); 168 const char *next = strchrnul(s, '/');
167 size_t slen = strlen(s); 169 size_t thislen = next - s;
170 bool end = !next[0];
168 171
169 if (WARN_ON(slen > d->name.len) || 172 /* Verify we did not go off the rails */
170 WARN_ON(strcmp(d->name.name + d->name.len - slen, s))) 173 if (WARN_ON(s[-1] != '/'))
171 return -EIO; 174 return -EIO;
172 175
173 err = ovl_lookup_single(base, d, s, next - s, 176 err = ovl_lookup_single(base, d, s, thislen,
174 d->name.len - slen, next, &base); 177 d->name.len - rem, next, &base);
175 dput(dentry); 178 dput(dentry);
176 if (err) 179 if (err)
177 return err; 180 return err;
178 dentry = base; 181 dentry = base;
179 s = next; 182 if (end)
183 break;
184
185 rem -= thislen + 1;
186
187 if (WARN_ON(rem >= d->name.len))
188 return -EIO;
180 } 189 }
181 *ret = dentry; 190 *ret = dentry;
182 return 0; 191 return 0;
diff --git a/fs/ubifs/Kconfig b/fs/ubifs/Kconfig
index 0a908ae7af13..b0d0623c83ed 100644
--- a/fs/ubifs/Kconfig
+++ b/fs/ubifs/Kconfig
@@ -53,7 +53,7 @@ config UBIFS_ATIME_SUPPORT
53 53
54config UBIFS_FS_ENCRYPTION 54config UBIFS_FS_ENCRYPTION
55 bool "UBIFS Encryption" 55 bool "UBIFS Encryption"
56 depends on UBIFS_FS 56 depends on UBIFS_FS && BLOCK
57 select FS_ENCRYPTION 57 select FS_ENCRYPTION
58 default n 58 default n
59 help 59 help
diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c
index 1c5331ac9614..528369f3e472 100644
--- a/fs/ubifs/dir.c
+++ b/fs/ubifs/dir.c
@@ -390,16 +390,6 @@ static int do_tmpfile(struct inode *dir, struct dentry *dentry,
390 dbg_gen("dent '%pd', mode %#hx in dir ino %lu", 390 dbg_gen("dent '%pd', mode %#hx in dir ino %lu",
391 dentry, mode, dir->i_ino); 391 dentry, mode, dir->i_ino);
392 392
393 if (ubifs_crypt_is_encrypted(dir)) {
394 err = fscrypt_get_encryption_info(dir);
395 if (err)
396 return err;
397
398 if (!fscrypt_has_encryption_key(dir)) {
399 return -EPERM;
400 }
401 }
402
403 err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &nm); 393 err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &nm);
404 if (err) 394 if (err)
405 return err; 395 return err;
@@ -741,17 +731,9 @@ static int ubifs_link(struct dentry *old_dentry, struct inode *dir,
741 ubifs_assert(inode_is_locked(dir)); 731 ubifs_assert(inode_is_locked(dir));
742 ubifs_assert(inode_is_locked(inode)); 732 ubifs_assert(inode_is_locked(inode));
743 733
744 if (ubifs_crypt_is_encrypted(dir)) { 734 if (ubifs_crypt_is_encrypted(dir) &&
745 if (!fscrypt_has_permitted_context(dir, inode)) 735 !fscrypt_has_permitted_context(dir, inode))
746 return -EPERM; 736 return -EPERM;
747
748 err = fscrypt_get_encryption_info(inode);
749 if (err)
750 return err;
751
752 if (!fscrypt_has_encryption_key(inode))
753 return -EPERM;
754 }
755 737
756 err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &nm); 738 err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &nm);
757 if (err) 739 if (err)
@@ -1000,17 +982,6 @@ static int ubifs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
1000 if (err) 982 if (err)
1001 return err; 983 return err;
1002 984
1003 if (ubifs_crypt_is_encrypted(dir)) {
1004 err = fscrypt_get_encryption_info(dir);
1005 if (err)
1006 goto out_budg;
1007
1008 if (!fscrypt_has_encryption_key(dir)) {
1009 err = -EPERM;
1010 goto out_budg;
1011 }
1012 }
1013
1014 err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &nm); 985 err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &nm);
1015 if (err) 986 if (err)
1016 goto out_budg; 987 goto out_budg;
@@ -1096,17 +1067,6 @@ static int ubifs_mknod(struct inode *dir, struct dentry *dentry,
1096 return err; 1067 return err;
1097 } 1068 }
1098 1069
1099 if (ubifs_crypt_is_encrypted(dir)) {
1100 err = fscrypt_get_encryption_info(dir);
1101 if (err)
1102 goto out_budg;
1103
1104 if (!fscrypt_has_encryption_key(dir)) {
1105 err = -EPERM;
1106 goto out_budg;
1107 }
1108 }
1109
1110 err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &nm); 1070 err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &nm);
1111 if (err) 1071 if (err)
1112 goto out_budg; 1072 goto out_budg;
@@ -1231,18 +1191,6 @@ static int ubifs_symlink(struct inode *dir, struct dentry *dentry,
1231 goto out_inode; 1191 goto out_inode;
1232 } 1192 }
1233 1193
1234 err = fscrypt_get_encryption_info(inode);
1235 if (err) {
1236 kfree(sd);
1237 goto out_inode;
1238 }
1239
1240 if (!fscrypt_has_encryption_key(inode)) {
1241 kfree(sd);
1242 err = -EPERM;
1243 goto out_inode;
1244 }
1245
1246 ostr.name = sd->encrypted_path; 1194 ostr.name = sd->encrypted_path;
1247 ostr.len = disk_link.len; 1195 ostr.len = disk_link.len;
1248 1196
diff --git a/fs/ubifs/ioctl.c b/fs/ubifs/ioctl.c
index 78d713644df3..da519ba205f6 100644
--- a/fs/ubifs/ioctl.c
+++ b/fs/ubifs/ioctl.c
@@ -217,6 +217,9 @@ long ubifs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
217 case FS_IOC32_SETFLAGS: 217 case FS_IOC32_SETFLAGS:
218 cmd = FS_IOC_SETFLAGS; 218 cmd = FS_IOC_SETFLAGS;
219 break; 219 break;
220 case FS_IOC_SET_ENCRYPTION_POLICY:
221 case FS_IOC_GET_ENCRYPTION_POLICY:
222 break;
220 default: 223 default:
221 return -ENOIOCTLCMD; 224 return -ENOIOCTLCMD;
222 } 225 }
diff --git a/fs/ubifs/journal.c b/fs/ubifs/journal.c
index a459211a1c21..294519b98874 100644
--- a/fs/ubifs/journal.c
+++ b/fs/ubifs/journal.c
@@ -744,6 +744,7 @@ int ubifs_jnl_write_data(struct ubifs_info *c, const struct inode *inode,
744 744
745 } else { 745 } else {
746 data->compr_size = 0; 746 data->compr_size = 0;
747 out_len = compr_len;
747 } 748 }
748 749
749 dlen = UBIFS_DATA_NODE_SZ + out_len; 750 dlen = UBIFS_DATA_NODE_SZ + out_len;
@@ -1319,6 +1320,7 @@ static int truncate_data_node(const struct ubifs_info *c, const struct inode *in
1319 dn->compr_type = cpu_to_le16(compr_type); 1320 dn->compr_type = cpu_to_le16(compr_type);
1320 dn->size = cpu_to_le32(*new_len); 1321 dn->size = cpu_to_le32(*new_len);
1321 *new_len = UBIFS_DATA_NODE_SZ + out_len; 1322 *new_len = UBIFS_DATA_NODE_SZ + out_len;
1323 err = 0;
1322out: 1324out:
1323 kfree(buf); 1325 kfree(buf);
1324 return err; 1326 return err;
diff --git a/fs/ubifs/tnc.c b/fs/ubifs/tnc.c
index 74ae2de949df..709aa098dd46 100644
--- a/fs/ubifs/tnc.c
+++ b/fs/ubifs/tnc.c
@@ -34,6 +34,11 @@
34#include <linux/slab.h> 34#include <linux/slab.h>
35#include "ubifs.h" 35#include "ubifs.h"
36 36
37static int try_read_node(const struct ubifs_info *c, void *buf, int type,
38 int len, int lnum, int offs);
39static int fallible_read_node(struct ubifs_info *c, const union ubifs_key *key,
40 struct ubifs_zbranch *zbr, void *node);
41
37/* 42/*
38 * Returned codes of 'matches_name()' and 'fallible_matches_name()' functions. 43 * Returned codes of 'matches_name()' and 'fallible_matches_name()' functions.
39 * @NAME_LESS: name corresponding to the first argument is less than second 44 * @NAME_LESS: name corresponding to the first argument is less than second
@@ -402,7 +407,19 @@ static int tnc_read_hashed_node(struct ubifs_info *c, struct ubifs_zbranch *zbr,
402 return 0; 407 return 0;
403 } 408 }
404 409
405 err = ubifs_tnc_read_node(c, zbr, node); 410 if (c->replaying) {
411 err = fallible_read_node(c, &zbr->key, zbr, node);
412 /*
413 * When the node was not found, return -ENOENT, 0 otherwise.
414 * Negative return codes stay as-is.
415 */
416 if (err == 0)
417 err = -ENOENT;
418 else if (err == 1)
419 err = 0;
420 } else {
421 err = ubifs_tnc_read_node(c, zbr, node);
422 }
406 if (err) 423 if (err)
407 return err; 424 return err;
408 425
@@ -2857,7 +2874,11 @@ struct ubifs_dent_node *ubifs_tnc_next_ent(struct ubifs_info *c,
2857 if (fname_len(nm) > 0) { 2874 if (fname_len(nm) > 0) {
2858 if (err) { 2875 if (err) {
2859 /* Handle collisions */ 2876 /* Handle collisions */
2860 err = resolve_collision(c, key, &znode, &n, nm); 2877 if (c->replaying)
2878 err = fallible_resolve_collision(c, key, &znode, &n,
2879 nm, 0);
2880 else
2881 err = resolve_collision(c, key, &znode, &n, nm);
2861 dbg_tnc("rc returned %d, znode %p, n %d", 2882 dbg_tnc("rc returned %d, znode %p, n %d",
2862 err, znode, n); 2883 err, znode, n);
2863 if (unlikely(err < 0)) 2884 if (unlikely(err < 0))
diff --git a/fs/xfs/libxfs/xfs_dir2.c b/fs/xfs/libxfs/xfs_dir2.c
index c58d72c220f5..2f389d366e93 100644
--- a/fs/xfs/libxfs/xfs_dir2.c
+++ b/fs/xfs/libxfs/xfs_dir2.c
@@ -36,21 +36,29 @@
36struct xfs_name xfs_name_dotdot = { (unsigned char *)"..", 2, XFS_DIR3_FT_DIR }; 36struct xfs_name xfs_name_dotdot = { (unsigned char *)"..", 2, XFS_DIR3_FT_DIR };
37 37
38/* 38/*
39 * @mode, if set, indicates that the type field needs to be set up. 39 * Convert inode mode to directory entry filetype
40 * This uses the transformation from file mode to DT_* as defined in linux/fs.h
41 * for file type specification. This will be propagated into the directory
42 * structure if appropriate for the given operation and filesystem config.
43 */ 40 */
44const unsigned char xfs_mode_to_ftype[S_IFMT >> S_SHIFT] = { 41unsigned char xfs_mode_to_ftype(int mode)
45 [0] = XFS_DIR3_FT_UNKNOWN, 42{
46 [S_IFREG >> S_SHIFT] = XFS_DIR3_FT_REG_FILE, 43 switch (mode & S_IFMT) {
47 [S_IFDIR >> S_SHIFT] = XFS_DIR3_FT_DIR, 44 case S_IFREG:
48 [S_IFCHR >> S_SHIFT] = XFS_DIR3_FT_CHRDEV, 45 return XFS_DIR3_FT_REG_FILE;
49 [S_IFBLK >> S_SHIFT] = XFS_DIR3_FT_BLKDEV, 46 case S_IFDIR:
50 [S_IFIFO >> S_SHIFT] = XFS_DIR3_FT_FIFO, 47 return XFS_DIR3_FT_DIR;
51 [S_IFSOCK >> S_SHIFT] = XFS_DIR3_FT_SOCK, 48 case S_IFCHR:
52 [S_IFLNK >> S_SHIFT] = XFS_DIR3_FT_SYMLINK, 49 return XFS_DIR3_FT_CHRDEV;
53}; 50 case S_IFBLK:
51 return XFS_DIR3_FT_BLKDEV;
52 case S_IFIFO:
53 return XFS_DIR3_FT_FIFO;
54 case S_IFSOCK:
55 return XFS_DIR3_FT_SOCK;
56 case S_IFLNK:
57 return XFS_DIR3_FT_SYMLINK;
58 default:
59 return XFS_DIR3_FT_UNKNOWN;
60 }
61}
54 62
55/* 63/*
56 * ASCII case-insensitive (ie. A-Z) support for directories that was 64 * ASCII case-insensitive (ie. A-Z) support for directories that was
@@ -631,7 +639,8 @@ xfs_dir2_isblock(
631 if ((rval = xfs_bmap_last_offset(args->dp, &last, XFS_DATA_FORK))) 639 if ((rval = xfs_bmap_last_offset(args->dp, &last, XFS_DATA_FORK)))
632 return rval; 640 return rval;
633 rval = XFS_FSB_TO_B(args->dp->i_mount, last) == args->geo->blksize; 641 rval = XFS_FSB_TO_B(args->dp->i_mount, last) == args->geo->blksize;
634 ASSERT(rval == 0 || args->dp->i_d.di_size == args->geo->blksize); 642 if (rval != 0 && args->dp->i_d.di_size != args->geo->blksize)
643 return -EFSCORRUPTED;
635 *vp = rval; 644 *vp = rval;
636 return 0; 645 return 0;
637} 646}
diff --git a/fs/xfs/libxfs/xfs_dir2.h b/fs/xfs/libxfs/xfs_dir2.h
index 0197590fa7d7..d6e6d9d16f6c 100644
--- a/fs/xfs/libxfs/xfs_dir2.h
+++ b/fs/xfs/libxfs/xfs_dir2.h
@@ -18,6 +18,9 @@
18#ifndef __XFS_DIR2_H__ 18#ifndef __XFS_DIR2_H__
19#define __XFS_DIR2_H__ 19#define __XFS_DIR2_H__
20 20
21#include "xfs_da_format.h"
22#include "xfs_da_btree.h"
23
21struct xfs_defer_ops; 24struct xfs_defer_ops;
22struct xfs_da_args; 25struct xfs_da_args;
23struct xfs_inode; 26struct xfs_inode;
@@ -32,10 +35,9 @@ struct xfs_dir2_data_unused;
32extern struct xfs_name xfs_name_dotdot; 35extern struct xfs_name xfs_name_dotdot;
33 36
34/* 37/*
35 * directory filetype conversion tables. 38 * Convert inode mode to directory entry filetype
36 */ 39 */
37#define S_SHIFT 12 40extern unsigned char xfs_mode_to_ftype(int mode);
38extern const unsigned char xfs_mode_to_ftype[];
39 41
40/* 42/*
41 * directory operations vector for encode/decode routines 43 * directory operations vector for encode/decode routines
diff --git a/fs/xfs/libxfs/xfs_inode_buf.c b/fs/xfs/libxfs/xfs_inode_buf.c
index dd483e2767f7..d93f9d918cfc 100644
--- a/fs/xfs/libxfs/xfs_inode_buf.c
+++ b/fs/xfs/libxfs/xfs_inode_buf.c
@@ -29,6 +29,7 @@
29#include "xfs_icache.h" 29#include "xfs_icache.h"
30#include "xfs_trans.h" 30#include "xfs_trans.h"
31#include "xfs_ialloc.h" 31#include "xfs_ialloc.h"
32#include "xfs_dir2.h"
32 33
33/* 34/*
34 * Check that none of the inode's in the buffer have a next 35 * Check that none of the inode's in the buffer have a next
@@ -386,6 +387,7 @@ xfs_dinode_verify(
386 xfs_ino_t ino, 387 xfs_ino_t ino,
387 struct xfs_dinode *dip) 388 struct xfs_dinode *dip)
388{ 389{
390 uint16_t mode;
389 uint16_t flags; 391 uint16_t flags;
390 uint64_t flags2; 392 uint64_t flags2;
391 393
@@ -396,8 +398,12 @@ xfs_dinode_verify(
396 if (be64_to_cpu(dip->di_size) & (1ULL << 63)) 398 if (be64_to_cpu(dip->di_size) & (1ULL << 63))
397 return false; 399 return false;
398 400
399 /* No zero-length symlinks. */ 401 mode = be16_to_cpu(dip->di_mode);
400 if (S_ISLNK(be16_to_cpu(dip->di_mode)) && dip->di_size == 0) 402 if (mode && xfs_mode_to_ftype(mode) == XFS_DIR3_FT_UNKNOWN)
403 return false;
404
405 /* No zero-length symlinks/dirs. */
406 if ((S_ISLNK(mode) || S_ISDIR(mode)) && dip->di_size == 0)
401 return false; 407 return false;
402 408
403 /* only version 3 or greater inodes are extensively verified here */ 409 /* only version 3 or greater inodes are extensively verified here */
diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c
index 7a30b8f11db7..9d06cc30e875 100644
--- a/fs/xfs/xfs_dquot.c
+++ b/fs/xfs/xfs_dquot.c
@@ -710,6 +710,10 @@ xfs_dq_get_next_id(
710 /* Simple advance */ 710 /* Simple advance */
711 next_id = *id + 1; 711 next_id = *id + 1;
712 712
713 /* If we'd wrap past the max ID, stop */
714 if (next_id < *id)
715 return -ENOENT;
716
713 /* If new ID is within the current chunk, advancing it sufficed */ 717 /* If new ID is within the current chunk, advancing it sufficed */
714 if (next_id % mp->m_quotainfo->qi_dqperchunk) { 718 if (next_id % mp->m_quotainfo->qi_dqperchunk) {
715 *id = next_id; 719 *id = next_id;
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
index 308bebb6dfd2..22c16155f1b4 100644
--- a/fs/xfs/xfs_iops.c
+++ b/fs/xfs/xfs_iops.c
@@ -98,12 +98,27 @@ xfs_init_security(
98static void 98static void
99xfs_dentry_to_name( 99xfs_dentry_to_name(
100 struct xfs_name *namep, 100 struct xfs_name *namep,
101 struct dentry *dentry)
102{
103 namep->name = dentry->d_name.name;
104 namep->len = dentry->d_name.len;
105 namep->type = XFS_DIR3_FT_UNKNOWN;
106}
107
108static int
109xfs_dentry_mode_to_name(
110 struct xfs_name *namep,
101 struct dentry *dentry, 111 struct dentry *dentry,
102 int mode) 112 int mode)
103{ 113{
104 namep->name = dentry->d_name.name; 114 namep->name = dentry->d_name.name;
105 namep->len = dentry->d_name.len; 115 namep->len = dentry->d_name.len;
106 namep->type = xfs_mode_to_ftype[(mode & S_IFMT) >> S_SHIFT]; 116 namep->type = xfs_mode_to_ftype(mode);
117
118 if (unlikely(namep->type == XFS_DIR3_FT_UNKNOWN))
119 return -EFSCORRUPTED;
120
121 return 0;
107} 122}
108 123
109STATIC void 124STATIC void
@@ -119,7 +134,7 @@ xfs_cleanup_inode(
119 * xfs_init_security we must back out. 134 * xfs_init_security we must back out.
120 * ENOSPC can hit here, among other things. 135 * ENOSPC can hit here, among other things.
121 */ 136 */
122 xfs_dentry_to_name(&teardown, dentry, 0); 137 xfs_dentry_to_name(&teardown, dentry);
123 138
124 xfs_remove(XFS_I(dir), &teardown, XFS_I(inode)); 139 xfs_remove(XFS_I(dir), &teardown, XFS_I(inode));
125} 140}
@@ -154,8 +169,12 @@ xfs_generic_create(
154 if (error) 169 if (error)
155 return error; 170 return error;
156 171
172 /* Verify mode is valid also for tmpfile case */
173 error = xfs_dentry_mode_to_name(&name, dentry, mode);
174 if (unlikely(error))
175 goto out_free_acl;
176
157 if (!tmpfile) { 177 if (!tmpfile) {
158 xfs_dentry_to_name(&name, dentry, mode);
159 error = xfs_create(XFS_I(dir), &name, mode, rdev, &ip); 178 error = xfs_create(XFS_I(dir), &name, mode, rdev, &ip);
160 } else { 179 } else {
161 error = xfs_create_tmpfile(XFS_I(dir), dentry, mode, &ip); 180 error = xfs_create_tmpfile(XFS_I(dir), dentry, mode, &ip);
@@ -248,7 +267,7 @@ xfs_vn_lookup(
248 if (dentry->d_name.len >= MAXNAMELEN) 267 if (dentry->d_name.len >= MAXNAMELEN)
249 return ERR_PTR(-ENAMETOOLONG); 268 return ERR_PTR(-ENAMETOOLONG);
250 269
251 xfs_dentry_to_name(&name, dentry, 0); 270 xfs_dentry_to_name(&name, dentry);
252 error = xfs_lookup(XFS_I(dir), &name, &cip, NULL); 271 error = xfs_lookup(XFS_I(dir), &name, &cip, NULL);
253 if (unlikely(error)) { 272 if (unlikely(error)) {
254 if (unlikely(error != -ENOENT)) 273 if (unlikely(error != -ENOENT))
@@ -275,7 +294,7 @@ xfs_vn_ci_lookup(
275 if (dentry->d_name.len >= MAXNAMELEN) 294 if (dentry->d_name.len >= MAXNAMELEN)
276 return ERR_PTR(-ENAMETOOLONG); 295 return ERR_PTR(-ENAMETOOLONG);
277 296
278 xfs_dentry_to_name(&xname, dentry, 0); 297 xfs_dentry_to_name(&xname, dentry);
279 error = xfs_lookup(XFS_I(dir), &xname, &ip, &ci_name); 298 error = xfs_lookup(XFS_I(dir), &xname, &ip, &ci_name);
280 if (unlikely(error)) { 299 if (unlikely(error)) {
281 if (unlikely(error != -ENOENT)) 300 if (unlikely(error != -ENOENT))
@@ -310,7 +329,9 @@ xfs_vn_link(
310 struct xfs_name name; 329 struct xfs_name name;
311 int error; 330 int error;
312 331
313 xfs_dentry_to_name(&name, dentry, inode->i_mode); 332 error = xfs_dentry_mode_to_name(&name, dentry, inode->i_mode);
333 if (unlikely(error))
334 return error;
314 335
315 error = xfs_link(XFS_I(dir), XFS_I(inode), &name); 336 error = xfs_link(XFS_I(dir), XFS_I(inode), &name);
316 if (unlikely(error)) 337 if (unlikely(error))
@@ -329,7 +350,7 @@ xfs_vn_unlink(
329 struct xfs_name name; 350 struct xfs_name name;
330 int error; 351 int error;
331 352
332 xfs_dentry_to_name(&name, dentry, 0); 353 xfs_dentry_to_name(&name, dentry);
333 354
334 error = xfs_remove(XFS_I(dir), &name, XFS_I(d_inode(dentry))); 355 error = xfs_remove(XFS_I(dir), &name, XFS_I(d_inode(dentry)));
335 if (error) 356 if (error)
@@ -359,7 +380,9 @@ xfs_vn_symlink(
359 380
360 mode = S_IFLNK | 381 mode = S_IFLNK |
361 (irix_symlink_mode ? 0777 & ~current_umask() : S_IRWXUGO); 382 (irix_symlink_mode ? 0777 & ~current_umask() : S_IRWXUGO);
362 xfs_dentry_to_name(&name, dentry, mode); 383 error = xfs_dentry_mode_to_name(&name, dentry, mode);
384 if (unlikely(error))
385 goto out;
363 386
364 error = xfs_symlink(XFS_I(dir), &name, symname, mode, &cip); 387 error = xfs_symlink(XFS_I(dir), &name, symname, mode, &cip);
365 if (unlikely(error)) 388 if (unlikely(error))
@@ -395,6 +418,7 @@ xfs_vn_rename(
395{ 418{
396 struct inode *new_inode = d_inode(ndentry); 419 struct inode *new_inode = d_inode(ndentry);
397 int omode = 0; 420 int omode = 0;
421 int error;
398 struct xfs_name oname; 422 struct xfs_name oname;
399 struct xfs_name nname; 423 struct xfs_name nname;
400 424
@@ -405,8 +429,14 @@ xfs_vn_rename(
405 if (flags & RENAME_EXCHANGE) 429 if (flags & RENAME_EXCHANGE)
406 omode = d_inode(ndentry)->i_mode; 430 omode = d_inode(ndentry)->i_mode;
407 431
408 xfs_dentry_to_name(&oname, odentry, omode); 432 error = xfs_dentry_mode_to_name(&oname, odentry, omode);
409 xfs_dentry_to_name(&nname, ndentry, d_inode(odentry)->i_mode); 433 if (omode && unlikely(error))
434 return error;
435
436 error = xfs_dentry_mode_to_name(&nname, ndentry,
437 d_inode(odentry)->i_mode);
438 if (unlikely(error))
439 return error;
410 440
411 return xfs_rename(XFS_I(odir), &oname, XFS_I(d_inode(odentry)), 441 return xfs_rename(XFS_I(odir), &oname, XFS_I(d_inode(odentry)),
412 XFS_I(ndir), &nname, 442 XFS_I(ndir), &nname,
diff --git a/fs/xfs/xfs_linux.h b/fs/xfs/xfs_linux.h
index e467218c0098..7a989de224f4 100644
--- a/fs/xfs/xfs_linux.h
+++ b/fs/xfs/xfs_linux.h
@@ -331,11 +331,11 @@ static inline __uint64_t howmany_64(__uint64_t x, __uint32_t y)
331} 331}
332 332
333#define ASSERT_ALWAYS(expr) \ 333#define ASSERT_ALWAYS(expr) \
334 (unlikely(expr) ? (void)0 : assfail(#expr, __FILE__, __LINE__)) 334 (likely(expr) ? (void)0 : assfail(#expr, __FILE__, __LINE__))
335 335
336#ifdef DEBUG 336#ifdef DEBUG
337#define ASSERT(expr) \ 337#define ASSERT(expr) \
338 (unlikely(expr) ? (void)0 : assfail(#expr, __FILE__, __LINE__)) 338 (likely(expr) ? (void)0 : assfail(#expr, __FILE__, __LINE__))
339 339
340#ifndef STATIC 340#ifndef STATIC
341# define STATIC noinline 341# define STATIC noinline
@@ -346,7 +346,7 @@ static inline __uint64_t howmany_64(__uint64_t x, __uint32_t y)
346#ifdef XFS_WARN 346#ifdef XFS_WARN
347 347
348#define ASSERT(expr) \ 348#define ASSERT(expr) \
349 (unlikely(expr) ? (void)0 : asswarn(#expr, __FILE__, __LINE__)) 349 (likely(expr) ? (void)0 : asswarn(#expr, __FILE__, __LINE__))
350 350
351#ifndef STATIC 351#ifndef STATIC
352# define STATIC static noinline 352# define STATIC static noinline
diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h
index b717ed9d2b75..fe797d6ef89d 100644
--- a/include/kvm/arm_arch_timer.h
+++ b/include/kvm/arm_arch_timer.h
@@ -23,20 +23,24 @@
23#include <linux/hrtimer.h> 23#include <linux/hrtimer.h>
24#include <linux/workqueue.h> 24#include <linux/workqueue.h>
25 25
26struct arch_timer_kvm { 26struct arch_timer_context {
27 /* Registers: control register, timer value */
28 u32 cnt_ctl;
29 u64 cnt_cval;
30
31 /* Timer IRQ */
32 struct kvm_irq_level irq;
33
34 /* Active IRQ state caching */
35 bool active_cleared_last;
36
27 /* Virtual offset */ 37 /* Virtual offset */
28 u64 cntvoff; 38 u64 cntvoff;
29}; 39};
30 40
31struct arch_timer_cpu { 41struct arch_timer_cpu {
32 /* Registers: control register, timer value */ 42 struct arch_timer_context vtimer;
33 u32 cntv_ctl; /* Saved/restored */ 43 struct arch_timer_context ptimer;
34 u64 cntv_cval; /* Saved/restored */
35
36 /*
37 * Anything that is not used directly from assembly code goes
38 * here.
39 */
40 44
41 /* Background timer used when the guest is not running */ 45 /* Background timer used when the guest is not running */
42 struct hrtimer timer; 46 struct hrtimer timer;
@@ -47,21 +51,15 @@ struct arch_timer_cpu {
47 /* Background timer active */ 51 /* Background timer active */
48 bool armed; 52 bool armed;
49 53
50 /* Timer IRQ */
51 struct kvm_irq_level irq;
52
53 /* Active IRQ state caching */
54 bool active_cleared_last;
55
56 /* Is the timer enabled */ 54 /* Is the timer enabled */
57 bool enabled; 55 bool enabled;
58}; 56};
59 57
60int kvm_timer_hyp_init(void); 58int kvm_timer_hyp_init(void);
61int kvm_timer_enable(struct kvm_vcpu *vcpu); 59int kvm_timer_enable(struct kvm_vcpu *vcpu);
62void kvm_timer_init(struct kvm *kvm);
63int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu, 60int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu,
64 const struct kvm_irq_level *irq); 61 const struct kvm_irq_level *virt_irq,
62 const struct kvm_irq_level *phys_irq);
65void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu); 63void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu);
66void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu); 64void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu);
67void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu); 65void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu);
@@ -70,10 +68,16 @@ void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu);
70u64 kvm_arm_timer_get_reg(struct kvm_vcpu *, u64 regid); 68u64 kvm_arm_timer_get_reg(struct kvm_vcpu *, u64 regid);
71int kvm_arm_timer_set_reg(struct kvm_vcpu *, u64 regid, u64 value); 69int kvm_arm_timer_set_reg(struct kvm_vcpu *, u64 regid, u64 value);
72 70
73bool kvm_timer_should_fire(struct kvm_vcpu *vcpu); 71bool kvm_timer_should_fire(struct arch_timer_context *timer_ctx);
74void kvm_timer_schedule(struct kvm_vcpu *vcpu); 72void kvm_timer_schedule(struct kvm_vcpu *vcpu);
75void kvm_timer_unschedule(struct kvm_vcpu *vcpu); 73void kvm_timer_unschedule(struct kvm_vcpu *vcpu);
76 74
75u64 kvm_phys_timer_read(void);
76
77void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu); 77void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu);
78 78
79void kvm_timer_init_vhe(void);
80
81#define vcpu_vtimer(v) (&(v)->arch.timer_cpu.vtimer)
82#define vcpu_ptimer(v) (&(v)->arch.timer_cpu.ptimer)
79#endif 83#endif
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index 002f0922cd92..b72dd2ad5f44 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -71,6 +71,8 @@ struct vgic_global {
71 71
72 /* GIC system register CPU interface */ 72 /* GIC system register CPU interface */
73 struct static_key_false gicv3_cpuif; 73 struct static_key_false gicv3_cpuif;
74
75 u32 ich_vtr_el2;
74}; 76};
75 77
76extern struct vgic_global kvm_vgic_global_state; 78extern struct vgic_global kvm_vgic_global_state;
@@ -101,9 +103,10 @@ struct vgic_irq {
101 */ 103 */
102 104
103 u32 intid; /* Guest visible INTID */ 105 u32 intid; /* Guest visible INTID */
104 bool pending;
105 bool line_level; /* Level only */ 106 bool line_level; /* Level only */
106 bool soft_pending; /* Level only */ 107 bool pending_latch; /* The pending latch state used to calculate
108 * the pending state for both level
109 * and edge triggered IRQs. */
107 bool active; /* not used for LPIs */ 110 bool active; /* not used for LPIs */
108 bool enabled; 111 bool enabled;
109 bool hw; /* Tied to HW IRQ */ 112 bool hw; /* Tied to HW IRQ */
@@ -165,6 +168,8 @@ struct vgic_its {
165 struct list_head collection_list; 168 struct list_head collection_list;
166}; 169};
167 170
171struct vgic_state_iter;
172
168struct vgic_dist { 173struct vgic_dist {
169 bool in_kernel; 174 bool in_kernel;
170 bool ready; 175 bool ready;
@@ -212,6 +217,9 @@ struct vgic_dist {
212 spinlock_t lpi_list_lock; 217 spinlock_t lpi_list_lock;
213 struct list_head lpi_list_head; 218 struct list_head lpi_list_head;
214 int lpi_list_count; 219 int lpi_list_count;
220
221 /* used by vgic-debug */
222 struct vgic_state_iter *iter;
215}; 223};
216 224
217struct vgic_v2_cpu_if { 225struct vgic_v2_cpu_if {
@@ -269,6 +277,12 @@ struct vgic_cpu {
269 u64 pendbaser; 277 u64 pendbaser;
270 278
271 bool lpis_enabled; 279 bool lpis_enabled;
280
281 /* Cache guest priority bits */
282 u32 num_pri_bits;
283
284 /* Cache guest interrupt ID bits */
285 u32 num_id_bits;
272}; 286};
273 287
274extern struct static_key_false vgic_v2_cpuif_trap; 288extern struct static_key_false vgic_v2_cpuif_trap;
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index f74ae68086dc..05cf951df3fe 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -216,7 +216,7 @@ u64 bpf_tail_call(u64 ctx, u64 r2, u64 index, u64 r4, u64 r5);
216u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); 216u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
217 217
218bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp); 218bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp);
219int bpf_prog_calc_digest(struct bpf_prog *fp); 219int bpf_prog_calc_tag(struct bpf_prog *fp);
220 220
221const struct bpf_func_proto *bpf_get_trace_printk_proto(void); 221const struct bpf_func_proto *bpf_get_trace_printk_proto(void);
222 222
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index 20bfefbe7594..d936a0021839 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -74,6 +74,8 @@ enum cpuhp_state {
74 CPUHP_ZCOMP_PREPARE, 74 CPUHP_ZCOMP_PREPARE,
75 CPUHP_TIMERS_DEAD, 75 CPUHP_TIMERS_DEAD,
76 CPUHP_MIPS_SOC_PREPARE, 76 CPUHP_MIPS_SOC_PREPARE,
77 CPUHP_BP_PREPARE_DYN,
78 CPUHP_BP_PREPARE_DYN_END = CPUHP_BP_PREPARE_DYN + 20,
77 CPUHP_BRINGUP_CPU, 79 CPUHP_BRINGUP_CPU,
78 CPUHP_AP_IDLE_DEAD, 80 CPUHP_AP_IDLE_DEAD,
79 CPUHP_AP_OFFLINE, 81 CPUHP_AP_OFFLINE,
diff --git a/include/linux/filter.h b/include/linux/filter.h
index a0934e6c9bab..e4eb2546339a 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -57,6 +57,8 @@ struct bpf_prog_aux;
57/* BPF program can access up to 512 bytes of stack space. */ 57/* BPF program can access up to 512 bytes of stack space. */
58#define MAX_BPF_STACK 512 58#define MAX_BPF_STACK 512
59 59
60#define BPF_TAG_SIZE 8
61
60/* Helper macros for filter block array initializers. */ 62/* Helper macros for filter block array initializers. */
61 63
62/* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */ 64/* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */
@@ -408,7 +410,7 @@ struct bpf_prog {
408 kmemcheck_bitfield_end(meta); 410 kmemcheck_bitfield_end(meta);
409 enum bpf_prog_type type; /* Type of BPF program */ 411 enum bpf_prog_type type; /* Type of BPF program */
410 u32 len; /* Number of filter blocks */ 412 u32 len; /* Number of filter blocks */
411 u32 digest[SHA_DIGEST_WORDS]; /* Program digest */ 413 u8 tag[BPF_TAG_SIZE];
412 struct bpf_prog_aux *aux; /* Auxiliary fields */ 414 struct bpf_prog_aux *aux; /* Auxiliary fields */
413 struct sock_fprog_kern *orig_prog; /* Original BPF program */ 415 struct sock_fprog_kern *orig_prog; /* Original BPF program */
414 unsigned int (*bpf_func)(const void *ctx, 416 unsigned int (*bpf_func)(const void *ctx,
@@ -519,7 +521,7 @@ static inline u32 bpf_prog_insn_size(const struct bpf_prog *prog)
519 return prog->len * sizeof(struct bpf_insn); 521 return prog->len * sizeof(struct bpf_insn);
520} 522}
521 523
522static inline u32 bpf_prog_digest_scratch_size(const struct bpf_prog *prog) 524static inline u32 bpf_prog_tag_scratch_size(const struct bpf_prog *prog)
523{ 525{
524 return round_up(bpf_prog_insn_size(prog) + 526 return round_up(bpf_prog_insn_size(prog) +
525 sizeof(__be64) + 1, SHA_MESSAGE_BYTES); 527 sizeof(__be64) + 1, SHA_MESSAGE_BYTES);
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index e808f8ae6f14..170e00a40826 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -352,8 +352,30 @@
352/* 352/*
353 * CPU interface registers 353 * CPU interface registers
354 */ 354 */
355#define ICC_CTLR_EL1_EOImode_drop_dir (0U << 1) 355#define ICC_CTLR_EL1_EOImode_SHIFT (1)
356#define ICC_CTLR_EL1_EOImode_drop (1U << 1) 356#define ICC_CTLR_EL1_EOImode_drop_dir (0U << ICC_CTLR_EL1_EOImode_SHIFT)
357#define ICC_CTLR_EL1_EOImode_drop (1U << ICC_CTLR_EL1_EOImode_SHIFT)
358#define ICC_CTLR_EL1_EOImode_MASK (1 << ICC_CTLR_EL1_EOImode_SHIFT)
359#define ICC_CTLR_EL1_CBPR_SHIFT 0
360#define ICC_CTLR_EL1_CBPR_MASK (1 << ICC_CTLR_EL1_CBPR_SHIFT)
361#define ICC_CTLR_EL1_PRI_BITS_SHIFT 8
362#define ICC_CTLR_EL1_PRI_BITS_MASK (0x7 << ICC_CTLR_EL1_PRI_BITS_SHIFT)
363#define ICC_CTLR_EL1_ID_BITS_SHIFT 11
364#define ICC_CTLR_EL1_ID_BITS_MASK (0x7 << ICC_CTLR_EL1_ID_BITS_SHIFT)
365#define ICC_CTLR_EL1_SEIS_SHIFT 14
366#define ICC_CTLR_EL1_SEIS_MASK (0x1 << ICC_CTLR_EL1_SEIS_SHIFT)
367#define ICC_CTLR_EL1_A3V_SHIFT 15
368#define ICC_CTLR_EL1_A3V_MASK (0x1 << ICC_CTLR_EL1_A3V_SHIFT)
369#define ICC_PMR_EL1_SHIFT 0
370#define ICC_PMR_EL1_MASK (0xff << ICC_PMR_EL1_SHIFT)
371#define ICC_BPR0_EL1_SHIFT 0
372#define ICC_BPR0_EL1_MASK (0x7 << ICC_BPR0_EL1_SHIFT)
373#define ICC_BPR1_EL1_SHIFT 0
374#define ICC_BPR1_EL1_MASK (0x7 << ICC_BPR1_EL1_SHIFT)
375#define ICC_IGRPEN0_EL1_SHIFT 0
376#define ICC_IGRPEN0_EL1_MASK (1 << ICC_IGRPEN0_EL1_SHIFT)
377#define ICC_IGRPEN1_EL1_SHIFT 0
378#define ICC_IGRPEN1_EL1_MASK (1 << ICC_IGRPEN1_EL1_SHIFT)
357#define ICC_SRE_EL1_SRE (1U << 0) 379#define ICC_SRE_EL1_SRE (1U << 0)
358 380
359/* 381/*
@@ -382,14 +404,29 @@
382#define ICH_HCR_EN (1 << 0) 404#define ICH_HCR_EN (1 << 0)
383#define ICH_HCR_UIE (1 << 1) 405#define ICH_HCR_UIE (1 << 1)
384 406
385#define ICH_VMCR_CTLR_SHIFT 0 407#define ICH_VMCR_CBPR_SHIFT 4
386#define ICH_VMCR_CTLR_MASK (0x21f << ICH_VMCR_CTLR_SHIFT) 408#define ICH_VMCR_CBPR_MASK (1 << ICH_VMCR_CBPR_SHIFT)
409#define ICH_VMCR_EOIM_SHIFT 9
410#define ICH_VMCR_EOIM_MASK (1 << ICH_VMCR_EOIM_SHIFT)
387#define ICH_VMCR_BPR1_SHIFT 18 411#define ICH_VMCR_BPR1_SHIFT 18
388#define ICH_VMCR_BPR1_MASK (7 << ICH_VMCR_BPR1_SHIFT) 412#define ICH_VMCR_BPR1_MASK (7 << ICH_VMCR_BPR1_SHIFT)
389#define ICH_VMCR_BPR0_SHIFT 21 413#define ICH_VMCR_BPR0_SHIFT 21
390#define ICH_VMCR_BPR0_MASK (7 << ICH_VMCR_BPR0_SHIFT) 414#define ICH_VMCR_BPR0_MASK (7 << ICH_VMCR_BPR0_SHIFT)
391#define ICH_VMCR_PMR_SHIFT 24 415#define ICH_VMCR_PMR_SHIFT 24
392#define ICH_VMCR_PMR_MASK (0xffUL << ICH_VMCR_PMR_SHIFT) 416#define ICH_VMCR_PMR_MASK (0xffUL << ICH_VMCR_PMR_SHIFT)
417#define ICH_VMCR_ENG0_SHIFT 0
418#define ICH_VMCR_ENG0_MASK (1 << ICH_VMCR_ENG0_SHIFT)
419#define ICH_VMCR_ENG1_SHIFT 1
420#define ICH_VMCR_ENG1_MASK (1 << ICH_VMCR_ENG1_SHIFT)
421
422#define ICH_VTR_PRI_BITS_SHIFT 29
423#define ICH_VTR_PRI_BITS_MASK (7 << ICH_VTR_PRI_BITS_SHIFT)
424#define ICH_VTR_ID_BITS_SHIFT 23
425#define ICH_VTR_ID_BITS_MASK (7 << ICH_VTR_ID_BITS_SHIFT)
426#define ICH_VTR_SEIS_SHIFT 22
427#define ICH_VTR_SEIS_MASK (1 << ICH_VTR_SEIS_SHIFT)
428#define ICH_VTR_A3V_SHIFT 21
429#define ICH_VTR_A3V_MASK (1 << ICH_VTR_A3V_SHIFT)
393 430
394#define ICC_IAR1_EL1_SPURIOUS 0x3ff 431#define ICC_IAR1_EL1_SPURIOUS 0x3ff
395 432
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 56aec84237ad..cb09238f6d32 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -514,8 +514,8 @@ extern enum system_states {
514#define TAINT_FLAGS_COUNT 16 514#define TAINT_FLAGS_COUNT 16
515 515
516struct taint_flag { 516struct taint_flag {
517 char true; /* character printed when tainted */ 517 char c_true; /* character printed when tainted */
518 char false; /* character printed when not tainted */ 518 char c_false; /* character printed when not tainted */
519 bool module; /* also show as a per-module taint flag */ 519 bool module; /* also show as a per-module taint flag */
520}; 520};
521 521
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 1c5190dab2c1..cda457bcedc1 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -45,7 +45,6 @@
45 * include/linux/kvm_h. 45 * include/linux/kvm_h.
46 */ 46 */
47#define KVM_MEMSLOT_INVALID (1UL << 16) 47#define KVM_MEMSLOT_INVALID (1UL << 16)
48#define KVM_MEMSLOT_INCOHERENT (1UL << 17)
49 48
50/* Two fragments for cross MMIO pages. */ 49/* Two fragments for cross MMIO pages. */
51#define KVM_MAX_MMIO_FRAGMENTS 2 50#define KVM_MAX_MMIO_FRAGMENTS 2
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 321f9ed552a9..01f71e1d2e94 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -444,6 +444,10 @@ bool __rcu_is_watching(void);
444#error "Unknown RCU implementation specified to kernel configuration" 444#error "Unknown RCU implementation specified to kernel configuration"
445#endif 445#endif
446 446
447#define RCU_SCHEDULER_INACTIVE 0
448#define RCU_SCHEDULER_INIT 1
449#define RCU_SCHEDULER_RUNNING 2
450
447/* 451/*
448 * init_rcu_head_on_stack()/destroy_rcu_head_on_stack() are needed for dynamic 452 * init_rcu_head_on_stack()/destroy_rcu_head_on_stack() are needed for dynamic
449 * initialization and destruction of rcu_head on the stack. rcu_head structures 453 * initialization and destruction of rcu_head on the stack. rcu_head structures
diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h
index e5d193440374..7440290f64ac 100644
--- a/include/linux/sunrpc/svc_xprt.h
+++ b/include/linux/sunrpc/svc_xprt.h
@@ -66,6 +66,7 @@ struct svc_xprt {
66#define XPT_LISTENER 10 /* listening endpoint */ 66#define XPT_LISTENER 10 /* listening endpoint */
67#define XPT_CACHE_AUTH 11 /* cache auth info */ 67#define XPT_CACHE_AUTH 11 /* cache auth info */
68#define XPT_LOCAL 12 /* connection from loopback interface */ 68#define XPT_LOCAL 12 /* connection from loopback interface */
69#define XPT_KILL_TEMP 13 /* call xpo_kill_temp_xprt before closing */
69 70
70 struct svc_serv *xpt_server; /* service for transport */ 71 struct svc_serv *xpt_server; /* service for transport */
71 atomic_t xpt_reserved; /* space on outq that is rsvd */ 72 atomic_t xpt_reserved; /* space on outq that is rsvd */
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index fc5848dad7a4..c93f4b3a59cb 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -62,8 +62,13 @@ static inline unsigned int tcp_optlen(const struct sk_buff *skb)
62 62
63/* TCP Fast Open Cookie as stored in memory */ 63/* TCP Fast Open Cookie as stored in memory */
64struct tcp_fastopen_cookie { 64struct tcp_fastopen_cookie {
65 union {
66 u8 val[TCP_FASTOPEN_COOKIE_MAX];
67#if IS_ENABLED(CONFIG_IPV6)
68 struct in6_addr addr;
69#endif
70 };
65 s8 len; 71 s8 len;
66 u8 val[TCP_FASTOPEN_COOKIE_MAX];
67 bool exp; /* In RFC6994 experimental option format */ 72 bool exp; /* In RFC6994 experimental option format */
68}; 73};
69 74
diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
index 96dd0b3f70d7..da5033dd8cbc 100644
--- a/include/scsi/libfc.h
+++ b/include/scsi/libfc.h
@@ -809,11 +809,11 @@ static inline void fc_set_wwnn(struct fc_lport *lport, u64 wwnn)
809/** 809/**
810 * fc_set_wwpn() - Set the World Wide Port Name of a local port 810 * fc_set_wwpn() - Set the World Wide Port Name of a local port
811 * @lport: The local port whose WWPN is to be set 811 * @lport: The local port whose WWPN is to be set
812 * @wwnn: The new WWPN 812 * @wwpn: The new WWPN
813 */ 813 */
814static inline void fc_set_wwpn(struct fc_lport *lport, u64 wwnn) 814static inline void fc_set_wwpn(struct fc_lport *lport, u64 wwpn)
815{ 815{
816 lport->wwpn = wwnn; 816 lport->wwpn = wwpn;
817} 817}
818 818
819/** 819/**
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index 6b76e3b0c18e..bea982af9cfb 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -1772,7 +1772,9 @@ enum nl80211_commands {
1772 * 1772 *
1773 * @NL80211_ATTR_OPMODE_NOTIF: Operating mode field from Operating Mode 1773 * @NL80211_ATTR_OPMODE_NOTIF: Operating mode field from Operating Mode
1774 * Notification Element based on association request when used with 1774 * Notification Element based on association request when used with
1775 * %NL80211_CMD_NEW_STATION; u8 attribute. 1775 * %NL80211_CMD_NEW_STATION or %NL80211_CMD_SET_STATION (only when
1776 * %NL80211_FEATURE_FULL_AP_CLIENT_STATE is supported, or with TDLS);
1777 * u8 attribute.
1776 * 1778 *
1777 * @NL80211_ATTR_VENDOR_ID: The vendor ID, either a 24-bit OUI or, if 1779 * @NL80211_ATTR_VENDOR_ID: The vendor ID, either a 24-bit OUI or, if
1778 * %NL80211_VENDOR_ID_IS_LINUX is set, a special Linux ID (not used yet) 1780 * %NL80211_VENDOR_ID_IS_LINUX is set, a special Linux ID (not used yet)
diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h
index cb4bcdc58543..a4dcd88ec271 100644
--- a/include/uapi/linux/pkt_cls.h
+++ b/include/uapi/linux/pkt_cls.h
@@ -397,7 +397,7 @@ enum {
397 TCA_BPF_NAME, 397 TCA_BPF_NAME,
398 TCA_BPF_FLAGS, 398 TCA_BPF_FLAGS,
399 TCA_BPF_FLAGS_GEN, 399 TCA_BPF_FLAGS_GEN,
400 TCA_BPF_DIGEST, 400 TCA_BPF_TAG,
401 __TCA_BPF_MAX, 401 __TCA_BPF_MAX,
402}; 402};
403 403
diff --git a/include/uapi/linux/tc_act/tc_bpf.h b/include/uapi/linux/tc_act/tc_bpf.h
index a6b88a6f7f71..975b50dc8d1d 100644
--- a/include/uapi/linux/tc_act/tc_bpf.h
+++ b/include/uapi/linux/tc_act/tc_bpf.h
@@ -27,7 +27,7 @@ enum {
27 TCA_ACT_BPF_FD, 27 TCA_ACT_BPF_FD,
28 TCA_ACT_BPF_NAME, 28 TCA_ACT_BPF_NAME,
29 TCA_ACT_BPF_PAD, 29 TCA_ACT_BPF_PAD,
30 TCA_ACT_BPF_DIGEST, 30 TCA_ACT_BPF_TAG,
31 __TCA_ACT_BPF_MAX, 31 __TCA_ACT_BPF_MAX,
32}; 32};
33#define TCA_ACT_BPF_MAX (__TCA_ACT_BPF_MAX - 1) 33#define TCA_ACT_BPF_MAX (__TCA_ACT_BPF_MAX - 1)
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 1eb4f1303756..503d4211988a 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -146,10 +146,11 @@ void __bpf_prog_free(struct bpf_prog *fp)
146 vfree(fp); 146 vfree(fp);
147} 147}
148 148
149int bpf_prog_calc_digest(struct bpf_prog *fp) 149int bpf_prog_calc_tag(struct bpf_prog *fp)
150{ 150{
151 const u32 bits_offset = SHA_MESSAGE_BYTES - sizeof(__be64); 151 const u32 bits_offset = SHA_MESSAGE_BYTES - sizeof(__be64);
152 u32 raw_size = bpf_prog_digest_scratch_size(fp); 152 u32 raw_size = bpf_prog_tag_scratch_size(fp);
153 u32 digest[SHA_DIGEST_WORDS];
153 u32 ws[SHA_WORKSPACE_WORDS]; 154 u32 ws[SHA_WORKSPACE_WORDS];
154 u32 i, bsize, psize, blocks; 155 u32 i, bsize, psize, blocks;
155 struct bpf_insn *dst; 156 struct bpf_insn *dst;
@@ -162,7 +163,7 @@ int bpf_prog_calc_digest(struct bpf_prog *fp)
162 if (!raw) 163 if (!raw)
163 return -ENOMEM; 164 return -ENOMEM;
164 165
165 sha_init(fp->digest); 166 sha_init(digest);
166 memset(ws, 0, sizeof(ws)); 167 memset(ws, 0, sizeof(ws));
167 168
168 /* We need to take out the map fd for the digest calculation 169 /* We need to take out the map fd for the digest calculation
@@ -204,13 +205,14 @@ int bpf_prog_calc_digest(struct bpf_prog *fp)
204 *bits = cpu_to_be64((psize - 1) << 3); 205 *bits = cpu_to_be64((psize - 1) << 3);
205 206
206 while (blocks--) { 207 while (blocks--) {
207 sha_transform(fp->digest, todo, ws); 208 sha_transform(digest, todo, ws);
208 todo += SHA_MESSAGE_BYTES; 209 todo += SHA_MESSAGE_BYTES;
209 } 210 }
210 211
211 result = (__force __be32 *)fp->digest; 212 result = (__force __be32 *)digest;
212 for (i = 0; i < SHA_DIGEST_WORDS; i++) 213 for (i = 0; i < SHA_DIGEST_WORDS; i++)
213 result[i] = cpu_to_be32(fp->digest[i]); 214 result[i] = cpu_to_be32(digest[i]);
215 memcpy(fp->tag, result, sizeof(fp->tag));
214 216
215 vfree(raw); 217 vfree(raw);
216 return 0; 218 return 0;
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index e89acea22ecf..1d6b29e4e2c3 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -688,17 +688,17 @@ static int bpf_prog_release(struct inode *inode, struct file *filp)
688static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp) 688static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp)
689{ 689{
690 const struct bpf_prog *prog = filp->private_data; 690 const struct bpf_prog *prog = filp->private_data;
691 char prog_digest[sizeof(prog->digest) * 2 + 1] = { }; 691 char prog_tag[sizeof(prog->tag) * 2 + 1] = { };
692 692
693 bin2hex(prog_digest, prog->digest, sizeof(prog->digest)); 693 bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
694 seq_printf(m, 694 seq_printf(m,
695 "prog_type:\t%u\n" 695 "prog_type:\t%u\n"
696 "prog_jited:\t%u\n" 696 "prog_jited:\t%u\n"
697 "prog_digest:\t%s\n" 697 "prog_tag:\t%s\n"
698 "memlock:\t%llu\n", 698 "memlock:\t%llu\n",
699 prog->type, 699 prog->type,
700 prog->jited, 700 prog->jited,
701 prog_digest, 701 prog_tag,
702 prog->pages * 1ULL << PAGE_SHIFT); 702 prog->pages * 1ULL << PAGE_SHIFT);
703} 703}
704#endif 704#endif
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 83ed2f8f6f22..cdc43b899f28 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -2936,7 +2936,7 @@ static int replace_map_fd_with_map_ptr(struct bpf_verifier_env *env)
2936 int insn_cnt = env->prog->len; 2936 int insn_cnt = env->prog->len;
2937 int i, j, err; 2937 int i, j, err;
2938 2938
2939 err = bpf_prog_calc_digest(env->prog); 2939 err = bpf_prog_calc_tag(env->prog);
2940 if (err) 2940 if (err)
2941 return err; 2941 return err;
2942 2942
diff --git a/kernel/cpu.c b/kernel/cpu.c
index f75c4d031eeb..0a5f630f5c54 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -764,7 +764,6 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
764{ 764{
765 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); 765 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
766 int prev_state, ret = 0; 766 int prev_state, ret = 0;
767 bool hasdied = false;
768 767
769 if (num_online_cpus() == 1) 768 if (num_online_cpus() == 1)
770 return -EBUSY; 769 return -EBUSY;
@@ -809,7 +808,6 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
809 cpuhp_kick_ap_work(cpu); 808 cpuhp_kick_ap_work(cpu);
810 } 809 }
811 810
812 hasdied = prev_state != st->state && st->state == CPUHP_OFFLINE;
813out: 811out:
814 cpu_hotplug_done(); 812 cpu_hotplug_done();
815 return ret; 813 return ret;
@@ -1302,10 +1300,24 @@ static int cpuhp_cb_check(enum cpuhp_state state)
1302 */ 1300 */
1303static int cpuhp_reserve_state(enum cpuhp_state state) 1301static int cpuhp_reserve_state(enum cpuhp_state state)
1304{ 1302{
1305 enum cpuhp_state i; 1303 enum cpuhp_state i, end;
1304 struct cpuhp_step *step;
1306 1305
1307 for (i = CPUHP_AP_ONLINE_DYN; i <= CPUHP_AP_ONLINE_DYN_END; i++) { 1306 switch (state) {
1308 if (!cpuhp_ap_states[i].name) 1307 case CPUHP_AP_ONLINE_DYN:
1308 step = cpuhp_ap_states + CPUHP_AP_ONLINE_DYN;
1309 end = CPUHP_AP_ONLINE_DYN_END;
1310 break;
1311 case CPUHP_BP_PREPARE_DYN:
1312 step = cpuhp_bp_states + CPUHP_BP_PREPARE_DYN;
1313 end = CPUHP_BP_PREPARE_DYN_END;
1314 break;
1315 default:
1316 return -EINVAL;
1317 }
1318
1319 for (i = state; i <= end; i++, step++) {
1320 if (!step->name)
1309 return i; 1321 return i;
1310 } 1322 }
1311 WARN(1, "No more dynamic states available for CPU hotplug\n"); 1323 WARN(1, "No more dynamic states available for CPU hotplug\n");
@@ -1323,7 +1335,7 @@ static int cpuhp_store_callbacks(enum cpuhp_state state, const char *name,
1323 1335
1324 mutex_lock(&cpuhp_state_mutex); 1336 mutex_lock(&cpuhp_state_mutex);
1325 1337
1326 if (state == CPUHP_AP_ONLINE_DYN) { 1338 if (state == CPUHP_AP_ONLINE_DYN || state == CPUHP_BP_PREPARE_DYN) {
1327 ret = cpuhp_reserve_state(state); 1339 ret = cpuhp_reserve_state(state);
1328 if (ret < 0) 1340 if (ret < 0)
1329 goto out; 1341 goto out;
diff --git a/kernel/module.c b/kernel/module.c
index 5088784c0cf9..38d4270925d4 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -1145,7 +1145,7 @@ static size_t module_flags_taint(struct module *mod, char *buf)
1145 1145
1146 for (i = 0; i < TAINT_FLAGS_COUNT; i++) { 1146 for (i = 0; i < TAINT_FLAGS_COUNT; i++) {
1147 if (taint_flags[i].module && test_bit(i, &mod->taints)) 1147 if (taint_flags[i].module && test_bit(i, &mod->taints))
1148 buf[l++] = taint_flags[i].true; 1148 buf[l++] = taint_flags[i].c_true;
1149 } 1149 }
1150 1150
1151 return l; 1151 return l;
diff --git a/kernel/panic.c b/kernel/panic.c
index c51edaa04fce..901c4fb46002 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -355,7 +355,7 @@ const char *print_tainted(void)
355 for (i = 0; i < TAINT_FLAGS_COUNT; i++) { 355 for (i = 0; i < TAINT_FLAGS_COUNT; i++) {
356 const struct taint_flag *t = &taint_flags[i]; 356 const struct taint_flag *t = &taint_flags[i];
357 *s++ = test_bit(i, &tainted_mask) ? 357 *s++ = test_bit(i, &tainted_mask) ?
358 t->true : t->false; 358 t->c_true : t->c_false;
359 } 359 }
360 *s = 0; 360 *s = 0;
361 } else 361 } else
diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h
index 80adef7d4c3d..0d6ff3e471be 100644
--- a/kernel/rcu/rcu.h
+++ b/kernel/rcu/rcu.h
@@ -136,6 +136,7 @@ int rcu_jiffies_till_stall_check(void);
136#define TPS(x) tracepoint_string(x) 136#define TPS(x) tracepoint_string(x)
137 137
138void rcu_early_boot_tests(void); 138void rcu_early_boot_tests(void);
139void rcu_test_sync_prims(void);
139 140
140/* 141/*
141 * This function really isn't for public consumption, but RCU is special in 142 * This function really isn't for public consumption, but RCU is special in
diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c
index 1898559e6b60..b23a4d076f3d 100644
--- a/kernel/rcu/tiny.c
+++ b/kernel/rcu/tiny.c
@@ -185,9 +185,6 @@ static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused
185 * benefits of doing might_sleep() to reduce latency.) 185 * benefits of doing might_sleep() to reduce latency.)
186 * 186 *
187 * Cool, huh? (Due to Josh Triplett.) 187 * Cool, huh? (Due to Josh Triplett.)
188 *
189 * But we want to make this a static inline later. The cond_resched()
190 * currently makes this problematic.
191 */ 188 */
192void synchronize_sched(void) 189void synchronize_sched(void)
193{ 190{
@@ -195,7 +192,6 @@ void synchronize_sched(void)
195 lock_is_held(&rcu_lock_map) || 192 lock_is_held(&rcu_lock_map) ||
196 lock_is_held(&rcu_sched_lock_map), 193 lock_is_held(&rcu_sched_lock_map),
197 "Illegal synchronize_sched() in RCU read-side critical section"); 194 "Illegal synchronize_sched() in RCU read-side critical section");
198 cond_resched();
199} 195}
200EXPORT_SYMBOL_GPL(synchronize_sched); 196EXPORT_SYMBOL_GPL(synchronize_sched);
201 197
diff --git a/kernel/rcu/tiny_plugin.h b/kernel/rcu/tiny_plugin.h
index 196f0302e2f4..c64b827ecbca 100644
--- a/kernel/rcu/tiny_plugin.h
+++ b/kernel/rcu/tiny_plugin.h
@@ -60,12 +60,17 @@ EXPORT_SYMBOL_GPL(rcu_scheduler_active);
60 60
61/* 61/*
62 * During boot, we forgive RCU lockdep issues. After this function is 62 * During boot, we forgive RCU lockdep issues. After this function is
63 * invoked, we start taking RCU lockdep issues seriously. 63 * invoked, we start taking RCU lockdep issues seriously. Note that unlike
64 * Tree RCU, Tiny RCU transitions directly from RCU_SCHEDULER_INACTIVE
65 * to RCU_SCHEDULER_RUNNING, skipping the RCU_SCHEDULER_INIT stage.
66 * The reason for this is that Tiny RCU does not need kthreads, so does
67 * not have to care about the fact that the scheduler is half-initialized
68 * at a certain phase of the boot process.
64 */ 69 */
65void __init rcu_scheduler_starting(void) 70void __init rcu_scheduler_starting(void)
66{ 71{
67 WARN_ON(nr_context_switches() > 0); 72 WARN_ON(nr_context_switches() > 0);
68 rcu_scheduler_active = 1; 73 rcu_scheduler_active = RCU_SCHEDULER_RUNNING;
69} 74}
70 75
71#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ 76#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 96c52e43f7ca..cb4e2056ccf3 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -127,13 +127,16 @@ int rcu_num_nodes __read_mostly = NUM_RCU_NODES; /* Total # rcu_nodes in use. */
127int sysctl_panic_on_rcu_stall __read_mostly; 127int sysctl_panic_on_rcu_stall __read_mostly;
128 128
129/* 129/*
130 * The rcu_scheduler_active variable transitions from zero to one just 130 * The rcu_scheduler_active variable is initialized to the value
131 * before the first task is spawned. So when this variable is zero, RCU 131 * RCU_SCHEDULER_INACTIVE and transitions RCU_SCHEDULER_INIT just before the
132 * can assume that there is but one task, allowing RCU to (for example) 132 * first task is spawned. So when this variable is RCU_SCHEDULER_INACTIVE,
133 * RCU can assume that there is but one task, allowing RCU to (for example)
133 * optimize synchronize_rcu() to a simple barrier(). When this variable 134 * optimize synchronize_rcu() to a simple barrier(). When this variable
134 * is one, RCU must actually do all the hard work required to detect real 135 * is RCU_SCHEDULER_INIT, RCU must actually do all the hard work required
135 * grace periods. This variable is also used to suppress boot-time false 136 * to detect real grace periods. This variable is also used to suppress
136 * positives from lockdep-RCU error checking. 137 * boot-time false positives from lockdep-RCU error checking. Finally, it
138 * transitions from RCU_SCHEDULER_INIT to RCU_SCHEDULER_RUNNING after RCU
139 * is fully initialized, including all of its kthreads having been spawned.
137 */ 140 */
138int rcu_scheduler_active __read_mostly; 141int rcu_scheduler_active __read_mostly;
139EXPORT_SYMBOL_GPL(rcu_scheduler_active); 142EXPORT_SYMBOL_GPL(rcu_scheduler_active);
@@ -3980,18 +3983,22 @@ static int __init rcu_spawn_gp_kthread(void)
3980early_initcall(rcu_spawn_gp_kthread); 3983early_initcall(rcu_spawn_gp_kthread);
3981 3984
3982/* 3985/*
3983 * This function is invoked towards the end of the scheduler's initialization 3986 * This function is invoked towards the end of the scheduler's
3984 * process. Before this is called, the idle task might contain 3987 * initialization process. Before this is called, the idle task might
3985 * RCU read-side critical sections (during which time, this idle 3988 * contain synchronous grace-period primitives (during which time, this idle
3986 * task is booting the system). After this function is called, the 3989 * task is booting the system, and such primitives are no-ops). After this
3987 * idle tasks are prohibited from containing RCU read-side critical 3990 * function is called, any synchronous grace-period primitives are run as
3988 * sections. This function also enables RCU lockdep checking. 3991 * expedited, with the requesting task driving the grace period forward.
3992 * A later core_initcall() rcu_exp_runtime_mode() will switch to full
3993 * runtime RCU functionality.
3989 */ 3994 */
3990void rcu_scheduler_starting(void) 3995void rcu_scheduler_starting(void)
3991{ 3996{
3992 WARN_ON(num_online_cpus() != 1); 3997 WARN_ON(num_online_cpus() != 1);
3993 WARN_ON(nr_context_switches() > 0); 3998 WARN_ON(nr_context_switches() > 0);
3994 rcu_scheduler_active = 1; 3999 rcu_test_sync_prims();
4000 rcu_scheduler_active = RCU_SCHEDULER_INIT;
4001 rcu_test_sync_prims();
3995} 4002}
3996 4003
3997/* 4004/*
diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h
index d3053e99fdb6..e59e1849b89a 100644
--- a/kernel/rcu/tree_exp.h
+++ b/kernel/rcu/tree_exp.h
@@ -532,18 +532,28 @@ struct rcu_exp_work {
532}; 532};
533 533
534/* 534/*
535 * Common code to drive an expedited grace period forward, used by
536 * workqueues and mid-boot-time tasks.
537 */
538static void rcu_exp_sel_wait_wake(struct rcu_state *rsp,
539 smp_call_func_t func, unsigned long s)
540{
541 /* Initialize the rcu_node tree in preparation for the wait. */
542 sync_rcu_exp_select_cpus(rsp, func);
543
544 /* Wait and clean up, including waking everyone. */
545 rcu_exp_wait_wake(rsp, s);
546}
547
548/*
535 * Work-queue handler to drive an expedited grace period forward. 549 * Work-queue handler to drive an expedited grace period forward.
536 */ 550 */
537static void wait_rcu_exp_gp(struct work_struct *wp) 551static void wait_rcu_exp_gp(struct work_struct *wp)
538{ 552{
539 struct rcu_exp_work *rewp; 553 struct rcu_exp_work *rewp;
540 554
541 /* Initialize the rcu_node tree in preparation for the wait. */
542 rewp = container_of(wp, struct rcu_exp_work, rew_work); 555 rewp = container_of(wp, struct rcu_exp_work, rew_work);
543 sync_rcu_exp_select_cpus(rewp->rew_rsp, rewp->rew_func); 556 rcu_exp_sel_wait_wake(rewp->rew_rsp, rewp->rew_func, rewp->rew_s);
544
545 /* Wait and clean up, including waking everyone. */
546 rcu_exp_wait_wake(rewp->rew_rsp, rewp->rew_s);
547} 557}
548 558
549/* 559/*
@@ -569,12 +579,18 @@ static void _synchronize_rcu_expedited(struct rcu_state *rsp,
569 if (exp_funnel_lock(rsp, s)) 579 if (exp_funnel_lock(rsp, s))
570 return; /* Someone else did our work for us. */ 580 return; /* Someone else did our work for us. */
571 581
572 /* Marshall arguments and schedule the expedited grace period. */ 582 /* Ensure that load happens before action based on it. */
573 rew.rew_func = func; 583 if (unlikely(rcu_scheduler_active == RCU_SCHEDULER_INIT)) {
574 rew.rew_rsp = rsp; 584 /* Direct call during scheduler init and early_initcalls(). */
575 rew.rew_s = s; 585 rcu_exp_sel_wait_wake(rsp, func, s);
576 INIT_WORK_ONSTACK(&rew.rew_work, wait_rcu_exp_gp); 586 } else {
577 schedule_work(&rew.rew_work); 587 /* Marshall arguments & schedule the expedited grace period. */
588 rew.rew_func = func;
589 rew.rew_rsp = rsp;
590 rew.rew_s = s;
591 INIT_WORK_ONSTACK(&rew.rew_work, wait_rcu_exp_gp);
592 schedule_work(&rew.rew_work);
593 }
578 594
579 /* Wait for expedited grace period to complete. */ 595 /* Wait for expedited grace period to complete. */
580 rdp = per_cpu_ptr(rsp->rda, raw_smp_processor_id()); 596 rdp = per_cpu_ptr(rsp->rda, raw_smp_processor_id());
@@ -676,6 +692,8 @@ void synchronize_rcu_expedited(void)
676{ 692{
677 struct rcu_state *rsp = rcu_state_p; 693 struct rcu_state *rsp = rcu_state_p;
678 694
695 if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
696 return;
679 _synchronize_rcu_expedited(rsp, sync_rcu_exp_handler); 697 _synchronize_rcu_expedited(rsp, sync_rcu_exp_handler);
680} 698}
681EXPORT_SYMBOL_GPL(synchronize_rcu_expedited); 699EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
@@ -693,3 +711,15 @@ void synchronize_rcu_expedited(void)
693EXPORT_SYMBOL_GPL(synchronize_rcu_expedited); 711EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
694 712
695#endif /* #else #ifdef CONFIG_PREEMPT_RCU */ 713#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
714
715/*
716 * Switch to run-time mode once Tree RCU has fully initialized.
717 */
718static int __init rcu_exp_runtime_mode(void)
719{
720 rcu_test_sync_prims();
721 rcu_scheduler_active = RCU_SCHEDULER_RUNNING;
722 rcu_test_sync_prims();
723 return 0;
724}
725core_initcall(rcu_exp_runtime_mode);
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 85c5a883c6e3..56583e764ebf 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -670,7 +670,7 @@ void synchronize_rcu(void)
670 lock_is_held(&rcu_lock_map) || 670 lock_is_held(&rcu_lock_map) ||
671 lock_is_held(&rcu_sched_lock_map), 671 lock_is_held(&rcu_sched_lock_map),
672 "Illegal synchronize_rcu() in RCU read-side critical section"); 672 "Illegal synchronize_rcu() in RCU read-side critical section");
673 if (!rcu_scheduler_active) 673 if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
674 return; 674 return;
675 if (rcu_gp_is_expedited()) 675 if (rcu_gp_is_expedited())
676 synchronize_rcu_expedited(); 676 synchronize_rcu_expedited();
diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
index f19271dce0a9..4f6db7e6a117 100644
--- a/kernel/rcu/update.c
+++ b/kernel/rcu/update.c
@@ -121,11 +121,14 @@ EXPORT_SYMBOL(rcu_read_lock_sched_held);
121 * Should expedited grace-period primitives always fall back to their 121 * Should expedited grace-period primitives always fall back to their
122 * non-expedited counterparts? Intended for use within RCU. Note 122 * non-expedited counterparts? Intended for use within RCU. Note
123 * that if the user specifies both rcu_expedited and rcu_normal, then 123 * that if the user specifies both rcu_expedited and rcu_normal, then
124 * rcu_normal wins. 124 * rcu_normal wins. (Except during the time period during boot from
125 * when the first task is spawned until the rcu_exp_runtime_mode()
126 * core_initcall() is invoked, at which point everything is expedited.)
125 */ 127 */
126bool rcu_gp_is_normal(void) 128bool rcu_gp_is_normal(void)
127{ 129{
128 return READ_ONCE(rcu_normal); 130 return READ_ONCE(rcu_normal) &&
131 rcu_scheduler_active != RCU_SCHEDULER_INIT;
129} 132}
130EXPORT_SYMBOL_GPL(rcu_gp_is_normal); 133EXPORT_SYMBOL_GPL(rcu_gp_is_normal);
131 134
@@ -135,13 +138,14 @@ static atomic_t rcu_expedited_nesting =
135/* 138/*
136 * Should normal grace-period primitives be expedited? Intended for 139 * Should normal grace-period primitives be expedited? Intended for
137 * use within RCU. Note that this function takes the rcu_expedited 140 * use within RCU. Note that this function takes the rcu_expedited
138 * sysfs/boot variable into account as well as the rcu_expedite_gp() 141 * sysfs/boot variable and rcu_scheduler_active into account as well
139 * nesting. So looping on rcu_unexpedite_gp() until rcu_gp_is_expedited() 142 * as the rcu_expedite_gp() nesting. So looping on rcu_unexpedite_gp()
140 * returns false is a -really- bad idea. 143 * until rcu_gp_is_expedited() returns false is a -really- bad idea.
141 */ 144 */
142bool rcu_gp_is_expedited(void) 145bool rcu_gp_is_expedited(void)
143{ 146{
144 return rcu_expedited || atomic_read(&rcu_expedited_nesting); 147 return rcu_expedited || atomic_read(&rcu_expedited_nesting) ||
148 rcu_scheduler_active == RCU_SCHEDULER_INIT;
145} 149}
146EXPORT_SYMBOL_GPL(rcu_gp_is_expedited); 150EXPORT_SYMBOL_GPL(rcu_gp_is_expedited);
147 151
@@ -257,7 +261,7 @@ EXPORT_SYMBOL_GPL(rcu_callback_map);
257 261
258int notrace debug_lockdep_rcu_enabled(void) 262int notrace debug_lockdep_rcu_enabled(void)
259{ 263{
260 return rcu_scheduler_active && debug_locks && 264 return rcu_scheduler_active != RCU_SCHEDULER_INACTIVE && debug_locks &&
261 current->lockdep_recursion == 0; 265 current->lockdep_recursion == 0;
262} 266}
263EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled); 267EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled);
@@ -591,7 +595,7 @@ EXPORT_SYMBOL_GPL(call_rcu_tasks);
591void synchronize_rcu_tasks(void) 595void synchronize_rcu_tasks(void)
592{ 596{
593 /* Complain if the scheduler has not started. */ 597 /* Complain if the scheduler has not started. */
594 RCU_LOCKDEP_WARN(!rcu_scheduler_active, 598 RCU_LOCKDEP_WARN(rcu_scheduler_active == RCU_SCHEDULER_INACTIVE,
595 "synchronize_rcu_tasks called too soon"); 599 "synchronize_rcu_tasks called too soon");
596 600
597 /* Wait for the grace period. */ 601 /* Wait for the grace period. */
@@ -813,6 +817,23 @@ static void rcu_spawn_tasks_kthread(void)
813 817
814#endif /* #ifdef CONFIG_TASKS_RCU */ 818#endif /* #ifdef CONFIG_TASKS_RCU */
815 819
820/*
821 * Test each non-SRCU synchronous grace-period wait API. This is
822 * useful just after a change in mode for these primitives, and
823 * during early boot.
824 */
825void rcu_test_sync_prims(void)
826{
827 if (!IS_ENABLED(CONFIG_PROVE_RCU))
828 return;
829 synchronize_rcu();
830 synchronize_rcu_bh();
831 synchronize_sched();
832 synchronize_rcu_expedited();
833 synchronize_rcu_bh_expedited();
834 synchronize_sched_expedited();
835}
836
816#ifdef CONFIG_PROVE_RCU 837#ifdef CONFIG_PROVE_RCU
817 838
818/* 839/*
@@ -865,6 +886,7 @@ void rcu_early_boot_tests(void)
865 early_boot_test_call_rcu_bh(); 886 early_boot_test_call_rcu_bh();
866 if (rcu_self_test_sched) 887 if (rcu_self_test_sched)
867 early_boot_test_call_rcu_sched(); 888 early_boot_test_call_rcu_sched();
889 rcu_test_sync_prims();
868} 890}
869 891
870static int rcu_verify_early_boot_tests(void) 892static int rcu_verify_early_boot_tests(void)
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 975b8fc4f1e1..a8d74a733a38 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -483,11 +483,11 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
483 : 1UL << (BITS_PER_LONG - IO_TLB_SHIFT); 483 : 1UL << (BITS_PER_LONG - IO_TLB_SHIFT);
484 484
485 /* 485 /*
486 * For mappings greater than a page, we limit the stride (and 486 * For mappings greater than or equal to a page, we limit the stride
487 * hence alignment) to a page size. 487 * (and hence alignment) to a page size.
488 */ 488 */
489 nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT; 489 nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
490 if (size > PAGE_SIZE) 490 if (size >= PAGE_SIZE)
491 stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT)); 491 stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT));
492 else 492 else
493 stride = 1; 493 stride = 1;
diff --git a/net/ax25/ax25_subr.c b/net/ax25/ax25_subr.c
index 4855d18a8511..038b109b2be7 100644
--- a/net/ax25/ax25_subr.c
+++ b/net/ax25/ax25_subr.c
@@ -264,7 +264,7 @@ void ax25_disconnect(ax25_cb *ax25, int reason)
264{ 264{
265 ax25_clear_queues(ax25); 265 ax25_clear_queues(ax25);
266 266
267 if (!sock_flag(ax25->sk, SOCK_DESTROY)) 267 if (!ax25->sk || !sock_flag(ax25->sk, SOCK_DESTROY))
268 ax25_stop_heartbeat(ax25); 268 ax25_stop_heartbeat(ax25);
269 ax25_stop_t1timer(ax25); 269 ax25_stop_t1timer(ax25);
270 ax25_stop_t2timer(ax25); 270 ax25_stop_t2timer(ax25);
diff --git a/net/ceph/crypto.c b/net/ceph/crypto.c
index 3949ce70be07..292e33bd916e 100644
--- a/net/ceph/crypto.c
+++ b/net/ceph/crypto.c
@@ -214,7 +214,7 @@ static int ceph_aes_crypt(const struct ceph_crypto_key *key, bool encrypt,
214 SKCIPHER_REQUEST_ON_STACK(req, key->tfm); 214 SKCIPHER_REQUEST_ON_STACK(req, key->tfm);
215 struct sg_table sgt; 215 struct sg_table sgt;
216 struct scatterlist prealloc_sg; 216 struct scatterlist prealloc_sg;
217 char iv[AES_BLOCK_SIZE]; 217 char iv[AES_BLOCK_SIZE] __aligned(8);
218 int pad_byte = AES_BLOCK_SIZE - (in_len & (AES_BLOCK_SIZE - 1)); 218 int pad_byte = AES_BLOCK_SIZE - (in_len & (AES_BLOCK_SIZE - 1));
219 int crypt_len = encrypt ? in_len + pad_byte : in_len; 219 int crypt_len = encrypt ? in_len + pad_byte : in_len;
220 int ret; 220 int ret;
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index eba1546b5031..9a375b908d01 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -1279,8 +1279,9 @@ int fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event,
1279 nla_put_u32(skb, RTA_FLOW, fi->fib_nh[0].nh_tclassid)) 1279 nla_put_u32(skb, RTA_FLOW, fi->fib_nh[0].nh_tclassid))
1280 goto nla_put_failure; 1280 goto nla_put_failure;
1281#endif 1281#endif
1282 if (fi->fib_nh->nh_lwtstate) 1282 if (fi->fib_nh->nh_lwtstate &&
1283 lwtunnel_fill_encap(skb, fi->fib_nh->nh_lwtstate); 1283 lwtunnel_fill_encap(skb, fi->fib_nh->nh_lwtstate) < 0)
1284 goto nla_put_failure;
1284 } 1285 }
1285#ifdef CONFIG_IP_ROUTE_MULTIPATH 1286#ifdef CONFIG_IP_ROUTE_MULTIPATH
1286 if (fi->fib_nhs > 1) { 1287 if (fi->fib_nhs > 1) {
@@ -1316,8 +1317,10 @@ int fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event,
1316 nla_put_u32(skb, RTA_FLOW, nh->nh_tclassid)) 1317 nla_put_u32(skb, RTA_FLOW, nh->nh_tclassid))
1317 goto nla_put_failure; 1318 goto nla_put_failure;
1318#endif 1319#endif
1319 if (nh->nh_lwtstate) 1320 if (nh->nh_lwtstate &&
1320 lwtunnel_fill_encap(skb, nh->nh_lwtstate); 1321 lwtunnel_fill_encap(skb, nh->nh_lwtstate) < 0)
1322 goto nla_put_failure;
1323
1321 /* length of rtnetlink header + attributes */ 1324 /* length of rtnetlink header + attributes */
1322 rtnh->rtnh_len = nlmsg_get_pos(skb) - (void *) rtnh; 1325 rtnh->rtnh_len = nlmsg_get_pos(skb) - (void *) rtnh;
1323 } endfor_nexthops(fi); 1326 } endfor_nexthops(fi);
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 0fcac8e7a2b2..709ffe67d1de 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -2472,7 +2472,7 @@ static int rt_fill_info(struct net *net, __be32 dst, __be32 src, u32 table_id,
2472 r->rtm_dst_len = 32; 2472 r->rtm_dst_len = 32;
2473 r->rtm_src_len = 0; 2473 r->rtm_src_len = 0;
2474 r->rtm_tos = fl4->flowi4_tos; 2474 r->rtm_tos = fl4->flowi4_tos;
2475 r->rtm_table = table_id; 2475 r->rtm_table = table_id < 256 ? table_id : RT_TABLE_COMPAT;
2476 if (nla_put_u32(skb, RTA_TABLE, table_id)) 2476 if (nla_put_u32(skb, RTA_TABLE, table_id))
2477 goto nla_put_failure; 2477 goto nla_put_failure;
2478 r->rtm_type = rt->rt_type; 2478 r->rtm_type = rt->rt_type;
diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c
index 4e777a3243f9..f51919535ca7 100644
--- a/net/ipv4/tcp_fastopen.c
+++ b/net/ipv4/tcp_fastopen.c
@@ -113,7 +113,7 @@ static bool tcp_fastopen_cookie_gen(struct request_sock *req,
113 struct tcp_fastopen_cookie tmp; 113 struct tcp_fastopen_cookie tmp;
114 114
115 if (__tcp_fastopen_cookie_gen(&ip6h->saddr, &tmp)) { 115 if (__tcp_fastopen_cookie_gen(&ip6h->saddr, &tmp)) {
116 struct in6_addr *buf = (struct in6_addr *) tmp.val; 116 struct in6_addr *buf = &tmp.addr;
117 int i; 117 int i;
118 118
119 for (i = 0; i < 4; i++) 119 for (i = 0; i < 4; i++)
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 36d292180942..753d6d0860fb 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -1108,7 +1108,7 @@ route_lookup:
1108 t->parms.name); 1108 t->parms.name);
1109 goto tx_err_dst_release; 1109 goto tx_err_dst_release;
1110 } 1110 }
1111 mtu = dst_mtu(dst) - psh_hlen; 1111 mtu = dst_mtu(dst) - psh_hlen - t->tun_hlen;
1112 if (encap_limit >= 0) { 1112 if (encap_limit >= 0) {
1113 max_headroom += 8; 1113 max_headroom += 8;
1114 mtu -= 8; 1114 mtu -= 8;
@@ -1117,7 +1117,7 @@ route_lookup:
1117 mtu = IPV6_MIN_MTU; 1117 mtu = IPV6_MIN_MTU;
1118 if (skb_dst(skb) && !t->parms.collect_md) 1118 if (skb_dst(skb) && !t->parms.collect_md)
1119 skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu); 1119 skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
1120 if (skb->len > mtu && !skb_is_gso(skb)) { 1120 if (skb->len - t->tun_hlen > mtu && !skb_is_gso(skb)) {
1121 *pmtu = mtu; 1121 *pmtu = mtu;
1122 err = -EMSGSIZE; 1122 err = -EMSGSIZE;
1123 goto tx_err_dst_release; 1123 goto tx_err_dst_release;
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 14a3903f1c82..7139fffd61b6 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -81,7 +81,7 @@ static void mld_gq_timer_expire(unsigned long data);
81static void mld_ifc_timer_expire(unsigned long data); 81static void mld_ifc_timer_expire(unsigned long data);
82static void mld_ifc_event(struct inet6_dev *idev); 82static void mld_ifc_event(struct inet6_dev *idev);
83static void mld_add_delrec(struct inet6_dev *idev, struct ifmcaddr6 *pmc); 83static void mld_add_delrec(struct inet6_dev *idev, struct ifmcaddr6 *pmc);
84static void mld_del_delrec(struct inet6_dev *idev, const struct in6_addr *addr); 84static void mld_del_delrec(struct inet6_dev *idev, struct ifmcaddr6 *pmc);
85static void mld_clear_delrec(struct inet6_dev *idev); 85static void mld_clear_delrec(struct inet6_dev *idev);
86static bool mld_in_v1_mode(const struct inet6_dev *idev); 86static bool mld_in_v1_mode(const struct inet6_dev *idev);
87static int sf_setstate(struct ifmcaddr6 *pmc); 87static int sf_setstate(struct ifmcaddr6 *pmc);
@@ -692,9 +692,9 @@ static void igmp6_group_dropped(struct ifmcaddr6 *mc)
692 dev_mc_del(dev, buf); 692 dev_mc_del(dev, buf);
693 } 693 }
694 694
695 if (mc->mca_flags & MAF_NOREPORT)
696 goto done;
697 spin_unlock_bh(&mc->mca_lock); 695 spin_unlock_bh(&mc->mca_lock);
696 if (mc->mca_flags & MAF_NOREPORT)
697 return;
698 698
699 if (!mc->idev->dead) 699 if (!mc->idev->dead)
700 igmp6_leave_group(mc); 700 igmp6_leave_group(mc);
@@ -702,8 +702,6 @@ static void igmp6_group_dropped(struct ifmcaddr6 *mc)
702 spin_lock_bh(&mc->mca_lock); 702 spin_lock_bh(&mc->mca_lock);
703 if (del_timer(&mc->mca_timer)) 703 if (del_timer(&mc->mca_timer))
704 atomic_dec(&mc->mca_refcnt); 704 atomic_dec(&mc->mca_refcnt);
705done:
706 ip6_mc_clear_src(mc);
707 spin_unlock_bh(&mc->mca_lock); 705 spin_unlock_bh(&mc->mca_lock);
708} 706}
709 707
@@ -748,10 +746,11 @@ static void mld_add_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im)
748 spin_unlock_bh(&idev->mc_lock); 746 spin_unlock_bh(&idev->mc_lock);
749} 747}
750 748
751static void mld_del_delrec(struct inet6_dev *idev, const struct in6_addr *pmca) 749static void mld_del_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im)
752{ 750{
753 struct ifmcaddr6 *pmc, *pmc_prev; 751 struct ifmcaddr6 *pmc, *pmc_prev;
754 struct ip6_sf_list *psf, *psf_next; 752 struct ip6_sf_list *psf;
753 struct in6_addr *pmca = &im->mca_addr;
755 754
756 spin_lock_bh(&idev->mc_lock); 755 spin_lock_bh(&idev->mc_lock);
757 pmc_prev = NULL; 756 pmc_prev = NULL;
@@ -768,14 +767,20 @@ static void mld_del_delrec(struct inet6_dev *idev, const struct in6_addr *pmca)
768 } 767 }
769 spin_unlock_bh(&idev->mc_lock); 768 spin_unlock_bh(&idev->mc_lock);
770 769
770 spin_lock_bh(&im->mca_lock);
771 if (pmc) { 771 if (pmc) {
772 for (psf = pmc->mca_tomb; psf; psf = psf_next) { 772 im->idev = pmc->idev;
773 psf_next = psf->sf_next; 773 im->mca_crcount = idev->mc_qrv;
774 kfree(psf); 774 im->mca_sfmode = pmc->mca_sfmode;
775 if (pmc->mca_sfmode == MCAST_INCLUDE) {
776 im->mca_tomb = pmc->mca_tomb;
777 im->mca_sources = pmc->mca_sources;
778 for (psf = im->mca_sources; psf; psf = psf->sf_next)
779 psf->sf_crcount = im->mca_crcount;
775 } 780 }
776 in6_dev_put(pmc->idev); 781 in6_dev_put(pmc->idev);
777 kfree(pmc);
778 } 782 }
783 spin_unlock_bh(&im->mca_lock);
779} 784}
780 785
781static void mld_clear_delrec(struct inet6_dev *idev) 786static void mld_clear_delrec(struct inet6_dev *idev)
@@ -904,7 +909,7 @@ int ipv6_dev_mc_inc(struct net_device *dev, const struct in6_addr *addr)
904 mca_get(mc); 909 mca_get(mc);
905 write_unlock_bh(&idev->lock); 910 write_unlock_bh(&idev->lock);
906 911
907 mld_del_delrec(idev, &mc->mca_addr); 912 mld_del_delrec(idev, mc);
908 igmp6_group_added(mc); 913 igmp6_group_added(mc);
909 ma_put(mc); 914 ma_put(mc);
910 return 0; 915 return 0;
@@ -927,6 +932,7 @@ int __ipv6_dev_mc_dec(struct inet6_dev *idev, const struct in6_addr *addr)
927 write_unlock_bh(&idev->lock); 932 write_unlock_bh(&idev->lock);
928 933
929 igmp6_group_dropped(ma); 934 igmp6_group_dropped(ma);
935 ip6_mc_clear_src(ma);
930 936
931 ma_put(ma); 937 ma_put(ma);
932 return 0; 938 return 0;
@@ -2501,15 +2507,17 @@ void ipv6_mc_down(struct inet6_dev *idev)
2501 /* Withdraw multicast list */ 2507 /* Withdraw multicast list */
2502 2508
2503 read_lock_bh(&idev->lock); 2509 read_lock_bh(&idev->lock);
2504 mld_ifc_stop_timer(idev);
2505 mld_gq_stop_timer(idev);
2506 mld_dad_stop_timer(idev);
2507 2510
2508 for (i = idev->mc_list; i; i = i->next) 2511 for (i = idev->mc_list; i; i = i->next)
2509 igmp6_group_dropped(i); 2512 igmp6_group_dropped(i);
2510 read_unlock_bh(&idev->lock);
2511 2513
2512 mld_clear_delrec(idev); 2514 /* Should stop timer after group drop. or we will
2515 * start timer again in mld_ifc_event()
2516 */
2517 mld_ifc_stop_timer(idev);
2518 mld_gq_stop_timer(idev);
2519 mld_dad_stop_timer(idev);
2520 read_unlock_bh(&idev->lock);
2513} 2521}
2514 2522
2515static void ipv6_mc_reset(struct inet6_dev *idev) 2523static void ipv6_mc_reset(struct inet6_dev *idev)
@@ -2531,8 +2539,10 @@ void ipv6_mc_up(struct inet6_dev *idev)
2531 2539
2532 read_lock_bh(&idev->lock); 2540 read_lock_bh(&idev->lock);
2533 ipv6_mc_reset(idev); 2541 ipv6_mc_reset(idev);
2534 for (i = idev->mc_list; i; i = i->next) 2542 for (i = idev->mc_list; i; i = i->next) {
2543 mld_del_delrec(idev, i);
2535 igmp6_group_added(i); 2544 igmp6_group_added(i);
2545 }
2536 read_unlock_bh(&idev->lock); 2546 read_unlock_bh(&idev->lock);
2537} 2547}
2538 2548
@@ -2565,6 +2575,7 @@ void ipv6_mc_destroy_dev(struct inet6_dev *idev)
2565 2575
2566 /* Deactivate timers */ 2576 /* Deactivate timers */
2567 ipv6_mc_down(idev); 2577 ipv6_mc_down(idev);
2578 mld_clear_delrec(idev);
2568 2579
2569 /* Delete all-nodes address. */ 2580 /* Delete all-nodes address. */
2570 /* We cannot call ipv6_dev_mc_dec() directly, our caller in 2581 /* We cannot call ipv6_dev_mc_dec() directly, our caller in
@@ -2579,11 +2590,9 @@ void ipv6_mc_destroy_dev(struct inet6_dev *idev)
2579 write_lock_bh(&idev->lock); 2590 write_lock_bh(&idev->lock);
2580 while ((i = idev->mc_list) != NULL) { 2591 while ((i = idev->mc_list) != NULL) {
2581 idev->mc_list = i->next; 2592 idev->mc_list = i->next;
2582 write_unlock_bh(&idev->lock);
2583 2593
2584 igmp6_group_dropped(i); 2594 write_unlock_bh(&idev->lock);
2585 ma_put(i); 2595 ma_put(i);
2586
2587 write_lock_bh(&idev->lock); 2596 write_lock_bh(&idev->lock);
2588 } 2597 }
2589 write_unlock_bh(&idev->lock); 2598 write_unlock_bh(&idev->lock);
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index ce5aaf448c54..4f6b067c8753 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -3317,7 +3317,8 @@ static int rt6_fill_node(struct net *net,
3317 if (nla_put_u8(skb, RTA_PREF, IPV6_EXTRACT_PREF(rt->rt6i_flags))) 3317 if (nla_put_u8(skb, RTA_PREF, IPV6_EXTRACT_PREF(rt->rt6i_flags)))
3318 goto nla_put_failure; 3318 goto nla_put_failure;
3319 3319
3320 lwtunnel_fill_encap(skb, rt->dst.lwtstate); 3320 if (lwtunnel_fill_encap(skb, rt->dst.lwtstate) < 0)
3321 goto nla_put_failure;
3321 3322
3322 nlmsg_end(skb, nlh); 3323 nlmsg_end(skb, nlh);
3323 return 0; 3324 return 0;
diff --git a/net/ipv6/seg6_hmac.c b/net/ipv6/seg6_hmac.c
index ef1c8a46e7ac..03a064803626 100644
--- a/net/ipv6/seg6_hmac.c
+++ b/net/ipv6/seg6_hmac.c
@@ -400,7 +400,7 @@ static int seg6_hmac_init_algo(void)
400 *p_tfm = tfm; 400 *p_tfm = tfm;
401 } 401 }
402 402
403 p_tfm = this_cpu_ptr(algo->tfms); 403 p_tfm = raw_cpu_ptr(algo->tfms);
404 tfm = *p_tfm; 404 tfm = *p_tfm;
405 405
406 shsize = sizeof(*shash) + crypto_shash_descsize(tfm); 406 shsize = sizeof(*shash) + crypto_shash_descsize(tfm);
diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c
index bbfca22c34ae..1d60cb132835 100644
--- a/net/ipv6/seg6_iptunnel.c
+++ b/net/ipv6/seg6_iptunnel.c
@@ -265,7 +265,9 @@ int seg6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
265 slwt = seg6_lwt_lwtunnel(orig_dst->lwtstate); 265 slwt = seg6_lwt_lwtunnel(orig_dst->lwtstate);
266 266
267#ifdef CONFIG_DST_CACHE 267#ifdef CONFIG_DST_CACHE
268 preempt_disable();
268 dst = dst_cache_get(&slwt->cache); 269 dst = dst_cache_get(&slwt->cache);
270 preempt_enable();
269#endif 271#endif
270 272
271 if (unlikely(!dst)) { 273 if (unlikely(!dst)) {
@@ -286,7 +288,9 @@ int seg6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
286 } 288 }
287 289
288#ifdef CONFIG_DST_CACHE 290#ifdef CONFIG_DST_CACHE
291 preempt_disable();
289 dst_cache_set_ip6(&slwt->cache, dst, &fl6.saddr); 292 dst_cache_set_ip6(&slwt->cache, dst, &fl6.saddr);
293 preempt_enable();
290#endif 294#endif
291 } 295 }
292 296
diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c
index e75cbf6ecc26..a0d901d8992e 100644
--- a/net/mac80211/chan.c
+++ b/net/mac80211/chan.c
@@ -231,9 +231,6 @@ ieee80211_get_max_required_bw(struct ieee80211_sub_if_data *sdata)
231 !(sta->sdata->bss && sta->sdata->bss == sdata->bss)) 231 !(sta->sdata->bss && sta->sdata->bss == sdata->bss))
232 continue; 232 continue;
233 233
234 if (!sta->uploaded || !test_sta_flag(sta, WLAN_STA_ASSOC))
235 continue;
236
237 max_bw = max(max_bw, ieee80211_get_sta_bw(&sta->sta)); 234 max_bw = max(max_bw, ieee80211_get_sta_bw(&sta->sta));
238 } 235 }
239 rcu_read_unlock(); 236 rcu_read_unlock();
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 41497b670e2b..d37ae7dc114b 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -6,6 +6,7 @@
6 * Copyright (c) 2006 Jiri Benc <jbenc@suse.cz> 6 * Copyright (c) 2006 Jiri Benc <jbenc@suse.cz>
7 * Copyright 2008, Johannes Berg <johannes@sipsolutions.net> 7 * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
8 * Copyright 2013-2014 Intel Mobile Communications GmbH 8 * Copyright 2013-2014 Intel Mobile Communications GmbH
9 * Copyright (c) 2016 Intel Deutschland GmbH
9 * 10 *
10 * This program is free software; you can redistribute it and/or modify 11 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as 12 * it under the terms of the GNU General Public License version 2 as
@@ -1295,6 +1296,26 @@ static void ieee80211_iface_work(struct work_struct *work)
1295 } else if (ieee80211_is_action(mgmt->frame_control) && 1296 } else if (ieee80211_is_action(mgmt->frame_control) &&
1296 mgmt->u.action.category == WLAN_CATEGORY_VHT) { 1297 mgmt->u.action.category == WLAN_CATEGORY_VHT) {
1297 switch (mgmt->u.action.u.vht_group_notif.action_code) { 1298 switch (mgmt->u.action.u.vht_group_notif.action_code) {
1299 case WLAN_VHT_ACTION_OPMODE_NOTIF: {
1300 struct ieee80211_rx_status *status;
1301 enum nl80211_band band;
1302 u8 opmode;
1303
1304 status = IEEE80211_SKB_RXCB(skb);
1305 band = status->band;
1306 opmode = mgmt->u.action.u.vht_opmode_notif.operating_mode;
1307
1308 mutex_lock(&local->sta_mtx);
1309 sta = sta_info_get_bss(sdata, mgmt->sa);
1310
1311 if (sta)
1312 ieee80211_vht_handle_opmode(sdata, sta,
1313 opmode,
1314 band);
1315
1316 mutex_unlock(&local->sta_mtx);
1317 break;
1318 }
1298 case WLAN_VHT_ACTION_GROUPID_MGMT: 1319 case WLAN_VHT_ACTION_GROUPID_MGMT:
1299 ieee80211_process_mu_groups(sdata, mgmt); 1320 ieee80211_process_mu_groups(sdata, mgmt);
1300 break; 1321 break;
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 1822c77f2b1c..56fb47953b72 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -913,12 +913,17 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
913 supp_ht = supp_ht || sband->ht_cap.ht_supported; 913 supp_ht = supp_ht || sband->ht_cap.ht_supported;
914 supp_vht = supp_vht || sband->vht_cap.vht_supported; 914 supp_vht = supp_vht || sband->vht_cap.vht_supported;
915 915
916 if (sband->ht_cap.ht_supported) 916 if (!sband->ht_cap.ht_supported)
917 local->rx_chains = 917 continue;
918 max(ieee80211_mcs_to_chains(&sband->ht_cap.mcs),
919 local->rx_chains);
920 918
921 /* TODO: consider VHT for RX chains, hopefully it's the same */ 919 /* TODO: consider VHT for RX chains, hopefully it's the same */
920 local->rx_chains =
921 max(ieee80211_mcs_to_chains(&sband->ht_cap.mcs),
922 local->rx_chains);
923
924 /* no need to mask, SM_PS_DISABLED has all bits set */
925 sband->ht_cap.cap |= WLAN_HT_CAP_SM_PS_DISABLED <<
926 IEEE80211_HT_CAP_SM_PS_SHIFT;
922 } 927 }
923 928
924 /* if low-level driver supports AP, we also support VLAN */ 929 /* if low-level driver supports AP, we also support VLAN */
diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
index 206698bc93f4..9e2641d45587 100644
--- a/net/mac80211/rate.c
+++ b/net/mac80211/rate.c
@@ -40,6 +40,8 @@ void rate_control_rate_init(struct sta_info *sta)
40 40
41 ieee80211_sta_set_rx_nss(sta); 41 ieee80211_sta_set_rx_nss(sta);
42 42
43 ieee80211_recalc_min_chandef(sta->sdata);
44
43 if (!ref) 45 if (!ref)
44 return; 46 return;
45 47
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 3e289a64ed43..3090dd4342f6 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -2472,7 +2472,8 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
2472 if (!ifmsh->mshcfg.dot11MeshForwarding) 2472 if (!ifmsh->mshcfg.dot11MeshForwarding)
2473 goto out; 2473 goto out;
2474 2474
2475 fwd_skb = skb_copy_expand(skb, local->tx_headroom, 0, GFP_ATOMIC); 2475 fwd_skb = skb_copy_expand(skb, local->tx_headroom +
2476 sdata->encrypt_headroom, 0, GFP_ATOMIC);
2476 if (!fwd_skb) { 2477 if (!fwd_skb) {
2477 net_info_ratelimited("%s: failed to clone mesh frame\n", 2478 net_info_ratelimited("%s: failed to clone mesh frame\n",
2478 sdata->name); 2479 sdata->name);
@@ -2880,17 +2881,10 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
2880 2881
2881 switch (mgmt->u.action.u.vht_opmode_notif.action_code) { 2882 switch (mgmt->u.action.u.vht_opmode_notif.action_code) {
2882 case WLAN_VHT_ACTION_OPMODE_NOTIF: { 2883 case WLAN_VHT_ACTION_OPMODE_NOTIF: {
2883 u8 opmode;
2884
2885 /* verify opmode is present */ 2884 /* verify opmode is present */
2886 if (len < IEEE80211_MIN_ACTION_SIZE + 2) 2885 if (len < IEEE80211_MIN_ACTION_SIZE + 2)
2887 goto invalid; 2886 goto invalid;
2888 2887 goto queue;
2889 opmode = mgmt->u.action.u.vht_opmode_notif.operating_mode;
2890
2891 ieee80211_vht_handle_opmode(rx->sdata, rx->sta,
2892 opmode, status->band);
2893 goto handled;
2894 } 2888 }
2895 case WLAN_VHT_ACTION_GROUPID_MGMT: { 2889 case WLAN_VHT_ACTION_GROUPID_MGMT: {
2896 if (len < IEEE80211_MIN_ACTION_SIZE + 25) 2890 if (len < IEEE80211_MIN_ACTION_SIZE + 25)
@@ -3942,21 +3936,31 @@ static bool ieee80211_invoke_fast_rx(struct ieee80211_rx_data *rx,
3942 u64_stats_update_end(&stats->syncp); 3936 u64_stats_update_end(&stats->syncp);
3943 3937
3944 if (fast_rx->internal_forward) { 3938 if (fast_rx->internal_forward) {
3945 struct sta_info *dsta = sta_info_get(rx->sdata, skb->data); 3939 struct sk_buff *xmit_skb = NULL;
3940 bool multicast = is_multicast_ether_addr(skb->data);
3941
3942 if (multicast) {
3943 xmit_skb = skb_copy(skb, GFP_ATOMIC);
3944 } else if (sta_info_get(rx->sdata, skb->data)) {
3945 xmit_skb = skb;
3946 skb = NULL;
3947 }
3946 3948
3947 if (dsta) { 3949 if (xmit_skb) {
3948 /* 3950 /*
3949 * Send to wireless media and increase priority by 256 3951 * Send to wireless media and increase priority by 256
3950 * to keep the received priority instead of 3952 * to keep the received priority instead of
3951 * reclassifying the frame (see cfg80211_classify8021d). 3953 * reclassifying the frame (see cfg80211_classify8021d).
3952 */ 3954 */
3953 skb->priority += 256; 3955 xmit_skb->priority += 256;
3954 skb->protocol = htons(ETH_P_802_3); 3956 xmit_skb->protocol = htons(ETH_P_802_3);
3955 skb_reset_network_header(skb); 3957 skb_reset_network_header(xmit_skb);
3956 skb_reset_mac_header(skb); 3958 skb_reset_mac_header(xmit_skb);
3957 dev_queue_xmit(skb); 3959 dev_queue_xmit(xmit_skb);
3958 return true;
3959 } 3960 }
3961
3962 if (!skb)
3963 return true;
3960 } 3964 }
3961 3965
3962 /* deliver to local stack */ 3966 /* deliver to local stack */
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index b6cfcf038c11..50c309094c37 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -1501,8 +1501,8 @@ ieee80211_sta_ps_deliver_response(struct sta_info *sta,
1501 1501
1502 /* This will evaluate to 1, 3, 5 or 7. */ 1502 /* This will evaluate to 1, 3, 5 or 7. */
1503 for (ac = IEEE80211_AC_VO; ac < IEEE80211_NUM_ACS; ac++) 1503 for (ac = IEEE80211_AC_VO; ac < IEEE80211_NUM_ACS; ac++)
1504 if (ignored_acs & BIT(ac)) 1504 if (!(ignored_acs & ieee80211_ac_to_qos_mask[ac]))
1505 continue; 1505 break;
1506 tid = 7 - 2 * ac; 1506 tid = 7 - 2 * ac;
1507 1507
1508 ieee80211_send_null_response(sta, tid, reason, true, false); 1508 ieee80211_send_null_response(sta, tid, reason, true, false);
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 0d8b716e509e..797e847cbc49 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -1243,7 +1243,7 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
1243 1243
1244static struct txq_info *ieee80211_get_txq(struct ieee80211_local *local, 1244static struct txq_info *ieee80211_get_txq(struct ieee80211_local *local,
1245 struct ieee80211_vif *vif, 1245 struct ieee80211_vif *vif,
1246 struct ieee80211_sta *pubsta, 1246 struct sta_info *sta,
1247 struct sk_buff *skb) 1247 struct sk_buff *skb)
1248{ 1248{
1249 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 1249 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
@@ -1257,10 +1257,13 @@ static struct txq_info *ieee80211_get_txq(struct ieee80211_local *local,
1257 if (!ieee80211_is_data(hdr->frame_control)) 1257 if (!ieee80211_is_data(hdr->frame_control))
1258 return NULL; 1258 return NULL;
1259 1259
1260 if (pubsta) { 1260 if (sta) {
1261 u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK; 1261 u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
1262 1262
1263 txq = pubsta->txq[tid]; 1263 if (!sta->uploaded)
1264 return NULL;
1265
1266 txq = sta->sta.txq[tid];
1264 } else if (vif) { 1267 } else if (vif) {
1265 txq = vif->txq; 1268 txq = vif->txq;
1266 } 1269 }
@@ -1503,23 +1506,17 @@ static bool ieee80211_queue_skb(struct ieee80211_local *local,
1503 struct fq *fq = &local->fq; 1506 struct fq *fq = &local->fq;
1504 struct ieee80211_vif *vif; 1507 struct ieee80211_vif *vif;
1505 struct txq_info *txqi; 1508 struct txq_info *txqi;
1506 struct ieee80211_sta *pubsta;
1507 1509
1508 if (!local->ops->wake_tx_queue || 1510 if (!local->ops->wake_tx_queue ||
1509 sdata->vif.type == NL80211_IFTYPE_MONITOR) 1511 sdata->vif.type == NL80211_IFTYPE_MONITOR)
1510 return false; 1512 return false;
1511 1513
1512 if (sta && sta->uploaded)
1513 pubsta = &sta->sta;
1514 else
1515 pubsta = NULL;
1516
1517 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) 1514 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
1518 sdata = container_of(sdata->bss, 1515 sdata = container_of(sdata->bss,
1519 struct ieee80211_sub_if_data, u.ap); 1516 struct ieee80211_sub_if_data, u.ap);
1520 1517
1521 vif = &sdata->vif; 1518 vif = &sdata->vif;
1522 txqi = ieee80211_get_txq(local, vif, pubsta, skb); 1519 txqi = ieee80211_get_txq(local, vif, sta, skb);
1523 1520
1524 if (!txqi) 1521 if (!txqi)
1525 return false; 1522 return false;
diff --git a/net/mac80211/vht.c b/net/mac80211/vht.c
index 6832bf6ab69f..43e45bb660bc 100644
--- a/net/mac80211/vht.c
+++ b/net/mac80211/vht.c
@@ -527,8 +527,10 @@ void ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
527 527
528 u32 changed = __ieee80211_vht_handle_opmode(sdata, sta, opmode, band); 528 u32 changed = __ieee80211_vht_handle_opmode(sdata, sta, opmode, band);
529 529
530 if (changed > 0) 530 if (changed > 0) {
531 ieee80211_recalc_min_chandef(sdata);
531 rate_control_rate_update(local, sband, sta, changed); 532 rate_control_rate_update(local, sband, sta, changed);
533 }
532} 534}
533 535
534void ieee80211_get_vht_mask_from_cap(__le16 vht_cap, 536void ieee80211_get_vht_mask_from_cap(__le16 vht_cap,
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
index 6b78bab27755..54253ea5976e 100644
--- a/net/openvswitch/conntrack.c
+++ b/net/openvswitch/conntrack.c
@@ -514,7 +514,7 @@ static int ovs_ct_nat_execute(struct sk_buff *skb, struct nf_conn *ct,
514 int hooknum, nh_off, err = NF_ACCEPT; 514 int hooknum, nh_off, err = NF_ACCEPT;
515 515
516 nh_off = skb_network_offset(skb); 516 nh_off = skb_network_offset(skb);
517 skb_pull(skb, nh_off); 517 skb_pull_rcsum(skb, nh_off);
518 518
519 /* See HOOK2MANIP(). */ 519 /* See HOOK2MANIP(). */
520 if (maniptype == NF_NAT_MANIP_SRC) 520 if (maniptype == NF_NAT_MANIP_SRC)
@@ -579,6 +579,7 @@ static int ovs_ct_nat_execute(struct sk_buff *skb, struct nf_conn *ct,
579 err = nf_nat_packet(ct, ctinfo, hooknum, skb); 579 err = nf_nat_packet(ct, ctinfo, hooknum, skb);
580push: 580push:
581 skb_push(skb, nh_off); 581 skb_push(skb, nh_off);
582 skb_postpush_rcsum(skb, skb->data, nh_off);
582 583
583 return err; 584 return err;
584} 585}
@@ -886,7 +887,7 @@ int ovs_ct_execute(struct net *net, struct sk_buff *skb,
886 887
887 /* The conntrack module expects to be working at L3. */ 888 /* The conntrack module expects to be working at L3. */
888 nh_ofs = skb_network_offset(skb); 889 nh_ofs = skb_network_offset(skb);
889 skb_pull(skb, nh_ofs); 890 skb_pull_rcsum(skb, nh_ofs);
890 891
891 if (key->ip.frag != OVS_FRAG_TYPE_NONE) { 892 if (key->ip.frag != OVS_FRAG_TYPE_NONE) {
892 err = handle_fragments(net, key, info->zone.id, skb); 893 err = handle_fragments(net, key, info->zone.id, skb);
@@ -900,6 +901,7 @@ int ovs_ct_execute(struct net *net, struct sk_buff *skb,
900 err = ovs_ct_lookup(net, key, info, skb); 901 err = ovs_ct_lookup(net, key, info, skb);
901 902
902 skb_push(skb, nh_ofs); 903 skb_push(skb, nh_ofs);
904 skb_postpush_rcsum(skb, skb->data, nh_ofs);
903 if (err) 905 if (err)
904 kfree_skb(skb); 906 kfree_skb(skb);
905 return err; 907 return err;
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 2095c83ce773..e10456ef6f7a 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -900,8 +900,6 @@ tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
900 goto err; 900 goto err;
901 } 901 }
902 act->order = i; 902 act->order = i;
903 if (event == RTM_GETACTION)
904 act->tcfa_refcnt++;
905 list_add_tail(&act->list, &actions); 903 list_add_tail(&act->list, &actions);
906 } 904 }
907 905
@@ -914,7 +912,8 @@ tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
914 return ret; 912 return ret;
915 } 913 }
916err: 914err:
917 tcf_action_destroy(&actions, 0); 915 if (event != RTM_GETACTION)
916 tcf_action_destroy(&actions, 0);
918 return ret; 917 return ret;
919} 918}
920 919
diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c
index 1c60317f0121..520baa41cba3 100644
--- a/net/sched/act_bpf.c
+++ b/net/sched/act_bpf.c
@@ -123,12 +123,11 @@ static int tcf_bpf_dump_ebpf_info(const struct tcf_bpf *prog,
123 nla_put_string(skb, TCA_ACT_BPF_NAME, prog->bpf_name)) 123 nla_put_string(skb, TCA_ACT_BPF_NAME, prog->bpf_name))
124 return -EMSGSIZE; 124 return -EMSGSIZE;
125 125
126 nla = nla_reserve(skb, TCA_ACT_BPF_DIGEST, 126 nla = nla_reserve(skb, TCA_ACT_BPF_TAG, sizeof(prog->filter->tag));
127 sizeof(prog->filter->digest));
128 if (nla == NULL) 127 if (nla == NULL)
129 return -EMSGSIZE; 128 return -EMSGSIZE;
130 129
131 memcpy(nla_data(nla), prog->filter->digest, nla_len(nla)); 130 memcpy(nla_data(nla), prog->filter->tag, nla_len(nla));
132 131
133 return 0; 132 return 0;
134} 133}
diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c
index adc776048d1a..d9c97018317d 100644
--- a/net/sched/cls_bpf.c
+++ b/net/sched/cls_bpf.c
@@ -555,11 +555,11 @@ static int cls_bpf_dump_ebpf_info(const struct cls_bpf_prog *prog,
555 nla_put_string(skb, TCA_BPF_NAME, prog->bpf_name)) 555 nla_put_string(skb, TCA_BPF_NAME, prog->bpf_name))
556 return -EMSGSIZE; 556 return -EMSGSIZE;
557 557
558 nla = nla_reserve(skb, TCA_BPF_DIGEST, sizeof(prog->filter->digest)); 558 nla = nla_reserve(skb, TCA_BPF_TAG, sizeof(prog->filter->tag));
559 if (nla == NULL) 559 if (nla == NULL)
560 return -EMSGSIZE; 560 return -EMSGSIZE;
561 561
562 memcpy(nla_data(nla), prog->filter->digest, nla_len(nla)); 562 memcpy(nla_data(nla), prog->filter->tag, nla_len(nla));
563 563
564 return 0; 564 return 0;
565} 565}
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index 886e9d381771..153082598522 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -1489,7 +1489,7 @@ svcauth_gss_accept(struct svc_rqst *rqstp, __be32 *authp)
1489 case RPC_GSS_PROC_DESTROY: 1489 case RPC_GSS_PROC_DESTROY:
1490 if (gss_write_verf(rqstp, rsci->mechctx, gc->gc_seq)) 1490 if (gss_write_verf(rqstp, rsci->mechctx, gc->gc_seq))
1491 goto auth_err; 1491 goto auth_err;
1492 rsci->h.expiry_time = get_seconds(); 1492 rsci->h.expiry_time = seconds_since_boot();
1493 set_bit(CACHE_NEGATIVE, &rsci->h.flags); 1493 set_bit(CACHE_NEGATIVE, &rsci->h.flags);
1494 if (resv->iov_len + 4 > PAGE_SIZE) 1494 if (resv->iov_len + 4 > PAGE_SIZE)
1495 goto drop; 1495 goto drop;
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index 3bc1d61694cb..9c9db55a0c1e 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -799,6 +799,8 @@ static int svc_handle_xprt(struct svc_rqst *rqstp, struct svc_xprt *xprt)
799 799
800 if (test_bit(XPT_CLOSE, &xprt->xpt_flags)) { 800 if (test_bit(XPT_CLOSE, &xprt->xpt_flags)) {
801 dprintk("svc_recv: found XPT_CLOSE\n"); 801 dprintk("svc_recv: found XPT_CLOSE\n");
802 if (test_and_clear_bit(XPT_KILL_TEMP, &xprt->xpt_flags))
803 xprt->xpt_ops->xpo_kill_temp_xprt(xprt);
802 svc_delete_xprt(xprt); 804 svc_delete_xprt(xprt);
803 /* Leave XPT_BUSY set on the dead xprt: */ 805 /* Leave XPT_BUSY set on the dead xprt: */
804 goto out; 806 goto out;
@@ -1020,9 +1022,11 @@ void svc_age_temp_xprts_now(struct svc_serv *serv, struct sockaddr *server_addr)
1020 le = to_be_closed.next; 1022 le = to_be_closed.next;
1021 list_del_init(le); 1023 list_del_init(le);
1022 xprt = list_entry(le, struct svc_xprt, xpt_list); 1024 xprt = list_entry(le, struct svc_xprt, xpt_list);
1023 dprintk("svc_age_temp_xprts_now: closing %p\n", xprt); 1025 set_bit(XPT_CLOSE, &xprt->xpt_flags);
1024 xprt->xpt_ops->xpo_kill_temp_xprt(xprt); 1026 set_bit(XPT_KILL_TEMP, &xprt->xpt_flags);
1025 svc_close_xprt(xprt); 1027 dprintk("svc_age_temp_xprts_now: queuing xprt %p for closing\n",
1028 xprt);
1029 svc_xprt_enqueue(xprt);
1026 } 1030 }
1027} 1031}
1028EXPORT_SYMBOL_GPL(svc_age_temp_xprts_now); 1032EXPORT_SYMBOL_GPL(svc_age_temp_xprts_now);
diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
index 57d35fbb1c28..172b537f8cfc 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
@@ -347,8 +347,6 @@ int rdma_read_chunk_frmr(struct svcxprt_rdma *xprt,
347 atomic_inc(&rdma_stat_read); 347 atomic_inc(&rdma_stat_read);
348 return ret; 348 return ret;
349 err: 349 err:
350 ib_dma_unmap_sg(xprt->sc_cm_id->device,
351 frmr->sg, frmr->sg_nents, frmr->direction);
352 svc_rdma_put_context(ctxt, 0); 350 svc_rdma_put_context(ctxt, 0);
353 svc_rdma_put_frmr(xprt, frmr); 351 svc_rdma_put_frmr(xprt, frmr);
354 return ret; 352 return ret;
diff --git a/net/tipc/discover.c b/net/tipc/discover.c
index 6b109a808d4c..02462d67d191 100644
--- a/net/tipc/discover.c
+++ b/net/tipc/discover.c
@@ -169,7 +169,7 @@ void tipc_disc_rcv(struct net *net, struct sk_buff *skb,
169 169
170 /* Send response, if necessary */ 170 /* Send response, if necessary */
171 if (respond && (mtyp == DSC_REQ_MSG)) { 171 if (respond && (mtyp == DSC_REQ_MSG)) {
172 rskb = tipc_buf_acquire(MAX_H_SIZE); 172 rskb = tipc_buf_acquire(MAX_H_SIZE, GFP_ATOMIC);
173 if (!rskb) 173 if (!rskb)
174 return; 174 return;
175 tipc_disc_init_msg(net, rskb, DSC_RESP_MSG, bearer); 175 tipc_disc_init_msg(net, rskb, DSC_RESP_MSG, bearer);
@@ -278,7 +278,7 @@ int tipc_disc_create(struct net *net, struct tipc_bearer *b,
278 req = kmalloc(sizeof(*req), GFP_ATOMIC); 278 req = kmalloc(sizeof(*req), GFP_ATOMIC);
279 if (!req) 279 if (!req)
280 return -ENOMEM; 280 return -ENOMEM;
281 req->buf = tipc_buf_acquire(MAX_H_SIZE); 281 req->buf = tipc_buf_acquire(MAX_H_SIZE, GFP_ATOMIC);
282 if (!req->buf) { 282 if (!req->buf) {
283 kfree(req); 283 kfree(req);
284 return -ENOMEM; 284 return -ENOMEM;
diff --git a/net/tipc/link.c b/net/tipc/link.c
index bda89bf9f4ff..4e8647aef01c 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -1395,7 +1395,7 @@ tnl:
1395 msg_set_seqno(hdr, seqno++); 1395 msg_set_seqno(hdr, seqno++);
1396 pktlen = msg_size(hdr); 1396 pktlen = msg_size(hdr);
1397 msg_set_size(&tnlhdr, pktlen + INT_H_SIZE); 1397 msg_set_size(&tnlhdr, pktlen + INT_H_SIZE);
1398 tnlskb = tipc_buf_acquire(pktlen + INT_H_SIZE); 1398 tnlskb = tipc_buf_acquire(pktlen + INT_H_SIZE, GFP_ATOMIC);
1399 if (!tnlskb) { 1399 if (!tnlskb) {
1400 pr_warn("%sunable to send packet\n", link_co_err); 1400 pr_warn("%sunable to send packet\n", link_co_err);
1401 return; 1401 return;
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
index a22be502f1bd..ab02d0742476 100644
--- a/net/tipc/msg.c
+++ b/net/tipc/msg.c
@@ -58,12 +58,12 @@ static unsigned int align(unsigned int i)
58 * NOTE: Headroom is reserved to allow prepending of a data link header. 58 * NOTE: Headroom is reserved to allow prepending of a data link header.
59 * There may also be unrequested tailroom present at the buffer's end. 59 * There may also be unrequested tailroom present at the buffer's end.
60 */ 60 */
61struct sk_buff *tipc_buf_acquire(u32 size) 61struct sk_buff *tipc_buf_acquire(u32 size, gfp_t gfp)
62{ 62{
63 struct sk_buff *skb; 63 struct sk_buff *skb;
64 unsigned int buf_size = (BUF_HEADROOM + size + 3) & ~3u; 64 unsigned int buf_size = (BUF_HEADROOM + size + 3) & ~3u;
65 65
66 skb = alloc_skb_fclone(buf_size, GFP_ATOMIC); 66 skb = alloc_skb_fclone(buf_size, gfp);
67 if (skb) { 67 if (skb) {
68 skb_reserve(skb, BUF_HEADROOM); 68 skb_reserve(skb, BUF_HEADROOM);
69 skb_put(skb, size); 69 skb_put(skb, size);
@@ -95,7 +95,7 @@ struct sk_buff *tipc_msg_create(uint user, uint type,
95 struct tipc_msg *msg; 95 struct tipc_msg *msg;
96 struct sk_buff *buf; 96 struct sk_buff *buf;
97 97
98 buf = tipc_buf_acquire(hdr_sz + data_sz); 98 buf = tipc_buf_acquire(hdr_sz + data_sz, GFP_ATOMIC);
99 if (unlikely(!buf)) 99 if (unlikely(!buf))
100 return NULL; 100 return NULL;
101 101
@@ -261,7 +261,7 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m,
261 261
262 /* No fragmentation needed? */ 262 /* No fragmentation needed? */
263 if (likely(msz <= pktmax)) { 263 if (likely(msz <= pktmax)) {
264 skb = tipc_buf_acquire(msz); 264 skb = tipc_buf_acquire(msz, GFP_KERNEL);
265 if (unlikely(!skb)) 265 if (unlikely(!skb))
266 return -ENOMEM; 266 return -ENOMEM;
267 skb_orphan(skb); 267 skb_orphan(skb);
@@ -282,7 +282,7 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m,
282 msg_set_importance(&pkthdr, msg_importance(mhdr)); 282 msg_set_importance(&pkthdr, msg_importance(mhdr));
283 283
284 /* Prepare first fragment */ 284 /* Prepare first fragment */
285 skb = tipc_buf_acquire(pktmax); 285 skb = tipc_buf_acquire(pktmax, GFP_KERNEL);
286 if (!skb) 286 if (!skb)
287 return -ENOMEM; 287 return -ENOMEM;
288 skb_orphan(skb); 288 skb_orphan(skb);
@@ -313,7 +313,7 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m,
313 pktsz = drem + INT_H_SIZE; 313 pktsz = drem + INT_H_SIZE;
314 else 314 else
315 pktsz = pktmax; 315 pktsz = pktmax;
316 skb = tipc_buf_acquire(pktsz); 316 skb = tipc_buf_acquire(pktsz, GFP_KERNEL);
317 if (!skb) { 317 if (!skb) {
318 rc = -ENOMEM; 318 rc = -ENOMEM;
319 goto error; 319 goto error;
@@ -448,7 +448,7 @@ bool tipc_msg_make_bundle(struct sk_buff **skb, struct tipc_msg *msg,
448 if (msz > (max / 2)) 448 if (msz > (max / 2))
449 return false; 449 return false;
450 450
451 _skb = tipc_buf_acquire(max); 451 _skb = tipc_buf_acquire(max, GFP_ATOMIC);
452 if (!_skb) 452 if (!_skb)
453 return false; 453 return false;
454 454
@@ -496,7 +496,7 @@ bool tipc_msg_reverse(u32 own_node, struct sk_buff **skb, int err)
496 496
497 /* Never return SHORT header; expand by replacing buffer if necessary */ 497 /* Never return SHORT header; expand by replacing buffer if necessary */
498 if (msg_short(hdr)) { 498 if (msg_short(hdr)) {
499 *skb = tipc_buf_acquire(BASIC_H_SIZE + dlen); 499 *skb = tipc_buf_acquire(BASIC_H_SIZE + dlen, GFP_ATOMIC);
500 if (!*skb) 500 if (!*skb)
501 goto exit; 501 goto exit;
502 memcpy((*skb)->data + BASIC_H_SIZE, msg_data(hdr), dlen); 502 memcpy((*skb)->data + BASIC_H_SIZE, msg_data(hdr), dlen);
diff --git a/net/tipc/msg.h b/net/tipc/msg.h
index 8d408612ffa4..2c3dc38abf9c 100644
--- a/net/tipc/msg.h
+++ b/net/tipc/msg.h
@@ -820,7 +820,7 @@ static inline bool msg_is_reset(struct tipc_msg *hdr)
820 return (msg_user(hdr) == LINK_PROTOCOL) && (msg_type(hdr) == RESET_MSG); 820 return (msg_user(hdr) == LINK_PROTOCOL) && (msg_type(hdr) == RESET_MSG);
821} 821}
822 822
823struct sk_buff *tipc_buf_acquire(u32 size); 823struct sk_buff *tipc_buf_acquire(u32 size, gfp_t gfp);
824bool tipc_msg_validate(struct sk_buff *skb); 824bool tipc_msg_validate(struct sk_buff *skb);
825bool tipc_msg_reverse(u32 own_addr, struct sk_buff **skb, int err); 825bool tipc_msg_reverse(u32 own_addr, struct sk_buff **skb, int err);
826void tipc_msg_init(u32 own_addr, struct tipc_msg *m, u32 user, u32 type, 826void tipc_msg_init(u32 own_addr, struct tipc_msg *m, u32 user, u32 type,
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
index c1cfd92de17a..23f8899e0f8c 100644
--- a/net/tipc/name_distr.c
+++ b/net/tipc/name_distr.c
@@ -69,7 +69,7 @@ static struct sk_buff *named_prepare_buf(struct net *net, u32 type, u32 size,
69 u32 dest) 69 u32 dest)
70{ 70{
71 struct tipc_net *tn = net_generic(net, tipc_net_id); 71 struct tipc_net *tn = net_generic(net, tipc_net_id);
72 struct sk_buff *buf = tipc_buf_acquire(INT_H_SIZE + size); 72 struct sk_buff *buf = tipc_buf_acquire(INT_H_SIZE + size, GFP_ATOMIC);
73 struct tipc_msg *msg; 73 struct tipc_msg *msg;
74 74
75 if (buf != NULL) { 75 if (buf != NULL) {
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index ef5eff93a8b8..5c1b267e22be 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -4615,6 +4615,15 @@ int cfg80211_check_station_change(struct wiphy *wiphy,
4615 break; 4615 break;
4616 } 4616 }
4617 4617
4618 /*
4619 * Older kernel versions ignored this attribute entirely, so don't
4620 * reject attempts to update it but mark it as unused instead so the
4621 * driver won't look at the data.
4622 */
4623 if (statype != CFG80211_STA_AP_CLIENT_UNASSOC &&
4624 statype != CFG80211_STA_TDLS_PEER_SETUP)
4625 params->opmode_notif_used = false;
4626
4618 return 0; 4627 return 0;
4619} 4628}
4620EXPORT_SYMBOL(cfg80211_check_station_change); 4629EXPORT_SYMBOL(cfg80211_check_station_change);
@@ -4854,6 +4863,12 @@ static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info)
4854 params.local_pm = pm; 4863 params.local_pm = pm;
4855 } 4864 }
4856 4865
4866 if (info->attrs[NL80211_ATTR_OPMODE_NOTIF]) {
4867 params.opmode_notif_used = true;
4868 params.opmode_notif =
4869 nla_get_u8(info->attrs[NL80211_ATTR_OPMODE_NOTIF]);
4870 }
4871
4857 /* Include parameters for TDLS peer (will check later) */ 4872 /* Include parameters for TDLS peer (will check later) */
4858 err = nl80211_set_station_tdls(info, &params); 4873 err = nl80211_set_station_tdls(info, &params);
4859 if (err) 4874 if (err)
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index 4a57c8a60bd9..6a6f44dd594b 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -610,6 +610,33 @@ error:
610 return ret ? : -ENOENT; 610 return ret ? : -ENOENT;
611} 611}
612 612
613/* Adjust symbol name and address */
614static int post_process_probe_trace_point(struct probe_trace_point *tp,
615 struct map *map, unsigned long offs)
616{
617 struct symbol *sym;
618 u64 addr = tp->address + tp->offset - offs;
619
620 sym = map__find_symbol(map, addr);
621 if (!sym)
622 return -ENOENT;
623
624 if (strcmp(sym->name, tp->symbol)) {
625 /* If we have no realname, use symbol for it */
626 if (!tp->realname)
627 tp->realname = tp->symbol;
628 else
629 free(tp->symbol);
630 tp->symbol = strdup(sym->name);
631 if (!tp->symbol)
632 return -ENOMEM;
633 }
634 tp->offset = addr - sym->start;
635 tp->address -= offs;
636
637 return 0;
638}
639
613/* 640/*
614 * Rename DWARF symbols to ELF symbols -- gcc sometimes optimizes functions 641 * Rename DWARF symbols to ELF symbols -- gcc sometimes optimizes functions
615 * and generate new symbols with suffixes such as .constprop.N or .isra.N 642 * and generate new symbols with suffixes such as .constprop.N or .isra.N
@@ -622,11 +649,9 @@ static int
622post_process_offline_probe_trace_events(struct probe_trace_event *tevs, 649post_process_offline_probe_trace_events(struct probe_trace_event *tevs,
623 int ntevs, const char *pathname) 650 int ntevs, const char *pathname)
624{ 651{
625 struct symbol *sym;
626 struct map *map; 652 struct map *map;
627 unsigned long stext = 0; 653 unsigned long stext = 0;
628 u64 addr; 654 int i, ret = 0;
629 int i;
630 655
631 /* Prepare a map for offline binary */ 656 /* Prepare a map for offline binary */
632 map = dso__new_map(pathname); 657 map = dso__new_map(pathname);
@@ -636,23 +661,14 @@ post_process_offline_probe_trace_events(struct probe_trace_event *tevs,
636 } 661 }
637 662
638 for (i = 0; i < ntevs; i++) { 663 for (i = 0; i < ntevs; i++) {
639 addr = tevs[i].point.address + tevs[i].point.offset - stext; 664 ret = post_process_probe_trace_point(&tevs[i].point,
640 sym = map__find_symbol(map, addr); 665 map, stext);
641 if (!sym) 666 if (ret < 0)
642 continue; 667 break;
643 if (!strcmp(sym->name, tevs[i].point.symbol))
644 continue;
645 /* If we have no realname, use symbol for it */
646 if (!tevs[i].point.realname)
647 tevs[i].point.realname = tevs[i].point.symbol;
648 else
649 free(tevs[i].point.symbol);
650 tevs[i].point.symbol = strdup(sym->name);
651 tevs[i].point.offset = addr - sym->start;
652 } 668 }
653 map__put(map); 669 map__put(map);
654 670
655 return 0; 671 return ret;
656} 672}
657 673
658static int add_exec_to_probe_trace_events(struct probe_trace_event *tevs, 674static int add_exec_to_probe_trace_events(struct probe_trace_event *tevs,
@@ -682,18 +698,31 @@ static int add_exec_to_probe_trace_events(struct probe_trace_event *tevs,
682 return ret; 698 return ret;
683} 699}
684 700
685static int add_module_to_probe_trace_events(struct probe_trace_event *tevs, 701static int
686 int ntevs, const char *module) 702post_process_module_probe_trace_events(struct probe_trace_event *tevs,
703 int ntevs, const char *module,
704 struct debuginfo *dinfo)
687{ 705{
706 Dwarf_Addr text_offs = 0;
688 int i, ret = 0; 707 int i, ret = 0;
689 char *mod_name = NULL; 708 char *mod_name = NULL;
709 struct map *map;
690 710
691 if (!module) 711 if (!module)
692 return 0; 712 return 0;
693 713
694 mod_name = find_module_name(module); 714 map = get_target_map(module, false);
715 if (!map || debuginfo__get_text_offset(dinfo, &text_offs, true) < 0) {
716 pr_warning("Failed to get ELF symbols for %s\n", module);
717 return -EINVAL;
718 }
695 719
720 mod_name = find_module_name(module);
696 for (i = 0; i < ntevs; i++) { 721 for (i = 0; i < ntevs; i++) {
722 ret = post_process_probe_trace_point(&tevs[i].point,
723 map, (unsigned long)text_offs);
724 if (ret < 0)
725 break;
697 tevs[i].point.module = 726 tevs[i].point.module =
698 strdup(mod_name ? mod_name : module); 727 strdup(mod_name ? mod_name : module);
699 if (!tevs[i].point.module) { 728 if (!tevs[i].point.module) {
@@ -703,6 +732,8 @@ static int add_module_to_probe_trace_events(struct probe_trace_event *tevs,
703 } 732 }
704 733
705 free(mod_name); 734 free(mod_name);
735 map__put(map);
736
706 return ret; 737 return ret;
707} 738}
708 739
@@ -760,7 +791,7 @@ arch__post_process_probe_trace_events(struct perf_probe_event *pev __maybe_unuse
760static int post_process_probe_trace_events(struct perf_probe_event *pev, 791static int post_process_probe_trace_events(struct perf_probe_event *pev,
761 struct probe_trace_event *tevs, 792 struct probe_trace_event *tevs,
762 int ntevs, const char *module, 793 int ntevs, const char *module,
763 bool uprobe) 794 bool uprobe, struct debuginfo *dinfo)
764{ 795{
765 int ret; 796 int ret;
766 797
@@ -768,7 +799,8 @@ static int post_process_probe_trace_events(struct perf_probe_event *pev,
768 ret = add_exec_to_probe_trace_events(tevs, ntevs, module); 799 ret = add_exec_to_probe_trace_events(tevs, ntevs, module);
769 else if (module) 800 else if (module)
770 /* Currently ref_reloc_sym based probe is not for drivers */ 801 /* Currently ref_reloc_sym based probe is not for drivers */
771 ret = add_module_to_probe_trace_events(tevs, ntevs, module); 802 ret = post_process_module_probe_trace_events(tevs, ntevs,
803 module, dinfo);
772 else 804 else
773 ret = post_process_kernel_probe_trace_events(tevs, ntevs); 805 ret = post_process_kernel_probe_trace_events(tevs, ntevs);
774 806
@@ -812,30 +844,27 @@ static int try_to_find_probe_trace_events(struct perf_probe_event *pev,
812 } 844 }
813 } 845 }
814 846
815 debuginfo__delete(dinfo);
816
817 if (ntevs > 0) { /* Succeeded to find trace events */ 847 if (ntevs > 0) { /* Succeeded to find trace events */
818 pr_debug("Found %d probe_trace_events.\n", ntevs); 848 pr_debug("Found %d probe_trace_events.\n", ntevs);
819 ret = post_process_probe_trace_events(pev, *tevs, ntevs, 849 ret = post_process_probe_trace_events(pev, *tevs, ntevs,
820 pev->target, pev->uprobes); 850 pev->target, pev->uprobes, dinfo);
821 if (ret < 0 || ret == ntevs) { 851 if (ret < 0 || ret == ntevs) {
852 pr_debug("Post processing failed or all events are skipped. (%d)\n", ret);
822 clear_probe_trace_events(*tevs, ntevs); 853 clear_probe_trace_events(*tevs, ntevs);
823 zfree(tevs); 854 zfree(tevs);
855 ntevs = 0;
824 } 856 }
825 if (ret != ntevs)
826 return ret < 0 ? ret : ntevs;
827 ntevs = 0;
828 /* Fall through */
829 } 857 }
830 858
859 debuginfo__delete(dinfo);
860
831 if (ntevs == 0) { /* No error but failed to find probe point. */ 861 if (ntevs == 0) { /* No error but failed to find probe point. */
832 pr_warning("Probe point '%s' not found.\n", 862 pr_warning("Probe point '%s' not found.\n",
833 synthesize_perf_probe_point(&pev->point)); 863 synthesize_perf_probe_point(&pev->point));
834 return -ENOENT; 864 return -ENOENT;
835 } 865 } else if (ntevs < 0) {
836 /* Error path : ntevs < 0 */ 866 /* Error path : ntevs < 0 */
837 pr_debug("An error occurred in debuginfo analysis (%d).\n", ntevs); 867 pr_debug("An error occurred in debuginfo analysis (%d).\n", ntevs);
838 if (ntevs < 0) {
839 if (ntevs == -EBADF) 868 if (ntevs == -EBADF)
840 pr_warning("Warning: No dwarf info found in the vmlinux - " 869 pr_warning("Warning: No dwarf info found in the vmlinux - "
841 "please rebuild kernel with CONFIG_DEBUG_INFO=y.\n"); 870 "please rebuild kernel with CONFIG_DEBUG_INFO=y.\n");
diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
index df4debe564da..0d9d6e0803b8 100644
--- a/tools/perf/util/probe-finder.c
+++ b/tools/perf/util/probe-finder.c
@@ -1501,7 +1501,8 @@ int debuginfo__find_available_vars_at(struct debuginfo *dbg,
1501} 1501}
1502 1502
1503/* For the kernel module, we need a special code to get a DIE */ 1503/* For the kernel module, we need a special code to get a DIE */
1504static int debuginfo__get_text_offset(struct debuginfo *dbg, Dwarf_Addr *offs) 1504int debuginfo__get_text_offset(struct debuginfo *dbg, Dwarf_Addr *offs,
1505 bool adjust_offset)
1505{ 1506{
1506 int n, i; 1507 int n, i;
1507 Elf32_Word shndx; 1508 Elf32_Word shndx;
@@ -1530,6 +1531,8 @@ static int debuginfo__get_text_offset(struct debuginfo *dbg, Dwarf_Addr *offs)
1530 if (!shdr) 1531 if (!shdr)
1531 return -ENOENT; 1532 return -ENOENT;
1532 *offs = shdr->sh_addr; 1533 *offs = shdr->sh_addr;
1534 if (adjust_offset)
1535 *offs -= shdr->sh_offset;
1533 } 1536 }
1534 } 1537 }
1535 return 0; 1538 return 0;
@@ -1543,16 +1546,12 @@ int debuginfo__find_probe_point(struct debuginfo *dbg, unsigned long addr,
1543 Dwarf_Addr _addr = 0, baseaddr = 0; 1546 Dwarf_Addr _addr = 0, baseaddr = 0;
1544 const char *fname = NULL, *func = NULL, *basefunc = NULL, *tmp; 1547 const char *fname = NULL, *func = NULL, *basefunc = NULL, *tmp;
1545 int baseline = 0, lineno = 0, ret = 0; 1548 int baseline = 0, lineno = 0, ret = 0;
1546 bool reloc = false;
1547 1549
1548retry: 1550 /* We always need to relocate the address for aranges */
1551 if (debuginfo__get_text_offset(dbg, &baseaddr, false) == 0)
1552 addr += baseaddr;
1549 /* Find cu die */ 1553 /* Find cu die */
1550 if (!dwarf_addrdie(dbg->dbg, (Dwarf_Addr)addr, &cudie)) { 1554 if (!dwarf_addrdie(dbg->dbg, (Dwarf_Addr)addr, &cudie)) {
1551 if (!reloc && debuginfo__get_text_offset(dbg, &baseaddr) == 0) {
1552 addr += baseaddr;
1553 reloc = true;
1554 goto retry;
1555 }
1556 pr_warning("Failed to find debug information for address %lx\n", 1555 pr_warning("Failed to find debug information for address %lx\n",
1557 addr); 1556 addr);
1558 ret = -EINVAL; 1557 ret = -EINVAL;
diff --git a/tools/perf/util/probe-finder.h b/tools/perf/util/probe-finder.h
index f1d8558f498e..2956c5198652 100644
--- a/tools/perf/util/probe-finder.h
+++ b/tools/perf/util/probe-finder.h
@@ -46,6 +46,9 @@ int debuginfo__find_trace_events(struct debuginfo *dbg,
46int debuginfo__find_probe_point(struct debuginfo *dbg, unsigned long addr, 46int debuginfo__find_probe_point(struct debuginfo *dbg, unsigned long addr,
47 struct perf_probe_point *ppt); 47 struct perf_probe_point *ppt);
48 48
49int debuginfo__get_text_offset(struct debuginfo *dbg, Dwarf_Addr *offs,
50 bool adjust_offset);
51
49/* Find a line range */ 52/* Find a line range */
50int debuginfo__find_line_range(struct debuginfo *dbg, struct line_range *lr); 53int debuginfo__find_line_range(struct debuginfo *dbg, struct line_range *lr);
51 54
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/pmc56_overflow_test.c b/tools/testing/selftests/powerpc/pmu/ebb/pmc56_overflow_test.c
index c22860ab9733..30e1ac62e8cb 100644
--- a/tools/testing/selftests/powerpc/pmu/ebb/pmc56_overflow_test.c
+++ b/tools/testing/selftests/powerpc/pmu/ebb/pmc56_overflow_test.c
@@ -66,7 +66,7 @@ int pmc56_overflow(void)
66 66
67 FAIL_IF(ebb_event_enable(&event)); 67 FAIL_IF(ebb_event_enable(&event));
68 68
69 mtspr(SPRN_PMC1, pmc_sample_period(sample_period)); 69 mtspr(SPRN_PMC2, pmc_sample_period(sample_period));
70 mtspr(SPRN_PMC5, 0); 70 mtspr(SPRN_PMC5, 0);
71 mtspr(SPRN_PMC6, 0); 71 mtspr(SPRN_PMC6, 0);
72 72
diff --git a/tools/virtio/ringtest/main.h b/tools/virtio/ringtest/main.h
index 34e63cc4c572..14142faf040b 100644
--- a/tools/virtio/ringtest/main.h
+++ b/tools/virtio/ringtest/main.h
@@ -26,6 +26,16 @@ static inline void wait_cycles(unsigned long long cycles)
26#define VMEXIT_CYCLES 500 26#define VMEXIT_CYCLES 500
27#define VMENTRY_CYCLES 500 27#define VMENTRY_CYCLES 500
28 28
29#elif defined(__s390x__)
30static inline void wait_cycles(unsigned long long cycles)
31{
32 asm volatile("0: brctg %0,0b" : : "d" (cycles));
33}
34
35/* tweak me */
36#define VMEXIT_CYCLES 200
37#define VMENTRY_CYCLES 200
38
29#else 39#else
30static inline void wait_cycles(unsigned long long cycles) 40static inline void wait_cycles(unsigned long long cycles)
31{ 41{
@@ -81,6 +91,8 @@ extern unsigned ring_size;
81/* Is there a portable way to do this? */ 91/* Is there a portable way to do this? */
82#if defined(__x86_64__) || defined(__i386__) 92#if defined(__x86_64__) || defined(__i386__)
83#define cpu_relax() asm ("rep; nop" ::: "memory") 93#define cpu_relax() asm ("rep; nop" ::: "memory")
94#elif defined(__s390x__)
95#define cpu_relax() barrier()
84#else 96#else
85#define cpu_relax() assert(0) 97#define cpu_relax() assert(0)
86#endif 98#endif
diff --git a/tools/virtio/ringtest/run-on-all.sh b/tools/virtio/ringtest/run-on-all.sh
index 2e69ca812b4c..29b0d3920bfc 100755
--- a/tools/virtio/ringtest/run-on-all.sh
+++ b/tools/virtio/ringtest/run-on-all.sh
@@ -1,12 +1,13 @@
1#!/bin/sh 1#!/bin/sh
2 2
3CPUS_ONLINE=$(lscpu --online -p=cpu|grep -v -e '#')
3#use last CPU for host. Why not the first? 4#use last CPU for host. Why not the first?
4#many devices tend to use cpu0 by default so 5#many devices tend to use cpu0 by default so
5#it tends to be busier 6#it tends to be busier
6HOST_AFFINITY=$(lscpu -p=cpu | tail -1) 7HOST_AFFINITY=$(echo "${CPUS_ONLINE}"|tail -n 1)
7 8
8#run command on all cpus 9#run command on all cpus
9for cpu in $(seq 0 $HOST_AFFINITY) 10for cpu in $CPUS_ONLINE
10do 11do
11 #Don't run guest and host on same CPU 12 #Don't run guest and host on same CPU
12 #It actually works ok if using signalling 13 #It actually works ok if using signalling
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
index a2dbbccbb6a3..35d7100e0815 100644
--- a/virt/kvm/arm/arch_timer.c
+++ b/virt/kvm/arm/arch_timer.c
@@ -24,6 +24,7 @@
24 24
25#include <clocksource/arm_arch_timer.h> 25#include <clocksource/arm_arch_timer.h>
26#include <asm/arch_timer.h> 26#include <asm/arch_timer.h>
27#include <asm/kvm_hyp.h>
27 28
28#include <kvm/arm_vgic.h> 29#include <kvm/arm_vgic.h>
29#include <kvm/arm_arch_timer.h> 30#include <kvm/arm_arch_timer.h>
@@ -36,10 +37,10 @@ static u32 host_vtimer_irq_flags;
36 37
37void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu) 38void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu)
38{ 39{
39 vcpu->arch.timer_cpu.active_cleared_last = false; 40 vcpu_vtimer(vcpu)->active_cleared_last = false;
40} 41}
41 42
42static u64 kvm_phys_timer_read(void) 43u64 kvm_phys_timer_read(void)
43{ 44{
44 return timecounter->cc->read(timecounter->cc); 45 return timecounter->cc->read(timecounter->cc);
45} 46}
@@ -89,9 +90,6 @@ static void kvm_timer_inject_irq_work(struct work_struct *work)
89 struct kvm_vcpu *vcpu; 90 struct kvm_vcpu *vcpu;
90 91
91 vcpu = container_of(work, struct kvm_vcpu, arch.timer_cpu.expired); 92 vcpu = container_of(work, struct kvm_vcpu, arch.timer_cpu.expired);
92 vcpu->arch.timer_cpu.armed = false;
93
94 WARN_ON(!kvm_timer_should_fire(vcpu));
95 93
96 /* 94 /*
97 * If the vcpu is blocked we want to wake it up so that it will see 95 * If the vcpu is blocked we want to wake it up so that it will see
@@ -100,12 +98,12 @@ static void kvm_timer_inject_irq_work(struct work_struct *work)
100 kvm_vcpu_kick(vcpu); 98 kvm_vcpu_kick(vcpu);
101} 99}
102 100
103static u64 kvm_timer_compute_delta(struct kvm_vcpu *vcpu) 101static u64 kvm_timer_compute_delta(struct arch_timer_context *timer_ctx)
104{ 102{
105 u64 cval, now; 103 u64 cval, now;
106 104
107 cval = vcpu->arch.timer_cpu.cntv_cval; 105 cval = timer_ctx->cnt_cval;
108 now = kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff; 106 now = kvm_phys_timer_read() - timer_ctx->cntvoff;
109 107
110 if (now < cval) { 108 if (now < cval) {
111 u64 ns; 109 u64 ns;
@@ -120,6 +118,35 @@ static u64 kvm_timer_compute_delta(struct kvm_vcpu *vcpu)
120 return 0; 118 return 0;
121} 119}
122 120
121static bool kvm_timer_irq_can_fire(struct arch_timer_context *timer_ctx)
122{
123 return !(timer_ctx->cnt_ctl & ARCH_TIMER_CTRL_IT_MASK) &&
124 (timer_ctx->cnt_ctl & ARCH_TIMER_CTRL_ENABLE);
125}
126
127/*
128 * Returns the earliest expiration time in ns among guest timers.
129 * Note that it will return 0 if none of timers can fire.
130 */
131static u64 kvm_timer_earliest_exp(struct kvm_vcpu *vcpu)
132{
133 u64 min_virt = ULLONG_MAX, min_phys = ULLONG_MAX;
134 struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
135 struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
136
137 if (kvm_timer_irq_can_fire(vtimer))
138 min_virt = kvm_timer_compute_delta(vtimer);
139
140 if (kvm_timer_irq_can_fire(ptimer))
141 min_phys = kvm_timer_compute_delta(ptimer);
142
143 /* If none of timers can fire, then return 0 */
144 if ((min_virt == ULLONG_MAX) && (min_phys == ULLONG_MAX))
145 return 0;
146
147 return min(min_virt, min_phys);
148}
149
123static enum hrtimer_restart kvm_timer_expire(struct hrtimer *hrt) 150static enum hrtimer_restart kvm_timer_expire(struct hrtimer *hrt)
124{ 151{
125 struct arch_timer_cpu *timer; 152 struct arch_timer_cpu *timer;
@@ -134,7 +161,7 @@ static enum hrtimer_restart kvm_timer_expire(struct hrtimer *hrt)
134 * PoV (NTP on the host may have forced it to expire 161 * PoV (NTP on the host may have forced it to expire
135 * early). If we should have slept longer, restart it. 162 * early). If we should have slept longer, restart it.
136 */ 163 */
137 ns = kvm_timer_compute_delta(vcpu); 164 ns = kvm_timer_earliest_exp(vcpu);
138 if (unlikely(ns)) { 165 if (unlikely(ns)) {
139 hrtimer_forward_now(hrt, ns_to_ktime(ns)); 166 hrtimer_forward_now(hrt, ns_to_ktime(ns));
140 return HRTIMER_RESTART; 167 return HRTIMER_RESTART;
@@ -144,42 +171,33 @@ static enum hrtimer_restart kvm_timer_expire(struct hrtimer *hrt)
144 return HRTIMER_NORESTART; 171 return HRTIMER_NORESTART;
145} 172}
146 173
147static bool kvm_timer_irq_can_fire(struct kvm_vcpu *vcpu) 174bool kvm_timer_should_fire(struct arch_timer_context *timer_ctx)
148{ 175{
149 struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
150
151 return !(timer->cntv_ctl & ARCH_TIMER_CTRL_IT_MASK) &&
152 (timer->cntv_ctl & ARCH_TIMER_CTRL_ENABLE);
153}
154
155bool kvm_timer_should_fire(struct kvm_vcpu *vcpu)
156{
157 struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
158 u64 cval, now; 176 u64 cval, now;
159 177
160 if (!kvm_timer_irq_can_fire(vcpu)) 178 if (!kvm_timer_irq_can_fire(timer_ctx))
161 return false; 179 return false;
162 180
163 cval = timer->cntv_cval; 181 cval = timer_ctx->cnt_cval;
164 now = kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff; 182 now = kvm_phys_timer_read() - timer_ctx->cntvoff;
165 183
166 return cval <= now; 184 return cval <= now;
167} 185}
168 186
169static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level) 187static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level,
188 struct arch_timer_context *timer_ctx)
170{ 189{
171 int ret; 190 int ret;
172 struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
173 191
174 BUG_ON(!vgic_initialized(vcpu->kvm)); 192 BUG_ON(!vgic_initialized(vcpu->kvm));
175 193
176 timer->active_cleared_last = false; 194 timer_ctx->active_cleared_last = false;
177 timer->irq.level = new_level; 195 timer_ctx->irq.level = new_level;
178 trace_kvm_timer_update_irq(vcpu->vcpu_id, timer->irq.irq, 196 trace_kvm_timer_update_irq(vcpu->vcpu_id, timer_ctx->irq.irq,
179 timer->irq.level); 197 timer_ctx->irq.level);
180 ret = kvm_vgic_inject_mapped_irq(vcpu->kvm, vcpu->vcpu_id, 198
181 timer->irq.irq, 199 ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id, timer_ctx->irq.irq,
182 timer->irq.level); 200 timer_ctx->irq.level);
183 WARN_ON(ret); 201 WARN_ON(ret);
184} 202}
185 203
@@ -190,22 +208,43 @@ static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level)
190static int kvm_timer_update_state(struct kvm_vcpu *vcpu) 208static int kvm_timer_update_state(struct kvm_vcpu *vcpu)
191{ 209{
192 struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; 210 struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
211 struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
212 struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
193 213
194 /* 214 /*
195 * If userspace modified the timer registers via SET_ONE_REG before 215 * If userspace modified the timer registers via SET_ONE_REG before
196 * the vgic was initialized, we mustn't set the timer->irq.level value 216 * the vgic was initialized, we mustn't set the vtimer->irq.level value
197 * because the guest would never see the interrupt. Instead wait 217 * because the guest would never see the interrupt. Instead wait
198 * until we call this function from kvm_timer_flush_hwstate. 218 * until we call this function from kvm_timer_flush_hwstate.
199 */ 219 */
200 if (!vgic_initialized(vcpu->kvm) || !timer->enabled) 220 if (!vgic_initialized(vcpu->kvm) || !timer->enabled)
201 return -ENODEV; 221 return -ENODEV;
202 222
203 if (kvm_timer_should_fire(vcpu) != timer->irq.level) 223 if (kvm_timer_should_fire(vtimer) != vtimer->irq.level)
204 kvm_timer_update_irq(vcpu, !timer->irq.level); 224 kvm_timer_update_irq(vcpu, !vtimer->irq.level, vtimer);
225
226 if (kvm_timer_should_fire(ptimer) != ptimer->irq.level)
227 kvm_timer_update_irq(vcpu, !ptimer->irq.level, ptimer);
205 228
206 return 0; 229 return 0;
207} 230}
208 231
232/* Schedule the background timer for the emulated timer. */
233static void kvm_timer_emulate(struct kvm_vcpu *vcpu,
234 struct arch_timer_context *timer_ctx)
235{
236 struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
237
238 if (kvm_timer_should_fire(timer_ctx))
239 return;
240
241 if (!kvm_timer_irq_can_fire(timer_ctx))
242 return;
243
244 /* The timer has not yet expired, schedule a background timer */
245 timer_arm(timer, kvm_timer_compute_delta(timer_ctx));
246}
247
209/* 248/*
210 * Schedule the background timer before calling kvm_vcpu_block, so that this 249 * Schedule the background timer before calling kvm_vcpu_block, so that this
211 * thread is removed from its waitqueue and made runnable when there's a timer 250 * thread is removed from its waitqueue and made runnable when there's a timer
@@ -214,26 +253,31 @@ static int kvm_timer_update_state(struct kvm_vcpu *vcpu)
214void kvm_timer_schedule(struct kvm_vcpu *vcpu) 253void kvm_timer_schedule(struct kvm_vcpu *vcpu)
215{ 254{
216 struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; 255 struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
256 struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
257 struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
217 258
218 BUG_ON(timer_is_armed(timer)); 259 BUG_ON(timer_is_armed(timer));
219 260
220 /* 261 /*
221 * No need to schedule a background timer if the guest timer has 262 * No need to schedule a background timer if any guest timer has
222 * already expired, because kvm_vcpu_block will return before putting 263 * already expired, because kvm_vcpu_block will return before putting
223 * the thread to sleep. 264 * the thread to sleep.
224 */ 265 */
225 if (kvm_timer_should_fire(vcpu)) 266 if (kvm_timer_should_fire(vtimer) || kvm_timer_should_fire(ptimer))
226 return; 267 return;
227 268
228 /* 269 /*
229 * If the timer is not capable of raising interrupts (disabled or 270 * If both timers are not capable of raising interrupts (disabled or
230 * masked), then there's no more work for us to do. 271 * masked), then there's no more work for us to do.
231 */ 272 */
232 if (!kvm_timer_irq_can_fire(vcpu)) 273 if (!kvm_timer_irq_can_fire(vtimer) && !kvm_timer_irq_can_fire(ptimer))
233 return; 274 return;
234 275
235 /* The timer has not yet expired, schedule a background timer */ 276 /*
236 timer_arm(timer, kvm_timer_compute_delta(vcpu)); 277 * The guest timers have not yet expired, schedule a background timer.
278 * Set the earliest expiration time among the guest timers.
279 */
280 timer_arm(timer, kvm_timer_earliest_exp(vcpu));
237} 281}
238 282
239void kvm_timer_unschedule(struct kvm_vcpu *vcpu) 283void kvm_timer_unschedule(struct kvm_vcpu *vcpu)
@@ -251,13 +295,16 @@ void kvm_timer_unschedule(struct kvm_vcpu *vcpu)
251 */ 295 */
252void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu) 296void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu)
253{ 297{
254 struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; 298 struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
255 bool phys_active; 299 bool phys_active;
256 int ret; 300 int ret;
257 301
258 if (kvm_timer_update_state(vcpu)) 302 if (kvm_timer_update_state(vcpu))
259 return; 303 return;
260 304
305 /* Set the background timer for the physical timer emulation. */
306 kvm_timer_emulate(vcpu, vcpu_ptimer(vcpu));
307
261 /* 308 /*
262 * If we enter the guest with the virtual input level to the VGIC 309 * If we enter the guest with the virtual input level to the VGIC
263 * asserted, then we have already told the VGIC what we need to, and 310 * asserted, then we have already told the VGIC what we need to, and
@@ -275,8 +322,8 @@ void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu)
275 * to ensure that hardware interrupts from the timer triggers a guest 322 * to ensure that hardware interrupts from the timer triggers a guest
276 * exit. 323 * exit.
277 */ 324 */
278 phys_active = timer->irq.level || 325 phys_active = vtimer->irq.level ||
279 kvm_vgic_map_is_active(vcpu, timer->irq.irq); 326 kvm_vgic_map_is_active(vcpu, vtimer->irq.irq);
280 327
281 /* 328 /*
282 * We want to avoid hitting the (re)distributor as much as 329 * We want to avoid hitting the (re)distributor as much as
@@ -298,7 +345,7 @@ void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu)
298 * - cached value is "active clear" 345 * - cached value is "active clear"
299 * - value to be programmed is "active clear" 346 * - value to be programmed is "active clear"
300 */ 347 */
301 if (timer->active_cleared_last && !phys_active) 348 if (vtimer->active_cleared_last && !phys_active)
302 return; 349 return;
303 350
304 ret = irq_set_irqchip_state(host_vtimer_irq, 351 ret = irq_set_irqchip_state(host_vtimer_irq,
@@ -306,7 +353,7 @@ void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu)
306 phys_active); 353 phys_active);
307 WARN_ON(ret); 354 WARN_ON(ret);
308 355
309 timer->active_cleared_last = !phys_active; 356 vtimer->active_cleared_last = !phys_active;
310} 357}
311 358
312/** 359/**
@@ -320,7 +367,11 @@ void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu)
320{ 367{
321 struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; 368 struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
322 369
323 BUG_ON(timer_is_armed(timer)); 370 /*
371 * This is to cancel the background timer for the physical timer
372 * emulation if it is set.
373 */
374 timer_disarm(timer);
324 375
325 /* 376 /*
326 * The guest could have modified the timer registers or the timer 377 * The guest could have modified the timer registers or the timer
@@ -330,9 +381,11 @@ void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu)
330} 381}
331 382
332int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu, 383int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu,
333 const struct kvm_irq_level *irq) 384 const struct kvm_irq_level *virt_irq,
385 const struct kvm_irq_level *phys_irq)
334{ 386{
335 struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; 387 struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
388 struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
336 389
337 /* 390 /*
338 * The vcpu timer irq number cannot be determined in 391 * The vcpu timer irq number cannot be determined in
@@ -340,7 +393,8 @@ int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu,
340 * kvm_vcpu_set_target(). To handle this, we determine 393 * kvm_vcpu_set_target(). To handle this, we determine
341 * vcpu timer irq number when the vcpu is reset. 394 * vcpu timer irq number when the vcpu is reset.
342 */ 395 */
343 timer->irq.irq = irq->irq; 396 vtimer->irq.irq = virt_irq->irq;
397 ptimer->irq.irq = phys_irq->irq;
344 398
345 /* 399 /*
346 * The bits in CNTV_CTL are architecturally reset to UNKNOWN for ARMv8 400 * The bits in CNTV_CTL are architecturally reset to UNKNOWN for ARMv8
@@ -348,16 +402,40 @@ int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu,
348 * resets the timer to be disabled and unmasked and is compliant with 402 * resets the timer to be disabled and unmasked and is compliant with
349 * the ARMv7 architecture. 403 * the ARMv7 architecture.
350 */ 404 */
351 timer->cntv_ctl = 0; 405 vtimer->cnt_ctl = 0;
406 ptimer->cnt_ctl = 0;
352 kvm_timer_update_state(vcpu); 407 kvm_timer_update_state(vcpu);
353 408
354 return 0; 409 return 0;
355} 410}
356 411
412/* Make the updates of cntvoff for all vtimer contexts atomic */
413static void update_vtimer_cntvoff(struct kvm_vcpu *vcpu, u64 cntvoff)
414{
415 int i;
416 struct kvm *kvm = vcpu->kvm;
417 struct kvm_vcpu *tmp;
418
419 mutex_lock(&kvm->lock);
420 kvm_for_each_vcpu(i, tmp, kvm)
421 vcpu_vtimer(tmp)->cntvoff = cntvoff;
422
423 /*
424 * When called from the vcpu create path, the CPU being created is not
425 * included in the loop above, so we just set it here as well.
426 */
427 vcpu_vtimer(vcpu)->cntvoff = cntvoff;
428 mutex_unlock(&kvm->lock);
429}
430
357void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu) 431void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu)
358{ 432{
359 struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; 433 struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
360 434
435 /* Synchronize cntvoff across all vtimers of a VM. */
436 update_vtimer_cntvoff(vcpu, kvm_phys_timer_read());
437 vcpu_ptimer(vcpu)->cntvoff = 0;
438
361 INIT_WORK(&timer->expired, kvm_timer_inject_irq_work); 439 INIT_WORK(&timer->expired, kvm_timer_inject_irq_work);
362 hrtimer_init(&timer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); 440 hrtimer_init(&timer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
363 timer->timer.function = kvm_timer_expire; 441 timer->timer.function = kvm_timer_expire;
@@ -370,17 +448,17 @@ static void kvm_timer_init_interrupt(void *info)
370 448
371int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value) 449int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value)
372{ 450{
373 struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; 451 struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
374 452
375 switch (regid) { 453 switch (regid) {
376 case KVM_REG_ARM_TIMER_CTL: 454 case KVM_REG_ARM_TIMER_CTL:
377 timer->cntv_ctl = value; 455 vtimer->cnt_ctl = value;
378 break; 456 break;
379 case KVM_REG_ARM_TIMER_CNT: 457 case KVM_REG_ARM_TIMER_CNT:
380 vcpu->kvm->arch.timer.cntvoff = kvm_phys_timer_read() - value; 458 update_vtimer_cntvoff(vcpu, kvm_phys_timer_read() - value);
381 break; 459 break;
382 case KVM_REG_ARM_TIMER_CVAL: 460 case KVM_REG_ARM_TIMER_CVAL:
383 timer->cntv_cval = value; 461 vtimer->cnt_cval = value;
384 break; 462 break;
385 default: 463 default:
386 return -1; 464 return -1;
@@ -392,15 +470,15 @@ int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value)
392 470
393u64 kvm_arm_timer_get_reg(struct kvm_vcpu *vcpu, u64 regid) 471u64 kvm_arm_timer_get_reg(struct kvm_vcpu *vcpu, u64 regid)
394{ 472{
395 struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; 473 struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
396 474
397 switch (regid) { 475 switch (regid) {
398 case KVM_REG_ARM_TIMER_CTL: 476 case KVM_REG_ARM_TIMER_CTL:
399 return timer->cntv_ctl; 477 return vtimer->cnt_ctl;
400 case KVM_REG_ARM_TIMER_CNT: 478 case KVM_REG_ARM_TIMER_CNT:
401 return kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff; 479 return kvm_phys_timer_read() - vtimer->cntvoff;
402 case KVM_REG_ARM_TIMER_CVAL: 480 case KVM_REG_ARM_TIMER_CVAL:
403 return timer->cntv_cval; 481 return vtimer->cnt_cval;
404 } 482 }
405 return (u64)-1; 483 return (u64)-1;
406} 484}
@@ -464,14 +542,16 @@ int kvm_timer_hyp_init(void)
464void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu) 542void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu)
465{ 543{
466 struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; 544 struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
545 struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
467 546
468 timer_disarm(timer); 547 timer_disarm(timer);
469 kvm_vgic_unmap_phys_irq(vcpu, timer->irq.irq); 548 kvm_vgic_unmap_phys_irq(vcpu, vtimer->irq.irq);
470} 549}
471 550
472int kvm_timer_enable(struct kvm_vcpu *vcpu) 551int kvm_timer_enable(struct kvm_vcpu *vcpu)
473{ 552{
474 struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; 553 struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
554 struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
475 struct irq_desc *desc; 555 struct irq_desc *desc;
476 struct irq_data *data; 556 struct irq_data *data;
477 int phys_irq; 557 int phys_irq;
@@ -499,7 +579,7 @@ int kvm_timer_enable(struct kvm_vcpu *vcpu)
499 * Tell the VGIC that the virtual interrupt is tied to a 579 * Tell the VGIC that the virtual interrupt is tied to a
500 * physical interrupt. We do that once per VCPU. 580 * physical interrupt. We do that once per VCPU.
501 */ 581 */
502 ret = kvm_vgic_map_phys_irq(vcpu, timer->irq.irq, phys_irq); 582 ret = kvm_vgic_map_phys_irq(vcpu, vtimer->irq.irq, phys_irq);
503 if (ret) 583 if (ret)
504 return ret; 584 return ret;
505 585
@@ -508,7 +588,24 @@ int kvm_timer_enable(struct kvm_vcpu *vcpu)
508 return 0; 588 return 0;
509} 589}
510 590
511void kvm_timer_init(struct kvm *kvm) 591/*
592 * On VHE system, we only need to configure trap on physical timer and counter
593 * accesses in EL0 and EL1 once, not for every world switch.
594 * The host kernel runs at EL2 with HCR_EL2.TGE == 1,
595 * and this makes those bits have no effect for the host kernel execution.
596 */
597void kvm_timer_init_vhe(void)
512{ 598{
513 kvm->arch.timer.cntvoff = kvm_phys_timer_read(); 599 /* When HCR_EL2.E2H ==1, EL1PCEN and EL1PCTEN are shifted by 10 */
600 u32 cnthctl_shift = 10;
601 u64 val;
602
603 /*
604 * Disallow physical timer access for the guest.
605 * Physical counter access is allowed.
606 */
607 val = read_sysreg(cnthctl_el2);
608 val &= ~(CNTHCTL_EL1PCEN << cnthctl_shift);
609 val |= (CNTHCTL_EL1PCTEN << cnthctl_shift);
610 write_sysreg(val, cnthctl_el2);
514} 611}
diff --git a/virt/kvm/arm/hyp/timer-sr.c b/virt/kvm/arm/hyp/timer-sr.c
index 798866a8d875..4734915ab71f 100644
--- a/virt/kvm/arm/hyp/timer-sr.c
+++ b/virt/kvm/arm/hyp/timer-sr.c
@@ -25,20 +25,27 @@
25void __hyp_text __timer_save_state(struct kvm_vcpu *vcpu) 25void __hyp_text __timer_save_state(struct kvm_vcpu *vcpu)
26{ 26{
27 struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; 27 struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
28 struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
28 u64 val; 29 u64 val;
29 30
30 if (timer->enabled) { 31 if (timer->enabled) {
31 timer->cntv_ctl = read_sysreg_el0(cntv_ctl); 32 vtimer->cnt_ctl = read_sysreg_el0(cntv_ctl);
32 timer->cntv_cval = read_sysreg_el0(cntv_cval); 33 vtimer->cnt_cval = read_sysreg_el0(cntv_cval);
33 } 34 }
34 35
35 /* Disable the virtual timer */ 36 /* Disable the virtual timer */
36 write_sysreg_el0(0, cntv_ctl); 37 write_sysreg_el0(0, cntv_ctl);
37 38
38 /* Allow physical timer/counter access for the host */ 39 /*
39 val = read_sysreg(cnthctl_el2); 40 * We don't need to do this for VHE since the host kernel runs in EL2
40 val |= CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN; 41 * with HCR_EL2.TGE ==1, which makes those bits have no impact.
41 write_sysreg(val, cnthctl_el2); 42 */
43 if (!has_vhe()) {
44 /* Allow physical timer/counter access for the host */
45 val = read_sysreg(cnthctl_el2);
46 val |= CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN;
47 write_sysreg(val, cnthctl_el2);
48 }
42 49
43 /* Clear cntvoff for the host */ 50 /* Clear cntvoff for the host */
44 write_sysreg(0, cntvoff_el2); 51 write_sysreg(0, cntvoff_el2);
@@ -46,23 +53,26 @@ void __hyp_text __timer_save_state(struct kvm_vcpu *vcpu)
46 53
47void __hyp_text __timer_restore_state(struct kvm_vcpu *vcpu) 54void __hyp_text __timer_restore_state(struct kvm_vcpu *vcpu)
48{ 55{
49 struct kvm *kvm = kern_hyp_va(vcpu->kvm);
50 struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; 56 struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
57 struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
51 u64 val; 58 u64 val;
52 59
53 /* 60 /* Those bits are already configured at boot on VHE-system */
54 * Disallow physical timer access for the guest 61 if (!has_vhe()) {
55 * Physical counter access is allowed 62 /*
56 */ 63 * Disallow physical timer access for the guest
57 val = read_sysreg(cnthctl_el2); 64 * Physical counter access is allowed
58 val &= ~CNTHCTL_EL1PCEN; 65 */
59 val |= CNTHCTL_EL1PCTEN; 66 val = read_sysreg(cnthctl_el2);
60 write_sysreg(val, cnthctl_el2); 67 val &= ~CNTHCTL_EL1PCEN;
68 val |= CNTHCTL_EL1PCTEN;
69 write_sysreg(val, cnthctl_el2);
70 }
61 71
62 if (timer->enabled) { 72 if (timer->enabled) {
63 write_sysreg(kvm->arch.timer.cntvoff, cntvoff_el2); 73 write_sysreg(vtimer->cntvoff, cntvoff_el2);
64 write_sysreg_el0(timer->cntv_cval, cntv_cval); 74 write_sysreg_el0(vtimer->cnt_cval, cntv_cval);
65 isb(); 75 isb();
66 write_sysreg_el0(timer->cntv_ctl, cntv_ctl); 76 write_sysreg_el0(vtimer->cnt_ctl, cntv_ctl);
67 } 77 }
68} 78}
diff --git a/virt/kvm/arm/vgic/vgic-debug.c b/virt/kvm/arm/vgic/vgic-debug.c
new file mode 100644
index 000000000000..7072ab743332
--- /dev/null
+++ b/virt/kvm/arm/vgic/vgic-debug.c
@@ -0,0 +1,283 @@
1/*
2 * Copyright (C) 2016 Linaro
3 * Author: Christoffer Dall <christoffer.dall@linaro.org>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include <linux/cpu.h>
19#include <linux/debugfs.h>
20#include <linux/interrupt.h>
21#include <linux/kvm_host.h>
22#include <linux/seq_file.h>
23#include <kvm/arm_vgic.h>
24#include <asm/kvm_mmu.h>
25#include "vgic.h"
26
27/*
28 * Structure to control looping through the entire vgic state. We start at
29 * zero for each field and move upwards. So, if dist_id is 0 we print the
30 * distributor info. When dist_id is 1, we have already printed it and move
31 * on.
32 *
33 * When vcpu_id < nr_cpus we print the vcpu info until vcpu_id == nr_cpus and
34 * so on.
35 */
36struct vgic_state_iter {
37 int nr_cpus;
38 int nr_spis;
39 int dist_id;
40 int vcpu_id;
41 int intid;
42};
43
44static void iter_next(struct vgic_state_iter *iter)
45{
46 if (iter->dist_id == 0) {
47 iter->dist_id++;
48 return;
49 }
50
51 iter->intid++;
52 if (iter->intid == VGIC_NR_PRIVATE_IRQS &&
53 ++iter->vcpu_id < iter->nr_cpus)
54 iter->intid = 0;
55}
56
57static void iter_init(struct kvm *kvm, struct vgic_state_iter *iter,
58 loff_t pos)
59{
60 int nr_cpus = atomic_read(&kvm->online_vcpus);
61
62 memset(iter, 0, sizeof(*iter));
63
64 iter->nr_cpus = nr_cpus;
65 iter->nr_spis = kvm->arch.vgic.nr_spis;
66
67 /* Fast forward to the right position if needed */
68 while (pos--)
69 iter_next(iter);
70}
71
72static bool end_of_vgic(struct vgic_state_iter *iter)
73{
74 return iter->dist_id > 0 &&
75 iter->vcpu_id == iter->nr_cpus &&
76 (iter->intid - VGIC_NR_PRIVATE_IRQS) == iter->nr_spis;
77}
78
79static void *vgic_debug_start(struct seq_file *s, loff_t *pos)
80{
81 struct kvm *kvm = (struct kvm *)s->private;
82 struct vgic_state_iter *iter;
83
84 mutex_lock(&kvm->lock);
85 iter = kvm->arch.vgic.iter;
86 if (iter) {
87 iter = ERR_PTR(-EBUSY);
88 goto out;
89 }
90
91 iter = kmalloc(sizeof(*iter), GFP_KERNEL);
92 if (!iter) {
93 iter = ERR_PTR(-ENOMEM);
94 goto out;
95 }
96
97 iter_init(kvm, iter, *pos);
98 kvm->arch.vgic.iter = iter;
99
100 if (end_of_vgic(iter))
101 iter = NULL;
102out:
103 mutex_unlock(&kvm->lock);
104 return iter;
105}
106
107static void *vgic_debug_next(struct seq_file *s, void *v, loff_t *pos)
108{
109 struct kvm *kvm = (struct kvm *)s->private;
110 struct vgic_state_iter *iter = kvm->arch.vgic.iter;
111
112 ++*pos;
113 iter_next(iter);
114 if (end_of_vgic(iter))
115 iter = NULL;
116 return iter;
117}
118
119static void vgic_debug_stop(struct seq_file *s, void *v)
120{
121 struct kvm *kvm = (struct kvm *)s->private;
122 struct vgic_state_iter *iter;
123
124 /*
125 * If the seq file wasn't properly opened, there's nothing to clearn
126 * up.
127 */
128 if (IS_ERR(v))
129 return;
130
131 mutex_lock(&kvm->lock);
132 iter = kvm->arch.vgic.iter;
133 kfree(iter);
134 kvm->arch.vgic.iter = NULL;
135 mutex_unlock(&kvm->lock);
136}
137
138static void print_dist_state(struct seq_file *s, struct vgic_dist *dist)
139{
140 seq_printf(s, "Distributor\n");
141 seq_printf(s, "===========\n");
142 seq_printf(s, "vgic_model:\t%s\n",
143 (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) ?
144 "GICv3" : "GICv2");
145 seq_printf(s, "nr_spis:\t%d\n", dist->nr_spis);
146 seq_printf(s, "enabled:\t%d\n", dist->enabled);
147 seq_printf(s, "\n");
148
149 seq_printf(s, "P=pending_latch, L=line_level, A=active\n");
150 seq_printf(s, "E=enabled, H=hw, C=config (level=1, edge=0)\n");
151}
152
153static void print_header(struct seq_file *s, struct vgic_irq *irq,
154 struct kvm_vcpu *vcpu)
155{
156 int id = 0;
157 char *hdr = "SPI ";
158
159 if (vcpu) {
160 hdr = "VCPU";
161 id = vcpu->vcpu_id;
162 }
163
164 seq_printf(s, "\n");
165 seq_printf(s, "%s%2d TYP ID TGT_ID PLAEHC HWID TARGET SRC PRI VCPU_ID\n", hdr, id);
166 seq_printf(s, "---------------------------------------------------------------\n");
167}
168
169static void print_irq_state(struct seq_file *s, struct vgic_irq *irq,
170 struct kvm_vcpu *vcpu)
171{
172 char *type;
173 if (irq->intid < VGIC_NR_SGIS)
174 type = "SGI";
175 else if (irq->intid < VGIC_NR_PRIVATE_IRQS)
176 type = "PPI";
177 else
178 type = "SPI";
179
180 if (irq->intid ==0 || irq->intid == VGIC_NR_PRIVATE_IRQS)
181 print_header(s, irq, vcpu);
182
183 seq_printf(s, " %s %4d "
184 " %2d "
185 "%d%d%d%d%d%d "
186 "%8d "
187 "%8x "
188 " %2x "
189 "%3d "
190 " %2d "
191 "\n",
192 type, irq->intid,
193 (irq->target_vcpu) ? irq->target_vcpu->vcpu_id : -1,
194 irq->pending_latch,
195 irq->line_level,
196 irq->active,
197 irq->enabled,
198 irq->hw,
199 irq->config == VGIC_CONFIG_LEVEL,
200 irq->hwintid,
201 irq->mpidr,
202 irq->source,
203 irq->priority,
204 (irq->vcpu) ? irq->vcpu->vcpu_id : -1);
205
206}
207
208static int vgic_debug_show(struct seq_file *s, void *v)
209{
210 struct kvm *kvm = (struct kvm *)s->private;
211 struct vgic_state_iter *iter = (struct vgic_state_iter *)v;
212 struct vgic_irq *irq;
213 struct kvm_vcpu *vcpu = NULL;
214
215 if (iter->dist_id == 0) {
216 print_dist_state(s, &kvm->arch.vgic);
217 return 0;
218 }
219
220 if (!kvm->arch.vgic.initialized)
221 return 0;
222
223 if (iter->vcpu_id < iter->nr_cpus) {
224 vcpu = kvm_get_vcpu(kvm, iter->vcpu_id);
225 irq = &vcpu->arch.vgic_cpu.private_irqs[iter->intid];
226 } else {
227 irq = &kvm->arch.vgic.spis[iter->intid - VGIC_NR_PRIVATE_IRQS];
228 }
229
230 spin_lock(&irq->irq_lock);
231 print_irq_state(s, irq, vcpu);
232 spin_unlock(&irq->irq_lock);
233
234 return 0;
235}
236
237static struct seq_operations vgic_debug_seq_ops = {
238 .start = vgic_debug_start,
239 .next = vgic_debug_next,
240 .stop = vgic_debug_stop,
241 .show = vgic_debug_show
242};
243
244static int debug_open(struct inode *inode, struct file *file)
245{
246 int ret;
247 ret = seq_open(file, &vgic_debug_seq_ops);
248 if (!ret) {
249 struct seq_file *seq;
250 /* seq_open will have modified file->private_data */
251 seq = file->private_data;
252 seq->private = inode->i_private;
253 }
254
255 return ret;
256};
257
258static struct file_operations vgic_debug_fops = {
259 .owner = THIS_MODULE,
260 .open = debug_open,
261 .read = seq_read,
262 .llseek = seq_lseek,
263 .release = seq_release
264};
265
266int vgic_debug_init(struct kvm *kvm)
267{
268 if (!kvm->debugfs_dentry)
269 return -ENOENT;
270
271 if (!debugfs_create_file("vgic-state", 0444,
272 kvm->debugfs_dentry,
273 kvm,
274 &vgic_debug_fops))
275 return -ENOMEM;
276
277 return 0;
278}
279
280int vgic_debug_destroy(struct kvm *kvm)
281{
282 return 0;
283}
diff --git a/virt/kvm/arm/vgic/vgic-init.c b/virt/kvm/arm/vgic/vgic-init.c
index 5114391b7e5a..276139a24e6f 100644
--- a/virt/kvm/arm/vgic/vgic-init.c
+++ b/virt/kvm/arm/vgic/vgic-init.c
@@ -259,6 +259,8 @@ int vgic_init(struct kvm *kvm)
259 if (ret) 259 if (ret)
260 goto out; 260 goto out;
261 261
262 vgic_debug_init(kvm);
263
262 dist->initialized = true; 264 dist->initialized = true;
263out: 265out:
264 return ret; 266 return ret;
@@ -268,15 +270,11 @@ static void kvm_vgic_dist_destroy(struct kvm *kvm)
268{ 270{
269 struct vgic_dist *dist = &kvm->arch.vgic; 271 struct vgic_dist *dist = &kvm->arch.vgic;
270 272
271 mutex_lock(&kvm->lock);
272
273 dist->ready = false; 273 dist->ready = false;
274 dist->initialized = false; 274 dist->initialized = false;
275 275
276 kfree(dist->spis); 276 kfree(dist->spis);
277 dist->nr_spis = 0; 277 dist->nr_spis = 0;
278
279 mutex_unlock(&kvm->lock);
280} 278}
281 279
282void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu) 280void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
@@ -286,17 +284,27 @@ void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
286 INIT_LIST_HEAD(&vgic_cpu->ap_list_head); 284 INIT_LIST_HEAD(&vgic_cpu->ap_list_head);
287} 285}
288 286
289void kvm_vgic_destroy(struct kvm *kvm) 287/* To be called with kvm->lock held */
288static void __kvm_vgic_destroy(struct kvm *kvm)
290{ 289{
291 struct kvm_vcpu *vcpu; 290 struct kvm_vcpu *vcpu;
292 int i; 291 int i;
293 292
293 vgic_debug_destroy(kvm);
294
294 kvm_vgic_dist_destroy(kvm); 295 kvm_vgic_dist_destroy(kvm);
295 296
296 kvm_for_each_vcpu(i, vcpu, kvm) 297 kvm_for_each_vcpu(i, vcpu, kvm)
297 kvm_vgic_vcpu_destroy(vcpu); 298 kvm_vgic_vcpu_destroy(vcpu);
298} 299}
299 300
301void kvm_vgic_destroy(struct kvm *kvm)
302{
303 mutex_lock(&kvm->lock);
304 __kvm_vgic_destroy(kvm);
305 mutex_unlock(&kvm->lock);
306}
307
300/** 308/**
301 * vgic_lazy_init: Lazy init is only allowed if the GIC exposed to the guest 309 * vgic_lazy_init: Lazy init is only allowed if the GIC exposed to the guest
302 * is a GICv2. A GICv3 must be explicitly initialized by the guest using the 310 * is a GICv2. A GICv3 must be explicitly initialized by the guest using the
@@ -348,6 +356,10 @@ int kvm_vgic_map_resources(struct kvm *kvm)
348 ret = vgic_v2_map_resources(kvm); 356 ret = vgic_v2_map_resources(kvm);
349 else 357 else
350 ret = vgic_v3_map_resources(kvm); 358 ret = vgic_v3_map_resources(kvm);
359
360 if (ret)
361 __kvm_vgic_destroy(kvm);
362
351out: 363out:
352 mutex_unlock(&kvm->lock); 364 mutex_unlock(&kvm->lock);
353 return ret; 365 return ret;
diff --git a/virt/kvm/arm/vgic/vgic-irqfd.c b/virt/kvm/arm/vgic/vgic-irqfd.c
index d918dcf26a5a..f138ed2e9c63 100644
--- a/virt/kvm/arm/vgic/vgic-irqfd.c
+++ b/virt/kvm/arm/vgic/vgic-irqfd.c
@@ -99,6 +99,9 @@ int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
99 if (!vgic_has_its(kvm)) 99 if (!vgic_has_its(kvm))
100 return -ENODEV; 100 return -ENODEV;
101 101
102 if (!level)
103 return -1;
104
102 return vgic_its_inject_msi(kvm, &msi); 105 return vgic_its_inject_msi(kvm, &msi);
103} 106}
104 107
diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c
index 8c2b3cdcb2c5..571b64a01c50 100644
--- a/virt/kvm/arm/vgic/vgic-its.c
+++ b/virt/kvm/arm/vgic/vgic-its.c
@@ -350,7 +350,7 @@ static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu)
350 350
351 irq = vgic_get_irq(vcpu->kvm, NULL, intids[i]); 351 irq = vgic_get_irq(vcpu->kvm, NULL, intids[i]);
352 spin_lock(&irq->irq_lock); 352 spin_lock(&irq->irq_lock);
353 irq->pending = pendmask & (1U << bit_nr); 353 irq->pending_latch = pendmask & (1U << bit_nr);
354 vgic_queue_irq_unlock(vcpu->kvm, irq); 354 vgic_queue_irq_unlock(vcpu->kvm, irq);
355 vgic_put_irq(vcpu->kvm, irq); 355 vgic_put_irq(vcpu->kvm, irq);
356 } 356 }
@@ -465,7 +465,7 @@ static int vgic_its_trigger_msi(struct kvm *kvm, struct vgic_its *its,
465 return -EBUSY; 465 return -EBUSY;
466 466
467 spin_lock(&itte->irq->irq_lock); 467 spin_lock(&itte->irq->irq_lock);
468 itte->irq->pending = true; 468 itte->irq->pending_latch = true;
469 vgic_queue_irq_unlock(kvm, itte->irq); 469 vgic_queue_irq_unlock(kvm, itte->irq);
470 470
471 return 0; 471 return 0;
@@ -913,7 +913,7 @@ static int vgic_its_cmd_handle_clear(struct kvm *kvm, struct vgic_its *its,
913 if (!itte) 913 if (!itte)
914 return E_ITS_CLEAR_UNMAPPED_INTERRUPT; 914 return E_ITS_CLEAR_UNMAPPED_INTERRUPT;
915 915
916 itte->irq->pending = false; 916 itte->irq->pending_latch = false;
917 917
918 return 0; 918 return 0;
919} 919}
diff --git a/virt/kvm/arm/vgic/vgic-kvm-device.c b/virt/kvm/arm/vgic/vgic-kvm-device.c
index fbe87a63d250..d181d2baee9c 100644
--- a/virt/kvm/arm/vgic/vgic-kvm-device.c
+++ b/virt/kvm/arm/vgic/vgic-kvm-device.c
@@ -17,6 +17,7 @@
17#include <kvm/arm_vgic.h> 17#include <kvm/arm_vgic.h>
18#include <linux/uaccess.h> 18#include <linux/uaccess.h>
19#include <asm/kvm_mmu.h> 19#include <asm/kvm_mmu.h>
20#include <asm/cputype.h>
20#include "vgic.h" 21#include "vgic.h"
21 22
22/* common helpers */ 23/* common helpers */
@@ -230,14 +231,8 @@ int kvm_register_vgic_device(unsigned long type)
230 return ret; 231 return ret;
231} 232}
232 233
233struct vgic_reg_attr { 234int vgic_v2_parse_attr(struct kvm_device *dev, struct kvm_device_attr *attr,
234 struct kvm_vcpu *vcpu; 235 struct vgic_reg_attr *reg_attr)
235 gpa_t addr;
236};
237
238static int parse_vgic_v2_attr(struct kvm_device *dev,
239 struct kvm_device_attr *attr,
240 struct vgic_reg_attr *reg_attr)
241{ 236{
242 int cpuid; 237 int cpuid;
243 238
@@ -292,14 +287,14 @@ static bool lock_all_vcpus(struct kvm *kvm)
292} 287}
293 288
294/** 289/**
295 * vgic_attr_regs_access_v2 - allows user space to access VGIC v2 state 290 * vgic_v2_attr_regs_access - allows user space to access VGIC v2 state
296 * 291 *
297 * @dev: kvm device handle 292 * @dev: kvm device handle
298 * @attr: kvm device attribute 293 * @attr: kvm device attribute
299 * @reg: address the value is read or written 294 * @reg: address the value is read or written
300 * @is_write: true if userspace is writing a register 295 * @is_write: true if userspace is writing a register
301 */ 296 */
302static int vgic_attr_regs_access_v2(struct kvm_device *dev, 297static int vgic_v2_attr_regs_access(struct kvm_device *dev,
303 struct kvm_device_attr *attr, 298 struct kvm_device_attr *attr,
304 u32 *reg, bool is_write) 299 u32 *reg, bool is_write)
305{ 300{
@@ -308,7 +303,7 @@ static int vgic_attr_regs_access_v2(struct kvm_device *dev,
308 struct kvm_vcpu *vcpu; 303 struct kvm_vcpu *vcpu;
309 int ret; 304 int ret;
310 305
311 ret = parse_vgic_v2_attr(dev, attr, &reg_attr); 306 ret = vgic_v2_parse_attr(dev, attr, &reg_attr);
312 if (ret) 307 if (ret)
313 return ret; 308 return ret;
314 309
@@ -362,7 +357,7 @@ static int vgic_v2_set_attr(struct kvm_device *dev,
362 if (get_user(reg, uaddr)) 357 if (get_user(reg, uaddr))
363 return -EFAULT; 358 return -EFAULT;
364 359
365 return vgic_attr_regs_access_v2(dev, attr, &reg, true); 360 return vgic_v2_attr_regs_access(dev, attr, &reg, true);
366 } 361 }
367 } 362 }
368 363
@@ -384,7 +379,7 @@ static int vgic_v2_get_attr(struct kvm_device *dev,
384 u32 __user *uaddr = (u32 __user *)(long)attr->addr; 379 u32 __user *uaddr = (u32 __user *)(long)attr->addr;
385 u32 reg = 0; 380 u32 reg = 0;
386 381
387 ret = vgic_attr_regs_access_v2(dev, attr, &reg, false); 382 ret = vgic_v2_attr_regs_access(dev, attr, &reg, false);
388 if (ret) 383 if (ret)
389 return ret; 384 return ret;
390 return put_user(reg, uaddr); 385 return put_user(reg, uaddr);
@@ -428,16 +423,211 @@ struct kvm_device_ops kvm_arm_vgic_v2_ops = {
428 .has_attr = vgic_v2_has_attr, 423 .has_attr = vgic_v2_has_attr,
429}; 424};
430 425
426int vgic_v3_parse_attr(struct kvm_device *dev, struct kvm_device_attr *attr,
427 struct vgic_reg_attr *reg_attr)
428{
429 unsigned long vgic_mpidr, mpidr_reg;
430
431 /*
432 * For KVM_DEV_ARM_VGIC_GRP_DIST_REGS group,
433 * attr might not hold MPIDR. Hence assume vcpu0.
434 */
435 if (attr->group != KVM_DEV_ARM_VGIC_GRP_DIST_REGS) {
436 vgic_mpidr = (attr->attr & KVM_DEV_ARM_VGIC_V3_MPIDR_MASK) >>
437 KVM_DEV_ARM_VGIC_V3_MPIDR_SHIFT;
438
439 mpidr_reg = VGIC_TO_MPIDR(vgic_mpidr);
440 reg_attr->vcpu = kvm_mpidr_to_vcpu(dev->kvm, mpidr_reg);
441 } else {
442 reg_attr->vcpu = kvm_get_vcpu(dev->kvm, 0);
443 }
444
445 if (!reg_attr->vcpu)
446 return -EINVAL;
447
448 reg_attr->addr = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
449
450 return 0;
451}
452
453/*
454 * vgic_v3_attr_regs_access - allows user space to access VGIC v3 state
455 *
456 * @dev: kvm device handle
457 * @attr: kvm device attribute
458 * @reg: address the value is read or written
459 * @is_write: true if userspace is writing a register
460 */
461static int vgic_v3_attr_regs_access(struct kvm_device *dev,
462 struct kvm_device_attr *attr,
463 u64 *reg, bool is_write)
464{
465 struct vgic_reg_attr reg_attr;
466 gpa_t addr;
467 struct kvm_vcpu *vcpu;
468 int ret;
469 u32 tmp32;
470
471 ret = vgic_v3_parse_attr(dev, attr, &reg_attr);
472 if (ret)
473 return ret;
474
475 vcpu = reg_attr.vcpu;
476 addr = reg_attr.addr;
477
478 mutex_lock(&dev->kvm->lock);
479
480 if (unlikely(!vgic_initialized(dev->kvm))) {
481 ret = -EBUSY;
482 goto out;
483 }
484
485 if (!lock_all_vcpus(dev->kvm)) {
486 ret = -EBUSY;
487 goto out;
488 }
489
490 switch (attr->group) {
491 case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
492 if (is_write)
493 tmp32 = *reg;
494
495 ret = vgic_v3_dist_uaccess(vcpu, is_write, addr, &tmp32);
496 if (!is_write)
497 *reg = tmp32;
498 break;
499 case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS:
500 if (is_write)
501 tmp32 = *reg;
502
503 ret = vgic_v3_redist_uaccess(vcpu, is_write, addr, &tmp32);
504 if (!is_write)
505 *reg = tmp32;
506 break;
507 case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS: {
508 u64 regid;
509
510 regid = (attr->attr & KVM_DEV_ARM_VGIC_SYSREG_INSTR_MASK);
511 ret = vgic_v3_cpu_sysregs_uaccess(vcpu, is_write,
512 regid, reg);
513 break;
514 }
515 case KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO: {
516 unsigned int info, intid;
517
518 info = (attr->attr & KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_MASK) >>
519 KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT;
520 if (info == VGIC_LEVEL_INFO_LINE_LEVEL) {
521 intid = attr->attr &
522 KVM_DEV_ARM_VGIC_LINE_LEVEL_INTID_MASK;
523 ret = vgic_v3_line_level_info_uaccess(vcpu, is_write,
524 intid, reg);
525 } else {
526 ret = -EINVAL;
527 }
528 break;
529 }
530 default:
531 ret = -EINVAL;
532 break;
533 }
534
535 unlock_all_vcpus(dev->kvm);
536out:
537 mutex_unlock(&dev->kvm->lock);
538 return ret;
539}
540
431static int vgic_v3_set_attr(struct kvm_device *dev, 541static int vgic_v3_set_attr(struct kvm_device *dev,
432 struct kvm_device_attr *attr) 542 struct kvm_device_attr *attr)
433{ 543{
434 return vgic_set_common_attr(dev, attr); 544 int ret;
545
546 ret = vgic_set_common_attr(dev, attr);
547 if (ret != -ENXIO)
548 return ret;
549
550 switch (attr->group) {
551 case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
552 case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS: {
553 u32 __user *uaddr = (u32 __user *)(long)attr->addr;
554 u32 tmp32;
555 u64 reg;
556
557 if (get_user(tmp32, uaddr))
558 return -EFAULT;
559
560 reg = tmp32;
561 return vgic_v3_attr_regs_access(dev, attr, &reg, true);
562 }
563 case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS: {
564 u64 __user *uaddr = (u64 __user *)(long)attr->addr;
565 u64 reg;
566
567 if (get_user(reg, uaddr))
568 return -EFAULT;
569
570 return vgic_v3_attr_regs_access(dev, attr, &reg, true);
571 }
572 case KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO: {
573 u32 __user *uaddr = (u32 __user *)(long)attr->addr;
574 u64 reg;
575 u32 tmp32;
576
577 if (get_user(tmp32, uaddr))
578 return -EFAULT;
579
580 reg = tmp32;
581 return vgic_v3_attr_regs_access(dev, attr, &reg, true);
582 }
583 }
584 return -ENXIO;
435} 585}
436 586
437static int vgic_v3_get_attr(struct kvm_device *dev, 587static int vgic_v3_get_attr(struct kvm_device *dev,
438 struct kvm_device_attr *attr) 588 struct kvm_device_attr *attr)
439{ 589{
440 return vgic_get_common_attr(dev, attr); 590 int ret;
591
592 ret = vgic_get_common_attr(dev, attr);
593 if (ret != -ENXIO)
594 return ret;
595
596 switch (attr->group) {
597 case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
598 case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS: {
599 u32 __user *uaddr = (u32 __user *)(long)attr->addr;
600 u64 reg;
601 u32 tmp32;
602
603 ret = vgic_v3_attr_regs_access(dev, attr, &reg, false);
604 if (ret)
605 return ret;
606 tmp32 = reg;
607 return put_user(tmp32, uaddr);
608 }
609 case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS: {
610 u64 __user *uaddr = (u64 __user *)(long)attr->addr;
611 u64 reg;
612
613 ret = vgic_v3_attr_regs_access(dev, attr, &reg, false);
614 if (ret)
615 return ret;
616 return put_user(reg, uaddr);
617 }
618 case KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO: {
619 u32 __user *uaddr = (u32 __user *)(long)attr->addr;
620 u64 reg;
621 u32 tmp32;
622
623 ret = vgic_v3_attr_regs_access(dev, attr, &reg, false);
624 if (ret)
625 return ret;
626 tmp32 = reg;
627 return put_user(tmp32, uaddr);
628 }
629 }
630 return -ENXIO;
441} 631}
442 632
443static int vgic_v3_has_attr(struct kvm_device *dev, 633static int vgic_v3_has_attr(struct kvm_device *dev,
@@ -451,8 +641,19 @@ static int vgic_v3_has_attr(struct kvm_device *dev,
451 return 0; 641 return 0;
452 } 642 }
453 break; 643 break;
644 case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
645 case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS:
646 case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS:
647 return vgic_v3_has_attr_regs(dev, attr);
454 case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: 648 case KVM_DEV_ARM_VGIC_GRP_NR_IRQS:
455 return 0; 649 return 0;
650 case KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO: {
651 if (((attr->attr & KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_MASK) >>
652 KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT) ==
653 VGIC_LEVEL_INFO_LINE_LEVEL)
654 return 0;
655 break;
656 }
456 case KVM_DEV_ARM_VGIC_GRP_CTRL: 657 case KVM_DEV_ARM_VGIC_GRP_CTRL:
457 switch (attr->attr) { 658 switch (attr->attr) {
458 case KVM_DEV_ARM_VGIC_CTRL_INIT: 659 case KVM_DEV_ARM_VGIC_CTRL_INIT:
diff --git a/virt/kvm/arm/vgic/vgic-mmio-v2.c b/virt/kvm/arm/vgic/vgic-mmio-v2.c
index 78e34bc4d89b..a3ad7ff95c9b 100644
--- a/virt/kvm/arm/vgic/vgic-mmio-v2.c
+++ b/virt/kvm/arm/vgic/vgic-mmio-v2.c
@@ -98,7 +98,7 @@ static void vgic_mmio_write_sgir(struct kvm_vcpu *source_vcpu,
98 irq = vgic_get_irq(source_vcpu->kvm, vcpu, intid); 98 irq = vgic_get_irq(source_vcpu->kvm, vcpu, intid);
99 99
100 spin_lock(&irq->irq_lock); 100 spin_lock(&irq->irq_lock);
101 irq->pending = true; 101 irq->pending_latch = true;
102 irq->source |= 1U << source_vcpu->vcpu_id; 102 irq->source |= 1U << source_vcpu->vcpu_id;
103 103
104 vgic_queue_irq_unlock(source_vcpu->kvm, irq); 104 vgic_queue_irq_unlock(source_vcpu->kvm, irq);
@@ -182,7 +182,7 @@ static void vgic_mmio_write_sgipendc(struct kvm_vcpu *vcpu,
182 182
183 irq->source &= ~((val >> (i * 8)) & 0xff); 183 irq->source &= ~((val >> (i * 8)) & 0xff);
184 if (!irq->source) 184 if (!irq->source)
185 irq->pending = false; 185 irq->pending_latch = false;
186 186
187 spin_unlock(&irq->irq_lock); 187 spin_unlock(&irq->irq_lock);
188 vgic_put_irq(vcpu->kvm, irq); 188 vgic_put_irq(vcpu->kvm, irq);
@@ -204,7 +204,7 @@ static void vgic_mmio_write_sgipends(struct kvm_vcpu *vcpu,
204 irq->source |= (val >> (i * 8)) & 0xff; 204 irq->source |= (val >> (i * 8)) & 0xff;
205 205
206 if (irq->source) { 206 if (irq->source) {
207 irq->pending = true; 207 irq->pending_latch = true;
208 vgic_queue_irq_unlock(vcpu->kvm, irq); 208 vgic_queue_irq_unlock(vcpu->kvm, irq);
209 } else { 209 } else {
210 spin_unlock(&irq->irq_lock); 210 spin_unlock(&irq->irq_lock);
@@ -213,22 +213,6 @@ static void vgic_mmio_write_sgipends(struct kvm_vcpu *vcpu,
213 } 213 }
214} 214}
215 215
216static void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
217{
218 if (kvm_vgic_global_state.type == VGIC_V2)
219 vgic_v2_set_vmcr(vcpu, vmcr);
220 else
221 vgic_v3_set_vmcr(vcpu, vmcr);
222}
223
224static void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
225{
226 if (kvm_vgic_global_state.type == VGIC_V2)
227 vgic_v2_get_vmcr(vcpu, vmcr);
228 else
229 vgic_v3_get_vmcr(vcpu, vmcr);
230}
231
232#define GICC_ARCH_VERSION_V2 0x2 216#define GICC_ARCH_VERSION_V2 0x2
233 217
234/* These are for userland accesses only, there is no guest-facing emulation. */ 218/* These are for userland accesses only, there is no guest-facing emulation. */
@@ -369,21 +353,30 @@ unsigned int vgic_v2_init_dist_iodev(struct vgic_io_device *dev)
369 353
370int vgic_v2_has_attr_regs(struct kvm_device *dev, struct kvm_device_attr *attr) 354int vgic_v2_has_attr_regs(struct kvm_device *dev, struct kvm_device_attr *attr)
371{ 355{
372 int nr_irqs = dev->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS; 356 const struct vgic_register_region *region;
373 const struct vgic_register_region *regions; 357 struct vgic_io_device iodev;
358 struct vgic_reg_attr reg_attr;
359 struct kvm_vcpu *vcpu;
374 gpa_t addr; 360 gpa_t addr;
375 int nr_regions, i, len; 361 int ret;
362
363 ret = vgic_v2_parse_attr(dev, attr, &reg_attr);
364 if (ret)
365 return ret;
376 366
377 addr = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK; 367 vcpu = reg_attr.vcpu;
368 addr = reg_attr.addr;
378 369
379 switch (attr->group) { 370 switch (attr->group) {
380 case KVM_DEV_ARM_VGIC_GRP_DIST_REGS: 371 case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
381 regions = vgic_v2_dist_registers; 372 iodev.regions = vgic_v2_dist_registers;
382 nr_regions = ARRAY_SIZE(vgic_v2_dist_registers); 373 iodev.nr_regions = ARRAY_SIZE(vgic_v2_dist_registers);
374 iodev.base_addr = 0;
383 break; 375 break;
384 case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: 376 case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
385 regions = vgic_v2_cpu_registers; 377 iodev.regions = vgic_v2_cpu_registers;
386 nr_regions = ARRAY_SIZE(vgic_v2_cpu_registers); 378 iodev.nr_regions = ARRAY_SIZE(vgic_v2_cpu_registers);
379 iodev.base_addr = 0;
387 break; 380 break;
388 default: 381 default:
389 return -ENXIO; 382 return -ENXIO;
@@ -393,43 +386,11 @@ int vgic_v2_has_attr_regs(struct kvm_device *dev, struct kvm_device_attr *attr)
393 if (addr & 3) 386 if (addr & 3)
394 return -ENXIO; 387 return -ENXIO;
395 388
396 for (i = 0; i < nr_regions; i++) { 389 region = vgic_get_mmio_region(vcpu, &iodev, addr, sizeof(u32));
397 if (regions[i].bits_per_irq) 390 if (!region)
398 len = (regions[i].bits_per_irq * nr_irqs) / 8; 391 return -ENXIO;
399 else
400 len = regions[i].len;
401
402 if (regions[i].reg_offset <= addr &&
403 regions[i].reg_offset + len > addr)
404 return 0;
405 }
406
407 return -ENXIO;
408}
409
410/*
411 * When userland tries to access the VGIC register handlers, we need to
412 * create a usable struct vgic_io_device to be passed to the handlers and we
413 * have to set up a buffer similar to what would have happened if a guest MMIO
414 * access occurred, including doing endian conversions on BE systems.
415 */
416static int vgic_uaccess(struct kvm_vcpu *vcpu, struct vgic_io_device *dev,
417 bool is_write, int offset, u32 *val)
418{
419 unsigned int len = 4;
420 u8 buf[4];
421 int ret;
422
423 if (is_write) {
424 vgic_data_host_to_mmio_bus(buf, len, *val);
425 ret = kvm_io_gic_ops.write(vcpu, &dev->dev, offset, len, buf);
426 } else {
427 ret = kvm_io_gic_ops.read(vcpu, &dev->dev, offset, len, buf);
428 if (!ret)
429 *val = vgic_data_mmio_bus_to_host(buf, len);
430 }
431 392
432 return ret; 393 return 0;
433} 394}
434 395
435int vgic_v2_cpuif_uaccess(struct kvm_vcpu *vcpu, bool is_write, 396int vgic_v2_cpuif_uaccess(struct kvm_vcpu *vcpu, bool is_write,
diff --git a/virt/kvm/arm/vgic/vgic-mmio-v3.c b/virt/kvm/arm/vgic/vgic-mmio-v3.c
index 50f42f0f8c4f..6afb3b484886 100644
--- a/virt/kvm/arm/vgic/vgic-mmio-v3.c
+++ b/virt/kvm/arm/vgic/vgic-mmio-v3.c
@@ -18,6 +18,8 @@
18#include <kvm/arm_vgic.h> 18#include <kvm/arm_vgic.h>
19 19
20#include <asm/kvm_emulate.h> 20#include <asm/kvm_emulate.h>
21#include <asm/kvm_arm.h>
22#include <asm/kvm_mmu.h>
21 23
22#include "vgic.h" 24#include "vgic.h"
23#include "vgic-mmio.h" 25#include "vgic-mmio.h"
@@ -207,6 +209,60 @@ static unsigned long vgic_mmio_read_v3_idregs(struct kvm_vcpu *vcpu,
207 return 0; 209 return 0;
208} 210}
209 211
212static unsigned long vgic_v3_uaccess_read_pending(struct kvm_vcpu *vcpu,
213 gpa_t addr, unsigned int len)
214{
215 u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
216 u32 value = 0;
217 int i;
218
219 /*
220 * pending state of interrupt is latched in pending_latch variable.
221 * Userspace will save and restore pending state and line_level
222 * separately.
223 * Refer to Documentation/virtual/kvm/devices/arm-vgic-v3.txt
224 * for handling of ISPENDR and ICPENDR.
225 */
226 for (i = 0; i < len * 8; i++) {
227 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
228
229 if (irq->pending_latch)
230 value |= (1U << i);
231
232 vgic_put_irq(vcpu->kvm, irq);
233 }
234
235 return value;
236}
237
238static void vgic_v3_uaccess_write_pending(struct kvm_vcpu *vcpu,
239 gpa_t addr, unsigned int len,
240 unsigned long val)
241{
242 u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
243 int i;
244
245 for (i = 0; i < len * 8; i++) {
246 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
247
248 spin_lock(&irq->irq_lock);
249 if (test_bit(i, &val)) {
250 /*
251 * pending_latch is set irrespective of irq type
252 * (level or edge) to avoid dependency that VM should
253 * restore irq config before pending info.
254 */
255 irq->pending_latch = true;
256 vgic_queue_irq_unlock(vcpu->kvm, irq);
257 } else {
258 irq->pending_latch = false;
259 spin_unlock(&irq->irq_lock);
260 }
261
262 vgic_put_irq(vcpu->kvm, irq);
263 }
264}
265
210/* We want to avoid outer shareable. */ 266/* We want to avoid outer shareable. */
211u64 vgic_sanitise_shareability(u64 field) 267u64 vgic_sanitise_shareability(u64 field)
212{ 268{
@@ -356,7 +412,7 @@ static void vgic_mmio_write_pendbase(struct kvm_vcpu *vcpu,
356 * We take some special care here to fix the calculation of the register 412 * We take some special care here to fix the calculation of the register
357 * offset. 413 * offset.
358 */ 414 */
359#define REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(off, rd, wr, bpi, acc) \ 415#define REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(off, rd, wr, ur, uw, bpi, acc) \
360 { \ 416 { \
361 .reg_offset = off, \ 417 .reg_offset = off, \
362 .bits_per_irq = bpi, \ 418 .bits_per_irq = bpi, \
@@ -371,47 +427,54 @@ static void vgic_mmio_write_pendbase(struct kvm_vcpu *vcpu,
371 .access_flags = acc, \ 427 .access_flags = acc, \
372 .read = rd, \ 428 .read = rd, \
373 .write = wr, \ 429 .write = wr, \
430 .uaccess_read = ur, \
431 .uaccess_write = uw, \
374 } 432 }
375 433
376static const struct vgic_register_region vgic_v3_dist_registers[] = { 434static const struct vgic_register_region vgic_v3_dist_registers[] = {
377 REGISTER_DESC_WITH_LENGTH(GICD_CTLR, 435 REGISTER_DESC_WITH_LENGTH(GICD_CTLR,
378 vgic_mmio_read_v3_misc, vgic_mmio_write_v3_misc, 16, 436 vgic_mmio_read_v3_misc, vgic_mmio_write_v3_misc, 16,
379 VGIC_ACCESS_32bit), 437 VGIC_ACCESS_32bit),
438 REGISTER_DESC_WITH_LENGTH(GICD_STATUSR,
439 vgic_mmio_read_rao, vgic_mmio_write_wi, 4,
440 VGIC_ACCESS_32bit),
380 REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_IGROUPR, 441 REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_IGROUPR,
381 vgic_mmio_read_rao, vgic_mmio_write_wi, 1, 442 vgic_mmio_read_rao, vgic_mmio_write_wi, NULL, NULL, 1,
382 VGIC_ACCESS_32bit), 443 VGIC_ACCESS_32bit),
383 REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ISENABLER, 444 REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ISENABLER,
384 vgic_mmio_read_enable, vgic_mmio_write_senable, 1, 445 vgic_mmio_read_enable, vgic_mmio_write_senable, NULL, NULL, 1,
385 VGIC_ACCESS_32bit), 446 VGIC_ACCESS_32bit),
386 REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ICENABLER, 447 REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ICENABLER,
387 vgic_mmio_read_enable, vgic_mmio_write_cenable, 1, 448 vgic_mmio_read_enable, vgic_mmio_write_cenable, NULL, NULL, 1,
388 VGIC_ACCESS_32bit), 449 VGIC_ACCESS_32bit),
389 REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ISPENDR, 450 REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ISPENDR,
390 vgic_mmio_read_pending, vgic_mmio_write_spending, 1, 451 vgic_mmio_read_pending, vgic_mmio_write_spending,
452 vgic_v3_uaccess_read_pending, vgic_v3_uaccess_write_pending, 1,
391 VGIC_ACCESS_32bit), 453 VGIC_ACCESS_32bit),
392 REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ICPENDR, 454 REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ICPENDR,
393 vgic_mmio_read_pending, vgic_mmio_write_cpending, 1, 455 vgic_mmio_read_pending, vgic_mmio_write_cpending,
456 vgic_mmio_read_raz, vgic_mmio_write_wi, 1,
394 VGIC_ACCESS_32bit), 457 VGIC_ACCESS_32bit),
395 REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ISACTIVER, 458 REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ISACTIVER,
396 vgic_mmio_read_active, vgic_mmio_write_sactive, 1, 459 vgic_mmio_read_active, vgic_mmio_write_sactive, NULL, NULL, 1,
397 VGIC_ACCESS_32bit), 460 VGIC_ACCESS_32bit),
398 REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ICACTIVER, 461 REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ICACTIVER,
399 vgic_mmio_read_active, vgic_mmio_write_cactive, 1, 462 vgic_mmio_read_active, vgic_mmio_write_cactive, NULL, NULL, 1,
400 VGIC_ACCESS_32bit), 463 VGIC_ACCESS_32bit),
401 REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_IPRIORITYR, 464 REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_IPRIORITYR,
402 vgic_mmio_read_priority, vgic_mmio_write_priority, 8, 465 vgic_mmio_read_priority, vgic_mmio_write_priority, NULL, NULL,
403 VGIC_ACCESS_32bit | VGIC_ACCESS_8bit), 466 8, VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
404 REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ITARGETSR, 467 REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ITARGETSR,
405 vgic_mmio_read_raz, vgic_mmio_write_wi, 8, 468 vgic_mmio_read_raz, vgic_mmio_write_wi, NULL, NULL, 8,
406 VGIC_ACCESS_32bit | VGIC_ACCESS_8bit), 469 VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
407 REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ICFGR, 470 REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ICFGR,
408 vgic_mmio_read_config, vgic_mmio_write_config, 2, 471 vgic_mmio_read_config, vgic_mmio_write_config, NULL, NULL, 2,
409 VGIC_ACCESS_32bit), 472 VGIC_ACCESS_32bit),
410 REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_IGRPMODR, 473 REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_IGRPMODR,
411 vgic_mmio_read_raz, vgic_mmio_write_wi, 1, 474 vgic_mmio_read_raz, vgic_mmio_write_wi, NULL, NULL, 1,
412 VGIC_ACCESS_32bit), 475 VGIC_ACCESS_32bit),
413 REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_IROUTER, 476 REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_IROUTER,
414 vgic_mmio_read_irouter, vgic_mmio_write_irouter, 64, 477 vgic_mmio_read_irouter, vgic_mmio_write_irouter, NULL, NULL, 64,
415 VGIC_ACCESS_64bit | VGIC_ACCESS_32bit), 478 VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
416 REGISTER_DESC_WITH_LENGTH(GICD_IDREGS, 479 REGISTER_DESC_WITH_LENGTH(GICD_IDREGS,
417 vgic_mmio_read_v3_idregs, vgic_mmio_write_wi, 48, 480 vgic_mmio_read_v3_idregs, vgic_mmio_write_wi, 48,
@@ -422,12 +485,18 @@ static const struct vgic_register_region vgic_v3_rdbase_registers[] = {
422 REGISTER_DESC_WITH_LENGTH(GICR_CTLR, 485 REGISTER_DESC_WITH_LENGTH(GICR_CTLR,
423 vgic_mmio_read_v3r_ctlr, vgic_mmio_write_v3r_ctlr, 4, 486 vgic_mmio_read_v3r_ctlr, vgic_mmio_write_v3r_ctlr, 4,
424 VGIC_ACCESS_32bit), 487 VGIC_ACCESS_32bit),
488 REGISTER_DESC_WITH_LENGTH(GICR_STATUSR,
489 vgic_mmio_read_raz, vgic_mmio_write_wi, 4,
490 VGIC_ACCESS_32bit),
425 REGISTER_DESC_WITH_LENGTH(GICR_IIDR, 491 REGISTER_DESC_WITH_LENGTH(GICR_IIDR,
426 vgic_mmio_read_v3r_iidr, vgic_mmio_write_wi, 4, 492 vgic_mmio_read_v3r_iidr, vgic_mmio_write_wi, 4,
427 VGIC_ACCESS_32bit), 493 VGIC_ACCESS_32bit),
428 REGISTER_DESC_WITH_LENGTH(GICR_TYPER, 494 REGISTER_DESC_WITH_LENGTH(GICR_TYPER,
429 vgic_mmio_read_v3r_typer, vgic_mmio_write_wi, 8, 495 vgic_mmio_read_v3r_typer, vgic_mmio_write_wi, 8,
430 VGIC_ACCESS_64bit | VGIC_ACCESS_32bit), 496 VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
497 REGISTER_DESC_WITH_LENGTH(GICR_WAKER,
498 vgic_mmio_read_raz, vgic_mmio_write_wi, 4,
499 VGIC_ACCESS_32bit),
431 REGISTER_DESC_WITH_LENGTH(GICR_PROPBASER, 500 REGISTER_DESC_WITH_LENGTH(GICR_PROPBASER,
432 vgic_mmio_read_propbase, vgic_mmio_write_propbase, 8, 501 vgic_mmio_read_propbase, vgic_mmio_write_propbase, 8,
433 VGIC_ACCESS_64bit | VGIC_ACCESS_32bit), 502 VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
@@ -449,11 +518,13 @@ static const struct vgic_register_region vgic_v3_sgibase_registers[] = {
449 REGISTER_DESC_WITH_LENGTH(GICR_ICENABLER0, 518 REGISTER_DESC_WITH_LENGTH(GICR_ICENABLER0,
450 vgic_mmio_read_enable, vgic_mmio_write_cenable, 4, 519 vgic_mmio_read_enable, vgic_mmio_write_cenable, 4,
451 VGIC_ACCESS_32bit), 520 VGIC_ACCESS_32bit),
452 REGISTER_DESC_WITH_LENGTH(GICR_ISPENDR0, 521 REGISTER_DESC_WITH_LENGTH_UACCESS(GICR_ISPENDR0,
453 vgic_mmio_read_pending, vgic_mmio_write_spending, 4, 522 vgic_mmio_read_pending, vgic_mmio_write_spending,
523 vgic_v3_uaccess_read_pending, vgic_v3_uaccess_write_pending, 4,
454 VGIC_ACCESS_32bit), 524 VGIC_ACCESS_32bit),
455 REGISTER_DESC_WITH_LENGTH(GICR_ICPENDR0, 525 REGISTER_DESC_WITH_LENGTH_UACCESS(GICR_ICPENDR0,
456 vgic_mmio_read_pending, vgic_mmio_write_cpending, 4, 526 vgic_mmio_read_pending, vgic_mmio_write_cpending,
527 vgic_mmio_read_raz, vgic_mmio_write_wi, 4,
457 VGIC_ACCESS_32bit), 528 VGIC_ACCESS_32bit),
458 REGISTER_DESC_WITH_LENGTH(GICR_ISACTIVER0, 529 REGISTER_DESC_WITH_LENGTH(GICR_ISACTIVER0,
459 vgic_mmio_read_active, vgic_mmio_write_sactive, 4, 530 vgic_mmio_read_active, vgic_mmio_write_sactive, 4,
@@ -546,6 +617,54 @@ int vgic_register_redist_iodevs(struct kvm *kvm, gpa_t redist_base_address)
546 return ret; 617 return ret;
547} 618}
548 619
620int vgic_v3_has_attr_regs(struct kvm_device *dev, struct kvm_device_attr *attr)
621{
622 const struct vgic_register_region *region;
623 struct vgic_io_device iodev;
624 struct vgic_reg_attr reg_attr;
625 struct kvm_vcpu *vcpu;
626 gpa_t addr;
627 int ret;
628
629 ret = vgic_v3_parse_attr(dev, attr, &reg_attr);
630 if (ret)
631 return ret;
632
633 vcpu = reg_attr.vcpu;
634 addr = reg_attr.addr;
635
636 switch (attr->group) {
637 case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
638 iodev.regions = vgic_v3_dist_registers;
639 iodev.nr_regions = ARRAY_SIZE(vgic_v3_dist_registers);
640 iodev.base_addr = 0;
641 break;
642 case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS:{
643 iodev.regions = vgic_v3_rdbase_registers;
644 iodev.nr_regions = ARRAY_SIZE(vgic_v3_rdbase_registers);
645 iodev.base_addr = 0;
646 break;
647 }
648 case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS: {
649 u64 reg, id;
650
651 id = (attr->attr & KVM_DEV_ARM_VGIC_SYSREG_INSTR_MASK);
652 return vgic_v3_has_cpu_sysregs_attr(vcpu, 0, id, &reg);
653 }
654 default:
655 return -ENXIO;
656 }
657
658 /* We only support aligned 32-bit accesses. */
659 if (addr & 3)
660 return -ENXIO;
661
662 region = vgic_get_mmio_region(vcpu, &iodev, addr, sizeof(u32));
663 if (!region)
664 return -ENXIO;
665
666 return 0;
667}
549/* 668/*
550 * Compare a given affinity (level 1-3 and a level 0 mask, from the SGI 669 * Compare a given affinity (level 1-3 and a level 0 mask, from the SGI
551 * generation register ICC_SGI1R_EL1) with a given VCPU. 670 * generation register ICC_SGI1R_EL1) with a given VCPU.
@@ -646,9 +765,55 @@ void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg)
646 irq = vgic_get_irq(vcpu->kvm, c_vcpu, sgi); 765 irq = vgic_get_irq(vcpu->kvm, c_vcpu, sgi);
647 766
648 spin_lock(&irq->irq_lock); 767 spin_lock(&irq->irq_lock);
649 irq->pending = true; 768 irq->pending_latch = true;
650 769
651 vgic_queue_irq_unlock(vcpu->kvm, irq); 770 vgic_queue_irq_unlock(vcpu->kvm, irq);
652 vgic_put_irq(vcpu->kvm, irq); 771 vgic_put_irq(vcpu->kvm, irq);
653 } 772 }
654} 773}
774
775int vgic_v3_dist_uaccess(struct kvm_vcpu *vcpu, bool is_write,
776 int offset, u32 *val)
777{
778 struct vgic_io_device dev = {
779 .regions = vgic_v3_dist_registers,
780 .nr_regions = ARRAY_SIZE(vgic_v3_dist_registers),
781 };
782
783 return vgic_uaccess(vcpu, &dev, is_write, offset, val);
784}
785
786int vgic_v3_redist_uaccess(struct kvm_vcpu *vcpu, bool is_write,
787 int offset, u32 *val)
788{
789 struct vgic_io_device rd_dev = {
790 .regions = vgic_v3_rdbase_registers,
791 .nr_regions = ARRAY_SIZE(vgic_v3_rdbase_registers),
792 };
793
794 struct vgic_io_device sgi_dev = {
795 .regions = vgic_v3_sgibase_registers,
796 .nr_regions = ARRAY_SIZE(vgic_v3_sgibase_registers),
797 };
798
799 /* SGI_base is the next 64K frame after RD_base */
800 if (offset >= SZ_64K)
801 return vgic_uaccess(vcpu, &sgi_dev, is_write, offset - SZ_64K,
802 val);
803 else
804 return vgic_uaccess(vcpu, &rd_dev, is_write, offset, val);
805}
806
807int vgic_v3_line_level_info_uaccess(struct kvm_vcpu *vcpu, bool is_write,
808 u32 intid, u64 *val)
809{
810 if (intid % 32)
811 return -EINVAL;
812
813 if (is_write)
814 vgic_write_irq_line_level_info(vcpu, intid, *val);
815 else
816 *val = vgic_read_irq_line_level_info(vcpu, intid);
817
818 return 0;
819}
diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c
index ebe1b9fa3c4d..3654b4c835ef 100644
--- a/virt/kvm/arm/vgic/vgic-mmio.c
+++ b/virt/kvm/arm/vgic/vgic-mmio.c
@@ -111,7 +111,7 @@ unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
111 for (i = 0; i < len * 8; i++) { 111 for (i = 0; i < len * 8; i++) {
112 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); 112 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
113 113
114 if (irq->pending) 114 if (irq_is_pending(irq))
115 value |= (1U << i); 115 value |= (1U << i);
116 116
117 vgic_put_irq(vcpu->kvm, irq); 117 vgic_put_irq(vcpu->kvm, irq);
@@ -131,9 +131,7 @@ void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
131 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); 131 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
132 132
133 spin_lock(&irq->irq_lock); 133 spin_lock(&irq->irq_lock);
134 irq->pending = true; 134 irq->pending_latch = true;
135 if (irq->config == VGIC_CONFIG_LEVEL)
136 irq->soft_pending = true;
137 135
138 vgic_queue_irq_unlock(vcpu->kvm, irq); 136 vgic_queue_irq_unlock(vcpu->kvm, irq);
139 vgic_put_irq(vcpu->kvm, irq); 137 vgic_put_irq(vcpu->kvm, irq);
@@ -152,12 +150,7 @@ void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
152 150
153 spin_lock(&irq->irq_lock); 151 spin_lock(&irq->irq_lock);
154 152
155 if (irq->config == VGIC_CONFIG_LEVEL) { 153 irq->pending_latch = false;
156 irq->soft_pending = false;
157 irq->pending = irq->line_level;
158 } else {
159 irq->pending = false;
160 }
161 154
162 spin_unlock(&irq->irq_lock); 155 spin_unlock(&irq->irq_lock);
163 vgic_put_irq(vcpu->kvm, irq); 156 vgic_put_irq(vcpu->kvm, irq);
@@ -359,18 +352,70 @@ void vgic_mmio_write_config(struct kvm_vcpu *vcpu,
359 irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); 352 irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
360 spin_lock(&irq->irq_lock); 353 spin_lock(&irq->irq_lock);
361 354
362 if (test_bit(i * 2 + 1, &val)) { 355 if (test_bit(i * 2 + 1, &val))
363 irq->config = VGIC_CONFIG_EDGE; 356 irq->config = VGIC_CONFIG_EDGE;
364 } else { 357 else
365 irq->config = VGIC_CONFIG_LEVEL; 358 irq->config = VGIC_CONFIG_LEVEL;
366 irq->pending = irq->line_level | irq->soft_pending;
367 }
368 359
369 spin_unlock(&irq->irq_lock); 360 spin_unlock(&irq->irq_lock);
370 vgic_put_irq(vcpu->kvm, irq); 361 vgic_put_irq(vcpu->kvm, irq);
371 } 362 }
372} 363}
373 364
365u64 vgic_read_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid)
366{
367 int i;
368 u64 val = 0;
369 int nr_irqs = vcpu->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;
370
371 for (i = 0; i < 32; i++) {
372 struct vgic_irq *irq;
373
374 if ((intid + i) < VGIC_NR_SGIS || (intid + i) >= nr_irqs)
375 continue;
376
377 irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
378 if (irq->config == VGIC_CONFIG_LEVEL && irq->line_level)
379 val |= (1U << i);
380
381 vgic_put_irq(vcpu->kvm, irq);
382 }
383
384 return val;
385}
386
387void vgic_write_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid,
388 const u64 val)
389{
390 int i;
391 int nr_irqs = vcpu->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;
392
393 for (i = 0; i < 32; i++) {
394 struct vgic_irq *irq;
395 bool new_level;
396
397 if ((intid + i) < VGIC_NR_SGIS || (intid + i) >= nr_irqs)
398 continue;
399
400 irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
401
402 /*
403 * Line level is set irrespective of irq type
404 * (level or edge) to avoid dependency that VM should
405 * restore irq config before line level.
406 */
407 new_level = !!(val & (1U << i));
408 spin_lock(&irq->irq_lock);
409 irq->line_level = new_level;
410 if (new_level)
411 vgic_queue_irq_unlock(vcpu->kvm, irq);
412 else
413 spin_unlock(&irq->irq_lock);
414
415 vgic_put_irq(vcpu->kvm, irq);
416 }
417}
418
374static int match_region(const void *key, const void *elt) 419static int match_region(const void *key, const void *elt)
375{ 420{
376 const unsigned int offset = (unsigned long)key; 421 const unsigned int offset = (unsigned long)key;
@@ -394,6 +439,22 @@ vgic_find_mmio_region(const struct vgic_register_region *region, int nr_regions,
394 sizeof(region[0]), match_region); 439 sizeof(region[0]), match_region);
395} 440}
396 441
442void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
443{
444 if (kvm_vgic_global_state.type == VGIC_V2)
445 vgic_v2_set_vmcr(vcpu, vmcr);
446 else
447 vgic_v3_set_vmcr(vcpu, vmcr);
448}
449
450void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
451{
452 if (kvm_vgic_global_state.type == VGIC_V2)
453 vgic_v2_get_vmcr(vcpu, vmcr);
454 else
455 vgic_v3_get_vmcr(vcpu, vmcr);
456}
457
397/* 458/*
398 * kvm_mmio_read_buf() returns a value in a format where it can be converted 459 * kvm_mmio_read_buf() returns a value in a format where it can be converted
399 * to a byte array and be directly observed as the guest wanted it to appear 460 * to a byte array and be directly observed as the guest wanted it to appear
@@ -484,6 +545,74 @@ static bool check_region(const struct kvm *kvm,
484 return false; 545 return false;
485} 546}
486 547
548const struct vgic_register_region *
549vgic_get_mmio_region(struct kvm_vcpu *vcpu, struct vgic_io_device *iodev,
550 gpa_t addr, int len)
551{
552 const struct vgic_register_region *region;
553
554 region = vgic_find_mmio_region(iodev->regions, iodev->nr_regions,
555 addr - iodev->base_addr);
556 if (!region || !check_region(vcpu->kvm, region, addr, len))
557 return NULL;
558
559 return region;
560}
561
562static int vgic_uaccess_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
563 gpa_t addr, u32 *val)
564{
565 struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
566 const struct vgic_register_region *region;
567 struct kvm_vcpu *r_vcpu;
568
569 region = vgic_get_mmio_region(vcpu, iodev, addr, sizeof(u32));
570 if (!region) {
571 *val = 0;
572 return 0;
573 }
574
575 r_vcpu = iodev->redist_vcpu ? iodev->redist_vcpu : vcpu;
576 if (region->uaccess_read)
577 *val = region->uaccess_read(r_vcpu, addr, sizeof(u32));
578 else
579 *val = region->read(r_vcpu, addr, sizeof(u32));
580
581 return 0;
582}
583
584static int vgic_uaccess_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
585 gpa_t addr, const u32 *val)
586{
587 struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
588 const struct vgic_register_region *region;
589 struct kvm_vcpu *r_vcpu;
590
591 region = vgic_get_mmio_region(vcpu, iodev, addr, sizeof(u32));
592 if (!region)
593 return 0;
594
595 r_vcpu = iodev->redist_vcpu ? iodev->redist_vcpu : vcpu;
596 if (region->uaccess_write)
597 region->uaccess_write(r_vcpu, addr, sizeof(u32), *val);
598 else
599 region->write(r_vcpu, addr, sizeof(u32), *val);
600
601 return 0;
602}
603
604/*
605 * Userland access to VGIC registers.
606 */
607int vgic_uaccess(struct kvm_vcpu *vcpu, struct vgic_io_device *dev,
608 bool is_write, int offset, u32 *val)
609{
610 if (is_write)
611 return vgic_uaccess_write(vcpu, &dev->dev, offset, val);
612 else
613 return vgic_uaccess_read(vcpu, &dev->dev, offset, val);
614}
615
487static int dispatch_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev, 616static int dispatch_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
488 gpa_t addr, int len, void *val) 617 gpa_t addr, int len, void *val)
489{ 618{
@@ -491,9 +620,8 @@ static int dispatch_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
491 const struct vgic_register_region *region; 620 const struct vgic_register_region *region;
492 unsigned long data = 0; 621 unsigned long data = 0;
493 622
494 region = vgic_find_mmio_region(iodev->regions, iodev->nr_regions, 623 region = vgic_get_mmio_region(vcpu, iodev, addr, len);
495 addr - iodev->base_addr); 624 if (!region) {
496 if (!region || !check_region(vcpu->kvm, region, addr, len)) {
497 memset(val, 0, len); 625 memset(val, 0, len);
498 return 0; 626 return 0;
499 } 627 }
@@ -524,9 +652,8 @@ static int dispatch_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
524 const struct vgic_register_region *region; 652 const struct vgic_register_region *region;
525 unsigned long data = vgic_data_mmio_bus_to_host(val, len); 653 unsigned long data = vgic_data_mmio_bus_to_host(val, len);
526 654
527 region = vgic_find_mmio_region(iodev->regions, iodev->nr_regions, 655 region = vgic_get_mmio_region(vcpu, iodev, addr, len);
528 addr - iodev->base_addr); 656 if (!region)
529 if (!region || !check_region(vcpu->kvm, region, addr, len))
530 return 0; 657 return 0;
531 658
532 switch (iodev->iodev_type) { 659 switch (iodev->iodev_type) {
diff --git a/virt/kvm/arm/vgic/vgic-mmio.h b/virt/kvm/arm/vgic/vgic-mmio.h
index 84961b4e4422..98bb566b660a 100644
--- a/virt/kvm/arm/vgic/vgic-mmio.h
+++ b/virt/kvm/arm/vgic/vgic-mmio.h
@@ -34,6 +34,10 @@ struct vgic_register_region {
34 gpa_t addr, unsigned int len, 34 gpa_t addr, unsigned int len,
35 unsigned long val); 35 unsigned long val);
36 }; 36 };
37 unsigned long (*uaccess_read)(struct kvm_vcpu *vcpu, gpa_t addr,
38 unsigned int len);
39 void (*uaccess_write)(struct kvm_vcpu *vcpu, gpa_t addr,
40 unsigned int len, unsigned long val);
37}; 41};
38 42
39extern struct kvm_io_device_ops kvm_io_gic_ops; 43extern struct kvm_io_device_ops kvm_io_gic_ops;
@@ -86,6 +90,18 @@ extern struct kvm_io_device_ops kvm_io_gic_ops;
86 .write = wr, \ 90 .write = wr, \
87 } 91 }
88 92
93#define REGISTER_DESC_WITH_LENGTH_UACCESS(off, rd, wr, urd, uwr, length, acc) \
94 { \
95 .reg_offset = off, \
96 .bits_per_irq = 0, \
97 .len = length, \
98 .access_flags = acc, \
99 .read = rd, \
100 .write = wr, \
101 .uaccess_read = urd, \
102 .uaccess_write = uwr, \
103 }
104
89int kvm_vgic_register_mmio_region(struct kvm *kvm, struct kvm_vcpu *vcpu, 105int kvm_vgic_register_mmio_region(struct kvm *kvm, struct kvm_vcpu *vcpu,
90 struct vgic_register_region *reg_desc, 106 struct vgic_register_region *reg_desc,
91 struct vgic_io_device *region, 107 struct vgic_io_device *region,
@@ -158,6 +174,14 @@ void vgic_mmio_write_config(struct kvm_vcpu *vcpu,
158 gpa_t addr, unsigned int len, 174 gpa_t addr, unsigned int len,
159 unsigned long val); 175 unsigned long val);
160 176
177int vgic_uaccess(struct kvm_vcpu *vcpu, struct vgic_io_device *dev,
178 bool is_write, int offset, u32 *val);
179
180u64 vgic_read_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid);
181
182void vgic_write_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid,
183 const u64 val);
184
161unsigned int vgic_v2_init_dist_iodev(struct vgic_io_device *dev); 185unsigned int vgic_v2_init_dist_iodev(struct vgic_io_device *dev);
162 186
163unsigned int vgic_v3_init_dist_iodev(struct vgic_io_device *dev); 187unsigned int vgic_v3_init_dist_iodev(struct vgic_io_device *dev);
diff --git a/virt/kvm/arm/vgic/vgic-v2.c b/virt/kvm/arm/vgic/vgic-v2.c
index 9bab86757fa4..b834ecdf3225 100644
--- a/virt/kvm/arm/vgic/vgic-v2.c
+++ b/virt/kvm/arm/vgic/vgic-v2.c
@@ -104,7 +104,7 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
104 /* Edge is the only case where we preserve the pending bit */ 104 /* Edge is the only case where we preserve the pending bit */
105 if (irq->config == VGIC_CONFIG_EDGE && 105 if (irq->config == VGIC_CONFIG_EDGE &&
106 (val & GICH_LR_PENDING_BIT)) { 106 (val & GICH_LR_PENDING_BIT)) {
107 irq->pending = true; 107 irq->pending_latch = true;
108 108
109 if (vgic_irq_is_sgi(intid)) { 109 if (vgic_irq_is_sgi(intid)) {
110 u32 cpuid = val & GICH_LR_PHYSID_CPUID; 110 u32 cpuid = val & GICH_LR_PHYSID_CPUID;
@@ -120,9 +120,7 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
120 */ 120 */
121 if (irq->config == VGIC_CONFIG_LEVEL) { 121 if (irq->config == VGIC_CONFIG_LEVEL) {
122 if (!(val & GICH_LR_PENDING_BIT)) 122 if (!(val & GICH_LR_PENDING_BIT))
123 irq->soft_pending = false; 123 irq->pending_latch = false;
124
125 irq->pending = irq->line_level || irq->soft_pending;
126 } 124 }
127 125
128 spin_unlock(&irq->irq_lock); 126 spin_unlock(&irq->irq_lock);
@@ -145,11 +143,11 @@ void vgic_v2_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
145{ 143{
146 u32 val = irq->intid; 144 u32 val = irq->intid;
147 145
148 if (irq->pending) { 146 if (irq_is_pending(irq)) {
149 val |= GICH_LR_PENDING_BIT; 147 val |= GICH_LR_PENDING_BIT;
150 148
151 if (irq->config == VGIC_CONFIG_EDGE) 149 if (irq->config == VGIC_CONFIG_EDGE)
152 irq->pending = false; 150 irq->pending_latch = false;
153 151
154 if (vgic_irq_is_sgi(irq->intid)) { 152 if (vgic_irq_is_sgi(irq->intid)) {
155 u32 src = ffs(irq->source); 153 u32 src = ffs(irq->source);
@@ -158,7 +156,7 @@ void vgic_v2_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
158 val |= (src - 1) << GICH_LR_PHYSID_CPUID_SHIFT; 156 val |= (src - 1) << GICH_LR_PHYSID_CPUID_SHIFT;
159 irq->source &= ~(1 << (src - 1)); 157 irq->source &= ~(1 << (src - 1));
160 if (irq->source) 158 if (irq->source)
161 irq->pending = true; 159 irq->pending_latch = true;
162 } 160 }
163 } 161 }
164 162
@@ -293,8 +291,6 @@ int vgic_v2_map_resources(struct kvm *kvm)
293 dist->ready = true; 291 dist->ready = true;
294 292
295out: 293out:
296 if (ret)
297 kvm_vgic_destroy(kvm);
298 return ret; 294 return ret;
299} 295}
300 296
diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c
index 5c9f9745e6ca..edc6ee2dc852 100644
--- a/virt/kvm/arm/vgic/vgic-v3.c
+++ b/virt/kvm/arm/vgic/vgic-v3.c
@@ -94,7 +94,7 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
94 /* Edge is the only case where we preserve the pending bit */ 94 /* Edge is the only case where we preserve the pending bit */
95 if (irq->config == VGIC_CONFIG_EDGE && 95 if (irq->config == VGIC_CONFIG_EDGE &&
96 (val & ICH_LR_PENDING_BIT)) { 96 (val & ICH_LR_PENDING_BIT)) {
97 irq->pending = true; 97 irq->pending_latch = true;
98 98
99 if (vgic_irq_is_sgi(intid) && 99 if (vgic_irq_is_sgi(intid) &&
100 model == KVM_DEV_TYPE_ARM_VGIC_V2) { 100 model == KVM_DEV_TYPE_ARM_VGIC_V2) {
@@ -111,9 +111,7 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
111 */ 111 */
112 if (irq->config == VGIC_CONFIG_LEVEL) { 112 if (irq->config == VGIC_CONFIG_LEVEL) {
113 if (!(val & ICH_LR_PENDING_BIT)) 113 if (!(val & ICH_LR_PENDING_BIT))
114 irq->soft_pending = false; 114 irq->pending_latch = false;
115
116 irq->pending = irq->line_level || irq->soft_pending;
117 } 115 }
118 116
119 spin_unlock(&irq->irq_lock); 117 spin_unlock(&irq->irq_lock);
@@ -127,11 +125,11 @@ void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
127 u32 model = vcpu->kvm->arch.vgic.vgic_model; 125 u32 model = vcpu->kvm->arch.vgic.vgic_model;
128 u64 val = irq->intid; 126 u64 val = irq->intid;
129 127
130 if (irq->pending) { 128 if (irq_is_pending(irq)) {
131 val |= ICH_LR_PENDING_BIT; 129 val |= ICH_LR_PENDING_BIT;
132 130
133 if (irq->config == VGIC_CONFIG_EDGE) 131 if (irq->config == VGIC_CONFIG_EDGE)
134 irq->pending = false; 132 irq->pending_latch = false;
135 133
136 if (vgic_irq_is_sgi(irq->intid) && 134 if (vgic_irq_is_sgi(irq->intid) &&
137 model == KVM_DEV_TYPE_ARM_VGIC_V2) { 135 model == KVM_DEV_TYPE_ARM_VGIC_V2) {
@@ -141,7 +139,7 @@ void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
141 val |= (src - 1) << GICH_LR_PHYSID_CPUID_SHIFT; 139 val |= (src - 1) << GICH_LR_PHYSID_CPUID_SHIFT;
142 irq->source &= ~(1 << (src - 1)); 140 irq->source &= ~(1 << (src - 1));
143 if (irq->source) 141 if (irq->source)
144 irq->pending = true; 142 irq->pending_latch = true;
145 } 143 }
146 } 144 }
147 145
@@ -177,10 +175,18 @@ void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
177{ 175{
178 u32 vmcr; 176 u32 vmcr;
179 177
180 vmcr = (vmcrp->ctlr << ICH_VMCR_CTLR_SHIFT) & ICH_VMCR_CTLR_MASK; 178 /*
179 * Ignore the FIQen bit, because GIC emulation always implies
180 * SRE=1 which means the vFIQEn bit is also RES1.
181 */
182 vmcr = ((vmcrp->ctlr >> ICC_CTLR_EL1_EOImode_SHIFT) <<
183 ICH_VMCR_EOIM_SHIFT) & ICH_VMCR_EOIM_MASK;
184 vmcr |= (vmcrp->ctlr << ICH_VMCR_CBPR_SHIFT) & ICH_VMCR_CBPR_MASK;
181 vmcr |= (vmcrp->abpr << ICH_VMCR_BPR1_SHIFT) & ICH_VMCR_BPR1_MASK; 185 vmcr |= (vmcrp->abpr << ICH_VMCR_BPR1_SHIFT) & ICH_VMCR_BPR1_MASK;
182 vmcr |= (vmcrp->bpr << ICH_VMCR_BPR0_SHIFT) & ICH_VMCR_BPR0_MASK; 186 vmcr |= (vmcrp->bpr << ICH_VMCR_BPR0_SHIFT) & ICH_VMCR_BPR0_MASK;
183 vmcr |= (vmcrp->pmr << ICH_VMCR_PMR_SHIFT) & ICH_VMCR_PMR_MASK; 187 vmcr |= (vmcrp->pmr << ICH_VMCR_PMR_SHIFT) & ICH_VMCR_PMR_MASK;
188 vmcr |= (vmcrp->grpen0 << ICH_VMCR_ENG0_SHIFT) & ICH_VMCR_ENG0_MASK;
189 vmcr |= (vmcrp->grpen1 << ICH_VMCR_ENG1_SHIFT) & ICH_VMCR_ENG1_MASK;
184 190
185 vcpu->arch.vgic_cpu.vgic_v3.vgic_vmcr = vmcr; 191 vcpu->arch.vgic_cpu.vgic_v3.vgic_vmcr = vmcr;
186} 192}
@@ -189,10 +195,18 @@ void vgic_v3_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
189{ 195{
190 u32 vmcr = vcpu->arch.vgic_cpu.vgic_v3.vgic_vmcr; 196 u32 vmcr = vcpu->arch.vgic_cpu.vgic_v3.vgic_vmcr;
191 197
192 vmcrp->ctlr = (vmcr & ICH_VMCR_CTLR_MASK) >> ICH_VMCR_CTLR_SHIFT; 198 /*
199 * Ignore the FIQen bit, because GIC emulation always implies
200 * SRE=1 which means the vFIQEn bit is also RES1.
201 */
202 vmcrp->ctlr = ((vmcr >> ICH_VMCR_EOIM_SHIFT) <<
203 ICC_CTLR_EL1_EOImode_SHIFT) & ICC_CTLR_EL1_EOImode_MASK;
204 vmcrp->ctlr |= (vmcr & ICH_VMCR_CBPR_MASK) >> ICH_VMCR_CBPR_SHIFT;
193 vmcrp->abpr = (vmcr & ICH_VMCR_BPR1_MASK) >> ICH_VMCR_BPR1_SHIFT; 205 vmcrp->abpr = (vmcr & ICH_VMCR_BPR1_MASK) >> ICH_VMCR_BPR1_SHIFT;
194 vmcrp->bpr = (vmcr & ICH_VMCR_BPR0_MASK) >> ICH_VMCR_BPR0_SHIFT; 206 vmcrp->bpr = (vmcr & ICH_VMCR_BPR0_MASK) >> ICH_VMCR_BPR0_SHIFT;
195 vmcrp->pmr = (vmcr & ICH_VMCR_PMR_MASK) >> ICH_VMCR_PMR_SHIFT; 207 vmcrp->pmr = (vmcr & ICH_VMCR_PMR_MASK) >> ICH_VMCR_PMR_SHIFT;
208 vmcrp->grpen0 = (vmcr & ICH_VMCR_ENG0_MASK) >> ICH_VMCR_ENG0_SHIFT;
209 vmcrp->grpen1 = (vmcr & ICH_VMCR_ENG1_MASK) >> ICH_VMCR_ENG1_SHIFT;
196} 210}
197 211
198#define INITIAL_PENDBASER_VALUE \ 212#define INITIAL_PENDBASER_VALUE \
@@ -224,6 +238,13 @@ void vgic_v3_enable(struct kvm_vcpu *vcpu)
224 vgic_v3->vgic_sre = 0; 238 vgic_v3->vgic_sre = 0;
225 } 239 }
226 240
241 vcpu->arch.vgic_cpu.num_id_bits = (kvm_vgic_global_state.ich_vtr_el2 &
242 ICH_VTR_ID_BITS_MASK) >>
243 ICH_VTR_ID_BITS_SHIFT;
244 vcpu->arch.vgic_cpu.num_pri_bits = ((kvm_vgic_global_state.ich_vtr_el2 &
245 ICH_VTR_PRI_BITS_MASK) >>
246 ICH_VTR_PRI_BITS_SHIFT) + 1;
247
227 /* Get the show on the road... */ 248 /* Get the show on the road... */
228 vgic_v3->vgic_hcr = ICH_HCR_EN; 249 vgic_v3->vgic_hcr = ICH_HCR_EN;
229} 250}
@@ -302,8 +323,6 @@ int vgic_v3_map_resources(struct kvm *kvm)
302 dist->ready = true; 323 dist->ready = true;
303 324
304out: 325out:
305 if (ret)
306 kvm_vgic_destroy(kvm);
307 return ret; 326 return ret;
308} 327}
309 328
@@ -324,6 +343,7 @@ int vgic_v3_probe(const struct gic_kvm_info *info)
324 */ 343 */
325 kvm_vgic_global_state.nr_lr = (ich_vtr_el2 & 0xf) + 1; 344 kvm_vgic_global_state.nr_lr = (ich_vtr_el2 & 0xf) + 1;
326 kvm_vgic_global_state.can_emulate_gicv2 = false; 345 kvm_vgic_global_state.can_emulate_gicv2 = false;
346 kvm_vgic_global_state.ich_vtr_el2 = ich_vtr_el2;
327 347
328 if (!info->vcpu.start) { 348 if (!info->vcpu.start) {
329 kvm_info("GICv3: no GICV resource entry\n"); 349 kvm_info("GICv3: no GICV resource entry\n");
diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c
index 6440b56ec90e..654dfd40e449 100644
--- a/virt/kvm/arm/vgic/vgic.c
+++ b/virt/kvm/arm/vgic/vgic.c
@@ -160,7 +160,7 @@ static struct kvm_vcpu *vgic_target_oracle(struct vgic_irq *irq)
160 * If the distributor is disabled, pending interrupts shouldn't be 160 * If the distributor is disabled, pending interrupts shouldn't be
161 * forwarded. 161 * forwarded.
162 */ 162 */
163 if (irq->enabled && irq->pending) { 163 if (irq->enabled && irq_is_pending(irq)) {
164 if (unlikely(irq->target_vcpu && 164 if (unlikely(irq->target_vcpu &&
165 !irq->target_vcpu->kvm->arch.vgic.enabled)) 165 !irq->target_vcpu->kvm->arch.vgic.enabled))
166 return NULL; 166 return NULL;
@@ -204,8 +204,8 @@ static int vgic_irq_cmp(void *priv, struct list_head *a, struct list_head *b)
204 goto out; 204 goto out;
205 } 205 }
206 206
207 penda = irqa->enabled && irqa->pending; 207 penda = irqa->enabled && irq_is_pending(irqa);
208 pendb = irqb->enabled && irqb->pending; 208 pendb = irqb->enabled && irq_is_pending(irqb);
209 209
210 if (!penda || !pendb) { 210 if (!penda || !pendb) {
211 ret = (int)pendb - (int)penda; 211 ret = (int)pendb - (int)penda;
@@ -335,9 +335,22 @@ retry:
335 return true; 335 return true;
336} 336}
337 337
338static int vgic_update_irq_pending(struct kvm *kvm, int cpuid, 338/**
339 unsigned int intid, bool level, 339 * kvm_vgic_inject_irq - Inject an IRQ from a device to the vgic
340 bool mapped_irq) 340 * @kvm: The VM structure pointer
341 * @cpuid: The CPU for PPIs
342 * @intid: The INTID to inject a new state to.
343 * @level: Edge-triggered: true: to trigger the interrupt
344 * false: to ignore the call
345 * Level-sensitive true: raise the input signal
346 * false: lower the input signal
347 *
348 * The VGIC is not concerned with devices being active-LOW or active-HIGH for
349 * level-sensitive interrupts. You can think of the level parameter as 1
350 * being HIGH and 0 being LOW and all devices being active-HIGH.
351 */
352int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid,
353 bool level)
341{ 354{
342 struct kvm_vcpu *vcpu; 355 struct kvm_vcpu *vcpu;
343 struct vgic_irq *irq; 356 struct vgic_irq *irq;
@@ -357,11 +370,6 @@ static int vgic_update_irq_pending(struct kvm *kvm, int cpuid,
357 if (!irq) 370 if (!irq)
358 return -EINVAL; 371 return -EINVAL;
359 372
360 if (irq->hw != mapped_irq) {
361 vgic_put_irq(kvm, irq);
362 return -EINVAL;
363 }
364
365 spin_lock(&irq->irq_lock); 373 spin_lock(&irq->irq_lock);
366 374
367 if (!vgic_validate_injection(irq, level)) { 375 if (!vgic_validate_injection(irq, level)) {
@@ -371,12 +379,10 @@ static int vgic_update_irq_pending(struct kvm *kvm, int cpuid,
371 return 0; 379 return 0;
372 } 380 }
373 381
374 if (irq->config == VGIC_CONFIG_LEVEL) { 382 if (irq->config == VGIC_CONFIG_LEVEL)
375 irq->line_level = level; 383 irq->line_level = level;
376 irq->pending = level || irq->soft_pending; 384 else
377 } else { 385 irq->pending_latch = true;
378 irq->pending = true;
379 }
380 386
381 vgic_queue_irq_unlock(kvm, irq); 387 vgic_queue_irq_unlock(kvm, irq);
382 vgic_put_irq(kvm, irq); 388 vgic_put_irq(kvm, irq);
@@ -384,32 +390,6 @@ static int vgic_update_irq_pending(struct kvm *kvm, int cpuid,
384 return 0; 390 return 0;
385} 391}
386 392
387/**
388 * kvm_vgic_inject_irq - Inject an IRQ from a device to the vgic
389 * @kvm: The VM structure pointer
390 * @cpuid: The CPU for PPIs
391 * @intid: The INTID to inject a new state to.
392 * @level: Edge-triggered: true: to trigger the interrupt
393 * false: to ignore the call
394 * Level-sensitive true: raise the input signal
395 * false: lower the input signal
396 *
397 * The VGIC is not concerned with devices being active-LOW or active-HIGH for
398 * level-sensitive interrupts. You can think of the level parameter as 1
399 * being HIGH and 0 being LOW and all devices being active-HIGH.
400 */
401int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid,
402 bool level)
403{
404 return vgic_update_irq_pending(kvm, cpuid, intid, level, false);
405}
406
407int kvm_vgic_inject_mapped_irq(struct kvm *kvm, int cpuid, unsigned int intid,
408 bool level)
409{
410 return vgic_update_irq_pending(kvm, cpuid, intid, level, true);
411}
412
413int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, u32 virt_irq, u32 phys_irq) 393int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, u32 virt_irq, u32 phys_irq)
414{ 394{
415 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, virt_irq); 395 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, virt_irq);
@@ -689,7 +669,7 @@ int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
689 669
690 list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) { 670 list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
691 spin_lock(&irq->irq_lock); 671 spin_lock(&irq->irq_lock);
692 pending = irq->pending && irq->enabled; 672 pending = irq_is_pending(irq) && irq->enabled;
693 spin_unlock(&irq->irq_lock); 673 spin_unlock(&irq->irq_lock);
694 674
695 if (pending) 675 if (pending)
diff --git a/virt/kvm/arm/vgic/vgic.h b/virt/kvm/arm/vgic/vgic.h
index 859f65c6e056..db28f7cadab2 100644
--- a/virt/kvm/arm/vgic/vgic.h
+++ b/virt/kvm/arm/vgic/vgic.h
@@ -30,13 +30,79 @@
30 30
31#define vgic_irq_is_sgi(intid) ((intid) < VGIC_NR_SGIS) 31#define vgic_irq_is_sgi(intid) ((intid) < VGIC_NR_SGIS)
32 32
33#define VGIC_AFFINITY_0_SHIFT 0
34#define VGIC_AFFINITY_0_MASK (0xffUL << VGIC_AFFINITY_0_SHIFT)
35#define VGIC_AFFINITY_1_SHIFT 8
36#define VGIC_AFFINITY_1_MASK (0xffUL << VGIC_AFFINITY_1_SHIFT)
37#define VGIC_AFFINITY_2_SHIFT 16
38#define VGIC_AFFINITY_2_MASK (0xffUL << VGIC_AFFINITY_2_SHIFT)
39#define VGIC_AFFINITY_3_SHIFT 24
40#define VGIC_AFFINITY_3_MASK (0xffUL << VGIC_AFFINITY_3_SHIFT)
41
42#define VGIC_AFFINITY_LEVEL(reg, level) \
43 ((((reg) & VGIC_AFFINITY_## level ##_MASK) \
44 >> VGIC_AFFINITY_## level ##_SHIFT) << MPIDR_LEVEL_SHIFT(level))
45
46/*
47 * The Userspace encodes the affinity differently from the MPIDR,
48 * Below macro converts vgic userspace format to MPIDR reg format.
49 */
50#define VGIC_TO_MPIDR(val) (VGIC_AFFINITY_LEVEL(val, 0) | \
51 VGIC_AFFINITY_LEVEL(val, 1) | \
52 VGIC_AFFINITY_LEVEL(val, 2) | \
53 VGIC_AFFINITY_LEVEL(val, 3))
54
55/*
56 * As per Documentation/virtual/kvm/devices/arm-vgic-v3.txt,
57 * below macros are defined for CPUREG encoding.
58 */
59#define KVM_REG_ARM_VGIC_SYSREG_OP0_MASK 0x000000000000c000
60#define KVM_REG_ARM_VGIC_SYSREG_OP0_SHIFT 14
61#define KVM_REG_ARM_VGIC_SYSREG_OP1_MASK 0x0000000000003800
62#define KVM_REG_ARM_VGIC_SYSREG_OP1_SHIFT 11
63#define KVM_REG_ARM_VGIC_SYSREG_CRN_MASK 0x0000000000000780
64#define KVM_REG_ARM_VGIC_SYSREG_CRN_SHIFT 7
65#define KVM_REG_ARM_VGIC_SYSREG_CRM_MASK 0x0000000000000078
66#define KVM_REG_ARM_VGIC_SYSREG_CRM_SHIFT 3
67#define KVM_REG_ARM_VGIC_SYSREG_OP2_MASK 0x0000000000000007
68#define KVM_REG_ARM_VGIC_SYSREG_OP2_SHIFT 0
69
70#define KVM_DEV_ARM_VGIC_SYSREG_MASK (KVM_REG_ARM_VGIC_SYSREG_OP0_MASK | \
71 KVM_REG_ARM_VGIC_SYSREG_OP1_MASK | \
72 KVM_REG_ARM_VGIC_SYSREG_CRN_MASK | \
73 KVM_REG_ARM_VGIC_SYSREG_CRM_MASK | \
74 KVM_REG_ARM_VGIC_SYSREG_OP2_MASK)
75
76static inline bool irq_is_pending(struct vgic_irq *irq)
77{
78 if (irq->config == VGIC_CONFIG_EDGE)
79 return irq->pending_latch;
80 else
81 return irq->pending_latch || irq->line_level;
82}
83
33struct vgic_vmcr { 84struct vgic_vmcr {
34 u32 ctlr; 85 u32 ctlr;
35 u32 abpr; 86 u32 abpr;
36 u32 bpr; 87 u32 bpr;
37 u32 pmr; 88 u32 pmr;
89 /* Below member variable are valid only for GICv3 */
90 u32 grpen0;
91 u32 grpen1;
92};
93
94struct vgic_reg_attr {
95 struct kvm_vcpu *vcpu;
96 gpa_t addr;
38}; 97};
39 98
99int vgic_v3_parse_attr(struct kvm_device *dev, struct kvm_device_attr *attr,
100 struct vgic_reg_attr *reg_attr);
101int vgic_v2_parse_attr(struct kvm_device *dev, struct kvm_device_attr *attr,
102 struct vgic_reg_attr *reg_attr);
103const struct vgic_register_region *
104vgic_get_mmio_region(struct kvm_vcpu *vcpu, struct vgic_io_device *iodev,
105 gpa_t addr, int len);
40struct vgic_irq *vgic_get_irq(struct kvm *kvm, struct kvm_vcpu *vcpu, 106struct vgic_irq *vgic_get_irq(struct kvm *kvm, struct kvm_vcpu *vcpu,
41 u32 intid); 107 u32 intid);
42void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq); 108void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq);
@@ -89,9 +155,24 @@ bool vgic_has_its(struct kvm *kvm);
89int kvm_vgic_register_its_device(void); 155int kvm_vgic_register_its_device(void);
90void vgic_enable_lpis(struct kvm_vcpu *vcpu); 156void vgic_enable_lpis(struct kvm_vcpu *vcpu);
91int vgic_its_inject_msi(struct kvm *kvm, struct kvm_msi *msi); 157int vgic_its_inject_msi(struct kvm *kvm, struct kvm_msi *msi);
92 158int vgic_v3_has_attr_regs(struct kvm_device *dev, struct kvm_device_attr *attr);
159int vgic_v3_dist_uaccess(struct kvm_vcpu *vcpu, bool is_write,
160 int offset, u32 *val);
161int vgic_v3_redist_uaccess(struct kvm_vcpu *vcpu, bool is_write,
162 int offset, u32 *val);
163int vgic_v3_cpu_sysregs_uaccess(struct kvm_vcpu *vcpu, bool is_write,
164 u64 id, u64 *val);
165int vgic_v3_has_cpu_sysregs_attr(struct kvm_vcpu *vcpu, bool is_write, u64 id,
166 u64 *reg);
167int vgic_v3_line_level_info_uaccess(struct kvm_vcpu *vcpu, bool is_write,
168 u32 intid, u64 *val);
93int kvm_register_vgic_device(unsigned long type); 169int kvm_register_vgic_device(unsigned long type);
170void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
171void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
94int vgic_lazy_init(struct kvm *kvm); 172int vgic_lazy_init(struct kvm *kvm);
95int vgic_init(struct kvm *kvm); 173int vgic_init(struct kvm *kvm);
96 174
175int vgic_debug_init(struct kvm *kvm);
176int vgic_debug_destroy(struct kvm *kvm);
177
97#endif 178#endif