aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--.mailmap4
-rw-r--r--Documentation/devicetree/bindings/arc/archs-pct.txt2
-rw-r--r--Documentation/devicetree/bindings/arc/pct.txt2
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-rk3x.txt4
-rw-r--r--Documentation/devicetree/bindings/net/cpsw.txt6
-rw-r--r--Documentation/networking/altera_tse.txt6
-rw-r--r--Documentation/networking/ipvlan.txt6
-rw-r--r--Documentation/networking/pktgen.txt6
-rw-r--r--Documentation/networking/vrf.txt2
-rw-r--r--Documentation/networking/xfrm_sync.txt6
-rw-r--r--Documentation/sysctl/vm.txt19
-rw-r--r--MAINTAINERS13
-rw-r--r--Makefile4
-rw-r--r--arch/arc/Kconfig2
-rw-r--r--arch/arc/include/asm/irqflags-arcv2.h36
-rw-r--r--arch/arc/kernel/entry-arcv2.S10
-rw-r--r--arch/arc/kernel/entry-compact.S3
-rw-r--r--arch/arc/mm/init.c4
-rw-r--r--arch/nios2/lib/memset.c2
-rw-r--r--arch/powerpc/include/asm/systbl.h2
-rw-r--r--arch/powerpc/include/asm/unistd.h2
-rw-r--r--arch/powerpc/include/uapi/asm/unistd.h2
-rw-r--r--arch/s390/include/asm/mmu.h2
-rw-r--r--arch/s390/include/asm/mmu_context.h28
-rw-r--r--arch/s390/include/asm/pgalloc.h4
-rw-r--r--arch/s390/include/asm/processor.h2
-rw-r--r--arch/s390/include/asm/tlbflush.h9
-rw-r--r--arch/s390/mm/init.c3
-rw-r--r--arch/s390/mm/mmap.c6
-rw-r--r--arch/s390/mm/pgalloc.c85
-rw-r--r--arch/s390/pci/pci_dma.c16
-rw-r--r--arch/sparc/configs/sparc32_defconfig1
-rw-r--r--arch/sparc/configs/sparc64_defconfig1
-rw-r--r--arch/sparc/include/asm/spitfire.h1
-rw-r--r--arch/sparc/include/uapi/asm/unistd.h4
-rw-r--r--arch/sparc/kernel/cherrs.S14
-rw-r--r--arch/sparc/kernel/cpu.c6
-rw-r--r--arch/sparc/kernel/cpumap.c1
-rw-r--r--arch/sparc/kernel/fpu_traps.S11
-rw-r--r--arch/sparc/kernel/head_64.S32
-rw-r--r--arch/sparc/kernel/misctrap.S12
-rw-r--r--arch/sparc/kernel/pci.c42
-rw-r--r--arch/sparc/kernel/setup_64.c7
-rw-r--r--arch/sparc/kernel/spiterrs.S18
-rw-r--r--arch/sparc/kernel/systbls_32.S2
-rw-r--r--arch/sparc/kernel/systbls_64.S4
-rw-r--r--arch/sparc/kernel/utrap.S3
-rw-r--r--arch/sparc/kernel/vio.c18
-rw-r--r--arch/sparc/kernel/vmlinux.lds.S4
-rw-r--r--arch/sparc/kernel/winfixup.S3
-rw-r--r--arch/sparc/mm/init_64.c3
-rw-r--r--arch/x86/events/amd/core.c2
-rw-r--r--arch/x86/events/intel/core.c1
-rw-r--r--arch/x86/events/intel/lbr.c6
-rw-r--r--arch/x86/events/intel/pt.c75
-rw-r--r--arch/x86/events/intel/pt.h3
-rw-r--r--arch/x86/events/intel/rapl.c1
-rw-r--r--arch/x86/include/asm/perf_event.h4
-rw-r--r--arch/x86/kernel/apic/vector.c3
-rw-r--r--arch/x86/kernel/head_32.S6
-rw-r--r--arch/x86/kvm/vmx.c4
-rw-r--r--arch/x86/mm/setup_nx.c5
-rw-r--r--arch/x86/xen/spinlock.c6
-rw-r--r--drivers/block/rbd.c52
-rw-r--r--drivers/clk/imx/clk-imx6q.c2
-rw-r--r--drivers/cpufreq/cpufreq_governor.c8
-rw-r--r--drivers/cpufreq/intel_pstate.c5
-rw-r--r--drivers/edac/i7core_edac.c2
-rw-r--r--drivers/edac/sb_edac.c2
-rw-r--r--drivers/firmware/efi/vars.c37
-rw-r--r--drivers/gpio/gpio-rcar.c65
-rw-r--r--drivers/gpio/gpiolib-acpi.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c5
-rw-r--r--drivers/gpu/drm/drm_dp_mst_topology.c20
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gpu.c31
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c154
-rw-r--r--drivers/gpu/drm/radeon/evergreen_reg.h46
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c17
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_display.c12
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c10
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fb.c6
-rw-r--r--drivers/hid/hid-ids.h1
-rw-r--r--drivers/hid/usbhid/hid-quirks.c1
-rw-r--r--drivers/hid/wacom_wac.c6
-rw-r--r--drivers/i2c/busses/Kconfig4
-rw-r--r--drivers/i2c/busses/i2c-cpm.c4
-rw-r--r--drivers/i2c/busses/i2c-exynos5.c24
-rw-r--r--drivers/i2c/busses/i2c-ismt.c2
-rw-r--r--drivers/i2c/busses/i2c-rk3x.c1
-rw-r--r--drivers/infiniband/core/cache.c3
-rw-r--r--drivers/infiniband/core/ucm.c4
-rw-r--r--drivers/infiniband/core/ucma.c3
-rw-r--r--drivers/infiniband/core/uverbs_main.c5
-rw-r--r--drivers/infiniband/core/verbs.c3
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_provider.c2
-rw-r--r--drivers/infiniband/hw/cxgb4/cq.c2
-rw-r--r--drivers/infiniband/hw/cxgb4/provider.c2
-rw-r--r--drivers/infiniband/hw/cxgb4/qp.c24
-rw-r--r--drivers/infiniband/hw/mlx5/main.c2
-rw-r--r--drivers/infiniband/hw/nes/nes_nic.c3
-rw-r--r--drivers/infiniband/hw/qib/qib_file_ops.c5
-rw-r--r--drivers/infiniband/sw/rdmavt/qp.c4
-rw-r--r--drivers/md/md.c2
-rw-r--r--drivers/md/raid0.c2
-rw-r--r--drivers/md/raid5.c2
-rw-r--r--drivers/media/usb/usbvision/usbvision-video.c7
-rw-r--r--drivers/media/v4l2-core/videobuf2-core.c20
-rw-r--r--drivers/media/v4l2-core/videobuf2-memops.c2
-rw-r--r--drivers/media/v4l2-core/videobuf2-v4l2.c20
-rw-r--r--drivers/misc/cxl/context.c7
-rw-r--r--drivers/misc/cxl/cxl.h2
-rw-r--r--drivers/misc/cxl/irq.c1
-rw-r--r--drivers/misc/cxl/native.c31
-rw-r--r--drivers/mmc/host/Kconfig1
-rw-r--r--drivers/mmc/host/sdhci-acpi.c81
-rw-r--r--drivers/mmc/host/sunxi-mmc.c5
-rw-r--r--drivers/net/dsa/mv88e6xxx.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c53
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h13
-rw-r--r--drivers/net/ethernet/cadence/macb.c34
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c3
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c6
-rw-r--r--drivers/net/ethernet/marvell/pxa168_eth.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Kconfig1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c34
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/uar.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/vxlan.c50
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/vxlan.h11
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge.c4
-rw-r--r--drivers/net/ethernet/sfc/ef10.c15
-rw-r--r--drivers/net/ethernet/ti/cpsw.c67
-rw-r--r--drivers/net/ethernet/ti/cpsw.h1
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c5
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_wireless.c2
-rw-r--r--drivers/net/phy/at803x.c40
-rw-r--r--drivers/net/usb/lan78xx.c44
-rw-r--r--drivers/net/usb/pegasus.c10
-rw-r--r--drivers/net/usb/smsc75xx.c12
-rw-r--r--drivers/net/usb/smsc95xx.c12
-rw-r--r--drivers/net/wireless/ath/ath9k/ar5008_phy.c8
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_phy.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-8000.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.c13
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/drv.c10
-rw-r--r--drivers/platform/x86/toshiba_acpi.c2
-rw-r--r--drivers/rapidio/devices/rio_mport_cdev.c4
-rw-r--r--drivers/s390/char/sclp_ctl.c12
-rw-r--r--drivers/staging/media/davinci_vpfe/vpfe_video.c54
-rw-r--r--drivers/staging/rdma/hfi1/TODO2
-rw-r--r--drivers/staging/rdma/hfi1/file_ops.c91
-rw-r--r--drivers/staging/rdma/hfi1/mmu_rb.c40
-rw-r--r--drivers/staging/rdma/hfi1/mmu_rb.h3
-rw-r--r--drivers/staging/rdma/hfi1/qp.c2
-rw-r--r--drivers/staging/rdma/hfi1/user_exp_rcv.c11
-rw-r--r--drivers/staging/rdma/hfi1/user_sdma.c33
-rw-r--r--drivers/thermal/hisi_thermal.c4
-rw-r--r--drivers/thermal/thermal_core.c2
-rw-r--r--fs/ceph/mds_client.c6
-rw-r--r--fs/fuse/file.c2
-rw-r--r--fs/ocfs2/dlm/dlmmaster.c2
-rw-r--r--fs/proc/task_mmu.c33
-rw-r--r--fs/udf/super.c4
-rw-r--r--fs/udf/udfdecl.h2
-rw-r--r--fs/udf/unicode.c16
-rw-r--r--include/linux/bpf.h3
-rw-r--r--include/linux/ceph/auth.h10
-rw-r--r--include/linux/ceph/osd_client.h1
-rw-r--r--include/linux/cgroup-defs.h1
-rw-r--r--include/linux/cpuset.h6
-rw-r--r--include/linux/hash.h20
-rw-r--r--include/linux/huge_mm.h5
-rw-r--r--include/linux/if_ether.h5
-rw-r--r--include/linux/lockdep.h8
-rw-r--r--include/linux/mlx5/device.h11
-rw-r--r--include/linux/mm.h4
-rw-r--r--include/linux/net.h10
-rw-r--r--include/linux/netdevice.h2
-rw-r--r--include/media/videobuf2-core.h8
-rw-r--r--include/net/vxlan.h4
-rw-r--r--include/rdma/ib.h16
-rw-r--r--include/sound/hda_i915.h5
-rw-r--r--include/uapi/linux/v4l2-dv-timings.h30
-rw-r--r--kernel/bpf/inode.c7
-rw-r--r--kernel/bpf/syscall.c24
-rw-r--r--kernel/bpf/verifier.c76
-rw-r--r--kernel/cgroup.c7
-rw-r--r--kernel/cpuset.c4
-rw-r--r--kernel/events/core.c55
-rw-r--r--kernel/kcov.c3
-rw-r--r--kernel/kexec_core.c7
-rw-r--r--kernel/locking/lockdep.c37
-rw-r--r--kernel/locking/lockdep_proc.c2
-rw-r--r--kernel/workqueue.c29
-rw-r--r--lib/stackdepot.c4
-rw-r--r--mm/huge_memory.c12
-rw-r--r--mm/memcontrol.c37
-rw-r--r--mm/memory-failure.c10
-rw-r--r--mm/memory.c40
-rw-r--r--mm/migrate.c8
-rw-r--r--mm/page_io.c6
-rw-r--r--mm/swap.c5
-rw-r--r--mm/vmscan.c30
-rw-r--r--net/batman-adv/bat_v.c12
-rw-r--r--net/batman-adv/distributed-arp-table.c17
-rw-r--r--net/batman-adv/hard-interface.c6
-rw-r--r--net/batman-adv/originator.c17
-rw-r--r--net/batman-adv/routing.c9
-rw-r--r--net/batman-adv/send.c6
-rw-r--r--net/batman-adv/soft-interface.c8
-rw-r--r--net/batman-adv/translation-table.c42
-rw-r--r--net/batman-adv/types.h7
-rw-r--r--net/ceph/auth.c8
-rw-r--r--net/ceph/auth_none.c71
-rw-r--r--net/ceph/auth_none.h3
-rw-r--r--net/ceph/auth_x.c21
-rw-r--r--net/ceph/auth_x.h1
-rw-r--r--net/ceph/osd_client.c6
-rw-r--r--net/core/dev.c2
-rw-r--r--net/ipv4/inet_hashtables.c2
-rw-r--r--net/ipv4/ip_gre.c19
-rw-r--r--net/ipv4/ip_tunnel.c4
-rw-r--r--net/ipv6/ila/ila_lwt.c3
-rw-r--r--net/l2tp/l2tp_core.c4
-rw-r--r--net/mac80211/iface.c4
-rw-r--r--net/rds/tcp.c3
-rw-r--r--net/rds/tcp.h4
-rw-r--r--net/rds/tcp_connect.c8
-rw-r--r--net/rds/tcp_listen.c54
-rw-r--r--net/sched/sch_netem.c61
-rw-r--r--net/tipc/node.c5
-rw-r--r--samples/bpf/trace_output_kern.c1
-rw-r--r--sound/hda/ext/hdac_ext_stream.c5
-rw-r--r--sound/hda/hdac_i915.c62
-rw-r--r--sound/pci/hda/hda_intel.c56
-rw-r--r--sound/pci/hda/patch_hdmi.c1
-rw-r--r--sound/pci/hda/patch_realtek.c1
-rw-r--r--sound/soc/codecs/Kconfig1
-rw-r--r--sound/soc/codecs/arizona.c12
-rw-r--r--sound/soc/codecs/arizona.h2
-rw-r--r--sound/soc/codecs/cs35l32.c17
-rw-r--r--sound/soc/codecs/cs47l24.c3
-rw-r--r--sound/soc/codecs/hdac_hdmi.c94
-rw-r--r--sound/soc/codecs/nau8825.c126
-rw-r--r--sound/soc/codecs/rt5640.c2
-rw-r--r--sound/soc/codecs/rt5640.h36
-rw-r--r--sound/soc/codecs/wm5102.c5
-rw-r--r--sound/soc/codecs/wm5110.c2
-rw-r--r--sound/soc/codecs/wm8962.c2
-rw-r--r--sound/soc/codecs/wm8997.c2
-rw-r--r--sound/soc/codecs/wm8998.c2
-rw-r--r--sound/soc/intel/Kconfig1
-rw-r--r--sound/soc/intel/haswell/sst-haswell-ipc.c2
-rw-r--r--sound/soc/intel/skylake/skl-sst-dsp.c5
-rw-r--r--sound/soc/intel/skylake/skl-topology.c42
-rw-r--r--sound/soc/intel/skylake/skl-topology.h8
-rw-r--r--sound/soc/intel/skylake/skl.c32
-rw-r--r--sound/soc/soc-dapm.c7
263 files changed, 2446 insertions, 1230 deletions
diff --git a/.mailmap b/.mailmap
index 90c0aefc276d..c156a8b4d845 100644
--- a/.mailmap
+++ b/.mailmap
@@ -48,6 +48,9 @@ Felix Kuhling <fxkuehl@gmx.de>
48Felix Moeller <felix@derklecks.de> 48Felix Moeller <felix@derklecks.de>
49Filipe Lautert <filipe@icewall.org> 49Filipe Lautert <filipe@icewall.org>
50Franck Bui-Huu <vagabon.xyz@gmail.com> 50Franck Bui-Huu <vagabon.xyz@gmail.com>
51Frank Rowand <frowand.list@gmail.com> <frowand@mvista.com>
52Frank Rowand <frowand.list@gmail.com> <frank.rowand@am.sony.com>
53Frank Rowand <frowand.list@gmail.com> <frank.rowand@sonymobile.com>
51Frank Zago <fzago@systemfabricworks.com> 54Frank Zago <fzago@systemfabricworks.com>
52Greg Kroah-Hartman <greg@echidna.(none)> 55Greg Kroah-Hartman <greg@echidna.(none)>
53Greg Kroah-Hartman <gregkh@suse.de> 56Greg Kroah-Hartman <gregkh@suse.de>
@@ -79,6 +82,7 @@ Kay Sievers <kay.sievers@vrfy.org>
79Kenneth W Chen <kenneth.w.chen@intel.com> 82Kenneth W Chen <kenneth.w.chen@intel.com>
80Konstantin Khlebnikov <koct9i@gmail.com> <k.khlebnikov@samsung.com> 83Konstantin Khlebnikov <koct9i@gmail.com> <k.khlebnikov@samsung.com>
81Koushik <raghavendra.koushik@neterion.com> 84Koushik <raghavendra.koushik@neterion.com>
85Krzysztof Kozlowski <krzk@kernel.org> <k.kozlowski.k@gmail.com>
82Kuninori Morimoto <kuninori.morimoto.gx@renesas.com> 86Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
83Leonid I Ananiev <leonid.i.ananiev@intel.com> 87Leonid I Ananiev <leonid.i.ananiev@intel.com>
84Linas Vepstas <linas@austin.ibm.com> 88Linas Vepstas <linas@austin.ibm.com>
diff --git a/Documentation/devicetree/bindings/arc/archs-pct.txt b/Documentation/devicetree/bindings/arc/archs-pct.txt
index 1ae98b87c640..e4b9dcee6d41 100644
--- a/Documentation/devicetree/bindings/arc/archs-pct.txt
+++ b/Documentation/devicetree/bindings/arc/archs-pct.txt
@@ -2,7 +2,7 @@
2 2
3The ARC HS can be configured with a pipeline performance monitor for counting 3The ARC HS can be configured with a pipeline performance monitor for counting
4CPU and cache events like cache misses and hits. Like conventional PCT there 4CPU and cache events like cache misses and hits. Like conventional PCT there
5are 100+ hardware conditions dynamically mapped to upto 32 counters. 5are 100+ hardware conditions dynamically mapped to up to 32 counters.
6It also supports overflow interrupts. 6It also supports overflow interrupts.
7 7
8Required properties: 8Required properties:
diff --git a/Documentation/devicetree/bindings/arc/pct.txt b/Documentation/devicetree/bindings/arc/pct.txt
index 7b9588444f20..4e874d9a38a6 100644
--- a/Documentation/devicetree/bindings/arc/pct.txt
+++ b/Documentation/devicetree/bindings/arc/pct.txt
@@ -2,7 +2,7 @@
2 2
3The ARC700 can be configured with a pipeline performance monitor for counting 3The ARC700 can be configured with a pipeline performance monitor for counting
4CPU and cache events like cache misses and hits. Like conventional PCT there 4CPU and cache events like cache misses and hits. Like conventional PCT there
5are 100+ hardware conditions dynamically mapped to upto 32 counters 5are 100+ hardware conditions dynamically mapped to up to 32 counters
6 6
7Note that: 7Note that:
8 * The ARC 700 PCT does not support interrupts; although HW events may be 8 * The ARC 700 PCT does not support interrupts; although HW events may be
diff --git a/Documentation/devicetree/bindings/i2c/i2c-rk3x.txt b/Documentation/devicetree/bindings/i2c/i2c-rk3x.txt
index f0d71bc52e64..0b4a85fe2d86 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-rk3x.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-rk3x.txt
@@ -6,8 +6,8 @@ RK3xxx SoCs.
6Required properties : 6Required properties :
7 7
8 - reg : Offset and length of the register set for the device 8 - reg : Offset and length of the register set for the device
9 - compatible : should be "rockchip,rk3066-i2c", "rockchip,rk3188-i2c" or 9 - compatible : should be "rockchip,rk3066-i2c", "rockchip,rk3188-i2c",
10 "rockchip,rk3288-i2c". 10 "rockchip,rk3228-i2c" or "rockchip,rk3288-i2c".
11 - interrupts : interrupt number 11 - interrupts : interrupt number
12 - clocks : parent clock 12 - clocks : parent clock
13 13
diff --git a/Documentation/devicetree/bindings/net/cpsw.txt b/Documentation/devicetree/bindings/net/cpsw.txt
index 28a4781ab6d7..0ae06491b430 100644
--- a/Documentation/devicetree/bindings/net/cpsw.txt
+++ b/Documentation/devicetree/bindings/net/cpsw.txt
@@ -45,13 +45,13 @@ Required properties:
45Optional properties: 45Optional properties:
46- dual_emac_res_vlan : Specifies VID to be used to segregate the ports 46- dual_emac_res_vlan : Specifies VID to be used to segregate the ports
47- mac-address : See ethernet.txt file in the same directory 47- mac-address : See ethernet.txt file in the same directory
48- phy_id : Specifies slave phy id 48- phy_id : Specifies slave phy id (deprecated, use phy-handle)
49- phy-handle : See ethernet.txt file in the same directory 49- phy-handle : See ethernet.txt file in the same directory
50 50
51Slave sub-nodes: 51Slave sub-nodes:
52- fixed-link : See fixed-link.txt file in the same directory 52- fixed-link : See fixed-link.txt file in the same directory
53 Either the property phy_id, or the sub-node 53
54 fixed-link can be specified 54Note: Exactly one of phy_id, phy-handle, or fixed-link must be specified.
55 55
56Note: "ti,hwmods" field is used to fetch the base address and irq 56Note: "ti,hwmods" field is used to fetch the base address and irq
57resources from TI, omap hwmod data base during device registration. 57resources from TI, omap hwmod data base during device registration.
diff --git a/Documentation/networking/altera_tse.txt b/Documentation/networking/altera_tse.txt
index 3f24df8c6e65..50b8589d12fd 100644
--- a/Documentation/networking/altera_tse.txt
+++ b/Documentation/networking/altera_tse.txt
@@ -6,7 +6,7 @@ This is the driver for the Altera Triple-Speed Ethernet (TSE) controllers
6using the SGDMA and MSGDMA soft DMA IP components. The driver uses the 6using the SGDMA and MSGDMA soft DMA IP components. The driver uses the
7platform bus to obtain component resources. The designs used to test this 7platform bus to obtain component resources. The designs used to test this
8driver were built for a Cyclone(R) V SOC FPGA board, a Cyclone(R) V FPGA board, 8driver were built for a Cyclone(R) V SOC FPGA board, a Cyclone(R) V FPGA board,
9and tested with ARM and NIOS processor hosts seperately. The anticipated use 9and tested with ARM and NIOS processor hosts separately. The anticipated use
10cases are simple communications between an embedded system and an external peer 10cases are simple communications between an embedded system and an external peer
11for status and simple configuration of the embedded system. 11for status and simple configuration of the embedded system.
12 12
@@ -65,14 +65,14 @@ Driver parameters can be also passed in command line by using:
654.1) Transmit process 654.1) Transmit process
66When the driver's transmit routine is called by the kernel, it sets up a 66When the driver's transmit routine is called by the kernel, it sets up a
67transmit descriptor by calling the underlying DMA transmit routine (SGDMA or 67transmit descriptor by calling the underlying DMA transmit routine (SGDMA or
68MSGDMA), and initites a transmit operation. Once the transmit is complete, an 68MSGDMA), and initiates a transmit operation. Once the transmit is complete, an
69interrupt is driven by the transmit DMA logic. The driver handles the transmit 69interrupt is driven by the transmit DMA logic. The driver handles the transmit
70completion in the context of the interrupt handling chain by recycling 70completion in the context of the interrupt handling chain by recycling
71resource required to send and track the requested transmit operation. 71resource required to send and track the requested transmit operation.
72 72
734.2) Receive process 734.2) Receive process
74The driver will post receive buffers to the receive DMA logic during driver 74The driver will post receive buffers to the receive DMA logic during driver
75intialization. Receive buffers may or may not be queued depending upon the 75initialization. Receive buffers may or may not be queued depending upon the
76underlying DMA logic (MSGDMA is able queue receive buffers, SGDMA is not able 76underlying DMA logic (MSGDMA is able queue receive buffers, SGDMA is not able
77to queue receive buffers to the SGDMA receive logic). When a packet is 77to queue receive buffers to the SGDMA receive logic). When a packet is
78received, the DMA logic generates an interrupt. The driver handles a receive 78received, the DMA logic generates an interrupt. The driver handles a receive
diff --git a/Documentation/networking/ipvlan.txt b/Documentation/networking/ipvlan.txt
index cf996394e466..14422f8fcdc4 100644
--- a/Documentation/networking/ipvlan.txt
+++ b/Documentation/networking/ipvlan.txt
@@ -8,7 +8,7 @@ Initial Release:
8 This is conceptually very similar to the macvlan driver with one major 8 This is conceptually very similar to the macvlan driver with one major
9exception of using L3 for mux-ing /demux-ing among slaves. This property makes 9exception of using L3 for mux-ing /demux-ing among slaves. This property makes
10the master device share the L2 with it's slave devices. I have developed this 10the master device share the L2 with it's slave devices. I have developed this
11driver in conjuntion with network namespaces and not sure if there is use case 11driver in conjunction with network namespaces and not sure if there is use case
12outside of it. 12outside of it.
13 13
14 14
@@ -42,7 +42,7 @@ out. In this mode the slaves will RX/TX multicast and broadcast (if applicable)
42as well. 42as well.
43 43
444.2 L3 mode: 444.2 L3 mode:
45 In this mode TX processing upto L3 happens on the stack instance attached 45 In this mode TX processing up to L3 happens on the stack instance attached
46to the slave device and packets are switched to the stack instance of the 46to the slave device and packets are switched to the stack instance of the
47master device for the L2 processing and routing from that instance will be 47master device for the L2 processing and routing from that instance will be
48used before packets are queued on the outbound device. In this mode the slaves 48used before packets are queued on the outbound device. In this mode the slaves
@@ -56,7 +56,7 @@ situations defines your use case then you can choose to use ipvlan -
56 (a) The Linux host that is connected to the external switch / router has 56 (a) The Linux host that is connected to the external switch / router has
57policy configured that allows only one mac per port. 57policy configured that allows only one mac per port.
58 (b) No of virtual devices created on a master exceed the mac capacity and 58 (b) No of virtual devices created on a master exceed the mac capacity and
59puts the NIC in promiscous mode and degraded performance is a concern. 59puts the NIC in promiscuous mode and degraded performance is a concern.
60 (c) If the slave device is to be put into the hostile / untrusted network 60 (c) If the slave device is to be put into the hostile / untrusted network
61namespace where L2 on the slave could be changed / misused. 61namespace where L2 on the slave could be changed / misused.
62 62
diff --git a/Documentation/networking/pktgen.txt b/Documentation/networking/pktgen.txt
index f4be85e96005..2c4e3354e128 100644
--- a/Documentation/networking/pktgen.txt
+++ b/Documentation/networking/pktgen.txt
@@ -67,12 +67,12 @@ The two basic thread commands are:
67 * add_device DEVICE@NAME -- adds a single device 67 * add_device DEVICE@NAME -- adds a single device
68 * rem_device_all -- remove all associated devices 68 * rem_device_all -- remove all associated devices
69 69
70When adding a device to a thread, a corrosponding procfile is created 70When adding a device to a thread, a corresponding procfile is created
71which is used for configuring this device. Thus, device names need to 71which is used for configuring this device. Thus, device names need to
72be unique. 72be unique.
73 73
74To support adding the same device to multiple threads, which is useful 74To support adding the same device to multiple threads, which is useful
75with multi queue NICs, a the device naming scheme is extended with "@": 75with multi queue NICs, the device naming scheme is extended with "@":
76 device@something 76 device@something
77 77
78The part after "@" can be anything, but it is custom to use the thread 78The part after "@" can be anything, but it is custom to use the thread
@@ -221,7 +221,7 @@ Sample scripts
221 221
222A collection of tutorial scripts and helpers for pktgen is in the 222A collection of tutorial scripts and helpers for pktgen is in the
223samples/pktgen directory. The helper parameters.sh file support easy 223samples/pktgen directory. The helper parameters.sh file support easy
224and consistant parameter parsing across the sample scripts. 224and consistent parameter parsing across the sample scripts.
225 225
226Usage example and help: 226Usage example and help:
227 ./pktgen_sample01_simple.sh -i eth4 -m 00:1B:21:3C:9D:F8 -d 192.168.8.2 227 ./pktgen_sample01_simple.sh -i eth4 -m 00:1B:21:3C:9D:F8 -d 192.168.8.2
diff --git a/Documentation/networking/vrf.txt b/Documentation/networking/vrf.txt
index d52aa10cfe91..5da679c573d2 100644
--- a/Documentation/networking/vrf.txt
+++ b/Documentation/networking/vrf.txt
@@ -41,7 +41,7 @@ using an rx_handler which gives the impression that packets flow through
41the VRF device. Similarly on egress routing rules are used to send packets 41the VRF device. Similarly on egress routing rules are used to send packets
42to the VRF device driver before getting sent out the actual interface. This 42to the VRF device driver before getting sent out the actual interface. This
43allows tcpdump on a VRF device to capture all packets into and out of the 43allows tcpdump on a VRF device to capture all packets into and out of the
44VRF as a whole.[1] Similiarly, netfilter [2] and tc rules can be applied 44VRF as a whole.[1] Similarly, netfilter [2] and tc rules can be applied
45using the VRF device to specify rules that apply to the VRF domain as a whole. 45using the VRF device to specify rules that apply to the VRF domain as a whole.
46 46
47[1] Packets in the forwarded state do not flow through the device, so those 47[1] Packets in the forwarded state do not flow through the device, so those
diff --git a/Documentation/networking/xfrm_sync.txt b/Documentation/networking/xfrm_sync.txt
index d7aac9dedeb4..8d88e0f2ec49 100644
--- a/Documentation/networking/xfrm_sync.txt
+++ b/Documentation/networking/xfrm_sync.txt
@@ -4,7 +4,7 @@ Krisztian <hidden@balabit.hu> and others and additional patches
4from Jamal <hadi@cyberus.ca>. 4from Jamal <hadi@cyberus.ca>.
5 5
6The end goal for syncing is to be able to insert attributes + generate 6The end goal for syncing is to be able to insert attributes + generate
7events so that the an SA can be safely moved from one machine to another 7events so that the SA can be safely moved from one machine to another
8for HA purposes. 8for HA purposes.
9The idea is to synchronize the SA so that the takeover machine can do 9The idea is to synchronize the SA so that the takeover machine can do
10the processing of the SA as accurate as possible if it has access to it. 10the processing of the SA as accurate as possible if it has access to it.
@@ -13,7 +13,7 @@ We already have the ability to generate SA add/del/upd events.
13These patches add ability to sync and have accurate lifetime byte (to 13These patches add ability to sync and have accurate lifetime byte (to
14ensure proper decay of SAs) and replay counters to avoid replay attacks 14ensure proper decay of SAs) and replay counters to avoid replay attacks
15with as minimal loss at failover time. 15with as minimal loss at failover time.
16This way a backup stays as closely uptodate as an active member. 16This way a backup stays as closely up-to-date as an active member.
17 17
18Because the above items change for every packet the SA receives, 18Because the above items change for every packet the SA receives,
19it is possible for a lot of the events to be generated. 19it is possible for a lot of the events to be generated.
@@ -163,7 +163,7 @@ If you have an SA that is getting hit by traffic in bursts such that
163there is a period where the timer threshold expires with no packets 163there is a period where the timer threshold expires with no packets
164seen, then an odd behavior is seen as follows: 164seen, then an odd behavior is seen as follows:
165The first packet arrival after a timer expiry will trigger a timeout 165The first packet arrival after a timer expiry will trigger a timeout
166aevent; i.e we dont wait for a timeout period or a packet threshold 166event; i.e we don't wait for a timeout period or a packet threshold
167to be reached. This is done for simplicity and efficiency reasons. 167to be reached. This is done for simplicity and efficiency reasons.
168 168
169-JHS 169-JHS
diff --git a/Documentation/sysctl/vm.txt b/Documentation/sysctl/vm.txt
index cb0368459da3..34a5fece3121 100644
--- a/Documentation/sysctl/vm.txt
+++ b/Documentation/sysctl/vm.txt
@@ -581,15 +581,16 @@ Specify "[Nn]ode" for node order
581"Zone Order" orders the zonelists by zone type, then by node within each 581"Zone Order" orders the zonelists by zone type, then by node within each
582zone. Specify "[Zz]one" for zone order. 582zone. Specify "[Zz]one" for zone order.
583 583
584Specify "[Dd]efault" to request automatic configuration. Autoconfiguration 584Specify "[Dd]efault" to request automatic configuration.
585will select "node" order in following case. 585
586(1) if the DMA zone does not exist or 586On 32-bit, the Normal zone needs to be preserved for allocations accessible
587(2) if the DMA zone comprises greater than 50% of the available memory or 587by the kernel, so "zone" order will be selected.
588(3) if any node's DMA zone comprises greater than 70% of its local memory and 588
589 the amount of local memory is big enough. 589On 64-bit, devices that require DMA32/DMA are relatively rare, so "node"
590 590order will be selected.
591Otherwise, "zone" order will be selected. Default order is recommended unless 591
592this is causing problems for your system/application. 592Default order is recommended unless this is causing problems for your
593system/application.
593 594
594============================================================== 595==============================================================
595 596
diff --git a/MAINTAINERS b/MAINTAINERS
index ab008013cfec..867d6be32cc8 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -4745,7 +4745,7 @@ F: drivers/platform/x86/fujitsu-tablet.c
4745 4745
4746FUSE: FILESYSTEM IN USERSPACE 4746FUSE: FILESYSTEM IN USERSPACE
4747M: Miklos Szeredi <miklos@szeredi.hu> 4747M: Miklos Szeredi <miklos@szeredi.hu>
4748L: fuse-devel@lists.sourceforge.net 4748L: linux-fsdevel@vger.kernel.org
4749W: http://fuse.sourceforge.net/ 4749W: http://fuse.sourceforge.net/
4750T: git git://git.kernel.org/pub/scm/linux/kernel/git/mszeredi/fuse.git 4750T: git git://git.kernel.org/pub/scm/linux/kernel/git/mszeredi/fuse.git
4751S: Maintained 4751S: Maintained
@@ -4904,7 +4904,7 @@ F: net/ipv4/gre_offload.c
4904F: include/net/gre.h 4904F: include/net/gre.h
4905 4905
4906GRETH 10/100/1G Ethernet MAC device driver 4906GRETH 10/100/1G Ethernet MAC device driver
4907M: Kristoffer Glembo <kristoffer@gaisler.com> 4907M: Andreas Larsson <andreas@gaisler.com>
4908L: netdev@vger.kernel.org 4908L: netdev@vger.kernel.org
4909S: Maintained 4909S: Maintained
4910F: drivers/net/ethernet/aeroflex/ 4910F: drivers/net/ethernet/aeroflex/
@@ -6028,7 +6028,7 @@ F: include/scsi/*iscsi*
6028 6028
6029ISCSI EXTENSIONS FOR RDMA (ISER) INITIATOR 6029ISCSI EXTENSIONS FOR RDMA (ISER) INITIATOR
6030M: Or Gerlitz <ogerlitz@mellanox.com> 6030M: Or Gerlitz <ogerlitz@mellanox.com>
6031M: Sagi Grimberg <sagig@mellanox.com> 6031M: Sagi Grimberg <sagi@grimberg.me>
6032M: Roi Dayan <roid@mellanox.com> 6032M: Roi Dayan <roid@mellanox.com>
6033L: linux-rdma@vger.kernel.org 6033L: linux-rdma@vger.kernel.org
6034S: Supported 6034S: Supported
@@ -6038,7 +6038,7 @@ Q: http://patchwork.kernel.org/project/linux-rdma/list/
6038F: drivers/infiniband/ulp/iser/ 6038F: drivers/infiniband/ulp/iser/
6039 6039
6040ISCSI EXTENSIONS FOR RDMA (ISER) TARGET 6040ISCSI EXTENSIONS FOR RDMA (ISER) TARGET
6041M: Sagi Grimberg <sagig@mellanox.com> 6041M: Sagi Grimberg <sagi@grimberg.me>
6042T: git git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending.git master 6042T: git git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending.git master
6043L: linux-rdma@vger.kernel.org 6043L: linux-rdma@vger.kernel.org
6044L: target-devel@vger.kernel.org 6044L: target-devel@vger.kernel.org
@@ -6401,7 +6401,7 @@ F: mm/kmemleak.c
6401F: mm/kmemleak-test.c 6401F: mm/kmemleak-test.c
6402 6402
6403KPROBES 6403KPROBES
6404M: Ananth N Mavinakayanahalli <ananth@in.ibm.com> 6404M: Ananth N Mavinakayanahalli <ananth@linux.vnet.ibm.com>
6405M: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> 6405M: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
6406M: "David S. Miller" <davem@davemloft.net> 6406M: "David S. Miller" <davem@davemloft.net>
6407M: Masami Hiramatsu <mhiramat@kernel.org> 6407M: Masami Hiramatsu <mhiramat@kernel.org>
@@ -10015,7 +10015,8 @@ F: drivers/infiniband/hw/ocrdma/
10015 10015
10016SFC NETWORK DRIVER 10016SFC NETWORK DRIVER
10017M: Solarflare linux maintainers <linux-net-drivers@solarflare.com> 10017M: Solarflare linux maintainers <linux-net-drivers@solarflare.com>
10018M: Shradha Shah <sshah@solarflare.com> 10018M: Edward Cree <ecree@solarflare.com>
10019M: Bert Kenward <bkenward@solarflare.com>
10019L: netdev@vger.kernel.org 10020L: netdev@vger.kernel.org
10020S: Supported 10021S: Supported
10021F: drivers/net/ethernet/sfc/ 10022F: drivers/net/ethernet/sfc/
diff --git a/Makefile b/Makefile
index 9496df81b9a8..7466de60ddc7 100644
--- a/Makefile
+++ b/Makefile
@@ -1,8 +1,8 @@
1VERSION = 4 1VERSION = 4
2PATCHLEVEL = 6 2PATCHLEVEL = 6
3SUBLEVEL = 0 3SUBLEVEL = 0
4EXTRAVERSION = -rc5 4EXTRAVERSION = -rc6
5NAME = Blurry Fish Butt 5NAME = Charred Weasel
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
8# To see a list of typical targets execute "make help" 8# To see a list of typical targets execute "make help"
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index 12d0284a46e5..ec4791ea6911 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -35,8 +35,10 @@ config ARC
35 select NO_BOOTMEM 35 select NO_BOOTMEM
36 select OF 36 select OF
37 select OF_EARLY_FLATTREE 37 select OF_EARLY_FLATTREE
38 select OF_RESERVED_MEM
38 select PERF_USE_VMALLOC 39 select PERF_USE_VMALLOC
39 select HAVE_DEBUG_STACKOVERFLOW 40 select HAVE_DEBUG_STACKOVERFLOW
41 select HAVE_GENERIC_DMA_COHERENT
40 42
41config MIGHT_HAVE_PCI 43config MIGHT_HAVE_PCI
42 bool 44 bool
diff --git a/arch/arc/include/asm/irqflags-arcv2.h b/arch/arc/include/asm/irqflags-arcv2.h
index 37c2f751eebf..d1ec7f6b31e0 100644
--- a/arch/arc/include/asm/irqflags-arcv2.h
+++ b/arch/arc/include/asm/irqflags-arcv2.h
@@ -18,6 +18,12 @@
18#define STATUS_AD_MASK (1<<STATUS_AD_BIT) 18#define STATUS_AD_MASK (1<<STATUS_AD_BIT)
19#define STATUS_IE_MASK (1<<STATUS_IE_BIT) 19#define STATUS_IE_MASK (1<<STATUS_IE_BIT)
20 20
21/* status32 Bits as encoded/expected by CLRI/SETI */
22#define CLRI_STATUS_IE_BIT 4
23
24#define CLRI_STATUS_E_MASK 0xF
25#define CLRI_STATUS_IE_MASK (1 << CLRI_STATUS_IE_BIT)
26
21#define AUX_USER_SP 0x00D 27#define AUX_USER_SP 0x00D
22#define AUX_IRQ_CTRL 0x00E 28#define AUX_IRQ_CTRL 0x00E
23#define AUX_IRQ_ACT 0x043 /* Active Intr across all levels */ 29#define AUX_IRQ_ACT 0x043 /* Active Intr across all levels */
@@ -100,6 +106,13 @@ static inline long arch_local_save_flags(void)
100 : 106 :
101 : "memory"); 107 : "memory");
102 108
109 /* To be compatible with irq_save()/irq_restore()
110 * encode the irq bits as expected by CLRI/SETI
111 * (this was needed to make CONFIG_TRACE_IRQFLAGS work)
112 */
113 temp = (1 << 5) |
114 ((!!(temp & STATUS_IE_MASK)) << CLRI_STATUS_IE_BIT) |
115 (temp & CLRI_STATUS_E_MASK);
103 return temp; 116 return temp;
104} 117}
105 118
@@ -108,7 +121,7 @@ static inline long arch_local_save_flags(void)
108 */ 121 */
109static inline int arch_irqs_disabled_flags(unsigned long flags) 122static inline int arch_irqs_disabled_flags(unsigned long flags)
110{ 123{
111 return !(flags & (STATUS_IE_MASK)); 124 return !(flags & CLRI_STATUS_IE_MASK);
112} 125}
113 126
114static inline int arch_irqs_disabled(void) 127static inline int arch_irqs_disabled(void)
@@ -128,11 +141,32 @@ static inline void arc_softirq_clear(int irq)
128 141
129#else 142#else
130 143
144#ifdef CONFIG_TRACE_IRQFLAGS
145
146.macro TRACE_ASM_IRQ_DISABLE
147 bl trace_hardirqs_off
148.endm
149
150.macro TRACE_ASM_IRQ_ENABLE
151 bl trace_hardirqs_on
152.endm
153
154#else
155
156.macro TRACE_ASM_IRQ_DISABLE
157.endm
158
159.macro TRACE_ASM_IRQ_ENABLE
160.endm
161
162#endif
131.macro IRQ_DISABLE scratch 163.macro IRQ_DISABLE scratch
132 clri 164 clri
165 TRACE_ASM_IRQ_DISABLE
133.endm 166.endm
134 167
135.macro IRQ_ENABLE scratch 168.macro IRQ_ENABLE scratch
169 TRACE_ASM_IRQ_ENABLE
136 seti 170 seti
137.endm 171.endm
138 172
diff --git a/arch/arc/kernel/entry-arcv2.S b/arch/arc/kernel/entry-arcv2.S
index c1264607bbff..7a1c124ff021 100644
--- a/arch/arc/kernel/entry-arcv2.S
+++ b/arch/arc/kernel/entry-arcv2.S
@@ -69,8 +69,11 @@ ENTRY(handle_interrupt)
69 69
70 clri ; To make status32.IE agree with CPU internal state 70 clri ; To make status32.IE agree with CPU internal state
71 71
72 lr r0, [ICAUSE] 72#ifdef CONFIG_TRACE_IRQFLAGS
73 TRACE_ASM_IRQ_DISABLE
74#endif
73 75
76 lr r0, [ICAUSE]
74 mov blink, ret_from_exception 77 mov blink, ret_from_exception
75 78
76 b.d arch_do_IRQ 79 b.d arch_do_IRQ
@@ -169,6 +172,11 @@ END(EV_TLBProtV)
169 172
170.Lrestore_regs: 173.Lrestore_regs:
171 174
175 # Interrpts are actually disabled from this point on, but will get
176 # reenabled after we return from interrupt/exception.
177 # But irq tracer needs to be told now...
178 TRACE_ASM_IRQ_ENABLE
179
172 ld r0, [sp, PT_status32] ; U/K mode at time of entry 180 ld r0, [sp, PT_status32] ; U/K mode at time of entry
173 lr r10, [AUX_IRQ_ACT] 181 lr r10, [AUX_IRQ_ACT]
174 182
diff --git a/arch/arc/kernel/entry-compact.S b/arch/arc/kernel/entry-compact.S
index 431433929189..0cb0abaa0479 100644
--- a/arch/arc/kernel/entry-compact.S
+++ b/arch/arc/kernel/entry-compact.S
@@ -341,6 +341,9 @@ END(call_do_page_fault)
341 341
342.Lrestore_regs: 342.Lrestore_regs:
343 343
344 # Interrpts are actually disabled from this point on, but will get
345 # reenabled after we return from interrupt/exception.
346 # But irq tracer needs to be told now...
344 TRACE_ASM_IRQ_ENABLE 347 TRACE_ASM_IRQ_ENABLE
345 348
346 lr r10, [status32] 349 lr r10, [status32]
diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c
index 7d2c4fbf4f22..5487d0b97400 100644
--- a/arch/arc/mm/init.c
+++ b/arch/arc/mm/init.c
@@ -13,6 +13,7 @@
13#ifdef CONFIG_BLK_DEV_INITRD 13#ifdef CONFIG_BLK_DEV_INITRD
14#include <linux/initrd.h> 14#include <linux/initrd.h>
15#endif 15#endif
16#include <linux/of_fdt.h>
16#include <linux/swap.h> 17#include <linux/swap.h>
17#include <linux/module.h> 18#include <linux/module.h>
18#include <linux/highmem.h> 19#include <linux/highmem.h>
@@ -136,6 +137,9 @@ void __init setup_arch_memory(void)
136 memblock_reserve(__pa(initrd_start), initrd_end - initrd_start); 137 memblock_reserve(__pa(initrd_start), initrd_end - initrd_start);
137#endif 138#endif
138 139
140 early_init_fdt_reserve_self();
141 early_init_fdt_scan_reserved_mem();
142
139 memblock_dump_all(); 143 memblock_dump_all();
140 144
141 /*----------------- node/zones setup --------------------------*/ 145 /*----------------- node/zones setup --------------------------*/
diff --git a/arch/nios2/lib/memset.c b/arch/nios2/lib/memset.c
index c2cfcb121e34..2fcefe720283 100644
--- a/arch/nios2/lib/memset.c
+++ b/arch/nios2/lib/memset.c
@@ -68,7 +68,7 @@ void *memset(void *s, int c, size_t count)
68 "=r" (charcnt), /* %1 Output */ 68 "=r" (charcnt), /* %1 Output */
69 "=r" (dwordcnt), /* %2 Output */ 69 "=r" (dwordcnt), /* %2 Output */
70 "=r" (fill8reg), /* %3 Output */ 70 "=r" (fill8reg), /* %3 Output */
71 "=r" (wrkrega) /* %4 Output */ 71 "=&r" (wrkrega) /* %4 Output only */
72 : "r" (c), /* %5 Input */ 72 : "r" (c), /* %5 Input */
73 "0" (s), /* %0 Input/Output */ 73 "0" (s), /* %0 Input/Output */
74 "1" (count) /* %1 Input/Output */ 74 "1" (count) /* %1 Input/Output */
diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h
index 3fa9df70aa20..2fc5d4db503c 100644
--- a/arch/powerpc/include/asm/systbl.h
+++ b/arch/powerpc/include/asm/systbl.h
@@ -384,3 +384,5 @@ SYSCALL(ni_syscall)
384SYSCALL(ni_syscall) 384SYSCALL(ni_syscall)
385SYSCALL(mlock2) 385SYSCALL(mlock2)
386SYSCALL(copy_file_range) 386SYSCALL(copy_file_range)
387COMPAT_SYS_SPU(preadv2)
388COMPAT_SYS_SPU(pwritev2)
diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h
index 1f2594d45605..cf12c580f6b2 100644
--- a/arch/powerpc/include/asm/unistd.h
+++ b/arch/powerpc/include/asm/unistd.h
@@ -12,7 +12,7 @@
12#include <uapi/asm/unistd.h> 12#include <uapi/asm/unistd.h>
13 13
14 14
15#define NR_syscalls 380 15#define NR_syscalls 382
16 16
17#define __NR__exit __NR_exit 17#define __NR__exit __NR_exit
18 18
diff --git a/arch/powerpc/include/uapi/asm/unistd.h b/arch/powerpc/include/uapi/asm/unistd.h
index 940290d45b08..e9f5f41aa55a 100644
--- a/arch/powerpc/include/uapi/asm/unistd.h
+++ b/arch/powerpc/include/uapi/asm/unistd.h
@@ -390,5 +390,7 @@
390#define __NR_membarrier 365 390#define __NR_membarrier 365
391#define __NR_mlock2 378 391#define __NR_mlock2 378
392#define __NR_copy_file_range 379 392#define __NR_copy_file_range 379
393#define __NR_preadv2 380
394#define __NR_pwritev2 381
393 395
394#endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */ 396#endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
diff --git a/arch/s390/include/asm/mmu.h b/arch/s390/include/asm/mmu.h
index d29ad9545b41..081b2ad99d73 100644
--- a/arch/s390/include/asm/mmu.h
+++ b/arch/s390/include/asm/mmu.h
@@ -11,7 +11,7 @@ typedef struct {
11 spinlock_t list_lock; 11 spinlock_t list_lock;
12 struct list_head pgtable_list; 12 struct list_head pgtable_list;
13 struct list_head gmap_list; 13 struct list_head gmap_list;
14 unsigned long asce_bits; 14 unsigned long asce;
15 unsigned long asce_limit; 15 unsigned long asce_limit;
16 unsigned long vdso_base; 16 unsigned long vdso_base;
17 /* The mmu context allocates 4K page tables. */ 17 /* The mmu context allocates 4K page tables. */
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index d321469eeda7..c837b79b455d 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -26,12 +26,28 @@ static inline int init_new_context(struct task_struct *tsk,
26 mm->context.has_pgste = 0; 26 mm->context.has_pgste = 0;
27 mm->context.use_skey = 0; 27 mm->context.use_skey = 0;
28#endif 28#endif
29 if (mm->context.asce_limit == 0) { 29 switch (mm->context.asce_limit) {
30 case 1UL << 42:
31 /*
32 * forked 3-level task, fall through to set new asce with new
33 * mm->pgd
34 */
35 case 0:
30 /* context created by exec, set asce limit to 4TB */ 36 /* context created by exec, set asce limit to 4TB */
31 mm->context.asce_bits = _ASCE_TABLE_LENGTH |
32 _ASCE_USER_BITS | _ASCE_TYPE_REGION3;
33 mm->context.asce_limit = STACK_TOP_MAX; 37 mm->context.asce_limit = STACK_TOP_MAX;
34 } else if (mm->context.asce_limit == (1UL << 31)) { 38 mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
39 _ASCE_USER_BITS | _ASCE_TYPE_REGION3;
40 break;
41 case 1UL << 53:
42 /* forked 4-level task, set new asce with new mm->pgd */
43 mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
44 _ASCE_USER_BITS | _ASCE_TYPE_REGION2;
45 break;
46 case 1UL << 31:
47 /* forked 2-level compat task, set new asce with new mm->pgd */
48 mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
49 _ASCE_USER_BITS | _ASCE_TYPE_SEGMENT;
50 /* pgd_alloc() did not increase mm->nr_pmds */
35 mm_inc_nr_pmds(mm); 51 mm_inc_nr_pmds(mm);
36 } 52 }
37 crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm)); 53 crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm));
@@ -42,7 +58,7 @@ static inline int init_new_context(struct task_struct *tsk,
42 58
43static inline void set_user_asce(struct mm_struct *mm) 59static inline void set_user_asce(struct mm_struct *mm)
44{ 60{
45 S390_lowcore.user_asce = mm->context.asce_bits | __pa(mm->pgd); 61 S390_lowcore.user_asce = mm->context.asce;
46 if (current->thread.mm_segment.ar4) 62 if (current->thread.mm_segment.ar4)
47 __ctl_load(S390_lowcore.user_asce, 7, 7); 63 __ctl_load(S390_lowcore.user_asce, 7, 7);
48 set_cpu_flag(CIF_ASCE); 64 set_cpu_flag(CIF_ASCE);
@@ -71,7 +87,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
71{ 87{
72 int cpu = smp_processor_id(); 88 int cpu = smp_processor_id();
73 89
74 S390_lowcore.user_asce = next->context.asce_bits | __pa(next->pgd); 90 S390_lowcore.user_asce = next->context.asce;
75 if (prev == next) 91 if (prev == next)
76 return; 92 return;
77 if (MACHINE_HAS_TLB_LC) 93 if (MACHINE_HAS_TLB_LC)
diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h
index 9b3d9b6099f2..da34cb6b1f3b 100644
--- a/arch/s390/include/asm/pgalloc.h
+++ b/arch/s390/include/asm/pgalloc.h
@@ -52,8 +52,8 @@ static inline unsigned long pgd_entry_type(struct mm_struct *mm)
52 return _REGION2_ENTRY_EMPTY; 52 return _REGION2_ENTRY_EMPTY;
53} 53}
54 54
55int crst_table_upgrade(struct mm_struct *, unsigned long limit); 55int crst_table_upgrade(struct mm_struct *);
56void crst_table_downgrade(struct mm_struct *, unsigned long limit); 56void crst_table_downgrade(struct mm_struct *);
57 57
58static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address) 58static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
59{ 59{
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index d6fd22ea270d..18cdede1aeda 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -175,7 +175,7 @@ extern __vector128 init_task_fpu_regs[__NUM_VXRS];
175 regs->psw.mask = PSW_USER_BITS | PSW_MASK_BA; \ 175 regs->psw.mask = PSW_USER_BITS | PSW_MASK_BA; \
176 regs->psw.addr = new_psw; \ 176 regs->psw.addr = new_psw; \
177 regs->gprs[15] = new_stackp; \ 177 regs->gprs[15] = new_stackp; \
178 crst_table_downgrade(current->mm, 1UL << 31); \ 178 crst_table_downgrade(current->mm); \
179 execve_tail(); \ 179 execve_tail(); \
180} while (0) 180} while (0)
181 181
diff --git a/arch/s390/include/asm/tlbflush.h b/arch/s390/include/asm/tlbflush.h
index ca148f7c3eaa..a2e6ef32e054 100644
--- a/arch/s390/include/asm/tlbflush.h
+++ b/arch/s390/include/asm/tlbflush.h
@@ -110,8 +110,7 @@ static inline void __tlb_flush_asce(struct mm_struct *mm, unsigned long asce)
110static inline void __tlb_flush_kernel(void) 110static inline void __tlb_flush_kernel(void)
111{ 111{
112 if (MACHINE_HAS_IDTE) 112 if (MACHINE_HAS_IDTE)
113 __tlb_flush_idte((unsigned long) init_mm.pgd | 113 __tlb_flush_idte(init_mm.context.asce);
114 init_mm.context.asce_bits);
115 else 114 else
116 __tlb_flush_global(); 115 __tlb_flush_global();
117} 116}
@@ -133,8 +132,7 @@ static inline void __tlb_flush_asce(struct mm_struct *mm, unsigned long asce)
133static inline void __tlb_flush_kernel(void) 132static inline void __tlb_flush_kernel(void)
134{ 133{
135 if (MACHINE_HAS_TLB_LC) 134 if (MACHINE_HAS_TLB_LC)
136 __tlb_flush_idte_local((unsigned long) init_mm.pgd | 135 __tlb_flush_idte_local(init_mm.context.asce);
137 init_mm.context.asce_bits);
138 else 136 else
139 __tlb_flush_local(); 137 __tlb_flush_local();
140} 138}
@@ -148,8 +146,7 @@ static inline void __tlb_flush_mm(struct mm_struct * mm)
148 * only ran on the local cpu. 146 * only ran on the local cpu.
149 */ 147 */
150 if (MACHINE_HAS_IDTE && list_empty(&mm->context.gmap_list)) 148 if (MACHINE_HAS_IDTE && list_empty(&mm->context.gmap_list))
151 __tlb_flush_asce(mm, (unsigned long) mm->pgd | 149 __tlb_flush_asce(mm, mm->context.asce);
152 mm->context.asce_bits);
153 else 150 else
154 __tlb_flush_full(mm); 151 __tlb_flush_full(mm);
155} 152}
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index c7b0451397d6..2489b2e917c8 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -89,7 +89,8 @@ void __init paging_init(void)
89 asce_bits = _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH; 89 asce_bits = _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
90 pgd_type = _REGION3_ENTRY_EMPTY; 90 pgd_type = _REGION3_ENTRY_EMPTY;
91 } 91 }
92 S390_lowcore.kernel_asce = (__pa(init_mm.pgd) & PAGE_MASK) | asce_bits; 92 init_mm.context.asce = (__pa(init_mm.pgd) & PAGE_MASK) | asce_bits;
93 S390_lowcore.kernel_asce = init_mm.context.asce;
93 clear_table((unsigned long *) init_mm.pgd, pgd_type, 94 clear_table((unsigned long *) init_mm.pgd, pgd_type,
94 sizeof(unsigned long)*2048); 95 sizeof(unsigned long)*2048);
95 vmem_map_init(); 96 vmem_map_init();
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
index 45c4daa49930..89cf09e5f168 100644
--- a/arch/s390/mm/mmap.c
+++ b/arch/s390/mm/mmap.c
@@ -174,7 +174,7 @@ int s390_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
174 if (!(flags & MAP_FIXED)) 174 if (!(flags & MAP_FIXED))
175 addr = 0; 175 addr = 0;
176 if ((addr + len) >= TASK_SIZE) 176 if ((addr + len) >= TASK_SIZE)
177 return crst_table_upgrade(current->mm, TASK_MAX_SIZE); 177 return crst_table_upgrade(current->mm);
178 return 0; 178 return 0;
179} 179}
180 180
@@ -191,7 +191,7 @@ s390_get_unmapped_area(struct file *filp, unsigned long addr,
191 return area; 191 return area;
192 if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < TASK_MAX_SIZE) { 192 if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < TASK_MAX_SIZE) {
193 /* Upgrade the page table to 4 levels and retry. */ 193 /* Upgrade the page table to 4 levels and retry. */
194 rc = crst_table_upgrade(mm, TASK_MAX_SIZE); 194 rc = crst_table_upgrade(mm);
195 if (rc) 195 if (rc)
196 return (unsigned long) rc; 196 return (unsigned long) rc;
197 area = arch_get_unmapped_area(filp, addr, len, pgoff, flags); 197 area = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
@@ -213,7 +213,7 @@ s390_get_unmapped_area_topdown(struct file *filp, const unsigned long addr,
213 return area; 213 return area;
214 if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < TASK_MAX_SIZE) { 214 if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < TASK_MAX_SIZE) {
215 /* Upgrade the page table to 4 levels and retry. */ 215 /* Upgrade the page table to 4 levels and retry. */
216 rc = crst_table_upgrade(mm, TASK_MAX_SIZE); 216 rc = crst_table_upgrade(mm);
217 if (rc) 217 if (rc)
218 return (unsigned long) rc; 218 return (unsigned long) rc;
219 area = arch_get_unmapped_area_topdown(filp, addr, len, 219 area = arch_get_unmapped_area_topdown(filp, addr, len,
diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c
index f6c3de26cda8..e8b5962ac12a 100644
--- a/arch/s390/mm/pgalloc.c
+++ b/arch/s390/mm/pgalloc.c
@@ -76,81 +76,52 @@ static void __crst_table_upgrade(void *arg)
76 __tlb_flush_local(); 76 __tlb_flush_local();
77} 77}
78 78
79int crst_table_upgrade(struct mm_struct *mm, unsigned long limit) 79int crst_table_upgrade(struct mm_struct *mm)
80{ 80{
81 unsigned long *table, *pgd; 81 unsigned long *table, *pgd;
82 unsigned long entry;
83 int flush;
84 82
85 BUG_ON(limit > TASK_MAX_SIZE); 83 /* upgrade should only happen from 3 to 4 levels */
86 flush = 0; 84 BUG_ON(mm->context.asce_limit != (1UL << 42));
87repeat: 85
88 table = crst_table_alloc(mm); 86 table = crst_table_alloc(mm);
89 if (!table) 87 if (!table)
90 return -ENOMEM; 88 return -ENOMEM;
89
91 spin_lock_bh(&mm->page_table_lock); 90 spin_lock_bh(&mm->page_table_lock);
92 if (mm->context.asce_limit < limit) { 91 pgd = (unsigned long *) mm->pgd;
93 pgd = (unsigned long *) mm->pgd; 92 crst_table_init(table, _REGION2_ENTRY_EMPTY);
94 if (mm->context.asce_limit <= (1UL << 31)) { 93 pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd);
95 entry = _REGION3_ENTRY_EMPTY; 94 mm->pgd = (pgd_t *) table;
96 mm->context.asce_limit = 1UL << 42; 95 mm->context.asce_limit = 1UL << 53;
97 mm->context.asce_bits = _ASCE_TABLE_LENGTH | 96 mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
98 _ASCE_USER_BITS | 97 _ASCE_USER_BITS | _ASCE_TYPE_REGION2;
99 _ASCE_TYPE_REGION3; 98 mm->task_size = mm->context.asce_limit;
100 } else {
101 entry = _REGION2_ENTRY_EMPTY;
102 mm->context.asce_limit = 1UL << 53;
103 mm->context.asce_bits = _ASCE_TABLE_LENGTH |
104 _ASCE_USER_BITS |
105 _ASCE_TYPE_REGION2;
106 }
107 crst_table_init(table, entry);
108 pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd);
109 mm->pgd = (pgd_t *) table;
110 mm->task_size = mm->context.asce_limit;
111 table = NULL;
112 flush = 1;
113 }
114 spin_unlock_bh(&mm->page_table_lock); 99 spin_unlock_bh(&mm->page_table_lock);
115 if (table) 100
116 crst_table_free(mm, table); 101 on_each_cpu(__crst_table_upgrade, mm, 0);
117 if (mm->context.asce_limit < limit)
118 goto repeat;
119 if (flush)
120 on_each_cpu(__crst_table_upgrade, mm, 0);
121 return 0; 102 return 0;
122} 103}
123 104
124void crst_table_downgrade(struct mm_struct *mm, unsigned long limit) 105void crst_table_downgrade(struct mm_struct *mm)
125{ 106{
126 pgd_t *pgd; 107 pgd_t *pgd;
127 108
109 /* downgrade should only happen from 3 to 2 levels (compat only) */
110 BUG_ON(mm->context.asce_limit != (1UL << 42));
111
128 if (current->active_mm == mm) { 112 if (current->active_mm == mm) {
129 clear_user_asce(); 113 clear_user_asce();
130 __tlb_flush_mm(mm); 114 __tlb_flush_mm(mm);
131 } 115 }
132 while (mm->context.asce_limit > limit) { 116
133 pgd = mm->pgd; 117 pgd = mm->pgd;
134 switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) { 118 mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
135 case _REGION_ENTRY_TYPE_R2: 119 mm->context.asce_limit = 1UL << 31;
136 mm->context.asce_limit = 1UL << 42; 120 mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
137 mm->context.asce_bits = _ASCE_TABLE_LENGTH | 121 _ASCE_USER_BITS | _ASCE_TYPE_SEGMENT;
138 _ASCE_USER_BITS | 122 mm->task_size = mm->context.asce_limit;
139 _ASCE_TYPE_REGION3; 123 crst_table_free(mm, (unsigned long *) pgd);
140 break; 124
141 case _REGION_ENTRY_TYPE_R3:
142 mm->context.asce_limit = 1UL << 31;
143 mm->context.asce_bits = _ASCE_TABLE_LENGTH |
144 _ASCE_USER_BITS |
145 _ASCE_TYPE_SEGMENT;
146 break;
147 default:
148 BUG();
149 }
150 mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
151 mm->task_size = mm->context.asce_limit;
152 crst_table_free(mm, (unsigned long *) pgd);
153 }
154 if (current->active_mm == mm) 125 if (current->active_mm == mm)
155 set_user_asce(mm); 126 set_user_asce(mm);
156} 127}
diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c
index e595e89eac65..1ea8c07eab84 100644
--- a/arch/s390/pci/pci_dma.c
+++ b/arch/s390/pci/pci_dma.c
@@ -457,7 +457,7 @@ int zpci_dma_init_device(struct zpci_dev *zdev)
457 zdev->dma_table = dma_alloc_cpu_table(); 457 zdev->dma_table = dma_alloc_cpu_table();
458 if (!zdev->dma_table) { 458 if (!zdev->dma_table) {
459 rc = -ENOMEM; 459 rc = -ENOMEM;
460 goto out_clean; 460 goto out;
461 } 461 }
462 462
463 /* 463 /*
@@ -477,18 +477,22 @@ int zpci_dma_init_device(struct zpci_dev *zdev)
477 zdev->iommu_bitmap = vzalloc(zdev->iommu_pages / 8); 477 zdev->iommu_bitmap = vzalloc(zdev->iommu_pages / 8);
478 if (!zdev->iommu_bitmap) { 478 if (!zdev->iommu_bitmap) {
479 rc = -ENOMEM; 479 rc = -ENOMEM;
480 goto out_reg; 480 goto free_dma_table;
481 } 481 }
482 482
483 rc = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma, 483 rc = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
484 (u64) zdev->dma_table); 484 (u64) zdev->dma_table);
485 if (rc) 485 if (rc)
486 goto out_reg; 486 goto free_bitmap;
487 return 0;
488 487
489out_reg: 488 return 0;
489free_bitmap:
490 vfree(zdev->iommu_bitmap);
491 zdev->iommu_bitmap = NULL;
492free_dma_table:
490 dma_free_cpu_table(zdev->dma_table); 493 dma_free_cpu_table(zdev->dma_table);
491out_clean: 494 zdev->dma_table = NULL;
495out:
492 return rc; 496 return rc;
493} 497}
494 498
diff --git a/arch/sparc/configs/sparc32_defconfig b/arch/sparc/configs/sparc32_defconfig
index fb23fd6b186a..c74d3701ad68 100644
--- a/arch/sparc/configs/sparc32_defconfig
+++ b/arch/sparc/configs/sparc32_defconfig
@@ -24,7 +24,6 @@ CONFIG_INET_AH=y
24CONFIG_INET_ESP=y 24CONFIG_INET_ESP=y
25CONFIG_INET_IPCOMP=y 25CONFIG_INET_IPCOMP=y
26# CONFIG_INET_LRO is not set 26# CONFIG_INET_LRO is not set
27CONFIG_IPV6_PRIVACY=y
28CONFIG_INET6_AH=m 27CONFIG_INET6_AH=m
29CONFIG_INET6_ESP=m 28CONFIG_INET6_ESP=m
30CONFIG_INET6_IPCOMP=m 29CONFIG_INET6_IPCOMP=m
diff --git a/arch/sparc/configs/sparc64_defconfig b/arch/sparc/configs/sparc64_defconfig
index 04920ab8e292..3583d676a916 100644
--- a/arch/sparc/configs/sparc64_defconfig
+++ b/arch/sparc/configs/sparc64_defconfig
@@ -48,7 +48,6 @@ CONFIG_SYN_COOKIES=y
48CONFIG_INET_AH=y 48CONFIG_INET_AH=y
49CONFIG_INET_ESP=y 49CONFIG_INET_ESP=y
50CONFIG_INET_IPCOMP=y 50CONFIG_INET_IPCOMP=y
51CONFIG_IPV6_PRIVACY=y
52CONFIG_IPV6_ROUTER_PREF=y 51CONFIG_IPV6_ROUTER_PREF=y
53CONFIG_IPV6_ROUTE_INFO=y 52CONFIG_IPV6_ROUTE_INFO=y
54CONFIG_IPV6_OPTIMISTIC_DAD=y 53CONFIG_IPV6_OPTIMISTIC_DAD=y
diff --git a/arch/sparc/include/asm/spitfire.h b/arch/sparc/include/asm/spitfire.h
index 56f933816144..1d8321c827a8 100644
--- a/arch/sparc/include/asm/spitfire.h
+++ b/arch/sparc/include/asm/spitfire.h
@@ -48,6 +48,7 @@
48#define SUN4V_CHIP_SPARC_M6 0x06 48#define SUN4V_CHIP_SPARC_M6 0x06
49#define SUN4V_CHIP_SPARC_M7 0x07 49#define SUN4V_CHIP_SPARC_M7 0x07
50#define SUN4V_CHIP_SPARC64X 0x8a 50#define SUN4V_CHIP_SPARC64X 0x8a
51#define SUN4V_CHIP_SPARC_SN 0x8b
51#define SUN4V_CHIP_UNKNOWN 0xff 52#define SUN4V_CHIP_UNKNOWN 0xff
52 53
53#ifndef __ASSEMBLY__ 54#ifndef __ASSEMBLY__
diff --git a/arch/sparc/include/uapi/asm/unistd.h b/arch/sparc/include/uapi/asm/unistd.h
index b6de8b10a55b..36eee8132c22 100644
--- a/arch/sparc/include/uapi/asm/unistd.h
+++ b/arch/sparc/include/uapi/asm/unistd.h
@@ -423,8 +423,10 @@
423#define __NR_setsockopt 355 423#define __NR_setsockopt 355
424#define __NR_mlock2 356 424#define __NR_mlock2 356
425#define __NR_copy_file_range 357 425#define __NR_copy_file_range 357
426#define __NR_preadv2 358
427#define __NR_pwritev2 359
426 428
427#define NR_syscalls 358 429#define NR_syscalls 360
428 430
429/* Bitmask values returned from kern_features system call. */ 431/* Bitmask values returned from kern_features system call. */
430#define KERN_FEATURE_MIXED_MODE_STACK 0x00000001 432#define KERN_FEATURE_MIXED_MODE_STACK 0x00000001
diff --git a/arch/sparc/kernel/cherrs.S b/arch/sparc/kernel/cherrs.S
index 4ee1ad420862..655628def68e 100644
--- a/arch/sparc/kernel/cherrs.S
+++ b/arch/sparc/kernel/cherrs.S
@@ -214,8 +214,7 @@ do_dcpe_tl1_nonfatal: /* Ok we may use interrupt globals safely. */
214 subcc %g1, %g2, %g1 ! Next cacheline 214 subcc %g1, %g2, %g1 ! Next cacheline
215 bge,pt %icc, 1b 215 bge,pt %icc, 1b
216 nop 216 nop
217 ba,pt %xcc, dcpe_icpe_tl1_common 217 ba,a,pt %xcc, dcpe_icpe_tl1_common
218 nop
219 218
220do_dcpe_tl1_fatal: 219do_dcpe_tl1_fatal:
221 sethi %hi(1f), %g7 220 sethi %hi(1f), %g7
@@ -224,8 +223,7 @@ do_dcpe_tl1_fatal:
224 mov 0x2, %o0 223 mov 0x2, %o0
225 call cheetah_plus_parity_error 224 call cheetah_plus_parity_error
226 add %sp, PTREGS_OFF, %o1 225 add %sp, PTREGS_OFF, %o1
227 ba,pt %xcc, rtrap 226 ba,a,pt %xcc, rtrap
228 nop
229 .size do_dcpe_tl1,.-do_dcpe_tl1 227 .size do_dcpe_tl1,.-do_dcpe_tl1
230 228
231 .globl do_icpe_tl1 229 .globl do_icpe_tl1
@@ -259,8 +257,7 @@ do_icpe_tl1_nonfatal: /* Ok we may use interrupt globals safely. */
259 subcc %g1, %g2, %g1 257 subcc %g1, %g2, %g1
260 bge,pt %icc, 1b 258 bge,pt %icc, 1b
261 nop 259 nop
262 ba,pt %xcc, dcpe_icpe_tl1_common 260 ba,a,pt %xcc, dcpe_icpe_tl1_common
263 nop
264 261
265do_icpe_tl1_fatal: 262do_icpe_tl1_fatal:
266 sethi %hi(1f), %g7 263 sethi %hi(1f), %g7
@@ -269,8 +266,7 @@ do_icpe_tl1_fatal:
269 mov 0x3, %o0 266 mov 0x3, %o0
270 call cheetah_plus_parity_error 267 call cheetah_plus_parity_error
271 add %sp, PTREGS_OFF, %o1 268 add %sp, PTREGS_OFF, %o1
272 ba,pt %xcc, rtrap 269 ba,a,pt %xcc, rtrap
273 nop
274 .size do_icpe_tl1,.-do_icpe_tl1 270 .size do_icpe_tl1,.-do_icpe_tl1
275 271
276 .type dcpe_icpe_tl1_common,#function 272 .type dcpe_icpe_tl1_common,#function
@@ -456,7 +452,7 @@ __cheetah_log_error:
456 cmp %g2, 0x63 452 cmp %g2, 0x63
457 be c_cee 453 be c_cee
458 nop 454 nop
459 ba,pt %xcc, c_deferred 455 ba,a,pt %xcc, c_deferred
460 .size __cheetah_log_error,.-__cheetah_log_error 456 .size __cheetah_log_error,.-__cheetah_log_error
461 457
462 /* Cheetah FECC trap handling, we get here from tl{0,1}_fecc 458 /* Cheetah FECC trap handling, we get here from tl{0,1}_fecc
diff --git a/arch/sparc/kernel/cpu.c b/arch/sparc/kernel/cpu.c
index dfad8b1aea9f..493e023a468a 100644
--- a/arch/sparc/kernel/cpu.c
+++ b/arch/sparc/kernel/cpu.c
@@ -506,6 +506,12 @@ static void __init sun4v_cpu_probe(void)
506 sparc_pmu_type = "sparc-m7"; 506 sparc_pmu_type = "sparc-m7";
507 break; 507 break;
508 508
509 case SUN4V_CHIP_SPARC_SN:
510 sparc_cpu_type = "SPARC-SN";
511 sparc_fpu_type = "SPARC-SN integrated FPU";
512 sparc_pmu_type = "sparc-sn";
513 break;
514
509 case SUN4V_CHIP_SPARC64X: 515 case SUN4V_CHIP_SPARC64X:
510 sparc_cpu_type = "SPARC64-X"; 516 sparc_cpu_type = "SPARC64-X";
511 sparc_fpu_type = "SPARC64-X integrated FPU"; 517 sparc_fpu_type = "SPARC64-X integrated FPU";
diff --git a/arch/sparc/kernel/cpumap.c b/arch/sparc/kernel/cpumap.c
index e69ec0e3f155..45c820e1cba5 100644
--- a/arch/sparc/kernel/cpumap.c
+++ b/arch/sparc/kernel/cpumap.c
@@ -328,6 +328,7 @@ static int iterate_cpu(struct cpuinfo_tree *t, unsigned int root_index)
328 case SUN4V_CHIP_NIAGARA5: 328 case SUN4V_CHIP_NIAGARA5:
329 case SUN4V_CHIP_SPARC_M6: 329 case SUN4V_CHIP_SPARC_M6:
330 case SUN4V_CHIP_SPARC_M7: 330 case SUN4V_CHIP_SPARC_M7:
331 case SUN4V_CHIP_SPARC_SN:
331 case SUN4V_CHIP_SPARC64X: 332 case SUN4V_CHIP_SPARC64X:
332 rover_inc_table = niagara_iterate_method; 333 rover_inc_table = niagara_iterate_method;
333 break; 334 break;
diff --git a/arch/sparc/kernel/fpu_traps.S b/arch/sparc/kernel/fpu_traps.S
index a6864826a4bd..336d2750fe78 100644
--- a/arch/sparc/kernel/fpu_traps.S
+++ b/arch/sparc/kernel/fpu_traps.S
@@ -100,8 +100,8 @@ do_fpdis:
100 fmuld %f0, %f2, %f26 100 fmuld %f0, %f2, %f26
101 faddd %f0, %f2, %f28 101 faddd %f0, %f2, %f28
102 fmuld %f0, %f2, %f30 102 fmuld %f0, %f2, %f30
103 b,pt %xcc, fpdis_exit 103 ba,a,pt %xcc, fpdis_exit
104 nop 104
1052: andcc %g5, FPRS_DU, %g0 1052: andcc %g5, FPRS_DU, %g0
106 bne,pt %icc, 3f 106 bne,pt %icc, 3f
107 fzero %f32 107 fzero %f32
@@ -144,8 +144,8 @@ do_fpdis:
144 fmuld %f32, %f34, %f58 144 fmuld %f32, %f34, %f58
145 faddd %f32, %f34, %f60 145 faddd %f32, %f34, %f60
146 fmuld %f32, %f34, %f62 146 fmuld %f32, %f34, %f62
147 ba,pt %xcc, fpdis_exit 147 ba,a,pt %xcc, fpdis_exit
148 nop 148
1493: mov SECONDARY_CONTEXT, %g3 1493: mov SECONDARY_CONTEXT, %g3
150 add %g6, TI_FPREGS, %g1 150 add %g6, TI_FPREGS, %g1
151 151
@@ -197,8 +197,7 @@ fpdis_exit2:
197fp_other_bounce: 197fp_other_bounce:
198 call do_fpother 198 call do_fpother
199 add %sp, PTREGS_OFF, %o0 199 add %sp, PTREGS_OFF, %o0
200 ba,pt %xcc, rtrap 200 ba,a,pt %xcc, rtrap
201 nop
202 .size fp_other_bounce,.-fp_other_bounce 201 .size fp_other_bounce,.-fp_other_bounce
203 202
204 .align 32 203 .align 32
diff --git a/arch/sparc/kernel/head_64.S b/arch/sparc/kernel/head_64.S
index cd1f592cd347..a076b4249e62 100644
--- a/arch/sparc/kernel/head_64.S
+++ b/arch/sparc/kernel/head_64.S
@@ -414,6 +414,8 @@ sun4v_chip_type:
414 cmp %g2, 'T' 414 cmp %g2, 'T'
415 be,pt %xcc, 70f 415 be,pt %xcc, 70f
416 cmp %g2, 'M' 416 cmp %g2, 'M'
417 be,pt %xcc, 70f
418 cmp %g2, 'S'
417 bne,pn %xcc, 49f 419 bne,pn %xcc, 49f
418 nop 420 nop
419 421
@@ -433,6 +435,9 @@ sun4v_chip_type:
433 cmp %g2, '7' 435 cmp %g2, '7'
434 be,pt %xcc, 5f 436 be,pt %xcc, 5f
435 mov SUN4V_CHIP_SPARC_M7, %g4 437 mov SUN4V_CHIP_SPARC_M7, %g4
438 cmp %g2, 'N'
439 be,pt %xcc, 5f
440 mov SUN4V_CHIP_SPARC_SN, %g4
436 ba,pt %xcc, 49f 441 ba,pt %xcc, 49f
437 nop 442 nop
438 443
@@ -461,9 +466,8 @@ sun4v_chip_type:
461 subcc %g3, 1, %g3 466 subcc %g3, 1, %g3
462 bne,pt %xcc, 41b 467 bne,pt %xcc, 41b
463 add %g1, 1, %g1 468 add %g1, 1, %g1
464 mov SUN4V_CHIP_SPARC64X, %g4
465 ba,pt %xcc, 5f 469 ba,pt %xcc, 5f
466 nop 470 mov SUN4V_CHIP_SPARC64X, %g4
467 471
46849: 47249:
469 mov SUN4V_CHIP_UNKNOWN, %g4 473 mov SUN4V_CHIP_UNKNOWN, %g4
@@ -548,8 +552,7 @@ sun4u_init:
548 stxa %g0, [%g7] ASI_DMMU 552 stxa %g0, [%g7] ASI_DMMU
549 membar #Sync 553 membar #Sync
550 554
551 ba,pt %xcc, sun4u_continue 555 ba,a,pt %xcc, sun4u_continue
552 nop
553 556
554sun4v_init: 557sun4v_init:
555 /* Set ctx 0 */ 558 /* Set ctx 0 */
@@ -560,14 +563,12 @@ sun4v_init:
560 mov SECONDARY_CONTEXT, %g7 563 mov SECONDARY_CONTEXT, %g7
561 stxa %g0, [%g7] ASI_MMU 564 stxa %g0, [%g7] ASI_MMU
562 membar #Sync 565 membar #Sync
563 ba,pt %xcc, niagara_tlb_fixup 566 ba,a,pt %xcc, niagara_tlb_fixup
564 nop
565 567
566sun4u_continue: 568sun4u_continue:
567 BRANCH_IF_ANY_CHEETAH(g1, g7, cheetah_tlb_fixup) 569 BRANCH_IF_ANY_CHEETAH(g1, g7, cheetah_tlb_fixup)
568 570
569 ba,pt %xcc, spitfire_tlb_fixup 571 ba,a,pt %xcc, spitfire_tlb_fixup
570 nop
571 572
572niagara_tlb_fixup: 573niagara_tlb_fixup:
573 mov 3, %g2 /* Set TLB type to hypervisor. */ 574 mov 3, %g2 /* Set TLB type to hypervisor. */
@@ -597,6 +598,9 @@ niagara_tlb_fixup:
597 cmp %g1, SUN4V_CHIP_SPARC_M7 598 cmp %g1, SUN4V_CHIP_SPARC_M7
598 be,pt %xcc, niagara4_patch 599 be,pt %xcc, niagara4_patch
599 nop 600 nop
601 cmp %g1, SUN4V_CHIP_SPARC_SN
602 be,pt %xcc, niagara4_patch
603 nop
600 604
601 call generic_patch_copyops 605 call generic_patch_copyops
602 nop 606 nop
@@ -639,8 +643,7 @@ niagara_patch:
639 call hypervisor_patch_cachetlbops 643 call hypervisor_patch_cachetlbops
640 nop 644 nop
641 645
642 ba,pt %xcc, tlb_fixup_done 646 ba,a,pt %xcc, tlb_fixup_done
643 nop
644 647
645cheetah_tlb_fixup: 648cheetah_tlb_fixup:
646 mov 2, %g2 /* Set TLB type to cheetah+. */ 649 mov 2, %g2 /* Set TLB type to cheetah+. */
@@ -659,8 +662,7 @@ cheetah_tlb_fixup:
659 call cheetah_patch_cachetlbops 662 call cheetah_patch_cachetlbops
660 nop 663 nop
661 664
662 ba,pt %xcc, tlb_fixup_done 665 ba,a,pt %xcc, tlb_fixup_done
663 nop
664 666
665spitfire_tlb_fixup: 667spitfire_tlb_fixup:
666 /* Set TLB type to spitfire. */ 668 /* Set TLB type to spitfire. */
@@ -774,8 +776,7 @@ setup_trap_table:
774 call %o1 776 call %o1
775 add %sp, (2047 + 128), %o0 777 add %sp, (2047 + 128), %o0
776 778
777 ba,pt %xcc, 2f 779 ba,a,pt %xcc, 2f
778 nop
779 780
7801: sethi %hi(sparc64_ttable_tl0), %o0 7811: sethi %hi(sparc64_ttable_tl0), %o0
781 set prom_set_trap_table_name, %g2 782 set prom_set_trap_table_name, %g2
@@ -814,8 +815,7 @@ setup_trap_table:
814 815
815 BRANCH_IF_ANY_CHEETAH(o2, o3, 1f) 816 BRANCH_IF_ANY_CHEETAH(o2, o3, 1f)
816 817
817 ba,pt %xcc, 2f 818 ba,a,pt %xcc, 2f
818 nop
819 819
820 /* Disable STICK_INT interrupts. */ 820 /* Disable STICK_INT interrupts. */
8211: 8211:
diff --git a/arch/sparc/kernel/misctrap.S b/arch/sparc/kernel/misctrap.S
index 753b4f031bfb..34b4933900bf 100644
--- a/arch/sparc/kernel/misctrap.S
+++ b/arch/sparc/kernel/misctrap.S
@@ -18,8 +18,7 @@ __do_privact:
18109: or %g7, %lo(109b), %g7 18109: or %g7, %lo(109b), %g7
19 call do_privact 19 call do_privact
20 add %sp, PTREGS_OFF, %o0 20 add %sp, PTREGS_OFF, %o0
21 ba,pt %xcc, rtrap 21 ba,a,pt %xcc, rtrap
22 nop
23 .size __do_privact,.-__do_privact 22 .size __do_privact,.-__do_privact
24 23
25 .type do_mna,#function 24 .type do_mna,#function
@@ -46,8 +45,7 @@ do_mna:
46 mov %l5, %o2 45 mov %l5, %o2
47 call mem_address_unaligned 46 call mem_address_unaligned
48 add %sp, PTREGS_OFF, %o0 47 add %sp, PTREGS_OFF, %o0
49 ba,pt %xcc, rtrap 48 ba,a,pt %xcc, rtrap
50 nop
51 .size do_mna,.-do_mna 49 .size do_mna,.-do_mna
52 50
53 .type do_lddfmna,#function 51 .type do_lddfmna,#function
@@ -65,8 +63,7 @@ do_lddfmna:
65 mov %l5, %o2 63 mov %l5, %o2
66 call handle_lddfmna 64 call handle_lddfmna
67 add %sp, PTREGS_OFF, %o0 65 add %sp, PTREGS_OFF, %o0
68 ba,pt %xcc, rtrap 66 ba,a,pt %xcc, rtrap
69 nop
70 .size do_lddfmna,.-do_lddfmna 67 .size do_lddfmna,.-do_lddfmna
71 68
72 .type do_stdfmna,#function 69 .type do_stdfmna,#function
@@ -84,8 +81,7 @@ do_stdfmna:
84 mov %l5, %o2 81 mov %l5, %o2
85 call handle_stdfmna 82 call handle_stdfmna
86 add %sp, PTREGS_OFF, %o0 83 add %sp, PTREGS_OFF, %o0
87 ba,pt %xcc, rtrap 84 ba,a,pt %xcc, rtrap
88 nop
89 .size do_stdfmna,.-do_stdfmna 85 .size do_stdfmna,.-do_stdfmna
90 86
91 .type breakpoint_trap,#function 87 .type breakpoint_trap,#function
diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c
index badf0951d73c..c2b202d763a1 100644
--- a/arch/sparc/kernel/pci.c
+++ b/arch/sparc/kernel/pci.c
@@ -245,6 +245,18 @@ static void pci_parse_of_addrs(struct platform_device *op,
245 } 245 }
246} 246}
247 247
248static void pci_init_dev_archdata(struct dev_archdata *sd, void *iommu,
249 void *stc, void *host_controller,
250 struct platform_device *op,
251 int numa_node)
252{
253 sd->iommu = iommu;
254 sd->stc = stc;
255 sd->host_controller = host_controller;
256 sd->op = op;
257 sd->numa_node = numa_node;
258}
259
248static struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm, 260static struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm,
249 struct device_node *node, 261 struct device_node *node,
250 struct pci_bus *bus, int devfn) 262 struct pci_bus *bus, int devfn)
@@ -259,13 +271,10 @@ static struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm,
259 if (!dev) 271 if (!dev)
260 return NULL; 272 return NULL;
261 273
274 op = of_find_device_by_node(node);
262 sd = &dev->dev.archdata; 275 sd = &dev->dev.archdata;
263 sd->iommu = pbm->iommu; 276 pci_init_dev_archdata(sd, pbm->iommu, &pbm->stc, pbm, op,
264 sd->stc = &pbm->stc; 277 pbm->numa_node);
265 sd->host_controller = pbm;
266 sd->op = op = of_find_device_by_node(node);
267 sd->numa_node = pbm->numa_node;
268
269 sd = &op->dev.archdata; 278 sd = &op->dev.archdata;
270 sd->iommu = pbm->iommu; 279 sd->iommu = pbm->iommu;
271 sd->stc = &pbm->stc; 280 sd->stc = &pbm->stc;
@@ -994,6 +1003,27 @@ void pcibios_set_master(struct pci_dev *dev)
994 /* No special bus mastering setup handling */ 1003 /* No special bus mastering setup handling */
995} 1004}
996 1005
1006#ifdef CONFIG_PCI_IOV
1007int pcibios_add_device(struct pci_dev *dev)
1008{
1009 struct pci_dev *pdev;
1010
1011 /* Add sriov arch specific initialization here.
1012 * Copy dev_archdata from PF to VF
1013 */
1014 if (dev->is_virtfn) {
1015 struct dev_archdata *psd;
1016
1017 pdev = dev->physfn;
1018 psd = &pdev->dev.archdata;
1019 pci_init_dev_archdata(&dev->dev.archdata, psd->iommu,
1020 psd->stc, psd->host_controller, NULL,
1021 psd->numa_node);
1022 }
1023 return 0;
1024}
1025#endif /* CONFIG_PCI_IOV */
1026
997static int __init pcibios_init(void) 1027static int __init pcibios_init(void)
998{ 1028{
999 pci_dfl_cache_line_size = 64 >> 2; 1029 pci_dfl_cache_line_size = 64 >> 2;
diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c
index 26db95b54ee9..599f1207eed2 100644
--- a/arch/sparc/kernel/setup_64.c
+++ b/arch/sparc/kernel/setup_64.c
@@ -285,7 +285,8 @@ static void __init sun4v_patch(void)
285 285
286 sun4v_patch_2insn_range(&__sun4v_2insn_patch, 286 sun4v_patch_2insn_range(&__sun4v_2insn_patch,
287 &__sun4v_2insn_patch_end); 287 &__sun4v_2insn_patch_end);
288 if (sun4v_chip_type == SUN4V_CHIP_SPARC_M7) 288 if (sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
289 sun4v_chip_type == SUN4V_CHIP_SPARC_SN)
289 sun_m7_patch_2insn_range(&__sun_m7_2insn_patch, 290 sun_m7_patch_2insn_range(&__sun_m7_2insn_patch,
290 &__sun_m7_2insn_patch_end); 291 &__sun_m7_2insn_patch_end);
291 292
@@ -524,6 +525,7 @@ static void __init init_sparc64_elf_hwcap(void)
524 sun4v_chip_type == SUN4V_CHIP_NIAGARA5 || 525 sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
525 sun4v_chip_type == SUN4V_CHIP_SPARC_M6 || 526 sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
526 sun4v_chip_type == SUN4V_CHIP_SPARC_M7 || 527 sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
528 sun4v_chip_type == SUN4V_CHIP_SPARC_SN ||
527 sun4v_chip_type == SUN4V_CHIP_SPARC64X) 529 sun4v_chip_type == SUN4V_CHIP_SPARC64X)
528 cap |= HWCAP_SPARC_BLKINIT; 530 cap |= HWCAP_SPARC_BLKINIT;
529 if (sun4v_chip_type == SUN4V_CHIP_NIAGARA2 || 531 if (sun4v_chip_type == SUN4V_CHIP_NIAGARA2 ||
@@ -532,6 +534,7 @@ static void __init init_sparc64_elf_hwcap(void)
532 sun4v_chip_type == SUN4V_CHIP_NIAGARA5 || 534 sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
533 sun4v_chip_type == SUN4V_CHIP_SPARC_M6 || 535 sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
534 sun4v_chip_type == SUN4V_CHIP_SPARC_M7 || 536 sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
537 sun4v_chip_type == SUN4V_CHIP_SPARC_SN ||
535 sun4v_chip_type == SUN4V_CHIP_SPARC64X) 538 sun4v_chip_type == SUN4V_CHIP_SPARC64X)
536 cap |= HWCAP_SPARC_N2; 539 cap |= HWCAP_SPARC_N2;
537 } 540 }
@@ -561,6 +564,7 @@ static void __init init_sparc64_elf_hwcap(void)
561 sun4v_chip_type == SUN4V_CHIP_NIAGARA5 || 564 sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
562 sun4v_chip_type == SUN4V_CHIP_SPARC_M6 || 565 sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
563 sun4v_chip_type == SUN4V_CHIP_SPARC_M7 || 566 sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
567 sun4v_chip_type == SUN4V_CHIP_SPARC_SN ||
564 sun4v_chip_type == SUN4V_CHIP_SPARC64X) 568 sun4v_chip_type == SUN4V_CHIP_SPARC64X)
565 cap |= (AV_SPARC_VIS | AV_SPARC_VIS2 | 569 cap |= (AV_SPARC_VIS | AV_SPARC_VIS2 |
566 AV_SPARC_ASI_BLK_INIT | 570 AV_SPARC_ASI_BLK_INIT |
@@ -570,6 +574,7 @@ static void __init init_sparc64_elf_hwcap(void)
570 sun4v_chip_type == SUN4V_CHIP_NIAGARA5 || 574 sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
571 sun4v_chip_type == SUN4V_CHIP_SPARC_M6 || 575 sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
572 sun4v_chip_type == SUN4V_CHIP_SPARC_M7 || 576 sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
577 sun4v_chip_type == SUN4V_CHIP_SPARC_SN ||
573 sun4v_chip_type == SUN4V_CHIP_SPARC64X) 578 sun4v_chip_type == SUN4V_CHIP_SPARC64X)
574 cap |= (AV_SPARC_VIS3 | AV_SPARC_HPC | 579 cap |= (AV_SPARC_VIS3 | AV_SPARC_HPC |
575 AV_SPARC_FMAF); 580 AV_SPARC_FMAF);
diff --git a/arch/sparc/kernel/spiterrs.S b/arch/sparc/kernel/spiterrs.S
index c357e40ffd01..4a73009f66a5 100644
--- a/arch/sparc/kernel/spiterrs.S
+++ b/arch/sparc/kernel/spiterrs.S
@@ -85,8 +85,7 @@ __spitfire_cee_trap_continue:
85 ba,pt %xcc, etraptl1 85 ba,pt %xcc, etraptl1
86 rd %pc, %g7 86 rd %pc, %g7
87 87
88 ba,pt %xcc, 2f 88 ba,a,pt %xcc, 2f
89 nop
90 89
911: ba,pt %xcc, etrap_irq 901: ba,pt %xcc, etrap_irq
92 rd %pc, %g7 91 rd %pc, %g7
@@ -100,8 +99,7 @@ __spitfire_cee_trap_continue:
100 mov %l5, %o2 99 mov %l5, %o2
101 call spitfire_access_error 100 call spitfire_access_error
102 add %sp, PTREGS_OFF, %o0 101 add %sp, PTREGS_OFF, %o0
103 ba,pt %xcc, rtrap 102 ba,a,pt %xcc, rtrap
104 nop
105 .size __spitfire_access_error,.-__spitfire_access_error 103 .size __spitfire_access_error,.-__spitfire_access_error
106 104
107 /* This is the trap handler entry point for ECC correctable 105 /* This is the trap handler entry point for ECC correctable
@@ -179,8 +177,7 @@ __spitfire_data_access_exception_tl1:
179 mov %l5, %o2 177 mov %l5, %o2
180 call spitfire_data_access_exception_tl1 178 call spitfire_data_access_exception_tl1
181 add %sp, PTREGS_OFF, %o0 179 add %sp, PTREGS_OFF, %o0
182 ba,pt %xcc, rtrap 180 ba,a,pt %xcc, rtrap
183 nop
184 .size __spitfire_data_access_exception_tl1,.-__spitfire_data_access_exception_tl1 181 .size __spitfire_data_access_exception_tl1,.-__spitfire_data_access_exception_tl1
185 182
186 .type __spitfire_data_access_exception,#function 183 .type __spitfire_data_access_exception,#function
@@ -200,8 +197,7 @@ __spitfire_data_access_exception:
200 mov %l5, %o2 197 mov %l5, %o2
201 call spitfire_data_access_exception 198 call spitfire_data_access_exception
202 add %sp, PTREGS_OFF, %o0 199 add %sp, PTREGS_OFF, %o0
203 ba,pt %xcc, rtrap 200 ba,a,pt %xcc, rtrap
204 nop
205 .size __spitfire_data_access_exception,.-__spitfire_data_access_exception 201 .size __spitfire_data_access_exception,.-__spitfire_data_access_exception
206 202
207 .type __spitfire_insn_access_exception_tl1,#function 203 .type __spitfire_insn_access_exception_tl1,#function
@@ -220,8 +216,7 @@ __spitfire_insn_access_exception_tl1:
220 mov %l5, %o2 216 mov %l5, %o2
221 call spitfire_insn_access_exception_tl1 217 call spitfire_insn_access_exception_tl1
222 add %sp, PTREGS_OFF, %o0 218 add %sp, PTREGS_OFF, %o0
223 ba,pt %xcc, rtrap 219 ba,a,pt %xcc, rtrap
224 nop
225 .size __spitfire_insn_access_exception_tl1,.-__spitfire_insn_access_exception_tl1 220 .size __spitfire_insn_access_exception_tl1,.-__spitfire_insn_access_exception_tl1
226 221
227 .type __spitfire_insn_access_exception,#function 222 .type __spitfire_insn_access_exception,#function
@@ -240,6 +235,5 @@ __spitfire_insn_access_exception:
240 mov %l5, %o2 235 mov %l5, %o2
241 call spitfire_insn_access_exception 236 call spitfire_insn_access_exception
242 add %sp, PTREGS_OFF, %o0 237 add %sp, PTREGS_OFF, %o0
243 ba,pt %xcc, rtrap 238 ba,a,pt %xcc, rtrap
244 nop
245 .size __spitfire_insn_access_exception,.-__spitfire_insn_access_exception 239 .size __spitfire_insn_access_exception,.-__spitfire_insn_access_exception
diff --git a/arch/sparc/kernel/systbls_32.S b/arch/sparc/kernel/systbls_32.S
index 6c3dd6c52f8b..eac7f0db5c8c 100644
--- a/arch/sparc/kernel/systbls_32.S
+++ b/arch/sparc/kernel/systbls_32.S
@@ -88,4 +88,4 @@ sys_call_table:
88/*340*/ .long sys_ni_syscall, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr 88/*340*/ .long sys_ni_syscall, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
89/*345*/ .long sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf 89/*345*/ .long sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf
90/*350*/ .long sys_execveat, sys_membarrier, sys_userfaultfd, sys_bind, sys_listen 90/*350*/ .long sys_execveat, sys_membarrier, sys_userfaultfd, sys_bind, sys_listen
91/*355*/ .long sys_setsockopt, sys_mlock2, sys_copy_file_range 91/*355*/ .long sys_setsockopt, sys_mlock2, sys_copy_file_range, sys_preadv2, sys_pwritev2
diff --git a/arch/sparc/kernel/systbls_64.S b/arch/sparc/kernel/systbls_64.S
index 12b524cfcfa0..b0f17ff2ddba 100644
--- a/arch/sparc/kernel/systbls_64.S
+++ b/arch/sparc/kernel/systbls_64.S
@@ -89,7 +89,7 @@ sys_call_table32:
89/*340*/ .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr 89/*340*/ .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
90 .word sys32_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf 90 .word sys32_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf
91/*350*/ .word sys32_execveat, sys_membarrier, sys_userfaultfd, sys_bind, sys_listen 91/*350*/ .word sys32_execveat, sys_membarrier, sys_userfaultfd, sys_bind, sys_listen
92 .word compat_sys_setsockopt, sys_mlock2, sys_copy_file_range 92 .word compat_sys_setsockopt, sys_mlock2, sys_copy_file_range, compat_sys_preadv2, compat_sys_pwritev2
93 93
94#endif /* CONFIG_COMPAT */ 94#endif /* CONFIG_COMPAT */
95 95
@@ -170,4 +170,4 @@ sys_call_table:
170/*340*/ .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr 170/*340*/ .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
171 .word sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf 171 .word sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf
172/*350*/ .word sys64_execveat, sys_membarrier, sys_userfaultfd, sys_bind, sys_listen 172/*350*/ .word sys64_execveat, sys_membarrier, sys_userfaultfd, sys_bind, sys_listen
173 .word sys_setsockopt, sys_mlock2, sys_copy_file_range 173 .word sys_setsockopt, sys_mlock2, sys_copy_file_range, sys_preadv2, sys_pwritev2
diff --git a/arch/sparc/kernel/utrap.S b/arch/sparc/kernel/utrap.S
index b7f0f3f3a909..c731e8023d3e 100644
--- a/arch/sparc/kernel/utrap.S
+++ b/arch/sparc/kernel/utrap.S
@@ -11,8 +11,7 @@ utrap_trap: /* %g3=handler,%g4=level */
11 mov %l4, %o1 11 mov %l4, %o1
12 call bad_trap 12 call bad_trap
13 add %sp, PTREGS_OFF, %o0 13 add %sp, PTREGS_OFF, %o0
14 ba,pt %xcc, rtrap 14 ba,a,pt %xcc, rtrap
15 nop
16 15
17invoke_utrap: 16invoke_utrap:
18 sllx %g3, 3, %g3 17 sllx %g3, 3, %g3
diff --git a/arch/sparc/kernel/vio.c b/arch/sparc/kernel/vio.c
index cb5789c9f961..f6bb857254fc 100644
--- a/arch/sparc/kernel/vio.c
+++ b/arch/sparc/kernel/vio.c
@@ -45,6 +45,14 @@ static const struct vio_device_id *vio_match_device(
45 return NULL; 45 return NULL;
46} 46}
47 47
48static int vio_hotplug(struct device *dev, struct kobj_uevent_env *env)
49{
50 const struct vio_dev *vio_dev = to_vio_dev(dev);
51
52 add_uevent_var(env, "MODALIAS=vio:T%sS%s", vio_dev->type, vio_dev->compat);
53 return 0;
54}
55
48static int vio_bus_match(struct device *dev, struct device_driver *drv) 56static int vio_bus_match(struct device *dev, struct device_driver *drv)
49{ 57{
50 struct vio_dev *vio_dev = to_vio_dev(dev); 58 struct vio_dev *vio_dev = to_vio_dev(dev);
@@ -105,15 +113,25 @@ static ssize_t type_show(struct device *dev,
105 return sprintf(buf, "%s\n", vdev->type); 113 return sprintf(buf, "%s\n", vdev->type);
106} 114}
107 115
116static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
117 char *buf)
118{
119 const struct vio_dev *vdev = to_vio_dev(dev);
120
121 return sprintf(buf, "vio:T%sS%s\n", vdev->type, vdev->compat);
122}
123
108static struct device_attribute vio_dev_attrs[] = { 124static struct device_attribute vio_dev_attrs[] = {
109 __ATTR_RO(devspec), 125 __ATTR_RO(devspec),
110 __ATTR_RO(type), 126 __ATTR_RO(type),
127 __ATTR_RO(modalias),
111 __ATTR_NULL 128 __ATTR_NULL
112}; 129};
113 130
114static struct bus_type vio_bus_type = { 131static struct bus_type vio_bus_type = {
115 .name = "vio", 132 .name = "vio",
116 .dev_attrs = vio_dev_attrs, 133 .dev_attrs = vio_dev_attrs,
134 .uevent = vio_hotplug,
117 .match = vio_bus_match, 135 .match = vio_bus_match,
118 .probe = vio_device_probe, 136 .probe = vio_device_probe,
119 .remove = vio_device_remove, 137 .remove = vio_device_remove,
diff --git a/arch/sparc/kernel/vmlinux.lds.S b/arch/sparc/kernel/vmlinux.lds.S
index aadd321aa05d..7d02b1fef025 100644
--- a/arch/sparc/kernel/vmlinux.lds.S
+++ b/arch/sparc/kernel/vmlinux.lds.S
@@ -33,6 +33,10 @@ ENTRY(_start)
33jiffies = jiffies_64; 33jiffies = jiffies_64;
34#endif 34#endif
35 35
36#ifdef CONFIG_SPARC64
37ASSERT((swapper_tsb == 0x0000000000408000), "Error: sparc64 early assembler too large")
38#endif
39
36SECTIONS 40SECTIONS
37{ 41{
38#ifdef CONFIG_SPARC64 42#ifdef CONFIG_SPARC64
diff --git a/arch/sparc/kernel/winfixup.S b/arch/sparc/kernel/winfixup.S
index 1e67ce958369..855019a8590e 100644
--- a/arch/sparc/kernel/winfixup.S
+++ b/arch/sparc/kernel/winfixup.S
@@ -32,8 +32,7 @@ fill_fixup:
32 rd %pc, %g7 32 rd %pc, %g7
33 call do_sparc64_fault 33 call do_sparc64_fault
34 add %sp, PTREGS_OFF, %o0 34 add %sp, PTREGS_OFF, %o0
35 ba,pt %xcc, rtrap 35 ba,a,pt %xcc, rtrap
36 nop
37 36
38 /* Be very careful about usage of the trap globals here. 37 /* Be very careful about usage of the trap globals here.
39 * You cannot touch %g5 as that has the fault information. 38 * You cannot touch %g5 as that has the fault information.
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 1cfe6aab7a11..09e838801e39 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -1769,6 +1769,7 @@ static void __init setup_page_offset(void)
1769 max_phys_bits = 47; 1769 max_phys_bits = 47;
1770 break; 1770 break;
1771 case SUN4V_CHIP_SPARC_M7: 1771 case SUN4V_CHIP_SPARC_M7:
1772 case SUN4V_CHIP_SPARC_SN:
1772 default: 1773 default:
1773 /* M7 and later support 52-bit virtual addresses. */ 1774 /* M7 and later support 52-bit virtual addresses. */
1774 sparc64_va_hole_top = 0xfff8000000000000UL; 1775 sparc64_va_hole_top = 0xfff8000000000000UL;
@@ -1986,6 +1987,7 @@ static void __init sun4v_linear_pte_xor_finalize(void)
1986 */ 1987 */
1987 switch (sun4v_chip_type) { 1988 switch (sun4v_chip_type) {
1988 case SUN4V_CHIP_SPARC_M7: 1989 case SUN4V_CHIP_SPARC_M7:
1990 case SUN4V_CHIP_SPARC_SN:
1989 pagecv_flag = 0x00; 1991 pagecv_flag = 0x00;
1990 break; 1992 break;
1991 default: 1993 default:
@@ -2138,6 +2140,7 @@ void __init paging_init(void)
2138 */ 2140 */
2139 switch (sun4v_chip_type) { 2141 switch (sun4v_chip_type) {
2140 case SUN4V_CHIP_SPARC_M7: 2142 case SUN4V_CHIP_SPARC_M7:
2143 case SUN4V_CHIP_SPARC_SN:
2141 page_cache4v_flag = _PAGE_CP_4V; 2144 page_cache4v_flag = _PAGE_CP_4V;
2142 break; 2145 break;
2143 default: 2146 default:
diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c
index 86a9bec18dab..bd3e8421b57c 100644
--- a/arch/x86/events/amd/core.c
+++ b/arch/x86/events/amd/core.c
@@ -115,7 +115,7 @@ static __initconst const u64 amd_hw_cache_event_ids
115/* 115/*
116 * AMD Performance Monitor K7 and later. 116 * AMD Performance Monitor K7 and later.
117 */ 117 */
118static const u64 amd_perfmon_event_map[] = 118static const u64 amd_perfmon_event_map[PERF_COUNT_HW_MAX] =
119{ 119{
120 [PERF_COUNT_HW_CPU_CYCLES] = 0x0076, 120 [PERF_COUNT_HW_CPU_CYCLES] = 0x0076,
121 [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0, 121 [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 68fa55b4d42e..aff79884e17d 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -3639,6 +3639,7 @@ __init int intel_pmu_init(void)
3639 3639
3640 case 78: /* 14nm Skylake Mobile */ 3640 case 78: /* 14nm Skylake Mobile */
3641 case 94: /* 14nm Skylake Desktop */ 3641 case 94: /* 14nm Skylake Desktop */
3642 case 85: /* 14nm Skylake Server */
3642 x86_pmu.late_ack = true; 3643 x86_pmu.late_ack = true;
3643 memcpy(hw_cache_event_ids, skl_hw_cache_event_ids, sizeof(hw_cache_event_ids)); 3644 memcpy(hw_cache_event_ids, skl_hw_cache_event_ids, sizeof(hw_cache_event_ids));
3644 memcpy(hw_cache_extra_regs, skl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs)); 3645 memcpy(hw_cache_extra_regs, skl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c
index 6c3b7c1780c9..1ca5d1e7d4f2 100644
--- a/arch/x86/events/intel/lbr.c
+++ b/arch/x86/events/intel/lbr.c
@@ -63,7 +63,7 @@ static enum {
63 63
64#define LBR_PLM (LBR_KERNEL | LBR_USER) 64#define LBR_PLM (LBR_KERNEL | LBR_USER)
65 65
66#define LBR_SEL_MASK 0x1ff /* valid bits in LBR_SELECT */ 66#define LBR_SEL_MASK 0x3ff /* valid bits in LBR_SELECT */
67#define LBR_NOT_SUPP -1 /* LBR filter not supported */ 67#define LBR_NOT_SUPP -1 /* LBR filter not supported */
68#define LBR_IGN 0 /* ignored */ 68#define LBR_IGN 0 /* ignored */
69 69
@@ -610,8 +610,10 @@ static int intel_pmu_setup_hw_lbr_filter(struct perf_event *event)
610 * The first 9 bits (LBR_SEL_MASK) in LBR_SELECT operate 610 * The first 9 bits (LBR_SEL_MASK) in LBR_SELECT operate
611 * in suppress mode. So LBR_SELECT should be set to 611 * in suppress mode. So LBR_SELECT should be set to
612 * (~mask & LBR_SEL_MASK) | (mask & ~LBR_SEL_MASK) 612 * (~mask & LBR_SEL_MASK) | (mask & ~LBR_SEL_MASK)
613 * But the 10th bit LBR_CALL_STACK does not operate
614 * in suppress mode.
613 */ 615 */
614 reg->config = mask ^ x86_pmu.lbr_sel_mask; 616 reg->config = mask ^ (x86_pmu.lbr_sel_mask & ~LBR_CALL_STACK);
615 617
616 if ((br_type & PERF_SAMPLE_BRANCH_NO_CYCLES) && 618 if ((br_type & PERF_SAMPLE_BRANCH_NO_CYCLES) &&
617 (br_type & PERF_SAMPLE_BRANCH_NO_FLAGS) && 619 (br_type & PERF_SAMPLE_BRANCH_NO_FLAGS) &&
diff --git a/arch/x86/events/intel/pt.c b/arch/x86/events/intel/pt.c
index 6af7cf71d6b2..09a77dbc73c9 100644
--- a/arch/x86/events/intel/pt.c
+++ b/arch/x86/events/intel/pt.c
@@ -136,9 +136,21 @@ static int __init pt_pmu_hw_init(void)
136 struct dev_ext_attribute *de_attrs; 136 struct dev_ext_attribute *de_attrs;
137 struct attribute **attrs; 137 struct attribute **attrs;
138 size_t size; 138 size_t size;
139 u64 reg;
139 int ret; 140 int ret;
140 long i; 141 long i;
141 142
143 if (boot_cpu_has(X86_FEATURE_VMX)) {
144 /*
145 * Intel SDM, 36.5 "Tracing post-VMXON" says that
146 * "IA32_VMX_MISC[bit 14]" being 1 means PT can trace
147 * post-VMXON.
148 */
149 rdmsrl(MSR_IA32_VMX_MISC, reg);
150 if (reg & BIT(14))
151 pt_pmu.vmx = true;
152 }
153
142 attrs = NULL; 154 attrs = NULL;
143 155
144 for (i = 0; i < PT_CPUID_LEAVES; i++) { 156 for (i = 0; i < PT_CPUID_LEAVES; i++) {
@@ -269,20 +281,23 @@ static void pt_config(struct perf_event *event)
269 281
270 reg |= (event->attr.config & PT_CONFIG_MASK); 282 reg |= (event->attr.config & PT_CONFIG_MASK);
271 283
284 event->hw.config = reg;
272 wrmsrl(MSR_IA32_RTIT_CTL, reg); 285 wrmsrl(MSR_IA32_RTIT_CTL, reg);
273} 286}
274 287
275static void pt_config_start(bool start) 288static void pt_config_stop(struct perf_event *event)
276{ 289{
277 u64 ctl; 290 u64 ctl = READ_ONCE(event->hw.config);
291
292 /* may be already stopped by a PMI */
293 if (!(ctl & RTIT_CTL_TRACEEN))
294 return;
278 295
279 rdmsrl(MSR_IA32_RTIT_CTL, ctl); 296 ctl &= ~RTIT_CTL_TRACEEN;
280 if (start)
281 ctl |= RTIT_CTL_TRACEEN;
282 else
283 ctl &= ~RTIT_CTL_TRACEEN;
284 wrmsrl(MSR_IA32_RTIT_CTL, ctl); 297 wrmsrl(MSR_IA32_RTIT_CTL, ctl);
285 298
299 WRITE_ONCE(event->hw.config, ctl);
300
286 /* 301 /*
287 * A wrmsr that disables trace generation serializes other PT 302 * A wrmsr that disables trace generation serializes other PT
288 * registers and causes all data packets to be written to memory, 303 * registers and causes all data packets to be written to memory,
@@ -291,8 +306,7 @@ static void pt_config_start(bool start)
291 * The below WMB, separating data store and aux_head store matches 306 * The below WMB, separating data store and aux_head store matches
292 * the consumer's RMB that separates aux_head load and data load. 307 * the consumer's RMB that separates aux_head load and data load.
293 */ 308 */
294 if (!start) 309 wmb();
295 wmb();
296} 310}
297 311
298static void pt_config_buffer(void *buf, unsigned int topa_idx, 312static void pt_config_buffer(void *buf, unsigned int topa_idx,
@@ -942,11 +956,17 @@ void intel_pt_interrupt(void)
942 if (!ACCESS_ONCE(pt->handle_nmi)) 956 if (!ACCESS_ONCE(pt->handle_nmi))
943 return; 957 return;
944 958
945 pt_config_start(false); 959 /*
960 * If VMX is on and PT does not support it, don't touch anything.
961 */
962 if (READ_ONCE(pt->vmx_on))
963 return;
946 964
947 if (!event) 965 if (!event)
948 return; 966 return;
949 967
968 pt_config_stop(event);
969
950 buf = perf_get_aux(&pt->handle); 970 buf = perf_get_aux(&pt->handle);
951 if (!buf) 971 if (!buf)
952 return; 972 return;
@@ -983,6 +1003,35 @@ void intel_pt_interrupt(void)
983 } 1003 }
984} 1004}
985 1005
1006void intel_pt_handle_vmx(int on)
1007{
1008 struct pt *pt = this_cpu_ptr(&pt_ctx);
1009 struct perf_event *event;
1010 unsigned long flags;
1011
1012 /* PT plays nice with VMX, do nothing */
1013 if (pt_pmu.vmx)
1014 return;
1015
1016 /*
1017 * VMXON will clear RTIT_CTL.TraceEn; we need to make
1018 * sure to not try to set it while VMX is on. Disable
1019 * interrupts to avoid racing with pmu callbacks;
1020 * concurrent PMI should be handled fine.
1021 */
1022 local_irq_save(flags);
1023 WRITE_ONCE(pt->vmx_on, on);
1024
1025 if (on) {
1026 /* prevent pt_config_stop() from writing RTIT_CTL */
1027 event = pt->handle.event;
1028 if (event)
1029 event->hw.config = 0;
1030 }
1031 local_irq_restore(flags);
1032}
1033EXPORT_SYMBOL_GPL(intel_pt_handle_vmx);
1034
986/* 1035/*
987 * PMU callbacks 1036 * PMU callbacks
988 */ 1037 */
@@ -992,6 +1041,9 @@ static void pt_event_start(struct perf_event *event, int mode)
992 struct pt *pt = this_cpu_ptr(&pt_ctx); 1041 struct pt *pt = this_cpu_ptr(&pt_ctx);
993 struct pt_buffer *buf = perf_get_aux(&pt->handle); 1042 struct pt_buffer *buf = perf_get_aux(&pt->handle);
994 1043
1044 if (READ_ONCE(pt->vmx_on))
1045 return;
1046
995 if (!buf || pt_buffer_is_full(buf, pt)) { 1047 if (!buf || pt_buffer_is_full(buf, pt)) {
996 event->hw.state = PERF_HES_STOPPED; 1048 event->hw.state = PERF_HES_STOPPED;
997 return; 1049 return;
@@ -1014,7 +1066,8 @@ static void pt_event_stop(struct perf_event *event, int mode)
1014 * see comment in intel_pt_interrupt(). 1066 * see comment in intel_pt_interrupt().
1015 */ 1067 */
1016 ACCESS_ONCE(pt->handle_nmi) = 0; 1068 ACCESS_ONCE(pt->handle_nmi) = 0;
1017 pt_config_start(false); 1069
1070 pt_config_stop(event);
1018 1071
1019 if (event->hw.state == PERF_HES_STOPPED) 1072 if (event->hw.state == PERF_HES_STOPPED)
1020 return; 1073 return;
diff --git a/arch/x86/events/intel/pt.h b/arch/x86/events/intel/pt.h
index 336878a5d205..3abb5f5cccc8 100644
--- a/arch/x86/events/intel/pt.h
+++ b/arch/x86/events/intel/pt.h
@@ -65,6 +65,7 @@ enum pt_capabilities {
65struct pt_pmu { 65struct pt_pmu {
66 struct pmu pmu; 66 struct pmu pmu;
67 u32 caps[PT_CPUID_REGS_NUM * PT_CPUID_LEAVES]; 67 u32 caps[PT_CPUID_REGS_NUM * PT_CPUID_LEAVES];
68 bool vmx;
68}; 69};
69 70
70/** 71/**
@@ -107,10 +108,12 @@ struct pt_buffer {
107 * struct pt - per-cpu pt context 108 * struct pt - per-cpu pt context
108 * @handle: perf output handle 109 * @handle: perf output handle
109 * @handle_nmi: do handle PT PMI on this cpu, there's an active event 110 * @handle_nmi: do handle PT PMI on this cpu, there's an active event
111 * @vmx_on: 1 if VMX is ON on this cpu
110 */ 112 */
111struct pt { 113struct pt {
112 struct perf_output_handle handle; 114 struct perf_output_handle handle;
113 int handle_nmi; 115 int handle_nmi;
116 int vmx_on;
114}; 117};
115 118
116#endif /* __INTEL_PT_H__ */ 119#endif /* __INTEL_PT_H__ */
diff --git a/arch/x86/events/intel/rapl.c b/arch/x86/events/intel/rapl.c
index 70c93f9b03ac..1705c9d75e44 100644
--- a/arch/x86/events/intel/rapl.c
+++ b/arch/x86/events/intel/rapl.c
@@ -718,6 +718,7 @@ static int __init rapl_pmu_init(void)
718 break; 718 break;
719 case 60: /* Haswell */ 719 case 60: /* Haswell */
720 case 69: /* Haswell-Celeron */ 720 case 69: /* Haswell-Celeron */
721 case 70: /* Haswell GT3e */
721 case 61: /* Broadwell */ 722 case 61: /* Broadwell */
722 case 71: /* Broadwell-H */ 723 case 71: /* Broadwell-H */
723 rapl_cntr_mask = RAPL_IDX_HSW; 724 rapl_cntr_mask = RAPL_IDX_HSW;
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index 5a2ed3ed2f26..f353061bba1d 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -285,6 +285,10 @@ static inline void perf_events_lapic_init(void) { }
285static inline void perf_check_microcode(void) { } 285static inline void perf_check_microcode(void) { }
286#endif 286#endif
287 287
288#ifdef CONFIG_CPU_SUP_INTEL
289 extern void intel_pt_handle_vmx(int on);
290#endif
291
288#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD) 292#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD)
289 extern void amd_pmu_enable_virt(void); 293 extern void amd_pmu_enable_virt(void);
290 extern void amd_pmu_disable_virt(void); 294 extern void amd_pmu_disable_virt(void);
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
index ad59d70bcb1a..ef495511f019 100644
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
@@ -256,7 +256,8 @@ static void clear_irq_vector(int irq, struct apic_chip_data *data)
256 struct irq_desc *desc; 256 struct irq_desc *desc;
257 int cpu, vector; 257 int cpu, vector;
258 258
259 BUG_ON(!data->cfg.vector); 259 if (!data->cfg.vector)
260 return;
260 261
261 vector = data->cfg.vector; 262 vector = data->cfg.vector;
262 for_each_cpu_and(cpu, data->domain, cpu_online_mask) 263 for_each_cpu_and(cpu, data->domain, cpu_online_mask)
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index 54cdbd2003fe..af1112980dd4 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -389,12 +389,6 @@ default_entry:
389 /* Make changes effective */ 389 /* Make changes effective */
390 wrmsr 390 wrmsr
391 391
392 /*
393 * And make sure that all the mappings we set up have NX set from
394 * the beginning.
395 */
396 orl $(1 << (_PAGE_BIT_NX - 32)), pa(__supported_pte_mask + 4)
397
398enable_paging: 392enable_paging:
399 393
400/* 394/*
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index ee1c8a93871c..133679d520af 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -3103,6 +3103,8 @@ static __init int vmx_disabled_by_bios(void)
3103 3103
3104static void kvm_cpu_vmxon(u64 addr) 3104static void kvm_cpu_vmxon(u64 addr)
3105{ 3105{
3106 intel_pt_handle_vmx(1);
3107
3106 asm volatile (ASM_VMX_VMXON_RAX 3108 asm volatile (ASM_VMX_VMXON_RAX
3107 : : "a"(&addr), "m"(addr) 3109 : : "a"(&addr), "m"(addr)
3108 : "memory", "cc"); 3110 : "memory", "cc");
@@ -3172,6 +3174,8 @@ static void vmclear_local_loaded_vmcss(void)
3172static void kvm_cpu_vmxoff(void) 3174static void kvm_cpu_vmxoff(void)
3173{ 3175{
3174 asm volatile (__ex(ASM_VMX_VMXOFF) : : : "cc"); 3176 asm volatile (__ex(ASM_VMX_VMXOFF) : : : "cc");
3177
3178 intel_pt_handle_vmx(0);
3175} 3179}
3176 3180
3177static void hardware_disable(void) 3181static void hardware_disable(void)
diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
index 8bea84724a7d..f65a33f505b6 100644
--- a/arch/x86/mm/setup_nx.c
+++ b/arch/x86/mm/setup_nx.c
@@ -32,8 +32,9 @@ early_param("noexec", noexec_setup);
32 32
33void x86_configure_nx(void) 33void x86_configure_nx(void)
34{ 34{
35 /* If disable_nx is set, clear NX on all new mappings going forward. */ 35 if (boot_cpu_has(X86_FEATURE_NX) && !disable_nx)
36 if (disable_nx) 36 __supported_pte_mask |= _PAGE_NX;
37 else
37 __supported_pte_mask &= ~_PAGE_NX; 38 __supported_pte_mask &= ~_PAGE_NX;
38} 39}
39 40
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index 9e2ba5c6e1dd..f42e78de1e10 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -27,6 +27,12 @@ static bool xen_pvspin = true;
27 27
28static void xen_qlock_kick(int cpu) 28static void xen_qlock_kick(int cpu)
29{ 29{
30 int irq = per_cpu(lock_kicker_irq, cpu);
31
32 /* Don't kick if the target's kicker interrupt is not initialized. */
33 if (irq == -1)
34 return;
35
30 xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR); 36 xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
31} 37}
32 38
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 94a1843b0426..0ede6d7e2568 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -538,7 +538,6 @@ static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
538 u8 *order, u64 *snap_size); 538 u8 *order, u64 *snap_size);
539static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id, 539static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
540 u64 *snap_features); 540 u64 *snap_features);
541static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name);
542 541
543static int rbd_open(struct block_device *bdev, fmode_t mode) 542static int rbd_open(struct block_device *bdev, fmode_t mode)
544{ 543{
@@ -3127,9 +3126,6 @@ static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
3127 struct rbd_device *rbd_dev = (struct rbd_device *)data; 3126 struct rbd_device *rbd_dev = (struct rbd_device *)data;
3128 int ret; 3127 int ret;
3129 3128
3130 if (!rbd_dev)
3131 return;
3132
3133 dout("%s: \"%s\" notify_id %llu opcode %u\n", __func__, 3129 dout("%s: \"%s\" notify_id %llu opcode %u\n", __func__,
3134 rbd_dev->header_name, (unsigned long long)notify_id, 3130 rbd_dev->header_name, (unsigned long long)notify_id,
3135 (unsigned int)opcode); 3131 (unsigned int)opcode);
@@ -3263,6 +3259,9 @@ static void rbd_dev_header_unwatch_sync(struct rbd_device *rbd_dev)
3263 3259
3264 ceph_osdc_cancel_event(rbd_dev->watch_event); 3260 ceph_osdc_cancel_event(rbd_dev->watch_event);
3265 rbd_dev->watch_event = NULL; 3261 rbd_dev->watch_event = NULL;
3262
3263 dout("%s flushing notifies\n", __func__);
3264 ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);
3266} 3265}
3267 3266
3268/* 3267/*
@@ -3642,21 +3641,14 @@ static void rbd_exists_validate(struct rbd_device *rbd_dev)
3642static void rbd_dev_update_size(struct rbd_device *rbd_dev) 3641static void rbd_dev_update_size(struct rbd_device *rbd_dev)
3643{ 3642{
3644 sector_t size; 3643 sector_t size;
3645 bool removing;
3646 3644
3647 /* 3645 /*
3648 * Don't hold the lock while doing disk operations, 3646 * If EXISTS is not set, rbd_dev->disk may be NULL, so don't
3649 * or lock ordering will conflict with the bdev mutex via: 3647 * try to update its size. If REMOVING is set, updating size
3650 * rbd_add() -> blkdev_get() -> rbd_open() 3648 * is just useless work since the device can't be opened.
3651 */ 3649 */
3652 spin_lock_irq(&rbd_dev->lock); 3650 if (test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags) &&
3653 removing = test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags); 3651 !test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags)) {
3654 spin_unlock_irq(&rbd_dev->lock);
3655 /*
3656 * If the device is being removed, rbd_dev->disk has
3657 * been destroyed, so don't try to update its size
3658 */
3659 if (!removing) {
3660 size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE; 3652 size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
3661 dout("setting size to %llu sectors", (unsigned long long)size); 3653 dout("setting size to %llu sectors", (unsigned long long)size);
3662 set_capacity(rbd_dev->disk, size); 3654 set_capacity(rbd_dev->disk, size);
@@ -4191,7 +4183,7 @@ static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
4191 __le64 features; 4183 __le64 features;
4192 __le64 incompat; 4184 __le64 incompat;
4193 } __attribute__ ((packed)) features_buf = { 0 }; 4185 } __attribute__ ((packed)) features_buf = { 0 };
4194 u64 incompat; 4186 u64 unsup;
4195 int ret; 4187 int ret;
4196 4188
4197 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name, 4189 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
@@ -4204,9 +4196,12 @@ static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
4204 if (ret < sizeof (features_buf)) 4196 if (ret < sizeof (features_buf))
4205 return -ERANGE; 4197 return -ERANGE;
4206 4198
4207 incompat = le64_to_cpu(features_buf.incompat); 4199 unsup = le64_to_cpu(features_buf.incompat) & ~RBD_FEATURES_SUPPORTED;
4208 if (incompat & ~RBD_FEATURES_SUPPORTED) 4200 if (unsup) {
4201 rbd_warn(rbd_dev, "image uses unsupported features: 0x%llx",
4202 unsup);
4209 return -ENXIO; 4203 return -ENXIO;
4204 }
4210 4205
4211 *snap_features = le64_to_cpu(features_buf.features); 4206 *snap_features = le64_to_cpu(features_buf.features);
4212 4207
@@ -5187,6 +5182,10 @@ out_err:
5187 return ret; 5182 return ret;
5188} 5183}
5189 5184
5185/*
5186 * rbd_dev->header_rwsem must be locked for write and will be unlocked
5187 * upon return.
5188 */
5190static int rbd_dev_device_setup(struct rbd_device *rbd_dev) 5189static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
5191{ 5190{
5192 int ret; 5191 int ret;
@@ -5195,7 +5194,7 @@ static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
5195 5194
5196 ret = rbd_dev_id_get(rbd_dev); 5195 ret = rbd_dev_id_get(rbd_dev);
5197 if (ret) 5196 if (ret)
5198 return ret; 5197 goto err_out_unlock;
5199 5198
5200 BUILD_BUG_ON(DEV_NAME_LEN 5199 BUILD_BUG_ON(DEV_NAME_LEN
5201 < sizeof (RBD_DRV_NAME) + MAX_INT_FORMAT_WIDTH); 5200 < sizeof (RBD_DRV_NAME) + MAX_INT_FORMAT_WIDTH);
@@ -5236,8 +5235,9 @@ static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
5236 /* Everything's ready. Announce the disk to the world. */ 5235 /* Everything's ready. Announce the disk to the world. */
5237 5236
5238 set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags); 5237 set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
5239 add_disk(rbd_dev->disk); 5238 up_write(&rbd_dev->header_rwsem);
5240 5239
5240 add_disk(rbd_dev->disk);
5241 pr_info("%s: added with size 0x%llx\n", rbd_dev->disk->disk_name, 5241 pr_info("%s: added with size 0x%llx\n", rbd_dev->disk->disk_name,
5242 (unsigned long long) rbd_dev->mapping.size); 5242 (unsigned long long) rbd_dev->mapping.size);
5243 5243
@@ -5252,6 +5252,8 @@ err_out_blkdev:
5252 unregister_blkdev(rbd_dev->major, rbd_dev->name); 5252 unregister_blkdev(rbd_dev->major, rbd_dev->name);
5253err_out_id: 5253err_out_id:
5254 rbd_dev_id_put(rbd_dev); 5254 rbd_dev_id_put(rbd_dev);
5255err_out_unlock:
5256 up_write(&rbd_dev->header_rwsem);
5255 return ret; 5257 return ret;
5256} 5258}
5257 5259
@@ -5442,6 +5444,7 @@ static ssize_t do_rbd_add(struct bus_type *bus,
5442 spec = NULL; /* rbd_dev now owns this */ 5444 spec = NULL; /* rbd_dev now owns this */
5443 rbd_opts = NULL; /* rbd_dev now owns this */ 5445 rbd_opts = NULL; /* rbd_dev now owns this */
5444 5446
5447 down_write(&rbd_dev->header_rwsem);
5445 rc = rbd_dev_image_probe(rbd_dev, 0); 5448 rc = rbd_dev_image_probe(rbd_dev, 0);
5446 if (rc < 0) 5449 if (rc < 0)
5447 goto err_out_rbd_dev; 5450 goto err_out_rbd_dev;
@@ -5471,6 +5474,7 @@ out:
5471 return rc; 5474 return rc;
5472 5475
5473err_out_rbd_dev: 5476err_out_rbd_dev:
5477 up_write(&rbd_dev->header_rwsem);
5474 rbd_dev_destroy(rbd_dev); 5478 rbd_dev_destroy(rbd_dev);
5475err_out_client: 5479err_out_client:
5476 rbd_put_client(rbdc); 5480 rbd_put_client(rbdc);
@@ -5577,12 +5581,6 @@ static ssize_t do_rbd_remove(struct bus_type *bus,
5577 return ret; 5581 return ret;
5578 5582
5579 rbd_dev_header_unwatch_sync(rbd_dev); 5583 rbd_dev_header_unwatch_sync(rbd_dev);
5580 /*
5581 * flush remaining watch callbacks - these must be complete
5582 * before the osd_client is shutdown
5583 */
5584 dout("%s: flushing notifies", __func__);
5585 ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);
5586 5584
5587 /* 5585 /*
5588 * Don't free anything from rbd_dev->disk until after all 5586 * Don't free anything from rbd_dev->disk until after all
diff --git a/drivers/clk/imx/clk-imx6q.c b/drivers/clk/imx/clk-imx6q.c
index 02e18182fcb5..2beb396fe652 100644
--- a/drivers/clk/imx/clk-imx6q.c
+++ b/drivers/clk/imx/clk-imx6q.c
@@ -394,7 +394,7 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
394 clk[IMX6QDL_CLK_LDB_DI1_DIV_3_5] = imx_clk_fixed_factor("ldb_di1_div_3_5", "ldb_di1", 2, 7); 394 clk[IMX6QDL_CLK_LDB_DI1_DIV_3_5] = imx_clk_fixed_factor("ldb_di1_div_3_5", "ldb_di1", 2, 7);
395 } else { 395 } else {
396 clk[IMX6QDL_CLK_ECSPI_ROOT] = imx_clk_divider("ecspi_root", "pll3_60m", base + 0x38, 19, 6); 396 clk[IMX6QDL_CLK_ECSPI_ROOT] = imx_clk_divider("ecspi_root", "pll3_60m", base + 0x38, 19, 6);
397 clk[IMX6QDL_CLK_CAN_ROOT] = imx_clk_divider("can_root", "pll3_60", base + 0x20, 2, 6); 397 clk[IMX6QDL_CLK_CAN_ROOT] = imx_clk_divider("can_root", "pll3_60m", base + 0x20, 2, 6);
398 clk[IMX6QDL_CLK_IPG_PER] = imx_clk_fixup_divider("ipg_per", "ipg", base + 0x1c, 0, 6, imx_cscmr1_fixup); 398 clk[IMX6QDL_CLK_IPG_PER] = imx_clk_fixup_divider("ipg_per", "ipg", base + 0x1c, 0, 6, imx_cscmr1_fixup);
399 clk[IMX6QDL_CLK_UART_SERIAL_PODF] = imx_clk_divider("uart_serial_podf", "pll3_80m", base + 0x24, 0, 6); 399 clk[IMX6QDL_CLK_UART_SERIAL_PODF] = imx_clk_divider("uart_serial_podf", "pll3_80m", base + 0x24, 0, 6);
400 clk[IMX6QDL_CLK_LDB_DI0_DIV_3_5] = imx_clk_fixed_factor("ldb_di0_div_3_5", "ldb_di0_sel", 2, 7); 400 clk[IMX6QDL_CLK_LDB_DI0_DIV_3_5] = imx_clk_fixed_factor("ldb_di0_div_3_5", "ldb_di0_sel", 2, 7);
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index 10a5cfeae8c5..5f1147fa9239 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -193,12 +193,8 @@ unsigned int dbs_update(struct cpufreq_policy *policy)
193 wall_time = cur_wall_time - j_cdbs->prev_cpu_wall; 193 wall_time = cur_wall_time - j_cdbs->prev_cpu_wall;
194 j_cdbs->prev_cpu_wall = cur_wall_time; 194 j_cdbs->prev_cpu_wall = cur_wall_time;
195 195
196 if (cur_idle_time <= j_cdbs->prev_cpu_idle) { 196 idle_time = cur_idle_time - j_cdbs->prev_cpu_idle;
197 idle_time = 0; 197 j_cdbs->prev_cpu_idle = cur_idle_time;
198 } else {
199 idle_time = cur_idle_time - j_cdbs->prev_cpu_idle;
200 j_cdbs->prev_cpu_idle = cur_idle_time;
201 }
202 198
203 if (ignore_nice) { 199 if (ignore_nice) {
204 u64 cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; 200 u64 cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 30fe323c4551..f502d5b90c25 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -813,6 +813,11 @@ static int core_get_max_pstate(void)
813 if (err) 813 if (err)
814 goto skip_tar; 814 goto skip_tar;
815 815
816 /* For level 1 and 2, bits[23:16] contain the ratio */
817 if (tdp_ctrl)
818 tdp_ratio >>= 16;
819
820 tdp_ratio &= 0xff; /* ratios are only 8 bits long */
816 if (tdp_ratio - 1 == tar) { 821 if (tdp_ratio - 1 == tar) {
817 max_pstate = tar; 822 max_pstate = tar;
818 pr_debug("max_pstate=TAC %x\n", max_pstate); 823 pr_debug("max_pstate=TAC %x\n", max_pstate);
diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
index 01087a38da22..792bdae2b91d 100644
--- a/drivers/edac/i7core_edac.c
+++ b/drivers/edac/i7core_edac.c
@@ -1866,7 +1866,7 @@ static int i7core_mce_check_error(struct notifier_block *nb, unsigned long val,
1866 1866
1867 i7_dev = get_i7core_dev(mce->socketid); 1867 i7_dev = get_i7core_dev(mce->socketid);
1868 if (!i7_dev) 1868 if (!i7_dev)
1869 return NOTIFY_BAD; 1869 return NOTIFY_DONE;
1870 1870
1871 mci = i7_dev->mci; 1871 mci = i7_dev->mci;
1872 pvt = mci->pvt_info; 1872 pvt = mci->pvt_info;
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index 468447aff8eb..8bf745d2da7e 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -3168,7 +3168,7 @@ static int sbridge_mce_check_error(struct notifier_block *nb, unsigned long val,
3168 3168
3169 mci = get_mci_for_node_id(mce->socketid); 3169 mci = get_mci_for_node_id(mce->socketid);
3170 if (!mci) 3170 if (!mci)
3171 return NOTIFY_BAD; 3171 return NOTIFY_DONE;
3172 pvt = mci->pvt_info; 3172 pvt = mci->pvt_info;
3173 3173
3174 /* 3174 /*
diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c
index 0ac594c0a234..34b741940494 100644
--- a/drivers/firmware/efi/vars.c
+++ b/drivers/firmware/efi/vars.c
@@ -202,29 +202,44 @@ static const struct variable_validate variable_validate[] = {
202 { NULL_GUID, "", NULL }, 202 { NULL_GUID, "", NULL },
203}; 203};
204 204
205/*
206 * Check if @var_name matches the pattern given in @match_name.
207 *
208 * @var_name: an array of @len non-NUL characters.
209 * @match_name: a NUL-terminated pattern string, optionally ending in "*". A
210 * final "*" character matches any trailing characters @var_name,
211 * including the case when there are none left in @var_name.
212 * @match: on output, the number of non-wildcard characters in @match_name
213 * that @var_name matches, regardless of the return value.
214 * @return: whether @var_name fully matches @match_name.
215 */
205static bool 216static bool
206variable_matches(const char *var_name, size_t len, const char *match_name, 217variable_matches(const char *var_name, size_t len, const char *match_name,
207 int *match) 218 int *match)
208{ 219{
209 for (*match = 0; ; (*match)++) { 220 for (*match = 0; ; (*match)++) {
210 char c = match_name[*match]; 221 char c = match_name[*match];
211 char u = var_name[*match];
212 222
213 /* Wildcard in the matching name means we've matched */ 223 switch (c) {
214 if (c == '*') 224 case '*':
225 /* Wildcard in @match_name means we've matched. */
215 return true; 226 return true;
216 227
217 /* Case sensitive match */ 228 case '\0':
218 if (!c && *match == len) 229 /* @match_name has ended. Has @var_name too? */
219 return true; 230 return (*match == len);
220 231
221 if (c != u) 232 default:
233 /*
234 * We've reached a non-wildcard char in @match_name.
235 * Continue only if there's an identical character in
236 * @var_name.
237 */
238 if (*match < len && c == var_name[*match])
239 continue;
222 return false; 240 return false;
223 241 }
224 if (!c)
225 return true;
226 } 242 }
227 return true;
228} 243}
229 244
230bool 245bool
diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
index d9ab0cd1d205..4d9a315cfd43 100644
--- a/drivers/gpio/gpio-rcar.c
+++ b/drivers/gpio/gpio-rcar.c
@@ -196,44 +196,6 @@ static int gpio_rcar_irq_set_wake(struct irq_data *d, unsigned int on)
196 return 0; 196 return 0;
197} 197}
198 198
199static void gpio_rcar_irq_bus_lock(struct irq_data *d)
200{
201 struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
202 struct gpio_rcar_priv *p = gpiochip_get_data(gc);
203
204 pm_runtime_get_sync(&p->pdev->dev);
205}
206
207static void gpio_rcar_irq_bus_sync_unlock(struct irq_data *d)
208{
209 struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
210 struct gpio_rcar_priv *p = gpiochip_get_data(gc);
211
212 pm_runtime_put(&p->pdev->dev);
213}
214
215
216static int gpio_rcar_irq_request_resources(struct irq_data *d)
217{
218 struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
219 struct gpio_rcar_priv *p = gpiochip_get_data(gc);
220 int error;
221
222 error = pm_runtime_get_sync(&p->pdev->dev);
223 if (error < 0)
224 return error;
225
226 return 0;
227}
228
229static void gpio_rcar_irq_release_resources(struct irq_data *d)
230{
231 struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
232 struct gpio_rcar_priv *p = gpiochip_get_data(gc);
233
234 pm_runtime_put(&p->pdev->dev);
235}
236
237static irqreturn_t gpio_rcar_irq_handler(int irq, void *dev_id) 199static irqreturn_t gpio_rcar_irq_handler(int irq, void *dev_id)
238{ 200{
239 struct gpio_rcar_priv *p = dev_id; 201 struct gpio_rcar_priv *p = dev_id;
@@ -280,32 +242,18 @@ static void gpio_rcar_config_general_input_output_mode(struct gpio_chip *chip,
280 242
281static int gpio_rcar_request(struct gpio_chip *chip, unsigned offset) 243static int gpio_rcar_request(struct gpio_chip *chip, unsigned offset)
282{ 244{
283 struct gpio_rcar_priv *p = gpiochip_get_data(chip); 245 return pinctrl_request_gpio(chip->base + offset);
284 int error;
285
286 error = pm_runtime_get_sync(&p->pdev->dev);
287 if (error < 0)
288 return error;
289
290 error = pinctrl_request_gpio(chip->base + offset);
291 if (error)
292 pm_runtime_put(&p->pdev->dev);
293
294 return error;
295} 246}
296 247
297static void gpio_rcar_free(struct gpio_chip *chip, unsigned offset) 248static void gpio_rcar_free(struct gpio_chip *chip, unsigned offset)
298{ 249{
299 struct gpio_rcar_priv *p = gpiochip_get_data(chip);
300
301 pinctrl_free_gpio(chip->base + offset); 250 pinctrl_free_gpio(chip->base + offset);
302 251
303 /* Set the GPIO as an input to ensure that the next GPIO request won't 252 /*
253 * Set the GPIO as an input to ensure that the next GPIO request won't
304 * drive the GPIO pin as an output. 254 * drive the GPIO pin as an output.
305 */ 255 */
306 gpio_rcar_config_general_input_output_mode(chip, offset, false); 256 gpio_rcar_config_general_input_output_mode(chip, offset, false);
307
308 pm_runtime_put(&p->pdev->dev);
309} 257}
310 258
311static int gpio_rcar_direction_input(struct gpio_chip *chip, unsigned offset) 259static int gpio_rcar_direction_input(struct gpio_chip *chip, unsigned offset)
@@ -452,6 +400,7 @@ static int gpio_rcar_probe(struct platform_device *pdev)
452 } 400 }
453 401
454 pm_runtime_enable(dev); 402 pm_runtime_enable(dev);
403 pm_runtime_get_sync(dev);
455 404
456 io = platform_get_resource(pdev, IORESOURCE_MEM, 0); 405 io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
457 irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 406 irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
@@ -488,10 +437,6 @@ static int gpio_rcar_probe(struct platform_device *pdev)
488 irq_chip->irq_unmask = gpio_rcar_irq_enable; 437 irq_chip->irq_unmask = gpio_rcar_irq_enable;
489 irq_chip->irq_set_type = gpio_rcar_irq_set_type; 438 irq_chip->irq_set_type = gpio_rcar_irq_set_type;
490 irq_chip->irq_set_wake = gpio_rcar_irq_set_wake; 439 irq_chip->irq_set_wake = gpio_rcar_irq_set_wake;
491 irq_chip->irq_bus_lock = gpio_rcar_irq_bus_lock;
492 irq_chip->irq_bus_sync_unlock = gpio_rcar_irq_bus_sync_unlock;
493 irq_chip->irq_request_resources = gpio_rcar_irq_request_resources;
494 irq_chip->irq_release_resources = gpio_rcar_irq_release_resources;
495 irq_chip->flags = IRQCHIP_SET_TYPE_MASKED | IRQCHIP_MASK_ON_SUSPEND; 440 irq_chip->flags = IRQCHIP_SET_TYPE_MASKED | IRQCHIP_MASK_ON_SUSPEND;
496 441
497 ret = gpiochip_add_data(gpio_chip, p); 442 ret = gpiochip_add_data(gpio_chip, p);
@@ -522,6 +467,7 @@ static int gpio_rcar_probe(struct platform_device *pdev)
522err1: 467err1:
523 gpiochip_remove(gpio_chip); 468 gpiochip_remove(gpio_chip);
524err0: 469err0:
470 pm_runtime_put(dev);
525 pm_runtime_disable(dev); 471 pm_runtime_disable(dev);
526 return ret; 472 return ret;
527} 473}
@@ -532,6 +478,7 @@ static int gpio_rcar_remove(struct platform_device *pdev)
532 478
533 gpiochip_remove(&p->gpio_chip); 479 gpiochip_remove(&p->gpio_chip);
534 480
481 pm_runtime_put(&pdev->dev);
535 pm_runtime_disable(&pdev->dev); 482 pm_runtime_disable(&pdev->dev);
536 return 0; 483 return 0;
537} 484}
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index 682070d20f00..2dc52585e3f2 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -977,7 +977,7 @@ bool acpi_can_fallback_to_crs(struct acpi_device *adev, const char *con_id)
977 lookup = kmalloc(sizeof(*lookup), GFP_KERNEL); 977 lookup = kmalloc(sizeof(*lookup), GFP_KERNEL);
978 if (lookup) { 978 if (lookup) {
979 lookup->adev = adev; 979 lookup->adev = adev;
980 lookup->con_id = con_id; 980 lookup->con_id = kstrdup(con_id, GFP_KERNEL);
981 list_add_tail(&lookup->node, &acpi_crs_lookup_list); 981 list_add_tail(&lookup->node, &acpi_crs_lookup_list);
982 } 982 }
983 } 983 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
index 0020a0ea43ff..35a1248aaa77 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
@@ -63,10 +63,6 @@ bool amdgpu_has_atpx(void) {
63 return amdgpu_atpx_priv.atpx_detected; 63 return amdgpu_atpx_priv.atpx_detected;
64} 64}
65 65
66bool amdgpu_has_atpx_dgpu_power_cntl(void) {
67 return amdgpu_atpx_priv.atpx.functions.power_cntl;
68}
69
70/** 66/**
71 * amdgpu_atpx_call - call an ATPX method 67 * amdgpu_atpx_call - call an ATPX method
72 * 68 *
@@ -146,6 +142,13 @@ static void amdgpu_atpx_parse_functions(struct amdgpu_atpx_functions *f, u32 mas
146 */ 142 */
147static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx) 143static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx)
148{ 144{
145 /* make sure required functions are enabled */
146 /* dGPU power control is required */
147 if (atpx->functions.power_cntl == false) {
148 printk("ATPX dGPU power cntl not present, forcing\n");
149 atpx->functions.power_cntl = true;
150 }
151
149 if (atpx->functions.px_params) { 152 if (atpx->functions.px_params) {
150 union acpi_object *info; 153 union acpi_object *info;
151 struct atpx_px_params output; 154 struct atpx_px_params output;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 612117478b57..2139da773da6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -62,12 +62,6 @@ static const char *amdgpu_asic_name[] = {
62 "LAST", 62 "LAST",
63}; 63};
64 64
65#if defined(CONFIG_VGA_SWITCHEROO)
66bool amdgpu_has_atpx_dgpu_power_cntl(void);
67#else
68static inline bool amdgpu_has_atpx_dgpu_power_cntl(void) { return false; }
69#endif
70
71bool amdgpu_device_is_px(struct drm_device *dev) 65bool amdgpu_device_is_px(struct drm_device *dev)
72{ 66{
73 struct amdgpu_device *adev = dev->dev_private; 67 struct amdgpu_device *adev = dev->dev_private;
@@ -1485,7 +1479,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
1485 1479
1486 if (amdgpu_runtime_pm == 1) 1480 if (amdgpu_runtime_pm == 1)
1487 runtime = true; 1481 runtime = true;
1488 if (amdgpu_device_is_px(ddev) && amdgpu_has_atpx_dgpu_power_cntl()) 1482 if (amdgpu_device_is_px(ddev))
1489 runtime = true; 1483 runtime = true;
1490 vga_switcheroo_register_client(adev->pdev, &amdgpu_switcheroo_ops, runtime); 1484 vga_switcheroo_register_client(adev->pdev, &amdgpu_switcheroo_ops, runtime);
1491 if (runtime) 1485 if (runtime)
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 05b0353d3880..a4a2e6cc61bb 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -910,7 +910,10 @@ static int gmc_v7_0_late_init(void *handle)
910{ 910{
911 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 911 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
912 912
913 return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0); 913 if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
914 return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
915 else
916 return 0;
914} 917}
915 918
916static int gmc_v7_0_sw_init(void *handle) 919static int gmc_v7_0_sw_init(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 02deb3229405..7a9db2c72c89 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -870,7 +870,10 @@ static int gmc_v8_0_late_init(void *handle)
870{ 870{
871 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 871 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
872 872
873 return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0); 873 if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
874 return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
875 else
876 return 0;
874} 877}
875 878
876#define mmMC_SEQ_MISC0_FIJI 0xA71 879#define mmMC_SEQ_MISC0_FIJI 0xA71
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index e17fbdaf874b..71ea0521ea96 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -1796,6 +1796,11 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
1796 req_payload.start_slot = cur_slots; 1796 req_payload.start_slot = cur_slots;
1797 if (mgr->proposed_vcpis[i]) { 1797 if (mgr->proposed_vcpis[i]) {
1798 port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi); 1798 port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
1799 port = drm_dp_get_validated_port_ref(mgr, port);
1800 if (!port) {
1801 mutex_unlock(&mgr->payload_lock);
1802 return -EINVAL;
1803 }
1799 req_payload.num_slots = mgr->proposed_vcpis[i]->num_slots; 1804 req_payload.num_slots = mgr->proposed_vcpis[i]->num_slots;
1800 req_payload.vcpi = mgr->proposed_vcpis[i]->vcpi; 1805 req_payload.vcpi = mgr->proposed_vcpis[i]->vcpi;
1801 } else { 1806 } else {
@@ -1823,6 +1828,9 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
1823 mgr->payloads[i].payload_state = req_payload.payload_state; 1828 mgr->payloads[i].payload_state = req_payload.payload_state;
1824 } 1829 }
1825 cur_slots += req_payload.num_slots; 1830 cur_slots += req_payload.num_slots;
1831
1832 if (port)
1833 drm_dp_put_port(port);
1826 } 1834 }
1827 1835
1828 for (i = 0; i < mgr->max_payloads; i++) { 1836 for (i = 0; i < mgr->max_payloads; i++) {
@@ -2128,6 +2136,8 @@ int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr)
2128 2136
2129 if (mgr->mst_primary) { 2137 if (mgr->mst_primary) {
2130 int sret; 2138 int sret;
2139 u8 guid[16];
2140
2131 sret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE); 2141 sret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
2132 if (sret != DP_RECEIVER_CAP_SIZE) { 2142 if (sret != DP_RECEIVER_CAP_SIZE) {
2133 DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n"); 2143 DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
@@ -2142,6 +2152,16 @@ int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr)
2142 ret = -1; 2152 ret = -1;
2143 goto out_unlock; 2153 goto out_unlock;
2144 } 2154 }
2155
2156 /* Some hubs forget their guids after they resume */
2157 sret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
2158 if (sret != 16) {
2159 DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
2160 ret = -1;
2161 goto out_unlock;
2162 }
2163 drm_dp_check_mstb_guid(mgr->mst_primary, guid);
2164
2145 ret = 0; 2165 ret = 0;
2146 } else 2166 } else
2147 ret = -1; 2167 ret = -1;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index 09198d0b5814..306dde18a94a 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -572,6 +572,24 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
572 goto fail; 572 goto fail;
573 } 573 }
574 574
575 /*
576 * Set the GPU linear window to be at the end of the DMA window, where
577 * the CMA area is likely to reside. This ensures that we are able to
578 * map the command buffers while having the linear window overlap as
579 * much RAM as possible, so we can optimize mappings for other buffers.
580 *
581 * For 3D cores only do this if MC2.0 is present, as with MC1.0 it leads
582 * to different views of the memory on the individual engines.
583 */
584 if (!(gpu->identity.features & chipFeatures_PIPE_3D) ||
585 (gpu->identity.minor_features0 & chipMinorFeatures0_MC20)) {
586 u32 dma_mask = (u32)dma_get_required_mask(gpu->dev);
587 if (dma_mask < PHYS_OFFSET + SZ_2G)
588 gpu->memory_base = PHYS_OFFSET;
589 else
590 gpu->memory_base = dma_mask - SZ_2G + 1;
591 }
592
575 ret = etnaviv_hw_reset(gpu); 593 ret = etnaviv_hw_reset(gpu);
576 if (ret) 594 if (ret)
577 goto fail; 595 goto fail;
@@ -1566,7 +1584,6 @@ static int etnaviv_gpu_platform_probe(struct platform_device *pdev)
1566{ 1584{
1567 struct device *dev = &pdev->dev; 1585 struct device *dev = &pdev->dev;
1568 struct etnaviv_gpu *gpu; 1586 struct etnaviv_gpu *gpu;
1569 u32 dma_mask;
1570 int err = 0; 1587 int err = 0;
1571 1588
1572 gpu = devm_kzalloc(dev, sizeof(*gpu), GFP_KERNEL); 1589 gpu = devm_kzalloc(dev, sizeof(*gpu), GFP_KERNEL);
@@ -1576,18 +1593,6 @@ static int etnaviv_gpu_platform_probe(struct platform_device *pdev)
1576 gpu->dev = &pdev->dev; 1593 gpu->dev = &pdev->dev;
1577 mutex_init(&gpu->lock); 1594 mutex_init(&gpu->lock);
1578 1595
1579 /*
1580 * Set the GPU linear window to be at the end of the DMA window, where
1581 * the CMA area is likely to reside. This ensures that we are able to
1582 * map the command buffers while having the linear window overlap as
1583 * much RAM as possible, so we can optimize mappings for other buffers.
1584 */
1585 dma_mask = (u32)dma_get_required_mask(dev);
1586 if (dma_mask < PHYS_OFFSET + SZ_2G)
1587 gpu->memory_base = PHYS_OFFSET;
1588 else
1589 gpu->memory_base = dma_mask - SZ_2G + 1;
1590
1591 /* Map registers: */ 1596 /* Map registers: */
1592 gpu->mmio = etnaviv_ioremap(pdev, NULL, dev_name(gpu->dev)); 1597 gpu->mmio = etnaviv_ioremap(pdev, NULL, dev_name(gpu->dev));
1593 if (IS_ERR(gpu->mmio)) 1598 if (IS_ERR(gpu->mmio))
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 76c4bdf21b20..34f7a29d9366 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -2608,10 +2608,152 @@ static void evergreen_agp_enable(struct radeon_device *rdev)
2608 WREG32(VM_CONTEXT1_CNTL, 0); 2608 WREG32(VM_CONTEXT1_CNTL, 0);
2609} 2609}
2610 2610
2611static const unsigned ni_dig_offsets[] =
2612{
2613 NI_DIG0_REGISTER_OFFSET,
2614 NI_DIG1_REGISTER_OFFSET,
2615 NI_DIG2_REGISTER_OFFSET,
2616 NI_DIG3_REGISTER_OFFSET,
2617 NI_DIG4_REGISTER_OFFSET,
2618 NI_DIG5_REGISTER_OFFSET
2619};
2620
2621static const unsigned ni_tx_offsets[] =
2622{
2623 NI_DCIO_UNIPHY0_UNIPHY_TX_CONTROL1,
2624 NI_DCIO_UNIPHY1_UNIPHY_TX_CONTROL1,
2625 NI_DCIO_UNIPHY2_UNIPHY_TX_CONTROL1,
2626 NI_DCIO_UNIPHY3_UNIPHY_TX_CONTROL1,
2627 NI_DCIO_UNIPHY4_UNIPHY_TX_CONTROL1,
2628 NI_DCIO_UNIPHY5_UNIPHY_TX_CONTROL1
2629};
2630
2631static const unsigned evergreen_dp_offsets[] =
2632{
2633 EVERGREEN_DP0_REGISTER_OFFSET,
2634 EVERGREEN_DP1_REGISTER_OFFSET,
2635 EVERGREEN_DP2_REGISTER_OFFSET,
2636 EVERGREEN_DP3_REGISTER_OFFSET,
2637 EVERGREEN_DP4_REGISTER_OFFSET,
2638 EVERGREEN_DP5_REGISTER_OFFSET
2639};
2640
2641
2642/*
2643 * Assumption is that EVERGREEN_CRTC_MASTER_EN enable for requested crtc
2644 * We go from crtc to connector and it is not relible since it
2645 * should be an opposite direction .If crtc is enable then
2646 * find the dig_fe which selects this crtc and insure that it enable.
2647 * if such dig_fe is found then find dig_be which selects found dig_be and
2648 * insure that it enable and in DP_SST mode.
2649 * if UNIPHY_PLL_CONTROL1.enable then we should disconnect timing
2650 * from dp symbols clocks .
2651 */
2652static bool evergreen_is_dp_sst_stream_enabled(struct radeon_device *rdev,
2653 unsigned crtc_id, unsigned *ret_dig_fe)
2654{
2655 unsigned i;
2656 unsigned dig_fe;
2657 unsigned dig_be;
2658 unsigned dig_en_be;
2659 unsigned uniphy_pll;
2660 unsigned digs_fe_selected;
2661 unsigned dig_be_mode;
2662 unsigned dig_fe_mask;
2663 bool is_enabled = false;
2664 bool found_crtc = false;
2665
2666 /* loop through all running dig_fe to find selected crtc */
2667 for (i = 0; i < ARRAY_SIZE(ni_dig_offsets); i++) {
2668 dig_fe = RREG32(NI_DIG_FE_CNTL + ni_dig_offsets[i]);
2669 if (dig_fe & NI_DIG_FE_CNTL_SYMCLK_FE_ON &&
2670 crtc_id == NI_DIG_FE_CNTL_SOURCE_SELECT(dig_fe)) {
2671 /* found running pipe */
2672 found_crtc = true;
2673 dig_fe_mask = 1 << i;
2674 dig_fe = i;
2675 break;
2676 }
2677 }
2678
2679 if (found_crtc) {
2680 /* loop through all running dig_be to find selected dig_fe */
2681 for (i = 0; i < ARRAY_SIZE(ni_dig_offsets); i++) {
2682 dig_be = RREG32(NI_DIG_BE_CNTL + ni_dig_offsets[i]);
2683 /* if dig_fe_selected by dig_be? */
2684 digs_fe_selected = NI_DIG_BE_CNTL_FE_SOURCE_SELECT(dig_be);
2685 dig_be_mode = NI_DIG_FE_CNTL_MODE(dig_be);
2686 if (dig_fe_mask & digs_fe_selected &&
2687 /* if dig_be in sst mode? */
2688 dig_be_mode == NI_DIG_BE_DPSST) {
2689 dig_en_be = RREG32(NI_DIG_BE_EN_CNTL +
2690 ni_dig_offsets[i]);
2691 uniphy_pll = RREG32(NI_DCIO_UNIPHY0_PLL_CONTROL1 +
2692 ni_tx_offsets[i]);
2693 /* dig_be enable and tx is running */
2694 if (dig_en_be & NI_DIG_BE_EN_CNTL_ENABLE &&
2695 dig_en_be & NI_DIG_BE_EN_CNTL_SYMBCLK_ON &&
2696 uniphy_pll & NI_DCIO_UNIPHY0_PLL_CONTROL1_ENABLE) {
2697 is_enabled = true;
2698 *ret_dig_fe = dig_fe;
2699 break;
2700 }
2701 }
2702 }
2703 }
2704
2705 return is_enabled;
2706}
2707
2708/*
2709 * Blank dig when in dp sst mode
2710 * Dig ignores crtc timing
2711 */
2712static void evergreen_blank_dp_output(struct radeon_device *rdev,
2713 unsigned dig_fe)
2714{
2715 unsigned stream_ctrl;
2716 unsigned fifo_ctrl;
2717 unsigned counter = 0;
2718
2719 if (dig_fe >= ARRAY_SIZE(evergreen_dp_offsets)) {
2720 DRM_ERROR("invalid dig_fe %d\n", dig_fe);
2721 return;
2722 }
2723
2724 stream_ctrl = RREG32(EVERGREEN_DP_VID_STREAM_CNTL +
2725 evergreen_dp_offsets[dig_fe]);
2726 if (!(stream_ctrl & EVERGREEN_DP_VID_STREAM_CNTL_ENABLE)) {
2727 DRM_ERROR("dig %d , should be enable\n", dig_fe);
2728 return;
2729 }
2730
2731 stream_ctrl &=~EVERGREEN_DP_VID_STREAM_CNTL_ENABLE;
2732 WREG32(EVERGREEN_DP_VID_STREAM_CNTL +
2733 evergreen_dp_offsets[dig_fe], stream_ctrl);
2734
2735 stream_ctrl = RREG32(EVERGREEN_DP_VID_STREAM_CNTL +
2736 evergreen_dp_offsets[dig_fe]);
2737 while (counter < 32 && stream_ctrl & EVERGREEN_DP_VID_STREAM_STATUS) {
2738 msleep(1);
2739 counter++;
2740 stream_ctrl = RREG32(EVERGREEN_DP_VID_STREAM_CNTL +
2741 evergreen_dp_offsets[dig_fe]);
2742 }
2743 if (counter >= 32 )
2744 DRM_ERROR("counter exceeds %d\n", counter);
2745
2746 fifo_ctrl = RREG32(EVERGREEN_DP_STEER_FIFO + evergreen_dp_offsets[dig_fe]);
2747 fifo_ctrl |= EVERGREEN_DP_STEER_FIFO_RESET;
2748 WREG32(EVERGREEN_DP_STEER_FIFO + evergreen_dp_offsets[dig_fe], fifo_ctrl);
2749
2750}
2751
2611void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save) 2752void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save)
2612{ 2753{
2613 u32 crtc_enabled, tmp, frame_count, blackout; 2754 u32 crtc_enabled, tmp, frame_count, blackout;
2614 int i, j; 2755 int i, j;
2756 unsigned dig_fe;
2615 2757
2616 if (!ASIC_IS_NODCE(rdev)) { 2758 if (!ASIC_IS_NODCE(rdev)) {
2617 save->vga_render_control = RREG32(VGA_RENDER_CONTROL); 2759 save->vga_render_control = RREG32(VGA_RENDER_CONTROL);
@@ -2651,7 +2793,17 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
2651 break; 2793 break;
2652 udelay(1); 2794 udelay(1);
2653 } 2795 }
2654 2796 /*we should disable dig if it drives dp sst*/
2797 /*but we are in radeon_device_init and the topology is unknown*/
2798 /*and it is available after radeon_modeset_init*/
2799 /*the following method radeon_atom_encoder_dpms_dig*/
2800 /*does the job if we initialize it properly*/
2801 /*for now we do it this manually*/
2802 /**/
2803 if (ASIC_IS_DCE5(rdev) &&
2804 evergreen_is_dp_sst_stream_enabled(rdev, i ,&dig_fe))
2805 evergreen_blank_dp_output(rdev, dig_fe);
2806 /*we could remove 6 lines below*/
2655 /* XXX this is a hack to avoid strange behavior with EFI on certain systems */ 2807 /* XXX this is a hack to avoid strange behavior with EFI on certain systems */
2656 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1); 2808 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
2657 tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]); 2809 tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
index aa939dfed3a3..b436badf9efa 100644
--- a/drivers/gpu/drm/radeon/evergreen_reg.h
+++ b/drivers/gpu/drm/radeon/evergreen_reg.h
@@ -250,8 +250,43 @@
250 250
251/* HDMI blocks at 0x7030, 0x7c30, 0x10830, 0x11430, 0x12030, 0x12c30 */ 251/* HDMI blocks at 0x7030, 0x7c30, 0x10830, 0x11430, 0x12030, 0x12c30 */
252#define EVERGREEN_HDMI_BASE 0x7030 252#define EVERGREEN_HDMI_BASE 0x7030
253/*DIG block*/
254#define NI_DIG0_REGISTER_OFFSET (0x7000 - 0x7000)
255#define NI_DIG1_REGISTER_OFFSET (0x7C00 - 0x7000)
256#define NI_DIG2_REGISTER_OFFSET (0x10800 - 0x7000)
257#define NI_DIG3_REGISTER_OFFSET (0x11400 - 0x7000)
258#define NI_DIG4_REGISTER_OFFSET (0x12000 - 0x7000)
259#define NI_DIG5_REGISTER_OFFSET (0x12C00 - 0x7000)
260
261
262#define NI_DIG_FE_CNTL 0x7000
263# define NI_DIG_FE_CNTL_SOURCE_SELECT(x) ((x) & 0x3)
264# define NI_DIG_FE_CNTL_SYMCLK_FE_ON (1<<24)
265
266
267#define NI_DIG_BE_CNTL 0x7140
268# define NI_DIG_BE_CNTL_FE_SOURCE_SELECT(x) (((x) >> 8 ) & 0x3F)
269# define NI_DIG_FE_CNTL_MODE(x) (((x) >> 16) & 0x7 )
270
271#define NI_DIG_BE_EN_CNTL 0x7144
272# define NI_DIG_BE_EN_CNTL_ENABLE (1 << 0)
273# define NI_DIG_BE_EN_CNTL_SYMBCLK_ON (1 << 8)
274# define NI_DIG_BE_DPSST 0
253 275
254/* Display Port block */ 276/* Display Port block */
277#define EVERGREEN_DP0_REGISTER_OFFSET (0x730C - 0x730C)
278#define EVERGREEN_DP1_REGISTER_OFFSET (0x7F0C - 0x730C)
279#define EVERGREEN_DP2_REGISTER_OFFSET (0x10B0C - 0x730C)
280#define EVERGREEN_DP3_REGISTER_OFFSET (0x1170C - 0x730C)
281#define EVERGREEN_DP4_REGISTER_OFFSET (0x1230C - 0x730C)
282#define EVERGREEN_DP5_REGISTER_OFFSET (0x12F0C - 0x730C)
283
284
285#define EVERGREEN_DP_VID_STREAM_CNTL 0x730C
286# define EVERGREEN_DP_VID_STREAM_CNTL_ENABLE (1 << 0)
287# define EVERGREEN_DP_VID_STREAM_STATUS (1 <<16)
288#define EVERGREEN_DP_STEER_FIFO 0x7310
289# define EVERGREEN_DP_STEER_FIFO_RESET (1 << 0)
255#define EVERGREEN_DP_SEC_CNTL 0x7280 290#define EVERGREEN_DP_SEC_CNTL 0x7280
256# define EVERGREEN_DP_SEC_STREAM_ENABLE (1 << 0) 291# define EVERGREEN_DP_SEC_STREAM_ENABLE (1 << 0)
257# define EVERGREEN_DP_SEC_ASP_ENABLE (1 << 4) 292# define EVERGREEN_DP_SEC_ASP_ENABLE (1 << 4)
@@ -266,4 +301,15 @@
266# define EVERGREEN_DP_SEC_N_BASE_MULTIPLE(x) (((x) & 0xf) << 24) 301# define EVERGREEN_DP_SEC_N_BASE_MULTIPLE(x) (((x) & 0xf) << 24)
267# define EVERGREEN_DP_SEC_SS_EN (1 << 28) 302# define EVERGREEN_DP_SEC_SS_EN (1 << 28)
268 303
304/*DCIO_UNIPHY block*/
305#define NI_DCIO_UNIPHY0_UNIPHY_TX_CONTROL1 (0x6600 -0x6600)
306#define NI_DCIO_UNIPHY1_UNIPHY_TX_CONTROL1 (0x6640 -0x6600)
307#define NI_DCIO_UNIPHY2_UNIPHY_TX_CONTROL1 (0x6680 - 0x6600)
308#define NI_DCIO_UNIPHY3_UNIPHY_TX_CONTROL1 (0x66C0 - 0x6600)
309#define NI_DCIO_UNIPHY4_UNIPHY_TX_CONTROL1 (0x6700 - 0x6600)
310#define NI_DCIO_UNIPHY5_UNIPHY_TX_CONTROL1 (0x6740 - 0x6600)
311
312#define NI_DCIO_UNIPHY0_PLL_CONTROL1 0x6618
313# define NI_DCIO_UNIPHY0_PLL_CONTROL1_ENABLE (1 << 0)
314
269#endif 315#endif
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 4cbf26555093..e3daafa1be13 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -230,22 +230,13 @@ EXPORT_SYMBOL(ttm_bo_del_sub_from_lru);
230 230
231void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo) 231void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo)
232{ 232{
233 struct ttm_bo_device *bdev = bo->bdev; 233 int put_count = 0;
234 struct ttm_mem_type_manager *man;
235 234
236 lockdep_assert_held(&bo->resv->lock.base); 235 lockdep_assert_held(&bo->resv->lock.base);
237 236
238 if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) { 237 put_count = ttm_bo_del_from_lru(bo);
239 list_del_init(&bo->swap); 238 ttm_bo_list_ref_sub(bo, put_count, true);
240 list_del_init(&bo->lru); 239 ttm_bo_add_to_lru(bo);
241
242 } else {
243 if (bo->ttm && !(bo->ttm->page_flags & TTM_PAGE_FLAG_SG))
244 list_move_tail(&bo->swap, &bo->glob->swap_lru);
245
246 man = &bdev->man[bo->mem.mem_type];
247 list_move_tail(&bo->lru, &man->lru);
248 }
249} 240}
250EXPORT_SYMBOL(ttm_bo_move_to_lru_tail); 241EXPORT_SYMBOL(ttm_bo_move_to_lru_tail);
251 242
diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c
index 4854dac87e24..5fd1fd06effc 100644
--- a/drivers/gpu/drm/virtio/virtgpu_display.c
+++ b/drivers/gpu/drm/virtio/virtgpu_display.c
@@ -267,11 +267,23 @@ static int virtio_gpu_crtc_atomic_check(struct drm_crtc *crtc,
267 return 0; 267 return 0;
268} 268}
269 269
270static void virtio_gpu_crtc_atomic_flush(struct drm_crtc *crtc,
271 struct drm_crtc_state *old_state)
272{
273 unsigned long flags;
274
275 spin_lock_irqsave(&crtc->dev->event_lock, flags);
276 if (crtc->state->event)
277 drm_crtc_send_vblank_event(crtc, crtc->state->event);
278 spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
279}
280
270static const struct drm_crtc_helper_funcs virtio_gpu_crtc_helper_funcs = { 281static const struct drm_crtc_helper_funcs virtio_gpu_crtc_helper_funcs = {
271 .enable = virtio_gpu_crtc_enable, 282 .enable = virtio_gpu_crtc_enable,
272 .disable = virtio_gpu_crtc_disable, 283 .disable = virtio_gpu_crtc_disable,
273 .mode_set_nofb = virtio_gpu_crtc_mode_set_nofb, 284 .mode_set_nofb = virtio_gpu_crtc_mode_set_nofb,
274 .atomic_check = virtio_gpu_crtc_atomic_check, 285 .atomic_check = virtio_gpu_crtc_atomic_check,
286 .atomic_flush = virtio_gpu_crtc_atomic_flush,
275}; 287};
276 288
277static void virtio_gpu_enc_mode_set(struct drm_encoder *encoder, 289static void virtio_gpu_enc_mode_set(struct drm_encoder *encoder,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 723ba16c6084..1a1a87cbf109 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -3293,19 +3293,19 @@ static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
3293 &vmw_cmd_dx_cid_check, true, false, true), 3293 &vmw_cmd_dx_cid_check, true, false, true),
3294 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_QUERY, &vmw_cmd_dx_define_query, 3294 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_QUERY, &vmw_cmd_dx_define_query,
3295 true, false, true), 3295 true, false, true),
3296 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_ok, 3296 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_dx_cid_check,
3297 true, false, true), 3297 true, false, true),
3298 VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_QUERY, &vmw_cmd_dx_bind_query, 3298 VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_QUERY, &vmw_cmd_dx_bind_query,
3299 true, false, true), 3299 true, false, true),
3300 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_QUERY_OFFSET, 3300 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_QUERY_OFFSET,
3301 &vmw_cmd_ok, true, false, true), 3301 &vmw_cmd_dx_cid_check, true, false, true),
3302 VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_ok, 3302 VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_dx_cid_check,
3303 true, false, true), 3303 true, false, true),
3304 VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_ok, 3304 VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_dx_cid_check,
3305 true, false, true), 3305 true, false, true),
3306 VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_QUERY, &vmw_cmd_invalid, 3306 VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_QUERY, &vmw_cmd_invalid,
3307 true, false, true), 3307 true, false, true),
3308 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION, &vmw_cmd_invalid, 3308 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION, &vmw_cmd_dx_cid_check,
3309 true, false, true), 3309 true, false, true),
3310 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VIEWPORTS, &vmw_cmd_dx_cid_check, 3310 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VIEWPORTS, &vmw_cmd_dx_cid_check,
3311 true, false, true), 3311 true, false, true),
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index 3b1faf7862a5..679a4cb98ee3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -573,9 +573,9 @@ static int vmw_fb_set_par(struct fb_info *info)
573 mode = old_mode; 573 mode = old_mode;
574 old_mode = NULL; 574 old_mode = NULL;
575 } else if (!vmw_kms_validate_mode_vram(vmw_priv, 575 } else if (!vmw_kms_validate_mode_vram(vmw_priv,
576 mode->hdisplay * 576 mode->hdisplay *
577 (var->bits_per_pixel + 7) / 8, 577 DIV_ROUND_UP(var->bits_per_pixel, 8),
578 mode->vdisplay)) { 578 mode->vdisplay)) {
579 drm_mode_destroy(vmw_priv->dev, mode); 579 drm_mode_destroy(vmw_priv->dev, mode);
580 return -EINVAL; 580 return -EINVAL;
581 } 581 }
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index c6eaff5f8845..0238f0169e48 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -259,6 +259,7 @@
259#define USB_DEVICE_ID_CORSAIR_K90 0x1b02 259#define USB_DEVICE_ID_CORSAIR_K90 0x1b02
260 260
261#define USB_VENDOR_ID_CREATIVELABS 0x041e 261#define USB_VENDOR_ID_CREATIVELABS 0x041e
262#define USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51 0x322c
262#define USB_DEVICE_ID_PRODIKEYS_PCMIDI 0x2801 263#define USB_DEVICE_ID_PRODIKEYS_PCMIDI 0x2801
263 264
264#define USB_VENDOR_ID_CVTOUCH 0x1ff7 265#define USB_VENDOR_ID_CVTOUCH 0x1ff7
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index ed2f68edc8f1..53fc856d6867 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -71,6 +71,7 @@ static const struct hid_blacklist {
71 { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_3AXIS_5BUTTON_STICK, HID_QUIRK_NOGET }, 71 { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_3AXIS_5BUTTON_STICK, HID_QUIRK_NOGET },
72 { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_AXIS_295, HID_QUIRK_NOGET }, 72 { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_AXIS_295, HID_QUIRK_NOGET },
73 { USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_PIXART_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL }, 73 { USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_PIXART_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL },
74 { USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51, HID_QUIRK_NOGET },
74 { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET }, 75 { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
75 { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_WIIU, HID_QUIRK_MULTI_INPUT }, 76 { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_WIIU, HID_QUIRK_MULTI_INPUT },
76 { USB_VENDOR_ID_ELAN, HID_ANY_ID, HID_QUIRK_ALWAYS_POLL }, 77 { USB_VENDOR_ID_ELAN, HID_ANY_ID, HID_QUIRK_ALWAYS_POLL },
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 02c4efea241c..cf2ba43453fd 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -684,6 +684,7 @@ static int wacom_intuos_inout(struct wacom_wac *wacom)
684 684
685 wacom->tool[idx] = wacom_intuos_get_tool_type(wacom->id[idx]); 685 wacom->tool[idx] = wacom_intuos_get_tool_type(wacom->id[idx]);
686 686
687 wacom->shared->stylus_in_proximity = true;
687 return 1; 688 return 1;
688 } 689 }
689 690
@@ -3395,6 +3396,10 @@ static const struct wacom_features wacom_features_0x33E =
3395 { "Wacom Intuos PT M 2", 21600, 13500, 2047, 63, 3396 { "Wacom Intuos PT M 2", 21600, 13500, 2047, 63,
3396 INTUOSHT2, WACOM_INTUOS_RES, WACOM_INTUOS_RES, .touch_max = 16, 3397 INTUOSHT2, WACOM_INTUOS_RES, WACOM_INTUOS_RES, .touch_max = 16,
3397 .check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE }; 3398 .check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE };
3399static const struct wacom_features wacom_features_0x343 =
3400 { "Wacom DTK1651", 34616, 19559, 1023, 0,
3401 DTUS, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 4,
3402 WACOM_DTU_OFFSET, WACOM_DTU_OFFSET };
3398 3403
3399static const struct wacom_features wacom_features_HID_ANY_ID = 3404static const struct wacom_features wacom_features_HID_ANY_ID =
3400 { "Wacom HID", .type = HID_GENERIC }; 3405 { "Wacom HID", .type = HID_GENERIC };
@@ -3560,6 +3565,7 @@ const struct hid_device_id wacom_ids[] = {
3560 { USB_DEVICE_WACOM(0x33C) }, 3565 { USB_DEVICE_WACOM(0x33C) },
3561 { USB_DEVICE_WACOM(0x33D) }, 3566 { USB_DEVICE_WACOM(0x33D) },
3562 { USB_DEVICE_WACOM(0x33E) }, 3567 { USB_DEVICE_WACOM(0x33E) },
3568 { USB_DEVICE_WACOM(0x343) },
3563 { USB_DEVICE_WACOM(0x4001) }, 3569 { USB_DEVICE_WACOM(0x4001) },
3564 { USB_DEVICE_WACOM(0x4004) }, 3570 { USB_DEVICE_WACOM(0x4004) },
3565 { USB_DEVICE_WACOM(0x5000) }, 3571 { USB_DEVICE_WACOM(0x5000) },
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index faa8e6821fea..0967e1a5b3a2 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -975,10 +975,10 @@ config I2C_XLR
975 975
976config I2C_XLP9XX 976config I2C_XLP9XX
977 tristate "XLP9XX I2C support" 977 tristate "XLP9XX I2C support"
978 depends on CPU_XLP || COMPILE_TEST 978 depends on CPU_XLP || ARCH_VULCAN || COMPILE_TEST
979 help 979 help
980 This driver enables support for the on-chip I2C interface of 980 This driver enables support for the on-chip I2C interface of
981 the Broadcom XLP9xx/XLP5xx MIPS processors. 981 the Broadcom XLP9xx/XLP5xx MIPS and Vulcan ARM64 processors.
982 982
983 This driver can also be built as a module. If so, the module will 983 This driver can also be built as a module. If so, the module will
984 be called i2c-xlp9xx. 984 be called i2c-xlp9xx.
diff --git a/drivers/i2c/busses/i2c-cpm.c b/drivers/i2c/busses/i2c-cpm.c
index 714bdc837769..b167ab25310a 100644
--- a/drivers/i2c/busses/i2c-cpm.c
+++ b/drivers/i2c/busses/i2c-cpm.c
@@ -116,8 +116,8 @@ struct cpm_i2c {
116 cbd_t __iomem *rbase; 116 cbd_t __iomem *rbase;
117 u_char *txbuf[CPM_MAXBD]; 117 u_char *txbuf[CPM_MAXBD];
118 u_char *rxbuf[CPM_MAXBD]; 118 u_char *rxbuf[CPM_MAXBD];
119 u32 txdma[CPM_MAXBD]; 119 dma_addr_t txdma[CPM_MAXBD];
120 u32 rxdma[CPM_MAXBD]; 120 dma_addr_t rxdma[CPM_MAXBD];
121}; 121};
122 122
123static irqreturn_t cpm_i2c_interrupt(int irq, void *dev_id) 123static irqreturn_t cpm_i2c_interrupt(int irq, void *dev_id)
diff --git a/drivers/i2c/busses/i2c-exynos5.c b/drivers/i2c/busses/i2c-exynos5.c
index b29c7500461a..f54ece8fce78 100644
--- a/drivers/i2c/busses/i2c-exynos5.c
+++ b/drivers/i2c/busses/i2c-exynos5.c
@@ -671,7 +671,9 @@ static int exynos5_i2c_xfer(struct i2c_adapter *adap,
671 return -EIO; 671 return -EIO;
672 } 672 }
673 673
674 clk_prepare_enable(i2c->clk); 674 ret = clk_enable(i2c->clk);
675 if (ret)
676 return ret;
675 677
676 for (i = 0; i < num; i++, msgs++) { 678 for (i = 0; i < num; i++, msgs++) {
677 stop = (i == num - 1); 679 stop = (i == num - 1);
@@ -695,7 +697,7 @@ static int exynos5_i2c_xfer(struct i2c_adapter *adap,
695 } 697 }
696 698
697 out: 699 out:
698 clk_disable_unprepare(i2c->clk); 700 clk_disable(i2c->clk);
699 return ret; 701 return ret;
700} 702}
701 703
@@ -747,7 +749,9 @@ static int exynos5_i2c_probe(struct platform_device *pdev)
747 return -ENOENT; 749 return -ENOENT;
748 } 750 }
749 751
750 clk_prepare_enable(i2c->clk); 752 ret = clk_prepare_enable(i2c->clk);
753 if (ret)
754 return ret;
751 755
752 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 756 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
753 i2c->regs = devm_ioremap_resource(&pdev->dev, mem); 757 i2c->regs = devm_ioremap_resource(&pdev->dev, mem);
@@ -799,6 +803,10 @@ static int exynos5_i2c_probe(struct platform_device *pdev)
799 803
800 platform_set_drvdata(pdev, i2c); 804 platform_set_drvdata(pdev, i2c);
801 805
806 clk_disable(i2c->clk);
807
808 return 0;
809
802 err_clk: 810 err_clk:
803 clk_disable_unprepare(i2c->clk); 811 clk_disable_unprepare(i2c->clk);
804 return ret; 812 return ret;
@@ -810,6 +818,8 @@ static int exynos5_i2c_remove(struct platform_device *pdev)
810 818
811 i2c_del_adapter(&i2c->adap); 819 i2c_del_adapter(&i2c->adap);
812 820
821 clk_unprepare(i2c->clk);
822
813 return 0; 823 return 0;
814} 824}
815 825
@@ -821,6 +831,8 @@ static int exynos5_i2c_suspend_noirq(struct device *dev)
821 831
822 i2c->suspended = 1; 832 i2c->suspended = 1;
823 833
834 clk_unprepare(i2c->clk);
835
824 return 0; 836 return 0;
825} 837}
826 838
@@ -830,7 +842,9 @@ static int exynos5_i2c_resume_noirq(struct device *dev)
830 struct exynos5_i2c *i2c = platform_get_drvdata(pdev); 842 struct exynos5_i2c *i2c = platform_get_drvdata(pdev);
831 int ret = 0; 843 int ret = 0;
832 844
833 clk_prepare_enable(i2c->clk); 845 ret = clk_prepare_enable(i2c->clk);
846 if (ret)
847 return ret;
834 848
835 ret = exynos5_hsi2c_clock_setup(i2c); 849 ret = exynos5_hsi2c_clock_setup(i2c);
836 if (ret) { 850 if (ret) {
@@ -839,7 +853,7 @@ static int exynos5_i2c_resume_noirq(struct device *dev)
839 } 853 }
840 854
841 exynos5_i2c_init(i2c); 855 exynos5_i2c_init(i2c);
842 clk_disable_unprepare(i2c->clk); 856 clk_disable(i2c->clk);
843 i2c->suspended = 0; 857 i2c->suspended = 0;
844 858
845 return 0; 859 return 0;
diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c
index 7ba795b24e75..1c8707710098 100644
--- a/drivers/i2c/busses/i2c-ismt.c
+++ b/drivers/i2c/busses/i2c-ismt.c
@@ -75,6 +75,7 @@
75/* PCI DIDs for the Intel SMBus Message Transport (SMT) Devices */ 75/* PCI DIDs for the Intel SMBus Message Transport (SMT) Devices */
76#define PCI_DEVICE_ID_INTEL_S1200_SMT0 0x0c59 76#define PCI_DEVICE_ID_INTEL_S1200_SMT0 0x0c59
77#define PCI_DEVICE_ID_INTEL_S1200_SMT1 0x0c5a 77#define PCI_DEVICE_ID_INTEL_S1200_SMT1 0x0c5a
78#define PCI_DEVICE_ID_INTEL_DNV_SMT 0x19ac
78#define PCI_DEVICE_ID_INTEL_AVOTON_SMT 0x1f15 79#define PCI_DEVICE_ID_INTEL_AVOTON_SMT 0x1f15
79 80
80#define ISMT_DESC_ENTRIES 2 /* number of descriptor entries */ 81#define ISMT_DESC_ENTRIES 2 /* number of descriptor entries */
@@ -180,6 +181,7 @@ struct ismt_priv {
180static const struct pci_device_id ismt_ids[] = { 181static const struct pci_device_id ismt_ids[] = {
181 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_S1200_SMT0) }, 182 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_S1200_SMT0) },
182 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_S1200_SMT1) }, 183 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_S1200_SMT1) },
184 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_DNV_SMT) },
183 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_AVOTON_SMT) }, 185 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_AVOTON_SMT) },
184 { 0, } 186 { 0, }
185}; 187};
diff --git a/drivers/i2c/busses/i2c-rk3x.c b/drivers/i2c/busses/i2c-rk3x.c
index 9096d17beb5b..3dcc5f3f26cb 100644
--- a/drivers/i2c/busses/i2c-rk3x.c
+++ b/drivers/i2c/busses/i2c-rk3x.c
@@ -855,6 +855,7 @@ static struct rk3x_i2c_soc_data soc_data[3] = {
855static const struct of_device_id rk3x_i2c_match[] = { 855static const struct of_device_id rk3x_i2c_match[] = {
856 { .compatible = "rockchip,rk3066-i2c", .data = (void *)&soc_data[0] }, 856 { .compatible = "rockchip,rk3066-i2c", .data = (void *)&soc_data[0] },
857 { .compatible = "rockchip,rk3188-i2c", .data = (void *)&soc_data[1] }, 857 { .compatible = "rockchip,rk3188-i2c", .data = (void *)&soc_data[1] },
858 { .compatible = "rockchip,rk3228-i2c", .data = (void *)&soc_data[2] },
858 { .compatible = "rockchip,rk3288-i2c", .data = (void *)&soc_data[2] }, 859 { .compatible = "rockchip,rk3288-i2c", .data = (void *)&soc_data[2] },
859 {}, 860 {},
860}; 861};
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index cb00d59da456..c2e257d97eff 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -691,7 +691,8 @@ void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port,
691 NULL); 691 NULL);
692 692
693 /* Coudn't find default GID location */ 693 /* Coudn't find default GID location */
694 WARN_ON(ix < 0); 694 if (WARN_ON(ix < 0))
695 goto release;
695 696
696 zattr_type.gid_type = gid_type; 697 zattr_type.gid_type = gid_type;
697 698
diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
index 4a9aa0433b07..7713ef089c3c 100644
--- a/drivers/infiniband/core/ucm.c
+++ b/drivers/infiniband/core/ucm.c
@@ -48,6 +48,7 @@
48 48
49#include <asm/uaccess.h> 49#include <asm/uaccess.h>
50 50
51#include <rdma/ib.h>
51#include <rdma/ib_cm.h> 52#include <rdma/ib_cm.h>
52#include <rdma/ib_user_cm.h> 53#include <rdma/ib_user_cm.h>
53#include <rdma/ib_marshall.h> 54#include <rdma/ib_marshall.h>
@@ -1103,6 +1104,9 @@ static ssize_t ib_ucm_write(struct file *filp, const char __user *buf,
1103 struct ib_ucm_cmd_hdr hdr; 1104 struct ib_ucm_cmd_hdr hdr;
1104 ssize_t result; 1105 ssize_t result;
1105 1106
1107 if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
1108 return -EACCES;
1109
1106 if (len < sizeof(hdr)) 1110 if (len < sizeof(hdr))
1107 return -EINVAL; 1111 return -EINVAL;
1108 1112
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index dd3bcceadfde..c0f3826abb30 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -1574,6 +1574,9 @@ static ssize_t ucma_write(struct file *filp, const char __user *buf,
1574 struct rdma_ucm_cmd_hdr hdr; 1574 struct rdma_ucm_cmd_hdr hdr;
1575 ssize_t ret; 1575 ssize_t ret;
1576 1576
1577 if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
1578 return -EACCES;
1579
1577 if (len < sizeof(hdr)) 1580 if (len < sizeof(hdr))
1578 return -EINVAL; 1581 return -EINVAL;
1579 1582
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 28ba2cc81535..31f422a70623 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -48,6 +48,8 @@
48 48
49#include <asm/uaccess.h> 49#include <asm/uaccess.h>
50 50
51#include <rdma/ib.h>
52
51#include "uverbs.h" 53#include "uverbs.h"
52 54
53MODULE_AUTHOR("Roland Dreier"); 55MODULE_AUTHOR("Roland Dreier");
@@ -709,6 +711,9 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
709 int srcu_key; 711 int srcu_key;
710 ssize_t ret; 712 ssize_t ret;
711 713
714 if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
715 return -EACCES;
716
712 if (count < sizeof hdr) 717 if (count < sizeof hdr)
713 return -EINVAL; 718 return -EINVAL;
714 719
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 15b8adbf39c0..b65b3541e732 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -1860,6 +1860,7 @@ EXPORT_SYMBOL(ib_drain_rq);
1860void ib_drain_qp(struct ib_qp *qp) 1860void ib_drain_qp(struct ib_qp *qp)
1861{ 1861{
1862 ib_drain_sq(qp); 1862 ib_drain_sq(qp);
1863 ib_drain_rq(qp); 1863 if (!qp->srq)
1864 ib_drain_rq(qp);
1864} 1865}
1865EXPORT_SYMBOL(ib_drain_qp); 1866EXPORT_SYMBOL(ib_drain_qp);
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index 42a7b8952d13..3234a8be16f6 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -1390,6 +1390,8 @@ int iwch_register_device(struct iwch_dev *dev)
1390 dev->ibdev.iwcm->add_ref = iwch_qp_add_ref; 1390 dev->ibdev.iwcm->add_ref = iwch_qp_add_ref;
1391 dev->ibdev.iwcm->rem_ref = iwch_qp_rem_ref; 1391 dev->ibdev.iwcm->rem_ref = iwch_qp_rem_ref;
1392 dev->ibdev.iwcm->get_qp = iwch_get_qp; 1392 dev->ibdev.iwcm->get_qp = iwch_get_qp;
1393 memcpy(dev->ibdev.iwcm->ifname, dev->rdev.t3cdev_p->lldev->name,
1394 sizeof(dev->ibdev.iwcm->ifname));
1393 1395
1394 ret = ib_register_device(&dev->ibdev, NULL); 1396 ret = ib_register_device(&dev->ibdev, NULL);
1395 if (ret) 1397 if (ret)
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index b4eeb783573c..b0b955724458 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -162,7 +162,7 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
162 cq->bar2_va = c4iw_bar2_addrs(rdev, cq->cqid, T4_BAR2_QTYPE_INGRESS, 162 cq->bar2_va = c4iw_bar2_addrs(rdev, cq->cqid, T4_BAR2_QTYPE_INGRESS,
163 &cq->bar2_qid, 163 &cq->bar2_qid,
164 user ? &cq->bar2_pa : NULL); 164 user ? &cq->bar2_pa : NULL);
165 if (user && !cq->bar2_va) { 165 if (user && !cq->bar2_pa) {
166 pr_warn(MOD "%s: cqid %u not in BAR2 range.\n", 166 pr_warn(MOD "%s: cqid %u not in BAR2 range.\n",
167 pci_name(rdev->lldi.pdev), cq->cqid); 167 pci_name(rdev->lldi.pdev), cq->cqid);
168 ret = -EINVAL; 168 ret = -EINVAL;
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index 124682dc5709..7574f394fdac 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -580,6 +580,8 @@ int c4iw_register_device(struct c4iw_dev *dev)
580 dev->ibdev.iwcm->add_ref = c4iw_qp_add_ref; 580 dev->ibdev.iwcm->add_ref = c4iw_qp_add_ref;
581 dev->ibdev.iwcm->rem_ref = c4iw_qp_rem_ref; 581 dev->ibdev.iwcm->rem_ref = c4iw_qp_rem_ref;
582 dev->ibdev.iwcm->get_qp = c4iw_get_qp; 582 dev->ibdev.iwcm->get_qp = c4iw_get_qp;
583 memcpy(dev->ibdev.iwcm->ifname, dev->rdev.lldi.ports[0]->name,
584 sizeof(dev->ibdev.iwcm->ifname));
583 585
584 ret = ib_register_device(&dev->ibdev, NULL); 586 ret = ib_register_device(&dev->ibdev, NULL);
585 if (ret) 587 if (ret)
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index e17fb5d5e033..e8993e49b8b3 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -185,6 +185,10 @@ void __iomem *c4iw_bar2_addrs(struct c4iw_rdev *rdev, unsigned int qid,
185 185
186 if (pbar2_pa) 186 if (pbar2_pa)
187 *pbar2_pa = (rdev->bar2_pa + bar2_qoffset) & PAGE_MASK; 187 *pbar2_pa = (rdev->bar2_pa + bar2_qoffset) & PAGE_MASK;
188
189 if (is_t4(rdev->lldi.adapter_type))
190 return NULL;
191
188 return rdev->bar2_kva + bar2_qoffset; 192 return rdev->bar2_kva + bar2_qoffset;
189} 193}
190 194
@@ -270,7 +274,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
270 /* 274 /*
271 * User mode must have bar2 access. 275 * User mode must have bar2 access.
272 */ 276 */
273 if (user && (!wq->sq.bar2_va || !wq->rq.bar2_va)) { 277 if (user && (!wq->sq.bar2_pa || !wq->rq.bar2_pa)) {
274 pr_warn(MOD "%s: sqid %u or rqid %u not in BAR2 range.\n", 278 pr_warn(MOD "%s: sqid %u or rqid %u not in BAR2 range.\n",
275 pci_name(rdev->lldi.pdev), wq->sq.qid, wq->rq.qid); 279 pci_name(rdev->lldi.pdev), wq->sq.qid, wq->rq.qid);
276 goto free_dma; 280 goto free_dma;
@@ -1895,13 +1899,27 @@ int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1895void c4iw_drain_sq(struct ib_qp *ibqp) 1899void c4iw_drain_sq(struct ib_qp *ibqp)
1896{ 1900{
1897 struct c4iw_qp *qp = to_c4iw_qp(ibqp); 1901 struct c4iw_qp *qp = to_c4iw_qp(ibqp);
1902 unsigned long flag;
1903 bool need_to_wait;
1898 1904
1899 wait_for_completion(&qp->sq_drained); 1905 spin_lock_irqsave(&qp->lock, flag);
1906 need_to_wait = !t4_sq_empty(&qp->wq);
1907 spin_unlock_irqrestore(&qp->lock, flag);
1908
1909 if (need_to_wait)
1910 wait_for_completion(&qp->sq_drained);
1900} 1911}
1901 1912
1902void c4iw_drain_rq(struct ib_qp *ibqp) 1913void c4iw_drain_rq(struct ib_qp *ibqp)
1903{ 1914{
1904 struct c4iw_qp *qp = to_c4iw_qp(ibqp); 1915 struct c4iw_qp *qp = to_c4iw_qp(ibqp);
1916 unsigned long flag;
1917 bool need_to_wait;
1918
1919 spin_lock_irqsave(&qp->lock, flag);
1920 need_to_wait = !t4_rq_empty(&qp->wq);
1921 spin_unlock_irqrestore(&qp->lock, flag);
1905 1922
1906 wait_for_completion(&qp->rq_drained); 1923 if (need_to_wait)
1924 wait_for_completion(&qp->rq_drained);
1907} 1925}
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 3ff663c35bac..4cb81f68d850 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -530,7 +530,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
530 sizeof(struct mlx5_wqe_ctrl_seg)) / 530 sizeof(struct mlx5_wqe_ctrl_seg)) /
531 sizeof(struct mlx5_wqe_data_seg); 531 sizeof(struct mlx5_wqe_data_seg);
532 props->max_sge = min(max_rq_sg, max_sq_sg); 532 props->max_sge = min(max_rq_sg, max_sq_sg);
533 props->max_sge_rd = props->max_sge; 533 props->max_sge_rd = MLX5_MAX_SGE_RD;
534 props->max_cq = 1 << MLX5_CAP_GEN(mdev, log_max_cq); 534 props->max_cq = 1 << MLX5_CAP_GEN(mdev, log_max_cq);
535 props->max_cqe = (1 << MLX5_CAP_GEN(mdev, log_max_cq_sz)) - 1; 535 props->max_cqe = (1 << MLX5_CAP_GEN(mdev, log_max_cq_sz)) - 1;
536 props->max_mr = 1 << MLX5_CAP_GEN(mdev, log_max_mkey); 536 props->max_mr = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
index 99cef26e74b4..77630cad7f81 100644
--- a/drivers/infiniband/hw/nes/nes_nic.c
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -498,9 +498,6 @@ static int nes_netdev_start_xmit(struct sk_buff *skb, struct net_device *netdev)
498 * skb_shinfo(skb)->nr_frags, skb_is_gso(skb)); 498 * skb_shinfo(skb)->nr_frags, skb_is_gso(skb));
499 */ 499 */
500 500
501 if (!netif_carrier_ok(netdev))
502 return NETDEV_TX_OK;
503
504 if (netif_queue_stopped(netdev)) 501 if (netif_queue_stopped(netdev))
505 return NETDEV_TX_BUSY; 502 return NETDEV_TX_BUSY;
506 503
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
index e449e394963f..24f4a782e0f4 100644
--- a/drivers/infiniband/hw/qib/qib_file_ops.c
+++ b/drivers/infiniband/hw/qib/qib_file_ops.c
@@ -45,6 +45,8 @@
45#include <linux/export.h> 45#include <linux/export.h>
46#include <linux/uio.h> 46#include <linux/uio.h>
47 47
48#include <rdma/ib.h>
49
48#include "qib.h" 50#include "qib.h"
49#include "qib_common.h" 51#include "qib_common.h"
50#include "qib_user_sdma.h" 52#include "qib_user_sdma.h"
@@ -2067,6 +2069,9 @@ static ssize_t qib_write(struct file *fp, const char __user *data,
2067 ssize_t ret = 0; 2069 ssize_t ret = 0;
2068 void *dest; 2070 void *dest;
2069 2071
2072 if (WARN_ON_ONCE(!ib_safe_file_access(fp)))
2073 return -EACCES;
2074
2070 if (count < sizeof(cmd.type)) { 2075 if (count < sizeof(cmd.type)) {
2071 ret = -EINVAL; 2076 ret = -EINVAL;
2072 goto bail; 2077 goto bail;
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index bd82a6948dc8..a9e3bcc522c4 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -1637,9 +1637,9 @@ bail:
1637 spin_unlock_irqrestore(&qp->s_hlock, flags); 1637 spin_unlock_irqrestore(&qp->s_hlock, flags);
1638 if (nreq) { 1638 if (nreq) {
1639 if (call_send) 1639 if (call_send)
1640 rdi->driver_f.schedule_send_no_lock(qp);
1641 else
1642 rdi->driver_f.do_send(qp); 1640 rdi->driver_f.do_send(qp);
1641 else
1642 rdi->driver_f.schedule_send_no_lock(qp);
1643 } 1643 }
1644 return err; 1644 return err;
1645} 1645}
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 194580fba7fd..14d3b37944df 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -284,6 +284,8 @@ static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio)
284 * go away inside make_request 284 * go away inside make_request
285 */ 285 */
286 sectors = bio_sectors(bio); 286 sectors = bio_sectors(bio);
287 /* bio could be mergeable after passing to underlayer */
288 bio->bi_rw &= ~REQ_NOMERGE;
287 mddev->pers->make_request(mddev, bio); 289 mddev->pers->make_request(mddev, bio);
288 290
289 cpu = part_stat_lock(); 291 cpu = part_stat_lock();
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 2ea12c6bf659..34783a3c8b3c 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -70,7 +70,6 @@ static void dump_zones(struct mddev *mddev)
70 (unsigned long long)zone_size>>1); 70 (unsigned long long)zone_size>>1);
71 zone_start = conf->strip_zone[j].zone_end; 71 zone_start = conf->strip_zone[j].zone_end;
72 } 72 }
73 printk(KERN_INFO "\n");
74} 73}
75 74
76static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf) 75static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
@@ -85,6 +84,7 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
85 struct r0conf *conf = kzalloc(sizeof(*conf), GFP_KERNEL); 84 struct r0conf *conf = kzalloc(sizeof(*conf), GFP_KERNEL);
86 unsigned short blksize = 512; 85 unsigned short blksize = 512;
87 86
87 *private_conf = ERR_PTR(-ENOMEM);
88 if (!conf) 88 if (!conf)
89 return -ENOMEM; 89 return -ENOMEM;
90 rdev_for_each(rdev1, mddev) { 90 rdev_for_each(rdev1, mddev) {
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 8ab8b65e1741..e48c262ce032 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3502,8 +3502,6 @@ returnbi:
3502 dev = &sh->dev[i]; 3502 dev = &sh->dev[i];
3503 } else if (test_bit(R5_Discard, &dev->flags)) 3503 } else if (test_bit(R5_Discard, &dev->flags))
3504 discard_pending = 1; 3504 discard_pending = 1;
3505 WARN_ON(test_bit(R5_SkipCopy, &dev->flags));
3506 WARN_ON(dev->page != dev->orig_page);
3507 } 3505 }
3508 3506
3509 r5l_stripe_write_finished(sh); 3507 r5l_stripe_write_finished(sh);
diff --git a/drivers/media/usb/usbvision/usbvision-video.c b/drivers/media/usb/usbvision/usbvision-video.c
index 12f5ebbd0436..ad2f3d27b266 100644
--- a/drivers/media/usb/usbvision/usbvision-video.c
+++ b/drivers/media/usb/usbvision/usbvision-video.c
@@ -1452,13 +1452,6 @@ static int usbvision_probe(struct usb_interface *intf,
1452 printk(KERN_INFO "%s: %s found\n", __func__, 1452 printk(KERN_INFO "%s: %s found\n", __func__,
1453 usbvision_device_data[model].model_string); 1453 usbvision_device_data[model].model_string);
1454 1454
1455 /*
1456 * this is a security check.
1457 * an exploit using an incorrect bInterfaceNumber is known
1458 */
1459 if (ifnum >= USB_MAXINTERFACES || !dev->actconfig->interface[ifnum])
1460 return -ENODEV;
1461
1462 if (usbvision_device_data[model].interface >= 0) 1455 if (usbvision_device_data[model].interface >= 0)
1463 interface = &dev->actconfig->interface[usbvision_device_data[model].interface]->altsetting[0]; 1456 interface = &dev->actconfig->interface[usbvision_device_data[model].interface]->altsetting[0];
1464 else if (ifnum < dev->actconfig->desc.bNumInterfaces) 1457 else if (ifnum < dev->actconfig->desc.bNumInterfaces)
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
index 5d016f496e0e..9fbcb67a9ee6 100644
--- a/drivers/media/v4l2-core/videobuf2-core.c
+++ b/drivers/media/v4l2-core/videobuf2-core.c
@@ -1645,7 +1645,7 @@ static int __vb2_wait_for_done_vb(struct vb2_queue *q, int nonblocking)
1645 * Will sleep if required for nonblocking == false. 1645 * Will sleep if required for nonblocking == false.
1646 */ 1646 */
1647static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb, 1647static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb,
1648 int nonblocking) 1648 void *pb, int nonblocking)
1649{ 1649{
1650 unsigned long flags; 1650 unsigned long flags;
1651 int ret; 1651 int ret;
@@ -1666,10 +1666,10 @@ static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb,
1666 /* 1666 /*
1667 * Only remove the buffer from done_list if v4l2_buffer can handle all 1667 * Only remove the buffer from done_list if v4l2_buffer can handle all
1668 * the planes. 1668 * the planes.
1669 * Verifying planes is NOT necessary since it already has been checked
1670 * before the buffer is queued/prepared. So it can never fail.
1671 */ 1669 */
1672 list_del(&(*vb)->done_entry); 1670 ret = call_bufop(q, verify_planes_array, *vb, pb);
1671 if (!ret)
1672 list_del(&(*vb)->done_entry);
1673 spin_unlock_irqrestore(&q->done_lock, flags); 1673 spin_unlock_irqrestore(&q->done_lock, flags);
1674 1674
1675 return ret; 1675 return ret;
@@ -1748,7 +1748,7 @@ int vb2_core_dqbuf(struct vb2_queue *q, unsigned int *pindex, void *pb,
1748 struct vb2_buffer *vb = NULL; 1748 struct vb2_buffer *vb = NULL;
1749 int ret; 1749 int ret;
1750 1750
1751 ret = __vb2_get_done_vb(q, &vb, nonblocking); 1751 ret = __vb2_get_done_vb(q, &vb, pb, nonblocking);
1752 if (ret < 0) 1752 if (ret < 0)
1753 return ret; 1753 return ret;
1754 1754
@@ -2298,6 +2298,16 @@ unsigned int vb2_core_poll(struct vb2_queue *q, struct file *file,
2298 return POLLERR; 2298 return POLLERR;
2299 2299
2300 /* 2300 /*
2301 * If this quirk is set and QBUF hasn't been called yet then
2302 * return POLLERR as well. This only affects capture queues, output
2303 * queues will always initialize waiting_for_buffers to false.
2304 * This quirk is set by V4L2 for backwards compatibility reasons.
2305 */
2306 if (q->quirk_poll_must_check_waiting_for_buffers &&
2307 q->waiting_for_buffers && (req_events & (POLLIN | POLLRDNORM)))
2308 return POLLERR;
2309
2310 /*
2301 * For output streams you can call write() as long as there are fewer 2311 * For output streams you can call write() as long as there are fewer
2302 * buffers queued than there are buffers available. 2312 * buffers queued than there are buffers available.
2303 */ 2313 */
diff --git a/drivers/media/v4l2-core/videobuf2-memops.c b/drivers/media/v4l2-core/videobuf2-memops.c
index dbec5923fcf0..3c3b517f1d1c 100644
--- a/drivers/media/v4l2-core/videobuf2-memops.c
+++ b/drivers/media/v4l2-core/videobuf2-memops.c
@@ -49,7 +49,7 @@ struct frame_vector *vb2_create_framevec(unsigned long start,
49 vec = frame_vector_create(nr); 49 vec = frame_vector_create(nr);
50 if (!vec) 50 if (!vec)
51 return ERR_PTR(-ENOMEM); 51 return ERR_PTR(-ENOMEM);
52 ret = get_vaddr_frames(start, nr, write, 1, vec); 52 ret = get_vaddr_frames(start & PAGE_MASK, nr, write, true, vec);
53 if (ret < 0) 53 if (ret < 0)
54 goto out_destroy; 54 goto out_destroy;
55 /* We accept only complete set of PFNs */ 55 /* We accept only complete set of PFNs */
diff --git a/drivers/media/v4l2-core/videobuf2-v4l2.c b/drivers/media/v4l2-core/videobuf2-v4l2.c
index 91f552124050..7f366f1b0377 100644
--- a/drivers/media/v4l2-core/videobuf2-v4l2.c
+++ b/drivers/media/v4l2-core/videobuf2-v4l2.c
@@ -74,6 +74,11 @@ static int __verify_planes_array(struct vb2_buffer *vb, const struct v4l2_buffer
74 return 0; 74 return 0;
75} 75}
76 76
77static int __verify_planes_array_core(struct vb2_buffer *vb, const void *pb)
78{
79 return __verify_planes_array(vb, pb);
80}
81
77/** 82/**
78 * __verify_length() - Verify that the bytesused value for each plane fits in 83 * __verify_length() - Verify that the bytesused value for each plane fits in
79 * the plane length and that the data offset doesn't exceed the bytesused value. 84 * the plane length and that the data offset doesn't exceed the bytesused value.
@@ -437,6 +442,7 @@ static int __fill_vb2_buffer(struct vb2_buffer *vb,
437} 442}
438 443
439static const struct vb2_buf_ops v4l2_buf_ops = { 444static const struct vb2_buf_ops v4l2_buf_ops = {
445 .verify_planes_array = __verify_planes_array_core,
440 .fill_user_buffer = __fill_v4l2_buffer, 446 .fill_user_buffer = __fill_v4l2_buffer,
441 .fill_vb2_buffer = __fill_vb2_buffer, 447 .fill_vb2_buffer = __fill_vb2_buffer,
442 .copy_timestamp = __copy_timestamp, 448 .copy_timestamp = __copy_timestamp,
@@ -765,6 +771,12 @@ int vb2_queue_init(struct vb2_queue *q)
765 q->is_output = V4L2_TYPE_IS_OUTPUT(q->type); 771 q->is_output = V4L2_TYPE_IS_OUTPUT(q->type);
766 q->copy_timestamp = (q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) 772 q->copy_timestamp = (q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK)
767 == V4L2_BUF_FLAG_TIMESTAMP_COPY; 773 == V4L2_BUF_FLAG_TIMESTAMP_COPY;
774 /*
775 * For compatibility with vb1: if QBUF hasn't been called yet, then
776 * return POLLERR as well. This only affects capture queues, output
777 * queues will always initialize waiting_for_buffers to false.
778 */
779 q->quirk_poll_must_check_waiting_for_buffers = true;
768 780
769 return vb2_core_queue_init(q); 781 return vb2_core_queue_init(q);
770} 782}
@@ -818,14 +830,6 @@ unsigned int vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait)
818 poll_wait(file, &fh->wait, wait); 830 poll_wait(file, &fh->wait, wait);
819 } 831 }
820 832
821 /*
822 * For compatibility with vb1: if QBUF hasn't been called yet, then
823 * return POLLERR as well. This only affects capture queues, output
824 * queues will always initialize waiting_for_buffers to false.
825 */
826 if (q->waiting_for_buffers && (req_events & (POLLIN | POLLRDNORM)))
827 return POLLERR;
828
829 return res | vb2_core_poll(q, file, wait); 833 return res | vb2_core_poll(q, file, wait);
830} 834}
831EXPORT_SYMBOL_GPL(vb2_poll); 835EXPORT_SYMBOL_GPL(vb2_poll);
diff --git a/drivers/misc/cxl/context.c b/drivers/misc/cxl/context.c
index 10370f280500..7edea9c19199 100644
--- a/drivers/misc/cxl/context.c
+++ b/drivers/misc/cxl/context.c
@@ -223,6 +223,13 @@ int __detach_context(struct cxl_context *ctx)
223 cxl_ops->link_ok(ctx->afu->adapter, ctx->afu)); 223 cxl_ops->link_ok(ctx->afu->adapter, ctx->afu));
224 flush_work(&ctx->fault_work); /* Only needed for dedicated process */ 224 flush_work(&ctx->fault_work); /* Only needed for dedicated process */
225 225
226 /*
227 * Wait until no further interrupts are presented by the PSL
228 * for this context.
229 */
230 if (cxl_ops->irq_wait)
231 cxl_ops->irq_wait(ctx);
232
226 /* release the reference to the group leader and mm handling pid */ 233 /* release the reference to the group leader and mm handling pid */
227 put_pid(ctx->pid); 234 put_pid(ctx->pid);
228 put_pid(ctx->glpid); 235 put_pid(ctx->glpid);
diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h
index 38e21cf7806e..73dc2a33da74 100644
--- a/drivers/misc/cxl/cxl.h
+++ b/drivers/misc/cxl/cxl.h
@@ -274,6 +274,7 @@ static const cxl_p2n_reg_t CXL_PSL_WED_An = {0x0A0};
274#define CXL_PSL_DSISR_An_PE (1ull << (63-4)) /* PSL Error (implementation specific) */ 274#define CXL_PSL_DSISR_An_PE (1ull << (63-4)) /* PSL Error (implementation specific) */
275#define CXL_PSL_DSISR_An_AE (1ull << (63-5)) /* AFU Error */ 275#define CXL_PSL_DSISR_An_AE (1ull << (63-5)) /* AFU Error */
276#define CXL_PSL_DSISR_An_OC (1ull << (63-6)) /* OS Context Warning */ 276#define CXL_PSL_DSISR_An_OC (1ull << (63-6)) /* OS Context Warning */
277#define CXL_PSL_DSISR_PENDING (CXL_PSL_DSISR_TRANS | CXL_PSL_DSISR_An_PE | CXL_PSL_DSISR_An_AE | CXL_PSL_DSISR_An_OC)
277/* NOTE: Bits 32:63 are undefined if DSISR[DS] = 1 */ 278/* NOTE: Bits 32:63 are undefined if DSISR[DS] = 1 */
278#define CXL_PSL_DSISR_An_M DSISR_NOHPTE /* PTE not found */ 279#define CXL_PSL_DSISR_An_M DSISR_NOHPTE /* PTE not found */
279#define CXL_PSL_DSISR_An_P DSISR_PROTFAULT /* Storage protection violation */ 280#define CXL_PSL_DSISR_An_P DSISR_PROTFAULT /* Storage protection violation */
@@ -855,6 +856,7 @@ struct cxl_backend_ops {
855 u64 dsisr, u64 errstat); 856 u64 dsisr, u64 errstat);
856 irqreturn_t (*psl_interrupt)(int irq, void *data); 857 irqreturn_t (*psl_interrupt)(int irq, void *data);
857 int (*ack_irq)(struct cxl_context *ctx, u64 tfc, u64 psl_reset_mask); 858 int (*ack_irq)(struct cxl_context *ctx, u64 tfc, u64 psl_reset_mask);
859 void (*irq_wait)(struct cxl_context *ctx);
858 int (*attach_process)(struct cxl_context *ctx, bool kernel, 860 int (*attach_process)(struct cxl_context *ctx, bool kernel,
859 u64 wed, u64 amr); 861 u64 wed, u64 amr);
860 int (*detach_process)(struct cxl_context *ctx); 862 int (*detach_process)(struct cxl_context *ctx);
diff --git a/drivers/misc/cxl/irq.c b/drivers/misc/cxl/irq.c
index be646dc41a2c..8def4553acba 100644
--- a/drivers/misc/cxl/irq.c
+++ b/drivers/misc/cxl/irq.c
@@ -203,7 +203,6 @@ unsigned int cxl_map_irq(struct cxl *adapter, irq_hw_number_t hwirq,
203void cxl_unmap_irq(unsigned int virq, void *cookie) 203void cxl_unmap_irq(unsigned int virq, void *cookie)
204{ 204{
205 free_irq(virq, cookie); 205 free_irq(virq, cookie);
206 irq_dispose_mapping(virq);
207} 206}
208 207
209int cxl_register_one_irq(struct cxl *adapter, 208int cxl_register_one_irq(struct cxl *adapter,
diff --git a/drivers/misc/cxl/native.c b/drivers/misc/cxl/native.c
index 387fcbdf9793..ecf7557cd657 100644
--- a/drivers/misc/cxl/native.c
+++ b/drivers/misc/cxl/native.c
@@ -14,6 +14,7 @@
14#include <linux/mutex.h> 14#include <linux/mutex.h>
15#include <linux/mm.h> 15#include <linux/mm.h>
16#include <linux/uaccess.h> 16#include <linux/uaccess.h>
17#include <linux/delay.h>
17#include <asm/synch.h> 18#include <asm/synch.h>
18#include <misc/cxl-base.h> 19#include <misc/cxl-base.h>
19 20
@@ -797,6 +798,35 @@ static irqreturn_t native_irq_multiplexed(int irq, void *data)
797 return fail_psl_irq(afu, &irq_info); 798 return fail_psl_irq(afu, &irq_info);
798} 799}
799 800
801void native_irq_wait(struct cxl_context *ctx)
802{
803 u64 dsisr;
804 int timeout = 1000;
805 int ph;
806
807 /*
808 * Wait until no further interrupts are presented by the PSL
809 * for this context.
810 */
811 while (timeout--) {
812 ph = cxl_p2n_read(ctx->afu, CXL_PSL_PEHandle_An) & 0xffff;
813 if (ph != ctx->pe)
814 return;
815 dsisr = cxl_p2n_read(ctx->afu, CXL_PSL_DSISR_An);
816 if ((dsisr & CXL_PSL_DSISR_PENDING) == 0)
817 return;
818 /*
819 * We are waiting for the workqueue to process our
820 * irq, so need to let that run here.
821 */
822 msleep(1);
823 }
824
825 dev_warn(&ctx->afu->dev, "WARNING: waiting on DSI for PE %i"
826 " DSISR %016llx!\n", ph, dsisr);
827 return;
828}
829
800static irqreturn_t native_slice_irq_err(int irq, void *data) 830static irqreturn_t native_slice_irq_err(int irq, void *data)
801{ 831{
802 struct cxl_afu *afu = data; 832 struct cxl_afu *afu = data;
@@ -1076,6 +1106,7 @@ const struct cxl_backend_ops cxl_native_ops = {
1076 .handle_psl_slice_error = native_handle_psl_slice_error, 1106 .handle_psl_slice_error = native_handle_psl_slice_error,
1077 .psl_interrupt = NULL, 1107 .psl_interrupt = NULL,
1078 .ack_irq = native_ack_irq, 1108 .ack_irq = native_ack_irq,
1109 .irq_wait = native_irq_wait,
1079 .attach_process = native_attach_process, 1110 .attach_process = native_attach_process,
1080 .detach_process = native_detach_process, 1111 .detach_process = native_detach_process,
1081 .support_attributes = native_support_attributes, 1112 .support_attributes = native_support_attributes,
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 04feea8354cb..e657af0e95fa 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -97,6 +97,7 @@ config MMC_RICOH_MMC
97config MMC_SDHCI_ACPI 97config MMC_SDHCI_ACPI
98 tristate "SDHCI support for ACPI enumerated SDHCI controllers" 98 tristate "SDHCI support for ACPI enumerated SDHCI controllers"
99 depends on MMC_SDHCI && ACPI 99 depends on MMC_SDHCI && ACPI
100 select IOSF_MBI if X86
100 help 101 help
101 This selects support for ACPI enumerated SDHCI controllers, 102 This selects support for ACPI enumerated SDHCI controllers,
102 identified by ACPI Compatibility ID PNP0D40 or specific 103 identified by ACPI Compatibility ID PNP0D40 or specific
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
index 6839e41c6d58..bed6a494f52c 100644
--- a/drivers/mmc/host/sdhci-acpi.c
+++ b/drivers/mmc/host/sdhci-acpi.c
@@ -41,6 +41,11 @@
41#include <linux/mmc/pm.h> 41#include <linux/mmc/pm.h>
42#include <linux/mmc/slot-gpio.h> 42#include <linux/mmc/slot-gpio.h>
43 43
44#ifdef CONFIG_X86
45#include <asm/cpu_device_id.h>
46#include <asm/iosf_mbi.h>
47#endif
48
44#include "sdhci.h" 49#include "sdhci.h"
45 50
46enum { 51enum {
@@ -116,6 +121,75 @@ static const struct sdhci_acpi_chip sdhci_acpi_chip_int = {
116 .ops = &sdhci_acpi_ops_int, 121 .ops = &sdhci_acpi_ops_int,
117}; 122};
118 123
124#ifdef CONFIG_X86
125
126static bool sdhci_acpi_byt(void)
127{
128 static const struct x86_cpu_id byt[] = {
129 { X86_VENDOR_INTEL, 6, 0x37 },
130 {}
131 };
132
133 return x86_match_cpu(byt);
134}
135
136#define BYT_IOSF_SCCEP 0x63
137#define BYT_IOSF_OCP_NETCTRL0 0x1078
138#define BYT_IOSF_OCP_TIMEOUT_BASE GENMASK(10, 8)
139
140static void sdhci_acpi_byt_setting(struct device *dev)
141{
142 u32 val = 0;
143
144 if (!sdhci_acpi_byt())
145 return;
146
147 if (iosf_mbi_read(BYT_IOSF_SCCEP, MBI_CR_READ, BYT_IOSF_OCP_NETCTRL0,
148 &val)) {
149 dev_err(dev, "%s read error\n", __func__);
150 return;
151 }
152
153 if (!(val & BYT_IOSF_OCP_TIMEOUT_BASE))
154 return;
155
156 val &= ~BYT_IOSF_OCP_TIMEOUT_BASE;
157
158 if (iosf_mbi_write(BYT_IOSF_SCCEP, MBI_CR_WRITE, BYT_IOSF_OCP_NETCTRL0,
159 val)) {
160 dev_err(dev, "%s write error\n", __func__);
161 return;
162 }
163
164 dev_dbg(dev, "%s completed\n", __func__);
165}
166
167static bool sdhci_acpi_byt_defer(struct device *dev)
168{
169 if (!sdhci_acpi_byt())
170 return false;
171
172 if (!iosf_mbi_available())
173 return true;
174
175 sdhci_acpi_byt_setting(dev);
176
177 return false;
178}
179
180#else
181
182static inline void sdhci_acpi_byt_setting(struct device *dev)
183{
184}
185
186static inline bool sdhci_acpi_byt_defer(struct device *dev)
187{
188 return false;
189}
190
191#endif
192
119static int bxt_get_cd(struct mmc_host *mmc) 193static int bxt_get_cd(struct mmc_host *mmc)
120{ 194{
121 int gpio_cd = mmc_gpio_get_cd(mmc); 195 int gpio_cd = mmc_gpio_get_cd(mmc);
@@ -322,6 +396,9 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
322 if (acpi_bus_get_status(device) || !device->status.present) 396 if (acpi_bus_get_status(device) || !device->status.present)
323 return -ENODEV; 397 return -ENODEV;
324 398
399 if (sdhci_acpi_byt_defer(dev))
400 return -EPROBE_DEFER;
401
325 hid = acpi_device_hid(device); 402 hid = acpi_device_hid(device);
326 uid = device->pnp.unique_id; 403 uid = device->pnp.unique_id;
327 404
@@ -447,6 +524,8 @@ static int sdhci_acpi_resume(struct device *dev)
447{ 524{
448 struct sdhci_acpi_host *c = dev_get_drvdata(dev); 525 struct sdhci_acpi_host *c = dev_get_drvdata(dev);
449 526
527 sdhci_acpi_byt_setting(&c->pdev->dev);
528
450 return sdhci_resume_host(c->host); 529 return sdhci_resume_host(c->host);
451} 530}
452 531
@@ -470,6 +549,8 @@ static int sdhci_acpi_runtime_resume(struct device *dev)
470{ 549{
471 struct sdhci_acpi_host *c = dev_get_drvdata(dev); 550 struct sdhci_acpi_host *c = dev_get_drvdata(dev);
472 551
552 sdhci_acpi_byt_setting(&c->pdev->dev);
553
473 return sdhci_runtime_resume_host(c->host); 554 return sdhci_runtime_resume_host(c->host);
474} 555}
475 556
diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c
index 8372a413848c..7fc8b7aa83f0 100644
--- a/drivers/mmc/host/sunxi-mmc.c
+++ b/drivers/mmc/host/sunxi-mmc.c
@@ -1129,6 +1129,11 @@ static int sunxi_mmc_probe(struct platform_device *pdev)
1129 MMC_CAP_1_8V_DDR | 1129 MMC_CAP_1_8V_DDR |
1130 MMC_CAP_ERASE | MMC_CAP_SDIO_IRQ; 1130 MMC_CAP_ERASE | MMC_CAP_SDIO_IRQ;
1131 1131
1132 /* TODO MMC DDR is not working on A80 */
1133 if (of_device_is_compatible(pdev->dev.of_node,
1134 "allwinner,sun9i-a80-mmc"))
1135 mmc->caps &= ~MMC_CAP_1_8V_DDR;
1136
1132 ret = mmc_of_parse(mmc); 1137 ret = mmc_of_parse(mmc);
1133 if (ret) 1138 if (ret)
1134 goto error_free_dma; 1139 goto error_free_dma;
diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c
index 61150af37bc7..470cfc783baa 100644
--- a/drivers/net/dsa/mv88e6xxx.c
+++ b/drivers/net/dsa/mv88e6xxx.c
@@ -2202,7 +2202,7 @@ int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port,
2202 struct net_device *bridge) 2202 struct net_device *bridge)
2203{ 2203{
2204 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); 2204 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2205 int i, err; 2205 int i, err = 0;
2206 2206
2207 mutex_lock(&ps->smi_mutex); 2207 mutex_lock(&ps->smi_mutex);
2208 2208
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 4645c44e7c15..1199c2b4bf20 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -588,12 +588,30 @@ static inline int bnxt_alloc_rx_page(struct bnxt *bp,
588 struct page *page; 588 struct page *page;
589 dma_addr_t mapping; 589 dma_addr_t mapping;
590 u16 sw_prod = rxr->rx_sw_agg_prod; 590 u16 sw_prod = rxr->rx_sw_agg_prod;
591 unsigned int offset = 0;
591 592
592 page = alloc_page(gfp); 593 if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
593 if (!page) 594 page = rxr->rx_page;
594 return -ENOMEM; 595 if (!page) {
596 page = alloc_page(gfp);
597 if (!page)
598 return -ENOMEM;
599 rxr->rx_page = page;
600 rxr->rx_page_offset = 0;
601 }
602 offset = rxr->rx_page_offset;
603 rxr->rx_page_offset += BNXT_RX_PAGE_SIZE;
604 if (rxr->rx_page_offset == PAGE_SIZE)
605 rxr->rx_page = NULL;
606 else
607 get_page(page);
608 } else {
609 page = alloc_page(gfp);
610 if (!page)
611 return -ENOMEM;
612 }
595 613
596 mapping = dma_map_page(&pdev->dev, page, 0, PAGE_SIZE, 614 mapping = dma_map_page(&pdev->dev, page, offset, BNXT_RX_PAGE_SIZE,
597 PCI_DMA_FROMDEVICE); 615 PCI_DMA_FROMDEVICE);
598 if (dma_mapping_error(&pdev->dev, mapping)) { 616 if (dma_mapping_error(&pdev->dev, mapping)) {
599 __free_page(page); 617 __free_page(page);
@@ -608,6 +626,7 @@ static inline int bnxt_alloc_rx_page(struct bnxt *bp,
608 rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod); 626 rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod);
609 627
610 rx_agg_buf->page = page; 628 rx_agg_buf->page = page;
629 rx_agg_buf->offset = offset;
611 rx_agg_buf->mapping = mapping; 630 rx_agg_buf->mapping = mapping;
612 rxbd->rx_bd_haddr = cpu_to_le64(mapping); 631 rxbd->rx_bd_haddr = cpu_to_le64(mapping);
613 rxbd->rx_bd_opaque = sw_prod; 632 rxbd->rx_bd_opaque = sw_prod;
@@ -649,6 +668,7 @@ static void bnxt_reuse_rx_agg_bufs(struct bnxt_napi *bnapi, u16 cp_cons,
649 page = cons_rx_buf->page; 668 page = cons_rx_buf->page;
650 cons_rx_buf->page = NULL; 669 cons_rx_buf->page = NULL;
651 prod_rx_buf->page = page; 670 prod_rx_buf->page = page;
671 prod_rx_buf->offset = cons_rx_buf->offset;
652 672
653 prod_rx_buf->mapping = cons_rx_buf->mapping; 673 prod_rx_buf->mapping = cons_rx_buf->mapping;
654 674
@@ -716,7 +736,8 @@ static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, struct bnxt_napi *bnapi,
716 RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT; 736 RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;
717 737
718 cons_rx_buf = &rxr->rx_agg_ring[cons]; 738 cons_rx_buf = &rxr->rx_agg_ring[cons];
719 skb_fill_page_desc(skb, i, cons_rx_buf->page, 0, frag_len); 739 skb_fill_page_desc(skb, i, cons_rx_buf->page,
740 cons_rx_buf->offset, frag_len);
720 __clear_bit(cons, rxr->rx_agg_bmap); 741 __clear_bit(cons, rxr->rx_agg_bmap);
721 742
722 /* It is possible for bnxt_alloc_rx_page() to allocate 743 /* It is possible for bnxt_alloc_rx_page() to allocate
@@ -747,7 +768,7 @@ static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, struct bnxt_napi *bnapi,
747 return NULL; 768 return NULL;
748 } 769 }
749 770
750 dma_unmap_page(&pdev->dev, mapping, PAGE_SIZE, 771 dma_unmap_page(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
751 PCI_DMA_FROMDEVICE); 772 PCI_DMA_FROMDEVICE);
752 773
753 skb->data_len += frag_len; 774 skb->data_len += frag_len;
@@ -1635,13 +1656,17 @@ static void bnxt_free_rx_skbs(struct bnxt *bp)
1635 1656
1636 dma_unmap_page(&pdev->dev, 1657 dma_unmap_page(&pdev->dev,
1637 dma_unmap_addr(rx_agg_buf, mapping), 1658 dma_unmap_addr(rx_agg_buf, mapping),
1638 PAGE_SIZE, PCI_DMA_FROMDEVICE); 1659 BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE);
1639 1660
1640 rx_agg_buf->page = NULL; 1661 rx_agg_buf->page = NULL;
1641 __clear_bit(j, rxr->rx_agg_bmap); 1662 __clear_bit(j, rxr->rx_agg_bmap);
1642 1663
1643 __free_page(page); 1664 __free_page(page);
1644 } 1665 }
1666 if (rxr->rx_page) {
1667 __free_page(rxr->rx_page);
1668 rxr->rx_page = NULL;
1669 }
1645 } 1670 }
1646} 1671}
1647 1672
@@ -2024,7 +2049,7 @@ static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
2024 if (!(bp->flags & BNXT_FLAG_AGG_RINGS)) 2049 if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
2025 return 0; 2050 return 0;
2026 2051
2027 type = ((u32)PAGE_SIZE << RX_BD_LEN_SHIFT) | 2052 type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
2028 RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP; 2053 RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
2029 2054
2030 bnxt_init_rxbd_pages(ring, type); 2055 bnxt_init_rxbd_pages(ring, type);
@@ -2215,7 +2240,7 @@ void bnxt_set_ring_params(struct bnxt *bp)
2215 bp->rx_agg_nr_pages = 0; 2240 bp->rx_agg_nr_pages = 0;
2216 2241
2217 if (bp->flags & BNXT_FLAG_TPA) 2242 if (bp->flags & BNXT_FLAG_TPA)
2218 agg_factor = 4; 2243 agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE);
2219 2244
2220 bp->flags &= ~BNXT_FLAG_JUMBO; 2245 bp->flags &= ~BNXT_FLAG_JUMBO;
2221 if (rx_space > PAGE_SIZE) { 2246 if (rx_space > PAGE_SIZE) {
@@ -3076,12 +3101,12 @@ static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
3076 /* Number of segs are log2 units, and first packet is not 3101 /* Number of segs are log2 units, and first packet is not
3077 * included as part of this units. 3102 * included as part of this units.
3078 */ 3103 */
3079 if (mss <= PAGE_SIZE) { 3104 if (mss <= BNXT_RX_PAGE_SIZE) {
3080 n = PAGE_SIZE / mss; 3105 n = BNXT_RX_PAGE_SIZE / mss;
3081 nsegs = (MAX_SKB_FRAGS - 1) * n; 3106 nsegs = (MAX_SKB_FRAGS - 1) * n;
3082 } else { 3107 } else {
3083 n = mss / PAGE_SIZE; 3108 n = mss / BNXT_RX_PAGE_SIZE;
3084 if (mss & (PAGE_SIZE - 1)) 3109 if (mss & (BNXT_RX_PAGE_SIZE - 1))
3085 n++; 3110 n++;
3086 nsegs = (MAX_SKB_FRAGS - n) / n; 3111 nsegs = (MAX_SKB_FRAGS - n) / n;
3087 } 3112 }
@@ -4367,7 +4392,7 @@ static int bnxt_setup_int_mode(struct bnxt *bp)
4367 if (bp->flags & BNXT_FLAG_MSIX_CAP) 4392 if (bp->flags & BNXT_FLAG_MSIX_CAP)
4368 rc = bnxt_setup_msix(bp); 4393 rc = bnxt_setup_msix(bp);
4369 4394
4370 if (!(bp->flags & BNXT_FLAG_USING_MSIX)) { 4395 if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) {
4371 /* fallback to INTA */ 4396 /* fallback to INTA */
4372 rc = bnxt_setup_inta(bp); 4397 rc = bnxt_setup_inta(bp);
4373 } 4398 }
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 26dac2f3c63c..62896352b0df 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -407,6 +407,15 @@ struct rx_tpa_end_cmp_ext {
407 407
408#define BNXT_PAGE_SIZE (1 << BNXT_PAGE_SHIFT) 408#define BNXT_PAGE_SIZE (1 << BNXT_PAGE_SHIFT)
409 409
410/* The RXBD length is 16-bit so we can only support page sizes < 64K */
411#if (PAGE_SHIFT > 15)
412#define BNXT_RX_PAGE_SHIFT 15
413#else
414#define BNXT_RX_PAGE_SHIFT PAGE_SHIFT
415#endif
416
417#define BNXT_RX_PAGE_SIZE (1 << BNXT_RX_PAGE_SHIFT)
418
410#define BNXT_MIN_PKT_SIZE 45 419#define BNXT_MIN_PKT_SIZE 45
411 420
412#define BNXT_NUM_TESTS(bp) 0 421#define BNXT_NUM_TESTS(bp) 0
@@ -506,6 +515,7 @@ struct bnxt_sw_rx_bd {
506 515
507struct bnxt_sw_rx_agg_bd { 516struct bnxt_sw_rx_agg_bd {
508 struct page *page; 517 struct page *page;
518 unsigned int offset;
509 dma_addr_t mapping; 519 dma_addr_t mapping;
510}; 520};
511 521
@@ -586,6 +596,9 @@ struct bnxt_rx_ring_info {
586 unsigned long *rx_agg_bmap; 596 unsigned long *rx_agg_bmap;
587 u16 rx_agg_bmap_size; 597 u16 rx_agg_bmap_size;
588 598
599 struct page *rx_page;
600 unsigned int rx_page_offset;
601
589 dma_addr_t rx_desc_mapping[MAX_RX_PAGES]; 602 dma_addr_t rx_desc_mapping[MAX_RX_PAGES];
590 dma_addr_t rx_agg_desc_mapping[MAX_RX_AGG_PAGES]; 603 dma_addr_t rx_agg_desc_mapping[MAX_RX_AGG_PAGES];
591 604
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index eec3200ade4a..cb07d95e3dd9 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -440,7 +440,7 @@ static int macb_mii_init(struct macb *bp)
440 snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", 440 snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
441 bp->pdev->name, bp->pdev->id); 441 bp->pdev->name, bp->pdev->id);
442 bp->mii_bus->priv = bp; 442 bp->mii_bus->priv = bp;
443 bp->mii_bus->parent = &bp->dev->dev; 443 bp->mii_bus->parent = &bp->pdev->dev;
444 pdata = dev_get_platdata(&bp->pdev->dev); 444 pdata = dev_get_platdata(&bp->pdev->dev);
445 445
446 dev_set_drvdata(&bp->dev->dev, bp->mii_bus); 446 dev_set_drvdata(&bp->dev->dev, bp->mii_bus);
@@ -458,7 +458,8 @@ static int macb_mii_init(struct macb *bp)
458 struct phy_device *phydev; 458 struct phy_device *phydev;
459 459
460 phydev = mdiobus_scan(bp->mii_bus, i); 460 phydev = mdiobus_scan(bp->mii_bus, i);
461 if (IS_ERR(phydev)) { 461 if (IS_ERR(phydev) &&
462 PTR_ERR(phydev) != -ENODEV) {
462 err = PTR_ERR(phydev); 463 err = PTR_ERR(phydev);
463 break; 464 break;
464 } 465 }
@@ -3005,29 +3006,36 @@ static int macb_probe(struct platform_device *pdev)
3005 if (err) 3006 if (err)
3006 goto err_out_free_netdev; 3007 goto err_out_free_netdev;
3007 3008
3009 err = macb_mii_init(bp);
3010 if (err)
3011 goto err_out_free_netdev;
3012
3013 phydev = bp->phy_dev;
3014
3015 netif_carrier_off(dev);
3016
3008 err = register_netdev(dev); 3017 err = register_netdev(dev);
3009 if (err) { 3018 if (err) {
3010 dev_err(&pdev->dev, "Cannot register net device, aborting.\n"); 3019 dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
3011 goto err_out_unregister_netdev; 3020 goto err_out_unregister_mdio;
3012 } 3021 }
3013 3022
3014 err = macb_mii_init(bp); 3023 phy_attached_info(phydev);
3015 if (err)
3016 goto err_out_unregister_netdev;
3017
3018 netif_carrier_off(dev);
3019 3024
3020 netdev_info(dev, "Cadence %s rev 0x%08x at 0x%08lx irq %d (%pM)\n", 3025 netdev_info(dev, "Cadence %s rev 0x%08x at 0x%08lx irq %d (%pM)\n",
3021 macb_is_gem(bp) ? "GEM" : "MACB", macb_readl(bp, MID), 3026 macb_is_gem(bp) ? "GEM" : "MACB", macb_readl(bp, MID),
3022 dev->base_addr, dev->irq, dev->dev_addr); 3027 dev->base_addr, dev->irq, dev->dev_addr);
3023 3028
3024 phydev = bp->phy_dev;
3025 phy_attached_info(phydev);
3026
3027 return 0; 3029 return 0;
3028 3030
3029err_out_unregister_netdev: 3031err_out_unregister_mdio:
3030 unregister_netdev(dev); 3032 phy_disconnect(bp->phy_dev);
3033 mdiobus_unregister(bp->mii_bus);
3034 mdiobus_free(bp->mii_bus);
3035
3036 /* Shutdown the PHY if there is a GPIO reset */
3037 if (bp->reset_gpio)
3038 gpiod_set_value(bp->reset_gpio, 0);
3031 3039
3032err_out_free_netdev: 3040err_out_free_netdev:
3033 free_netdev(dev); 3041 free_netdev(dev);
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index 60908eab3b3a..43da891fab97 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -576,7 +576,7 @@ static void setup_rss(struct adapter *adap)
576 unsigned int nq0 = adap2pinfo(adap, 0)->nqsets; 576 unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
577 unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1; 577 unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
578 u8 cpus[SGE_QSETS + 1]; 578 u8 cpus[SGE_QSETS + 1];
579 u16 rspq_map[RSS_TABLE_SIZE]; 579 u16 rspq_map[RSS_TABLE_SIZE + 1];
580 580
581 for (i = 0; i < SGE_QSETS; ++i) 581 for (i = 0; i < SGE_QSETS; ++i)
582 cpus[i] = i; 582 cpus[i] = i;
@@ -586,6 +586,7 @@ static void setup_rss(struct adapter *adap)
586 rspq_map[i] = i % nq0; 586 rspq_map[i] = i % nq0;
587 rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0; 587 rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
588 } 588 }
589 rspq_map[RSS_TABLE_SIZE] = 0xffff; /* terminator */
589 590
590 t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN | 591 t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
591 F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN | 592 F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 7fc490225da5..a6d26d351dfc 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -3354,8 +3354,7 @@ static int mvneta_percpu_notifier(struct notifier_block *nfb,
3354 /* Enable per-CPU interrupts on the CPU that is 3354 /* Enable per-CPU interrupts on the CPU that is
3355 * brought up. 3355 * brought up.
3356 */ 3356 */
3357 smp_call_function_single(cpu, mvneta_percpu_enable, 3357 mvneta_percpu_enable(pp);
3358 pp, true);
3359 3358
3360 /* Enable per-CPU interrupt on the one CPU we care 3359 /* Enable per-CPU interrupt on the one CPU we care
3361 * about. 3360 * about.
@@ -3387,8 +3386,7 @@ static int mvneta_percpu_notifier(struct notifier_block *nfb,
3387 /* Disable per-CPU interrupts on the CPU that is 3386 /* Disable per-CPU interrupts on the CPU that is
3388 * brought down. 3387 * brought down.
3389 */ 3388 */
3390 smp_call_function_single(cpu, mvneta_percpu_disable, 3389 mvneta_percpu_disable(pp);
3391 pp, true);
3392 3390
3393 break; 3391 break;
3394 case CPU_DEAD: 3392 case CPU_DEAD:
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index 7ace07dad6a3..c442f6ad15ff 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -979,6 +979,8 @@ static int pxa168_init_phy(struct net_device *dev)
979 return 0; 979 return 0;
980 980
981 pep->phy = mdiobus_scan(pep->smi_bus, pep->phy_addr); 981 pep->phy = mdiobus_scan(pep->smi_bus, pep->phy_addr);
982 if (IS_ERR(pep->phy))
983 return PTR_ERR(pep->phy);
982 if (!pep->phy) 984 if (!pep->phy)
983 return -ENODEV; 985 return -ENODEV;
984 986
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index 1cf722eba607..559d11a443bc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -14,6 +14,7 @@ config MLX5_CORE_EN
14 bool "Mellanox Technologies ConnectX-4 Ethernet support" 14 bool "Mellanox Technologies ConnectX-4 Ethernet support"
15 depends on NETDEVICES && ETHERNET && PCI && MLX5_CORE 15 depends on NETDEVICES && ETHERNET && PCI && MLX5_CORE
16 select PTP_1588_CLOCK 16 select PTP_1588_CLOCK
17 select VXLAN if MLX5_CORE=y
17 default n 18 default n
18 ---help--- 19 ---help---
19 Ethernet support in Mellanox Technologies ConnectX-4 NIC. 20 Ethernet support in Mellanox Technologies ConnectX-4 NIC.
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 34523c48444e..bfa5daaaf5aa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -525,6 +525,7 @@ struct mlx5e_priv {
525 struct mlx5e_vxlan_db vxlan; 525 struct mlx5e_vxlan_db vxlan;
526 526
527 struct mlx5e_params params; 527 struct mlx5e_params params;
528 struct workqueue_struct *wq;
528 struct work_struct update_carrier_work; 529 struct work_struct update_carrier_work;
529 struct work_struct set_rx_mode_work; 530 struct work_struct set_rx_mode_work;
530 struct delayed_work update_stats_work; 531 struct delayed_work update_stats_work;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 4ccfc1ac62c5..7dfb73aa8e41 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -231,9 +231,8 @@ static void mlx5e_update_stats_work(struct work_struct *work)
231 mutex_lock(&priv->state_lock); 231 mutex_lock(&priv->state_lock);
232 if (test_bit(MLX5E_STATE_OPENED, &priv->state)) { 232 if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
233 mlx5e_update_stats(priv); 233 mlx5e_update_stats(priv);
234 schedule_delayed_work(dwork, 234 queue_delayed_work(priv->wq, dwork,
235 msecs_to_jiffies( 235 msecs_to_jiffies(MLX5E_UPDATE_STATS_INTERVAL));
236 MLX5E_UPDATE_STATS_INTERVAL));
237 } 236 }
238 mutex_unlock(&priv->state_lock); 237 mutex_unlock(&priv->state_lock);
239} 238}
@@ -249,7 +248,7 @@ static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
249 switch (event) { 248 switch (event) {
250 case MLX5_DEV_EVENT_PORT_UP: 249 case MLX5_DEV_EVENT_PORT_UP:
251 case MLX5_DEV_EVENT_PORT_DOWN: 250 case MLX5_DEV_EVENT_PORT_DOWN:
252 schedule_work(&priv->update_carrier_work); 251 queue_work(priv->wq, &priv->update_carrier_work);
253 break; 252 break;
254 253
255 default: 254 default:
@@ -1695,7 +1694,7 @@ int mlx5e_open_locked(struct net_device *netdev)
1695 priv->netdev->rx_cpu_rmap = priv->mdev->rmap; 1694 priv->netdev->rx_cpu_rmap = priv->mdev->rmap;
1696#endif 1695#endif
1697 1696
1698 schedule_delayed_work(&priv->update_stats_work, 0); 1697 queue_delayed_work(priv->wq, &priv->update_stats_work, 0);
1699 1698
1700 return 0; 1699 return 0;
1701 1700
@@ -2202,7 +2201,7 @@ static void mlx5e_set_rx_mode(struct net_device *dev)
2202{ 2201{
2203 struct mlx5e_priv *priv = netdev_priv(dev); 2202 struct mlx5e_priv *priv = netdev_priv(dev);
2204 2203
2205 schedule_work(&priv->set_rx_mode_work); 2204 queue_work(priv->wq, &priv->set_rx_mode_work);
2206} 2205}
2207 2206
2208static int mlx5e_set_mac(struct net_device *netdev, void *addr) 2207static int mlx5e_set_mac(struct net_device *netdev, void *addr)
@@ -2217,7 +2216,7 @@ static int mlx5e_set_mac(struct net_device *netdev, void *addr)
2217 ether_addr_copy(netdev->dev_addr, saddr->sa_data); 2216 ether_addr_copy(netdev->dev_addr, saddr->sa_data);
2218 netif_addr_unlock_bh(netdev); 2217 netif_addr_unlock_bh(netdev);
2219 2218
2220 schedule_work(&priv->set_rx_mode_work); 2219 queue_work(priv->wq, &priv->set_rx_mode_work);
2221 2220
2222 return 0; 2221 return 0;
2223} 2222}
@@ -2503,7 +2502,7 @@ static void mlx5e_add_vxlan_port(struct net_device *netdev,
2503 if (!mlx5e_vxlan_allowed(priv->mdev)) 2502 if (!mlx5e_vxlan_allowed(priv->mdev))
2504 return; 2503 return;
2505 2504
2506 mlx5e_vxlan_add_port(priv, be16_to_cpu(port)); 2505 mlx5e_vxlan_queue_work(priv, sa_family, be16_to_cpu(port), 1);
2507} 2506}
2508 2507
2509static void mlx5e_del_vxlan_port(struct net_device *netdev, 2508static void mlx5e_del_vxlan_port(struct net_device *netdev,
@@ -2514,7 +2513,7 @@ static void mlx5e_del_vxlan_port(struct net_device *netdev,
2514 if (!mlx5e_vxlan_allowed(priv->mdev)) 2513 if (!mlx5e_vxlan_allowed(priv->mdev))
2515 return; 2514 return;
2516 2515
2517 mlx5e_vxlan_del_port(priv, be16_to_cpu(port)); 2516 mlx5e_vxlan_queue_work(priv, sa_family, be16_to_cpu(port), 0);
2518} 2517}
2519 2518
2520static netdev_features_t mlx5e_vxlan_features_check(struct mlx5e_priv *priv, 2519static netdev_features_t mlx5e_vxlan_features_check(struct mlx5e_priv *priv,
@@ -2947,10 +2946,14 @@ static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev)
2947 2946
2948 priv = netdev_priv(netdev); 2947 priv = netdev_priv(netdev);
2949 2948
2949 priv->wq = create_singlethread_workqueue("mlx5e");
2950 if (!priv->wq)
2951 goto err_free_netdev;
2952
2950 err = mlx5_alloc_map_uar(mdev, &priv->cq_uar, false); 2953 err = mlx5_alloc_map_uar(mdev, &priv->cq_uar, false);
2951 if (err) { 2954 if (err) {
2952 mlx5_core_err(mdev, "alloc_map uar failed, %d\n", err); 2955 mlx5_core_err(mdev, "alloc_map uar failed, %d\n", err);
2953 goto err_free_netdev; 2956 goto err_destroy_wq;
2954 } 2957 }
2955 2958
2956 err = mlx5_core_alloc_pd(mdev, &priv->pdn); 2959 err = mlx5_core_alloc_pd(mdev, &priv->pdn);
@@ -3034,7 +3037,7 @@ static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev)
3034 } 3037 }
3035 3038
3036 mlx5e_enable_async_events(priv); 3039 mlx5e_enable_async_events(priv);
3037 schedule_work(&priv->set_rx_mode_work); 3040 queue_work(priv->wq, &priv->set_rx_mode_work);
3038 3041
3039 return priv; 3042 return priv;
3040 3043
@@ -3072,6 +3075,9 @@ err_dealloc_pd:
3072err_unmap_free_uar: 3075err_unmap_free_uar:
3073 mlx5_unmap_free_uar(mdev, &priv->cq_uar); 3076 mlx5_unmap_free_uar(mdev, &priv->cq_uar);
3074 3077
3078err_destroy_wq:
3079 destroy_workqueue(priv->wq);
3080
3075err_free_netdev: 3081err_free_netdev:
3076 free_netdev(netdev); 3082 free_netdev(netdev);
3077 3083
@@ -3085,9 +3091,9 @@ static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv)
3085 3091
3086 set_bit(MLX5E_STATE_DESTROYING, &priv->state); 3092 set_bit(MLX5E_STATE_DESTROYING, &priv->state);
3087 3093
3088 schedule_work(&priv->set_rx_mode_work); 3094 queue_work(priv->wq, &priv->set_rx_mode_work);
3089 mlx5e_disable_async_events(priv); 3095 mlx5e_disable_async_events(priv);
3090 flush_scheduled_work(); 3096 flush_workqueue(priv->wq);
3091 if (test_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &mdev->intf_state)) { 3097 if (test_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &mdev->intf_state)) {
3092 netif_device_detach(netdev); 3098 netif_device_detach(netdev);
3093 mutex_lock(&priv->state_lock); 3099 mutex_lock(&priv->state_lock);
@@ -3111,6 +3117,8 @@ static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv)
3111 mlx5_core_dealloc_transport_domain(priv->mdev, priv->tdn); 3117 mlx5_core_dealloc_transport_domain(priv->mdev, priv->tdn);
3112 mlx5_core_dealloc_pd(priv->mdev, priv->pdn); 3118 mlx5_core_dealloc_pd(priv->mdev, priv->pdn);
3113 mlx5_unmap_free_uar(priv->mdev, &priv->cq_uar); 3119 mlx5_unmap_free_uar(priv->mdev, &priv->cq_uar);
3120 cancel_delayed_work_sync(&priv->update_stats_work);
3121 destroy_workqueue(priv->wq);
3114 3122
3115 if (!test_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &mdev->intf_state)) 3123 if (!test_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &mdev->intf_state))
3116 free_netdev(netdev); 3124 free_netdev(netdev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/uar.c b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
index 8ba080e441a1..5ff8af472bf5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/uar.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
@@ -269,8 +269,10 @@ EXPORT_SYMBOL(mlx5_alloc_map_uar);
269 269
270void mlx5_unmap_free_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar) 270void mlx5_unmap_free_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar)
271{ 271{
272 iounmap(uar->map); 272 if (uar->map)
273 iounmap(uar->bf_map); 273 iounmap(uar->map);
274 else
275 iounmap(uar->bf_map);
274 mlx5_cmd_free_uar(mdev, uar->index); 276 mlx5_cmd_free_uar(mdev, uar->index);
275} 277}
276EXPORT_SYMBOL(mlx5_unmap_free_uar); 278EXPORT_SYMBOL(mlx5_unmap_free_uar);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c
index 9f10df25f3cd..f2fd1ef16da7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c
@@ -95,21 +95,22 @@ struct mlx5e_vxlan *mlx5e_vxlan_lookup_port(struct mlx5e_priv *priv, u16 port)
95 return vxlan; 95 return vxlan;
96} 96}
97 97
98int mlx5e_vxlan_add_port(struct mlx5e_priv *priv, u16 port) 98static void mlx5e_vxlan_add_port(struct work_struct *work)
99{ 99{
100 struct mlx5e_vxlan_work *vxlan_work =
101 container_of(work, struct mlx5e_vxlan_work, work);
102 struct mlx5e_priv *priv = vxlan_work->priv;
100 struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan; 103 struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan;
104 u16 port = vxlan_work->port;
101 struct mlx5e_vxlan *vxlan; 105 struct mlx5e_vxlan *vxlan;
102 int err; 106 int err;
103 107
104 err = mlx5e_vxlan_core_add_port_cmd(priv->mdev, port); 108 if (mlx5e_vxlan_core_add_port_cmd(priv->mdev, port))
105 if (err) 109 goto free_work;
106 return err;
107 110
108 vxlan = kzalloc(sizeof(*vxlan), GFP_KERNEL); 111 vxlan = kzalloc(sizeof(*vxlan), GFP_KERNEL);
109 if (!vxlan) { 112 if (!vxlan)
110 err = -ENOMEM;
111 goto err_delete_port; 113 goto err_delete_port;
112 }
113 114
114 vxlan->udp_port = port; 115 vxlan->udp_port = port;
115 116
@@ -119,13 +120,14 @@ int mlx5e_vxlan_add_port(struct mlx5e_priv *priv, u16 port)
119 if (err) 120 if (err)
120 goto err_free; 121 goto err_free;
121 122
122 return 0; 123 goto free_work;
123 124
124err_free: 125err_free:
125 kfree(vxlan); 126 kfree(vxlan);
126err_delete_port: 127err_delete_port:
127 mlx5e_vxlan_core_del_port_cmd(priv->mdev, port); 128 mlx5e_vxlan_core_del_port_cmd(priv->mdev, port);
128 return err; 129free_work:
130 kfree(vxlan_work);
129} 131}
130 132
131static void __mlx5e_vxlan_core_del_port(struct mlx5e_priv *priv, u16 port) 133static void __mlx5e_vxlan_core_del_port(struct mlx5e_priv *priv, u16 port)
@@ -145,12 +147,36 @@ static void __mlx5e_vxlan_core_del_port(struct mlx5e_priv *priv, u16 port)
145 kfree(vxlan); 147 kfree(vxlan);
146} 148}
147 149
148void mlx5e_vxlan_del_port(struct mlx5e_priv *priv, u16 port) 150static void mlx5e_vxlan_del_port(struct work_struct *work)
149{ 151{
150 if (!mlx5e_vxlan_lookup_port(priv, port)) 152 struct mlx5e_vxlan_work *vxlan_work =
151 return; 153 container_of(work, struct mlx5e_vxlan_work, work);
154 struct mlx5e_priv *priv = vxlan_work->priv;
155 u16 port = vxlan_work->port;
152 156
153 __mlx5e_vxlan_core_del_port(priv, port); 157 __mlx5e_vxlan_core_del_port(priv, port);
158
159 kfree(vxlan_work);
160}
161
162void mlx5e_vxlan_queue_work(struct mlx5e_priv *priv, sa_family_t sa_family,
163 u16 port, int add)
164{
165 struct mlx5e_vxlan_work *vxlan_work;
166
167 vxlan_work = kmalloc(sizeof(*vxlan_work), GFP_ATOMIC);
168 if (!vxlan_work)
169 return;
170
171 if (add)
172 INIT_WORK(&vxlan_work->work, mlx5e_vxlan_add_port);
173 else
174 INIT_WORK(&vxlan_work->work, mlx5e_vxlan_del_port);
175
176 vxlan_work->priv = priv;
177 vxlan_work->port = port;
178 vxlan_work->sa_family = sa_family;
179 queue_work(priv->wq, &vxlan_work->work);
154} 180}
155 181
156void mlx5e_vxlan_cleanup(struct mlx5e_priv *priv) 182void mlx5e_vxlan_cleanup(struct mlx5e_priv *priv)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h
index a01685056ab1..129f3527aa14 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h
@@ -39,6 +39,13 @@ struct mlx5e_vxlan {
39 u16 udp_port; 39 u16 udp_port;
40}; 40};
41 41
42struct mlx5e_vxlan_work {
43 struct work_struct work;
44 struct mlx5e_priv *priv;
45 sa_family_t sa_family;
46 u16 port;
47};
48
42static inline bool mlx5e_vxlan_allowed(struct mlx5_core_dev *mdev) 49static inline bool mlx5e_vxlan_allowed(struct mlx5_core_dev *mdev)
43{ 50{
44 return (MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan) && 51 return (MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan) &&
@@ -46,8 +53,8 @@ static inline bool mlx5e_vxlan_allowed(struct mlx5_core_dev *mdev)
46} 53}
47 54
48void mlx5e_vxlan_init(struct mlx5e_priv *priv); 55void mlx5e_vxlan_init(struct mlx5e_priv *priv);
49int mlx5e_vxlan_add_port(struct mlx5e_priv *priv, u16 port); 56void mlx5e_vxlan_queue_work(struct mlx5e_priv *priv, sa_family_t sa_family,
50void mlx5e_vxlan_del_port(struct mlx5e_priv *priv, u16 port); 57 u16 port, int add);
51struct mlx5e_vxlan *mlx5e_vxlan_lookup_port(struct mlx5e_priv *priv, u16 port); 58struct mlx5e_vxlan *mlx5e_vxlan_lookup_port(struct mlx5e_priv *priv, u16 port);
52void mlx5e_vxlan_cleanup(struct mlx5e_priv *priv); 59void mlx5e_vxlan_cleanup(struct mlx5e_priv *priv);
53 60
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index 270c9eeb7ab6..6d1a956e3f77 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -2668,9 +2668,9 @@ static int myri10ge_close(struct net_device *dev)
2668 2668
2669 del_timer_sync(&mgp->watchdog_timer); 2669 del_timer_sync(&mgp->watchdog_timer);
2670 mgp->running = MYRI10GE_ETH_STOPPING; 2670 mgp->running = MYRI10GE_ETH_STOPPING;
2671 local_bh_disable(); /* myri10ge_ss_lock_napi needs bh disabled */
2672 for (i = 0; i < mgp->num_slices; i++) { 2671 for (i = 0; i < mgp->num_slices; i++) {
2673 napi_disable(&mgp->ss[i].napi); 2672 napi_disable(&mgp->ss[i].napi);
2673 local_bh_disable(); /* myri10ge_ss_lock_napi needs this */
2674 /* Lock the slice to prevent the busy_poll handler from 2674 /* Lock the slice to prevent the busy_poll handler from
2675 * accessing it. Later when we bring the NIC up, myri10ge_open 2675 * accessing it. Later when we bring the NIC up, myri10ge_open
2676 * resets the slice including this lock. 2676 * resets the slice including this lock.
@@ -2679,8 +2679,8 @@ static int myri10ge_close(struct net_device *dev)
2679 pr_info("Slice %d locked\n", i); 2679 pr_info("Slice %d locked\n", i);
2680 mdelay(1); 2680 mdelay(1);
2681 } 2681 }
2682 local_bh_enable();
2682 } 2683 }
2683 local_bh_enable();
2684 netif_carrier_off(dev); 2684 netif_carrier_off(dev);
2685 2685
2686 netif_tx_stop_all_queues(dev); 2686 netif_tx_stop_all_queues(dev);
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 98d33d462c6c..1681084cc96f 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -1920,6 +1920,10 @@ static int efx_ef10_alloc_rss_context(struct efx_nic *efx, u32 *context,
1920 return 0; 1920 return 0;
1921 } 1921 }
1922 1922
1923 if (nic_data->datapath_caps &
1924 1 << MC_CMD_GET_CAPABILITIES_OUT_RX_RSS_LIMITED_LBN)
1925 return -EOPNOTSUPP;
1926
1923 MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID, 1927 MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID,
1924 nic_data->vport_id); 1928 nic_data->vport_id);
1925 MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_TYPE, alloc_type); 1929 MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_TYPE, alloc_type);
@@ -2923,9 +2927,16 @@ static void efx_ef10_filter_push_prep(struct efx_nic *efx,
2923 bool replacing) 2927 bool replacing)
2924{ 2928{
2925 struct efx_ef10_nic_data *nic_data = efx->nic_data; 2929 struct efx_ef10_nic_data *nic_data = efx->nic_data;
2930 u32 flags = spec->flags;
2926 2931
2927 memset(inbuf, 0, MC_CMD_FILTER_OP_IN_LEN); 2932 memset(inbuf, 0, MC_CMD_FILTER_OP_IN_LEN);
2928 2933
2934 /* Remove RSS flag if we don't have an RSS context. */
2935 if (flags & EFX_FILTER_FLAG_RX_RSS &&
2936 spec->rss_context == EFX_FILTER_RSS_CONTEXT_DEFAULT &&
2937 nic_data->rx_rss_context == EFX_EF10_RSS_CONTEXT_INVALID)
2938 flags &= ~EFX_FILTER_FLAG_RX_RSS;
2939
2929 if (replacing) { 2940 if (replacing) {
2930 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP, 2941 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
2931 MC_CMD_FILTER_OP_IN_OP_REPLACE); 2942 MC_CMD_FILTER_OP_IN_OP_REPLACE);
@@ -2985,10 +2996,10 @@ static void efx_ef10_filter_push_prep(struct efx_nic *efx,
2985 spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ? 2996 spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ?
2986 0 : spec->dmaq_id); 2997 0 : spec->dmaq_id);
2987 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_MODE, 2998 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_MODE,
2988 (spec->flags & EFX_FILTER_FLAG_RX_RSS) ? 2999 (flags & EFX_FILTER_FLAG_RX_RSS) ?
2989 MC_CMD_FILTER_OP_IN_RX_MODE_RSS : 3000 MC_CMD_FILTER_OP_IN_RX_MODE_RSS :
2990 MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE); 3001 MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE);
2991 if (spec->flags & EFX_FILTER_FLAG_RX_RSS) 3002 if (flags & EFX_FILTER_FLAG_RX_RSS)
2992 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_CONTEXT, 3003 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_CONTEXT,
2993 spec->rss_context != 3004 spec->rss_context !=
2994 EFX_FILTER_RSS_CONTEXT_DEFAULT ? 3005 EFX_FILTER_RSS_CONTEXT_DEFAULT ?
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 0fa75a86b1c1..68577ee2e64a 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -367,7 +367,6 @@ struct cpsw_priv {
367 spinlock_t lock; 367 spinlock_t lock;
368 struct platform_device *pdev; 368 struct platform_device *pdev;
369 struct net_device *ndev; 369 struct net_device *ndev;
370 struct device_node *phy_node;
371 struct napi_struct napi_rx; 370 struct napi_struct napi_rx;
372 struct napi_struct napi_tx; 371 struct napi_struct napi_tx;
373 struct device *dev; 372 struct device *dev;
@@ -1142,25 +1141,34 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
1142 cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast, 1141 cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
1143 1 << slave_port, 0, 0, ALE_MCAST_FWD_2); 1142 1 << slave_port, 0, 0, ALE_MCAST_FWD_2);
1144 1143
1145 if (priv->phy_node) 1144 if (slave->data->phy_node) {
1146 slave->phy = of_phy_connect(priv->ndev, priv->phy_node, 1145 slave->phy = of_phy_connect(priv->ndev, slave->data->phy_node,
1147 &cpsw_adjust_link, 0, slave->data->phy_if); 1146 &cpsw_adjust_link, 0, slave->data->phy_if);
1148 else 1147 if (!slave->phy) {
1148 dev_err(priv->dev, "phy \"%s\" not found on slave %d\n",
1149 slave->data->phy_node->full_name,
1150 slave->slave_num);
1151 return;
1152 }
1153 } else {
1149 slave->phy = phy_connect(priv->ndev, slave->data->phy_id, 1154 slave->phy = phy_connect(priv->ndev, slave->data->phy_id,
1150 &cpsw_adjust_link, slave->data->phy_if); 1155 &cpsw_adjust_link, slave->data->phy_if);
1151 if (IS_ERR(slave->phy)) { 1156 if (IS_ERR(slave->phy)) {
1152 dev_err(priv->dev, "phy %s not found on slave %d\n", 1157 dev_err(priv->dev,
1153 slave->data->phy_id, slave->slave_num); 1158 "phy \"%s\" not found on slave %d, err %ld\n",
1154 slave->phy = NULL; 1159 slave->data->phy_id, slave->slave_num,
1155 } else { 1160 PTR_ERR(slave->phy));
1156 phy_attached_info(slave->phy); 1161 slave->phy = NULL;
1162 return;
1163 }
1164 }
1157 1165
1158 phy_start(slave->phy); 1166 phy_attached_info(slave->phy);
1159 1167
1160 /* Configure GMII_SEL register */ 1168 phy_start(slave->phy);
1161 cpsw_phy_sel(&priv->pdev->dev, slave->phy->interface, 1169
1162 slave->slave_num); 1170 /* Configure GMII_SEL register */
1163 } 1171 cpsw_phy_sel(&priv->pdev->dev, slave->phy->interface, slave->slave_num);
1164} 1172}
1165 1173
1166static inline void cpsw_add_default_vlan(struct cpsw_priv *priv) 1174static inline void cpsw_add_default_vlan(struct cpsw_priv *priv)
@@ -1932,12 +1940,11 @@ static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv,
1932 slave->port_vlan = data->dual_emac_res_vlan; 1940 slave->port_vlan = data->dual_emac_res_vlan;
1933} 1941}
1934 1942
1935static int cpsw_probe_dt(struct cpsw_priv *priv, 1943static int cpsw_probe_dt(struct cpsw_platform_data *data,
1936 struct platform_device *pdev) 1944 struct platform_device *pdev)
1937{ 1945{
1938 struct device_node *node = pdev->dev.of_node; 1946 struct device_node *node = pdev->dev.of_node;
1939 struct device_node *slave_node; 1947 struct device_node *slave_node;
1940 struct cpsw_platform_data *data = &priv->data;
1941 int i = 0, ret; 1948 int i = 0, ret;
1942 u32 prop; 1949 u32 prop;
1943 1950
@@ -2025,25 +2032,21 @@ static int cpsw_probe_dt(struct cpsw_priv *priv,
2025 if (strcmp(slave_node->name, "slave")) 2032 if (strcmp(slave_node->name, "slave"))
2026 continue; 2033 continue;
2027 2034
2028 priv->phy_node = of_parse_phandle(slave_node, "phy-handle", 0); 2035 slave_data->phy_node = of_parse_phandle(slave_node,
2036 "phy-handle", 0);
2029 parp = of_get_property(slave_node, "phy_id", &lenp); 2037 parp = of_get_property(slave_node, "phy_id", &lenp);
2030 if (of_phy_is_fixed_link(slave_node)) { 2038 if (slave_data->phy_node) {
2031 struct device_node *phy_node; 2039 dev_dbg(&pdev->dev,
2032 struct phy_device *phy_dev; 2040 "slave[%d] using phy-handle=\"%s\"\n",
2033 2041 i, slave_data->phy_node->full_name);
2042 } else if (of_phy_is_fixed_link(slave_node)) {
2034 /* In the case of a fixed PHY, the DT node associated 2043 /* In the case of a fixed PHY, the DT node associated
2035 * to the PHY is the Ethernet MAC DT node. 2044 * to the PHY is the Ethernet MAC DT node.
2036 */ 2045 */
2037 ret = of_phy_register_fixed_link(slave_node); 2046 ret = of_phy_register_fixed_link(slave_node);
2038 if (ret) 2047 if (ret)
2039 return ret; 2048 return ret;
2040 phy_node = of_node_get(slave_node); 2049 slave_data->phy_node = of_node_get(slave_node);
2041 phy_dev = of_phy_find_device(phy_node);
2042 if (!phy_dev)
2043 return -ENODEV;
2044 snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
2045 PHY_ID_FMT, phy_dev->mdio.bus->id,
2046 phy_dev->mdio.addr);
2047 } else if (parp) { 2050 } else if (parp) {
2048 u32 phyid; 2051 u32 phyid;
2049 struct device_node *mdio_node; 2052 struct device_node *mdio_node;
@@ -2064,7 +2067,9 @@ static int cpsw_probe_dt(struct cpsw_priv *priv,
2064 snprintf(slave_data->phy_id, sizeof(slave_data->phy_id), 2067 snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
2065 PHY_ID_FMT, mdio->name, phyid); 2068 PHY_ID_FMT, mdio->name, phyid);
2066 } else { 2069 } else {
2067 dev_err(&pdev->dev, "No slave[%d] phy_id or fixed-link property\n", i); 2070 dev_err(&pdev->dev,
2071 "No slave[%d] phy_id, phy-handle, or fixed-link property\n",
2072 i);
2068 goto no_phy_slave; 2073 goto no_phy_slave;
2069 } 2074 }
2070 slave_data->phy_if = of_get_phy_mode(slave_node); 2075 slave_data->phy_if = of_get_phy_mode(slave_node);
@@ -2266,7 +2271,7 @@ static int cpsw_probe(struct platform_device *pdev)
2266 /* Select default pin state */ 2271 /* Select default pin state */
2267 pinctrl_pm_select_default_state(&pdev->dev); 2272 pinctrl_pm_select_default_state(&pdev->dev);
2268 2273
2269 if (cpsw_probe_dt(priv, pdev)) { 2274 if (cpsw_probe_dt(&priv->data, pdev)) {
2270 dev_err(&pdev->dev, "cpsw: platform data missing\n"); 2275 dev_err(&pdev->dev, "cpsw: platform data missing\n");
2271 ret = -ENODEV; 2276 ret = -ENODEV;
2272 goto clean_runtime_disable_ret; 2277 goto clean_runtime_disable_ret;
diff --git a/drivers/net/ethernet/ti/cpsw.h b/drivers/net/ethernet/ti/cpsw.h
index 442a7038e660..e50afd1b2eda 100644
--- a/drivers/net/ethernet/ti/cpsw.h
+++ b/drivers/net/ethernet/ti/cpsw.h
@@ -18,6 +18,7 @@
18#include <linux/phy.h> 18#include <linux/phy.h>
19 19
20struct cpsw_slave_data { 20struct cpsw_slave_data {
21 struct device_node *phy_node;
21 char phy_id[MII_BUS_ID_SIZE]; 22 char phy_id[MII_BUS_ID_SIZE];
22 int phy_if; 23 int phy_if;
23 u8 mac_addr[ETH_ALEN]; 24 u8 mac_addr[ETH_ALEN];
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 58d58f002559..f56d66e6ec15 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -1512,7 +1512,10 @@ static int emac_devioctl(struct net_device *ndev, struct ifreq *ifrq, int cmd)
1512 1512
1513 /* TODO: Add phy read and write and private statistics get feature */ 1513 /* TODO: Add phy read and write and private statistics get feature */
1514 1514
1515 return phy_mii_ioctl(priv->phydev, ifrq, cmd); 1515 if (priv->phydev)
1516 return phy_mii_ioctl(priv->phydev, ifrq, cmd);
1517 else
1518 return -EOPNOTSUPP;
1516} 1519}
1517 1520
1518static int match_first_device(struct device *dev, void *data) 1521static int match_first_device(struct device *dev, void *data)
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
index 13214a6492ac..743b18266a7c 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
@@ -1622,7 +1622,7 @@ static void gelic_wl_scan_complete_event(struct gelic_wl_info *wl)
1622 continue; 1622 continue;
1623 1623
1624 /* copy hw scan info */ 1624 /* copy hw scan info */
1625 memcpy(target->hwinfo, scan_info, scan_info->size); 1625 memcpy(target->hwinfo, scan_info, be16_to_cpu(scan_info->size));
1626 target->essid_len = strnlen(scan_info->essid, 1626 target->essid_len = strnlen(scan_info->essid,
1627 sizeof(scan_info->essid)); 1627 sizeof(scan_info->essid));
1628 target->rate_len = 0; 1628 target->rate_len = 0;
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index b3ffaee30858..f279a897a5c7 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -359,27 +359,25 @@ static void at803x_link_change_notify(struct phy_device *phydev)
359 * in the FIFO. In such cases, the FIFO enters an error mode it 359 * in the FIFO. In such cases, the FIFO enters an error mode it
360 * cannot recover from by software. 360 * cannot recover from by software.
361 */ 361 */
362 if (phydev->drv->phy_id == ATH8030_PHY_ID) { 362 if (phydev->state == PHY_NOLINK) {
363 if (phydev->state == PHY_NOLINK) { 363 if (priv->gpiod_reset && !priv->phy_reset) {
364 if (priv->gpiod_reset && !priv->phy_reset) { 364 struct at803x_context context;
365 struct at803x_context context; 365
366 366 at803x_context_save(phydev, &context);
367 at803x_context_save(phydev, &context); 367
368 368 gpiod_set_value(priv->gpiod_reset, 1);
369 gpiod_set_value(priv->gpiod_reset, 1); 369 msleep(1);
370 msleep(1); 370 gpiod_set_value(priv->gpiod_reset, 0);
371 gpiod_set_value(priv->gpiod_reset, 0); 371 msleep(1);
372 msleep(1); 372
373 373 at803x_context_restore(phydev, &context);
374 at803x_context_restore(phydev, &context); 374
375 375 phydev_dbg(phydev, "%s(): phy was reset\n",
376 phydev_dbg(phydev, "%s(): phy was reset\n", 376 __func__);
377 __func__); 377 priv->phy_reset = true;
378 priv->phy_reset = true;
379 }
380 } else {
381 priv->phy_reset = false;
382 } 378 }
379 } else {
380 priv->phy_reset = false;
383 } 381 }
384} 382}
385 383
@@ -391,7 +389,6 @@ static struct phy_driver at803x_driver[] = {
391 .phy_id_mask = 0xffffffef, 389 .phy_id_mask = 0xffffffef,
392 .probe = at803x_probe, 390 .probe = at803x_probe,
393 .config_init = at803x_config_init, 391 .config_init = at803x_config_init,
394 .link_change_notify = at803x_link_change_notify,
395 .set_wol = at803x_set_wol, 392 .set_wol = at803x_set_wol,
396 .get_wol = at803x_get_wol, 393 .get_wol = at803x_get_wol,
397 .suspend = at803x_suspend, 394 .suspend = at803x_suspend,
@@ -427,7 +424,6 @@ static struct phy_driver at803x_driver[] = {
427 .phy_id_mask = 0xffffffef, 424 .phy_id_mask = 0xffffffef,
428 .probe = at803x_probe, 425 .probe = at803x_probe,
429 .config_init = at803x_config_init, 426 .config_init = at803x_config_init,
430 .link_change_notify = at803x_link_change_notify,
431 .set_wol = at803x_set_wol, 427 .set_wol = at803x_set_wol,
432 .get_wol = at803x_get_wol, 428 .get_wol = at803x_get_wol,
433 .suspend = at803x_suspend, 429 .suspend = at803x_suspend,
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index f20890ee03f3..f64778ad9753 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -269,6 +269,7 @@ struct skb_data { /* skb->cb is one of these */
269 struct lan78xx_net *dev; 269 struct lan78xx_net *dev;
270 enum skb_state state; 270 enum skb_state state;
271 size_t length; 271 size_t length;
272 int num_of_packet;
272}; 273};
273 274
274struct usb_context { 275struct usb_context {
@@ -1803,7 +1804,34 @@ static void lan78xx_remove_mdio(struct lan78xx_net *dev)
1803 1804
1804static void lan78xx_link_status_change(struct net_device *net) 1805static void lan78xx_link_status_change(struct net_device *net)
1805{ 1806{
1806 /* nothing to do */ 1807 struct phy_device *phydev = net->phydev;
1808 int ret, temp;
1809
1810 /* At forced 100 F/H mode, chip may fail to set mode correctly
1811 * when cable is switched between long(~50+m) and short one.
1812 * As workaround, set to 10 before setting to 100
1813 * at forced 100 F/H mode.
1814 */
1815 if (!phydev->autoneg && (phydev->speed == 100)) {
1816 /* disable phy interrupt */
1817 temp = phy_read(phydev, LAN88XX_INT_MASK);
1818 temp &= ~LAN88XX_INT_MASK_MDINTPIN_EN_;
1819 ret = phy_write(phydev, LAN88XX_INT_MASK, temp);
1820
1821 temp = phy_read(phydev, MII_BMCR);
1822 temp &= ~(BMCR_SPEED100 | BMCR_SPEED1000);
1823 phy_write(phydev, MII_BMCR, temp); /* set to 10 first */
1824 temp |= BMCR_SPEED100;
1825 phy_write(phydev, MII_BMCR, temp); /* set to 100 later */
1826
1827 /* clear pending interrupt generated while workaround */
1828 temp = phy_read(phydev, LAN88XX_INT_STS);
1829
1830 /* enable phy interrupt back */
1831 temp = phy_read(phydev, LAN88XX_INT_MASK);
1832 temp |= LAN88XX_INT_MASK_MDINTPIN_EN_;
1833 ret = phy_write(phydev, LAN88XX_INT_MASK, temp);
1834 }
1807} 1835}
1808 1836
1809static int lan78xx_phy_init(struct lan78xx_net *dev) 1837static int lan78xx_phy_init(struct lan78xx_net *dev)
@@ -2464,7 +2492,7 @@ static void tx_complete(struct urb *urb)
2464 struct lan78xx_net *dev = entry->dev; 2492 struct lan78xx_net *dev = entry->dev;
2465 2493
2466 if (urb->status == 0) { 2494 if (urb->status == 0) {
2467 dev->net->stats.tx_packets++; 2495 dev->net->stats.tx_packets += entry->num_of_packet;
2468 dev->net->stats.tx_bytes += entry->length; 2496 dev->net->stats.tx_bytes += entry->length;
2469 } else { 2497 } else {
2470 dev->net->stats.tx_errors++; 2498 dev->net->stats.tx_errors++;
@@ -2681,10 +2709,11 @@ void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
2681 return; 2709 return;
2682 } 2710 }
2683 2711
2684 skb->protocol = eth_type_trans(skb, dev->net);
2685 dev->net->stats.rx_packets++; 2712 dev->net->stats.rx_packets++;
2686 dev->net->stats.rx_bytes += skb->len; 2713 dev->net->stats.rx_bytes += skb->len;
2687 2714
2715 skb->protocol = eth_type_trans(skb, dev->net);
2716
2688 netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n", 2717 netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
2689 skb->len + sizeof(struct ethhdr), skb->protocol); 2718 skb->len + sizeof(struct ethhdr), skb->protocol);
2690 memset(skb->cb, 0, sizeof(struct skb_data)); 2719 memset(skb->cb, 0, sizeof(struct skb_data));
@@ -2934,13 +2963,16 @@ static void lan78xx_tx_bh(struct lan78xx_net *dev)
2934 2963
2935 skb_totallen = 0; 2964 skb_totallen = 0;
2936 pkt_cnt = 0; 2965 pkt_cnt = 0;
2966 count = 0;
2967 length = 0;
2937 for (skb = tqp->next; pkt_cnt < tqp->qlen; skb = skb->next) { 2968 for (skb = tqp->next; pkt_cnt < tqp->qlen; skb = skb->next) {
2938 if (skb_is_gso(skb)) { 2969 if (skb_is_gso(skb)) {
2939 if (pkt_cnt) { 2970 if (pkt_cnt) {
2940 /* handle previous packets first */ 2971 /* handle previous packets first */
2941 break; 2972 break;
2942 } 2973 }
2943 length = skb->len; 2974 count = 1;
2975 length = skb->len - TX_OVERHEAD;
2944 skb2 = skb_dequeue(tqp); 2976 skb2 = skb_dequeue(tqp);
2945 goto gso_skb; 2977 goto gso_skb;
2946 } 2978 }
@@ -2961,14 +2993,13 @@ static void lan78xx_tx_bh(struct lan78xx_net *dev)
2961 for (count = pos = 0; count < pkt_cnt; count++) { 2993 for (count = pos = 0; count < pkt_cnt; count++) {
2962 skb2 = skb_dequeue(tqp); 2994 skb2 = skb_dequeue(tqp);
2963 if (skb2) { 2995 if (skb2) {
2996 length += (skb2->len - TX_OVERHEAD);
2964 memcpy(skb->data + pos, skb2->data, skb2->len); 2997 memcpy(skb->data + pos, skb2->data, skb2->len);
2965 pos += roundup(skb2->len, sizeof(u32)); 2998 pos += roundup(skb2->len, sizeof(u32));
2966 dev_kfree_skb(skb2); 2999 dev_kfree_skb(skb2);
2967 } 3000 }
2968 } 3001 }
2969 3002
2970 length = skb_totallen;
2971
2972gso_skb: 3003gso_skb:
2973 urb = usb_alloc_urb(0, GFP_ATOMIC); 3004 urb = usb_alloc_urb(0, GFP_ATOMIC);
2974 if (!urb) { 3005 if (!urb) {
@@ -2980,6 +3011,7 @@ gso_skb:
2980 entry->urb = urb; 3011 entry->urb = urb;
2981 entry->dev = dev; 3012 entry->dev = dev;
2982 entry->length = length; 3013 entry->length = length;
3014 entry->num_of_packet = count;
2983 3015
2984 spin_lock_irqsave(&dev->txq.lock, flags); 3016 spin_lock_irqsave(&dev->txq.lock, flags);
2985 ret = usb_autopm_get_interface_async(dev->intf); 3017 ret = usb_autopm_get_interface_async(dev->intf);
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index f84080215915..82129eef7774 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -411,7 +411,7 @@ static int enable_net_traffic(struct net_device *dev, struct usb_device *usb)
411 int ret; 411 int ret;
412 412
413 read_mii_word(pegasus, pegasus->phy, MII_LPA, &linkpart); 413 read_mii_word(pegasus, pegasus->phy, MII_LPA, &linkpart);
414 data[0] = 0xc9; 414 data[0] = 0xc8; /* TX & RX enable, append status, no CRC */
415 data[1] = 0; 415 data[1] = 0;
416 if (linkpart & (ADVERTISE_100FULL | ADVERTISE_10FULL)) 416 if (linkpart & (ADVERTISE_100FULL | ADVERTISE_10FULL))
417 data[1] |= 0x20; /* set full duplex */ 417 data[1] |= 0x20; /* set full duplex */
@@ -497,7 +497,7 @@ static void read_bulk_callback(struct urb *urb)
497 pkt_len = buf[count - 3] << 8; 497 pkt_len = buf[count - 3] << 8;
498 pkt_len += buf[count - 4]; 498 pkt_len += buf[count - 4];
499 pkt_len &= 0xfff; 499 pkt_len &= 0xfff;
500 pkt_len -= 8; 500 pkt_len -= 4;
501 } 501 }
502 502
503 /* 503 /*
@@ -528,7 +528,7 @@ static void read_bulk_callback(struct urb *urb)
528goon: 528goon:
529 usb_fill_bulk_urb(pegasus->rx_urb, pegasus->usb, 529 usb_fill_bulk_urb(pegasus->rx_urb, pegasus->usb,
530 usb_rcvbulkpipe(pegasus->usb, 1), 530 usb_rcvbulkpipe(pegasus->usb, 1),
531 pegasus->rx_skb->data, PEGASUS_MTU + 8, 531 pegasus->rx_skb->data, PEGASUS_MTU,
532 read_bulk_callback, pegasus); 532 read_bulk_callback, pegasus);
533 rx_status = usb_submit_urb(pegasus->rx_urb, GFP_ATOMIC); 533 rx_status = usb_submit_urb(pegasus->rx_urb, GFP_ATOMIC);
534 if (rx_status == -ENODEV) 534 if (rx_status == -ENODEV)
@@ -569,7 +569,7 @@ static void rx_fixup(unsigned long data)
569 } 569 }
570 usb_fill_bulk_urb(pegasus->rx_urb, pegasus->usb, 570 usb_fill_bulk_urb(pegasus->rx_urb, pegasus->usb,
571 usb_rcvbulkpipe(pegasus->usb, 1), 571 usb_rcvbulkpipe(pegasus->usb, 1),
572 pegasus->rx_skb->data, PEGASUS_MTU + 8, 572 pegasus->rx_skb->data, PEGASUS_MTU,
573 read_bulk_callback, pegasus); 573 read_bulk_callback, pegasus);
574try_again: 574try_again:
575 status = usb_submit_urb(pegasus->rx_urb, GFP_ATOMIC); 575 status = usb_submit_urb(pegasus->rx_urb, GFP_ATOMIC);
@@ -823,7 +823,7 @@ static int pegasus_open(struct net_device *net)
823 823
824 usb_fill_bulk_urb(pegasus->rx_urb, pegasus->usb, 824 usb_fill_bulk_urb(pegasus->rx_urb, pegasus->usb,
825 usb_rcvbulkpipe(pegasus->usb, 1), 825 usb_rcvbulkpipe(pegasus->usb, 1),
826 pegasus->rx_skb->data, PEGASUS_MTU + 8, 826 pegasus->rx_skb->data, PEGASUS_MTU,
827 read_bulk_callback, pegasus); 827 read_bulk_callback, pegasus);
828 if ((res = usb_submit_urb(pegasus->rx_urb, GFP_KERNEL))) { 828 if ((res = usb_submit_urb(pegasus->rx_urb, GFP_KERNEL))) {
829 if (res == -ENODEV) 829 if (res == -ENODEV)
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index 30033dbe6662..c369db99c005 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -29,6 +29,7 @@
29#include <linux/crc32.h> 29#include <linux/crc32.h>
30#include <linux/usb/usbnet.h> 30#include <linux/usb/usbnet.h>
31#include <linux/slab.h> 31#include <linux/slab.h>
32#include <linux/of_net.h>
32#include "smsc75xx.h" 33#include "smsc75xx.h"
33 34
34#define SMSC_CHIPNAME "smsc75xx" 35#define SMSC_CHIPNAME "smsc75xx"
@@ -761,6 +762,15 @@ static int smsc75xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
761 762
762static void smsc75xx_init_mac_address(struct usbnet *dev) 763static void smsc75xx_init_mac_address(struct usbnet *dev)
763{ 764{
765 const u8 *mac_addr;
766
767 /* maybe the boot loader passed the MAC address in devicetree */
768 mac_addr = of_get_mac_address(dev->udev->dev.of_node);
769 if (mac_addr) {
770 memcpy(dev->net->dev_addr, mac_addr, ETH_ALEN);
771 return;
772 }
773
764 /* try reading mac address from EEPROM */ 774 /* try reading mac address from EEPROM */
765 if (smsc75xx_read_eeprom(dev, EEPROM_MAC_OFFSET, ETH_ALEN, 775 if (smsc75xx_read_eeprom(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
766 dev->net->dev_addr) == 0) { 776 dev->net->dev_addr) == 0) {
@@ -772,7 +782,7 @@ static void smsc75xx_init_mac_address(struct usbnet *dev)
772 } 782 }
773 } 783 }
774 784
775 /* no eeprom, or eeprom values are invalid. generate random MAC */ 785 /* no useful static MAC address found. generate a random one */
776 eth_hw_addr_random(dev->net); 786 eth_hw_addr_random(dev->net);
777 netif_dbg(dev, ifup, dev->net, "MAC address set to eth_random_addr\n"); 787 netif_dbg(dev, ifup, dev->net, "MAC address set to eth_random_addr\n");
778} 788}
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 66b3ab9f614e..2edc2bc6d1b9 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -29,6 +29,7 @@
29#include <linux/crc32.h> 29#include <linux/crc32.h>
30#include <linux/usb/usbnet.h> 30#include <linux/usb/usbnet.h>
31#include <linux/slab.h> 31#include <linux/slab.h>
32#include <linux/of_net.h>
32#include "smsc95xx.h" 33#include "smsc95xx.h"
33 34
34#define SMSC_CHIPNAME "smsc95xx" 35#define SMSC_CHIPNAME "smsc95xx"
@@ -765,6 +766,15 @@ static int smsc95xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
765 766
766static void smsc95xx_init_mac_address(struct usbnet *dev) 767static void smsc95xx_init_mac_address(struct usbnet *dev)
767{ 768{
769 const u8 *mac_addr;
770
771 /* maybe the boot loader passed the MAC address in devicetree */
772 mac_addr = of_get_mac_address(dev->udev->dev.of_node);
773 if (mac_addr) {
774 memcpy(dev->net->dev_addr, mac_addr, ETH_ALEN);
775 return;
776 }
777
768 /* try reading mac address from EEPROM */ 778 /* try reading mac address from EEPROM */
769 if (smsc95xx_read_eeprom(dev, EEPROM_MAC_OFFSET, ETH_ALEN, 779 if (smsc95xx_read_eeprom(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
770 dev->net->dev_addr) == 0) { 780 dev->net->dev_addr) == 0) {
@@ -775,7 +785,7 @@ static void smsc95xx_init_mac_address(struct usbnet *dev)
775 } 785 }
776 } 786 }
777 787
778 /* no eeprom, or eeprom values are invalid. generate random MAC */ 788 /* no useful static MAC address found. generate a random one */
779 eth_hw_addr_random(dev->net); 789 eth_hw_addr_random(dev->net);
780 netif_dbg(dev, ifup, dev->net, "MAC address set to eth_random_addr\n"); 790 netif_dbg(dev, ifup, dev->net, "MAC address set to eth_random_addr\n");
781} 791}
diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
index 8f8793004b9f..1b271b99c49e 100644
--- a/drivers/net/wireless/ath/ath9k/ar5008_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
@@ -274,6 +274,9 @@ void ar5008_hw_cmn_spur_mitigate(struct ath_hw *ah,
274 }; 274 };
275 static const int inc[4] = { 0, 100, 0, 0 }; 275 static const int inc[4] = { 0, 100, 0, 0 };
276 276
277 memset(&mask_m, 0, sizeof(int8_t) * 123);
278 memset(&mask_p, 0, sizeof(int8_t) * 123);
279
277 cur_bin = -6000; 280 cur_bin = -6000;
278 upper = bin + 100; 281 upper = bin + 100;
279 lower = bin - 100; 282 lower = bin - 100;
@@ -424,14 +427,9 @@ static void ar5008_hw_spur_mitigate(struct ath_hw *ah,
424 int tmp, new; 427 int tmp, new;
425 int i; 428 int i;
426 429
427 int8_t mask_m[123];
428 int8_t mask_p[123];
429 int cur_bb_spur; 430 int cur_bb_spur;
430 bool is2GHz = IS_CHAN_2GHZ(chan); 431 bool is2GHz = IS_CHAN_2GHZ(chan);
431 432
432 memset(&mask_m, 0, sizeof(int8_t) * 123);
433 memset(&mask_p, 0, sizeof(int8_t) * 123);
434
435 for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) { 433 for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
436 cur_bb_spur = ah->eep_ops->get_spur_channel(ah, i, is2GHz); 434 cur_bb_spur = ah->eep_ops->get_spur_channel(ah, i, is2GHz);
437 if (AR_NO_SPUR == cur_bb_spur) 435 if (AR_NO_SPUR == cur_bb_spur)
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_phy.c b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
index db6624527d99..53d7445a5d12 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
@@ -178,14 +178,9 @@ static void ar9002_hw_spur_mitigate(struct ath_hw *ah,
178 int i; 178 int i;
179 struct chan_centers centers; 179 struct chan_centers centers;
180 180
181 int8_t mask_m[123];
182 int8_t mask_p[123];
183 int cur_bb_spur; 181 int cur_bb_spur;
184 bool is2GHz = IS_CHAN_2GHZ(chan); 182 bool is2GHz = IS_CHAN_2GHZ(chan);
185 183
186 memset(&mask_m, 0, sizeof(int8_t) * 123);
187 memset(&mask_p, 0, sizeof(int8_t) * 123);
188
189 ath9k_hw_get_channel_centers(ah, chan, &centers); 184 ath9k_hw_get_channel_centers(ah, chan, &centers);
190 freq = centers.synth_center; 185 freq = centers.synth_center;
191 186
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
index a9212a12f4da..2d20556ce22d 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
@@ -89,7 +89,7 @@
89#define IWL8260_SMEM_OFFSET 0x400000 89#define IWL8260_SMEM_OFFSET 0x400000
90#define IWL8260_SMEM_LEN 0x68000 90#define IWL8260_SMEM_LEN 0x68000
91 91
92#define IWL8000_FW_PRE "iwlwifi-8000" 92#define IWL8000_FW_PRE "iwlwifi-8000C-"
93#define IWL8000_MODULE_FIRMWARE(api) \ 93#define IWL8000_MODULE_FIRMWARE(api) \
94 IWL8000_FW_PRE "-" __stringify(api) ".ucode" 94 IWL8000_FW_PRE "-" __stringify(api) ".ucode"
95 95
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
index 4f495d9153a6..ff18b0658677 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
@@ -240,19 +240,6 @@ static int iwl_request_firmware(struct iwl_drv *drv, bool first)
240 snprintf(drv->firmware_name, sizeof(drv->firmware_name), "%s%s.ucode", 240 snprintf(drv->firmware_name, sizeof(drv->firmware_name), "%s%s.ucode",
241 name_pre, tag); 241 name_pre, tag);
242 242
243 /*
244 * Starting 8000B - FW name format has changed. This overwrites the
245 * previous name and uses the new format.
246 */
247 if (drv->trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
248 char rev_step = 'A' + CSR_HW_REV_STEP(drv->trans->hw_rev);
249
250 if (rev_step != 'A')
251 snprintf(drv->firmware_name,
252 sizeof(drv->firmware_name), "%s%c-%s.ucode",
253 name_pre, rev_step, tag);
254 }
255
256 IWL_DEBUG_INFO(drv, "attempting to load firmware %s'%s'\n", 243 IWL_DEBUG_INFO(drv, "attempting to load firmware %s'%s'\n",
257 (drv->fw_index == UCODE_EXPERIMENTAL_INDEX) 244 (drv->fw_index == UCODE_EXPERIMENTAL_INDEX)
258 ? "EXPERIMENTAL " : "", 245 ? "EXPERIMENTAL " : "",
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c
index cbb5947b3fab..e25171f9b407 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c
@@ -609,7 +609,8 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
609 } 609 }
610 610
611 /* Make room for fw's virtual image pages, if it exists */ 611 /* Make room for fw's virtual image pages, if it exists */
612 if (mvm->fw->img[mvm->cur_ucode].paging_mem_size) 612 if (mvm->fw->img[mvm->cur_ucode].paging_mem_size &&
613 mvm->fw_paging_db[0].fw_paging_block)
613 file_len += mvm->num_of_paging_blk * 614 file_len += mvm->num_of_paging_blk *
614 (sizeof(*dump_data) + 615 (sizeof(*dump_data) +
615 sizeof(struct iwl_fw_error_dump_paging) + 616 sizeof(struct iwl_fw_error_dump_paging) +
@@ -750,7 +751,8 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
750 } 751 }
751 752
752 /* Dump fw's virtual image */ 753 /* Dump fw's virtual image */
753 if (mvm->fw->img[mvm->cur_ucode].paging_mem_size) { 754 if (mvm->fw->img[mvm->cur_ucode].paging_mem_size &&
755 mvm->fw_paging_db[0].fw_paging_block) {
754 for (i = 1; i < mvm->num_of_paging_blk + 1; i++) { 756 for (i = 1; i < mvm->num_of_paging_blk + 1; i++) {
755 struct iwl_fw_error_dump_paging *paging; 757 struct iwl_fw_error_dump_paging *paging;
756 struct page *pages = 758 struct page *pages =
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index 9e97cf4ff1c5..b70f4530f960 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -149,9 +149,11 @@ void iwl_free_fw_paging(struct iwl_mvm *mvm)
149 149
150 __free_pages(mvm->fw_paging_db[i].fw_paging_block, 150 __free_pages(mvm->fw_paging_db[i].fw_paging_block,
151 get_order(mvm->fw_paging_db[i].fw_paging_size)); 151 get_order(mvm->fw_paging_db[i].fw_paging_size));
152 mvm->fw_paging_db[i].fw_paging_block = NULL;
152 } 153 }
153 kfree(mvm->trans->paging_download_buf); 154 kfree(mvm->trans->paging_download_buf);
154 mvm->trans->paging_download_buf = NULL; 155 mvm->trans->paging_download_buf = NULL;
156 mvm->trans->paging_db = NULL;
155 157
156 memset(mvm->fw_paging_db, 0, sizeof(mvm->fw_paging_db)); 158 memset(mvm->fw_paging_db, 0, sizeof(mvm->fw_paging_db));
157} 159}
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index 41c6dd5b9ccc..de42066fa49b 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -479,8 +479,18 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
479 {IWL_PCI_DEVICE(0x24F3, 0x0930, iwl8260_2ac_cfg)}, 479 {IWL_PCI_DEVICE(0x24F3, 0x0930, iwl8260_2ac_cfg)},
480 {IWL_PCI_DEVICE(0x24F3, 0x0000, iwl8265_2ac_cfg)}, 480 {IWL_PCI_DEVICE(0x24F3, 0x0000, iwl8265_2ac_cfg)},
481 {IWL_PCI_DEVICE(0x24FD, 0x0010, iwl8265_2ac_cfg)}, 481 {IWL_PCI_DEVICE(0x24FD, 0x0010, iwl8265_2ac_cfg)},
482 {IWL_PCI_DEVICE(0x24FD, 0x0110, iwl8265_2ac_cfg)},
483 {IWL_PCI_DEVICE(0x24FD, 0x1110, iwl8265_2ac_cfg)},
484 {IWL_PCI_DEVICE(0x24FD, 0x1010, iwl8265_2ac_cfg)},
485 {IWL_PCI_DEVICE(0x24FD, 0x0050, iwl8265_2ac_cfg)},
486 {IWL_PCI_DEVICE(0x24FD, 0x0150, iwl8265_2ac_cfg)},
487 {IWL_PCI_DEVICE(0x24FD, 0x9010, iwl8265_2ac_cfg)},
488 {IWL_PCI_DEVICE(0x24FD, 0x8110, iwl8265_2ac_cfg)},
489 {IWL_PCI_DEVICE(0x24FD, 0x8050, iwl8265_2ac_cfg)},
482 {IWL_PCI_DEVICE(0x24FD, 0x8010, iwl8265_2ac_cfg)}, 490 {IWL_PCI_DEVICE(0x24FD, 0x8010, iwl8265_2ac_cfg)},
483 {IWL_PCI_DEVICE(0x24FD, 0x0810, iwl8265_2ac_cfg)}, 491 {IWL_PCI_DEVICE(0x24FD, 0x0810, iwl8265_2ac_cfg)},
492 {IWL_PCI_DEVICE(0x24FD, 0x9110, iwl8265_2ac_cfg)},
493 {IWL_PCI_DEVICE(0x24FD, 0x8130, iwl8265_2ac_cfg)},
484 494
485/* 9000 Series */ 495/* 9000 Series */
486 {IWL_PCI_DEVICE(0x9DF0, 0x0A10, iwl9560_2ac_cfg)}, 496 {IWL_PCI_DEVICE(0x9DF0, 0x0A10, iwl9560_2ac_cfg)},
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index df1f1a76a862..01e12d221a8b 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -135,7 +135,7 @@ MODULE_LICENSE("GPL");
135/* Field definitions */ 135/* Field definitions */
136#define HCI_ACCEL_MASK 0x7fff 136#define HCI_ACCEL_MASK 0x7fff
137#define HCI_HOTKEY_DISABLE 0x0b 137#define HCI_HOTKEY_DISABLE 0x0b
138#define HCI_HOTKEY_ENABLE 0x01 138#define HCI_HOTKEY_ENABLE 0x09
139#define HCI_HOTKEY_SPECIAL_FUNCTIONS 0x10 139#define HCI_HOTKEY_SPECIAL_FUNCTIONS 0x10
140#define HCI_LCD_BRIGHTNESS_BITS 3 140#define HCI_LCD_BRIGHTNESS_BITS 3
141#define HCI_LCD_BRIGHTNESS_SHIFT (16-HCI_LCD_BRIGHTNESS_BITS) 141#define HCI_LCD_BRIGHTNESS_SHIFT (16-HCI_LCD_BRIGHTNESS_BITS)
diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c
index 5d4d91846357..96168b819044 100644
--- a/drivers/rapidio/devices/rio_mport_cdev.c
+++ b/drivers/rapidio/devices/rio_mport_cdev.c
@@ -2669,9 +2669,9 @@ static int __init mport_init(void)
2669 2669
2670 /* Create device class needed by udev */ 2670 /* Create device class needed by udev */
2671 dev_class = class_create(THIS_MODULE, DRV_NAME); 2671 dev_class = class_create(THIS_MODULE, DRV_NAME);
2672 if (!dev_class) { 2672 if (IS_ERR(dev_class)) {
2673 rmcd_error("Unable to create " DRV_NAME " class"); 2673 rmcd_error("Unable to create " DRV_NAME " class");
2674 return -EINVAL; 2674 return PTR_ERR(dev_class);
2675 } 2675 }
2676 2676
2677 ret = alloc_chrdev_region(&dev_number, 0, RIO_MAX_MPORTS, DRV_NAME); 2677 ret = alloc_chrdev_region(&dev_number, 0, RIO_MAX_MPORTS, DRV_NAME);
diff --git a/drivers/s390/char/sclp_ctl.c b/drivers/s390/char/sclp_ctl.c
index 648cb86afd42..ea607a4a1bdd 100644
--- a/drivers/s390/char/sclp_ctl.c
+++ b/drivers/s390/char/sclp_ctl.c
@@ -56,6 +56,7 @@ static int sclp_ctl_ioctl_sccb(void __user *user_area)
56{ 56{
57 struct sclp_ctl_sccb ctl_sccb; 57 struct sclp_ctl_sccb ctl_sccb;
58 struct sccb_header *sccb; 58 struct sccb_header *sccb;
59 unsigned long copied;
59 int rc; 60 int rc;
60 61
61 if (copy_from_user(&ctl_sccb, user_area, sizeof(ctl_sccb))) 62 if (copy_from_user(&ctl_sccb, user_area, sizeof(ctl_sccb)))
@@ -65,14 +66,15 @@ static int sclp_ctl_ioctl_sccb(void __user *user_area)
65 sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA); 66 sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
66 if (!sccb) 67 if (!sccb)
67 return -ENOMEM; 68 return -ENOMEM;
68 if (copy_from_user(sccb, u64_to_uptr(ctl_sccb.sccb), sizeof(*sccb))) { 69 copied = PAGE_SIZE -
70 copy_from_user(sccb, u64_to_uptr(ctl_sccb.sccb), PAGE_SIZE);
71 if (offsetof(struct sccb_header, length) +
72 sizeof(sccb->length) > copied || sccb->length > copied) {
69 rc = -EFAULT; 73 rc = -EFAULT;
70 goto out_free; 74 goto out_free;
71 } 75 }
72 if (sccb->length > PAGE_SIZE || sccb->length < 8) 76 if (sccb->length < 8) {
73 return -EINVAL; 77 rc = -EINVAL;
74 if (copy_from_user(sccb, u64_to_uptr(ctl_sccb.sccb), sccb->length)) {
75 rc = -EFAULT;
76 goto out_free; 78 goto out_free;
77 } 79 }
78 rc = sclp_sync_request(ctl_sccb.cmdw, sccb); 80 rc = sclp_sync_request(ctl_sccb.cmdw, sccb);
diff --git a/drivers/staging/media/davinci_vpfe/vpfe_video.c b/drivers/staging/media/davinci_vpfe/vpfe_video.c
index b793c04028a3..be72a8e5f221 100644
--- a/drivers/staging/media/davinci_vpfe/vpfe_video.c
+++ b/drivers/staging/media/davinci_vpfe/vpfe_video.c
@@ -172,9 +172,11 @@ static int vpfe_prepare_pipeline(struct vpfe_video_device *video)
172static int vpfe_update_pipe_state(struct vpfe_video_device *video) 172static int vpfe_update_pipe_state(struct vpfe_video_device *video)
173{ 173{
174 struct vpfe_pipeline *pipe = &video->pipe; 174 struct vpfe_pipeline *pipe = &video->pipe;
175 int ret;
175 176
176 if (vpfe_prepare_pipeline(video)) 177 ret = vpfe_prepare_pipeline(video);
177 return vpfe_prepare_pipeline(video); 178 if (ret)
179 return ret;
178 180
179 /* 181 /*
180 * Find out if there is any input video 182 * Find out if there is any input video
@@ -182,9 +184,10 @@ static int vpfe_update_pipe_state(struct vpfe_video_device *video)
182 */ 184 */
183 if (pipe->input_num == 0) { 185 if (pipe->input_num == 0) {
184 pipe->state = VPFE_PIPELINE_STREAM_CONTINUOUS; 186 pipe->state = VPFE_PIPELINE_STREAM_CONTINUOUS;
185 if (vpfe_update_current_ext_subdev(video)) { 187 ret = vpfe_update_current_ext_subdev(video);
188 if (ret) {
186 pr_err("Invalid external subdev\n"); 189 pr_err("Invalid external subdev\n");
187 return vpfe_update_current_ext_subdev(video); 190 return ret;
188 } 191 }
189 } else { 192 } else {
190 pipe->state = VPFE_PIPELINE_STREAM_SINGLESHOT; 193 pipe->state = VPFE_PIPELINE_STREAM_SINGLESHOT;
@@ -667,6 +670,7 @@ static int vpfe_enum_fmt(struct file *file, void *priv,
667 struct v4l2_subdev *subdev; 670 struct v4l2_subdev *subdev;
668 struct v4l2_format format; 671 struct v4l2_format format;
669 struct media_pad *remote; 672 struct media_pad *remote;
673 int ret;
670 674
671 v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_enum_fmt\n"); 675 v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_enum_fmt\n");
672 676
@@ -695,10 +699,11 @@ static int vpfe_enum_fmt(struct file *file, void *priv,
695 sd_fmt.pad = remote->index; 699 sd_fmt.pad = remote->index;
696 sd_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE; 700 sd_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
697 /* get output format of remote subdev */ 701 /* get output format of remote subdev */
698 if (v4l2_subdev_call(subdev, pad, get_fmt, NULL, &sd_fmt)) { 702 ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &sd_fmt);
703 if (ret) {
699 v4l2_err(&vpfe_dev->v4l2_dev, 704 v4l2_err(&vpfe_dev->v4l2_dev,
700 "invalid remote subdev for video node\n"); 705 "invalid remote subdev for video node\n");
701 return v4l2_subdev_call(subdev, pad, get_fmt, NULL, &sd_fmt); 706 return ret;
702 } 707 }
703 /* convert to pix format */ 708 /* convert to pix format */
704 mbus.code = sd_fmt.format.code; 709 mbus.code = sd_fmt.format.code;
@@ -725,6 +730,7 @@ static int vpfe_s_fmt(struct file *file, void *priv,
725 struct vpfe_video_device *video = video_drvdata(file); 730 struct vpfe_video_device *video = video_drvdata(file);
726 struct vpfe_device *vpfe_dev = video->vpfe_dev; 731 struct vpfe_device *vpfe_dev = video->vpfe_dev;
727 struct v4l2_format format; 732 struct v4l2_format format;
733 int ret;
728 734
729 v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_fmt\n"); 735 v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_fmt\n");
730 /* If streaming is started, return error */ 736 /* If streaming is started, return error */
@@ -733,8 +739,9 @@ static int vpfe_s_fmt(struct file *file, void *priv,
733 return -EBUSY; 739 return -EBUSY;
734 } 740 }
735 /* get adjacent subdev's output pad format */ 741 /* get adjacent subdev's output pad format */
736 if (__vpfe_video_get_format(video, &format)) 742 ret = __vpfe_video_get_format(video, &format);
737 return __vpfe_video_get_format(video, &format); 743 if (ret)
744 return ret;
738 *fmt = format; 745 *fmt = format;
739 video->fmt = *fmt; 746 video->fmt = *fmt;
740 return 0; 747 return 0;
@@ -757,11 +764,13 @@ static int vpfe_try_fmt(struct file *file, void *priv,
757 struct vpfe_video_device *video = video_drvdata(file); 764 struct vpfe_video_device *video = video_drvdata(file);
758 struct vpfe_device *vpfe_dev = video->vpfe_dev; 765 struct vpfe_device *vpfe_dev = video->vpfe_dev;
759 struct v4l2_format format; 766 struct v4l2_format format;
767 int ret;
760 768
761 v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_try_fmt\n"); 769 v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_try_fmt\n");
762 /* get adjacent subdev's output pad format */ 770 /* get adjacent subdev's output pad format */
763 if (__vpfe_video_get_format(video, &format)) 771 ret = __vpfe_video_get_format(video, &format);
764 return __vpfe_video_get_format(video, &format); 772 if (ret)
773 return ret;
765 774
766 *fmt = format; 775 *fmt = format;
767 return 0; 776 return 0;
@@ -838,8 +847,9 @@ static int vpfe_s_input(struct file *file, void *priv, unsigned int index)
838 847
839 v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_input\n"); 848 v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_input\n");
840 849
841 if (mutex_lock_interruptible(&video->lock)) 850 ret = mutex_lock_interruptible(&video->lock);
842 return mutex_lock_interruptible(&video->lock); 851 if (ret)
852 return ret;
843 /* 853 /*
844 * If streaming is started return device busy 854 * If streaming is started return device busy
845 * error 855 * error
@@ -940,8 +950,9 @@ static int vpfe_s_std(struct file *file, void *priv, v4l2_std_id std_id)
940 v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_std\n"); 950 v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_std\n");
941 951
942 /* Call decoder driver function to set the standard */ 952 /* Call decoder driver function to set the standard */
943 if (mutex_lock_interruptible(&video->lock)) 953 ret = mutex_lock_interruptible(&video->lock);
944 return mutex_lock_interruptible(&video->lock); 954 if (ret)
955 return ret;
945 sdinfo = video->current_ext_subdev; 956 sdinfo = video->current_ext_subdev;
946 /* If streaming is started, return device busy error */ 957 /* If streaming is started, return device busy error */
947 if (video->started) { 958 if (video->started) {
@@ -1327,8 +1338,9 @@ static int vpfe_reqbufs(struct file *file, void *priv,
1327 return -EINVAL; 1338 return -EINVAL;
1328 } 1339 }
1329 1340
1330 if (mutex_lock_interruptible(&video->lock)) 1341 ret = mutex_lock_interruptible(&video->lock);
1331 return mutex_lock_interruptible(&video->lock); 1342 if (ret)
1343 return ret;
1332 1344
1333 if (video->io_usrs != 0) { 1345 if (video->io_usrs != 0) {
1334 v4l2_err(&vpfe_dev->v4l2_dev, "Only one IO user allowed\n"); 1346 v4l2_err(&vpfe_dev->v4l2_dev, "Only one IO user allowed\n");
@@ -1354,10 +1366,11 @@ static int vpfe_reqbufs(struct file *file, void *priv,
1354 q->buf_struct_size = sizeof(struct vpfe_cap_buffer); 1366 q->buf_struct_size = sizeof(struct vpfe_cap_buffer);
1355 q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; 1367 q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
1356 1368
1357 if (vb2_queue_init(q)) { 1369 ret = vb2_queue_init(q);
1370 if (ret) {
1358 v4l2_err(&vpfe_dev->v4l2_dev, "vb2_queue_init() failed\n"); 1371 v4l2_err(&vpfe_dev->v4l2_dev, "vb2_queue_init() failed\n");
1359 vb2_dma_contig_cleanup_ctx(vpfe_dev->pdev); 1372 vb2_dma_contig_cleanup_ctx(vpfe_dev->pdev);
1360 return vb2_queue_init(q); 1373 return ret;
1361 } 1374 }
1362 1375
1363 fh->io_allowed = 1; 1376 fh->io_allowed = 1;
@@ -1533,8 +1546,9 @@ static int vpfe_streamoff(struct file *file, void *priv,
1533 return -EINVAL; 1546 return -EINVAL;
1534 } 1547 }
1535 1548
1536 if (mutex_lock_interruptible(&video->lock)) 1549 ret = mutex_lock_interruptible(&video->lock);
1537 return mutex_lock_interruptible(&video->lock); 1550 if (ret)
1551 return ret;
1538 1552
1539 vpfe_stop_capture(video); 1553 vpfe_stop_capture(video);
1540 ret = vb2_streamoff(&video->buffer_queue, buf_type); 1554 ret = vb2_streamoff(&video->buffer_queue, buf_type);
diff --git a/drivers/staging/rdma/hfi1/TODO b/drivers/staging/rdma/hfi1/TODO
index 05de0dad8762..4c6f1d7d2eaf 100644
--- a/drivers/staging/rdma/hfi1/TODO
+++ b/drivers/staging/rdma/hfi1/TODO
@@ -3,4 +3,4 @@ July, 2015
3- Remove unneeded file entries in sysfs 3- Remove unneeded file entries in sysfs
4- Remove software processing of IB protocol and place in library for use 4- Remove software processing of IB protocol and place in library for use
5 by qib, ipath (if still present), hfi1, and eventually soft-roce 5 by qib, ipath (if still present), hfi1, and eventually soft-roce
6 6- Replace incorrect uAPI
diff --git a/drivers/staging/rdma/hfi1/file_ops.c b/drivers/staging/rdma/hfi1/file_ops.c
index 8396dc5fb6c1..c1c5bf82addb 100644
--- a/drivers/staging/rdma/hfi1/file_ops.c
+++ b/drivers/staging/rdma/hfi1/file_ops.c
@@ -49,6 +49,8 @@
49#include <linux/vmalloc.h> 49#include <linux/vmalloc.h>
50#include <linux/io.h> 50#include <linux/io.h>
51 51
52#include <rdma/ib.h>
53
52#include "hfi.h" 54#include "hfi.h"
53#include "pio.h" 55#include "pio.h"
54#include "device.h" 56#include "device.h"
@@ -190,6 +192,10 @@ static ssize_t hfi1_file_write(struct file *fp, const char __user *data,
190 int uctxt_required = 1; 192 int uctxt_required = 1;
191 int must_be_root = 0; 193 int must_be_root = 0;
192 194
195 /* FIXME: This interface cannot continue out of staging */
196 if (WARN_ON_ONCE(!ib_safe_file_access(fp)))
197 return -EACCES;
198
193 if (count < sizeof(cmd)) { 199 if (count < sizeof(cmd)) {
194 ret = -EINVAL; 200 ret = -EINVAL;
195 goto bail; 201 goto bail;
@@ -791,15 +797,16 @@ static int hfi1_file_close(struct inode *inode, struct file *fp)
791 spin_unlock_irqrestore(&dd->uctxt_lock, flags); 797 spin_unlock_irqrestore(&dd->uctxt_lock, flags);
792 798
793 dd->rcd[uctxt->ctxt] = NULL; 799 dd->rcd[uctxt->ctxt] = NULL;
800
801 hfi1_user_exp_rcv_free(fdata);
802 hfi1_clear_ctxt_pkey(dd, uctxt->ctxt);
803
794 uctxt->rcvwait_to = 0; 804 uctxt->rcvwait_to = 0;
795 uctxt->piowait_to = 0; 805 uctxt->piowait_to = 0;
796 uctxt->rcvnowait = 0; 806 uctxt->rcvnowait = 0;
797 uctxt->pionowait = 0; 807 uctxt->pionowait = 0;
798 uctxt->event_flags = 0; 808 uctxt->event_flags = 0;
799 809
800 hfi1_user_exp_rcv_free(fdata);
801 hfi1_clear_ctxt_pkey(dd, uctxt->ctxt);
802
803 hfi1_stats.sps_ctxts--; 810 hfi1_stats.sps_ctxts--;
804 if (++dd->freectxts == dd->num_user_contexts) 811 if (++dd->freectxts == dd->num_user_contexts)
805 aspm_enable_all(dd); 812 aspm_enable_all(dd);
@@ -1127,27 +1134,13 @@ bail:
1127 1134
1128static int user_init(struct file *fp) 1135static int user_init(struct file *fp)
1129{ 1136{
1130 int ret;
1131 unsigned int rcvctrl_ops = 0; 1137 unsigned int rcvctrl_ops = 0;
1132 struct hfi1_filedata *fd = fp->private_data; 1138 struct hfi1_filedata *fd = fp->private_data;
1133 struct hfi1_ctxtdata *uctxt = fd->uctxt; 1139 struct hfi1_ctxtdata *uctxt = fd->uctxt;
1134 1140
1135 /* make sure that the context has already been setup */ 1141 /* make sure that the context has already been setup */
1136 if (!test_bit(HFI1_CTXT_SETUP_DONE, &uctxt->event_flags)) { 1142 if (!test_bit(HFI1_CTXT_SETUP_DONE, &uctxt->event_flags))
1137 ret = -EFAULT; 1143 return -EFAULT;
1138 goto done;
1139 }
1140
1141 /*
1142 * Subctxts don't need to initialize anything since master
1143 * has done it.
1144 */
1145 if (fd->subctxt) {
1146 ret = wait_event_interruptible(uctxt->wait, !test_bit(
1147 HFI1_CTXT_MASTER_UNINIT,
1148 &uctxt->event_flags));
1149 goto expected;
1150 }
1151 1144
1152 /* initialize poll variables... */ 1145 /* initialize poll variables... */
1153 uctxt->urgent = 0; 1146 uctxt->urgent = 0;
@@ -1202,19 +1195,7 @@ static int user_init(struct file *fp)
1202 wake_up(&uctxt->wait); 1195 wake_up(&uctxt->wait);
1203 } 1196 }
1204 1197
1205expected: 1198 return 0;
1206 /*
1207 * Expected receive has to be setup for all processes (including
1208 * shared contexts). However, it has to be done after the master
1209 * context has been fully configured as it depends on the
1210 * eager/expected split of the RcvArray entries.
1211 * Setting it up here ensures that the subcontexts will be waiting
1212 * (due to the above wait_event_interruptible() until the master
1213 * is setup.
1214 */
1215 ret = hfi1_user_exp_rcv_init(fp);
1216done:
1217 return ret;
1218} 1199}
1219 1200
1220static int get_ctxt_info(struct file *fp, void __user *ubase, __u32 len) 1201static int get_ctxt_info(struct file *fp, void __user *ubase, __u32 len)
@@ -1261,7 +1242,7 @@ static int setup_ctxt(struct file *fp)
1261 int ret = 0; 1242 int ret = 0;
1262 1243
1263 /* 1244 /*
1264 * Context should be set up only once (including allocation and 1245 * Context should be set up only once, including allocation and
1265 * programming of eager buffers. This is done if context sharing 1246 * programming of eager buffers. This is done if context sharing
1266 * is not requested or by the master process. 1247 * is not requested or by the master process.
1267 */ 1248 */
@@ -1282,10 +1263,29 @@ static int setup_ctxt(struct file *fp)
1282 if (ret) 1263 if (ret)
1283 goto done; 1264 goto done;
1284 } 1265 }
1266 } else {
1267 ret = wait_event_interruptible(uctxt->wait, !test_bit(
1268 HFI1_CTXT_MASTER_UNINIT,
1269 &uctxt->event_flags));
1270 if (ret)
1271 goto done;
1285 } 1272 }
1273
1286 ret = hfi1_user_sdma_alloc_queues(uctxt, fp); 1274 ret = hfi1_user_sdma_alloc_queues(uctxt, fp);
1287 if (ret) 1275 if (ret)
1288 goto done; 1276 goto done;
1277 /*
1278 * Expected receive has to be setup for all processes (including
1279 * shared contexts). However, it has to be done after the master
1280 * context has been fully configured as it depends on the
1281 * eager/expected split of the RcvArray entries.
1282 * Setting it up here ensures that the subcontexts will be waiting
1283 * (due to the above wait_event_interruptible() until the master
1284 * is setup.
1285 */
1286 ret = hfi1_user_exp_rcv_init(fp);
1287 if (ret)
1288 goto done;
1289 1289
1290 set_bit(HFI1_CTXT_SETUP_DONE, &uctxt->event_flags); 1290 set_bit(HFI1_CTXT_SETUP_DONE, &uctxt->event_flags);
1291done: 1291done:
@@ -1565,29 +1565,8 @@ static loff_t ui_lseek(struct file *filp, loff_t offset, int whence)
1565{ 1565{
1566 struct hfi1_devdata *dd = filp->private_data; 1566 struct hfi1_devdata *dd = filp->private_data;
1567 1567
1568 switch (whence) { 1568 return fixed_size_llseek(filp, offset, whence,
1569 case SEEK_SET: 1569 (dd->kregend - dd->kregbase) + DC8051_DATA_MEM_SIZE);
1570 break;
1571 case SEEK_CUR:
1572 offset += filp->f_pos;
1573 break;
1574 case SEEK_END:
1575 offset = ((dd->kregend - dd->kregbase) + DC8051_DATA_MEM_SIZE) -
1576 offset;
1577 break;
1578 default:
1579 return -EINVAL;
1580 }
1581
1582 if (offset < 0)
1583 return -EINVAL;
1584
1585 if (offset >= (dd->kregend - dd->kregbase) + DC8051_DATA_MEM_SIZE)
1586 return -EINVAL;
1587
1588 filp->f_pos = offset;
1589
1590 return filp->f_pos;
1591} 1570}
1592 1571
1593/* NOTE: assumes unsigned long is 8 bytes */ 1572/* NOTE: assumes unsigned long is 8 bytes */
diff --git a/drivers/staging/rdma/hfi1/mmu_rb.c b/drivers/staging/rdma/hfi1/mmu_rb.c
index c7ad0164ea9a..b3f0682a36c9 100644
--- a/drivers/staging/rdma/hfi1/mmu_rb.c
+++ b/drivers/staging/rdma/hfi1/mmu_rb.c
@@ -71,6 +71,7 @@ static inline void mmu_notifier_range_start(struct mmu_notifier *,
71 struct mm_struct *, 71 struct mm_struct *,
72 unsigned long, unsigned long); 72 unsigned long, unsigned long);
73static void mmu_notifier_mem_invalidate(struct mmu_notifier *, 73static void mmu_notifier_mem_invalidate(struct mmu_notifier *,
74 struct mm_struct *,
74 unsigned long, unsigned long); 75 unsigned long, unsigned long);
75static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *, 76static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *,
76 unsigned long, unsigned long); 77 unsigned long, unsigned long);
@@ -137,7 +138,7 @@ void hfi1_mmu_rb_unregister(struct rb_root *root)
137 rbnode = rb_entry(node, struct mmu_rb_node, node); 138 rbnode = rb_entry(node, struct mmu_rb_node, node);
138 rb_erase(node, root); 139 rb_erase(node, root);
139 if (handler->ops->remove) 140 if (handler->ops->remove)
140 handler->ops->remove(root, rbnode, false); 141 handler->ops->remove(root, rbnode, NULL);
141 } 142 }
142 } 143 }
143 144
@@ -176,7 +177,7 @@ unlock:
176 return ret; 177 return ret;
177} 178}
178 179
179/* Caller must host handler lock */ 180/* Caller must hold handler lock */
180static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *handler, 181static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *handler,
181 unsigned long addr, 182 unsigned long addr,
182 unsigned long len) 183 unsigned long len)
@@ -200,15 +201,21 @@ static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *handler,
200 return node; 201 return node;
201} 202}
202 203
204/* Caller must *not* hold handler lock. */
203static void __mmu_rb_remove(struct mmu_rb_handler *handler, 205static void __mmu_rb_remove(struct mmu_rb_handler *handler,
204 struct mmu_rb_node *node, bool arg) 206 struct mmu_rb_node *node, struct mm_struct *mm)
205{ 207{
208 unsigned long flags;
209
206 /* Validity of handler and node pointers has been checked by caller. */ 210 /* Validity of handler and node pointers has been checked by caller. */
207 hfi1_cdbg(MMU, "Removing node addr 0x%llx, len %u", node->addr, 211 hfi1_cdbg(MMU, "Removing node addr 0x%llx, len %u", node->addr,
208 node->len); 212 node->len);
213 spin_lock_irqsave(&handler->lock, flags);
209 __mmu_int_rb_remove(node, handler->root); 214 __mmu_int_rb_remove(node, handler->root);
215 spin_unlock_irqrestore(&handler->lock, flags);
216
210 if (handler->ops->remove) 217 if (handler->ops->remove)
211 handler->ops->remove(handler->root, node, arg); 218 handler->ops->remove(handler->root, node, mm);
212} 219}
213 220
214struct mmu_rb_node *hfi1_mmu_rb_search(struct rb_root *root, unsigned long addr, 221struct mmu_rb_node *hfi1_mmu_rb_search(struct rb_root *root, unsigned long addr,
@@ -231,14 +238,11 @@ struct mmu_rb_node *hfi1_mmu_rb_search(struct rb_root *root, unsigned long addr,
231void hfi1_mmu_rb_remove(struct rb_root *root, struct mmu_rb_node *node) 238void hfi1_mmu_rb_remove(struct rb_root *root, struct mmu_rb_node *node)
232{ 239{
233 struct mmu_rb_handler *handler = find_mmu_handler(root); 240 struct mmu_rb_handler *handler = find_mmu_handler(root);
234 unsigned long flags;
235 241
236 if (!handler || !node) 242 if (!handler || !node)
237 return; 243 return;
238 244
239 spin_lock_irqsave(&handler->lock, flags); 245 __mmu_rb_remove(handler, node, NULL);
240 __mmu_rb_remove(handler, node, false);
241 spin_unlock_irqrestore(&handler->lock, flags);
242} 246}
243 247
244static struct mmu_rb_handler *find_mmu_handler(struct rb_root *root) 248static struct mmu_rb_handler *find_mmu_handler(struct rb_root *root)
@@ -260,7 +264,7 @@ unlock:
260static inline void mmu_notifier_page(struct mmu_notifier *mn, 264static inline void mmu_notifier_page(struct mmu_notifier *mn,
261 struct mm_struct *mm, unsigned long addr) 265 struct mm_struct *mm, unsigned long addr)
262{ 266{
263 mmu_notifier_mem_invalidate(mn, addr, addr + PAGE_SIZE); 267 mmu_notifier_mem_invalidate(mn, mm, addr, addr + PAGE_SIZE);
264} 268}
265 269
266static inline void mmu_notifier_range_start(struct mmu_notifier *mn, 270static inline void mmu_notifier_range_start(struct mmu_notifier *mn,
@@ -268,25 +272,31 @@ static inline void mmu_notifier_range_start(struct mmu_notifier *mn,
268 unsigned long start, 272 unsigned long start,
269 unsigned long end) 273 unsigned long end)
270{ 274{
271 mmu_notifier_mem_invalidate(mn, start, end); 275 mmu_notifier_mem_invalidate(mn, mm, start, end);
272} 276}
273 277
274static void mmu_notifier_mem_invalidate(struct mmu_notifier *mn, 278static void mmu_notifier_mem_invalidate(struct mmu_notifier *mn,
279 struct mm_struct *mm,
275 unsigned long start, unsigned long end) 280 unsigned long start, unsigned long end)
276{ 281{
277 struct mmu_rb_handler *handler = 282 struct mmu_rb_handler *handler =
278 container_of(mn, struct mmu_rb_handler, mn); 283 container_of(mn, struct mmu_rb_handler, mn);
279 struct rb_root *root = handler->root; 284 struct rb_root *root = handler->root;
280 struct mmu_rb_node *node; 285 struct mmu_rb_node *node, *ptr = NULL;
281 unsigned long flags; 286 unsigned long flags;
282 287
283 spin_lock_irqsave(&handler->lock, flags); 288 spin_lock_irqsave(&handler->lock, flags);
284 for (node = __mmu_int_rb_iter_first(root, start, end - 1); node; 289 for (node = __mmu_int_rb_iter_first(root, start, end - 1);
285 node = __mmu_int_rb_iter_next(node, start, end - 1)) { 290 node; node = ptr) {
291 /* Guard against node removal. */
292 ptr = __mmu_int_rb_iter_next(node, start, end - 1);
286 hfi1_cdbg(MMU, "Invalidating node addr 0x%llx, len %u", 293 hfi1_cdbg(MMU, "Invalidating node addr 0x%llx, len %u",
287 node->addr, node->len); 294 node->addr, node->len);
288 if (handler->ops->invalidate(root, node)) 295 if (handler->ops->invalidate(root, node)) {
289 __mmu_rb_remove(handler, node, true); 296 spin_unlock_irqrestore(&handler->lock, flags);
297 __mmu_rb_remove(handler, node, mm);
298 spin_lock_irqsave(&handler->lock, flags);
299 }
290 } 300 }
291 spin_unlock_irqrestore(&handler->lock, flags); 301 spin_unlock_irqrestore(&handler->lock, flags);
292} 302}
diff --git a/drivers/staging/rdma/hfi1/mmu_rb.h b/drivers/staging/rdma/hfi1/mmu_rb.h
index f8523fdb8a18..19a306e83c7d 100644
--- a/drivers/staging/rdma/hfi1/mmu_rb.h
+++ b/drivers/staging/rdma/hfi1/mmu_rb.h
@@ -59,7 +59,8 @@ struct mmu_rb_node {
59struct mmu_rb_ops { 59struct mmu_rb_ops {
60 bool (*filter)(struct mmu_rb_node *, unsigned long, unsigned long); 60 bool (*filter)(struct mmu_rb_node *, unsigned long, unsigned long);
61 int (*insert)(struct rb_root *, struct mmu_rb_node *); 61 int (*insert)(struct rb_root *, struct mmu_rb_node *);
62 void (*remove)(struct rb_root *, struct mmu_rb_node *, bool); 62 void (*remove)(struct rb_root *, struct mmu_rb_node *,
63 struct mm_struct *);
63 int (*invalidate)(struct rb_root *, struct mmu_rb_node *); 64 int (*invalidate)(struct rb_root *, struct mmu_rb_node *);
64}; 65};
65 66
diff --git a/drivers/staging/rdma/hfi1/qp.c b/drivers/staging/rdma/hfi1/qp.c
index 29a5ad28019b..dc9119e1b458 100644
--- a/drivers/staging/rdma/hfi1/qp.c
+++ b/drivers/staging/rdma/hfi1/qp.c
@@ -519,10 +519,12 @@ static void iowait_sdma_drained(struct iowait *wait)
519 * do the flush work until that QP's 519 * do the flush work until that QP's
520 * sdma work has finished. 520 * sdma work has finished.
521 */ 521 */
522 spin_lock(&qp->s_lock);
522 if (qp->s_flags & RVT_S_WAIT_DMA) { 523 if (qp->s_flags & RVT_S_WAIT_DMA) {
523 qp->s_flags &= ~RVT_S_WAIT_DMA; 524 qp->s_flags &= ~RVT_S_WAIT_DMA;
524 hfi1_schedule_send(qp); 525 hfi1_schedule_send(qp);
525 } 526 }
527 spin_unlock(&qp->s_lock);
526} 528}
527 529
528/** 530/**
diff --git a/drivers/staging/rdma/hfi1/user_exp_rcv.c b/drivers/staging/rdma/hfi1/user_exp_rcv.c
index 0861e095df8d..8bd56d5c783d 100644
--- a/drivers/staging/rdma/hfi1/user_exp_rcv.c
+++ b/drivers/staging/rdma/hfi1/user_exp_rcv.c
@@ -87,7 +87,8 @@ static u32 find_phys_blocks(struct page **, unsigned, struct tid_pageset *);
87static int set_rcvarray_entry(struct file *, unsigned long, u32, 87static int set_rcvarray_entry(struct file *, unsigned long, u32,
88 struct tid_group *, struct page **, unsigned); 88 struct tid_group *, struct page **, unsigned);
89static int mmu_rb_insert(struct rb_root *, struct mmu_rb_node *); 89static int mmu_rb_insert(struct rb_root *, struct mmu_rb_node *);
90static void mmu_rb_remove(struct rb_root *, struct mmu_rb_node *, bool); 90static void mmu_rb_remove(struct rb_root *, struct mmu_rb_node *,
91 struct mm_struct *);
91static int mmu_rb_invalidate(struct rb_root *, struct mmu_rb_node *); 92static int mmu_rb_invalidate(struct rb_root *, struct mmu_rb_node *);
92static int program_rcvarray(struct file *, unsigned long, struct tid_group *, 93static int program_rcvarray(struct file *, unsigned long, struct tid_group *,
93 struct tid_pageset *, unsigned, u16, struct page **, 94 struct tid_pageset *, unsigned, u16, struct page **,
@@ -254,6 +255,8 @@ int hfi1_user_exp_rcv_free(struct hfi1_filedata *fd)
254 struct hfi1_ctxtdata *uctxt = fd->uctxt; 255 struct hfi1_ctxtdata *uctxt = fd->uctxt;
255 struct tid_group *grp, *gptr; 256 struct tid_group *grp, *gptr;
256 257
258 if (!test_bit(HFI1_CTXT_SETUP_DONE, &uctxt->event_flags))
259 return 0;
257 /* 260 /*
258 * The notifier would have been removed when the process'es mm 261 * The notifier would have been removed when the process'es mm
259 * was freed. 262 * was freed.
@@ -899,7 +902,7 @@ static int unprogram_rcvarray(struct file *fp, u32 tidinfo,
899 if (!node || node->rcventry != (uctxt->expected_base + rcventry)) 902 if (!node || node->rcventry != (uctxt->expected_base + rcventry))
900 return -EBADF; 903 return -EBADF;
901 if (HFI1_CAP_IS_USET(TID_UNMAP)) 904 if (HFI1_CAP_IS_USET(TID_UNMAP))
902 mmu_rb_remove(&fd->tid_rb_root, &node->mmu, false); 905 mmu_rb_remove(&fd->tid_rb_root, &node->mmu, NULL);
903 else 906 else
904 hfi1_mmu_rb_remove(&fd->tid_rb_root, &node->mmu); 907 hfi1_mmu_rb_remove(&fd->tid_rb_root, &node->mmu);
905 908
@@ -965,7 +968,7 @@ static void unlock_exp_tids(struct hfi1_ctxtdata *uctxt,
965 continue; 968 continue;
966 if (HFI1_CAP_IS_USET(TID_UNMAP)) 969 if (HFI1_CAP_IS_USET(TID_UNMAP))
967 mmu_rb_remove(&fd->tid_rb_root, 970 mmu_rb_remove(&fd->tid_rb_root,
968 &node->mmu, false); 971 &node->mmu, NULL);
969 else 972 else
970 hfi1_mmu_rb_remove(&fd->tid_rb_root, 973 hfi1_mmu_rb_remove(&fd->tid_rb_root,
971 &node->mmu); 974 &node->mmu);
@@ -1032,7 +1035,7 @@ static int mmu_rb_insert(struct rb_root *root, struct mmu_rb_node *node)
1032} 1035}
1033 1036
1034static void mmu_rb_remove(struct rb_root *root, struct mmu_rb_node *node, 1037static void mmu_rb_remove(struct rb_root *root, struct mmu_rb_node *node,
1035 bool notifier) 1038 struct mm_struct *mm)
1036{ 1039{
1037 struct hfi1_filedata *fdata = 1040 struct hfi1_filedata *fdata =
1038 container_of(root, struct hfi1_filedata, tid_rb_root); 1041 container_of(root, struct hfi1_filedata, tid_rb_root);
diff --git a/drivers/staging/rdma/hfi1/user_sdma.c b/drivers/staging/rdma/hfi1/user_sdma.c
index ab6b6a42000f..d53a659548e0 100644
--- a/drivers/staging/rdma/hfi1/user_sdma.c
+++ b/drivers/staging/rdma/hfi1/user_sdma.c
@@ -278,7 +278,8 @@ static inline void pq_update(struct hfi1_user_sdma_pkt_q *);
278static void user_sdma_free_request(struct user_sdma_request *, bool); 278static void user_sdma_free_request(struct user_sdma_request *, bool);
279static int pin_vector_pages(struct user_sdma_request *, 279static int pin_vector_pages(struct user_sdma_request *,
280 struct user_sdma_iovec *); 280 struct user_sdma_iovec *);
281static void unpin_vector_pages(struct mm_struct *, struct page **, unsigned); 281static void unpin_vector_pages(struct mm_struct *, struct page **, unsigned,
282 unsigned);
282static int check_header_template(struct user_sdma_request *, 283static int check_header_template(struct user_sdma_request *,
283 struct hfi1_pkt_header *, u32, u32); 284 struct hfi1_pkt_header *, u32, u32);
284static int set_txreq_header(struct user_sdma_request *, 285static int set_txreq_header(struct user_sdma_request *,
@@ -299,7 +300,8 @@ static int defer_packet_queue(
299static void activate_packet_queue(struct iowait *, int); 300static void activate_packet_queue(struct iowait *, int);
300static bool sdma_rb_filter(struct mmu_rb_node *, unsigned long, unsigned long); 301static bool sdma_rb_filter(struct mmu_rb_node *, unsigned long, unsigned long);
301static int sdma_rb_insert(struct rb_root *, struct mmu_rb_node *); 302static int sdma_rb_insert(struct rb_root *, struct mmu_rb_node *);
302static void sdma_rb_remove(struct rb_root *, struct mmu_rb_node *, bool); 303static void sdma_rb_remove(struct rb_root *, struct mmu_rb_node *,
304 struct mm_struct *);
303static int sdma_rb_invalidate(struct rb_root *, struct mmu_rb_node *); 305static int sdma_rb_invalidate(struct rb_root *, struct mmu_rb_node *);
304 306
305static struct mmu_rb_ops sdma_rb_ops = { 307static struct mmu_rb_ops sdma_rb_ops = {
@@ -1063,8 +1065,10 @@ static int pin_vector_pages(struct user_sdma_request *req,
1063 rb_node = hfi1_mmu_rb_search(&pq->sdma_rb_root, 1065 rb_node = hfi1_mmu_rb_search(&pq->sdma_rb_root,
1064 (unsigned long)iovec->iov.iov_base, 1066 (unsigned long)iovec->iov.iov_base,
1065 iovec->iov.iov_len); 1067 iovec->iov.iov_len);
1066 if (rb_node) 1068 if (rb_node && !IS_ERR(rb_node))
1067 node = container_of(rb_node, struct sdma_mmu_node, rb); 1069 node = container_of(rb_node, struct sdma_mmu_node, rb);
1070 else
1071 rb_node = NULL;
1068 1072
1069 if (!node) { 1073 if (!node) {
1070 node = kzalloc(sizeof(*node), GFP_KERNEL); 1074 node = kzalloc(sizeof(*node), GFP_KERNEL);
@@ -1107,7 +1111,8 @@ retry:
1107 goto bail; 1111 goto bail;
1108 } 1112 }
1109 if (pinned != npages) { 1113 if (pinned != npages) {
1110 unpin_vector_pages(current->mm, pages, pinned); 1114 unpin_vector_pages(current->mm, pages, node->npages,
1115 pinned);
1111 ret = -EFAULT; 1116 ret = -EFAULT;
1112 goto bail; 1117 goto bail;
1113 } 1118 }
@@ -1147,9 +1152,9 @@ bail:
1147} 1152}
1148 1153
1149static void unpin_vector_pages(struct mm_struct *mm, struct page **pages, 1154static void unpin_vector_pages(struct mm_struct *mm, struct page **pages,
1150 unsigned npages) 1155 unsigned start, unsigned npages)
1151{ 1156{
1152 hfi1_release_user_pages(mm, pages, npages, 0); 1157 hfi1_release_user_pages(mm, pages + start, npages, 0);
1153 kfree(pages); 1158 kfree(pages);
1154} 1159}
1155 1160
@@ -1502,7 +1507,7 @@ static void user_sdma_free_request(struct user_sdma_request *req, bool unpin)
1502 &req->pq->sdma_rb_root, 1507 &req->pq->sdma_rb_root,
1503 (unsigned long)req->iovs[i].iov.iov_base, 1508 (unsigned long)req->iovs[i].iov.iov_base,
1504 req->iovs[i].iov.iov_len); 1509 req->iovs[i].iov.iov_len);
1505 if (!mnode) 1510 if (!mnode || IS_ERR(mnode))
1506 continue; 1511 continue;
1507 1512
1508 node = container_of(mnode, struct sdma_mmu_node, rb); 1513 node = container_of(mnode, struct sdma_mmu_node, rb);
@@ -1547,7 +1552,7 @@ static int sdma_rb_insert(struct rb_root *root, struct mmu_rb_node *mnode)
1547} 1552}
1548 1553
1549static void sdma_rb_remove(struct rb_root *root, struct mmu_rb_node *mnode, 1554static void sdma_rb_remove(struct rb_root *root, struct mmu_rb_node *mnode,
1550 bool notifier) 1555 struct mm_struct *mm)
1551{ 1556{
1552 struct sdma_mmu_node *node = 1557 struct sdma_mmu_node *node =
1553 container_of(mnode, struct sdma_mmu_node, rb); 1558 container_of(mnode, struct sdma_mmu_node, rb);
@@ -1557,14 +1562,20 @@ static void sdma_rb_remove(struct rb_root *root, struct mmu_rb_node *mnode,
1557 node->pq->n_locked -= node->npages; 1562 node->pq->n_locked -= node->npages;
1558 spin_unlock(&node->pq->evict_lock); 1563 spin_unlock(&node->pq->evict_lock);
1559 1564
1560 unpin_vector_pages(notifier ? NULL : current->mm, node->pages, 1565 /*
1566 * If mm is set, we are being called by the MMU notifier and we
1567 * should not pass a mm_struct to unpin_vector_page(). This is to
1568 * prevent a deadlock when hfi1_release_user_pages() attempts to
1569 * take the mmap_sem, which the MMU notifier has already taken.
1570 */
1571 unpin_vector_pages(mm ? NULL : current->mm, node->pages, 0,
1561 node->npages); 1572 node->npages);
1562 /* 1573 /*
1563 * If called by the MMU notifier, we have to adjust the pinned 1574 * If called by the MMU notifier, we have to adjust the pinned
1564 * page count ourselves. 1575 * page count ourselves.
1565 */ 1576 */
1566 if (notifier) 1577 if (mm)
1567 current->mm->pinned_vm -= node->npages; 1578 mm->pinned_vm -= node->npages;
1568 kfree(node); 1579 kfree(node);
1569} 1580}
1570 1581
diff --git a/drivers/thermal/hisi_thermal.c b/drivers/thermal/hisi_thermal.c
index 36d07295f8e3..5e820b541506 100644
--- a/drivers/thermal/hisi_thermal.c
+++ b/drivers/thermal/hisi_thermal.c
@@ -68,12 +68,12 @@ static inline int _step_to_temp(int step)
68 * Every step equals (1 * 200) / 255 celsius, and finally 68 * Every step equals (1 * 200) / 255 celsius, and finally
69 * need convert to millicelsius. 69 * need convert to millicelsius.
70 */ 70 */
71 return (HISI_TEMP_BASE + (step * 200 / 255)) * 1000; 71 return (HISI_TEMP_BASE * 1000 + (step * 200000 / 255));
72} 72}
73 73
74static inline long _temp_to_step(long temp) 74static inline long _temp_to_step(long temp)
75{ 75{
76 return ((temp / 1000 - HISI_TEMP_BASE) * 255 / 200); 76 return ((temp - HISI_TEMP_BASE * 1000) * 255) / 200000;
77} 77}
78 78
79static long hisi_thermal_get_sensor_temp(struct hisi_thermal_data *data, 79static long hisi_thermal_get_sensor_temp(struct hisi_thermal_data *data,
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index f1db49625555..5133cd1e10b7 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -959,7 +959,7 @@ static DEVICE_ATTR(sustainable_power, S_IWUSR | S_IRUGO, sustainable_power_show,
959 struct thermal_zone_device *tz = to_thermal_zone(dev); \ 959 struct thermal_zone_device *tz = to_thermal_zone(dev); \
960 \ 960 \
961 if (tz->tzp) \ 961 if (tz->tzp) \
962 return sprintf(buf, "%u\n", tz->tzp->name); \ 962 return sprintf(buf, "%d\n", tz->tzp->name); \
963 else \ 963 else \
964 return -EIO; \ 964 return -EIO; \
965 } \ 965 } \
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index 541ead4d8965..85b8517f17a0 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -386,9 +386,7 @@ void ceph_put_mds_session(struct ceph_mds_session *s)
386 atomic_read(&s->s_ref), atomic_read(&s->s_ref)-1); 386 atomic_read(&s->s_ref), atomic_read(&s->s_ref)-1);
387 if (atomic_dec_and_test(&s->s_ref)) { 387 if (atomic_dec_and_test(&s->s_ref)) {
388 if (s->s_auth.authorizer) 388 if (s->s_auth.authorizer)
389 ceph_auth_destroy_authorizer( 389 ceph_auth_destroy_authorizer(s->s_auth.authorizer);
390 s->s_mdsc->fsc->client->monc.auth,
391 s->s_auth.authorizer);
392 kfree(s); 390 kfree(s);
393 } 391 }
394} 392}
@@ -3900,7 +3898,7 @@ static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
3900 struct ceph_auth_handshake *auth = &s->s_auth; 3898 struct ceph_auth_handshake *auth = &s->s_auth;
3901 3899
3902 if (force_new && auth->authorizer) { 3900 if (force_new && auth->authorizer) {
3903 ceph_auth_destroy_authorizer(ac, auth->authorizer); 3901 ceph_auth_destroy_authorizer(auth->authorizer);
3904 auth->authorizer = NULL; 3902 auth->authorizer = NULL;
3905 } 3903 }
3906 if (!auth->authorizer) { 3904 if (!auth->authorizer) {
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 719924d6c706..dcad5e210525 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -1295,7 +1295,7 @@ static int fuse_get_user_pages(struct fuse_req *req, struct iov_iter *ii,
1295 1295
1296 *nbytesp = nbytes; 1296 *nbytesp = nbytes;
1297 1297
1298 return ret; 1298 return ret < 0 ? ret : 0;
1299} 1299}
1300 1300
1301static inline int fuse_iter_npages(const struct iov_iter *ii_p) 1301static inline int fuse_iter_npages(const struct iov_iter *ii_p)
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index 9aed6e202201..13719d3f35f8 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -2455,6 +2455,8 @@ int dlm_deref_lockres_done_handler(struct o2net_msg *msg, u32 len, void *data,
2455 2455
2456 spin_unlock(&dlm->spinlock); 2456 spin_unlock(&dlm->spinlock);
2457 2457
2458 ret = 0;
2459
2458done: 2460done:
2459 dlm_put(dlm); 2461 dlm_put(dlm);
2460 return ret; 2462 return ret;
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 229cb546bee0..541583510cfb 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -1518,6 +1518,32 @@ static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma,
1518 return page; 1518 return page;
1519} 1519}
1520 1520
1521#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1522static struct page *can_gather_numa_stats_pmd(pmd_t pmd,
1523 struct vm_area_struct *vma,
1524 unsigned long addr)
1525{
1526 struct page *page;
1527 int nid;
1528
1529 if (!pmd_present(pmd))
1530 return NULL;
1531
1532 page = vm_normal_page_pmd(vma, addr, pmd);
1533 if (!page)
1534 return NULL;
1535
1536 if (PageReserved(page))
1537 return NULL;
1538
1539 nid = page_to_nid(page);
1540 if (!node_isset(nid, node_states[N_MEMORY]))
1541 return NULL;
1542
1543 return page;
1544}
1545#endif
1546
1521static int gather_pte_stats(pmd_t *pmd, unsigned long addr, 1547static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
1522 unsigned long end, struct mm_walk *walk) 1548 unsigned long end, struct mm_walk *walk)
1523{ 1549{
@@ -1527,14 +1553,14 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
1527 pte_t *orig_pte; 1553 pte_t *orig_pte;
1528 pte_t *pte; 1554 pte_t *pte;
1529 1555
1556#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1530 ptl = pmd_trans_huge_lock(pmd, vma); 1557 ptl = pmd_trans_huge_lock(pmd, vma);
1531 if (ptl) { 1558 if (ptl) {
1532 pte_t huge_pte = *(pte_t *)pmd;
1533 struct page *page; 1559 struct page *page;
1534 1560
1535 page = can_gather_numa_stats(huge_pte, vma, addr); 1561 page = can_gather_numa_stats_pmd(*pmd, vma, addr);
1536 if (page) 1562 if (page)
1537 gather_stats(page, md, pte_dirty(huge_pte), 1563 gather_stats(page, md, pmd_dirty(*pmd),
1538 HPAGE_PMD_SIZE/PAGE_SIZE); 1564 HPAGE_PMD_SIZE/PAGE_SIZE);
1539 spin_unlock(ptl); 1565 spin_unlock(ptl);
1540 return 0; 1566 return 0;
@@ -1542,6 +1568,7 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
1542 1568
1543 if (pmd_trans_unstable(pmd)) 1569 if (pmd_trans_unstable(pmd))
1544 return 0; 1570 return 0;
1571#endif
1545 orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); 1572 orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
1546 do { 1573 do {
1547 struct page *page = can_gather_numa_stats(*pte, vma, addr); 1574 struct page *page = can_gather_numa_stats(*pte, vma, addr);
diff --git a/fs/udf/super.c b/fs/udf/super.c
index fa92fe839fda..36661acaf33b 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -919,14 +919,14 @@ static int udf_load_pvoldesc(struct super_block *sb, sector_t block)
919#endif 919#endif
920 } 920 }
921 921
922 ret = udf_CS0toUTF8(outstr, 31, pvoldesc->volIdent, 32); 922 ret = udf_dstrCS0toUTF8(outstr, 31, pvoldesc->volIdent, 32);
923 if (ret < 0) 923 if (ret < 0)
924 goto out_bh; 924 goto out_bh;
925 925
926 strncpy(UDF_SB(sb)->s_volume_ident, outstr, ret); 926 strncpy(UDF_SB(sb)->s_volume_ident, outstr, ret);
927 udf_debug("volIdent[] = '%s'\n", UDF_SB(sb)->s_volume_ident); 927 udf_debug("volIdent[] = '%s'\n", UDF_SB(sb)->s_volume_ident);
928 928
929 ret = udf_CS0toUTF8(outstr, 127, pvoldesc->volSetIdent, 128); 929 ret = udf_dstrCS0toUTF8(outstr, 127, pvoldesc->volSetIdent, 128);
930 if (ret < 0) 930 if (ret < 0)
931 goto out_bh; 931 goto out_bh;
932 932
diff --git a/fs/udf/udfdecl.h b/fs/udf/udfdecl.h
index 972b70625614..263829ef1873 100644
--- a/fs/udf/udfdecl.h
+++ b/fs/udf/udfdecl.h
@@ -212,7 +212,7 @@ extern int udf_get_filename(struct super_block *, const uint8_t *, int,
212 uint8_t *, int); 212 uint8_t *, int);
213extern int udf_put_filename(struct super_block *, const uint8_t *, int, 213extern int udf_put_filename(struct super_block *, const uint8_t *, int,
214 uint8_t *, int); 214 uint8_t *, int);
215extern int udf_CS0toUTF8(uint8_t *, int, const uint8_t *, int); 215extern int udf_dstrCS0toUTF8(uint8_t *, int, const uint8_t *, int);
216 216
217/* ialloc.c */ 217/* ialloc.c */
218extern void udf_free_inode(struct inode *); 218extern void udf_free_inode(struct inode *);
diff --git a/fs/udf/unicode.c b/fs/udf/unicode.c
index 3ff42f4437f3..695389a4fc23 100644
--- a/fs/udf/unicode.c
+++ b/fs/udf/unicode.c
@@ -335,9 +335,21 @@ try_again:
335 return u_len; 335 return u_len;
336} 336}
337 337
338int udf_CS0toUTF8(uint8_t *utf_o, int o_len, const uint8_t *ocu_i, int i_len) 338int udf_dstrCS0toUTF8(uint8_t *utf_o, int o_len,
339 const uint8_t *ocu_i, int i_len)
339{ 340{
340 return udf_name_from_CS0(utf_o, o_len, ocu_i, i_len, 341 int s_len = 0;
342
343 if (i_len > 0) {
344 s_len = ocu_i[i_len - 1];
345 if (s_len >= i_len) {
346 pr_err("incorrect dstring lengths (%d/%d)\n",
347 s_len, i_len);
348 return -EINVAL;
349 }
350 }
351
352 return udf_name_from_CS0(utf_o, o_len, ocu_i, s_len,
341 udf_uni2char_utf8, 0); 353 udf_uni2char_utf8, 0);
342} 354}
343 355
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index f63afdc43bec..8ee27b8afe81 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -180,12 +180,13 @@ void bpf_register_prog_type(struct bpf_prog_type_list *tl);
180void bpf_register_map_type(struct bpf_map_type_list *tl); 180void bpf_register_map_type(struct bpf_map_type_list *tl);
181 181
182struct bpf_prog *bpf_prog_get(u32 ufd); 182struct bpf_prog *bpf_prog_get(u32 ufd);
183struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog);
183void bpf_prog_put(struct bpf_prog *prog); 184void bpf_prog_put(struct bpf_prog *prog);
184void bpf_prog_put_rcu(struct bpf_prog *prog); 185void bpf_prog_put_rcu(struct bpf_prog *prog);
185 186
186struct bpf_map *bpf_map_get_with_uref(u32 ufd); 187struct bpf_map *bpf_map_get_with_uref(u32 ufd);
187struct bpf_map *__bpf_map_get(struct fd f); 188struct bpf_map *__bpf_map_get(struct fd f);
188void bpf_map_inc(struct bpf_map *map, bool uref); 189struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref);
189void bpf_map_put_with_uref(struct bpf_map *map); 190void bpf_map_put_with_uref(struct bpf_map *map);
190void bpf_map_put(struct bpf_map *map); 191void bpf_map_put(struct bpf_map *map);
191int bpf_map_precharge_memlock(u32 pages); 192int bpf_map_precharge_memlock(u32 pages);
diff --git a/include/linux/ceph/auth.h b/include/linux/ceph/auth.h
index 260d78b587c4..1563265d2097 100644
--- a/include/linux/ceph/auth.h
+++ b/include/linux/ceph/auth.h
@@ -12,9 +12,12 @@
12 */ 12 */
13 13
14struct ceph_auth_client; 14struct ceph_auth_client;
15struct ceph_authorizer;
16struct ceph_msg; 15struct ceph_msg;
17 16
17struct ceph_authorizer {
18 void (*destroy)(struct ceph_authorizer *);
19};
20
18struct ceph_auth_handshake { 21struct ceph_auth_handshake {
19 struct ceph_authorizer *authorizer; 22 struct ceph_authorizer *authorizer;
20 void *authorizer_buf; 23 void *authorizer_buf;
@@ -62,8 +65,6 @@ struct ceph_auth_client_ops {
62 struct ceph_auth_handshake *auth); 65 struct ceph_auth_handshake *auth);
63 int (*verify_authorizer_reply)(struct ceph_auth_client *ac, 66 int (*verify_authorizer_reply)(struct ceph_auth_client *ac,
64 struct ceph_authorizer *a, size_t len); 67 struct ceph_authorizer *a, size_t len);
65 void (*destroy_authorizer)(struct ceph_auth_client *ac,
66 struct ceph_authorizer *a);
67 void (*invalidate_authorizer)(struct ceph_auth_client *ac, 68 void (*invalidate_authorizer)(struct ceph_auth_client *ac,
68 int peer_type); 69 int peer_type);
69 70
@@ -112,8 +113,7 @@ extern int ceph_auth_is_authenticated(struct ceph_auth_client *ac);
112extern int ceph_auth_create_authorizer(struct ceph_auth_client *ac, 113extern int ceph_auth_create_authorizer(struct ceph_auth_client *ac,
113 int peer_type, 114 int peer_type,
114 struct ceph_auth_handshake *auth); 115 struct ceph_auth_handshake *auth);
115extern void ceph_auth_destroy_authorizer(struct ceph_auth_client *ac, 116void ceph_auth_destroy_authorizer(struct ceph_authorizer *a);
116 struct ceph_authorizer *a);
117extern int ceph_auth_update_authorizer(struct ceph_auth_client *ac, 117extern int ceph_auth_update_authorizer(struct ceph_auth_client *ac,
118 int peer_type, 118 int peer_type,
119 struct ceph_auth_handshake *a); 119 struct ceph_auth_handshake *a);
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
index 4343df806710..cbf460927c42 100644
--- a/include/linux/ceph/osd_client.h
+++ b/include/linux/ceph/osd_client.h
@@ -16,7 +16,6 @@ struct ceph_msg;
16struct ceph_snap_context; 16struct ceph_snap_context;
17struct ceph_osd_request; 17struct ceph_osd_request;
18struct ceph_osd_client; 18struct ceph_osd_client;
19struct ceph_authorizer;
20 19
21/* 20/*
22 * completion callback for async writepages 21 * completion callback for async writepages
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 3e39ae5bc799..5b17de62c962 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -444,6 +444,7 @@ struct cgroup_subsys {
444 int (*can_attach)(struct cgroup_taskset *tset); 444 int (*can_attach)(struct cgroup_taskset *tset);
445 void (*cancel_attach)(struct cgroup_taskset *tset); 445 void (*cancel_attach)(struct cgroup_taskset *tset);
446 void (*attach)(struct cgroup_taskset *tset); 446 void (*attach)(struct cgroup_taskset *tset);
447 void (*post_attach)(void);
447 int (*can_fork)(struct task_struct *task); 448 int (*can_fork)(struct task_struct *task);
448 void (*cancel_fork)(struct task_struct *task); 449 void (*cancel_fork)(struct task_struct *task);
449 void (*fork)(struct task_struct *task); 450 void (*fork)(struct task_struct *task);
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index fea160ee5803..85a868ccb493 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -137,8 +137,6 @@ static inline void set_mems_allowed(nodemask_t nodemask)
137 task_unlock(current); 137 task_unlock(current);
138} 138}
139 139
140extern void cpuset_post_attach_flush(void);
141
142#else /* !CONFIG_CPUSETS */ 140#else /* !CONFIG_CPUSETS */
143 141
144static inline bool cpusets_enabled(void) { return false; } 142static inline bool cpusets_enabled(void) { return false; }
@@ -245,10 +243,6 @@ static inline bool read_mems_allowed_retry(unsigned int seq)
245 return false; 243 return false;
246} 244}
247 245
248static inline void cpuset_post_attach_flush(void)
249{
250}
251
252#endif /* !CONFIG_CPUSETS */ 246#endif /* !CONFIG_CPUSETS */
253 247
254#endif /* _LINUX_CPUSET_H */ 248#endif /* _LINUX_CPUSET_H */
diff --git a/include/linux/hash.h b/include/linux/hash.h
index 1afde47e1528..79c52fa81cac 100644
--- a/include/linux/hash.h
+++ b/include/linux/hash.h
@@ -32,12 +32,28 @@
32#error Wordsize not 32 or 64 32#error Wordsize not 32 or 64
33#endif 33#endif
34 34
35/*
36 * The above primes are actively bad for hashing, since they are
37 * too sparse. The 32-bit one is mostly ok, the 64-bit one causes
38 * real problems. Besides, the "prime" part is pointless for the
39 * multiplicative hash.
40 *
41 * Although a random odd number will do, it turns out that the golden
42 * ratio phi = (sqrt(5)-1)/2, or its negative, has particularly nice
43 * properties.
44 *
45 * These are the negative, (1 - phi) = (phi^2) = (3 - sqrt(5))/2.
46 * (See Knuth vol 3, section 6.4, exercise 9.)
47 */
48#define GOLDEN_RATIO_32 0x61C88647
49#define GOLDEN_RATIO_64 0x61C8864680B583EBull
50
35static __always_inline u64 hash_64(u64 val, unsigned int bits) 51static __always_inline u64 hash_64(u64 val, unsigned int bits)
36{ 52{
37 u64 hash = val; 53 u64 hash = val;
38 54
39#if defined(CONFIG_ARCH_HAS_FAST_MULTIPLIER) && BITS_PER_LONG == 64 55#if BITS_PER_LONG == 64
40 hash = hash * GOLDEN_RATIO_PRIME_64; 56 hash = hash * GOLDEN_RATIO_64;
41#else 57#else
42 /* Sigh, gcc can't optimise this alone like it does for 32 bits. */ 58 /* Sigh, gcc can't optimise this alone like it does for 32 bits. */
43 u64 n = hash; 59 u64 n = hash;
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 7008623e24b1..d7b9e5346fba 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -152,6 +152,7 @@ static inline bool is_huge_zero_pmd(pmd_t pmd)
152} 152}
153 153
154struct page *get_huge_zero_page(void); 154struct page *get_huge_zero_page(void);
155void put_huge_zero_page(void);
155 156
156#else /* CONFIG_TRANSPARENT_HUGEPAGE */ 157#else /* CONFIG_TRANSPARENT_HUGEPAGE */
157#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; }) 158#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
@@ -208,6 +209,10 @@ static inline bool is_huge_zero_page(struct page *page)
208 return false; 209 return false;
209} 210}
210 211
212static inline void put_huge_zero_page(void)
213{
214 BUILD_BUG();
215}
211 216
212static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma, 217static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma,
213 unsigned long addr, pmd_t *pmd, int flags) 218 unsigned long addr, pmd_t *pmd, int flags)
diff --git a/include/linux/if_ether.h b/include/linux/if_ether.h
index d5569734f672..548fd535fd02 100644
--- a/include/linux/if_ether.h
+++ b/include/linux/if_ether.h
@@ -28,6 +28,11 @@ static inline struct ethhdr *eth_hdr(const struct sk_buff *skb)
28 return (struct ethhdr *)skb_mac_header(skb); 28 return (struct ethhdr *)skb_mac_header(skb);
29} 29}
30 30
31static inline struct ethhdr *inner_eth_hdr(const struct sk_buff *skb)
32{
33 return (struct ethhdr *)skb_inner_mac_header(skb);
34}
35
31int eth_header_parse(const struct sk_buff *skb, unsigned char *haddr); 36int eth_header_parse(const struct sk_buff *skb, unsigned char *haddr);
32 37
33extern ssize_t sysfs_format_mac(char *buf, const unsigned char *addr, int len); 38extern ssize_t sysfs_format_mac(char *buf, const unsigned char *addr, int len);
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index d026b190c530..d10ef06971b5 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -196,9 +196,11 @@ struct lock_list {
196 * We record lock dependency chains, so that we can cache them: 196 * We record lock dependency chains, so that we can cache them:
197 */ 197 */
198struct lock_chain { 198struct lock_chain {
199 u8 irq_context; 199 /* see BUILD_BUG_ON()s in lookup_chain_cache() */
200 u8 depth; 200 unsigned int irq_context : 2,
201 u16 base; 201 depth : 6,
202 base : 24;
203 /* 4 byte hole */
202 struct hlist_node entry; 204 struct hlist_node entry;
203 u64 chain_key; 205 u64 chain_key;
204}; 206};
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 6bd429b53b77..8fecd6d6f814 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -393,6 +393,17 @@ enum {
393 MLX5_CAP_OFF_CMDIF_CSUM = 46, 393 MLX5_CAP_OFF_CMDIF_CSUM = 46,
394}; 394};
395 395
396enum {
397 /*
398 * Max wqe size for rdma read is 512 bytes, so this
399 * limits our max_sge_rd as the wqe needs to fit:
400 * - ctrl segment (16 bytes)
401 * - rdma segment (16 bytes)
402 * - scatter elements (16 bytes each)
403 */
404 MLX5_MAX_SGE_RD = (512 - 16 - 16) / 16
405};
406
396struct mlx5_inbox_hdr { 407struct mlx5_inbox_hdr {
397 __be16 opcode; 408 __be16 opcode;
398 u8 rsvd[4]; 409 u8 rsvd[4];
diff --git a/include/linux/mm.h b/include/linux/mm.h
index a55e5be0894f..864d7221de84 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1031,6 +1031,8 @@ static inline bool page_mapped(struct page *page)
1031 page = compound_head(page); 1031 page = compound_head(page);
1032 if (atomic_read(compound_mapcount_ptr(page)) >= 0) 1032 if (atomic_read(compound_mapcount_ptr(page)) >= 0)
1033 return true; 1033 return true;
1034 if (PageHuge(page))
1035 return false;
1034 for (i = 0; i < hpage_nr_pages(page); i++) { 1036 for (i = 0; i < hpage_nr_pages(page); i++) {
1035 if (atomic_read(&page[i]._mapcount) >= 0) 1037 if (atomic_read(&page[i]._mapcount) >= 0)
1036 return true; 1038 return true;
@@ -1138,6 +1140,8 @@ struct zap_details {
1138 1140
1139struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr, 1141struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
1140 pte_t pte); 1142 pte_t pte);
1143struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
1144 pmd_t pmd);
1141 1145
1142int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address, 1146int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
1143 unsigned long size); 1147 unsigned long size);
diff --git a/include/linux/net.h b/include/linux/net.h
index 72c1e0622ce2..9aa49a05fe38 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -245,7 +245,15 @@ do { \
245 net_ratelimited_function(pr_warn, fmt, ##__VA_ARGS__) 245 net_ratelimited_function(pr_warn, fmt, ##__VA_ARGS__)
246#define net_info_ratelimited(fmt, ...) \ 246#define net_info_ratelimited(fmt, ...) \
247 net_ratelimited_function(pr_info, fmt, ##__VA_ARGS__) 247 net_ratelimited_function(pr_info, fmt, ##__VA_ARGS__)
248#if defined(DEBUG) 248#if defined(CONFIG_DYNAMIC_DEBUG)
249#define net_dbg_ratelimited(fmt, ...) \
250do { \
251 DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
252 if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT) && \
253 net_ratelimit()) \
254 __dynamic_pr_debug(&descriptor, fmt, ##__VA_ARGS__); \
255} while (0)
256#elif defined(DEBUG)
249#define net_dbg_ratelimited(fmt, ...) \ 257#define net_dbg_ratelimited(fmt, ...) \
250 net_ratelimited_function(pr_debug, fmt, ##__VA_ARGS__) 258 net_ratelimited_function(pr_debug, fmt, ##__VA_ARGS__)
251#else 259#else
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index f2182594160e..bcf012637d10 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -3992,7 +3992,7 @@ netdev_features_t netif_skb_features(struct sk_buff *skb);
3992 3992
3993static inline bool net_gso_ok(netdev_features_t features, int gso_type) 3993static inline bool net_gso_ok(netdev_features_t features, int gso_type)
3994{ 3994{
3995 netdev_features_t feature = gso_type << NETIF_F_GSO_SHIFT; 3995 netdev_features_t feature = (netdev_features_t)gso_type << NETIF_F_GSO_SHIFT;
3996 3996
3997 /* check flags correspondence */ 3997 /* check flags correspondence */
3998 BUILD_BUG_ON(SKB_GSO_TCPV4 != (NETIF_F_TSO >> NETIF_F_GSO_SHIFT)); 3998 BUILD_BUG_ON(SKB_GSO_TCPV4 != (NETIF_F_TSO >> NETIF_F_GSO_SHIFT));
diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h
index 8a0f55b6c2ba..88e3ab496e8f 100644
--- a/include/media/videobuf2-core.h
+++ b/include/media/videobuf2-core.h
@@ -375,6 +375,9 @@ struct vb2_ops {
375/** 375/**
376 * struct vb2_ops - driver-specific callbacks 376 * struct vb2_ops - driver-specific callbacks
377 * 377 *
378 * @verify_planes_array: Verify that a given user space structure contains
379 * enough planes for the buffer. This is called
380 * for each dequeued buffer.
378 * @fill_user_buffer: given a vb2_buffer fill in the userspace structure. 381 * @fill_user_buffer: given a vb2_buffer fill in the userspace structure.
379 * For V4L2 this is a struct v4l2_buffer. 382 * For V4L2 this is a struct v4l2_buffer.
380 * @fill_vb2_buffer: given a userspace structure, fill in the vb2_buffer. 383 * @fill_vb2_buffer: given a userspace structure, fill in the vb2_buffer.
@@ -384,6 +387,7 @@ struct vb2_ops {
384 * the vb2_buffer struct. 387 * the vb2_buffer struct.
385 */ 388 */
386struct vb2_buf_ops { 389struct vb2_buf_ops {
390 int (*verify_planes_array)(struct vb2_buffer *vb, const void *pb);
387 void (*fill_user_buffer)(struct vb2_buffer *vb, void *pb); 391 void (*fill_user_buffer)(struct vb2_buffer *vb, void *pb);
388 int (*fill_vb2_buffer)(struct vb2_buffer *vb, const void *pb, 392 int (*fill_vb2_buffer)(struct vb2_buffer *vb, const void *pb,
389 struct vb2_plane *planes); 393 struct vb2_plane *planes);
@@ -400,6 +404,9 @@ struct vb2_buf_ops {
400 * @fileio_read_once: report EOF after reading the first buffer 404 * @fileio_read_once: report EOF after reading the first buffer
401 * @fileio_write_immediately: queue buffer after each write() call 405 * @fileio_write_immediately: queue buffer after each write() call
402 * @allow_zero_bytesused: allow bytesused == 0 to be passed to the driver 406 * @allow_zero_bytesused: allow bytesused == 0 to be passed to the driver
407 * @quirk_poll_must_check_waiting_for_buffers: Return POLLERR at poll when QBUF
408 * has not been called. This is a vb1 idiom that has been adopted
409 * also by vb2.
403 * @lock: pointer to a mutex that protects the vb2_queue struct. The 410 * @lock: pointer to a mutex that protects the vb2_queue struct. The
404 * driver can set this to a mutex to let the v4l2 core serialize 411 * driver can set this to a mutex to let the v4l2 core serialize
405 * the queuing ioctls. If the driver wants to handle locking 412 * the queuing ioctls. If the driver wants to handle locking
@@ -463,6 +470,7 @@ struct vb2_queue {
463 unsigned fileio_read_once:1; 470 unsigned fileio_read_once:1;
464 unsigned fileio_write_immediately:1; 471 unsigned fileio_write_immediately:1;
465 unsigned allow_zero_bytesused:1; 472 unsigned allow_zero_bytesused:1;
473 unsigned quirk_poll_must_check_waiting_for_buffers:1;
466 474
467 struct mutex *lock; 475 struct mutex *lock;
468 void *owner; 476 void *owner;
diff --git a/include/net/vxlan.h b/include/net/vxlan.h
index 673e9f9e6da7..b8803165df91 100644
--- a/include/net/vxlan.h
+++ b/include/net/vxlan.h
@@ -317,7 +317,9 @@ static inline netdev_features_t vxlan_features_check(struct sk_buff *skb,
317 (skb->inner_protocol_type != ENCAP_TYPE_ETHER || 317 (skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
318 skb->inner_protocol != htons(ETH_P_TEB) || 318 skb->inner_protocol != htons(ETH_P_TEB) ||
319 (skb_inner_mac_header(skb) - skb_transport_header(skb) != 319 (skb_inner_mac_header(skb) - skb_transport_header(skb) !=
320 sizeof(struct udphdr) + sizeof(struct vxlanhdr)))) 320 sizeof(struct udphdr) + sizeof(struct vxlanhdr)) ||
321 (skb->ip_summed != CHECKSUM_NONE &&
322 !can_checksum_protocol(features, inner_eth_hdr(skb)->h_proto))))
321 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); 323 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
322 324
323 return features; 325 return features;
diff --git a/include/rdma/ib.h b/include/rdma/ib.h
index cf8f9e700e48..a6b93706b0fc 100644
--- a/include/rdma/ib.h
+++ b/include/rdma/ib.h
@@ -34,6 +34,7 @@
34#define _RDMA_IB_H 34#define _RDMA_IB_H
35 35
36#include <linux/types.h> 36#include <linux/types.h>
37#include <linux/sched.h>
37 38
38struct ib_addr { 39struct ib_addr {
39 union { 40 union {
@@ -86,4 +87,19 @@ struct sockaddr_ib {
86 __u64 sib_scope_id; 87 __u64 sib_scope_id;
87}; 88};
88 89
90/*
91 * The IB interfaces that use write() as bi-directional ioctl() are
92 * fundamentally unsafe, since there are lots of ways to trigger "write()"
93 * calls from various contexts with elevated privileges. That includes the
94 * traditional suid executable error message writes, but also various kernel
95 * interfaces that can write to file descriptors.
96 *
97 * This function provides protection for the legacy API by restricting the
98 * calling context.
99 */
100static inline bool ib_safe_file_access(struct file *filp)
101{
102 return filp->f_cred == current_cred() && segment_eq(get_fs(), USER_DS);
103}
104
89#endif /* _RDMA_IB_H */ 105#endif /* _RDMA_IB_H */
diff --git a/include/sound/hda_i915.h b/include/sound/hda_i915.h
index fa341fcb5829..f5842bcd9c94 100644
--- a/include/sound/hda_i915.h
+++ b/include/sound/hda_i915.h
@@ -9,7 +9,7 @@
9#ifdef CONFIG_SND_HDA_I915 9#ifdef CONFIG_SND_HDA_I915
10int snd_hdac_set_codec_wakeup(struct hdac_bus *bus, bool enable); 10int snd_hdac_set_codec_wakeup(struct hdac_bus *bus, bool enable);
11int snd_hdac_display_power(struct hdac_bus *bus, bool enable); 11int snd_hdac_display_power(struct hdac_bus *bus, bool enable);
12int snd_hdac_get_display_clk(struct hdac_bus *bus); 12void snd_hdac_i915_set_bclk(struct hdac_bus *bus);
13int snd_hdac_sync_audio_rate(struct hdac_bus *bus, hda_nid_t nid, int rate); 13int snd_hdac_sync_audio_rate(struct hdac_bus *bus, hda_nid_t nid, int rate);
14int snd_hdac_acomp_get_eld(struct hdac_bus *bus, hda_nid_t nid, 14int snd_hdac_acomp_get_eld(struct hdac_bus *bus, hda_nid_t nid,
15 bool *audio_enabled, char *buffer, int max_bytes); 15 bool *audio_enabled, char *buffer, int max_bytes);
@@ -25,9 +25,8 @@ static inline int snd_hdac_display_power(struct hdac_bus *bus, bool enable)
25{ 25{
26 return 0; 26 return 0;
27} 27}
28static inline int snd_hdac_get_display_clk(struct hdac_bus *bus) 28static inline void snd_hdac_i915_set_bclk(struct hdac_bus *bus)
29{ 29{
30 return 0;
31} 30}
32static inline int snd_hdac_sync_audio_rate(struct hdac_bus *bus, hda_nid_t nid, 31static inline int snd_hdac_sync_audio_rate(struct hdac_bus *bus, hda_nid_t nid,
33 int rate) 32 int rate)
diff --git a/include/uapi/linux/v4l2-dv-timings.h b/include/uapi/linux/v4l2-dv-timings.h
index c039f1d68a09..086168e18ca8 100644
--- a/include/uapi/linux/v4l2-dv-timings.h
+++ b/include/uapi/linux/v4l2-dv-timings.h
@@ -183,7 +183,8 @@
183 183
184#define V4L2_DV_BT_CEA_3840X2160P24 { \ 184#define V4L2_DV_BT_CEA_3840X2160P24 { \
185 .type = V4L2_DV_BT_656_1120, \ 185 .type = V4L2_DV_BT_656_1120, \
186 V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \ 186 V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
187 V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
187 297000000, 1276, 88, 296, 8, 10, 72, 0, 0, 0, \ 188 297000000, 1276, 88, 296, 8, 10, 72, 0, 0, 0, \
188 V4L2_DV_BT_STD_CEA861, \ 189 V4L2_DV_BT_STD_CEA861, \
189 V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \ 190 V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
@@ -191,14 +192,16 @@
191 192
192#define V4L2_DV_BT_CEA_3840X2160P25 { \ 193#define V4L2_DV_BT_CEA_3840X2160P25 { \
193 .type = V4L2_DV_BT_656_1120, \ 194 .type = V4L2_DV_BT_656_1120, \
194 V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \ 195 V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
196 V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
195 297000000, 1056, 88, 296, 8, 10, 72, 0, 0, 0, \ 197 297000000, 1056, 88, 296, 8, 10, 72, 0, 0, 0, \
196 V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \ 198 V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
197} 199}
198 200
199#define V4L2_DV_BT_CEA_3840X2160P30 { \ 201#define V4L2_DV_BT_CEA_3840X2160P30 { \
200 .type = V4L2_DV_BT_656_1120, \ 202 .type = V4L2_DV_BT_656_1120, \
201 V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \ 203 V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
204 V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
202 297000000, 176, 88, 296, 8, 10, 72, 0, 0, 0, \ 205 297000000, 176, 88, 296, 8, 10, 72, 0, 0, 0, \
203 V4L2_DV_BT_STD_CEA861, \ 206 V4L2_DV_BT_STD_CEA861, \
204 V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \ 207 V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
@@ -206,14 +209,16 @@
206 209
207#define V4L2_DV_BT_CEA_3840X2160P50 { \ 210#define V4L2_DV_BT_CEA_3840X2160P50 { \
208 .type = V4L2_DV_BT_656_1120, \ 211 .type = V4L2_DV_BT_656_1120, \
209 V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \ 212 V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
213 V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
210 594000000, 1056, 88, 296, 8, 10, 72, 0, 0, 0, \ 214 594000000, 1056, 88, 296, 8, 10, 72, 0, 0, 0, \
211 V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \ 215 V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
212} 216}
213 217
214#define V4L2_DV_BT_CEA_3840X2160P60 { \ 218#define V4L2_DV_BT_CEA_3840X2160P60 { \
215 .type = V4L2_DV_BT_656_1120, \ 219 .type = V4L2_DV_BT_656_1120, \
216 V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \ 220 V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
221 V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
217 594000000, 176, 88, 296, 8, 10, 72, 0, 0, 0, \ 222 594000000, 176, 88, 296, 8, 10, 72, 0, 0, 0, \
218 V4L2_DV_BT_STD_CEA861, \ 223 V4L2_DV_BT_STD_CEA861, \
219 V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \ 224 V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
@@ -221,7 +226,8 @@
221 226
222#define V4L2_DV_BT_CEA_4096X2160P24 { \ 227#define V4L2_DV_BT_CEA_4096X2160P24 { \
223 .type = V4L2_DV_BT_656_1120, \ 228 .type = V4L2_DV_BT_656_1120, \
224 V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \ 229 V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
230 V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
225 297000000, 1020, 88, 296, 8, 10, 72, 0, 0, 0, \ 231 297000000, 1020, 88, 296, 8, 10, 72, 0, 0, 0, \
226 V4L2_DV_BT_STD_CEA861, \ 232 V4L2_DV_BT_STD_CEA861, \
227 V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \ 233 V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
@@ -229,14 +235,16 @@
229 235
230#define V4L2_DV_BT_CEA_4096X2160P25 { \ 236#define V4L2_DV_BT_CEA_4096X2160P25 { \
231 .type = V4L2_DV_BT_656_1120, \ 237 .type = V4L2_DV_BT_656_1120, \
232 V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \ 238 V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
239 V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
233 297000000, 968, 88, 128, 8, 10, 72, 0, 0, 0, \ 240 297000000, 968, 88, 128, 8, 10, 72, 0, 0, 0, \
234 V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \ 241 V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
235} 242}
236 243
237#define V4L2_DV_BT_CEA_4096X2160P30 { \ 244#define V4L2_DV_BT_CEA_4096X2160P30 { \
238 .type = V4L2_DV_BT_656_1120, \ 245 .type = V4L2_DV_BT_656_1120, \
239 V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \ 246 V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
247 V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
240 297000000, 88, 88, 128, 8, 10, 72, 0, 0, 0, \ 248 297000000, 88, 88, 128, 8, 10, 72, 0, 0, 0, \
241 V4L2_DV_BT_STD_CEA861, \ 249 V4L2_DV_BT_STD_CEA861, \
242 V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \ 250 V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
@@ -244,14 +252,16 @@
244 252
245#define V4L2_DV_BT_CEA_4096X2160P50 { \ 253#define V4L2_DV_BT_CEA_4096X2160P50 { \
246 .type = V4L2_DV_BT_656_1120, \ 254 .type = V4L2_DV_BT_656_1120, \
247 V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \ 255 V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
256 V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
248 594000000, 968, 88, 128, 8, 10, 72, 0, 0, 0, \ 257 594000000, 968, 88, 128, 8, 10, 72, 0, 0, 0, \
249 V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \ 258 V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
250} 259}
251 260
252#define V4L2_DV_BT_CEA_4096X2160P60 { \ 261#define V4L2_DV_BT_CEA_4096X2160P60 { \
253 .type = V4L2_DV_BT_656_1120, \ 262 .type = V4L2_DV_BT_656_1120, \
254 V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \ 263 V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
264 V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
255 594000000, 88, 88, 128, 8, 10, 72, 0, 0, 0, \ 265 594000000, 88, 88, 128, 8, 10, 72, 0, 0, 0, \
256 V4L2_DV_BT_STD_CEA861, \ 266 V4L2_DV_BT_STD_CEA861, \
257 V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \ 267 V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c
index f2ece3c174a5..8f94ca1860cf 100644
--- a/kernel/bpf/inode.c
+++ b/kernel/bpf/inode.c
@@ -31,10 +31,10 @@ static void *bpf_any_get(void *raw, enum bpf_type type)
31{ 31{
32 switch (type) { 32 switch (type) {
33 case BPF_TYPE_PROG: 33 case BPF_TYPE_PROG:
34 atomic_inc(&((struct bpf_prog *)raw)->aux->refcnt); 34 raw = bpf_prog_inc(raw);
35 break; 35 break;
36 case BPF_TYPE_MAP: 36 case BPF_TYPE_MAP:
37 bpf_map_inc(raw, true); 37 raw = bpf_map_inc(raw, true);
38 break; 38 break;
39 default: 39 default:
40 WARN_ON_ONCE(1); 40 WARN_ON_ONCE(1);
@@ -297,7 +297,8 @@ static void *bpf_obj_do_get(const struct filename *pathname,
297 goto out; 297 goto out;
298 298
299 raw = bpf_any_get(inode->i_private, *type); 299 raw = bpf_any_get(inode->i_private, *type);
300 touch_atime(&path); 300 if (!IS_ERR(raw))
301 touch_atime(&path);
301 302
302 path_put(&path); 303 path_put(&path);
303 return raw; 304 return raw;
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index adc5e4bd74f8..cf5e9f7ad13a 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -218,11 +218,18 @@ struct bpf_map *__bpf_map_get(struct fd f)
218 return f.file->private_data; 218 return f.file->private_data;
219} 219}
220 220
221void bpf_map_inc(struct bpf_map *map, bool uref) 221/* prog's and map's refcnt limit */
222#define BPF_MAX_REFCNT 32768
223
224struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref)
222{ 225{
223 atomic_inc(&map->refcnt); 226 if (atomic_inc_return(&map->refcnt) > BPF_MAX_REFCNT) {
227 atomic_dec(&map->refcnt);
228 return ERR_PTR(-EBUSY);
229 }
224 if (uref) 230 if (uref)
225 atomic_inc(&map->usercnt); 231 atomic_inc(&map->usercnt);
232 return map;
226} 233}
227 234
228struct bpf_map *bpf_map_get_with_uref(u32 ufd) 235struct bpf_map *bpf_map_get_with_uref(u32 ufd)
@@ -234,7 +241,7 @@ struct bpf_map *bpf_map_get_with_uref(u32 ufd)
234 if (IS_ERR(map)) 241 if (IS_ERR(map))
235 return map; 242 return map;
236 243
237 bpf_map_inc(map, true); 244 map = bpf_map_inc(map, true);
238 fdput(f); 245 fdput(f);
239 246
240 return map; 247 return map;
@@ -658,6 +665,15 @@ static struct bpf_prog *__bpf_prog_get(struct fd f)
658 return f.file->private_data; 665 return f.file->private_data;
659} 666}
660 667
668struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog)
669{
670 if (atomic_inc_return(&prog->aux->refcnt) > BPF_MAX_REFCNT) {
671 atomic_dec(&prog->aux->refcnt);
672 return ERR_PTR(-EBUSY);
673 }
674 return prog;
675}
676
661/* called by sockets/tracing/seccomp before attaching program to an event 677/* called by sockets/tracing/seccomp before attaching program to an event
662 * pairs with bpf_prog_put() 678 * pairs with bpf_prog_put()
663 */ 679 */
@@ -670,7 +686,7 @@ struct bpf_prog *bpf_prog_get(u32 ufd)
670 if (IS_ERR(prog)) 686 if (IS_ERR(prog))
671 return prog; 687 return prog;
672 688
673 atomic_inc(&prog->aux->refcnt); 689 prog = bpf_prog_inc(prog);
674 fdput(f); 690 fdput(f);
675 691
676 return prog; 692 return prog;
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 56f18068b52b..63554b6d4e25 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -249,16 +249,6 @@ static const char * const reg_type_str[] = {
249 [CONST_IMM] = "imm", 249 [CONST_IMM] = "imm",
250}; 250};
251 251
252static const struct {
253 int map_type;
254 int func_id;
255} func_limit[] = {
256 {BPF_MAP_TYPE_PROG_ARRAY, BPF_FUNC_tail_call},
257 {BPF_MAP_TYPE_PERF_EVENT_ARRAY, BPF_FUNC_perf_event_read},
258 {BPF_MAP_TYPE_PERF_EVENT_ARRAY, BPF_FUNC_perf_event_output},
259 {BPF_MAP_TYPE_STACK_TRACE, BPF_FUNC_get_stackid},
260};
261
262static void print_verifier_state(struct verifier_env *env) 252static void print_verifier_state(struct verifier_env *env)
263{ 253{
264 enum bpf_reg_type t; 254 enum bpf_reg_type t;
@@ -943,27 +933,52 @@ static int check_func_arg(struct verifier_env *env, u32 regno,
943 933
944static int check_map_func_compatibility(struct bpf_map *map, int func_id) 934static int check_map_func_compatibility(struct bpf_map *map, int func_id)
945{ 935{
946 bool bool_map, bool_func;
947 int i;
948
949 if (!map) 936 if (!map)
950 return 0; 937 return 0;
951 938
952 for (i = 0; i < ARRAY_SIZE(func_limit); i++) { 939 /* We need a two way check, first is from map perspective ... */
953 bool_map = (map->map_type == func_limit[i].map_type); 940 switch (map->map_type) {
954 bool_func = (func_id == func_limit[i].func_id); 941 case BPF_MAP_TYPE_PROG_ARRAY:
955 /* only when map & func pair match it can continue. 942 if (func_id != BPF_FUNC_tail_call)
956 * don't allow any other map type to be passed into 943 goto error;
957 * the special func; 944 break;
958 */ 945 case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
959 if (bool_func && bool_map != bool_func) { 946 if (func_id != BPF_FUNC_perf_event_read &&
960 verbose("cannot pass map_type %d into func %d\n", 947 func_id != BPF_FUNC_perf_event_output)
961 map->map_type, func_id); 948 goto error;
962 return -EINVAL; 949 break;
963 } 950 case BPF_MAP_TYPE_STACK_TRACE:
951 if (func_id != BPF_FUNC_get_stackid)
952 goto error;
953 break;
954 default:
955 break;
956 }
957
958 /* ... and second from the function itself. */
959 switch (func_id) {
960 case BPF_FUNC_tail_call:
961 if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
962 goto error;
963 break;
964 case BPF_FUNC_perf_event_read:
965 case BPF_FUNC_perf_event_output:
966 if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY)
967 goto error;
968 break;
969 case BPF_FUNC_get_stackid:
970 if (map->map_type != BPF_MAP_TYPE_STACK_TRACE)
971 goto error;
972 break;
973 default:
974 break;
964 } 975 }
965 976
966 return 0; 977 return 0;
978error:
979 verbose("cannot pass map_type %d into func %d\n",
980 map->map_type, func_id);
981 return -EINVAL;
967} 982}
968 983
969static int check_raw_mode(const struct bpf_func_proto *fn) 984static int check_raw_mode(const struct bpf_func_proto *fn)
@@ -2111,15 +2126,18 @@ static int replace_map_fd_with_map_ptr(struct verifier_env *env)
2111 return -E2BIG; 2126 return -E2BIG;
2112 } 2127 }
2113 2128
2114 /* remember this map */
2115 env->used_maps[env->used_map_cnt++] = map;
2116
2117 /* hold the map. If the program is rejected by verifier, 2129 /* hold the map. If the program is rejected by verifier,
2118 * the map will be released by release_maps() or it 2130 * the map will be released by release_maps() or it
2119 * will be used by the valid program until it's unloaded 2131 * will be used by the valid program until it's unloaded
2120 * and all maps are released in free_bpf_prog_info() 2132 * and all maps are released in free_bpf_prog_info()
2121 */ 2133 */
2122 bpf_map_inc(map, false); 2134 map = bpf_map_inc(map, false);
2135 if (IS_ERR(map)) {
2136 fdput(f);
2137 return PTR_ERR(map);
2138 }
2139 env->used_maps[env->used_map_cnt++] = map;
2140
2123 fdput(f); 2141 fdput(f);
2124next_insn: 2142next_insn:
2125 insn++; 2143 insn++;
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 671dc05c0b0f..909a7d31ffd3 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -2825,9 +2825,10 @@ static ssize_t __cgroup_procs_write(struct kernfs_open_file *of, char *buf,
2825 size_t nbytes, loff_t off, bool threadgroup) 2825 size_t nbytes, loff_t off, bool threadgroup)
2826{ 2826{
2827 struct task_struct *tsk; 2827 struct task_struct *tsk;
2828 struct cgroup_subsys *ss;
2828 struct cgroup *cgrp; 2829 struct cgroup *cgrp;
2829 pid_t pid; 2830 pid_t pid;
2830 int ret; 2831 int ssid, ret;
2831 2832
2832 if (kstrtoint(strstrip(buf), 0, &pid) || pid < 0) 2833 if (kstrtoint(strstrip(buf), 0, &pid) || pid < 0)
2833 return -EINVAL; 2834 return -EINVAL;
@@ -2875,8 +2876,10 @@ out_unlock_rcu:
2875 rcu_read_unlock(); 2876 rcu_read_unlock();
2876out_unlock_threadgroup: 2877out_unlock_threadgroup:
2877 percpu_up_write(&cgroup_threadgroup_rwsem); 2878 percpu_up_write(&cgroup_threadgroup_rwsem);
2879 for_each_subsys(ss, ssid)
2880 if (ss->post_attach)
2881 ss->post_attach();
2878 cgroup_kn_unlock(of->kn); 2882 cgroup_kn_unlock(of->kn);
2879 cpuset_post_attach_flush();
2880 return ret ?: nbytes; 2883 return ret ?: nbytes;
2881} 2884}
2882 2885
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 00ab5c2b7c5b..1902956baba1 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -58,7 +58,6 @@
58#include <asm/uaccess.h> 58#include <asm/uaccess.h>
59#include <linux/atomic.h> 59#include <linux/atomic.h>
60#include <linux/mutex.h> 60#include <linux/mutex.h>
61#include <linux/workqueue.h>
62#include <linux/cgroup.h> 61#include <linux/cgroup.h>
63#include <linux/wait.h> 62#include <linux/wait.h>
64 63
@@ -1016,7 +1015,7 @@ static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
1016 } 1015 }
1017} 1016}
1018 1017
1019void cpuset_post_attach_flush(void) 1018static void cpuset_post_attach(void)
1020{ 1019{
1021 flush_workqueue(cpuset_migrate_mm_wq); 1020 flush_workqueue(cpuset_migrate_mm_wq);
1022} 1021}
@@ -2087,6 +2086,7 @@ struct cgroup_subsys cpuset_cgrp_subsys = {
2087 .can_attach = cpuset_can_attach, 2086 .can_attach = cpuset_can_attach,
2088 .cancel_attach = cpuset_cancel_attach, 2087 .cancel_attach = cpuset_cancel_attach,
2089 .attach = cpuset_attach, 2088 .attach = cpuset_attach,
2089 .post_attach = cpuset_post_attach,
2090 .bind = cpuset_bind, 2090 .bind = cpuset_bind,
2091 .legacy_cftypes = files, 2091 .legacy_cftypes = files,
2092 .early_init = true, 2092 .early_init = true,
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 9eb23dc27462..0bdc6e7d4908 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -412,7 +412,8 @@ int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
412 if (ret || !write) 412 if (ret || !write)
413 return ret; 413 return ret;
414 414
415 if (sysctl_perf_cpu_time_max_percent == 100) { 415 if (sysctl_perf_cpu_time_max_percent == 100 ||
416 sysctl_perf_cpu_time_max_percent == 0) {
416 printk(KERN_WARNING 417 printk(KERN_WARNING
417 "perf: Dynamic interrupt throttling disabled, can hang your system!\n"); 418 "perf: Dynamic interrupt throttling disabled, can hang your system!\n");
418 WRITE_ONCE(perf_sample_allowed_ns, 0); 419 WRITE_ONCE(perf_sample_allowed_ns, 0);
@@ -1105,6 +1106,7 @@ static void put_ctx(struct perf_event_context *ctx)
1105 * function. 1106 * function.
1106 * 1107 *
1107 * Lock order: 1108 * Lock order:
1109 * cred_guard_mutex
1108 * task_struct::perf_event_mutex 1110 * task_struct::perf_event_mutex
1109 * perf_event_context::mutex 1111 * perf_event_context::mutex
1110 * perf_event::child_mutex; 1112 * perf_event::child_mutex;
@@ -3420,7 +3422,6 @@ static struct task_struct *
3420find_lively_task_by_vpid(pid_t vpid) 3422find_lively_task_by_vpid(pid_t vpid)
3421{ 3423{
3422 struct task_struct *task; 3424 struct task_struct *task;
3423 int err;
3424 3425
3425 rcu_read_lock(); 3426 rcu_read_lock();
3426 if (!vpid) 3427 if (!vpid)
@@ -3434,16 +3435,7 @@ find_lively_task_by_vpid(pid_t vpid)
3434 if (!task) 3435 if (!task)
3435 return ERR_PTR(-ESRCH); 3436 return ERR_PTR(-ESRCH);
3436 3437
3437 /* Reuse ptrace permission checks for now. */
3438 err = -EACCES;
3439 if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS))
3440 goto errout;
3441
3442 return task; 3438 return task;
3443errout:
3444 put_task_struct(task);
3445 return ERR_PTR(err);
3446
3447} 3439}
3448 3440
3449/* 3441/*
@@ -8446,6 +8438,24 @@ SYSCALL_DEFINE5(perf_event_open,
8446 8438
8447 get_online_cpus(); 8439 get_online_cpus();
8448 8440
8441 if (task) {
8442 err = mutex_lock_interruptible(&task->signal->cred_guard_mutex);
8443 if (err)
8444 goto err_cpus;
8445
8446 /*
8447 * Reuse ptrace permission checks for now.
8448 *
8449 * We must hold cred_guard_mutex across this and any potential
8450 * perf_install_in_context() call for this new event to
8451 * serialize against exec() altering our credentials (and the
8452 * perf_event_exit_task() that could imply).
8453 */
8454 err = -EACCES;
8455 if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS))
8456 goto err_cred;
8457 }
8458
8449 if (flags & PERF_FLAG_PID_CGROUP) 8459 if (flags & PERF_FLAG_PID_CGROUP)
8450 cgroup_fd = pid; 8460 cgroup_fd = pid;
8451 8461
@@ -8453,7 +8463,7 @@ SYSCALL_DEFINE5(perf_event_open,
8453 NULL, NULL, cgroup_fd); 8463 NULL, NULL, cgroup_fd);
8454 if (IS_ERR(event)) { 8464 if (IS_ERR(event)) {
8455 err = PTR_ERR(event); 8465 err = PTR_ERR(event);
8456 goto err_cpus; 8466 goto err_cred;
8457 } 8467 }
8458 8468
8459 if (is_sampling_event(event)) { 8469 if (is_sampling_event(event)) {
@@ -8512,11 +8522,6 @@ SYSCALL_DEFINE5(perf_event_open,
8512 goto err_context; 8522 goto err_context;
8513 } 8523 }
8514 8524
8515 if (task) {
8516 put_task_struct(task);
8517 task = NULL;
8518 }
8519
8520 /* 8525 /*
8521 * Look up the group leader (we will attach this event to it): 8526 * Look up the group leader (we will attach this event to it):
8522 */ 8527 */
@@ -8614,6 +8619,11 @@ SYSCALL_DEFINE5(perf_event_open,
8614 8619
8615 WARN_ON_ONCE(ctx->parent_ctx); 8620 WARN_ON_ONCE(ctx->parent_ctx);
8616 8621
8622 /*
8623 * This is the point on no return; we cannot fail hereafter. This is
8624 * where we start modifying current state.
8625 */
8626
8617 if (move_group) { 8627 if (move_group) {
8618 /* 8628 /*
8619 * See perf_event_ctx_lock() for comments on the details 8629 * See perf_event_ctx_lock() for comments on the details
@@ -8685,6 +8695,11 @@ SYSCALL_DEFINE5(perf_event_open,
8685 mutex_unlock(&gctx->mutex); 8695 mutex_unlock(&gctx->mutex);
8686 mutex_unlock(&ctx->mutex); 8696 mutex_unlock(&ctx->mutex);
8687 8697
8698 if (task) {
8699 mutex_unlock(&task->signal->cred_guard_mutex);
8700 put_task_struct(task);
8701 }
8702
8688 put_online_cpus(); 8703 put_online_cpus();
8689 8704
8690 mutex_lock(&current->perf_event_mutex); 8705 mutex_lock(&current->perf_event_mutex);
@@ -8717,6 +8732,9 @@ err_alloc:
8717 */ 8732 */
8718 if (!event_file) 8733 if (!event_file)
8719 free_event(event); 8734 free_event(event);
8735err_cred:
8736 if (task)
8737 mutex_unlock(&task->signal->cred_guard_mutex);
8720err_cpus: 8738err_cpus:
8721 put_online_cpus(); 8739 put_online_cpus();
8722err_task: 8740err_task:
@@ -9001,6 +9019,9 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
9001 9019
9002/* 9020/*
9003 * When a child task exits, feed back event values to parent events. 9021 * When a child task exits, feed back event values to parent events.
9022 *
9023 * Can be called with cred_guard_mutex held when called from
9024 * install_exec_creds().
9004 */ 9025 */
9005void perf_event_exit_task(struct task_struct *child) 9026void perf_event_exit_task(struct task_struct *child)
9006{ 9027{
diff --git a/kernel/kcov.c b/kernel/kcov.c
index 3efbee0834a8..a02f2dddd1d7 100644
--- a/kernel/kcov.c
+++ b/kernel/kcov.c
@@ -1,5 +1,6 @@
1#define pr_fmt(fmt) "kcov: " fmt 1#define pr_fmt(fmt) "kcov: " fmt
2 2
3#define DISABLE_BRANCH_PROFILING
3#include <linux/compiler.h> 4#include <linux/compiler.h>
4#include <linux/types.h> 5#include <linux/types.h>
5#include <linux/file.h> 6#include <linux/file.h>
@@ -43,7 +44,7 @@ struct kcov {
43 * Entry point from instrumented code. 44 * Entry point from instrumented code.
44 * This is called once per basic-block/edge. 45 * This is called once per basic-block/edge.
45 */ 46 */
46void __sanitizer_cov_trace_pc(void) 47void notrace __sanitizer_cov_trace_pc(void)
47{ 48{
48 struct task_struct *t; 49 struct task_struct *t;
49 enum kcov_mode mode; 50 enum kcov_mode mode;
diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c
index 8d34308ea449..1391d3ee3b86 100644
--- a/kernel/kexec_core.c
+++ b/kernel/kexec_core.c
@@ -1415,6 +1415,9 @@ static int __init crash_save_vmcoreinfo_init(void)
1415 VMCOREINFO_OFFSET(page, lru); 1415 VMCOREINFO_OFFSET(page, lru);
1416 VMCOREINFO_OFFSET(page, _mapcount); 1416 VMCOREINFO_OFFSET(page, _mapcount);
1417 VMCOREINFO_OFFSET(page, private); 1417 VMCOREINFO_OFFSET(page, private);
1418 VMCOREINFO_OFFSET(page, compound_dtor);
1419 VMCOREINFO_OFFSET(page, compound_order);
1420 VMCOREINFO_OFFSET(page, compound_head);
1418 VMCOREINFO_OFFSET(pglist_data, node_zones); 1421 VMCOREINFO_OFFSET(pglist_data, node_zones);
1419 VMCOREINFO_OFFSET(pglist_data, nr_zones); 1422 VMCOREINFO_OFFSET(pglist_data, nr_zones);
1420#ifdef CONFIG_FLAT_NODE_MEM_MAP 1423#ifdef CONFIG_FLAT_NODE_MEM_MAP
@@ -1447,8 +1450,8 @@ static int __init crash_save_vmcoreinfo_init(void)
1447#ifdef CONFIG_X86 1450#ifdef CONFIG_X86
1448 VMCOREINFO_NUMBER(KERNEL_IMAGE_SIZE); 1451 VMCOREINFO_NUMBER(KERNEL_IMAGE_SIZE);
1449#endif 1452#endif
1450#ifdef CONFIG_HUGETLBFS 1453#ifdef CONFIG_HUGETLB_PAGE
1451 VMCOREINFO_SYMBOL(free_huge_page); 1454 VMCOREINFO_NUMBER(HUGETLB_PAGE_DTOR);
1452#endif 1455#endif
1453 1456
1454 arch_crash_save_vmcoreinfo(); 1457 arch_crash_save_vmcoreinfo();
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index ed9410936a22..78c1c0ee6dc1 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -2176,15 +2176,37 @@ cache_hit:
2176 chain->irq_context = hlock->irq_context; 2176 chain->irq_context = hlock->irq_context;
2177 i = get_first_held_lock(curr, hlock); 2177 i = get_first_held_lock(curr, hlock);
2178 chain->depth = curr->lockdep_depth + 1 - i; 2178 chain->depth = curr->lockdep_depth + 1 - i;
2179
2180 BUILD_BUG_ON((1UL << 24) <= ARRAY_SIZE(chain_hlocks));
2181 BUILD_BUG_ON((1UL << 6) <= ARRAY_SIZE(curr->held_locks));
2182 BUILD_BUG_ON((1UL << 8*sizeof(chain_hlocks[0])) <= ARRAY_SIZE(lock_classes));
2183
2179 if (likely(nr_chain_hlocks + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS)) { 2184 if (likely(nr_chain_hlocks + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS)) {
2180 chain->base = nr_chain_hlocks; 2185 chain->base = nr_chain_hlocks;
2181 nr_chain_hlocks += chain->depth;
2182 for (j = 0; j < chain->depth - 1; j++, i++) { 2186 for (j = 0; j < chain->depth - 1; j++, i++) {
2183 int lock_id = curr->held_locks[i].class_idx - 1; 2187 int lock_id = curr->held_locks[i].class_idx - 1;
2184 chain_hlocks[chain->base + j] = lock_id; 2188 chain_hlocks[chain->base + j] = lock_id;
2185 } 2189 }
2186 chain_hlocks[chain->base + j] = class - lock_classes; 2190 chain_hlocks[chain->base + j] = class - lock_classes;
2187 } 2191 }
2192
2193 if (nr_chain_hlocks < MAX_LOCKDEP_CHAIN_HLOCKS)
2194 nr_chain_hlocks += chain->depth;
2195
2196#ifdef CONFIG_DEBUG_LOCKDEP
2197 /*
2198 * Important for check_no_collision().
2199 */
2200 if (unlikely(nr_chain_hlocks > MAX_LOCKDEP_CHAIN_HLOCKS)) {
2201 if (debug_locks_off_graph_unlock())
2202 return 0;
2203
2204 print_lockdep_off("BUG: MAX_LOCKDEP_CHAIN_HLOCKS too low!");
2205 dump_stack();
2206 return 0;
2207 }
2208#endif
2209
2188 hlist_add_head_rcu(&chain->entry, hash_head); 2210 hlist_add_head_rcu(&chain->entry, hash_head);
2189 debug_atomic_inc(chain_lookup_misses); 2211 debug_atomic_inc(chain_lookup_misses);
2190 inc_chains(); 2212 inc_chains();
@@ -2932,6 +2954,11 @@ static int mark_irqflags(struct task_struct *curr, struct held_lock *hlock)
2932 return 1; 2954 return 1;
2933} 2955}
2934 2956
2957static inline unsigned int task_irq_context(struct task_struct *task)
2958{
2959 return 2 * !!task->hardirq_context + !!task->softirq_context;
2960}
2961
2935static int separate_irq_context(struct task_struct *curr, 2962static int separate_irq_context(struct task_struct *curr,
2936 struct held_lock *hlock) 2963 struct held_lock *hlock)
2937{ 2964{
@@ -2940,8 +2967,6 @@ static int separate_irq_context(struct task_struct *curr,
2940 /* 2967 /*
2941 * Keep track of points where we cross into an interrupt context: 2968 * Keep track of points where we cross into an interrupt context:
2942 */ 2969 */
2943 hlock->irq_context = 2*(curr->hardirq_context ? 1 : 0) +
2944 curr->softirq_context;
2945 if (depth) { 2970 if (depth) {
2946 struct held_lock *prev_hlock; 2971 struct held_lock *prev_hlock;
2947 2972
@@ -2973,6 +2998,11 @@ static inline int mark_irqflags(struct task_struct *curr,
2973 return 1; 2998 return 1;
2974} 2999}
2975 3000
3001static inline unsigned int task_irq_context(struct task_struct *task)
3002{
3003 return 0;
3004}
3005
2976static inline int separate_irq_context(struct task_struct *curr, 3006static inline int separate_irq_context(struct task_struct *curr,
2977 struct held_lock *hlock) 3007 struct held_lock *hlock)
2978{ 3008{
@@ -3241,6 +3271,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
3241 hlock->acquire_ip = ip; 3271 hlock->acquire_ip = ip;
3242 hlock->instance = lock; 3272 hlock->instance = lock;
3243 hlock->nest_lock = nest_lock; 3273 hlock->nest_lock = nest_lock;
3274 hlock->irq_context = task_irq_context(curr);
3244 hlock->trylock = trylock; 3275 hlock->trylock = trylock;
3245 hlock->read = read; 3276 hlock->read = read;
3246 hlock->check = check; 3277 hlock->check = check;
diff --git a/kernel/locking/lockdep_proc.c b/kernel/locking/lockdep_proc.c
index dbb61a302548..a0f61effad25 100644
--- a/kernel/locking/lockdep_proc.c
+++ b/kernel/locking/lockdep_proc.c
@@ -141,6 +141,8 @@ static int lc_show(struct seq_file *m, void *v)
141 int i; 141 int i;
142 142
143 if (v == SEQ_START_TOKEN) { 143 if (v == SEQ_START_TOKEN) {
144 if (nr_chain_hlocks > MAX_LOCKDEP_CHAIN_HLOCKS)
145 seq_printf(m, "(buggered) ");
144 seq_printf(m, "all lock chains:\n"); 146 seq_printf(m, "all lock chains:\n");
145 return 0; 147 return 0;
146 } 148 }
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 2232ae3e3ad6..3bfdff06eea7 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -666,6 +666,35 @@ static void set_work_pool_and_clear_pending(struct work_struct *work,
666 */ 666 */
667 smp_wmb(); 667 smp_wmb();
668 set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, 0); 668 set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, 0);
669 /*
670 * The following mb guarantees that previous clear of a PENDING bit
671 * will not be reordered with any speculative LOADS or STORES from
672 * work->current_func, which is executed afterwards. This possible
673 * reordering can lead to a missed execution on attempt to qeueue
674 * the same @work. E.g. consider this case:
675 *
676 * CPU#0 CPU#1
677 * ---------------------------- --------------------------------
678 *
679 * 1 STORE event_indicated
680 * 2 queue_work_on() {
681 * 3 test_and_set_bit(PENDING)
682 * 4 } set_..._and_clear_pending() {
683 * 5 set_work_data() # clear bit
684 * 6 smp_mb()
685 * 7 work->current_func() {
686 * 8 LOAD event_indicated
687 * }
688 *
689 * Without an explicit full barrier speculative LOAD on line 8 can
690 * be executed before CPU#0 does STORE on line 1. If that happens,
691 * CPU#0 observes the PENDING bit is still set and new execution of
692 * a @work is not queued in a hope, that CPU#1 will eventually
693 * finish the queued @work. Meanwhile CPU#1 does not see
694 * event_indicated is set, because speculative LOAD was executed
695 * before actual STORE.
696 */
697 smp_mb();
669} 698}
670 699
671static void clear_work_data(struct work_struct *work) 700static void clear_work_data(struct work_struct *work)
diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index 654c9d87e83a..9e0b0315a724 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -210,10 +210,6 @@ depot_stack_handle_t depot_save_stack(struct stack_trace *trace,
210 goto fast_exit; 210 goto fast_exit;
211 211
212 hash = hash_stack(trace->entries, trace->nr_entries); 212 hash = hash_stack(trace->entries, trace->nr_entries);
213 /* Bad luck, we won't store this stack. */
214 if (hash == 0)
215 goto exit;
216
217 bucket = &stack_table[hash & STACK_HASH_MASK]; 213 bucket = &stack_table[hash & STACK_HASH_MASK];
218 214
219 /* 215 /*
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 86f9f8b82f8e..df67b53ae3c5 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -232,7 +232,7 @@ retry:
232 return READ_ONCE(huge_zero_page); 232 return READ_ONCE(huge_zero_page);
233} 233}
234 234
235static void put_huge_zero_page(void) 235void put_huge_zero_page(void)
236{ 236{
237 /* 237 /*
238 * Counter should never go to zero here. Only shrinker can put 238 * Counter should never go to zero here. Only shrinker can put
@@ -1684,12 +1684,12 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
1684 if (vma_is_dax(vma)) { 1684 if (vma_is_dax(vma)) {
1685 spin_unlock(ptl); 1685 spin_unlock(ptl);
1686 if (is_huge_zero_pmd(orig_pmd)) 1686 if (is_huge_zero_pmd(orig_pmd))
1687 put_huge_zero_page(); 1687 tlb_remove_page(tlb, pmd_page(orig_pmd));
1688 } else if (is_huge_zero_pmd(orig_pmd)) { 1688 } else if (is_huge_zero_pmd(orig_pmd)) {
1689 pte_free(tlb->mm, pgtable_trans_huge_withdraw(tlb->mm, pmd)); 1689 pte_free(tlb->mm, pgtable_trans_huge_withdraw(tlb->mm, pmd));
1690 atomic_long_dec(&tlb->mm->nr_ptes); 1690 atomic_long_dec(&tlb->mm->nr_ptes);
1691 spin_unlock(ptl); 1691 spin_unlock(ptl);
1692 put_huge_zero_page(); 1692 tlb_remove_page(tlb, pmd_page(orig_pmd));
1693 } else { 1693 } else {
1694 struct page *page = pmd_page(orig_pmd); 1694 struct page *page = pmd_page(orig_pmd);
1695 page_remove_rmap(page, true); 1695 page_remove_rmap(page, true);
@@ -1960,10 +1960,9 @@ int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
1960 * page fault if needed. 1960 * page fault if needed.
1961 */ 1961 */
1962 return 0; 1962 return 0;
1963 if (vma->vm_ops) 1963 if (vma->vm_ops || (vm_flags & VM_NO_THP))
1964 /* khugepaged not yet working on file or special mappings */ 1964 /* khugepaged not yet working on file or special mappings */
1965 return 0; 1965 return 0;
1966 VM_BUG_ON_VMA(vm_flags & VM_NO_THP, vma);
1967 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; 1966 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
1968 hend = vma->vm_end & HPAGE_PMD_MASK; 1967 hend = vma->vm_end & HPAGE_PMD_MASK;
1969 if (hstart < hend) 1968 if (hstart < hend)
@@ -2352,8 +2351,7 @@ static bool hugepage_vma_check(struct vm_area_struct *vma)
2352 return false; 2351 return false;
2353 if (is_vma_temporary_stack(vma)) 2352 if (is_vma_temporary_stack(vma))
2354 return false; 2353 return false;
2355 VM_BUG_ON_VMA(vma->vm_flags & VM_NO_THP, vma); 2354 return !(vma->vm_flags & VM_NO_THP);
2356 return true;
2357} 2355}
2358 2356
2359static void collapse_huge_page(struct mm_struct *mm, 2357static void collapse_huge_page(struct mm_struct *mm,
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 36db05fa8acb..fe787f5c41bd 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -207,6 +207,7 @@ static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);
207/* "mc" and its members are protected by cgroup_mutex */ 207/* "mc" and its members are protected by cgroup_mutex */
208static struct move_charge_struct { 208static struct move_charge_struct {
209 spinlock_t lock; /* for from, to */ 209 spinlock_t lock; /* for from, to */
210 struct mm_struct *mm;
210 struct mem_cgroup *from; 211 struct mem_cgroup *from;
211 struct mem_cgroup *to; 212 struct mem_cgroup *to;
212 unsigned long flags; 213 unsigned long flags;
@@ -4667,6 +4668,8 @@ static void __mem_cgroup_clear_mc(void)
4667 4668
4668static void mem_cgroup_clear_mc(void) 4669static void mem_cgroup_clear_mc(void)
4669{ 4670{
4671 struct mm_struct *mm = mc.mm;
4672
4670 /* 4673 /*
4671 * we must clear moving_task before waking up waiters at the end of 4674 * we must clear moving_task before waking up waiters at the end of
4672 * task migration. 4675 * task migration.
@@ -4676,7 +4679,10 @@ static void mem_cgroup_clear_mc(void)
4676 spin_lock(&mc.lock); 4679 spin_lock(&mc.lock);
4677 mc.from = NULL; 4680 mc.from = NULL;
4678 mc.to = NULL; 4681 mc.to = NULL;
4682 mc.mm = NULL;
4679 spin_unlock(&mc.lock); 4683 spin_unlock(&mc.lock);
4684
4685 mmput(mm);
4680} 4686}
4681 4687
4682static int mem_cgroup_can_attach(struct cgroup_taskset *tset) 4688static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
@@ -4733,6 +4739,7 @@ static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
4733 VM_BUG_ON(mc.moved_swap); 4739 VM_BUG_ON(mc.moved_swap);
4734 4740
4735 spin_lock(&mc.lock); 4741 spin_lock(&mc.lock);
4742 mc.mm = mm;
4736 mc.from = from; 4743 mc.from = from;
4737 mc.to = memcg; 4744 mc.to = memcg;
4738 mc.flags = move_flags; 4745 mc.flags = move_flags;
@@ -4742,8 +4749,9 @@ static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
4742 ret = mem_cgroup_precharge_mc(mm); 4749 ret = mem_cgroup_precharge_mc(mm);
4743 if (ret) 4750 if (ret)
4744 mem_cgroup_clear_mc(); 4751 mem_cgroup_clear_mc();
4752 } else {
4753 mmput(mm);
4745 } 4754 }
4746 mmput(mm);
4747 return ret; 4755 return ret;
4748} 4756}
4749 4757
@@ -4852,11 +4860,11 @@ put: /* get_mctgt_type() gets the page */
4852 return ret; 4860 return ret;
4853} 4861}
4854 4862
4855static void mem_cgroup_move_charge(struct mm_struct *mm) 4863static void mem_cgroup_move_charge(void)
4856{ 4864{
4857 struct mm_walk mem_cgroup_move_charge_walk = { 4865 struct mm_walk mem_cgroup_move_charge_walk = {
4858 .pmd_entry = mem_cgroup_move_charge_pte_range, 4866 .pmd_entry = mem_cgroup_move_charge_pte_range,
4859 .mm = mm, 4867 .mm = mc.mm,
4860 }; 4868 };
4861 4869
4862 lru_add_drain_all(); 4870 lru_add_drain_all();
@@ -4868,7 +4876,7 @@ static void mem_cgroup_move_charge(struct mm_struct *mm)
4868 atomic_inc(&mc.from->moving_account); 4876 atomic_inc(&mc.from->moving_account);
4869 synchronize_rcu(); 4877 synchronize_rcu();
4870retry: 4878retry:
4871 if (unlikely(!down_read_trylock(&mm->mmap_sem))) { 4879 if (unlikely(!down_read_trylock(&mc.mm->mmap_sem))) {
4872 /* 4880 /*
4873 * Someone who are holding the mmap_sem might be waiting in 4881 * Someone who are holding the mmap_sem might be waiting in
4874 * waitq. So we cancel all extra charges, wake up all waiters, 4882 * waitq. So we cancel all extra charges, wake up all waiters,
@@ -4885,23 +4893,16 @@ retry:
4885 * additional charge, the page walk just aborts. 4893 * additional charge, the page walk just aborts.
4886 */ 4894 */
4887 walk_page_range(0, ~0UL, &mem_cgroup_move_charge_walk); 4895 walk_page_range(0, ~0UL, &mem_cgroup_move_charge_walk);
4888 up_read(&mm->mmap_sem); 4896 up_read(&mc.mm->mmap_sem);
4889 atomic_dec(&mc.from->moving_account); 4897 atomic_dec(&mc.from->moving_account);
4890} 4898}
4891 4899
4892static void mem_cgroup_move_task(struct cgroup_taskset *tset) 4900static void mem_cgroup_move_task(void)
4893{ 4901{
4894 struct cgroup_subsys_state *css; 4902 if (mc.to) {
4895 struct task_struct *p = cgroup_taskset_first(tset, &css); 4903 mem_cgroup_move_charge();
4896 struct mm_struct *mm = get_task_mm(p);
4897
4898 if (mm) {
4899 if (mc.to)
4900 mem_cgroup_move_charge(mm);
4901 mmput(mm);
4902 }
4903 if (mc.to)
4904 mem_cgroup_clear_mc(); 4904 mem_cgroup_clear_mc();
4905 }
4905} 4906}
4906#else /* !CONFIG_MMU */ 4907#else /* !CONFIG_MMU */
4907static int mem_cgroup_can_attach(struct cgroup_taskset *tset) 4908static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
@@ -4911,7 +4912,7 @@ static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
4911static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset) 4912static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
4912{ 4913{
4913} 4914}
4914static void mem_cgroup_move_task(struct cgroup_taskset *tset) 4915static void mem_cgroup_move_task(void)
4915{ 4916{
4916} 4917}
4917#endif 4918#endif
@@ -5195,7 +5196,7 @@ struct cgroup_subsys memory_cgrp_subsys = {
5195 .css_reset = mem_cgroup_css_reset, 5196 .css_reset = mem_cgroup_css_reset,
5196 .can_attach = mem_cgroup_can_attach, 5197 .can_attach = mem_cgroup_can_attach,
5197 .cancel_attach = mem_cgroup_cancel_attach, 5198 .cancel_attach = mem_cgroup_cancel_attach,
5198 .attach = mem_cgroup_move_task, 5199 .post_attach = mem_cgroup_move_task,
5199 .bind = mem_cgroup_bind, 5200 .bind = mem_cgroup_bind,
5200 .dfl_cftypes = memory_files, 5201 .dfl_cftypes = memory_files,
5201 .legacy_cftypes = mem_cgroup_legacy_files, 5202 .legacy_cftypes = mem_cgroup_legacy_files,
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 78f5f2641b91..ca5acee53b7a 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -888,7 +888,15 @@ int get_hwpoison_page(struct page *page)
888 } 888 }
889 } 889 }
890 890
891 return get_page_unless_zero(head); 891 if (get_page_unless_zero(head)) {
892 if (head == compound_head(page))
893 return 1;
894
895 pr_info("MCE: %#lx cannot catch tail\n", page_to_pfn(page));
896 put_page(head);
897 }
898
899 return 0;
892} 900}
893EXPORT_SYMBOL_GPL(get_hwpoison_page); 901EXPORT_SYMBOL_GPL(get_hwpoison_page);
894 902
diff --git a/mm/memory.c b/mm/memory.c
index 93897f23cc11..305537fc8640 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -789,6 +789,46 @@ out:
789 return pfn_to_page(pfn); 789 return pfn_to_page(pfn);
790} 790}
791 791
792#ifdef CONFIG_TRANSPARENT_HUGEPAGE
793struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
794 pmd_t pmd)
795{
796 unsigned long pfn = pmd_pfn(pmd);
797
798 /*
799 * There is no pmd_special() but there may be special pmds, e.g.
800 * in a direct-access (dax) mapping, so let's just replicate the
801 * !HAVE_PTE_SPECIAL case from vm_normal_page() here.
802 */
803 if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
804 if (vma->vm_flags & VM_MIXEDMAP) {
805 if (!pfn_valid(pfn))
806 return NULL;
807 goto out;
808 } else {
809 unsigned long off;
810 off = (addr - vma->vm_start) >> PAGE_SHIFT;
811 if (pfn == vma->vm_pgoff + off)
812 return NULL;
813 if (!is_cow_mapping(vma->vm_flags))
814 return NULL;
815 }
816 }
817
818 if (is_zero_pfn(pfn))
819 return NULL;
820 if (unlikely(pfn > highest_memmap_pfn))
821 return NULL;
822
823 /*
824 * NOTE! We still have PageReserved() pages in the page tables.
825 * eg. VDSO mappings can cause them to exist.
826 */
827out:
828 return pfn_to_page(pfn);
829}
830#endif
831
792/* 832/*
793 * copy one vm_area from one task to the other. Assumes the page tables 833 * copy one vm_area from one task to the other. Assumes the page tables
794 * already present in the new task to be cleared in the whole range 834 * already present in the new task to be cleared in the whole range
diff --git a/mm/migrate.c b/mm/migrate.c
index 6c822a7b27e0..f9dfb18a4eba 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -975,7 +975,13 @@ out:
975 dec_zone_page_state(page, NR_ISOLATED_ANON + 975 dec_zone_page_state(page, NR_ISOLATED_ANON +
976 page_is_file_cache(page)); 976 page_is_file_cache(page));
977 /* Soft-offlined page shouldn't go through lru cache list */ 977 /* Soft-offlined page shouldn't go through lru cache list */
978 if (reason == MR_MEMORY_FAILURE) { 978 if (reason == MR_MEMORY_FAILURE && rc == MIGRATEPAGE_SUCCESS) {
979 /*
980 * With this release, we free successfully migrated
981 * page and set PG_HWPoison on just freed page
982 * intentionally. Although it's rather weird, it's how
983 * HWPoison flag works at the moment.
984 */
979 put_page(page); 985 put_page(page);
980 if (!test_set_page_hwpoison(page)) 986 if (!test_set_page_hwpoison(page))
981 num_poisoned_pages_inc(); 987 num_poisoned_pages_inc();
diff --git a/mm/page_io.c b/mm/page_io.c
index cd92e3d67a32..985f23cfa79b 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -353,7 +353,11 @@ int swap_readpage(struct page *page)
353 353
354 ret = bdev_read_page(sis->bdev, swap_page_sector(page), page); 354 ret = bdev_read_page(sis->bdev, swap_page_sector(page), page);
355 if (!ret) { 355 if (!ret) {
356 swap_slot_free_notify(page); 356 if (trylock_page(page)) {
357 swap_slot_free_notify(page);
358 unlock_page(page);
359 }
360
357 count_vm_event(PSWPIN); 361 count_vm_event(PSWPIN);
358 return 0; 362 return 0;
359 } 363 }
diff --git a/mm/swap.c b/mm/swap.c
index a0bc206b4ac6..03aacbcb013f 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -728,6 +728,11 @@ void release_pages(struct page **pages, int nr, bool cold)
728 zone = NULL; 728 zone = NULL;
729 } 729 }
730 730
731 if (is_huge_zero_page(page)) {
732 put_huge_zero_page();
733 continue;
734 }
735
731 page = compound_head(page); 736 page = compound_head(page);
732 if (!put_page_testzero(page)) 737 if (!put_page_testzero(page))
733 continue; 738 continue;
diff --git a/mm/vmscan.c b/mm/vmscan.c
index b934223eaa45..142cb61f4822 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2553,7 +2553,7 @@ static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
2553 sc->gfp_mask |= __GFP_HIGHMEM; 2553 sc->gfp_mask |= __GFP_HIGHMEM;
2554 2554
2555 for_each_zone_zonelist_nodemask(zone, z, zonelist, 2555 for_each_zone_zonelist_nodemask(zone, z, zonelist,
2556 requested_highidx, sc->nodemask) { 2556 gfp_zone(sc->gfp_mask), sc->nodemask) {
2557 enum zone_type classzone_idx; 2557 enum zone_type classzone_idx;
2558 2558
2559 if (!populated_zone(zone)) 2559 if (!populated_zone(zone))
@@ -3318,6 +3318,20 @@ static void kswapd_try_to_sleep(pg_data_t *pgdat, int order,
3318 /* Try to sleep for a short interval */ 3318 /* Try to sleep for a short interval */
3319 if (prepare_kswapd_sleep(pgdat, order, remaining, 3319 if (prepare_kswapd_sleep(pgdat, order, remaining,
3320 balanced_classzone_idx)) { 3320 balanced_classzone_idx)) {
3321 /*
3322 * Compaction records what page blocks it recently failed to
3323 * isolate pages from and skips them in the future scanning.
3324 * When kswapd is going to sleep, it is reasonable to assume
3325 * that pages and compaction may succeed so reset the cache.
3326 */
3327 reset_isolation_suitable(pgdat);
3328
3329 /*
3330 * We have freed the memory, now we should compact it to make
3331 * allocation of the requested order possible.
3332 */
3333 wakeup_kcompactd(pgdat, order, classzone_idx);
3334
3321 remaining = schedule_timeout(HZ/10); 3335 remaining = schedule_timeout(HZ/10);
3322 finish_wait(&pgdat->kswapd_wait, &wait); 3336 finish_wait(&pgdat->kswapd_wait, &wait);
3323 prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE); 3337 prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
@@ -3341,20 +3355,6 @@ static void kswapd_try_to_sleep(pg_data_t *pgdat, int order,
3341 */ 3355 */
3342 set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold); 3356 set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold);
3343 3357
3344 /*
3345 * Compaction records what page blocks it recently failed to
3346 * isolate pages from and skips them in the future scanning.
3347 * When kswapd is going to sleep, it is reasonable to assume
3348 * that pages and compaction may succeed so reset the cache.
3349 */
3350 reset_isolation_suitable(pgdat);
3351
3352 /*
3353 * We have freed the memory, now we should compact it to make
3354 * allocation of the requested order possible.
3355 */
3356 wakeup_kcompactd(pgdat, order, classzone_idx);
3357
3358 if (!kthread_should_stop()) 3358 if (!kthread_should_stop())
3359 schedule(); 3359 schedule();
3360 3360
diff --git a/net/batman-adv/bat_v.c b/net/batman-adv/bat_v.c
index 3315b9a598af..4026f198a734 100644
--- a/net/batman-adv/bat_v.c
+++ b/net/batman-adv/bat_v.c
@@ -32,10 +32,21 @@
32 32
33#include "bat_v_elp.h" 33#include "bat_v_elp.h"
34#include "bat_v_ogm.h" 34#include "bat_v_ogm.h"
35#include "hard-interface.h"
35#include "hash.h" 36#include "hash.h"
36#include "originator.h" 37#include "originator.h"
37#include "packet.h" 38#include "packet.h"
38 39
40static void batadv_v_iface_activate(struct batadv_hard_iface *hard_iface)
41{
42 /* B.A.T.M.A.N. V does not use any queuing mechanism, therefore it can
43 * set the interface as ACTIVE right away, without any risk of race
44 * condition
45 */
46 if (hard_iface->if_status == BATADV_IF_TO_BE_ACTIVATED)
47 hard_iface->if_status = BATADV_IF_ACTIVE;
48}
49
39static int batadv_v_iface_enable(struct batadv_hard_iface *hard_iface) 50static int batadv_v_iface_enable(struct batadv_hard_iface *hard_iface)
40{ 51{
41 int ret; 52 int ret;
@@ -274,6 +285,7 @@ static bool batadv_v_neigh_is_sob(struct batadv_neigh_node *neigh1,
274 285
275static struct batadv_algo_ops batadv_batman_v __read_mostly = { 286static struct batadv_algo_ops batadv_batman_v __read_mostly = {
276 .name = "BATMAN_V", 287 .name = "BATMAN_V",
288 .bat_iface_activate = batadv_v_iface_activate,
277 .bat_iface_enable = batadv_v_iface_enable, 289 .bat_iface_enable = batadv_v_iface_enable,
278 .bat_iface_disable = batadv_v_iface_disable, 290 .bat_iface_disable = batadv_v_iface_disable,
279 .bat_iface_update_mac = batadv_v_iface_update_mac, 291 .bat_iface_update_mac = batadv_v_iface_update_mac,
diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c
index e96d7c745b4a..3e6b2624f980 100644
--- a/net/batman-adv/distributed-arp-table.c
+++ b/net/batman-adv/distributed-arp-table.c
@@ -568,6 +568,7 @@ static void batadv_choose_next_candidate(struct batadv_priv *bat_priv,
568 * be sent to 568 * be sent to
569 * @bat_priv: the bat priv with all the soft interface information 569 * @bat_priv: the bat priv with all the soft interface information
570 * @ip_dst: ipv4 to look up in the DHT 570 * @ip_dst: ipv4 to look up in the DHT
571 * @vid: VLAN identifier
571 * 572 *
572 * An originator O is selected if and only if its DHT_ID value is one of three 573 * An originator O is selected if and only if its DHT_ID value is one of three
573 * closest values (from the LEFT, with wrap around if needed) then the hash 574 * closest values (from the LEFT, with wrap around if needed) then the hash
@@ -576,7 +577,8 @@ static void batadv_choose_next_candidate(struct batadv_priv *bat_priv,
576 * Return: the candidate array of size BATADV_DAT_CANDIDATE_NUM. 577 * Return: the candidate array of size BATADV_DAT_CANDIDATE_NUM.
577 */ 578 */
578static struct batadv_dat_candidate * 579static struct batadv_dat_candidate *
579batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst) 580batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst,
581 unsigned short vid)
580{ 582{
581 int select; 583 int select;
582 batadv_dat_addr_t last_max = BATADV_DAT_ADDR_MAX, ip_key; 584 batadv_dat_addr_t last_max = BATADV_DAT_ADDR_MAX, ip_key;
@@ -592,7 +594,7 @@ batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst)
592 return NULL; 594 return NULL;
593 595
594 dat.ip = ip_dst; 596 dat.ip = ip_dst;
595 dat.vid = 0; 597 dat.vid = vid;
596 ip_key = (batadv_dat_addr_t)batadv_hash_dat(&dat, 598 ip_key = (batadv_dat_addr_t)batadv_hash_dat(&dat,
597 BATADV_DAT_ADDR_MAX); 599 BATADV_DAT_ADDR_MAX);
598 600
@@ -612,6 +614,7 @@ batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst)
612 * @bat_priv: the bat priv with all the soft interface information 614 * @bat_priv: the bat priv with all the soft interface information
613 * @skb: payload to send 615 * @skb: payload to send
614 * @ip: the DHT key 616 * @ip: the DHT key
617 * @vid: VLAN identifier
615 * @packet_subtype: unicast4addr packet subtype to use 618 * @packet_subtype: unicast4addr packet subtype to use
616 * 619 *
617 * This function copies the skb with pskb_copy() and is sent as unicast packet 620 * This function copies the skb with pskb_copy() and is sent as unicast packet
@@ -622,7 +625,7 @@ batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst)
622 */ 625 */
623static bool batadv_dat_send_data(struct batadv_priv *bat_priv, 626static bool batadv_dat_send_data(struct batadv_priv *bat_priv,
624 struct sk_buff *skb, __be32 ip, 627 struct sk_buff *skb, __be32 ip,
625 int packet_subtype) 628 unsigned short vid, int packet_subtype)
626{ 629{
627 int i; 630 int i;
628 bool ret = false; 631 bool ret = false;
@@ -631,7 +634,7 @@ static bool batadv_dat_send_data(struct batadv_priv *bat_priv,
631 struct sk_buff *tmp_skb; 634 struct sk_buff *tmp_skb;
632 struct batadv_dat_candidate *cand; 635 struct batadv_dat_candidate *cand;
633 636
634 cand = batadv_dat_select_candidates(bat_priv, ip); 637 cand = batadv_dat_select_candidates(bat_priv, ip, vid);
635 if (!cand) 638 if (!cand)
636 goto out; 639 goto out;
637 640
@@ -1022,7 +1025,7 @@ bool batadv_dat_snoop_outgoing_arp_request(struct batadv_priv *bat_priv,
1022 ret = true; 1025 ret = true;
1023 } else { 1026 } else {
1024 /* Send the request to the DHT */ 1027 /* Send the request to the DHT */
1025 ret = batadv_dat_send_data(bat_priv, skb, ip_dst, 1028 ret = batadv_dat_send_data(bat_priv, skb, ip_dst, vid,
1026 BATADV_P_DAT_DHT_GET); 1029 BATADV_P_DAT_DHT_GET);
1027 } 1030 }
1028out: 1031out:
@@ -1150,8 +1153,8 @@ void batadv_dat_snoop_outgoing_arp_reply(struct batadv_priv *bat_priv,
1150 /* Send the ARP reply to the candidates for both the IP addresses that 1153 /* Send the ARP reply to the candidates for both the IP addresses that
1151 * the node obtained from the ARP reply 1154 * the node obtained from the ARP reply
1152 */ 1155 */
1153 batadv_dat_send_data(bat_priv, skb, ip_src, BATADV_P_DAT_DHT_PUT); 1156 batadv_dat_send_data(bat_priv, skb, ip_src, vid, BATADV_P_DAT_DHT_PUT);
1154 batadv_dat_send_data(bat_priv, skb, ip_dst, BATADV_P_DAT_DHT_PUT); 1157 batadv_dat_send_data(bat_priv, skb, ip_dst, vid, BATADV_P_DAT_DHT_PUT);
1155} 1158}
1156 1159
1157/** 1160/**
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
index b22b2775a0a5..0a7deaf2670a 100644
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@ -407,6 +407,9 @@ batadv_hardif_activate_interface(struct batadv_hard_iface *hard_iface)
407 407
408 batadv_update_min_mtu(hard_iface->soft_iface); 408 batadv_update_min_mtu(hard_iface->soft_iface);
409 409
410 if (bat_priv->bat_algo_ops->bat_iface_activate)
411 bat_priv->bat_algo_ops->bat_iface_activate(hard_iface);
412
410out: 413out:
411 if (primary_if) 414 if (primary_if)
412 batadv_hardif_put(primary_if); 415 batadv_hardif_put(primary_if);
@@ -572,8 +575,7 @@ void batadv_hardif_disable_interface(struct batadv_hard_iface *hard_iface,
572 struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface); 575 struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
573 struct batadv_hard_iface *primary_if = NULL; 576 struct batadv_hard_iface *primary_if = NULL;
574 577
575 if (hard_iface->if_status == BATADV_IF_ACTIVE) 578 batadv_hardif_deactivate_interface(hard_iface);
576 batadv_hardif_deactivate_interface(hard_iface);
577 579
578 if (hard_iface->if_status != BATADV_IF_INACTIVE) 580 if (hard_iface->if_status != BATADV_IF_INACTIVE)
579 goto out; 581 goto out;
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
index e4cbb0753e37..c355a824713c 100644
--- a/net/batman-adv/originator.c
+++ b/net/batman-adv/originator.c
@@ -250,7 +250,6 @@ static void batadv_neigh_node_release(struct kref *ref)
250{ 250{
251 struct hlist_node *node_tmp; 251 struct hlist_node *node_tmp;
252 struct batadv_neigh_node *neigh_node; 252 struct batadv_neigh_node *neigh_node;
253 struct batadv_hardif_neigh_node *hardif_neigh;
254 struct batadv_neigh_ifinfo *neigh_ifinfo; 253 struct batadv_neigh_ifinfo *neigh_ifinfo;
255 struct batadv_algo_ops *bao; 254 struct batadv_algo_ops *bao;
256 255
@@ -262,13 +261,7 @@ static void batadv_neigh_node_release(struct kref *ref)
262 batadv_neigh_ifinfo_put(neigh_ifinfo); 261 batadv_neigh_ifinfo_put(neigh_ifinfo);
263 } 262 }
264 263
265 hardif_neigh = batadv_hardif_neigh_get(neigh_node->if_incoming, 264 batadv_hardif_neigh_put(neigh_node->hardif_neigh);
266 neigh_node->addr);
267 if (hardif_neigh) {
268 /* batadv_hardif_neigh_get() increases refcount too */
269 batadv_hardif_neigh_put(hardif_neigh);
270 batadv_hardif_neigh_put(hardif_neigh);
271 }
272 265
273 if (bao->bat_neigh_free) 266 if (bao->bat_neigh_free)
274 bao->bat_neigh_free(neigh_node); 267 bao->bat_neigh_free(neigh_node);
@@ -663,6 +656,11 @@ batadv_neigh_node_new(struct batadv_orig_node *orig_node,
663 ether_addr_copy(neigh_node->addr, neigh_addr); 656 ether_addr_copy(neigh_node->addr, neigh_addr);
664 neigh_node->if_incoming = hard_iface; 657 neigh_node->if_incoming = hard_iface;
665 neigh_node->orig_node = orig_node; 658 neigh_node->orig_node = orig_node;
659 neigh_node->last_seen = jiffies;
660
661 /* increment unique neighbor refcount */
662 kref_get(&hardif_neigh->refcount);
663 neigh_node->hardif_neigh = hardif_neigh;
666 664
667 /* extra reference for return */ 665 /* extra reference for return */
668 kref_init(&neigh_node->refcount); 666 kref_init(&neigh_node->refcount);
@@ -672,9 +670,6 @@ batadv_neigh_node_new(struct batadv_orig_node *orig_node,
672 hlist_add_head_rcu(&neigh_node->list, &orig_node->neigh_list); 670 hlist_add_head_rcu(&neigh_node->list, &orig_node->neigh_list);
673 spin_unlock_bh(&orig_node->neigh_list_lock); 671 spin_unlock_bh(&orig_node->neigh_list_lock);
674 672
675 /* increment unique neighbor refcount */
676 kref_get(&hardif_neigh->refcount);
677
678 batadv_dbg(BATADV_DBG_BATMAN, orig_node->bat_priv, 673 batadv_dbg(BATADV_DBG_BATMAN, orig_node->bat_priv,
679 "Creating new neighbor %pM for orig_node %pM on interface %s\n", 674 "Creating new neighbor %pM for orig_node %pM on interface %s\n",
680 neigh_addr, orig_node->orig, hard_iface->net_dev->name); 675 neigh_addr, orig_node->orig, hard_iface->net_dev->name);
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index 4dd646a52f1a..b781bf753250 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -105,6 +105,15 @@ static void _batadv_update_route(struct batadv_priv *bat_priv,
105 neigh_node = NULL; 105 neigh_node = NULL;
106 106
107 spin_lock_bh(&orig_node->neigh_list_lock); 107 spin_lock_bh(&orig_node->neigh_list_lock);
108 /* curr_router used earlier may not be the current orig_ifinfo->router
109 * anymore because it was dereferenced outside of the neigh_list_lock
 110 * protected region. After the new best neighbor has replaced the current
111 * best neighbor the reference counter needs to decrease. Consequently,
112 * the code needs to ensure the curr_router variable contains a pointer
113 * to the replaced best neighbor.
114 */
115 curr_router = rcu_dereference_protected(orig_ifinfo->router, true);
116
108 rcu_assign_pointer(orig_ifinfo->router, neigh_node); 117 rcu_assign_pointer(orig_ifinfo->router, neigh_node);
109 spin_unlock_bh(&orig_node->neigh_list_lock); 118 spin_unlock_bh(&orig_node->neigh_list_lock);
110 batadv_orig_ifinfo_put(orig_ifinfo); 119 batadv_orig_ifinfo_put(orig_ifinfo);
diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c
index 3ce06e0a91b1..76417850d3fc 100644
--- a/net/batman-adv/send.c
+++ b/net/batman-adv/send.c
@@ -675,6 +675,9 @@ batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
675 675
676 if (pending) { 676 if (pending) {
677 hlist_del(&forw_packet->list); 677 hlist_del(&forw_packet->list);
678 if (!forw_packet->own)
679 atomic_inc(&bat_priv->bcast_queue_left);
680
678 batadv_forw_packet_free(forw_packet); 681 batadv_forw_packet_free(forw_packet);
679 } 682 }
680 } 683 }
@@ -702,6 +705,9 @@ batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
702 705
703 if (pending) { 706 if (pending) {
704 hlist_del(&forw_packet->list); 707 hlist_del(&forw_packet->list);
708 if (!forw_packet->own)
709 atomic_inc(&bat_priv->batman_queue_left);
710
705 batadv_forw_packet_free(forw_packet); 711 batadv_forw_packet_free(forw_packet);
706 } 712 }
707 } 713 }
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index 0710379491bf..8a136b6a1ff0 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -408,11 +408,17 @@ void batadv_interface_rx(struct net_device *soft_iface,
408 */ 408 */
409 nf_reset(skb); 409 nf_reset(skb);
410 410
411 if (unlikely(!pskb_may_pull(skb, ETH_HLEN)))
412 goto dropped;
413
411 vid = batadv_get_vid(skb, 0); 414 vid = batadv_get_vid(skb, 0);
412 ethhdr = eth_hdr(skb); 415 ethhdr = eth_hdr(skb);
413 416
414 switch (ntohs(ethhdr->h_proto)) { 417 switch (ntohs(ethhdr->h_proto)) {
415 case ETH_P_8021Q: 418 case ETH_P_8021Q:
419 if (!pskb_may_pull(skb, VLAN_ETH_HLEN))
420 goto dropped;
421
416 vhdr = (struct vlan_ethhdr *)skb->data; 422 vhdr = (struct vlan_ethhdr *)skb->data;
417 423
418 if (vhdr->h_vlan_encapsulated_proto != ethertype) 424 if (vhdr->h_vlan_encapsulated_proto != ethertype)
@@ -424,8 +430,6 @@ void batadv_interface_rx(struct net_device *soft_iface,
424 } 430 }
425 431
426 /* skb->dev & skb->pkt_type are set here */ 432 /* skb->dev & skb->pkt_type are set here */
427 if (unlikely(!pskb_may_pull(skb, ETH_HLEN)))
428 goto dropped;
429 skb->protocol = eth_type_trans(skb, soft_iface); 433 skb->protocol = eth_type_trans(skb, soft_iface);
430 434
431 /* should not be necessary anymore as we use skb_pull_rcsum() 435 /* should not be necessary anymore as we use skb_pull_rcsum()
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index 0b43e86328a5..9b4551a86535 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -215,6 +215,8 @@ static void batadv_tt_local_entry_release(struct kref *ref)
215 tt_local_entry = container_of(ref, struct batadv_tt_local_entry, 215 tt_local_entry = container_of(ref, struct batadv_tt_local_entry,
216 common.refcount); 216 common.refcount);
217 217
218 batadv_softif_vlan_put(tt_local_entry->vlan);
219
218 kfree_rcu(tt_local_entry, common.rcu); 220 kfree_rcu(tt_local_entry, common.rcu);
219} 221}
220 222
@@ -673,6 +675,7 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const u8 *addr,
673 kref_get(&tt_local->common.refcount); 675 kref_get(&tt_local->common.refcount);
674 tt_local->last_seen = jiffies; 676 tt_local->last_seen = jiffies;
675 tt_local->common.added_at = tt_local->last_seen; 677 tt_local->common.added_at = tt_local->last_seen;
678 tt_local->vlan = vlan;
676 679
677 /* the batman interface mac and multicast addresses should never be 680 /* the batman interface mac and multicast addresses should never be
678 * purged 681 * purged
@@ -991,7 +994,6 @@ int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset)
991 struct batadv_tt_common_entry *tt_common_entry; 994 struct batadv_tt_common_entry *tt_common_entry;
992 struct batadv_tt_local_entry *tt_local; 995 struct batadv_tt_local_entry *tt_local;
993 struct batadv_hard_iface *primary_if; 996 struct batadv_hard_iface *primary_if;
994 struct batadv_softif_vlan *vlan;
995 struct hlist_head *head; 997 struct hlist_head *head;
996 unsigned short vid; 998 unsigned short vid;
997 u32 i; 999 u32 i;
@@ -1027,14 +1029,6 @@ int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset)
1027 last_seen_msecs = last_seen_msecs % 1000; 1029 last_seen_msecs = last_seen_msecs % 1000;
1028 1030
1029 no_purge = tt_common_entry->flags & np_flag; 1031 no_purge = tt_common_entry->flags & np_flag;
1030
1031 vlan = batadv_softif_vlan_get(bat_priv, vid);
1032 if (!vlan) {
1033 seq_printf(seq, "Cannot retrieve VLAN %d\n",
1034 BATADV_PRINT_VID(vid));
1035 continue;
1036 }
1037
1038 seq_printf(seq, 1032 seq_printf(seq,
1039 " * %pM %4i [%c%c%c%c%c%c] %3u.%03u (%#.8x)\n", 1033 " * %pM %4i [%c%c%c%c%c%c] %3u.%03u (%#.8x)\n",
1040 tt_common_entry->addr, 1034 tt_common_entry->addr,
@@ -1052,9 +1046,7 @@ int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset)
1052 BATADV_TT_CLIENT_ISOLA) ? 'I' : '.'), 1046 BATADV_TT_CLIENT_ISOLA) ? 'I' : '.'),
1053 no_purge ? 0 : last_seen_secs, 1047 no_purge ? 0 : last_seen_secs,
1054 no_purge ? 0 : last_seen_msecs, 1048 no_purge ? 0 : last_seen_msecs,
1055 vlan->tt.crc); 1049 tt_local->vlan->tt.crc);
1056
1057 batadv_softif_vlan_put(vlan);
1058 } 1050 }
1059 rcu_read_unlock(); 1051 rcu_read_unlock();
1060 } 1052 }
@@ -1099,7 +1091,6 @@ u16 batadv_tt_local_remove(struct batadv_priv *bat_priv, const u8 *addr,
1099{ 1091{
1100 struct batadv_tt_local_entry *tt_local_entry; 1092 struct batadv_tt_local_entry *tt_local_entry;
1101 u16 flags, curr_flags = BATADV_NO_FLAGS; 1093 u16 flags, curr_flags = BATADV_NO_FLAGS;
1102 struct batadv_softif_vlan *vlan;
1103 void *tt_entry_exists; 1094 void *tt_entry_exists;
1104 1095
1105 tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr, vid); 1096 tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr, vid);
@@ -1139,14 +1130,6 @@ u16 batadv_tt_local_remove(struct batadv_priv *bat_priv, const u8 *addr,
1139 /* extra call to free the local tt entry */ 1130 /* extra call to free the local tt entry */
1140 batadv_tt_local_entry_put(tt_local_entry); 1131 batadv_tt_local_entry_put(tt_local_entry);
1141 1132
1142 /* decrease the reference held for this vlan */
1143 vlan = batadv_softif_vlan_get(bat_priv, vid);
1144 if (!vlan)
1145 goto out;
1146
1147 batadv_softif_vlan_put(vlan);
1148 batadv_softif_vlan_put(vlan);
1149
1150out: 1133out:
1151 if (tt_local_entry) 1134 if (tt_local_entry)
1152 batadv_tt_local_entry_put(tt_local_entry); 1135 batadv_tt_local_entry_put(tt_local_entry);
@@ -1219,7 +1202,6 @@ static void batadv_tt_local_table_free(struct batadv_priv *bat_priv)
1219 spinlock_t *list_lock; /* protects write access to the hash lists */ 1202 spinlock_t *list_lock; /* protects write access to the hash lists */
1220 struct batadv_tt_common_entry *tt_common_entry; 1203 struct batadv_tt_common_entry *tt_common_entry;
1221 struct batadv_tt_local_entry *tt_local; 1204 struct batadv_tt_local_entry *tt_local;
1222 struct batadv_softif_vlan *vlan;
1223 struct hlist_node *node_tmp; 1205 struct hlist_node *node_tmp;
1224 struct hlist_head *head; 1206 struct hlist_head *head;
1225 u32 i; 1207 u32 i;
@@ -1241,14 +1223,6 @@ static void batadv_tt_local_table_free(struct batadv_priv *bat_priv)
1241 struct batadv_tt_local_entry, 1223 struct batadv_tt_local_entry,
1242 common); 1224 common);
1243 1225
1244 /* decrease the reference held for this vlan */
1245 vlan = batadv_softif_vlan_get(bat_priv,
1246 tt_common_entry->vid);
1247 if (vlan) {
1248 batadv_softif_vlan_put(vlan);
1249 batadv_softif_vlan_put(vlan);
1250 }
1251
1252 batadv_tt_local_entry_put(tt_local); 1226 batadv_tt_local_entry_put(tt_local);
1253 } 1227 }
1254 spin_unlock_bh(list_lock); 1228 spin_unlock_bh(list_lock);
@@ -3309,7 +3283,6 @@ static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv)
3309 struct batadv_hashtable *hash = bat_priv->tt.local_hash; 3283 struct batadv_hashtable *hash = bat_priv->tt.local_hash;
3310 struct batadv_tt_common_entry *tt_common; 3284 struct batadv_tt_common_entry *tt_common;
3311 struct batadv_tt_local_entry *tt_local; 3285 struct batadv_tt_local_entry *tt_local;
3312 struct batadv_softif_vlan *vlan;
3313 struct hlist_node *node_tmp; 3286 struct hlist_node *node_tmp;
3314 struct hlist_head *head; 3287 struct hlist_head *head;
3315 spinlock_t *list_lock; /* protects write access to the hash lists */ 3288 spinlock_t *list_lock; /* protects write access to the hash lists */
@@ -3339,13 +3312,6 @@ static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv)
3339 struct batadv_tt_local_entry, 3312 struct batadv_tt_local_entry,
3340 common); 3313 common);
3341 3314
3342 /* decrease the reference held for this vlan */
3343 vlan = batadv_softif_vlan_get(bat_priv, tt_common->vid);
3344 if (vlan) {
3345 batadv_softif_vlan_put(vlan);
3346 batadv_softif_vlan_put(vlan);
3347 }
3348
3349 batadv_tt_local_entry_put(tt_local); 3315 batadv_tt_local_entry_put(tt_local);
3350 } 3316 }
3351 spin_unlock_bh(list_lock); 3317 spin_unlock_bh(list_lock);
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
index 9abfb3e73c34..1e47fbe8bb7b 100644
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@ -433,6 +433,7 @@ struct batadv_hardif_neigh_node {
433 * @ifinfo_lock: lock protecting private ifinfo members and list 433 * @ifinfo_lock: lock protecting private ifinfo members and list
434 * @if_incoming: pointer to incoming hard-interface 434 * @if_incoming: pointer to incoming hard-interface
435 * @last_seen: when last packet via this neighbor was received 435 * @last_seen: when last packet via this neighbor was received
436 * @hardif_neigh: hardif_neigh of this neighbor
436 * @refcount: number of contexts the object is used 437 * @refcount: number of contexts the object is used
437 * @rcu: struct used for freeing in an RCU-safe manner 438 * @rcu: struct used for freeing in an RCU-safe manner
438 */ 439 */
@@ -444,6 +445,7 @@ struct batadv_neigh_node {
444 spinlock_t ifinfo_lock; /* protects ifinfo_list and its members */ 445 spinlock_t ifinfo_lock; /* protects ifinfo_list and its members */
445 struct batadv_hard_iface *if_incoming; 446 struct batadv_hard_iface *if_incoming;
446 unsigned long last_seen; 447 unsigned long last_seen;
448 struct batadv_hardif_neigh_node *hardif_neigh;
447 struct kref refcount; 449 struct kref refcount;
448 struct rcu_head rcu; 450 struct rcu_head rcu;
449}; 451};
@@ -1073,10 +1075,12 @@ struct batadv_tt_common_entry {
1073 * struct batadv_tt_local_entry - translation table local entry data 1075 * struct batadv_tt_local_entry - translation table local entry data
1074 * @common: general translation table data 1076 * @common: general translation table data
1075 * @last_seen: timestamp used for purging stale tt local entries 1077 * @last_seen: timestamp used for purging stale tt local entries
1078 * @vlan: soft-interface vlan of the entry
1076 */ 1079 */
1077struct batadv_tt_local_entry { 1080struct batadv_tt_local_entry {
1078 struct batadv_tt_common_entry common; 1081 struct batadv_tt_common_entry common;
1079 unsigned long last_seen; 1082 unsigned long last_seen;
1083 struct batadv_softif_vlan *vlan;
1080}; 1084};
1081 1085
1082/** 1086/**
@@ -1250,6 +1254,8 @@ struct batadv_forw_packet {
1250 * struct batadv_algo_ops - mesh algorithm callbacks 1254 * struct batadv_algo_ops - mesh algorithm callbacks
1251 * @list: list node for the batadv_algo_list 1255 * @list: list node for the batadv_algo_list
1252 * @name: name of the algorithm 1256 * @name: name of the algorithm
1257 * @bat_iface_activate: start routing mechanisms when hard-interface is brought
1258 * up
1253 * @bat_iface_enable: init routing info when hard-interface is enabled 1259 * @bat_iface_enable: init routing info when hard-interface is enabled
1254 * @bat_iface_disable: de-init routing info when hard-interface is disabled 1260 * @bat_iface_disable: de-init routing info when hard-interface is disabled
1255 * @bat_iface_update_mac: (re-)init mac addresses of the protocol information 1261 * @bat_iface_update_mac: (re-)init mac addresses of the protocol information
@@ -1277,6 +1283,7 @@ struct batadv_forw_packet {
1277struct batadv_algo_ops { 1283struct batadv_algo_ops {
1278 struct hlist_node list; 1284 struct hlist_node list;
1279 char *name; 1285 char *name;
1286 void (*bat_iface_activate)(struct batadv_hard_iface *hard_iface);
1280 int (*bat_iface_enable)(struct batadv_hard_iface *hard_iface); 1287 int (*bat_iface_enable)(struct batadv_hard_iface *hard_iface);
1281 void (*bat_iface_disable)(struct batadv_hard_iface *hard_iface); 1288 void (*bat_iface_disable)(struct batadv_hard_iface *hard_iface);
1282 void (*bat_iface_update_mac)(struct batadv_hard_iface *hard_iface); 1289 void (*bat_iface_update_mac)(struct batadv_hard_iface *hard_iface);
diff --git a/net/ceph/auth.c b/net/ceph/auth.c
index 6b923bcaa2a4..2bc5965fdd1e 100644
--- a/net/ceph/auth.c
+++ b/net/ceph/auth.c
@@ -293,13 +293,9 @@ int ceph_auth_create_authorizer(struct ceph_auth_client *ac,
293} 293}
294EXPORT_SYMBOL(ceph_auth_create_authorizer); 294EXPORT_SYMBOL(ceph_auth_create_authorizer);
295 295
296void ceph_auth_destroy_authorizer(struct ceph_auth_client *ac, 296void ceph_auth_destroy_authorizer(struct ceph_authorizer *a)
297 struct ceph_authorizer *a)
298{ 297{
299 mutex_lock(&ac->mutex); 298 a->destroy(a);
300 if (ac->ops && ac->ops->destroy_authorizer)
301 ac->ops->destroy_authorizer(ac, a);
302 mutex_unlock(&ac->mutex);
303} 299}
304EXPORT_SYMBOL(ceph_auth_destroy_authorizer); 300EXPORT_SYMBOL(ceph_auth_destroy_authorizer);
305 301
diff --git a/net/ceph/auth_none.c b/net/ceph/auth_none.c
index 8c93fa8d81bc..5f836f02ae36 100644
--- a/net/ceph/auth_none.c
+++ b/net/ceph/auth_none.c
@@ -16,7 +16,6 @@ static void reset(struct ceph_auth_client *ac)
16 struct ceph_auth_none_info *xi = ac->private; 16 struct ceph_auth_none_info *xi = ac->private;
17 17
18 xi->starting = true; 18 xi->starting = true;
19 xi->built_authorizer = false;
20} 19}
21 20
22static void destroy(struct ceph_auth_client *ac) 21static void destroy(struct ceph_auth_client *ac)
@@ -39,6 +38,27 @@ static int should_authenticate(struct ceph_auth_client *ac)
39 return xi->starting; 38 return xi->starting;
40} 39}
41 40
41static int ceph_auth_none_build_authorizer(struct ceph_auth_client *ac,
42 struct ceph_none_authorizer *au)
43{
44 void *p = au->buf;
45 void *const end = p + sizeof(au->buf);
46 int ret;
47
48 ceph_encode_8_safe(&p, end, 1, e_range);
49 ret = ceph_entity_name_encode(ac->name, &p, end);
50 if (ret < 0)
51 return ret;
52
53 ceph_encode_64_safe(&p, end, ac->global_id, e_range);
54 au->buf_len = p - (void *)au->buf;
55 dout("%s built authorizer len %d\n", __func__, au->buf_len);
56 return 0;
57
58e_range:
59 return -ERANGE;
60}
61
42static int build_request(struct ceph_auth_client *ac, void *buf, void *end) 62static int build_request(struct ceph_auth_client *ac, void *buf, void *end)
43{ 63{
44 return 0; 64 return 0;
@@ -57,32 +77,32 @@ static int handle_reply(struct ceph_auth_client *ac, int result,
57 return result; 77 return result;
58} 78}
59 79
80static void ceph_auth_none_destroy_authorizer(struct ceph_authorizer *a)
81{
82 kfree(a);
83}
84
60/* 85/*
61 * build an 'authorizer' with our entity_name and global_id. we can 86 * build an 'authorizer' with our entity_name and global_id. it is
62 * reuse a single static copy since it is identical for all services 87 * identical for all services we connect to.
63 * we connect to.
64 */ 88 */
65static int ceph_auth_none_create_authorizer( 89static int ceph_auth_none_create_authorizer(
66 struct ceph_auth_client *ac, int peer_type, 90 struct ceph_auth_client *ac, int peer_type,
67 struct ceph_auth_handshake *auth) 91 struct ceph_auth_handshake *auth)
68{ 92{
69 struct ceph_auth_none_info *ai = ac->private; 93 struct ceph_none_authorizer *au;
70 struct ceph_none_authorizer *au = &ai->au;
71 void *p, *end;
72 int ret; 94 int ret;
73 95
74 if (!ai->built_authorizer) { 96 au = kmalloc(sizeof(*au), GFP_NOFS);
75 p = au->buf; 97 if (!au)
76 end = p + sizeof(au->buf); 98 return -ENOMEM;
77 ceph_encode_8(&p, 1); 99
78 ret = ceph_entity_name_encode(ac->name, &p, end - 8); 100 au->base.destroy = ceph_auth_none_destroy_authorizer;
79 if (ret < 0) 101
80 goto bad; 102 ret = ceph_auth_none_build_authorizer(ac, au);
81 ceph_decode_need(&p, end, sizeof(u64), bad2); 103 if (ret) {
82 ceph_encode_64(&p, ac->global_id); 104 kfree(au);
83 au->buf_len = p - (void *)au->buf; 105 return ret;
84 ai->built_authorizer = true;
85 dout("built authorizer len %d\n", au->buf_len);
86 } 106 }
87 107
88 auth->authorizer = (struct ceph_authorizer *) au; 108 auth->authorizer = (struct ceph_authorizer *) au;
@@ -92,17 +112,6 @@ static int ceph_auth_none_create_authorizer(
92 auth->authorizer_reply_buf_len = sizeof (au->reply_buf); 112 auth->authorizer_reply_buf_len = sizeof (au->reply_buf);
93 113
94 return 0; 114 return 0;
95
96bad2:
97 ret = -ERANGE;
98bad:
99 return ret;
100}
101
102static void ceph_auth_none_destroy_authorizer(struct ceph_auth_client *ac,
103 struct ceph_authorizer *a)
104{
105 /* nothing to do */
106} 115}
107 116
108static const struct ceph_auth_client_ops ceph_auth_none_ops = { 117static const struct ceph_auth_client_ops ceph_auth_none_ops = {
@@ -114,7 +123,6 @@ static const struct ceph_auth_client_ops ceph_auth_none_ops = {
114 .build_request = build_request, 123 .build_request = build_request,
115 .handle_reply = handle_reply, 124 .handle_reply = handle_reply,
116 .create_authorizer = ceph_auth_none_create_authorizer, 125 .create_authorizer = ceph_auth_none_create_authorizer,
117 .destroy_authorizer = ceph_auth_none_destroy_authorizer,
118}; 126};
119 127
120int ceph_auth_none_init(struct ceph_auth_client *ac) 128int ceph_auth_none_init(struct ceph_auth_client *ac)
@@ -127,7 +135,6 @@ int ceph_auth_none_init(struct ceph_auth_client *ac)
127 return -ENOMEM; 135 return -ENOMEM;
128 136
129 xi->starting = true; 137 xi->starting = true;
130 xi->built_authorizer = false;
131 138
132 ac->protocol = CEPH_AUTH_NONE; 139 ac->protocol = CEPH_AUTH_NONE;
133 ac->private = xi; 140 ac->private = xi;
diff --git a/net/ceph/auth_none.h b/net/ceph/auth_none.h
index 059a3ce4b53f..62021535ae4a 100644
--- a/net/ceph/auth_none.h
+++ b/net/ceph/auth_none.h
@@ -12,6 +12,7 @@
12 */ 12 */
13 13
14struct ceph_none_authorizer { 14struct ceph_none_authorizer {
15 struct ceph_authorizer base;
15 char buf[128]; 16 char buf[128];
16 int buf_len; 17 int buf_len;
17 char reply_buf[0]; 18 char reply_buf[0];
@@ -19,8 +20,6 @@ struct ceph_none_authorizer {
19 20
20struct ceph_auth_none_info { 21struct ceph_auth_none_info {
21 bool starting; 22 bool starting;
22 bool built_authorizer;
23 struct ceph_none_authorizer au; /* we only need one; it's static */
24}; 23};
25 24
26int ceph_auth_none_init(struct ceph_auth_client *ac); 25int ceph_auth_none_init(struct ceph_auth_client *ac);
diff --git a/net/ceph/auth_x.c b/net/ceph/auth_x.c
index 9e43a315e662..a0905f04bd13 100644
--- a/net/ceph/auth_x.c
+++ b/net/ceph/auth_x.c
@@ -565,6 +565,14 @@ static int ceph_x_handle_reply(struct ceph_auth_client *ac, int result,
565 return -EAGAIN; 565 return -EAGAIN;
566} 566}
567 567
568static void ceph_x_destroy_authorizer(struct ceph_authorizer *a)
569{
570 struct ceph_x_authorizer *au = (void *)a;
571
572 ceph_x_authorizer_cleanup(au);
573 kfree(au);
574}
575
568static int ceph_x_create_authorizer( 576static int ceph_x_create_authorizer(
569 struct ceph_auth_client *ac, int peer_type, 577 struct ceph_auth_client *ac, int peer_type,
570 struct ceph_auth_handshake *auth) 578 struct ceph_auth_handshake *auth)
@@ -581,6 +589,8 @@ static int ceph_x_create_authorizer(
581 if (!au) 589 if (!au)
582 return -ENOMEM; 590 return -ENOMEM;
583 591
592 au->base.destroy = ceph_x_destroy_authorizer;
593
584 ret = ceph_x_build_authorizer(ac, th, au); 594 ret = ceph_x_build_authorizer(ac, th, au);
585 if (ret) { 595 if (ret) {
586 kfree(au); 596 kfree(au);
@@ -643,16 +653,6 @@ static int ceph_x_verify_authorizer_reply(struct ceph_auth_client *ac,
643 return ret; 653 return ret;
644} 654}
645 655
646static void ceph_x_destroy_authorizer(struct ceph_auth_client *ac,
647 struct ceph_authorizer *a)
648{
649 struct ceph_x_authorizer *au = (void *)a;
650
651 ceph_x_authorizer_cleanup(au);
652 kfree(au);
653}
654
655
656static void ceph_x_reset(struct ceph_auth_client *ac) 656static void ceph_x_reset(struct ceph_auth_client *ac)
657{ 657{
658 struct ceph_x_info *xi = ac->private; 658 struct ceph_x_info *xi = ac->private;
@@ -770,7 +770,6 @@ static const struct ceph_auth_client_ops ceph_x_ops = {
770 .create_authorizer = ceph_x_create_authorizer, 770 .create_authorizer = ceph_x_create_authorizer,
771 .update_authorizer = ceph_x_update_authorizer, 771 .update_authorizer = ceph_x_update_authorizer,
772 .verify_authorizer_reply = ceph_x_verify_authorizer_reply, 772 .verify_authorizer_reply = ceph_x_verify_authorizer_reply,
773 .destroy_authorizer = ceph_x_destroy_authorizer,
774 .invalidate_authorizer = ceph_x_invalidate_authorizer, 773 .invalidate_authorizer = ceph_x_invalidate_authorizer,
775 .reset = ceph_x_reset, 774 .reset = ceph_x_reset,
776 .destroy = ceph_x_destroy, 775 .destroy = ceph_x_destroy,
diff --git a/net/ceph/auth_x.h b/net/ceph/auth_x.h
index 40b1a3cf7397..21a5af904bae 100644
--- a/net/ceph/auth_x.h
+++ b/net/ceph/auth_x.h
@@ -26,6 +26,7 @@ struct ceph_x_ticket_handler {
26 26
27 27
28struct ceph_x_authorizer { 28struct ceph_x_authorizer {
29 struct ceph_authorizer base;
29 struct ceph_crypto_key session_key; 30 struct ceph_crypto_key session_key;
30 struct ceph_buffer *buf; 31 struct ceph_buffer *buf;
31 unsigned int service; 32 unsigned int service;
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 32355d9d0103..40a53a70efdf 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -1087,10 +1087,8 @@ static void put_osd(struct ceph_osd *osd)
1087 dout("put_osd %p %d -> %d\n", osd, atomic_read(&osd->o_ref), 1087 dout("put_osd %p %d -> %d\n", osd, atomic_read(&osd->o_ref),
1088 atomic_read(&osd->o_ref) - 1); 1088 atomic_read(&osd->o_ref) - 1);
1089 if (atomic_dec_and_test(&osd->o_ref)) { 1089 if (atomic_dec_and_test(&osd->o_ref)) {
1090 struct ceph_auth_client *ac = osd->o_osdc->client->monc.auth;
1091
1092 if (osd->o_auth.authorizer) 1090 if (osd->o_auth.authorizer)
1093 ceph_auth_destroy_authorizer(ac, osd->o_auth.authorizer); 1091 ceph_auth_destroy_authorizer(osd->o_auth.authorizer);
1094 kfree(osd); 1092 kfree(osd);
1095 } 1093 }
1096} 1094}
@@ -2984,7 +2982,7 @@ static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
2984 struct ceph_auth_handshake *auth = &o->o_auth; 2982 struct ceph_auth_handshake *auth = &o->o_auth;
2985 2983
2986 if (force_new && auth->authorizer) { 2984 if (force_new && auth->authorizer) {
2987 ceph_auth_destroy_authorizer(ac, auth->authorizer); 2985 ceph_auth_destroy_authorizer(auth->authorizer);
2988 auth->authorizer = NULL; 2986 auth->authorizer = NULL;
2989 } 2987 }
2990 if (!auth->authorizer) { 2988 if (!auth->authorizer) {
diff --git a/net/core/dev.c b/net/core/dev.c
index d91dfbec0fc6..673d1f118bfb 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2815,7 +2815,7 @@ static netdev_features_t harmonize_features(struct sk_buff *skb,
2815 2815
2816 if (skb->ip_summed != CHECKSUM_NONE && 2816 if (skb->ip_summed != CHECKSUM_NONE &&
2817 !can_checksum_protocol(features, type)) { 2817 !can_checksum_protocol(features, type)) {
2818 features &= ~NETIF_F_CSUM_MASK; 2818 features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
2819 } else if (illegal_highdma(skb->dev, skb)) { 2819 } else if (illegal_highdma(skb->dev, skb)) {
2820 features &= ~NETIF_F_SG; 2820 features &= ~NETIF_F_SG;
2821 } 2821 }
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 3177211ab651..77c20a489218 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -438,6 +438,7 @@ static int inet_reuseport_add_sock(struct sock *sk,
438 const struct sock *sk2, 438 const struct sock *sk2,
439 bool match_wildcard)) 439 bool match_wildcard))
440{ 440{
441 struct inet_bind_bucket *tb = inet_csk(sk)->icsk_bind_hash;
441 struct sock *sk2; 442 struct sock *sk2;
442 kuid_t uid = sock_i_uid(sk); 443 kuid_t uid = sock_i_uid(sk);
443 444
@@ -446,6 +447,7 @@ static int inet_reuseport_add_sock(struct sock *sk,
446 sk2->sk_family == sk->sk_family && 447 sk2->sk_family == sk->sk_family &&
447 ipv6_only_sock(sk2) == ipv6_only_sock(sk) && 448 ipv6_only_sock(sk2) == ipv6_only_sock(sk) &&
448 sk2->sk_bound_dev_if == sk->sk_bound_dev_if && 449 sk2->sk_bound_dev_if == sk->sk_bound_dev_if &&
450 inet_csk(sk2)->icsk_bind_hash == tb &&
449 sk2->sk_reuseport && uid_eq(uid, sock_i_uid(sk2)) && 451 sk2->sk_reuseport && uid_eq(uid, sock_i_uid(sk2)) &&
450 saddr_same(sk, sk2, false)) 452 saddr_same(sk, sk2, false))
451 return reuseport_add_sock(sk, sk2); 453 return reuseport_add_sock(sk, sk2);
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 2480d79b0e37..b99213c46aac 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -369,7 +369,8 @@ static struct rtable *gre_get_rt(struct sk_buff *skb,
369 return ip_route_output_key(net, fl); 369 return ip_route_output_key(net, fl);
370} 370}
371 371
372static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev) 372static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev,
373 __be16 proto)
373{ 374{
374 struct ip_tunnel_info *tun_info; 375 struct ip_tunnel_info *tun_info;
375 const struct ip_tunnel_key *key; 376 const struct ip_tunnel_key *key;
@@ -418,7 +419,7 @@ static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev)
418 goto err_free_rt; 419 goto err_free_rt;
419 420
420 flags = tun_info->key.tun_flags & (TUNNEL_CSUM | TUNNEL_KEY); 421 flags = tun_info->key.tun_flags & (TUNNEL_CSUM | TUNNEL_KEY);
421 gre_build_header(skb, tunnel_hlen, flags, htons(ETH_P_TEB), 422 gre_build_header(skb, tunnel_hlen, flags, proto,
422 tunnel_id_to_key(tun_info->key.tun_id), 0); 423 tunnel_id_to_key(tun_info->key.tun_id), 0);
423 424
424 df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0; 425 df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
@@ -459,7 +460,7 @@ static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
459 const struct iphdr *tnl_params; 460 const struct iphdr *tnl_params;
460 461
461 if (tunnel->collect_md) { 462 if (tunnel->collect_md) {
462 gre_fb_xmit(skb, dev); 463 gre_fb_xmit(skb, dev, skb->protocol);
463 return NETDEV_TX_OK; 464 return NETDEV_TX_OK;
464 } 465 }
465 466
@@ -501,7 +502,7 @@ static netdev_tx_t gre_tap_xmit(struct sk_buff *skb,
501 struct ip_tunnel *tunnel = netdev_priv(dev); 502 struct ip_tunnel *tunnel = netdev_priv(dev);
502 503
503 if (tunnel->collect_md) { 504 if (tunnel->collect_md) {
504 gre_fb_xmit(skb, dev); 505 gre_fb_xmit(skb, dev, htons(ETH_P_TEB));
505 return NETDEV_TX_OK; 506 return NETDEV_TX_OK;
506 } 507 }
507 508
@@ -732,7 +733,7 @@ static int ipgre_tunnel_init(struct net_device *dev)
732 netif_keep_dst(dev); 733 netif_keep_dst(dev);
733 dev->addr_len = 4; 734 dev->addr_len = 4;
734 735
735 if (iph->daddr) { 736 if (iph->daddr && !tunnel->collect_md) {
736#ifdef CONFIG_NET_IPGRE_BROADCAST 737#ifdef CONFIG_NET_IPGRE_BROADCAST
737 if (ipv4_is_multicast(iph->daddr)) { 738 if (ipv4_is_multicast(iph->daddr)) {
738 if (!iph->saddr) 739 if (!iph->saddr)
@@ -741,8 +742,9 @@ static int ipgre_tunnel_init(struct net_device *dev)
741 dev->header_ops = &ipgre_header_ops; 742 dev->header_ops = &ipgre_header_ops;
742 } 743 }
743#endif 744#endif
744 } else 745 } else if (!tunnel->collect_md) {
745 dev->header_ops = &ipgre_header_ops; 746 dev->header_ops = &ipgre_header_ops;
747 }
746 748
747 return ip_tunnel_init(dev); 749 return ip_tunnel_init(dev);
748} 750}
@@ -785,6 +787,11 @@ static int ipgre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[])
785 if (flags & (GRE_VERSION|GRE_ROUTING)) 787 if (flags & (GRE_VERSION|GRE_ROUTING))
786 return -EINVAL; 788 return -EINVAL;
787 789
790 if (data[IFLA_GRE_COLLECT_METADATA] &&
791 data[IFLA_GRE_ENCAP_TYPE] &&
792 nla_get_u16(data[IFLA_GRE_ENCAP_TYPE]) != TUNNEL_ENCAP_NONE)
793 return -EINVAL;
794
788 return 0; 795 return 0;
789} 796}
790 797
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index 6aad0192443d..a69ed94bda1b 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -326,12 +326,12 @@ static int ip_tunnel_bind_dev(struct net_device *dev)
326 326
327 if (!IS_ERR(rt)) { 327 if (!IS_ERR(rt)) {
328 tdev = rt->dst.dev; 328 tdev = rt->dst.dev;
329 dst_cache_set_ip4(&tunnel->dst_cache, &rt->dst,
330 fl4.saddr);
331 ip_rt_put(rt); 329 ip_rt_put(rt);
332 } 330 }
333 if (dev->type != ARPHRD_ETHER) 331 if (dev->type != ARPHRD_ETHER)
334 dev->flags |= IFF_POINTOPOINT; 332 dev->flags |= IFF_POINTOPOINT;
333
334 dst_cache_reset(&tunnel->dst_cache);
335 } 335 }
336 336
337 if (!tdev && tunnel->parms.link) 337 if (!tdev && tunnel->parms.link)
diff --git a/net/ipv6/ila/ila_lwt.c b/net/ipv6/ila/ila_lwt.c
index 4985e1a735a6..17038e1ede98 100644
--- a/net/ipv6/ila/ila_lwt.c
+++ b/net/ipv6/ila/ila_lwt.c
@@ -144,8 +144,7 @@ nla_put_failure:
144 144
145static int ila_encap_nlsize(struct lwtunnel_state *lwtstate) 145static int ila_encap_nlsize(struct lwtunnel_state *lwtstate)
146{ 146{
147 /* No encapsulation overhead */ 147 return nla_total_size(sizeof(u64)); /* ILA_ATTR_LOCATOR */
148 return 0;
149} 148}
150 149
151static int ila_encap_cmp(struct lwtunnel_state *a, struct lwtunnel_state *b) 150static int ila_encap_cmp(struct lwtunnel_state *a, struct lwtunnel_state *b)
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index afca2eb4dfa7..6edfa9980314 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -1376,9 +1376,9 @@ static int l2tp_tunnel_sock_create(struct net *net,
1376 memcpy(&udp_conf.peer_ip6, cfg->peer_ip6, 1376 memcpy(&udp_conf.peer_ip6, cfg->peer_ip6,
1377 sizeof(udp_conf.peer_ip6)); 1377 sizeof(udp_conf.peer_ip6));
1378 udp_conf.use_udp6_tx_checksums = 1378 udp_conf.use_udp6_tx_checksums =
1379 cfg->udp6_zero_tx_checksums; 1379 ! cfg->udp6_zero_tx_checksums;
1380 udp_conf.use_udp6_rx_checksums = 1380 udp_conf.use_udp6_rx_checksums =
1381 cfg->udp6_zero_rx_checksums; 1381 ! cfg->udp6_zero_rx_checksums;
1382 } else 1382 } else
1383#endif 1383#endif
1384 { 1384 {
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 6a33f0b4d839..c59af3eb9fa4 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -1761,7 +1761,7 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
1761 1761
1762 ret = dev_alloc_name(ndev, ndev->name); 1762 ret = dev_alloc_name(ndev, ndev->name);
1763 if (ret < 0) { 1763 if (ret < 0) {
1764 free_netdev(ndev); 1764 ieee80211_if_free(ndev);
1765 return ret; 1765 return ret;
1766 } 1766 }
1767 1767
@@ -1847,7 +1847,7 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
1847 1847
1848 ret = register_netdevice(ndev); 1848 ret = register_netdevice(ndev);
1849 if (ret) { 1849 if (ret) {
1850 free_netdev(ndev); 1850 ieee80211_if_free(ndev);
1851 return ret; 1851 return ret;
1852 } 1852 }
1853 } 1853 }
diff --git a/net/rds/tcp.c b/net/rds/tcp.c
index 61ed2a8764ba..86187dad1440 100644
--- a/net/rds/tcp.c
+++ b/net/rds/tcp.c
@@ -127,7 +127,7 @@ void rds_tcp_restore_callbacks(struct socket *sock,
127 127
128/* 128/*
129 * This is the only path that sets tc->t_sock. Send and receive trust that 129 * This is the only path that sets tc->t_sock. Send and receive trust that
130 * it is set. The RDS_CONN_CONNECTED bit protects those paths from being 130 * it is set. The RDS_CONN_UP bit protects those paths from being
131 * called while it isn't set. 131 * called while it isn't set.
132 */ 132 */
133void rds_tcp_set_callbacks(struct socket *sock, struct rds_connection *conn) 133void rds_tcp_set_callbacks(struct socket *sock, struct rds_connection *conn)
@@ -216,6 +216,7 @@ static int rds_tcp_conn_alloc(struct rds_connection *conn, gfp_t gfp)
216 if (!tc) 216 if (!tc)
217 return -ENOMEM; 217 return -ENOMEM;
218 218
219 mutex_init(&tc->t_conn_lock);
219 tc->t_sock = NULL; 220 tc->t_sock = NULL;
220 tc->t_tinc = NULL; 221 tc->t_tinc = NULL;
221 tc->t_tinc_hdr_rem = sizeof(struct rds_header); 222 tc->t_tinc_hdr_rem = sizeof(struct rds_header);
diff --git a/net/rds/tcp.h b/net/rds/tcp.h
index 64f873c0c6b6..41c228300525 100644
--- a/net/rds/tcp.h
+++ b/net/rds/tcp.h
@@ -12,6 +12,10 @@ struct rds_tcp_connection {
12 12
13 struct list_head t_tcp_node; 13 struct list_head t_tcp_node;
14 struct rds_connection *conn; 14 struct rds_connection *conn;
15 /* t_conn_lock synchronizes the connection establishment between
16 * rds_tcp_accept_one and rds_tcp_conn_connect
17 */
18 struct mutex t_conn_lock;
15 struct socket *t_sock; 19 struct socket *t_sock;
16 void *t_orig_write_space; 20 void *t_orig_write_space;
17 void *t_orig_data_ready; 21 void *t_orig_data_ready;
diff --git a/net/rds/tcp_connect.c b/net/rds/tcp_connect.c
index 5cb16875c460..49a3fcfed360 100644
--- a/net/rds/tcp_connect.c
+++ b/net/rds/tcp_connect.c
@@ -78,7 +78,14 @@ int rds_tcp_conn_connect(struct rds_connection *conn)
78 struct socket *sock = NULL; 78 struct socket *sock = NULL;
79 struct sockaddr_in src, dest; 79 struct sockaddr_in src, dest;
80 int ret; 80 int ret;
81 struct rds_tcp_connection *tc = conn->c_transport_data;
82
83 mutex_lock(&tc->t_conn_lock);
81 84
85 if (rds_conn_up(conn)) {
86 mutex_unlock(&tc->t_conn_lock);
87 return 0;
88 }
82 ret = sock_create_kern(rds_conn_net(conn), PF_INET, 89 ret = sock_create_kern(rds_conn_net(conn), PF_INET,
83 SOCK_STREAM, IPPROTO_TCP, &sock); 90 SOCK_STREAM, IPPROTO_TCP, &sock);
84 if (ret < 0) 91 if (ret < 0)
@@ -120,6 +127,7 @@ int rds_tcp_conn_connect(struct rds_connection *conn)
120 } 127 }
121 128
122out: 129out:
130 mutex_unlock(&tc->t_conn_lock);
123 if (sock) 131 if (sock)
124 sock_release(sock); 132 sock_release(sock);
125 return ret; 133 return ret;
diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c
index 0936a4a32b47..be263cdf268b 100644
--- a/net/rds/tcp_listen.c
+++ b/net/rds/tcp_listen.c
@@ -76,7 +76,9 @@ int rds_tcp_accept_one(struct socket *sock)
76 struct rds_connection *conn; 76 struct rds_connection *conn;
77 int ret; 77 int ret;
78 struct inet_sock *inet; 78 struct inet_sock *inet;
79 struct rds_tcp_connection *rs_tcp; 79 struct rds_tcp_connection *rs_tcp = NULL;
80 int conn_state;
81 struct sock *nsk;
80 82
81 ret = sock_create_kern(sock_net(sock->sk), sock->sk->sk_family, 83 ret = sock_create_kern(sock_net(sock->sk), sock->sk->sk_family,
82 sock->sk->sk_type, sock->sk->sk_protocol, 84 sock->sk->sk_type, sock->sk->sk_protocol,
@@ -115,28 +117,44 @@ int rds_tcp_accept_one(struct socket *sock)
115 * rds_tcp_state_change() will do that cleanup 117 * rds_tcp_state_change() will do that cleanup
116 */ 118 */
117 rs_tcp = (struct rds_tcp_connection *)conn->c_transport_data; 119 rs_tcp = (struct rds_tcp_connection *)conn->c_transport_data;
118 if (rs_tcp->t_sock &&
119 ntohl(inet->inet_saddr) < ntohl(inet->inet_daddr)) {
120 struct sock *nsk = new_sock->sk;
121
122 nsk->sk_user_data = NULL;
123 nsk->sk_prot->disconnect(nsk, 0);
124 tcp_done(nsk);
125 new_sock = NULL;
126 ret = 0;
127 goto out;
128 } else if (rs_tcp->t_sock) {
129 rds_tcp_restore_callbacks(rs_tcp->t_sock, rs_tcp);
130 conn->c_outgoing = 0;
131 }
132
133 rds_conn_transition(conn, RDS_CONN_DOWN, RDS_CONN_CONNECTING); 120 rds_conn_transition(conn, RDS_CONN_DOWN, RDS_CONN_CONNECTING);
121 mutex_lock(&rs_tcp->t_conn_lock);
122 conn_state = rds_conn_state(conn);
123 if (conn_state != RDS_CONN_CONNECTING && conn_state != RDS_CONN_UP)
124 goto rst_nsk;
125 if (rs_tcp->t_sock) {
126 /* Need to resolve a duelling SYN between peers.
127 * We have an outstanding SYN to this peer, which may
128 * potentially have transitioned to the RDS_CONN_UP state,
129 * so we must quiesce any send threads before resetting
130 * c_transport_data.
131 */
132 wait_event(conn->c_waitq,
133 !test_bit(RDS_IN_XMIT, &conn->c_flags));
134 if (ntohl(inet->inet_saddr) < ntohl(inet->inet_daddr)) {
135 goto rst_nsk;
136 } else if (rs_tcp->t_sock) {
137 rds_tcp_restore_callbacks(rs_tcp->t_sock, rs_tcp);
138 conn->c_outgoing = 0;
139 }
140 }
134 rds_tcp_set_callbacks(new_sock, conn); 141 rds_tcp_set_callbacks(new_sock, conn);
135 rds_connect_complete(conn); 142 rds_connect_complete(conn); /* marks RDS_CONN_UP */
143 new_sock = NULL;
144 ret = 0;
145 goto out;
146rst_nsk:
147 /* reset the newly returned accept sock and bail */
148 nsk = new_sock->sk;
149 rds_tcp_stats_inc(s_tcp_listen_closed_stale);
150 nsk->sk_user_data = NULL;
151 nsk->sk_prot->disconnect(nsk, 0);
152 tcp_done(nsk);
136 new_sock = NULL; 153 new_sock = NULL;
137 ret = 0; 154 ret = 0;
138
139out: 155out:
156 if (rs_tcp)
157 mutex_unlock(&rs_tcp->t_conn_lock);
140 if (new_sock) 158 if (new_sock)
141 sock_release(new_sock); 159 sock_release(new_sock);
142 return ret; 160 return ret;
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 491d6fd6430c..205bed00dd34 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -395,6 +395,25 @@ static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
395 sch->q.qlen++; 395 sch->q.qlen++;
396} 396}
397 397
398/* netem can't properly corrupt a megapacket (like we get from GSO), so instead
399 * when we statistically choose to corrupt one, we instead segment it, returning
400 * the first packet to be corrupted, and re-enqueue the remaining frames
401 */
402static struct sk_buff *netem_segment(struct sk_buff *skb, struct Qdisc *sch)
403{
404 struct sk_buff *segs;
405 netdev_features_t features = netif_skb_features(skb);
406
407 segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
408
409 if (IS_ERR_OR_NULL(segs)) {
410 qdisc_reshape_fail(skb, sch);
411 return NULL;
412 }
413 consume_skb(skb);
414 return segs;
415}
416
398/* 417/*
399 * Insert one skb into qdisc. 418 * Insert one skb into qdisc.
400 * Note: parent depends on return value to account for queue length. 419 * Note: parent depends on return value to account for queue length.
@@ -407,7 +426,11 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
407 /* We don't fill cb now as skb_unshare() may invalidate it */ 426 /* We don't fill cb now as skb_unshare() may invalidate it */
408 struct netem_skb_cb *cb; 427 struct netem_skb_cb *cb;
409 struct sk_buff *skb2; 428 struct sk_buff *skb2;
429 struct sk_buff *segs = NULL;
430 unsigned int len = 0, last_len, prev_len = qdisc_pkt_len(skb);
431 int nb = 0;
410 int count = 1; 432 int count = 1;
433 int rc = NET_XMIT_SUCCESS;
411 434
412 /* Random duplication */ 435 /* Random duplication */
413 if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor)) 436 if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
@@ -453,10 +476,23 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
453 * do it now in software before we mangle it. 476 * do it now in software before we mangle it.
454 */ 477 */
455 if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) { 478 if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
479 if (skb_is_gso(skb)) {
480 segs = netem_segment(skb, sch);
481 if (!segs)
482 return NET_XMIT_DROP;
483 } else {
484 segs = skb;
485 }
486
487 skb = segs;
488 segs = segs->next;
489
456 if (!(skb = skb_unshare(skb, GFP_ATOMIC)) || 490 if (!(skb = skb_unshare(skb, GFP_ATOMIC)) ||
457 (skb->ip_summed == CHECKSUM_PARTIAL && 491 (skb->ip_summed == CHECKSUM_PARTIAL &&
458 skb_checksum_help(skb))) 492 skb_checksum_help(skb))) {
459 return qdisc_drop(skb, sch); 493 rc = qdisc_drop(skb, sch);
494 goto finish_segs;
495 }
460 496
461 skb->data[prandom_u32() % skb_headlen(skb)] ^= 497 skb->data[prandom_u32() % skb_headlen(skb)] ^=
462 1<<(prandom_u32() % 8); 498 1<<(prandom_u32() % 8);
@@ -516,6 +552,27 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
516 sch->qstats.requeues++; 552 sch->qstats.requeues++;
517 } 553 }
518 554
555finish_segs:
556 if (segs) {
557 while (segs) {
558 skb2 = segs->next;
559 segs->next = NULL;
560 qdisc_skb_cb(segs)->pkt_len = segs->len;
561 last_len = segs->len;
562 rc = qdisc_enqueue(segs, sch);
563 if (rc != NET_XMIT_SUCCESS) {
564 if (net_xmit_drop_count(rc))
565 qdisc_qstats_drop(sch);
566 } else {
567 nb++;
568 len += last_len;
569 }
570 segs = skb2;
571 }
572 sch->q.qlen += nb;
573 if (nb > 1)
574 qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len);
575 }
519 return NET_XMIT_SUCCESS; 576 return NET_XMIT_SUCCESS;
520} 577}
521 578
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 29cc85319327..d903f560e2fd 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -1469,6 +1469,7 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b)
1469 int bearer_id = b->identity; 1469 int bearer_id = b->identity;
1470 struct tipc_link_entry *le; 1470 struct tipc_link_entry *le;
1471 u16 bc_ack = msg_bcast_ack(hdr); 1471 u16 bc_ack = msg_bcast_ack(hdr);
1472 u32 self = tipc_own_addr(net);
1472 int rc = 0; 1473 int rc = 0;
1473 1474
1474 __skb_queue_head_init(&xmitq); 1475 __skb_queue_head_init(&xmitq);
@@ -1485,6 +1486,10 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b)
1485 return tipc_node_bc_rcv(net, skb, bearer_id); 1486 return tipc_node_bc_rcv(net, skb, bearer_id);
1486 } 1487 }
1487 1488
1489 /* Discard unicast link messages destined for another node */
1490 if (unlikely(!msg_short(hdr) && (msg_destnode(hdr) != self)))
1491 goto discard;
1492
1488 /* Locate neighboring node that sent packet */ 1493 /* Locate neighboring node that sent packet */
1489 n = tipc_node_find(net, msg_prevnode(hdr)); 1494 n = tipc_node_find(net, msg_prevnode(hdr));
1490 if (unlikely(!n)) 1495 if (unlikely(!n))
diff --git a/samples/bpf/trace_output_kern.c b/samples/bpf/trace_output_kern.c
index 8d8d1ec429eb..9b96f4fb8cea 100644
--- a/samples/bpf/trace_output_kern.c
+++ b/samples/bpf/trace_output_kern.c
@@ -18,7 +18,6 @@ int bpf_prog1(struct pt_regs *ctx)
18 u64 cookie; 18 u64 cookie;
19 } data; 19 } data;
20 20
21 memset(&data, 0, sizeof(data));
22 data.pid = bpf_get_current_pid_tgid(); 21 data.pid = bpf_get_current_pid_tgid();
23 data.cookie = 0x12345678; 22 data.cookie = 0x12345678;
24 23
diff --git a/sound/hda/ext/hdac_ext_stream.c b/sound/hda/ext/hdac_ext_stream.c
index 023cc4cad5c1..626f3bb24c55 100644
--- a/sound/hda/ext/hdac_ext_stream.c
+++ b/sound/hda/ext/hdac_ext_stream.c
@@ -104,12 +104,11 @@ EXPORT_SYMBOL_GPL(snd_hdac_ext_stream_init_all);
104 */ 104 */
105void snd_hdac_stream_free_all(struct hdac_ext_bus *ebus) 105void snd_hdac_stream_free_all(struct hdac_ext_bus *ebus)
106{ 106{
107 struct hdac_stream *s; 107 struct hdac_stream *s, *_s;
108 struct hdac_ext_stream *stream; 108 struct hdac_ext_stream *stream;
109 struct hdac_bus *bus = ebus_to_hbus(ebus); 109 struct hdac_bus *bus = ebus_to_hbus(ebus);
110 110
111 while (!list_empty(&bus->stream_list)) { 111 list_for_each_entry_safe(s, _s, &bus->stream_list, list) {
112 s = list_first_entry(&bus->stream_list, struct hdac_stream, list);
113 stream = stream_to_hdac_ext_stream(s); 112 stream = stream_to_hdac_ext_stream(s);
114 snd_hdac_ext_stream_decouple(ebus, stream, false); 113 snd_hdac_ext_stream_decouple(ebus, stream, false);
115 list_del(&s->list); 114 list_del(&s->list);
diff --git a/sound/hda/hdac_i915.c b/sound/hda/hdac_i915.c
index 54babe1c0b16..607bbeaebddf 100644
--- a/sound/hda/hdac_i915.c
+++ b/sound/hda/hdac_i915.c
@@ -20,6 +20,7 @@
20#include <sound/core.h> 20#include <sound/core.h>
21#include <sound/hdaudio.h> 21#include <sound/hdaudio.h>
22#include <sound/hda_i915.h> 22#include <sound/hda_i915.h>
23#include <sound/hda_register.h>
23 24
24static struct i915_audio_component *hdac_acomp; 25static struct i915_audio_component *hdac_acomp;
25 26
@@ -97,26 +98,65 @@ int snd_hdac_display_power(struct hdac_bus *bus, bool enable)
97} 98}
98EXPORT_SYMBOL_GPL(snd_hdac_display_power); 99EXPORT_SYMBOL_GPL(snd_hdac_display_power);
99 100
101#define CONTROLLER_IN_GPU(pci) (((pci)->device == 0x0a0c) || \
102 ((pci)->device == 0x0c0c) || \
103 ((pci)->device == 0x0d0c) || \
104 ((pci)->device == 0x160c))
105
100/** 106/**
101 * snd_hdac_get_display_clk - Get CDCLK in kHz 107 * snd_hdac_i915_set_bclk - Reprogram BCLK for HSW/BDW
102 * @bus: HDA core bus 108 * @bus: HDA core bus
103 * 109 *
104 * This function is supposed to be used only by a HD-audio controller 110 * Intel HSW/BDW display HDA controller is in GPU. Both its power and link BCLK
105 * driver that needs the interaction with i915 graphics. 111 * depends on GPU. Two Extended Mode registers EM4 (M value) and EM5 (N Value)
112 * are used to convert CDClk (Core Display Clock) to 24MHz BCLK:
113 * BCLK = CDCLK * M / N
114 * The values will be lost when the display power well is disabled and need to
115 * be restored to avoid abnormal playback speed.
106 * 116 *
107 * This function queries CDCLK value in kHz from the graphics driver and 117 * Call this function at initializing and changing power well, as well as
108 * returns the value. A negative code is returned in error. 118 * at ELD notifier for the hotplug.
109 */ 119 */
110int snd_hdac_get_display_clk(struct hdac_bus *bus) 120void snd_hdac_i915_set_bclk(struct hdac_bus *bus)
111{ 121{
112 struct i915_audio_component *acomp = bus->audio_component; 122 struct i915_audio_component *acomp = bus->audio_component;
123 struct pci_dev *pci = to_pci_dev(bus->dev);
124 int cdclk_freq;
125 unsigned int bclk_m, bclk_n;
126
127 if (!acomp || !acomp->ops || !acomp->ops->get_cdclk_freq)
128 return; /* only for i915 binding */
129 if (!CONTROLLER_IN_GPU(pci))
130 return; /* only HSW/BDW */
131
132 cdclk_freq = acomp->ops->get_cdclk_freq(acomp->dev);
133 switch (cdclk_freq) {
134 case 337500:
135 bclk_m = 16;
136 bclk_n = 225;
137 break;
138
139 case 450000:
140 default: /* default CDCLK 450MHz */
141 bclk_m = 4;
142 bclk_n = 75;
143 break;
144
145 case 540000:
146 bclk_m = 4;
147 bclk_n = 90;
148 break;
149
150 case 675000:
151 bclk_m = 8;
152 bclk_n = 225;
153 break;
154 }
113 155
114 if (!acomp || !acomp->ops) 156 snd_hdac_chip_writew(bus, HSW_EM4, bclk_m);
115 return -ENODEV; 157 snd_hdac_chip_writew(bus, HSW_EM5, bclk_n);
116
117 return acomp->ops->get_cdclk_freq(acomp->dev);
118} 158}
119EXPORT_SYMBOL_GPL(snd_hdac_get_display_clk); 159EXPORT_SYMBOL_GPL(snd_hdac_i915_set_bclk);
120 160
121/* There is a fixed mapping between audio pin node and display port 161/* There is a fixed mapping between audio pin node and display port
122 * on current Intel platforms: 162 * on current Intel platforms:
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 637b8a0e2a91..9a0d1445ca5c 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -857,50 +857,6 @@ static int param_set_xint(const char *val, const struct kernel_param *kp)
857#define azx_del_card_list(chip) /* NOP */ 857#define azx_del_card_list(chip) /* NOP */
858#endif /* CONFIG_PM */ 858#endif /* CONFIG_PM */
859 859
860/* Intel HSW/BDW display HDA controller is in GPU. Both its power and link BCLK
861 * depends on GPU. Two Extended Mode registers EM4 (M value) and EM5 (N Value)
862 * are used to convert CDClk (Core Display Clock) to 24MHz BCLK:
863 * BCLK = CDCLK * M / N
864 * The values will be lost when the display power well is disabled and need to
865 * be restored to avoid abnormal playback speed.
866 */
867static void haswell_set_bclk(struct hda_intel *hda)
868{
869 struct azx *chip = &hda->chip;
870 int cdclk_freq;
871 unsigned int bclk_m, bclk_n;
872
873 if (!hda->need_i915_power)
874 return;
875
876 cdclk_freq = snd_hdac_get_display_clk(azx_bus(chip));
877 switch (cdclk_freq) {
878 case 337500:
879 bclk_m = 16;
880 bclk_n = 225;
881 break;
882
883 case 450000:
884 default: /* default CDCLK 450MHz */
885 bclk_m = 4;
886 bclk_n = 75;
887 break;
888
889 case 540000:
890 bclk_m = 4;
891 bclk_n = 90;
892 break;
893
894 case 675000:
895 bclk_m = 8;
896 bclk_n = 225;
897 break;
898 }
899
900 azx_writew(chip, HSW_EM4, bclk_m);
901 azx_writew(chip, HSW_EM5, bclk_n);
902}
903
904#if defined(CONFIG_PM_SLEEP) || defined(SUPPORT_VGA_SWITCHEROO) 860#if defined(CONFIG_PM_SLEEP) || defined(SUPPORT_VGA_SWITCHEROO)
905/* 861/*
906 * power management 862 * power management
@@ -958,7 +914,7 @@ static int azx_resume(struct device *dev)
958 if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL 914 if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL
959 && hda->need_i915_power) { 915 && hda->need_i915_power) {
960 snd_hdac_display_power(azx_bus(chip), true); 916 snd_hdac_display_power(azx_bus(chip), true);
961 haswell_set_bclk(hda); 917 snd_hdac_i915_set_bclk(azx_bus(chip));
962 } 918 }
963 if (chip->msi) 919 if (chip->msi)
964 if (pci_enable_msi(pci) < 0) 920 if (pci_enable_msi(pci) < 0)
@@ -1058,7 +1014,7 @@ static int azx_runtime_resume(struct device *dev)
1058 bus = azx_bus(chip); 1014 bus = azx_bus(chip);
1059 if (hda->need_i915_power) { 1015 if (hda->need_i915_power) {
1060 snd_hdac_display_power(bus, true); 1016 snd_hdac_display_power(bus, true);
1061 haswell_set_bclk(hda); 1017 snd_hdac_i915_set_bclk(bus);
1062 } else { 1018 } else {
1063 /* toggle codec wakeup bit for STATESTS read */ 1019 /* toggle codec wakeup bit for STATESTS read */
1064 snd_hdac_set_codec_wakeup(bus, true); 1020 snd_hdac_set_codec_wakeup(bus, true);
@@ -1796,12 +1752,8 @@ static int azx_first_init(struct azx *chip)
1796 /* initialize chip */ 1752 /* initialize chip */
1797 azx_init_pci(chip); 1753 azx_init_pci(chip);
1798 1754
1799 if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) { 1755 if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL)
1800 struct hda_intel *hda; 1756 snd_hdac_i915_set_bclk(bus);
1801
1802 hda = container_of(chip, struct hda_intel, chip);
1803 haswell_set_bclk(hda);
1804 }
1805 1757
1806 hda_intel_init_chip(chip, (probe_only[dev] & 2) == 0); 1758 hda_intel_init_chip(chip, (probe_only[dev] & 2) == 0);
1807 1759
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 40933aa33afe..1483f85999ec 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -2232,6 +2232,7 @@ static void intel_pin_eld_notify(void *audio_ptr, int port)
2232 if (atomic_read(&(codec)->core.in_pm)) 2232 if (atomic_read(&(codec)->core.in_pm))
2233 return; 2233 return;
2234 2234
2235 snd_hdac_i915_set_bclk(&codec->bus->core);
2235 check_presence_and_report(codec, pin_nid); 2236 check_presence_and_report(codec, pin_nid);
2236} 2237}
2237 2238
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 810bceee4fd2..ac4490a96863 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -5584,6 +5584,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
5584 SND_PCI_QUIRK(0x17aa, 0x5034, "Thinkpad T450", ALC292_FIXUP_TPT440_DOCK), 5584 SND_PCI_QUIRK(0x17aa, 0x5034, "Thinkpad T450", ALC292_FIXUP_TPT440_DOCK),
5585 SND_PCI_QUIRK(0x17aa, 0x5036, "Thinkpad T450s", ALC292_FIXUP_TPT440_DOCK), 5585 SND_PCI_QUIRK(0x17aa, 0x5036, "Thinkpad T450s", ALC292_FIXUP_TPT440_DOCK),
5586 SND_PCI_QUIRK(0x17aa, 0x503c, "Thinkpad L450", ALC292_FIXUP_TPT440_DOCK), 5586 SND_PCI_QUIRK(0x17aa, 0x503c, "Thinkpad L450", ALC292_FIXUP_TPT440_DOCK),
5587 SND_PCI_QUIRK(0x17aa, 0x504a, "ThinkPad X260", ALC292_FIXUP_TPT440_DOCK),
5587 SND_PCI_QUIRK(0x17aa, 0x504b, "Thinkpad", ALC293_FIXUP_LENOVO_SPK_NOISE), 5588 SND_PCI_QUIRK(0x17aa, 0x504b, "Thinkpad", ALC293_FIXUP_LENOVO_SPK_NOISE),
5588 SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), 5589 SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
5589 SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K), 5590 SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
index 649e92a252ae..7ef3a0c16478 100644
--- a/sound/soc/codecs/Kconfig
+++ b/sound/soc/codecs/Kconfig
@@ -629,6 +629,7 @@ config SND_SOC_RT5514
629 629
630config SND_SOC_RT5616 630config SND_SOC_RT5616
631 tristate "Realtek RT5616 CODEC" 631 tristate "Realtek RT5616 CODEC"
632 depends on I2C
632 633
633config SND_SOC_RT5631 634config SND_SOC_RT5631
634 tristate "Realtek ALC5631/RT5631 CODEC" 635 tristate "Realtek ALC5631/RT5631 CODEC"
diff --git a/sound/soc/codecs/arizona.c b/sound/soc/codecs/arizona.c
index 92d22a018d68..83959312f7a0 100644
--- a/sound/soc/codecs/arizona.c
+++ b/sound/soc/codecs/arizona.c
@@ -249,6 +249,18 @@ int arizona_init_spk(struct snd_soc_codec *codec)
249} 249}
250EXPORT_SYMBOL_GPL(arizona_init_spk); 250EXPORT_SYMBOL_GPL(arizona_init_spk);
251 251
252int arizona_free_spk(struct snd_soc_codec *codec)
253{
254 struct arizona_priv *priv = snd_soc_codec_get_drvdata(codec);
255 struct arizona *arizona = priv->arizona;
256
257 arizona_free_irq(arizona, ARIZONA_IRQ_SPK_OVERHEAT_WARN, arizona);
258 arizona_free_irq(arizona, ARIZONA_IRQ_SPK_OVERHEAT, arizona);
259
260 return 0;
261}
262EXPORT_SYMBOL_GPL(arizona_free_spk);
263
252static const struct snd_soc_dapm_route arizona_mono_routes[] = { 264static const struct snd_soc_dapm_route arizona_mono_routes[] = {
253 { "OUT1R", NULL, "OUT1L" }, 265 { "OUT1R", NULL, "OUT1L" },
254 { "OUT2R", NULL, "OUT2L" }, 266 { "OUT2R", NULL, "OUT2L" },
diff --git a/sound/soc/codecs/arizona.h b/sound/soc/codecs/arizona.h
index 1ea8e4ecf8d4..ce0531b8c632 100644
--- a/sound/soc/codecs/arizona.h
+++ b/sound/soc/codecs/arizona.h
@@ -307,6 +307,8 @@ extern int arizona_init_spk(struct snd_soc_codec *codec);
307extern int arizona_init_gpio(struct snd_soc_codec *codec); 307extern int arizona_init_gpio(struct snd_soc_codec *codec);
308extern int arizona_init_mono(struct snd_soc_codec *codec); 308extern int arizona_init_mono(struct snd_soc_codec *codec);
309 309
310extern int arizona_free_spk(struct snd_soc_codec *codec);
311
310extern int arizona_init_dai(struct arizona_priv *priv, int dai); 312extern int arizona_init_dai(struct arizona_priv *priv, int dai);
311 313
312int arizona_set_output_mode(struct snd_soc_codec *codec, int output, 314int arizona_set_output_mode(struct snd_soc_codec *codec, int output,
diff --git a/sound/soc/codecs/cs35l32.c b/sound/soc/codecs/cs35l32.c
index 44c30fe3e315..287d13740be4 100644
--- a/sound/soc/codecs/cs35l32.c
+++ b/sound/soc/codecs/cs35l32.c
@@ -274,7 +274,9 @@ static int cs35l32_handle_of_data(struct i2c_client *i2c_client,
274 if (of_property_read_u32(np, "cirrus,sdout-share", &val) >= 0) 274 if (of_property_read_u32(np, "cirrus,sdout-share", &val) >= 0)
275 pdata->sdout_share = val; 275 pdata->sdout_share = val;
276 276
277 of_property_read_u32(np, "cirrus,boost-manager", &val); 277 if (of_property_read_u32(np, "cirrus,boost-manager", &val))
278 val = -1u;
279
278 switch (val) { 280 switch (val) {
279 case CS35L32_BOOST_MGR_AUTO: 281 case CS35L32_BOOST_MGR_AUTO:
280 case CS35L32_BOOST_MGR_AUTO_AUDIO: 282 case CS35L32_BOOST_MGR_AUTO_AUDIO:
@@ -282,13 +284,15 @@ static int cs35l32_handle_of_data(struct i2c_client *i2c_client,
282 case CS35L32_BOOST_MGR_FIXED: 284 case CS35L32_BOOST_MGR_FIXED:
283 pdata->boost_mng = val; 285 pdata->boost_mng = val;
284 break; 286 break;
287 case -1u:
285 default: 288 default:
286 dev_err(&i2c_client->dev, 289 dev_err(&i2c_client->dev,
287 "Wrong cirrus,boost-manager DT value %d\n", val); 290 "Wrong cirrus,boost-manager DT value %d\n", val);
288 pdata->boost_mng = CS35L32_BOOST_MGR_BYPASS; 291 pdata->boost_mng = CS35L32_BOOST_MGR_BYPASS;
289 } 292 }
290 293
291 of_property_read_u32(np, "cirrus,sdout-datacfg", &val); 294 if (of_property_read_u32(np, "cirrus,sdout-datacfg", &val))
295 val = -1u;
292 switch (val) { 296 switch (val) {
293 case CS35L32_DATA_CFG_LR_VP: 297 case CS35L32_DATA_CFG_LR_VP:
294 case CS35L32_DATA_CFG_LR_STAT: 298 case CS35L32_DATA_CFG_LR_STAT:
@@ -296,13 +300,15 @@ static int cs35l32_handle_of_data(struct i2c_client *i2c_client,
296 case CS35L32_DATA_CFG_LR_VPSTAT: 300 case CS35L32_DATA_CFG_LR_VPSTAT:
297 pdata->sdout_datacfg = val; 301 pdata->sdout_datacfg = val;
298 break; 302 break;
303 case -1u:
299 default: 304 default:
300 dev_err(&i2c_client->dev, 305 dev_err(&i2c_client->dev,
301 "Wrong cirrus,sdout-datacfg DT value %d\n", val); 306 "Wrong cirrus,sdout-datacfg DT value %d\n", val);
302 pdata->sdout_datacfg = CS35L32_DATA_CFG_LR; 307 pdata->sdout_datacfg = CS35L32_DATA_CFG_LR;
303 } 308 }
304 309
305 of_property_read_u32(np, "cirrus,battery-threshold", &val); 310 if (of_property_read_u32(np, "cirrus,battery-threshold", &val))
311 val = -1u;
306 switch (val) { 312 switch (val) {
307 case CS35L32_BATT_THRESH_3_1V: 313 case CS35L32_BATT_THRESH_3_1V:
308 case CS35L32_BATT_THRESH_3_2V: 314 case CS35L32_BATT_THRESH_3_2V:
@@ -310,13 +316,15 @@ static int cs35l32_handle_of_data(struct i2c_client *i2c_client,
310 case CS35L32_BATT_THRESH_3_4V: 316 case CS35L32_BATT_THRESH_3_4V:
311 pdata->batt_thresh = val; 317 pdata->batt_thresh = val;
312 break; 318 break;
319 case -1u:
313 default: 320 default:
314 dev_err(&i2c_client->dev, 321 dev_err(&i2c_client->dev,
315 "Wrong cirrus,battery-threshold DT value %d\n", val); 322 "Wrong cirrus,battery-threshold DT value %d\n", val);
316 pdata->batt_thresh = CS35L32_BATT_THRESH_3_3V; 323 pdata->batt_thresh = CS35L32_BATT_THRESH_3_3V;
317 } 324 }
318 325
319 of_property_read_u32(np, "cirrus,battery-recovery", &val); 326 if (of_property_read_u32(np, "cirrus,battery-recovery", &val))
327 val = -1u;
320 switch (val) { 328 switch (val) {
321 case CS35L32_BATT_RECOV_3_1V: 329 case CS35L32_BATT_RECOV_3_1V:
322 case CS35L32_BATT_RECOV_3_2V: 330 case CS35L32_BATT_RECOV_3_2V:
@@ -326,6 +334,7 @@ static int cs35l32_handle_of_data(struct i2c_client *i2c_client,
326 case CS35L32_BATT_RECOV_3_6V: 334 case CS35L32_BATT_RECOV_3_6V:
327 pdata->batt_recov = val; 335 pdata->batt_recov = val;
328 break; 336 break;
337 case -1u:
329 default: 338 default:
330 dev_err(&i2c_client->dev, 339 dev_err(&i2c_client->dev,
331 "Wrong cirrus,battery-recovery DT value %d\n", val); 340 "Wrong cirrus,battery-recovery DT value %d\n", val);
diff --git a/sound/soc/codecs/cs47l24.c b/sound/soc/codecs/cs47l24.c
index 576087bda330..00e9b6fc1b5c 100644
--- a/sound/soc/codecs/cs47l24.c
+++ b/sound/soc/codecs/cs47l24.c
@@ -1108,6 +1108,9 @@ static int cs47l24_codec_remove(struct snd_soc_codec *codec)
1108 priv->core.arizona->dapm = NULL; 1108 priv->core.arizona->dapm = NULL;
1109 1109
1110 arizona_free_irq(arizona, ARIZONA_IRQ_DSP_IRQ1, priv); 1110 arizona_free_irq(arizona, ARIZONA_IRQ_DSP_IRQ1, priv);
1111
1112 arizona_free_spk(codec);
1113
1111 return 0; 1114 return 0;
1112} 1115}
1113 1116
diff --git a/sound/soc/codecs/hdac_hdmi.c b/sound/soc/codecs/hdac_hdmi.c
index 26f9459cb3bc..aaa038ffc8a5 100644
--- a/sound/soc/codecs/hdac_hdmi.c
+++ b/sound/soc/codecs/hdac_hdmi.c
@@ -1420,32 +1420,39 @@ static int hdmi_codec_remove(struct snd_soc_codec *codec)
1420} 1420}
1421 1421
1422#ifdef CONFIG_PM 1422#ifdef CONFIG_PM
1423static int hdmi_codec_resume(struct snd_soc_codec *codec) 1423static int hdmi_codec_prepare(struct device *dev)
1424{ 1424{
1425 struct hdac_ext_device *edev = snd_soc_codec_get_drvdata(codec); 1425 struct hdac_ext_device *edev = to_hda_ext_device(dev);
1426 struct hdac_device *hdac = &edev->hdac;
1427
1428 pm_runtime_get_sync(&edev->hdac.dev);
1429
1430 /*
1431 * Power down afg.
1432 * codec_read is preferred over codec_write to set the power state.
1433 * This way verb is send to set the power state and response
1434 * is received. So setting power state is ensured without using loop
1435 * to read the state.
1436 */
1437 snd_hdac_codec_read(hdac, hdac->afg, 0, AC_VERB_SET_POWER_STATE,
1438 AC_PWRST_D3);
1439
1440 return 0;
1441}
1442
1443static void hdmi_codec_complete(struct device *dev)
1444{
1445 struct hdac_ext_device *edev = to_hda_ext_device(dev);
1426 struct hdac_hdmi_priv *hdmi = edev->private_data; 1446 struct hdac_hdmi_priv *hdmi = edev->private_data;
1427 struct hdac_hdmi_pin *pin; 1447 struct hdac_hdmi_pin *pin;
1428 struct hdac_device *hdac = &edev->hdac; 1448 struct hdac_device *hdac = &edev->hdac;
1429 struct hdac_bus *bus = hdac->bus;
1430 int err;
1431 unsigned long timeout;
1432
1433 hdac_hdmi_skl_enable_all_pins(&edev->hdac);
1434 hdac_hdmi_skl_enable_dp12(&edev->hdac);
1435 1449
1436 /* Power up afg */ 1450 /* Power up afg */
1437 if (!snd_hdac_check_power_state(hdac, hdac->afg, AC_PWRST_D0)) { 1451 snd_hdac_codec_read(hdac, hdac->afg, 0, AC_VERB_SET_POWER_STATE,
1438 1452 AC_PWRST_D0);
1439 snd_hdac_codec_write(hdac, hdac->afg, 0,
1440 AC_VERB_SET_POWER_STATE, AC_PWRST_D0);
1441 1453
1442 /* Wait till power state is set to D0 */ 1454 hdac_hdmi_skl_enable_all_pins(&edev->hdac);
1443 timeout = jiffies + msecs_to_jiffies(1000); 1455 hdac_hdmi_skl_enable_dp12(&edev->hdac);
1444 while (!snd_hdac_check_power_state(hdac, hdac->afg, AC_PWRST_D0)
1445 && time_before(jiffies, timeout)) {
1446 msleep(50);
1447 }
1448 }
1449 1456
1450 /* 1457 /*
1451 * As the ELD notify callback request is not entertained while the 1458 * As the ELD notify callback request is not entertained while the
@@ -1455,28 +1462,16 @@ static int hdmi_codec_resume(struct snd_soc_codec *codec)
1455 list_for_each_entry(pin, &hdmi->pin_list, head) 1462 list_for_each_entry(pin, &hdmi->pin_list, head)
1456 hdac_hdmi_present_sense(pin, 1); 1463 hdac_hdmi_present_sense(pin, 1);
1457 1464
1458 /* 1465 pm_runtime_put_sync(&edev->hdac.dev);
1459 * Codec power is turned ON during controller resume.
1460 * Turn it OFF here
1461 */
1462 err = snd_hdac_display_power(bus, false);
1463 if (err < 0) {
1464 dev_err(bus->dev,
1465 "Cannot turn OFF display power on i915, err: %d\n",
1466 err);
1467 return err;
1468 }
1469
1470 return 0;
1471} 1466}
1472#else 1467#else
1473#define hdmi_codec_resume NULL 1468#define hdmi_codec_prepare NULL
1469#define hdmi_codec_complete NULL
1474#endif 1470#endif
1475 1471
1476static struct snd_soc_codec_driver hdmi_hda_codec = { 1472static struct snd_soc_codec_driver hdmi_hda_codec = {
1477 .probe = hdmi_codec_probe, 1473 .probe = hdmi_codec_probe,
1478 .remove = hdmi_codec_remove, 1474 .remove = hdmi_codec_remove,
1479 .resume = hdmi_codec_resume,
1480 .idle_bias_off = true, 1475 .idle_bias_off = true,
1481}; 1476};
1482 1477
@@ -1561,7 +1556,6 @@ static int hdac_hdmi_runtime_suspend(struct device *dev)
1561 struct hdac_ext_device *edev = to_hda_ext_device(dev); 1556 struct hdac_ext_device *edev = to_hda_ext_device(dev);
1562 struct hdac_device *hdac = &edev->hdac; 1557 struct hdac_device *hdac = &edev->hdac;
1563 struct hdac_bus *bus = hdac->bus; 1558 struct hdac_bus *bus = hdac->bus;
1564 unsigned long timeout;
1565 int err; 1559 int err;
1566 1560
1567 dev_dbg(dev, "Enter: %s\n", __func__); 1561 dev_dbg(dev, "Enter: %s\n", __func__);
@@ -1570,20 +1564,15 @@ static int hdac_hdmi_runtime_suspend(struct device *dev)
1570 if (!bus) 1564 if (!bus)
1571 return 0; 1565 return 0;
1572 1566
1573 /* Power down afg */ 1567 /*
1574 if (!snd_hdac_check_power_state(hdac, hdac->afg, AC_PWRST_D3)) { 1568 * Power down afg.
1575 snd_hdac_codec_write(hdac, hdac->afg, 0, 1569 * codec_read is preferred over codec_write to set the power state.
1576 AC_VERB_SET_POWER_STATE, AC_PWRST_D3); 1570 * This way verb is send to set the power state and response
1577 1571 * is received. So setting power state is ensured without using loop
1578 /* Wait till power state is set to D3 */ 1572 * to read the state.
1579 timeout = jiffies + msecs_to_jiffies(1000); 1573 */
1580 while (!snd_hdac_check_power_state(hdac, hdac->afg, AC_PWRST_D3) 1574 snd_hdac_codec_read(hdac, hdac->afg, 0, AC_VERB_SET_POWER_STATE,
1581 && time_before(jiffies, timeout)) { 1575 AC_PWRST_D3);
1582
1583 msleep(50);
1584 }
1585 }
1586
1587 err = snd_hdac_display_power(bus, false); 1576 err = snd_hdac_display_power(bus, false);
1588 if (err < 0) { 1577 if (err < 0) {
1589 dev_err(bus->dev, "Cannot turn on display power on i915\n"); 1578 dev_err(bus->dev, "Cannot turn on display power on i915\n");
@@ -1616,9 +1605,8 @@ static int hdac_hdmi_runtime_resume(struct device *dev)
1616 hdac_hdmi_skl_enable_dp12(&edev->hdac); 1605 hdac_hdmi_skl_enable_dp12(&edev->hdac);
1617 1606
1618 /* Power up afg */ 1607 /* Power up afg */
1619 if (!snd_hdac_check_power_state(hdac, hdac->afg, AC_PWRST_D0)) 1608 snd_hdac_codec_read(hdac, hdac->afg, 0, AC_VERB_SET_POWER_STATE,
1620 snd_hdac_codec_write(hdac, hdac->afg, 0, 1609 AC_PWRST_D0);
1621 AC_VERB_SET_POWER_STATE, AC_PWRST_D0);
1622 1610
1623 return 0; 1611 return 0;
1624} 1612}
@@ -1629,6 +1617,8 @@ static int hdac_hdmi_runtime_resume(struct device *dev)
1629 1617
1630static const struct dev_pm_ops hdac_hdmi_pm = { 1618static const struct dev_pm_ops hdac_hdmi_pm = {
1631 SET_RUNTIME_PM_OPS(hdac_hdmi_runtime_suspend, hdac_hdmi_runtime_resume, NULL) 1619 SET_RUNTIME_PM_OPS(hdac_hdmi_runtime_suspend, hdac_hdmi_runtime_resume, NULL)
1620 .prepare = hdmi_codec_prepare,
1621 .complete = hdmi_codec_complete,
1632}; 1622};
1633 1623
1634static const struct hda_device_id hdmi_list[] = { 1624static const struct hda_device_id hdmi_list[] = {
diff --git a/sound/soc/codecs/nau8825.c b/sound/soc/codecs/nau8825.c
index 1c8729984c2b..683769f0f246 100644
--- a/sound/soc/codecs/nau8825.c
+++ b/sound/soc/codecs/nau8825.c
@@ -343,9 +343,12 @@ static const struct snd_soc_dapm_widget nau8825_dapm_widgets[] = {
343 SND_SOC_DAPM_SUPPLY("ADC Power", NAU8825_REG_ANALOG_ADC_2, 6, 0, NULL, 343 SND_SOC_DAPM_SUPPLY("ADC Power", NAU8825_REG_ANALOG_ADC_2, 6, 0, NULL,
344 0), 344 0),
345 345
346 /* ADC for button press detection */ 346 /* ADC for button press detection. A dapm supply widget is used to
347 SND_SOC_DAPM_ADC("SAR", NULL, NAU8825_REG_SAR_CTRL, 347 * prevent dapm_power_widgets keeping the codec at SND_SOC_BIAS_ON
348 NAU8825_SAR_ADC_EN_SFT, 0), 348 * during suspend.
349 */
350 SND_SOC_DAPM_SUPPLY("SAR", NAU8825_REG_SAR_CTRL,
351 NAU8825_SAR_ADC_EN_SFT, 0, NULL, 0),
349 352
350 SND_SOC_DAPM_PGA_S("ADACL", 2, NAU8825_REG_RDAC, 12, 0, NULL, 0), 353 SND_SOC_DAPM_PGA_S("ADACL", 2, NAU8825_REG_RDAC, 12, 0, NULL, 0),
351 SND_SOC_DAPM_PGA_S("ADACR", 2, NAU8825_REG_RDAC, 13, 0, NULL, 0), 354 SND_SOC_DAPM_PGA_S("ADACR", 2, NAU8825_REG_RDAC, 13, 0, NULL, 0),
@@ -607,6 +610,16 @@ static bool nau8825_is_jack_inserted(struct regmap *regmap)
607 610
608static void nau8825_restart_jack_detection(struct regmap *regmap) 611static void nau8825_restart_jack_detection(struct regmap *regmap)
609{ 612{
613 /* Chip needs one FSCLK cycle in order to generate interrupts,
614 * as we cannot guarantee one will be provided by the system. Turning
615 * master mode on then off enables us to generate that FSCLK cycle
616 * with a minimum of contention on the clock bus.
617 */
618 regmap_update_bits(regmap, NAU8825_REG_I2S_PCM_CTRL2,
619 NAU8825_I2S_MS_MASK, NAU8825_I2S_MS_MASTER);
620 regmap_update_bits(regmap, NAU8825_REG_I2S_PCM_CTRL2,
621 NAU8825_I2S_MS_MASK, NAU8825_I2S_MS_SLAVE);
622
610 /* this will restart the entire jack detection process including MIC/GND 623 /* this will restart the entire jack detection process including MIC/GND
611 * switching and create interrupts. We have to go from 0 to 1 and back 624 * switching and create interrupts. We have to go from 0 to 1 and back
612 * to 0 to restart. 625 * to 0 to restart.
@@ -728,7 +741,10 @@ static irqreturn_t nau8825_interrupt(int irq, void *data)
728 struct regmap *regmap = nau8825->regmap; 741 struct regmap *regmap = nau8825->regmap;
729 int active_irq, clear_irq = 0, event = 0, event_mask = 0; 742 int active_irq, clear_irq = 0, event = 0, event_mask = 0;
730 743
731 regmap_read(regmap, NAU8825_REG_IRQ_STATUS, &active_irq); 744 if (regmap_read(regmap, NAU8825_REG_IRQ_STATUS, &active_irq)) {
745 dev_err(nau8825->dev, "failed to read irq status\n");
746 return IRQ_NONE;
747 }
732 748
733 if ((active_irq & NAU8825_JACK_EJECTION_IRQ_MASK) == 749 if ((active_irq & NAU8825_JACK_EJECTION_IRQ_MASK) ==
734 NAU8825_JACK_EJECTION_DETECTED) { 750 NAU8825_JACK_EJECTION_DETECTED) {
@@ -1141,33 +1157,74 @@ static int nau8825_set_bias_level(struct snd_soc_codec *codec,
1141 return ret; 1157 return ret;
1142 } 1158 }
1143 } 1159 }
1144
1145 ret = regcache_sync(nau8825->regmap);
1146 if (ret) {
1147 dev_err(codec->dev,
1148 "Failed to sync cache: %d\n", ret);
1149 return ret;
1150 }
1151 } 1160 }
1152
1153 break; 1161 break;
1154 1162
1155 case SND_SOC_BIAS_OFF: 1163 case SND_SOC_BIAS_OFF:
1156 if (nau8825->mclk_freq) 1164 if (nau8825->mclk_freq)
1157 clk_disable_unprepare(nau8825->mclk); 1165 clk_disable_unprepare(nau8825->mclk);
1158
1159 regcache_mark_dirty(nau8825->regmap);
1160 break; 1166 break;
1161 } 1167 }
1162 return 0; 1168 return 0;
1163} 1169}
1164 1170
1171#ifdef CONFIG_PM
1172static int nau8825_suspend(struct snd_soc_codec *codec)
1173{
1174 struct nau8825 *nau8825 = snd_soc_codec_get_drvdata(codec);
1175
1176 disable_irq(nau8825->irq);
1177 regcache_cache_only(nau8825->regmap, true);
1178 regcache_mark_dirty(nau8825->regmap);
1179
1180 return 0;
1181}
1182
1183static int nau8825_resume(struct snd_soc_codec *codec)
1184{
1185 struct nau8825 *nau8825 = snd_soc_codec_get_drvdata(codec);
1186
1187 /* The chip may lose power and reset in S3. regcache_sync restores
1188 * register values including configurations for sysclk, irq, and
1189 * jack/button detection.
1190 */
1191 regcache_cache_only(nau8825->regmap, false);
1192 regcache_sync(nau8825->regmap);
1193
1194 /* Check the jack plug status directly. If the headset is unplugged
1195 * during S3 when the chip has no power, there will be no jack
1196 * detection irq even after the nau8825_restart_jack_detection below,
1197 * because the chip just thinks no headset has ever been plugged in.
1198 */
1199 if (!nau8825_is_jack_inserted(nau8825->regmap)) {
1200 nau8825_eject_jack(nau8825);
1201 snd_soc_jack_report(nau8825->jack, 0, SND_JACK_HEADSET);
1202 }
1203
1204 enable_irq(nau8825->irq);
1205
1206 /* Run jack detection to check the type (OMTP or CTIA) of the headset
1207 * if there is one. This handles the case where a different type of
1208 * headset is plugged in during S3. This triggers an IRQ iff a headset
1209 * is already plugged in.
1210 */
1211 nau8825_restart_jack_detection(nau8825->regmap);
1212
1213 return 0;
1214}
1215#else
1216#define nau8825_suspend NULL
1217#define nau8825_resume NULL
1218#endif
1219
1165static struct snd_soc_codec_driver nau8825_codec_driver = { 1220static struct snd_soc_codec_driver nau8825_codec_driver = {
1166 .probe = nau8825_codec_probe, 1221 .probe = nau8825_codec_probe,
1167 .set_sysclk = nau8825_set_sysclk, 1222 .set_sysclk = nau8825_set_sysclk,
1168 .set_pll = nau8825_set_pll, 1223 .set_pll = nau8825_set_pll,
1169 .set_bias_level = nau8825_set_bias_level, 1224 .set_bias_level = nau8825_set_bias_level,
1170 .suspend_bias_off = true, 1225 .suspend_bias_off = true,
1226 .suspend = nau8825_suspend,
1227 .resume = nau8825_resume,
1171 1228
1172 .controls = nau8825_controls, 1229 .controls = nau8825_controls,
1173 .num_controls = ARRAY_SIZE(nau8825_controls), 1230 .num_controls = ARRAY_SIZE(nau8825_controls),
@@ -1277,16 +1334,6 @@ static int nau8825_setup_irq(struct nau8825 *nau8825)
1277 regmap_update_bits(regmap, NAU8825_REG_ENA_CTRL, 1334 regmap_update_bits(regmap, NAU8825_REG_ENA_CTRL,
1278 NAU8825_ENABLE_DACR, NAU8825_ENABLE_DACR); 1335 NAU8825_ENABLE_DACR, NAU8825_ENABLE_DACR);
1279 1336
1280 /* Chip needs one FSCLK cycle in order to generate interrupts,
1281 * as we cannot guarantee one will be provided by the system. Turning
1282 * master mode on then off enables us to generate that FSCLK cycle
1283 * with a minimum of contention on the clock bus.
1284 */
1285 regmap_update_bits(regmap, NAU8825_REG_I2S_PCM_CTRL2,
1286 NAU8825_I2S_MS_MASK, NAU8825_I2S_MS_MASTER);
1287 regmap_update_bits(regmap, NAU8825_REG_I2S_PCM_CTRL2,
1288 NAU8825_I2S_MS_MASK, NAU8825_I2S_MS_SLAVE);
1289
1290 ret = devm_request_threaded_irq(nau8825->dev, nau8825->irq, NULL, 1337 ret = devm_request_threaded_irq(nau8825->dev, nau8825->irq, NULL,
1291 nau8825_interrupt, IRQF_TRIGGER_LOW | IRQF_ONESHOT, 1338 nau8825_interrupt, IRQF_TRIGGER_LOW | IRQF_ONESHOT,
1292 "nau8825", nau8825); 1339 "nau8825", nau8825);
@@ -1354,36 +1401,6 @@ static int nau8825_i2c_remove(struct i2c_client *client)
1354 return 0; 1401 return 0;
1355} 1402}
1356 1403
1357#ifdef CONFIG_PM_SLEEP
1358static int nau8825_suspend(struct device *dev)
1359{
1360 struct i2c_client *client = to_i2c_client(dev);
1361 struct nau8825 *nau8825 = dev_get_drvdata(dev);
1362
1363 disable_irq(client->irq);
1364 regcache_cache_only(nau8825->regmap, true);
1365 regcache_mark_dirty(nau8825->regmap);
1366
1367 return 0;
1368}
1369
1370static int nau8825_resume(struct device *dev)
1371{
1372 struct i2c_client *client = to_i2c_client(dev);
1373 struct nau8825 *nau8825 = dev_get_drvdata(dev);
1374
1375 regcache_cache_only(nau8825->regmap, false);
1376 regcache_sync(nau8825->regmap);
1377 enable_irq(client->irq);
1378
1379 return 0;
1380}
1381#endif
1382
1383static const struct dev_pm_ops nau8825_pm = {
1384 SET_SYSTEM_SLEEP_PM_OPS(nau8825_suspend, nau8825_resume)
1385};
1386
1387static const struct i2c_device_id nau8825_i2c_ids[] = { 1404static const struct i2c_device_id nau8825_i2c_ids[] = {
1388 { "nau8825", 0 }, 1405 { "nau8825", 0 },
1389 { } 1406 { }
@@ -1410,7 +1427,6 @@ static struct i2c_driver nau8825_driver = {
1410 .name = "nau8825", 1427 .name = "nau8825",
1411 .of_match_table = of_match_ptr(nau8825_of_ids), 1428 .of_match_table = of_match_ptr(nau8825_of_ids),
1412 .acpi_match_table = ACPI_PTR(nau8825_acpi_match), 1429 .acpi_match_table = ACPI_PTR(nau8825_acpi_match),
1413 .pm = &nau8825_pm,
1414 }, 1430 },
1415 .probe = nau8825_i2c_probe, 1431 .probe = nau8825_i2c_probe,
1416 .remove = nau8825_i2c_remove, 1432 .remove = nau8825_i2c_remove,
diff --git a/sound/soc/codecs/rt5640.c b/sound/soc/codecs/rt5640.c
index e8b5ba04417a..09e8988bbb2d 100644
--- a/sound/soc/codecs/rt5640.c
+++ b/sound/soc/codecs/rt5640.c
@@ -359,7 +359,7 @@ static const DECLARE_TLV_DB_RANGE(bst_tlv,
359 359
360/* Interface data select */ 360/* Interface data select */
361static const char * const rt5640_data_select[] = { 361static const char * const rt5640_data_select[] = {
362 "Normal", "left copy to right", "right copy to left", "Swap"}; 362 "Normal", "Swap", "left copy to right", "right copy to left"};
363 363
364static SOC_ENUM_SINGLE_DECL(rt5640_if1_dac_enum, RT5640_DIG_INF_DATA, 364static SOC_ENUM_SINGLE_DECL(rt5640_if1_dac_enum, RT5640_DIG_INF_DATA,
365 RT5640_IF1_DAC_SEL_SFT, rt5640_data_select); 365 RT5640_IF1_DAC_SEL_SFT, rt5640_data_select);
diff --git a/sound/soc/codecs/rt5640.h b/sound/soc/codecs/rt5640.h
index 1761c3a98b76..58b664b06c16 100644
--- a/sound/soc/codecs/rt5640.h
+++ b/sound/soc/codecs/rt5640.h
@@ -443,39 +443,39 @@
443#define RT5640_IF1_DAC_SEL_MASK (0x3 << 14) 443#define RT5640_IF1_DAC_SEL_MASK (0x3 << 14)
444#define RT5640_IF1_DAC_SEL_SFT 14 444#define RT5640_IF1_DAC_SEL_SFT 14
445#define RT5640_IF1_DAC_SEL_NOR (0x0 << 14) 445#define RT5640_IF1_DAC_SEL_NOR (0x0 << 14)
446#define RT5640_IF1_DAC_SEL_L2R (0x1 << 14) 446#define RT5640_IF1_DAC_SEL_SWAP (0x1 << 14)
447#define RT5640_IF1_DAC_SEL_R2L (0x2 << 14) 447#define RT5640_IF1_DAC_SEL_L2R (0x2 << 14)
448#define RT5640_IF1_DAC_SEL_SWAP (0x3 << 14) 448#define RT5640_IF1_DAC_SEL_R2L (0x3 << 14)
449#define RT5640_IF1_ADC_SEL_MASK (0x3 << 12) 449#define RT5640_IF1_ADC_SEL_MASK (0x3 << 12)
450#define RT5640_IF1_ADC_SEL_SFT 12 450#define RT5640_IF1_ADC_SEL_SFT 12
451#define RT5640_IF1_ADC_SEL_NOR (0x0 << 12) 451#define RT5640_IF1_ADC_SEL_NOR (0x0 << 12)
452#define RT5640_IF1_ADC_SEL_L2R (0x1 << 12) 452#define RT5640_IF1_ADC_SEL_SWAP (0x1 << 12)
453#define RT5640_IF1_ADC_SEL_R2L (0x2 << 12) 453#define RT5640_IF1_ADC_SEL_L2R (0x2 << 12)
454#define RT5640_IF1_ADC_SEL_SWAP (0x3 << 12) 454#define RT5640_IF1_ADC_SEL_R2L (0x3 << 12)
455#define RT5640_IF2_DAC_SEL_MASK (0x3 << 10) 455#define RT5640_IF2_DAC_SEL_MASK (0x3 << 10)
456#define RT5640_IF2_DAC_SEL_SFT 10 456#define RT5640_IF2_DAC_SEL_SFT 10
457#define RT5640_IF2_DAC_SEL_NOR (0x0 << 10) 457#define RT5640_IF2_DAC_SEL_NOR (0x0 << 10)
458#define RT5640_IF2_DAC_SEL_L2R (0x1 << 10) 458#define RT5640_IF2_DAC_SEL_SWAP (0x1 << 10)
459#define RT5640_IF2_DAC_SEL_R2L (0x2 << 10) 459#define RT5640_IF2_DAC_SEL_L2R (0x2 << 10)
460#define RT5640_IF2_DAC_SEL_SWAP (0x3 << 10) 460#define RT5640_IF2_DAC_SEL_R2L (0x3 << 10)
461#define RT5640_IF2_ADC_SEL_MASK (0x3 << 8) 461#define RT5640_IF2_ADC_SEL_MASK (0x3 << 8)
462#define RT5640_IF2_ADC_SEL_SFT 8 462#define RT5640_IF2_ADC_SEL_SFT 8
463#define RT5640_IF2_ADC_SEL_NOR (0x0 << 8) 463#define RT5640_IF2_ADC_SEL_NOR (0x0 << 8)
464#define RT5640_IF2_ADC_SEL_L2R (0x1 << 8) 464#define RT5640_IF2_ADC_SEL_SWAP (0x1 << 8)
465#define RT5640_IF2_ADC_SEL_R2L (0x2 << 8) 465#define RT5640_IF2_ADC_SEL_L2R (0x2 << 8)
466#define RT5640_IF2_ADC_SEL_SWAP (0x3 << 8) 466#define RT5640_IF2_ADC_SEL_R2L (0x3 << 8)
467#define RT5640_IF3_DAC_SEL_MASK (0x3 << 6) 467#define RT5640_IF3_DAC_SEL_MASK (0x3 << 6)
468#define RT5640_IF3_DAC_SEL_SFT 6 468#define RT5640_IF3_DAC_SEL_SFT 6
469#define RT5640_IF3_DAC_SEL_NOR (0x0 << 6) 469#define RT5640_IF3_DAC_SEL_NOR (0x0 << 6)
470#define RT5640_IF3_DAC_SEL_L2R (0x1 << 6) 470#define RT5640_IF3_DAC_SEL_SWAP (0x1 << 6)
471#define RT5640_IF3_DAC_SEL_R2L (0x2 << 6) 471#define RT5640_IF3_DAC_SEL_L2R (0x2 << 6)
472#define RT5640_IF3_DAC_SEL_SWAP (0x3 << 6) 472#define RT5640_IF3_DAC_SEL_R2L (0x3 << 6)
473#define RT5640_IF3_ADC_SEL_MASK (0x3 << 4) 473#define RT5640_IF3_ADC_SEL_MASK (0x3 << 4)
474#define RT5640_IF3_ADC_SEL_SFT 4 474#define RT5640_IF3_ADC_SEL_SFT 4
475#define RT5640_IF3_ADC_SEL_NOR (0x0 << 4) 475#define RT5640_IF3_ADC_SEL_NOR (0x0 << 4)
476#define RT5640_IF3_ADC_SEL_L2R (0x1 << 4) 476#define RT5640_IF3_ADC_SEL_SWAP (0x1 << 4)
477#define RT5640_IF3_ADC_SEL_R2L (0x2 << 4) 477#define RT5640_IF3_ADC_SEL_L2R (0x2 << 4)
478#define RT5640_IF3_ADC_SEL_SWAP (0x3 << 4) 478#define RT5640_IF3_ADC_SEL_R2L (0x3 << 4)
479 479
480/* REC Left Mixer Control 1 (0x3b) */ 480/* REC Left Mixer Control 1 (0x3b) */
481#define RT5640_G_HP_L_RM_L_MASK (0x7 << 13) 481#define RT5640_G_HP_L_RM_L_MASK (0x7 << 13)
diff --git a/sound/soc/codecs/wm5102.c b/sound/soc/codecs/wm5102.c
index a8b3e3f701f9..1bae17ee8817 100644
--- a/sound/soc/codecs/wm5102.c
+++ b/sound/soc/codecs/wm5102.c
@@ -1955,11 +1955,16 @@ err_adsp2_codec_probe:
1955static int wm5102_codec_remove(struct snd_soc_codec *codec) 1955static int wm5102_codec_remove(struct snd_soc_codec *codec)
1956{ 1956{
1957 struct wm5102_priv *priv = snd_soc_codec_get_drvdata(codec); 1957 struct wm5102_priv *priv = snd_soc_codec_get_drvdata(codec);
1958 struct arizona *arizona = priv->core.arizona;
1958 1959
1959 wm_adsp2_codec_remove(&priv->core.adsp[0], codec); 1960 wm_adsp2_codec_remove(&priv->core.adsp[0], codec);
1960 1961
1961 priv->core.arizona->dapm = NULL; 1962 priv->core.arizona->dapm = NULL;
1962 1963
1964 arizona_free_irq(arizona, ARIZONA_IRQ_DSP_IRQ1, priv);
1965
1966 arizona_free_spk(codec);
1967
1963 return 0; 1968 return 0;
1964} 1969}
1965 1970
diff --git a/sound/soc/codecs/wm5110.c b/sound/soc/codecs/wm5110.c
index 83ba70fe16e6..2728ac545ffe 100644
--- a/sound/soc/codecs/wm5110.c
+++ b/sound/soc/codecs/wm5110.c
@@ -2298,6 +2298,8 @@ static int wm5110_codec_remove(struct snd_soc_codec *codec)
2298 2298
2299 arizona_free_irq(arizona, ARIZONA_IRQ_DSP_IRQ1, priv); 2299 arizona_free_irq(arizona, ARIZONA_IRQ_DSP_IRQ1, priv);
2300 2300
2301 arizona_free_spk(codec);
2302
2301 return 0; 2303 return 0;
2302} 2304}
2303 2305
diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c
index 88223608a33f..720a14e0687d 100644
--- a/sound/soc/codecs/wm8962.c
+++ b/sound/soc/codecs/wm8962.c
@@ -2471,7 +2471,7 @@ static void wm8962_configure_bclk(struct snd_soc_codec *codec)
2471 break; 2471 break;
2472 default: 2472 default:
2473 dev_warn(codec->dev, "Unknown DSPCLK divisor read back\n"); 2473 dev_warn(codec->dev, "Unknown DSPCLK divisor read back\n");
2474 dspclk = wm8962->sysclk; 2474 dspclk = wm8962->sysclk_rate;
2475 } 2475 }
2476 2476
2477 dev_dbg(codec->dev, "DSPCLK is %dHz, BCLK %d\n", dspclk, wm8962->bclk); 2477 dev_dbg(codec->dev, "DSPCLK is %dHz, BCLK %d\n", dspclk, wm8962->bclk);
diff --git a/sound/soc/codecs/wm8997.c b/sound/soc/codecs/wm8997.c
index 52d766efe14f..6b0785b5a5c5 100644
--- a/sound/soc/codecs/wm8997.c
+++ b/sound/soc/codecs/wm8997.c
@@ -1072,6 +1072,8 @@ static int wm8997_codec_remove(struct snd_soc_codec *codec)
1072 1072
1073 priv->core.arizona->dapm = NULL; 1073 priv->core.arizona->dapm = NULL;
1074 1074
1075 arizona_free_spk(codec);
1076
1075 return 0; 1077 return 0;
1076} 1078}
1077 1079
diff --git a/sound/soc/codecs/wm8998.c b/sound/soc/codecs/wm8998.c
index 012396074a8a..449f66636205 100644
--- a/sound/soc/codecs/wm8998.c
+++ b/sound/soc/codecs/wm8998.c
@@ -1324,6 +1324,8 @@ static int wm8998_codec_remove(struct snd_soc_codec *codec)
1324 1324
1325 priv->core.arizona->dapm = NULL; 1325 priv->core.arizona->dapm = NULL;
1326 1326
1327 arizona_free_spk(codec);
1328
1327 return 0; 1329 return 0;
1328} 1330}
1329 1331
diff --git a/sound/soc/intel/Kconfig b/sound/soc/intel/Kconfig
index b3e6c2300457..1120f4f4d011 100644
--- a/sound/soc/intel/Kconfig
+++ b/sound/soc/intel/Kconfig
@@ -163,7 +163,6 @@ config SND_SOC_INTEL_SKYLAKE
163 tristate 163 tristate
164 select SND_HDA_EXT_CORE 164 select SND_HDA_EXT_CORE
165 select SND_SOC_TOPOLOGY 165 select SND_SOC_TOPOLOGY
166 select SND_HDA_I915
167 select SND_SOC_INTEL_SST 166 select SND_SOC_INTEL_SST
168 167
169config SND_SOC_INTEL_SKL_RT286_MACH 168config SND_SOC_INTEL_SKL_RT286_MACH
diff --git a/sound/soc/intel/haswell/sst-haswell-ipc.c b/sound/soc/intel/haswell/sst-haswell-ipc.c
index ac60f1301e21..91565229d074 100644
--- a/sound/soc/intel/haswell/sst-haswell-ipc.c
+++ b/sound/soc/intel/haswell/sst-haswell-ipc.c
@@ -1345,7 +1345,7 @@ int sst_hsw_stream_reset(struct sst_hsw *hsw, struct sst_hsw_stream *stream)
1345 return 0; 1345 return 0;
1346 1346
1347 /* wait for pause to complete before we reset the stream */ 1347 /* wait for pause to complete before we reset the stream */
1348 while (stream->running && tries--) 1348 while (stream->running && --tries)
1349 msleep(1); 1349 msleep(1);
1350 if (!tries) { 1350 if (!tries) {
1351 dev_err(hsw->dev, "error: reset stream %d still running\n", 1351 dev_err(hsw->dev, "error: reset stream %d still running\n",
diff --git a/sound/soc/intel/skylake/skl-sst-dsp.c b/sound/soc/intel/skylake/skl-sst-dsp.c
index a5267e8a96e0..2962ef22fc84 100644
--- a/sound/soc/intel/skylake/skl-sst-dsp.c
+++ b/sound/soc/intel/skylake/skl-sst-dsp.c
@@ -336,6 +336,11 @@ void skl_dsp_free(struct sst_dsp *dsp)
336 skl_ipc_int_disable(dsp); 336 skl_ipc_int_disable(dsp);
337 337
338 free_irq(dsp->irq, dsp); 338 free_irq(dsp->irq, dsp);
339 dsp->cl_dev.ops.cl_cleanup_controller(dsp);
340 skl_cldma_int_disable(dsp);
341 skl_ipc_op_int_disable(dsp);
342 skl_ipc_int_disable(dsp);
343
339 skl_dsp_disable_core(dsp); 344 skl_dsp_disable_core(dsp);
340} 345}
341EXPORT_SYMBOL_GPL(skl_dsp_free); 346EXPORT_SYMBOL_GPL(skl_dsp_free);
diff --git a/sound/soc/intel/skylake/skl-topology.c b/sound/soc/intel/skylake/skl-topology.c
index 545b4e77b8aa..cdb78b7e5a14 100644
--- a/sound/soc/intel/skylake/skl-topology.c
+++ b/sound/soc/intel/skylake/skl-topology.c
@@ -239,6 +239,7 @@ static void skl_tplg_update_buffer_size(struct skl_sst *ctx,
239{ 239{
240 int multiplier = 1; 240 int multiplier = 1;
241 struct skl_module_fmt *in_fmt, *out_fmt; 241 struct skl_module_fmt *in_fmt, *out_fmt;
242 int in_rate, out_rate;
242 243
243 244
244 /* Since fixups is applied to pin 0 only, ibs, obs needs 245 /* Since fixups is applied to pin 0 only, ibs, obs needs
@@ -249,15 +250,24 @@ static void skl_tplg_update_buffer_size(struct skl_sst *ctx,
249 250
250 if (mcfg->m_type == SKL_MODULE_TYPE_SRCINT) 251 if (mcfg->m_type == SKL_MODULE_TYPE_SRCINT)
251 multiplier = 5; 252 multiplier = 5;
252 mcfg->ibs = (in_fmt->s_freq / 1000) * 253
253 (mcfg->in_fmt->channels) * 254 if (in_fmt->s_freq % 1000)
254 (mcfg->in_fmt->bit_depth >> 3) * 255 in_rate = (in_fmt->s_freq / 1000) + 1;
255 multiplier; 256 else
256 257 in_rate = (in_fmt->s_freq / 1000);
257 mcfg->obs = (mcfg->out_fmt->s_freq / 1000) * 258
258 (mcfg->out_fmt->channels) * 259 mcfg->ibs = in_rate * (mcfg->in_fmt->channels) *
259 (mcfg->out_fmt->bit_depth >> 3) * 260 (mcfg->in_fmt->bit_depth >> 3) *
260 multiplier; 261 multiplier;
262
263 if (mcfg->out_fmt->s_freq % 1000)
264 out_rate = (mcfg->out_fmt->s_freq / 1000) + 1;
265 else
266 out_rate = (mcfg->out_fmt->s_freq / 1000);
267
268 mcfg->obs = out_rate * (mcfg->out_fmt->channels) *
269 (mcfg->out_fmt->bit_depth >> 3) *
270 multiplier;
261} 271}
262 272
263static int skl_tplg_update_be_blob(struct snd_soc_dapm_widget *w, 273static int skl_tplg_update_be_blob(struct snd_soc_dapm_widget *w,
@@ -485,11 +495,15 @@ skl_tplg_init_pipe_modules(struct skl *skl, struct skl_pipe *pipe)
485 if (!skl_is_pipe_mcps_avail(skl, mconfig)) 495 if (!skl_is_pipe_mcps_avail(skl, mconfig))
486 return -ENOMEM; 496 return -ENOMEM;
487 497
498 skl_tplg_alloc_pipe_mcps(skl, mconfig);
499
488 if (mconfig->is_loadable && ctx->dsp->fw_ops.load_mod) { 500 if (mconfig->is_loadable && ctx->dsp->fw_ops.load_mod) {
489 ret = ctx->dsp->fw_ops.load_mod(ctx->dsp, 501 ret = ctx->dsp->fw_ops.load_mod(ctx->dsp,
490 mconfig->id.module_id, mconfig->guid); 502 mconfig->id.module_id, mconfig->guid);
491 if (ret < 0) 503 if (ret < 0)
492 return ret; 504 return ret;
505
506 mconfig->m_state = SKL_MODULE_LOADED;
493 } 507 }
494 508
495 /* update blob if blob is null for be with default value */ 509 /* update blob if blob is null for be with default value */
@@ -509,7 +523,6 @@ skl_tplg_init_pipe_modules(struct skl *skl, struct skl_pipe *pipe)
509 ret = skl_tplg_set_module_params(w, ctx); 523 ret = skl_tplg_set_module_params(w, ctx);
510 if (ret < 0) 524 if (ret < 0)
511 return ret; 525 return ret;
512 skl_tplg_alloc_pipe_mcps(skl, mconfig);
513 } 526 }
514 527
515 return 0; 528 return 0;
@@ -524,7 +537,8 @@ static int skl_tplg_unload_pipe_modules(struct skl_sst *ctx,
524 list_for_each_entry(w_module, &pipe->w_list, node) { 537 list_for_each_entry(w_module, &pipe->w_list, node) {
525 mconfig = w_module->w->priv; 538 mconfig = w_module->w->priv;
526 539
527 if (mconfig->is_loadable && ctx->dsp->fw_ops.unload_mod) 540 if (mconfig->is_loadable && ctx->dsp->fw_ops.unload_mod &&
541 mconfig->m_state > SKL_MODULE_UNINIT)
528 return ctx->dsp->fw_ops.unload_mod(ctx->dsp, 542 return ctx->dsp->fw_ops.unload_mod(ctx->dsp,
529 mconfig->id.module_id); 543 mconfig->id.module_id);
530 } 544 }
@@ -558,6 +572,9 @@ static int skl_tplg_mixer_dapm_pre_pmu_event(struct snd_soc_dapm_widget *w,
558 if (!skl_is_pipe_mem_avail(skl, mconfig)) 572 if (!skl_is_pipe_mem_avail(skl, mconfig))
559 return -ENOMEM; 573 return -ENOMEM;
560 574
575 skl_tplg_alloc_pipe_mem(skl, mconfig);
576 skl_tplg_alloc_pipe_mcps(skl, mconfig);
577
561 /* 578 /*
562 * Create a list of modules for pipe. 579 * Create a list of modules for pipe.
563 * This list contains modules from source to sink 580 * This list contains modules from source to sink
@@ -601,9 +618,6 @@ static int skl_tplg_mixer_dapm_pre_pmu_event(struct snd_soc_dapm_widget *w,
601 src_module = dst_module; 618 src_module = dst_module;
602 } 619 }
603 620
604 skl_tplg_alloc_pipe_mem(skl, mconfig);
605 skl_tplg_alloc_pipe_mcps(skl, mconfig);
606
607 return 0; 621 return 0;
608} 622}
609 623
diff --git a/sound/soc/intel/skylake/skl-topology.h b/sound/soc/intel/skylake/skl-topology.h
index de3c401284d9..d2d923002d5c 100644
--- a/sound/soc/intel/skylake/skl-topology.h
+++ b/sound/soc/intel/skylake/skl-topology.h
@@ -274,10 +274,10 @@ struct skl_pipe {
274 274
275enum skl_module_state { 275enum skl_module_state {
276 SKL_MODULE_UNINIT = 0, 276 SKL_MODULE_UNINIT = 0,
277 SKL_MODULE_INIT_DONE = 1, 277 SKL_MODULE_LOADED = 1,
278 SKL_MODULE_LOADED = 2, 278 SKL_MODULE_INIT_DONE = 2,
279 SKL_MODULE_UNLOADED = 3, 279 SKL_MODULE_BIND_DONE = 3,
280 SKL_MODULE_BIND_DONE = 4 280 SKL_MODULE_UNLOADED = 4,
281}; 281};
282 282
283struct skl_module_cfg { 283struct skl_module_cfg {
diff --git a/sound/soc/intel/skylake/skl.c b/sound/soc/intel/skylake/skl.c
index ab5e25aaeee3..3982f5536f2d 100644
--- a/sound/soc/intel/skylake/skl.c
+++ b/sound/soc/intel/skylake/skl.c
@@ -222,6 +222,7 @@ static int skl_suspend(struct device *dev)
222 struct hdac_ext_bus *ebus = pci_get_drvdata(pci); 222 struct hdac_ext_bus *ebus = pci_get_drvdata(pci);
223 struct skl *skl = ebus_to_skl(ebus); 223 struct skl *skl = ebus_to_skl(ebus);
224 struct hdac_bus *bus = ebus_to_hbus(ebus); 224 struct hdac_bus *bus = ebus_to_hbus(ebus);
225 int ret = 0;
225 226
226 /* 227 /*
227 * Do not suspend if streams which are marked ignore suspend are 228 * Do not suspend if streams which are marked ignore suspend are
@@ -232,10 +233,20 @@ static int skl_suspend(struct device *dev)
232 enable_irq_wake(bus->irq); 233 enable_irq_wake(bus->irq);
233 pci_save_state(pci); 234 pci_save_state(pci);
234 pci_disable_device(pci); 235 pci_disable_device(pci);
235 return 0;
236 } else { 236 } else {
237 return _skl_suspend(ebus); 237 ret = _skl_suspend(ebus);
238 if (ret < 0)
239 return ret;
240 }
241
242 if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI)) {
243 ret = snd_hdac_display_power(bus, false);
244 if (ret < 0)
245 dev_err(bus->dev,
246 "Cannot turn OFF display power on i915\n");
238 } 247 }
248
249 return ret;
239} 250}
240 251
241static int skl_resume(struct device *dev) 252static int skl_resume(struct device *dev)
@@ -316,17 +327,20 @@ static int skl_free(struct hdac_ext_bus *ebus)
316 327
317 if (bus->irq >= 0) 328 if (bus->irq >= 0)
318 free_irq(bus->irq, (void *)bus); 329 free_irq(bus->irq, (void *)bus);
319 if (bus->remap_addr)
320 iounmap(bus->remap_addr);
321
322 snd_hdac_bus_free_stream_pages(bus); 330 snd_hdac_bus_free_stream_pages(bus);
323 snd_hdac_stream_free_all(ebus); 331 snd_hdac_stream_free_all(ebus);
324 snd_hdac_link_free_all(ebus); 332 snd_hdac_link_free_all(ebus);
333
334 if (bus->remap_addr)
335 iounmap(bus->remap_addr);
336
325 pci_release_regions(skl->pci); 337 pci_release_regions(skl->pci);
326 pci_disable_device(skl->pci); 338 pci_disable_device(skl->pci);
327 339
328 snd_hdac_ext_bus_exit(ebus); 340 snd_hdac_ext_bus_exit(ebus);
329 341
342 if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI))
343 snd_hdac_i915_exit(&ebus->bus);
330 return 0; 344 return 0;
331} 345}
332 346
@@ -719,12 +733,12 @@ static void skl_remove(struct pci_dev *pci)
719 if (skl->tplg) 733 if (skl->tplg)
720 release_firmware(skl->tplg); 734 release_firmware(skl->tplg);
721 735
722 if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI))
723 snd_hdac_i915_exit(&ebus->bus);
724
725 if (pci_dev_run_wake(pci)) 736 if (pci_dev_run_wake(pci))
726 pm_runtime_get_noresume(&pci->dev); 737 pm_runtime_get_noresume(&pci->dev);
727 pci_dev_put(pci); 738
739 /* codec removal, invoke bus_device_remove */
740 snd_hdac_ext_bus_device_remove(ebus);
741
728 skl_platform_unregister(&pci->dev); 742 skl_platform_unregister(&pci->dev);
729 skl_free_dsp(skl); 743 skl_free_dsp(skl);
730 skl_machine_device_unregister(skl); 744 skl_machine_device_unregister(skl);
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index 801ae1a81dfd..c4464858bf01 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -2188,6 +2188,13 @@ static ssize_t dapm_widget_show_component(struct snd_soc_component *cmpnt,
2188 int count = 0; 2188 int count = 0;
2189 char *state = "not set"; 2189 char *state = "not set";
2190 2190
2191 /* card won't be set for the dummy component, as a spot fix
2192 * we're checking for that case specifically here but in future
2193 * we will ensure that the dummy component looks like others.
2194 */
2195 if (!cmpnt->card)
2196 return 0;
2197
2191 list_for_each_entry(w, &cmpnt->card->widgets, list) { 2198 list_for_each_entry(w, &cmpnt->card->widgets, list) {
2192 if (w->dapm != dapm) 2199 if (w->dapm != dapm)
2193 continue; 2200 continue;