author		David S. Miller <davem@davemloft.net>	2015-02-05 17:33:28 -0500
committer	David S. Miller <davem@davemloft.net>	2015-02-05 17:33:28 -0500
commit		6e03f896b52cd2ca88942170c5c9c407ec0ede69 (patch)
tree		48ca9a6efa5f99819667538838bab3679416f92c
parent		db79a621835ee91d3e10177abd97f48e0a4dcf9b (diff)
parent		9d82f5eb3376cbae96ad36a063a9390de1694546 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts:
	drivers/net/vxlan.c
	drivers/vhost/net.c
	include/linux/if_vlan.h
	net/core/dev.c

The net/core/dev.c conflict was the overlap of one commit marking an
existing function static whilst another was adding a new function.

In the include/linux/if_vlan.h case, the type used for a local variable
was changed in 'net', whereas the function got rewritten to fix a
stacked vlan bug in 'net-next'.

In drivers/vhost/net.c, Al Viro's iov_iter conversions in 'net-next'
overlapped with an endianness fix for VHOST 1.0 in 'net'.

In drivers/net/vxlan.c, vxlan_find_vni() added a 'flags' parameter in
'net-next' whereas in 'net' there was a bug fix to pass in the correct
network namespace pointer in calls to this function.

Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--  Documentation/devicetree/bindings/i2c/i2c-st.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/i2c/trivial-devices.txt | 1
-rw-r--r--  Documentation/networking/netlink_mmap.txt | 13
-rw-r--r--  MAINTAINERS | 11
-rw-r--r--  Makefile | 2
-rw-r--r--  arch/alpha/mm/fault.c | 2
-rw-r--r--  arch/arc/mm/fault.c | 2
-rw-r--r--  arch/arm/boot/compressed/head.S | 39
-rw-r--r--  arch/arm/boot/dts/sun4i-a10.dtsi | 20
-rw-r--r--  arch/arm/boot/dts/sun5i-a10s-olinuxino-micro.dts | 6
-rw-r--r--  arch/arm/boot/dts/sun5i-a10s.dtsi | 8
-rw-r--r--  arch/arm/boot/dts/sun5i-a13-hsg-h702.dts | 4
-rw-r--r--  arch/arm/boot/dts/sun5i-a13-olinuxino-micro.dts | 4
-rw-r--r--  arch/arm/boot/dts/sun5i-a13-olinuxino.dts | 4
-rw-r--r--  arch/arm/boot/dts/sun5i-a13.dtsi | 9
-rw-r--r--  arch/arm/boot/dts/sun6i-a31.dtsi | 6
-rw-r--r--  arch/arm/boot/dts/sun7i-a20-bananapi.dts | 6
-rw-r--r--  arch/arm/boot/dts/sun7i-a20-hummingbird.dts | 8
-rw-r--r--  arch/arm/boot/dts/sun7i-a20-olinuxino-micro.dts | 3
-rw-r--r--  arch/arm/boot/dts/sun7i-a20.dtsi | 8
-rw-r--r--  arch/arm/boot/dts/sun8i-a23-ippo-q8h-v5.dts | 4
-rw-r--r--  arch/arm/boot/dts/sun8i-a23.dtsi | 9
-rw-r--r--  arch/arm/boot/dts/sun9i-a80-optimus.dts | 5
-rw-r--r--  arch/arm/boot/dts/sun9i-a80.dtsi | 10
-rw-r--r--  arch/arm/include/asm/kvm_emulate.h | 10
-rw-r--r--  arch/arm/include/asm/kvm_host.h | 3
-rw-r--r--  arch/arm/include/asm/kvm_mmu.h | 77
-rw-r--r--  arch/arm/kernel/entry-v7m.S | 2
-rw-r--r--  arch/arm/kvm/arm.c | 10
-rw-r--r--  arch/arm/kvm/coproc.c | 70
-rw-r--r--  arch/arm/kvm/coproc.h | 6
-rw-r--r--  arch/arm/kvm/coproc_a15.c | 2
-rw-r--r--  arch/arm/kvm/coproc_a7.c | 2
-rw-r--r--  arch/arm/kvm/mmu.c | 164
-rw-r--r--  arch/arm/kvm/trace.h | 39
-rw-r--r--  arch/arm/mach-mvebu/coherency.c | 7
-rw-r--r--  arch/arm/mach-shmobile/board-ape6evm.c | 20
-rw-r--r--  arch/arm/mach-shmobile/board-lager.c | 13
-rw-r--r--  arch/arm/mach-shmobile/setup-rcar-gen2.c | 2
-rw-r--r--  arch/arm/mach-shmobile/timer.c | 12
-rw-r--r--  arch/arm/mm/Kconfig | 1
-rw-r--r--  arch/arm/mm/context.c | 26
-rw-r--r--  arch/arm/mm/dma-mapping.c | 56
-rw-r--r--  arch/arm64/include/asm/kvm_emulate.h | 10
-rw-r--r--  arch/arm64/include/asm/kvm_host.h | 3
-rw-r--r--  arch/arm64/include/asm/kvm_mmu.h | 34
-rw-r--r--  arch/arm64/kvm/sys_regs.c | 75
-rw-r--r--  arch/avr32/mm/fault.c | 2
-rw-r--r--  arch/cris/mm/fault.c | 2
-rw-r--r--  arch/frv/mm/fault.c | 2
-rw-r--r--  arch/ia64/mm/fault.c | 2
-rw-r--r--  arch/m32r/mm/fault.c | 2
-rw-r--r--  arch/m68k/mm/fault.c | 2
-rw-r--r--  arch/metag/mm/fault.c | 2
-rw-r--r--  arch/microblaze/mm/fault.c | 2
-rw-r--r--  arch/mips/mm/fault.c | 2
-rw-r--r--  arch/mn10300/mm/fault.c | 2
-rw-r--r--  arch/nios2/mm/fault.c | 2
-rw-r--r--  arch/openrisc/mm/fault.c | 2
-rw-r--r--  arch/parisc/mm/fault.c | 2
-rw-r--r--  arch/powerpc/mm/copro_fault.c | 2
-rw-r--r--  arch/powerpc/mm/fault.c | 2
-rw-r--r--  arch/s390/mm/fault.c | 6
-rw-r--r--  arch/score/mm/fault.c | 2
-rw-r--r--  arch/sh/mm/fault.c | 2
-rw-r--r--  arch/sparc/mm/fault_32.c | 2
-rw-r--r--  arch/sparc/mm/fault_64.c | 2
-rw-r--r--  arch/tile/mm/fault.c | 2
-rw-r--r--  arch/um/kernel/trap.c | 2
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel.c | 1
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel_rapl.c | 2
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel_uncore.c | 9
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel_uncore.h | 18
-rw-r--r--  arch/x86/kvm/lapic.c | 3
-rw-r--r--  arch/x86/mm/fault.c | 2
-rw-r--r--  arch/x86/pci/common.c | 16
-rw-r--r--  arch/xtensa/mm/fault.c | 2
-rw-r--r--  block/blk-mq-sysfs.c | 25
-rw-r--r--  block/blk-mq.c | 23
-rw-r--r--  block/blk-mq.h | 2
-rw-r--r--  block/blk-sysfs.c | 2
-rw-r--r--  drivers/Kconfig | 2
-rw-r--r--  drivers/acpi/acpi_lpss.c | 35
-rw-r--r--  drivers/block/rbd.c | 25
-rw-r--r--  drivers/gpio/gpio-mcp23s08.c | 17
-rw-r--r--  drivers/gpio/gpio-omap.c | 39
-rw-r--r--  drivers/gpio/gpiolib-sysfs.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 78
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_module.c | 27
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_pasid.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 17
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c | 18
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_drv.c | 3
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_drv.h | 3
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_main.c | 2
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_mode.c | 9
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c | 30
-rw-r--r--  drivers/gpu/drm/i2c/tda998x_drv.c | 52
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 14
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 3
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 26
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/cik_sdma.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/ni_dma.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/r100.c | 10
-rw-r--r--  drivers/gpu/drm/radeon/r300.c | 16
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 9
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.c | 24
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.h | 12
-rw-r--r--  drivers/gpu/drm/radeon/radeon_benchmark.c | 13
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gart.c | 54
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gem.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/radeon_kfd.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_kms.c | 16
-rw-r--r--  drivers/gpu/drm/radeon/radeon_test.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/radeon_vm.c | 12
-rw-r--r--  drivers/gpu/drm/radeon/rs400.c | 14
-rw-r--r--  drivers/gpu/drm/radeon/rs600.c | 14
-rw-r--r--  drivers/gpu/drm/radeon/si_dma.c | 1
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 28
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 25
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fence.c | 18
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c | 36
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c | 8
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_irq.c | 25
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 2
-rw-r--r--  drivers/i2c/busses/Kconfig | 1
-rw-r--r--  drivers/i2c/busses/i2c-s3c2410.c | 23
-rw-r--r--  drivers/i2c/busses/i2c-sh_mobile.c | 12
-rw-r--r--  drivers/i2c/i2c-core.c | 2
-rw-r--r--  drivers/i2c/i2c-slave-eeprom.c | 4
-rw-r--r--  drivers/infiniband/core/uverbs_main.c | 1
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib.h | 19
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_cm.c | 18
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_ib.c | 27
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_main.c | 49
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_multicast.c | 239
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_verbs.c | 22
-rw-r--r--  drivers/input/mouse/elantech.c | 16
-rw-r--r--  drivers/input/mouse/synaptics.c | 7
-rw-r--r--  drivers/input/serio/i8042-x86ia64io.h | 8
-rw-r--r--  drivers/iommu/tegra-gart.c | 3
-rw-r--r--  drivers/isdn/hardware/eicon/message.c | 2
-rw-r--r--  drivers/md/bitmap.c | 13
-rw-r--r--  drivers/md/dm-cache-metadata.c | 9
-rw-r--r--  drivers/md/dm-thin.c | 6
-rw-r--r--  drivers/md/raid5.c | 5
-rw-r--r--  drivers/net/caif/caif_hsi.c | 1
-rw-r--r--  drivers/net/ethernet/amd/Kconfig | 4
-rw-r--r--  drivers/net/ethernet/amd/nmclan_cs.c | 2
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 4
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_main.c | 2
-rw-r--r--  drivers/net/ethernet/cirrus/Kconfig | 3
-rw-r--r--  drivers/net/ethernet/freescale/gianfar_ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/intel/igbvf/netdev.c | 19
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4.h | 3
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c | 27
-rw-r--r--  drivers/net/ethernet/qlogic/qlge/qlge_main.c | 26
-rw-r--r--  drivers/net/ethernet/sun/sunvnet.c | 1
-rw-r--r--  drivers/net/hyperv/netvsc.c | 11
-rw-r--r--  drivers/net/macvtap.c | 16
-rw-r--r--  drivers/net/ppp/ppp_deflate.c | 2
-rw-r--r--  drivers/net/tun.c | 25
-rw-r--r--  drivers/net/usb/sr9700.c | 36
-rw-r--r--  drivers/net/usb/sr9700.h | 66
-rw-r--r--  drivers/net/virtio_net.c | 24
-rw-r--r--  drivers/net/vxlan.c | 10
-rw-r--r--  drivers/net/wan/Kconfig | 6
-rw-r--r--  drivers/net/xen-netback/interface.c | 2
-rw-r--r--  drivers/net/xen-netback/netback.c | 3
-rw-r--r--  drivers/pci/host/pcie-designware.c | 3
-rw-r--r--  drivers/pci/quirks.c | 40
-rw-r--r--  drivers/pinctrl/pinctrl-at91.c | 108
-rw-r--r--  drivers/scsi/device_handler/scsi_dh.c | 3
-rw-r--r--  drivers/scsi/sd.c | 6
-rw-r--r--  drivers/spi/spi-fsl-dspi.c | 14
-rw-r--r--  drivers/spi/spi-imx.c | 4
-rw-r--r--  drivers/staging/lustre/lustre/llite/vvp_io.c | 2
-rw-r--r--  drivers/staging/nvec/nvec.c | 9
-rw-r--r--  drivers/usb/core/otg_whitelist.h | 5
-rw-r--r--  drivers/usb/core/quirks.c | 4
-rw-r--r--  drivers/usb/dwc2/core_intr.c | 6
-rw-r--r--  drivers/usb/phy/phy.c | 2
-rw-r--r--  drivers/usb/storage/unusual_devs.h | 9
-rw-r--r--  drivers/usb/storage/unusual_uas.h | 7
-rw-r--r--  drivers/vhost/net.c | 14
-rw-r--r--  fs/btrfs/scrub.c | 2
-rw-r--r--  fs/cifs/cifs_debug.c | 6
-rw-r--r--  fs/cifs/file.c | 6
-rw-r--r--  fs/cifs/smbencrypt.c | 2
-rw-r--r--  fs/gfs2/quota.c | 49
-rw-r--r--  fs/nfs/direct.c | 6
-rw-r--r--  fs/nfs/inode.c | 5
-rw-r--r--  fs/nfs/internal.h | 2
-rw-r--r--  fs/nfs/nfs4client.c | 2
-rw-r--r--  fs/quota/dquot.c | 83
-rw-r--r--  fs/quota/quota.c | 162
-rw-r--r--  fs/udf/file.c | 2
-rw-r--r--  fs/xfs/xfs_qm.h | 4
-rw-r--r--  fs/xfs/xfs_qm_syscalls.c | 156
-rw-r--r--  fs/xfs/xfs_quotaops.c | 8
-rw-r--r--  include/linux/i2c.h | 6
-rw-r--r--  include/linux/if_vlan.h | 60
-rw-r--r--  include/linux/kernel.h | 2
-rw-r--r--  include/linux/mlx4/device.h | 2
-rw-r--r--  include/linux/mm.h | 6
-rw-r--r--  include/linux/perf_event.h | 6
-rw-r--r--  include/linux/quota.h | 47
-rw-r--r--  include/linux/quotaops.h | 4
-rw-r--r--  include/net/flow_keys.h | 6
-rw-r--r--  include/net/ip.h | 2
-rw-r--r--  include/net/ipv6.h | 7
-rw-r--r--  include/net/netfilter/nf_tables.h | 2
-rw-r--r--  include/net/netns/ipv4.h | 1
-rw-r--r--  include/net/sch_generic.h | 13
-rw-r--r--  include/net/tcp.h | 4
-rw-r--r--  kernel/events/core.c | 15
-rw-r--r--  kernel/sched/core.c | 5
-rw-r--r--  lib/checksum.c | 12
-rw-r--r--  mm/gup.c | 4
-rw-r--r--  mm/ksm.c | 2
-rw-r--r--  mm/memory.c | 2
-rw-r--r--  net/bridge/netfilter/nft_reject_bridge.c | 29
-rw-r--r--  net/caif/chnl_net.c | 1
-rw-r--r--  net/core/dev.c | 37
-rw-r--r--  net/core/rtnetlink.c | 6
-rw-r--r--  net/ipv4/ip_output.c | 29
-rw-r--r--  net/ipv4/route.c | 3
-rw-r--r--  net/ipv4/tcp_bic.c | 2
-rw-r--r--  net/ipv4/tcp_cong.c | 32
-rw-r--r--  net/ipv4/tcp_cubic.c | 39
-rw-r--r--  net/ipv4/tcp_ipv4.c | 37
-rw-r--r--  net/ipv4/tcp_scalable.c | 3
-rw-r--r--  net/ipv4/tcp_veno.c | 2
-rw-r--r--  net/ipv4/tcp_yeah.c | 2
-rw-r--r--  net/ipv6/ip6_gre.c | 4
-rw-r--r--  net/ipv6/ip6_output.c | 14
-rw-r--r--  net/ipv6/output_core.c | 41
-rw-r--r--  net/ipv6/sit.c | 8
-rw-r--r--  net/ipv6/udp_offload.c | 10
-rw-r--r--  net/netfilter/ipvs/ip_vs_core.c | 33
-rw-r--r--  net/netfilter/nf_tables_api.c | 28
-rw-r--r--  net/netfilter/nft_masq.c | 26
-rw-r--r--  net/netfilter/nft_nat.c | 40
-rw-r--r--  net/netfilter/nft_redir.c | 25
-rw-r--r--  net/netlink/af_netlink.c | 4
-rw-r--r--  net/rds/sysctl.c | 4
-rw-r--r--  net/sched/cls_api.c | 7
-rw-r--r--  net/sched/sch_fq.c | 10
-rw-r--r--  net/sctp/sm_make_chunk.c | 2
-rw-r--r--  sound/core/seq/seq_dummy.c | 31
-rw-r--r--  sound/soc/adi/axi-i2s.c | 2
-rw-r--r--  sound/soc/codecs/pcm512x.c | 2
-rw-r--r--  sound/soc/codecs/rt286.c | 6
-rw-r--r--  sound/soc/codecs/rt5677.c | 18
-rw-r--r--  sound/soc/codecs/ts3a227e.c | 6
-rw-r--r--  sound/soc/codecs/wm8904.c | 23
-rw-r--r--  sound/soc/codecs/wm8960.c | 2
-rw-r--r--  sound/soc/fsl/fsl_esai.h | 2
-rw-r--r--  sound/soc/fsl/fsl_ssi.c | 4
-rw-r--r--  sound/soc/fsl/imx-wm8962.c | 1
-rw-r--r--  sound/soc/generic/simple-card.c | 7
-rw-r--r--  sound/soc/intel/sst-firmware.c | 13
-rw-r--r--  sound/soc/intel/sst-haswell-ipc.c | 30
-rw-r--r--  sound/soc/omap/omap-mcbsp.c | 2
-rw-r--r--  sound/soc/rockchip/rockchip_i2s.c | 1
-rw-r--r--  sound/soc/soc-compress.c | 9
-rw-r--r--  tools/perf/scripts/perl/Perf-Trace-Util/Context.c | 5
-rw-r--r--  tools/perf/util/annotate.c | 18
-rw-r--r--  tools/perf/util/evlist.c | 2
-rw-r--r--  tools/perf/util/map.h | 16
-rw-r--r--  tools/perf/util/probe-event.c | 34
-rw-r--r--  tools/perf/util/symbol.c | 31
-rw-r--r--  tools/perf/util/symbol.h | 1
280 files changed, 2574 insertions, 1653 deletions
diff --git a/Documentation/devicetree/bindings/i2c/i2c-st.txt b/Documentation/devicetree/bindings/i2c/i2c-st.txt
index 437e0db3823c..4c26fda3844a 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-st.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-st.txt
@@ -31,7 +31,7 @@ i2c0: i2c@fed40000 {
 	compatible = "st,comms-ssc4-i2c";
 	reg = <0xfed40000 0x110>;
 	interrupts = <GIC_SPI 187 IRQ_TYPE_LEVEL_HIGH>;
-	clocks = <&CLK_S_ICN_REG_0>;
+	clocks = <&clk_s_a0_ls CLK_ICN_REG>;
 	clock-names = "ssc";
 	clock-frequency = <400000>;
 	pinctrl-names = "default";
diff --git a/Documentation/devicetree/bindings/i2c/trivial-devices.txt b/Documentation/devicetree/bindings/i2c/trivial-devices.txt
index 9f4e3824e71e..9f41d05be3be 100644
--- a/Documentation/devicetree/bindings/i2c/trivial-devices.txt
+++ b/Documentation/devicetree/bindings/i2c/trivial-devices.txt
@@ -47,6 +47,7 @@ dallas,ds3232 Extremely Accurate I²C RTC with Integrated Crystal and SRAM
 dallas,ds4510		CPU Supervisor with Nonvolatile Memory and Programmable I/O
 dallas,ds75		Digital Thermometer and Thermostat
 dlg,da9053		DA9053: flexible system level PMIC with multicore support
+dlg,da9063		DA9063: system PMIC for quad-core application processors
 epson,rx8025		High-Stability. I2C-Bus INTERFACE REAL TIME CLOCK MODULE
 epson,rx8581		I2C-BUS INTERFACE REAL TIME CLOCK MODULE
 fsl,mag3110		MAG3110: Xtrinsic High Accuracy, 3D Magnetometer
diff --git a/Documentation/networking/netlink_mmap.txt b/Documentation/networking/netlink_mmap.txt
index c6af4bac5aa8..54f10478e8e3 100644
--- a/Documentation/networking/netlink_mmap.txt
+++ b/Documentation/networking/netlink_mmap.txt
@@ -199,16 +199,9 @@ frame header.
 TX limitations
 --------------
 
-Kernel processing usually involves validation of the message received by
-user-space, then processing its contents. The kernel must assure that
-userspace is not able to modify the message contents after they have been
-validated. In order to do so, the message is copied from the ring frame
-to an allocated buffer if either of these conditions is false:
-
-- only a single mapping of the ring exists
-- the file descriptor is not shared between processes
-
-This means that for threaded programs, the kernel will fall back to copying.
+As of Jan 2015 the message is always copied from the ring frame to an
+allocated buffer due to unresolved security concerns.
+See commit 4682a0358639b29cf ("netlink: Always copy on mmap TX.").
 
 Example
 -------
diff --git a/MAINTAINERS b/MAINTAINERS
index 3c3bf861d78d..2b3aca7e40b9 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -708,6 +708,16 @@ X: drivers/iio/*/adjd*
 F:	drivers/staging/iio/*/ad*
 F:	staging/iio/trigger/iio-trig-bfin-timer.c
 
+ANDROID DRIVERS
+M:	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+M:	Arve Hjønnevåg <arve@android.com>
+M:	Riley Andrews <riandrews@android.com>
+T:	git git://git.kernel.org/pub/scm/linux/kernel/gregkh/staging.git
+L:	devel@driverdev.osuosl.org
+S:	Supported
+F:	drivers/android/
+F:	drivers/staging/android/
+
 AOA (Apple Onboard Audio) ALSA DRIVER
 M:	Johannes Berg <johannes@sipsolutions.net>
 L:	linuxppc-dev@lists.ozlabs.org
@@ -10181,6 +10191,7 @@ USERSPACE I/O (UIO)
 M:	"Hans J. Koch" <hjk@hansjkoch.de>
 M:	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 S:	Maintained
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc.git
 F:	Documentation/DocBook/uio-howto.tmpl
 F:	drivers/uio/
 F:	include/linux/uio*.h
diff --git a/Makefile b/Makefile
index 95a0e827ecd3..c8e17c05f916 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 19
 SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc7
 NAME = Diseased Newt
 
 # *DOCUMENTATION*
diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
index 98838a05ba6d..9d0ac091a52a 100644
--- a/arch/alpha/mm/fault.c
+++ b/arch/alpha/mm/fault.c
@@ -156,6 +156,8 @@ retry:
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGSEGV)
+			goto bad_area;
 		else if (fault & VM_FAULT_SIGBUS)
 			goto do_sigbus;
 		BUG();
diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c
index 6f7e3a68803a..563cb27e37f5 100644
--- a/arch/arc/mm/fault.c
+++ b/arch/arc/mm/fault.c
@@ -161,6 +161,8 @@ good_area:
 
 	if (fault & VM_FAULT_OOM)
 		goto out_of_memory;
+	else if (fault & VM_FAULT_SIGSEGV)
+		goto bad_area;
 	else if (fault & VM_FAULT_SIGBUS)
 		goto do_sigbus;
 
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index 68be9017593d..132c70e2d2f1 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -263,16 +263,37 @@ restart: adr r0, LC0
 		 * OK... Let's do some funky business here.
 		 * If we do have a DTB appended to zImage, and we do have
 		 * an ATAG list around, we want the later to be translated
-		 * and folded into the former here. To be on the safe side,
-		 * let's temporarily move the stack away into the malloc
-		 * area. No GOT fixup has occurred yet, but none of the
-		 * code we're about to call uses any global variable.
+		 * and folded into the former here. No GOT fixup has occurred
+		 * yet, but none of the code we're about to call uses any
+		 * global variable.
 		 */
-		add	sp, sp, #0x10000
+
+		/* Get the initial DTB size */
+		ldr	r5, [r6, #4]
+#ifndef __ARMEB__
+		/* convert to little endian */
+		eor	r1, r5, r5, ror #16
+		bic	r1, r1, #0x00ff0000
+		mov	r5, r5, ror #8
+		eor	r5, r5, r1, lsr #8
+#endif
+		/* 50% DTB growth should be good enough */
+		add	r5, r5, r5, lsr #1
+		/* preserve 64-bit alignment */
+		add	r5, r5, #7
+		bic	r5, r5, #7
+		/* clamp to 32KB min and 1MB max */
+		cmp	r5, #(1 << 15)
+		movlo	r5, #(1 << 15)
+		cmp	r5, #(1 << 20)
+		movhi	r5, #(1 << 20)
+		/* temporarily relocate the stack past the DTB work space */
+		add	sp, sp, r5
+
 		stmfd	sp!, {r0-r3, ip, lr}
 		mov	r0, r8
 		mov	r1, r6
-		sub	r2, sp, r6
+		mov	r2, r5
 		bl	atags_to_fdt
 
 		/*
@@ -285,11 +306,11 @@ restart: adr r0, LC0
 		bic	r0, r0, #1
 		add	r0, r0, #0x100
 		mov	r1, r6
-		sub	r2, sp, r6
+		mov	r2, r5
 		bleq	atags_to_fdt
 
 		ldmfd	sp!, {r0-r3, ip, lr}
-		sub	sp, sp, #0x10000
+		sub	sp, sp, r5
 #endif
 
 		mov	r8, r6			@ use the appended device tree
@@ -306,7 +327,7 @@ restart: adr r0, LC0
 		subs	r1, r5, r1
 		addhi	r9, r9, r1
 
-		/* Get the dtb's size */
+		/* Get the current DTB size */
 		ldr	r5, [r6, #4]
 #ifndef __ARMEB__
 		/* convert r5 (dtb size) to little endian */
diff --git a/arch/arm/boot/dts/sun4i-a10.dtsi b/arch/arm/boot/dts/sun4i-a10.dtsi
index 7b4099fcf817..d5c4669224b1 100644
--- a/arch/arm/boot/dts/sun4i-a10.dtsi
+++ b/arch/arm/boot/dts/sun4i-a10.dtsi
@@ -17,14 +17,6 @@
 
 	aliases {
 		ethernet0 = &emac;
-		serial0 = &uart0;
-		serial1 = &uart1;
-		serial2 = &uart2;
-		serial3 = &uart3;
-		serial4 = &uart4;
-		serial5 = &uart5;
-		serial6 = &uart6;
-		serial7 = &uart7;
 	};
 
 	chosen {
@@ -39,6 +31,14 @@
 				 <&ahb_gates 44>;
 			status = "disabled";
 		};
+
+		framebuffer@1 {
+			compatible = "allwinner,simple-framebuffer", "simple-framebuffer";
+			allwinner,pipeline = "de_fe0-de_be0-lcd0-hdmi";
+			clocks = <&pll5 1>, <&ahb_gates 36>, <&ahb_gates 43>,
+				 <&ahb_gates 44>, <&ahb_gates 46>;
+			status = "disabled";
+		};
 	};
 
 	cpus {
@@ -438,8 +438,8 @@
 			reg-names = "phy_ctrl", "pmu1", "pmu2";
 			clocks = <&usb_clk 8>;
 			clock-names = "usb_phy";
-			resets = <&usb_clk 1>, <&usb_clk 2>;
-			reset-names = "usb1_reset", "usb2_reset";
+			resets = <&usb_clk 0>, <&usb_clk 1>, <&usb_clk 2>;
+			reset-names = "usb0_reset", "usb1_reset", "usb2_reset";
 			status = "disabled";
 		};
 
diff --git a/arch/arm/boot/dts/sun5i-a10s-olinuxino-micro.dts b/arch/arm/boot/dts/sun5i-a10s-olinuxino-micro.dts
index fe3c559ca6a8..bfa742817690 100644
--- a/arch/arm/boot/dts/sun5i-a10s-olinuxino-micro.dts
+++ b/arch/arm/boot/dts/sun5i-a10s-olinuxino-micro.dts
@@ -55,6 +55,12 @@
 	model = "Olimex A10s-Olinuxino Micro";
 	compatible = "olimex,a10s-olinuxino-micro", "allwinner,sun5i-a10s";
 
+	aliases {
+		serial0 = &uart0;
+		serial1 = &uart2;
+		serial2 = &uart3;
+	};
+
 	soc@01c00000 {
 		emac: ethernet@01c0b000 {
 			pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/sun5i-a10s.dtsi b/arch/arm/boot/dts/sun5i-a10s.dtsi
index 1b76667f3182..2e7d8263799d 100644
--- a/arch/arm/boot/dts/sun5i-a10s.dtsi
+++ b/arch/arm/boot/dts/sun5i-a10s.dtsi
@@ -18,10 +18,6 @@
 
 	aliases {
 		ethernet0 = &emac;
-		serial0 = &uart0;
-		serial1 = &uart1;
-		serial2 = &uart2;
-		serial3 = &uart3;
 	};
 
 	chosen {
@@ -390,8 +386,8 @@
 			reg-names = "phy_ctrl", "pmu1";
 			clocks = <&usb_clk 8>;
 			clock-names = "usb_phy";
-			resets = <&usb_clk 1>;
-			reset-names = "usb1_reset";
+			resets = <&usb_clk 0>, <&usb_clk 1>;
+			reset-names = "usb0_reset", "usb1_reset";
 			status = "disabled";
 		};
 
diff --git a/arch/arm/boot/dts/sun5i-a13-hsg-h702.dts b/arch/arm/boot/dts/sun5i-a13-hsg-h702.dts
index eeed1f236ee8..c7be3abd9fcc 100644
--- a/arch/arm/boot/dts/sun5i-a13-hsg-h702.dts
+++ b/arch/arm/boot/dts/sun5i-a13-hsg-h702.dts
@@ -53,6 +53,10 @@
 	model = "HSG H702";
 	compatible = "hsg,h702", "allwinner,sun5i-a13";
 
+	aliases {
+		serial0 = &uart1;
+	};
+
 	soc@01c00000 {
 		mmc0: mmc@01c0f000 {
 			pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/sun5i-a13-olinuxino-micro.dts b/arch/arm/boot/dts/sun5i-a13-olinuxino-micro.dts
index 916ee8bb826f..3decefb3c37a 100644
--- a/arch/arm/boot/dts/sun5i-a13-olinuxino-micro.dts
+++ b/arch/arm/boot/dts/sun5i-a13-olinuxino-micro.dts
@@ -54,6 +54,10 @@
 	model = "Olimex A13-Olinuxino Micro";
 	compatible = "olimex,a13-olinuxino-micro", "allwinner,sun5i-a13";
 
+	aliases {
+		serial0 = &uart1;
+	};
+
 	soc@01c00000 {
 		mmc0: mmc@01c0f000 {
 			pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/sun5i-a13-olinuxino.dts b/arch/arm/boot/dts/sun5i-a13-olinuxino.dts
index e31d291d14cb..b421f7fa197b 100644
--- a/arch/arm/boot/dts/sun5i-a13-olinuxino.dts
+++ b/arch/arm/boot/dts/sun5i-a13-olinuxino.dts
@@ -55,6 +55,10 @@
 	model = "Olimex A13-Olinuxino";
 	compatible = "olimex,a13-olinuxino", "allwinner,sun5i-a13";
 
+	aliases {
+		serial0 = &uart1;
+	};
+
 	soc@01c00000 {
 		mmc0: mmc@01c0f000 {
 			pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/sun5i-a13.dtsi b/arch/arm/boot/dts/sun5i-a13.dtsi
index c35217ea1f64..c556688f8b8b 100644
--- a/arch/arm/boot/dts/sun5i-a13.dtsi
+++ b/arch/arm/boot/dts/sun5i-a13.dtsi
@@ -16,11 +16,6 @@
 / {
 	interrupt-parent = <&intc>;
 
-	aliases {
-		serial0 = &uart1;
-		serial1 = &uart3;
-	};
-
 	cpus {
 		#address-cells = <1>;
 		#size-cells = <0>;
@@ -349,8 +344,8 @@
 			reg-names = "phy_ctrl", "pmu1";
 			clocks = <&usb_clk 8>;
 			clock-names = "usb_phy";
-			resets = <&usb_clk 1>;
-			reset-names = "usb1_reset";
+			resets = <&usb_clk 0>, <&usb_clk 1>;
+			reset-names = "usb0_reset", "usb1_reset";
 			status = "disabled";
 		};
 
diff --git a/arch/arm/boot/dts/sun6i-a31.dtsi b/arch/arm/boot/dts/sun6i-a31.dtsi
index f47156b6572b..1e7e7bcf8307 100644
--- a/arch/arm/boot/dts/sun6i-a31.dtsi
+++ b/arch/arm/boot/dts/sun6i-a31.dtsi
@@ -53,12 +53,6 @@
 	interrupt-parent = <&gic>;
 
 	aliases {
-		serial0 = &uart0;
-		serial1 = &uart1;
-		serial2 = &uart2;
-		serial3 = &uart3;
-		serial4 = &uart4;
-		serial5 = &uart5;
 		ethernet0 = &gmac;
 	};
 
diff --git a/arch/arm/boot/dts/sun7i-a20-bananapi.dts b/arch/arm/boot/dts/sun7i-a20-bananapi.dts
index 1cf1214cc068..bd7b15add697 100644
--- a/arch/arm/boot/dts/sun7i-a20-bananapi.dts
+++ b/arch/arm/boot/dts/sun7i-a20-bananapi.dts
@@ -55,6 +55,12 @@
 	model = "LeMaker Banana Pi";
 	compatible = "lemaker,bananapi", "allwinner,sun7i-a20";
 
+	aliases {
+		serial0 = &uart0;
+		serial1 = &uart3;
+		serial2 = &uart7;
+	};
+
 	soc@01c00000 {
 		spi0: spi@01c05000 {
 			pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/sun7i-a20-hummingbird.dts b/arch/arm/boot/dts/sun7i-a20-hummingbird.dts
index 0e4bfa3b2b85..0bcefcbbb756 100644
--- a/arch/arm/boot/dts/sun7i-a20-hummingbird.dts
+++ b/arch/arm/boot/dts/sun7i-a20-hummingbird.dts
@@ -19,6 +19,14 @@
 	model = "Merrii A20 Hummingbird";
 	compatible = "merrii,a20-hummingbird", "allwinner,sun7i-a20";
 
+	aliases {
+		serial0 = &uart0;
+		serial1 = &uart2;
+		serial2 = &uart3;
+		serial3 = &uart4;
+		serial4 = &uart5;
+	};
+
 	soc@01c00000 {
 		mmc0: mmc@01c0f000 {
 			pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/sun7i-a20-olinuxino-micro.dts b/arch/arm/boot/dts/sun7i-a20-olinuxino-micro.dts
index 9d669cdf031d..66cc77707198 100644
--- a/arch/arm/boot/dts/sun7i-a20-olinuxino-micro.dts
+++ b/arch/arm/boot/dts/sun7i-a20-olinuxino-micro.dts
@@ -20,6 +20,9 @@
 	compatible = "olimex,a20-olinuxino-micro", "allwinner,sun7i-a20";
 
 	aliases {
+		serial0 = &uart0;
+		serial1 = &uart6;
+		serial2 = &uart7;
 		spi0 = &spi1;
 		spi1 = &spi2;
 	};
diff --git a/arch/arm/boot/dts/sun7i-a20.dtsi b/arch/arm/boot/dts/sun7i-a20.dtsi
index e21ce5992d56..89749ce34a84 100644
--- a/arch/arm/boot/dts/sun7i-a20.dtsi
+++ b/arch/arm/boot/dts/sun7i-a20.dtsi
@@ -54,14 +54,6 @@
 
 	aliases {
 		ethernet0 = &gmac;
-		serial0 = &uart0;
-		serial1 = &uart1;
-		serial2 = &uart2;
-		serial3 = &uart3;
-		serial4 = &uart4;
-		serial5 = &uart5;
-		serial6 = &uart6;
-		serial7 = &uart7;
 	};
 
 	chosen {
diff --git a/arch/arm/boot/dts/sun8i-a23-ippo-q8h-v5.dts b/arch/arm/boot/dts/sun8i-a23-ippo-q8h-v5.dts
index 7f2117ce6985..32ad80804dbb 100644
--- a/arch/arm/boot/dts/sun8i-a23-ippo-q8h-v5.dts
+++ b/arch/arm/boot/dts/sun8i-a23-ippo-q8h-v5.dts
@@ -55,6 +55,10 @@
 	model = "Ippo Q8H Dual Core Tablet (v5)";
 	compatible = "ippo,q8h-v5", "allwinner,sun8i-a23";
 
+	aliases {
+		serial0 = &r_uart;
+	};
+
 	chosen {
 		bootargs = "earlyprintk console=ttyS0,115200";
 	};
diff --git a/arch/arm/boot/dts/sun8i-a23.dtsi b/arch/arm/boot/dts/sun8i-a23.dtsi
index 0746cd1024d7..86584fcf5e32 100644
--- a/arch/arm/boot/dts/sun8i-a23.dtsi
+++ b/arch/arm/boot/dts/sun8i-a23.dtsi
@@ -52,15 +52,6 @@
 / {
 	interrupt-parent = <&gic>;
 
-	aliases {
-		serial0 = &uart0;
-		serial1 = &uart1;
-		serial2 = &uart2;
-		serial3 = &uart3;
-		serial4 = &uart4;
-		serial5 = &r_uart;
-	};
-
 	cpus {
 		#address-cells = <1>;
 		#size-cells = <0>;
diff --git a/arch/arm/boot/dts/sun9i-a80-optimus.dts b/arch/arm/boot/dts/sun9i-a80-optimus.dts
index 506948f582ee..11ec71072e81 100644
--- a/arch/arm/boot/dts/sun9i-a80-optimus.dts
+++ b/arch/arm/boot/dts/sun9i-a80-optimus.dts
@@ -54,6 +54,11 @@
 	model = "Merrii A80 Optimus Board";
 	compatible = "merrii,a80-optimus", "allwinner,sun9i-a80";
 
+	aliases {
+		serial0 = &uart0;
+		serial1 = &uart4;
+	};
+
 	chosen {
 		bootargs = "earlyprintk console=ttyS0,115200";
 	};
diff --git a/arch/arm/boot/dts/sun9i-a80.dtsi b/arch/arm/boot/dts/sun9i-a80.dtsi
index 494714f67b57..9ef4438206a9 100644
--- a/arch/arm/boot/dts/sun9i-a80.dtsi
+++ b/arch/arm/boot/dts/sun9i-a80.dtsi
@@ -52,16 +52,6 @@
 / {
 	interrupt-parent = <&gic>;
 
-	aliases {
-		serial0 = &uart0;
-		serial1 = &uart1;
-		serial2 = &uart2;
-		serial3 = &uart3;
-		serial4 = &uart4;
-		serial5 = &uart5;
-		serial6 = &r_uart;
-	};
-
 	cpus {
 		#address-cells = <1>;
 		#size-cells = <0>;
diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h
index 66ce17655bb9..7b0152321b20 100644
--- a/arch/arm/include/asm/kvm_emulate.h
+++ b/arch/arm/include/asm/kvm_emulate.h
@@ -38,6 +38,16 @@ static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
 	vcpu->arch.hcr = HCR_GUEST_MASK;
 }
 
+static inline unsigned long vcpu_get_hcr(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.hcr;
+}
+
+static inline void vcpu_set_hcr(struct kvm_vcpu *vcpu, unsigned long hcr)
+{
+	vcpu->arch.hcr = hcr;
+}
+
 static inline bool vcpu_mode_is_32bit(struct kvm_vcpu *vcpu)
 {
 	return 1;
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index 254e0650e48b..04b4ea0b550a 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -125,9 +125,6 @@ struct kvm_vcpu_arch {
 	 * Anything that is not used directly from assembly code goes
 	 * here.
 	 */
-	/* dcache set/way operation pending */
-	int last_pcpu;
-	cpumask_t require_dcache_flush;
 
 	/* Don't run the guest on this vcpu */
 	bool pause;
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index 63e0ecc04901..1bca8f8af442 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -44,6 +44,7 @@
 
 #ifndef __ASSEMBLY__
 
+#include <linux/highmem.h>
 #include <asm/cacheflush.h>
 #include <asm/pgalloc.h>
 
@@ -161,13 +162,10 @@ static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
 	return (vcpu->arch.cp15[c1_SCTLR] & 0b101) == 0b101;
 }
 
-static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva,
+static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu, pfn_t pfn,
 					     unsigned long size,
 					     bool ipa_uncached)
 {
-	if (!vcpu_has_cache_enabled(vcpu) || ipa_uncached)
-		kvm_flush_dcache_to_poc((void *)hva, size);
-
 	/*
 	 * If we are going to insert an instruction page and the icache is
 	 * either VIPT or PIPT, there is a potential problem where the host
@@ -179,18 +177,77 @@ static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva,
 	 *
 	 * VIVT caches are tagged using both the ASID and the VMID and doesn't
 	 * need any kind of flushing (DDI 0406C.b - Page B3-1392).
+	 *
+	 * We need to do this through a kernel mapping (using the
+	 * user-space mapping has proved to be the wrong
+	 * solution). For that, we need to kmap one page at a time,
+	 * and iterate over the range.
 	 */
-	if (icache_is_pipt()) {
-		__cpuc_coherent_user_range(hva, hva + size);
-	} else if (!icache_is_vivt_asid_tagged()) {
+
+	bool need_flush = !vcpu_has_cache_enabled(vcpu) || ipa_uncached;
+
+	VM_BUG_ON(size & ~PAGE_MASK);
+
+	if (!need_flush && !icache_is_pipt())
+		goto vipt_cache;
+
+	while (size) {
+		void *va = kmap_atomic_pfn(pfn);
+
+		if (need_flush)
+			kvm_flush_dcache_to_poc(va, PAGE_SIZE);
+
+		if (icache_is_pipt())
+			__cpuc_coherent_user_range((unsigned long)va,
+						   (unsigned long)va + PAGE_SIZE);
+
+		size -= PAGE_SIZE;
+		pfn++;
+
+		kunmap_atomic(va);
+	}
+
+vipt_cache:
+	if (!icache_is_pipt() && !icache_is_vivt_asid_tagged()) {
 		/* any kind of VIPT cache */
 		__flush_icache_all();
 	}
 }
 
+static inline void __kvm_flush_dcache_pte(pte_t pte)
+{
+	void *va = kmap_atomic(pte_page(pte));
+
+	kvm_flush_dcache_to_poc(va, PAGE_SIZE);
+
+	kunmap_atomic(va);
+}
+
+static inline void __kvm_flush_dcache_pmd(pmd_t pmd)
+{
+	unsigned long size = PMD_SIZE;
+	pfn_t pfn = pmd_pfn(pmd);
+
+	while (size) {
+		void *va = kmap_atomic_pfn(pfn);
+
+		kvm_flush_dcache_to_poc(va, PAGE_SIZE);
+
+		pfn++;
+		size -= PAGE_SIZE;
+
+		kunmap_atomic(va);
+	}
+}
+
+static inline void __kvm_flush_dcache_pud(pud_t pud)
+{
+}
+
 #define kvm_virt_to_phys(x)		virt_to_idmap((unsigned long)(x))
 
-void stage2_flush_vm(struct kvm *kvm);
+void kvm_set_way_flush(struct kvm_vcpu *vcpu);
+void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);
 
 #endif /* !__ASSEMBLY__ */
 
diff --git a/arch/arm/kernel/entry-v7m.S b/arch/arm/kernel/entry-v7m.S
index 2260f1855820..8944f4991c3c 100644
--- a/arch/arm/kernel/entry-v7m.S
+++ b/arch/arm/kernel/entry-v7m.S
@@ -22,10 +22,12 @@
 
 __invalid_entry:
 	v7m_exception_entry
+#ifdef CONFIG_PRINTK
 	adr	r0, strerr
 	mrs	r1, ipsr
 	mov	r2, lr
 	bl	printk
+#endif
 	mov	r0, sp
 	bl	show_regs
 1:	b	1b
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 2d6d91001062..0b0d58a905c4 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -281,15 +281,6 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	vcpu->cpu = cpu;
 	vcpu->arch.host_cpu_context = this_cpu_ptr(kvm_host_cpu_state);
 
-	/*
-	 * Check whether this vcpu requires the cache to be flushed on
-	 * this physical CPU. This is a consequence of doing dcache
-	 * operations by set/way on this vcpu. We do it here to be in
-	 * a non-preemptible section.
-	 */
-	if (cpumask_test_and_clear_cpu(cpu, &vcpu->arch.require_dcache_flush))
-		flush_cache_all(); /* We'd really want v7_flush_dcache_all() */
-
 	kvm_arm_set_running_vcpu(vcpu);
 }
 
@@ -541,7 +532,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 		ret = kvm_call_hyp(__kvm_vcpu_run, vcpu);
 
 		vcpu->mode = OUTSIDE_GUEST_MODE;
-		vcpu->arch.last_pcpu = smp_processor_id();
 		kvm_guest_exit();
 		trace_kvm_exit(*vcpu_pc(vcpu));
 		/*
diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c
index 7928dbdf2102..f3d88dc388bc 100644
--- a/arch/arm/kvm/coproc.c
+++ b/arch/arm/kvm/coproc.c
@@ -189,82 +189,40 @@ static bool access_l2ectlr(struct kvm_vcpu *vcpu,
 	return true;
 }
 
-/* See note at ARM ARM B1.14.4 */
+/*
+ * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
+ */
 static bool access_dcsw(struct kvm_vcpu *vcpu,
 			const struct coproc_params *p,
 			const struct coproc_reg *r)
 {
-	unsigned long val;
-	int cpu;
-
 	if (!p->is_write)
 		return read_from_write_only(vcpu, p);
 
-	cpu = get_cpu();
-
-	cpumask_setall(&vcpu->arch.require_dcache_flush);
-	cpumask_clear_cpu(cpu, &vcpu->arch.require_dcache_flush);
-
-	/* If we were already preempted, take the long way around */
-	if (cpu != vcpu->arch.last_pcpu) {
-		flush_cache_all();
-		goto done;
-	}
-
-	val = *vcpu_reg(vcpu, p->Rt1);
-
-	switch (p->CRm) {
-	case 6:			/* Upgrade DCISW to DCCISW, as per HCR.SWIO */
-	case 14:		/* DCCISW */
-		asm volatile("mcr p15, 0, %0, c7, c14, 2" : : "r" (val));
-		break;
-
-	case 10:		/* DCCSW */
-		asm volatile("mcr p15, 0, %0, c7, c10, 2" : : "r" (val));
-		break;
-	}
-
-done:
-	put_cpu();
-
+	kvm_set_way_flush(vcpu);
 	return true;
 }
 
 /*
  * Generic accessor for VM registers. Only called as long as HCR_TVM
- * is set.
+ * is set. If the guest enables the MMU, we stop trapping the VM
+ * sys_regs and leave it in complete control of the caches.
+ *
+ * Used by the cpu-specific code.
  */
-static bool access_vm_reg(struct kvm_vcpu *vcpu,
+bool access_vm_reg(struct kvm_vcpu *vcpu,
 			  const struct coproc_params *p,
 			  const struct coproc_reg *r)
 {
+	bool was_enabled = vcpu_has_cache_enabled(vcpu);
+
 	BUG_ON(!p->is_write);
 
 	vcpu->arch.cp15[r->reg] = *vcpu_reg(vcpu, p->Rt1);
 	if (p->is_64bit)
 		vcpu->arch.cp15[r->reg + 1] = *vcpu_reg(vcpu, p->Rt2);
 
-	return true;
-}
-
-/*
- * SCTLR accessor. Only called as long as HCR_TVM is set.  If the
- * guest enables the MMU, we stop trapping the VM sys_regs and leave
- * it in complete control of the caches.
- *
- * Used by the cpu-specific code.
- */
-bool access_sctlr(struct kvm_vcpu *vcpu,
-		  const struct coproc_params *p,
-		  const struct coproc_reg *r)
-{
-	access_vm_reg(vcpu, p, r);
-
-	if (vcpu_has_cache_enabled(vcpu)) {	/* MMU+Caches enabled? */
-		vcpu->arch.hcr &= ~HCR_TVM;
-		stage2_flush_vm(vcpu->kvm);
-	}
-
+	kvm_toggle_cache(vcpu, was_enabled);
 	return true;
 }
 
diff --git a/arch/arm/kvm/coproc.h b/arch/arm/kvm/coproc.h
index 1a44bbe39643..88d24a3a9778 100644
--- a/arch/arm/kvm/coproc.h
+++ b/arch/arm/kvm/coproc.h
@@ -153,8 +153,8 @@ static inline int cmp_reg(const struct coproc_reg *i1,
 #define is64		.is_64 = true
 #define is32		.is_64 = false
 
-bool access_sctlr(struct kvm_vcpu *vcpu,
+bool access_vm_reg(struct kvm_vcpu *vcpu,
 		  const struct coproc_params *p,
 		  const struct coproc_reg *r);
 
 #endif /* __ARM_KVM_COPROC_LOCAL_H__ */
diff --git a/arch/arm/kvm/coproc_a15.c b/arch/arm/kvm/coproc_a15.c
index e6f4ae48bda9..a7136757d373 100644
--- a/arch/arm/kvm/coproc_a15.c
+++ b/arch/arm/kvm/coproc_a15.c
@@ -34,7 +34,7 @@
 static const struct coproc_reg a15_regs[] = {
 	/* SCTLR: swapped by interrupt.S. */
 	{ CRn( 1), CRm( 0), Op1( 0), Op2( 0), is32,
-			access_sctlr, reset_val, c1_SCTLR, 0x00C50078 },
+			access_vm_reg, reset_val, c1_SCTLR, 0x00C50078 },
 };
 
 static struct kvm_coproc_target_table a15_target_table = {
diff --git a/arch/arm/kvm/coproc_a7.c b/arch/arm/kvm/coproc_a7.c
index 17fc7cd479d3..b19e46d1b2c0 100644
--- a/arch/arm/kvm/coproc_a7.c
+++ b/arch/arm/kvm/coproc_a7.c
@@ -37,7 +37,7 @@
 static const struct coproc_reg a7_regs[] = {
 	/* SCTLR: swapped by interrupt.S. */
 	{ CRn( 1), CRm( 0), Op1( 0), Op2( 0), is32,
-			access_sctlr, reset_val, c1_SCTLR, 0x00C50878 },
+			access_vm_reg, reset_val, c1_SCTLR, 0x00C50878 },
 };
 
 static struct kvm_coproc_target_table a7_target_table = {
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 1dc9778a00af..136662547ca6 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -58,6 +58,26 @@ static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
 	kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
 }
 
+/*
+ * D-Cache management functions. They take the page table entries by
+ * value, as they are flushing the cache using the kernel mapping (or
+ * kmap on 32bit).
+ */
+static void kvm_flush_dcache_pte(pte_t pte)
+{
+	__kvm_flush_dcache_pte(pte);
+}
+
+static void kvm_flush_dcache_pmd(pmd_t pmd)
+{
+	__kvm_flush_dcache_pmd(pmd);
+}
+
+static void kvm_flush_dcache_pud(pud_t pud)
+{
+	__kvm_flush_dcache_pud(pud);
+}
+
 static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
 				  int min, int max)
 {
@@ -119,6 +139,26 @@ static void clear_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr)
 	put_page(virt_to_page(pmd));
 }
 
+/*
+ * Unmapping vs dcache management:
+ *
+ * If a guest maps certain memory pages as uncached, all writes will
+ * bypass the data cache and go directly to RAM. However, the CPUs
+ * can still speculate reads (not writes) and fill cache lines with
+ * data.
+ *
+ * Those cache lines will be *clean* cache lines though, so a
+ * clean+invalidate operation is equivalent to an invalidate
+ * operation, because no cache lines are marked dirty.
+ *
+ * Those clean cache lines could be filled prior to an uncached write
+ * by the guest, and the cache coherent IO subsystem would therefore
+ * end up writing old data to disk.
+ *
+ * This is why right after unmapping a page/section and invalidating
+ * the corresponding TLBs, we call kvm_flush_dcache_p*() to make sure
+ * the IO subsystem will never hit in the cache.
+ */
 static void unmap_ptes(struct kvm *kvm, pmd_t *pmd,
 		       phys_addr_t addr, phys_addr_t end)
 {
@@ -128,9 +168,16 @@ static void unmap_ptes(struct kvm *kvm, pmd_t *pmd,
 	start_pte = pte = pte_offset_kernel(pmd, addr);
 	do {
 		if (!pte_none(*pte)) {
+			pte_t old_pte = *pte;
+
 			kvm_set_pte(pte, __pte(0));
-			put_page(virt_to_page(pte));
 			kvm_tlb_flush_vmid_ipa(kvm, addr);
+
+			/* No need to invalidate the cache for device mappings */
+			if ((pte_val(old_pte) & PAGE_S2_DEVICE) != PAGE_S2_DEVICE)
+				kvm_flush_dcache_pte(old_pte);
+
+			put_page(virt_to_page(pte));
 		}
 	} while (pte++, addr += PAGE_SIZE, addr != end);
 
@@ -149,8 +196,13 @@ static void unmap_pmds(struct kvm *kvm, pud_t *pud,
 		next = kvm_pmd_addr_end(addr, end);
 		if (!pmd_none(*pmd)) {
 			if (kvm_pmd_huge(*pmd)) {
+				pmd_t old_pmd = *pmd;
+
 				pmd_clear(pmd);
 				kvm_tlb_flush_vmid_ipa(kvm, addr);
+
+				kvm_flush_dcache_pmd(old_pmd);
+
 				put_page(virt_to_page(pmd));
 			} else {
 				unmap_ptes(kvm, pmd, addr, next);
@@ -173,8 +225,13 @@ static void unmap_puds(struct kvm *kvm, pgd_t *pgd,
 		next = kvm_pud_addr_end(addr, end);
 		if (!pud_none(*pud)) {
 			if (pud_huge(*pud)) {
+				pud_t old_pud = *pud;
+
 				pud_clear(pud);
 				kvm_tlb_flush_vmid_ipa(kvm, addr);
+
+				kvm_flush_dcache_pud(old_pud);
+
 				put_page(virt_to_page(pud));
 			} else {
 				unmap_pmds(kvm, pud, addr, next);
@@ -209,10 +266,9 @@ static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd,
 
 	pte = pte_offset_kernel(pmd, addr);
 	do {
-		if (!pte_none(*pte)) {
-			hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT);
-			kvm_flush_dcache_to_poc((void*)hva, PAGE_SIZE);
-		}
+		if (!pte_none(*pte) &&
+		    (pte_val(*pte) & PAGE_S2_DEVICE) != PAGE_S2_DEVICE)
+			kvm_flush_dcache_pte(*pte);
 	} while (pte++, addr += PAGE_SIZE, addr != end);
 }
 
@@ -226,12 +282,10 @@ static void stage2_flush_pmds(struct kvm *kvm, pud_t *pud,
 	do {
 		next = kvm_pmd_addr_end(addr, end);
 		if (!pmd_none(*pmd)) {
-			if (kvm_pmd_huge(*pmd)) {
-				hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT);
-				kvm_flush_dcache_to_poc((void*)hva, PMD_SIZE);
-			} else {
+			if (kvm_pmd_huge(*pmd))
+				kvm_flush_dcache_pmd(*pmd);
+			else
 				stage2_flush_ptes(kvm, pmd, addr, next);
-			}
 		}
 	} while (pmd++, addr = next, addr != end);
 }
@@ -246,12 +300,10 @@ static void stage2_flush_puds(struct kvm *kvm, pgd_t *pgd,
 	do {
 		next = kvm_pud_addr_end(addr, end);
 		if (!pud_none(*pud)) {
-			if (pud_huge(*pud)) {
-				hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT);
-				kvm_flush_dcache_to_poc((void*)hva, PUD_SIZE);
-			} else {
+			if (pud_huge(*pud))
+				kvm_flush_dcache_pud(*pud);
+			else
 				stage2_flush_pmds(kvm, pud, addr, next);
-			}
 		}
 	} while (pud++, addr = next, addr != end);
 }
@@ -278,7 +330,7 @@ static void stage2_flush_memslot(struct kvm *kvm,
  * Go through the stage 2 page tables and invalidate any cache lines
  * backing memory already mapped to the VM.
  */
-void stage2_flush_vm(struct kvm *kvm)
+static void stage2_flush_vm(struct kvm *kvm)
 {
 	struct kvm_memslots *slots;
 	struct kvm_memory_slot *memslot;
@@ -905,6 +957,12 @@ static bool kvm_is_device_pfn(unsigned long pfn)
 	return !pfn_valid(pfn);
 }
 
+static void coherent_cache_guest_page(struct kvm_vcpu *vcpu, pfn_t pfn,
+				      unsigned long size, bool uncached)
+{
+	__coherent_cache_guest_page(vcpu, pfn, size, uncached);
+}
+
 static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 			  struct kvm_memory_slot *memslot, unsigned long hva,
 			  unsigned long fault_status)
@@ -994,8 +1052,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 			kvm_set_s2pmd_writable(&new_pmd);
 			kvm_set_pfn_dirty(pfn);
 		}
-		coherent_cache_guest_page(vcpu, hva & PMD_MASK, PMD_SIZE,
-					  fault_ipa_uncached);
+		coherent_cache_guest_page(vcpu, pfn, PMD_SIZE, fault_ipa_uncached);
 		ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd);
 	} else {
 		pte_t new_pte = pfn_pte(pfn, mem_type);
@@ -1003,8 +1060,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 			kvm_set_s2pte_writable(&new_pte);
 			kvm_set_pfn_dirty(pfn);
 		}
-		coherent_cache_guest_page(vcpu, hva, PAGE_SIZE,
-					  fault_ipa_uncached);
+		coherent_cache_guest_page(vcpu, pfn, PAGE_SIZE, fault_ipa_uncached);
 		ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte,
 			pgprot_val(mem_type) == pgprot_val(PAGE_S2_DEVICE));
 	}
@@ -1411,3 +1467,71 @@ void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
 	unmap_stage2_range(kvm, gpa, size);
 	spin_unlock(&kvm->mmu_lock);
 }
+
+/*
+ * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
+ *
+ * Main problems:
+ * - S/W ops are local to a CPU (not broadcast)
+ * - We have line migration behind our back (speculation)
+ * - System caches don't support S/W at all (damn!)
+ *
+ * In the face of the above, the best we can do is to try and convert
+ * S/W ops to VA ops. Because the guest is not allowed to infer the
+ * S/W to PA mapping, it can only use S/W to nuke the whole cache,
+ * which is a rather good thing for us.
+ *
+ * Also, it is only used when turning caches on/off ("The expected
+ * usage of the cache maintenance instructions that operate by set/way
+ * is associated with the cache maintenance instructions associated
+ * with the powerdown and powerup of caches, if this is required by
+ * the implementation.").
+ *
+ * We use the following policy:
+ *
+ * - If we trap a S/W operation, we enable VM trapping to detect
+ *   caches being turned on/off, and do a full clean.
+ *
+ * - We flush the caches on both caches being turned on and off.
+ *
+ * - Once the caches are enabled, we stop trapping VM ops.
+ */
+void kvm_set_way_flush(struct kvm_vcpu *vcpu)
+{
+	unsigned long hcr = vcpu_get_hcr(vcpu);
+
+	/*
+	 * If this is the first time we do a S/W operation
+	 * (i.e. HCR_TVM not set) flush the whole memory, and set the
+	 * VM trapping.
+	 *
+	 * Otherwise, rely on the VM trapping to wait for the MMU +
+	 * Caches to be turned off. At that point, we'll be able to
+	 * clean the caches again.
+	 */
+	if (!(hcr & HCR_TVM)) {
+		trace_kvm_set_way_flush(*vcpu_pc(vcpu),
+					vcpu_has_cache_enabled(vcpu));
+		stage2_flush_vm(vcpu->kvm);
+		vcpu_set_hcr(vcpu, hcr | HCR_TVM);
+	}
+}
+
+void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled)
+{
+	bool now_enabled = vcpu_has_cache_enabled(vcpu);
+
+	/*
+	 * If switching the MMU+caches on, need to invalidate the caches.
+	 * If switching it off, need to clean the caches.
+	 * Clean + invalidate does the trick always.
+	 */
+	if (now_enabled != was_enabled)
+		stage2_flush_vm(vcpu->kvm);
+
+	/* Caches are now on, stop trapping VM ops (until a S/W op) */
+	if (now_enabled)
+		vcpu_set_hcr(vcpu, vcpu_get_hcr(vcpu) & ~HCR_TVM);
+
+	trace_kvm_toggle_cache(*vcpu_pc(vcpu), was_enabled, now_enabled);
+}
diff --git a/arch/arm/kvm/trace.h b/arch/arm/kvm/trace.h
index b1d640f78623..b6a6e7102201 100644
--- a/arch/arm/kvm/trace.h
+++ b/arch/arm/kvm/trace.h
@@ -223,6 +223,45 @@ TRACE_EVENT(kvm_hvc,
 		  __entry->vcpu_pc, __entry->r0, __entry->imm)
 );
 
+TRACE_EVENT(kvm_set_way_flush,
+	TP_PROTO(unsigned long vcpu_pc, bool cache),
+	TP_ARGS(vcpu_pc, cache),
+
+	TP_STRUCT__entry(
+		__field(	unsigned long,	vcpu_pc	)
+		__field(	bool,		cache	)
+	),
+
+	TP_fast_assign(
+		__entry->vcpu_pc = vcpu_pc;
+		__entry->cache = cache;
+	),
+
+	TP_printk("S/W flush at 0x%016lx (cache %s)",
+		  __entry->vcpu_pc, __entry->cache ? "on" : "off")
+);
+
+TRACE_EVENT(kvm_toggle_cache,
+	TP_PROTO(unsigned long vcpu_pc, bool was, bool now),
+	TP_ARGS(vcpu_pc, was, now),
+
+	TP_STRUCT__entry(
+		__field(	unsigned long,	vcpu_pc	)
+		__field(	bool,		was	)
+		__field(	bool,		now	)
+	),
+
+	TP_fast_assign(
+		__entry->vcpu_pc = vcpu_pc;
+		__entry->was = was;
+		__entry->now = now;
+	),
+
+	TP_printk("VM op at 0x%016lx (cache was %s, now %s)",
+		  __entry->vcpu_pc, __entry->was ? "on" : "off",
+		  __entry->now ? "on" : "off")
+);
+
 #endif /* _TRACE_KVM_H */
 
 #undef TRACE_INCLUDE_PATH
diff --git a/arch/arm/mach-mvebu/coherency.c b/arch/arm/mach-mvebu/coherency.c
index caa21e9b8cd9..ccef8806bb58 100644
--- a/arch/arm/mach-mvebu/coherency.c
+++ b/arch/arm/mach-mvebu/coherency.c
@@ -190,6 +190,13 @@ static void __init armada_375_380_coherency_init(struct device_node *np)
190 arch_ioremap_caller = armada_pcie_wa_ioremap_caller; 190 arch_ioremap_caller = armada_pcie_wa_ioremap_caller;
191 191
192 /* 192 /*
193 * We should switch the PL310 to I/O coherency mode only if
194 * I/O coherency is actually enabled.
195 */
196 if (!coherency_available())
197 return;
198
199 /*
193 * Add the PL310 property "arm,io-coherent". This makes sure the 200 * Add the PL310 property "arm,io-coherent". This makes sure the
194 * outer sync operation is not used, which allows us to 201 * outer sync operation is not used, which allows us to
195 * work around the system erratum that causes deadlocks when 202 * work around the system erratum that causes deadlocks when
diff --git a/arch/arm/mach-shmobile/board-ape6evm.c b/arch/arm/mach-shmobile/board-ape6evm.c
index 66f67816a844..444f22d370f0 100644
--- a/arch/arm/mach-shmobile/board-ape6evm.c
+++ b/arch/arm/mach-shmobile/board-ape6evm.c
@@ -18,6 +18,8 @@
18#include <linux/gpio_keys.h> 18#include <linux/gpio_keys.h>
19#include <linux/input.h> 19#include <linux/input.h>
20#include <linux/interrupt.h> 20#include <linux/interrupt.h>
21#include <linux/irqchip.h>
22#include <linux/irqchip/arm-gic.h>
21#include <linux/kernel.h> 23#include <linux/kernel.h>
22#include <linux/mfd/tmio.h> 24#include <linux/mfd/tmio.h>
23#include <linux/mmc/host.h> 25#include <linux/mmc/host.h>
@@ -273,6 +275,22 @@ static void __init ape6evm_add_standard_devices(void)
273 sizeof(ape6evm_leds_pdata)); 275 sizeof(ape6evm_leds_pdata));
274} 276}
275 277
278static void __init ape6evm_legacy_init_time(void)
279{
280 /* Do not invoke DT-based timers via clocksource_of_init() */
281}
282
283static void __init ape6evm_legacy_init_irq(void)
284{
285 void __iomem *gic_dist_base = ioremap_nocache(0xf1001000, 0x1000);
286 void __iomem *gic_cpu_base = ioremap_nocache(0xf1002000, 0x1000);
287
288 gic_init(0, 29, gic_dist_base, gic_cpu_base);
289
290 /* Do not invoke DT-based interrupt code via irqchip_init() */
291}
292
293
276static const char *ape6evm_boards_compat_dt[] __initdata = { 294static const char *ape6evm_boards_compat_dt[] __initdata = {
277 "renesas,ape6evm", 295 "renesas,ape6evm",
278 NULL, 296 NULL,
@@ -280,7 +298,9 @@ static const char *ape6evm_boards_compat_dt[] __initdata = {
280 298
281DT_MACHINE_START(APE6EVM_DT, "ape6evm") 299DT_MACHINE_START(APE6EVM_DT, "ape6evm")
282 .init_early = shmobile_init_delay, 300 .init_early = shmobile_init_delay,
301 .init_irq = ape6evm_legacy_init_irq,
283 .init_machine = ape6evm_add_standard_devices, 302 .init_machine = ape6evm_add_standard_devices,
284 .init_late = shmobile_init_late, 303 .init_late = shmobile_init_late,
285 .dt_compat = ape6evm_boards_compat_dt, 304 .dt_compat = ape6evm_boards_compat_dt,
305 .init_time = ape6evm_legacy_init_time,
286MACHINE_END 306MACHINE_END
diff --git a/arch/arm/mach-shmobile/board-lager.c b/arch/arm/mach-shmobile/board-lager.c
index f8197eb6e566..65b128dd4072 100644
--- a/arch/arm/mach-shmobile/board-lager.c
+++ b/arch/arm/mach-shmobile/board-lager.c
@@ -21,6 +21,8 @@
21#include <linux/input.h> 21#include <linux/input.h>
22#include <linux/interrupt.h> 22#include <linux/interrupt.h>
23#include <linux/irq.h> 23#include <linux/irq.h>
24#include <linux/irqchip.h>
25#include <linux/irqchip/arm-gic.h>
24#include <linux/kernel.h> 26#include <linux/kernel.h>
25#include <linux/leds.h> 27#include <linux/leds.h>
26#include <linux/mfd/tmio.h> 28#include <linux/mfd/tmio.h>
@@ -811,6 +813,16 @@ static void __init lager_init(void)
811 lager_ksz8041_fixup); 813 lager_ksz8041_fixup);
812} 814}
813 815
816static void __init lager_legacy_init_irq(void)
817{
818 void __iomem *gic_dist_base = ioremap_nocache(0xf1001000, 0x1000);
819 void __iomem *gic_cpu_base = ioremap_nocache(0xf1002000, 0x1000);
820
821 gic_init(0, 29, gic_dist_base, gic_cpu_base);
822
823 /* Do not invoke DT-based interrupt code via irqchip_init() */
824}
825
814static const char * const lager_boards_compat_dt[] __initconst = { 826static const char * const lager_boards_compat_dt[] __initconst = {
815 "renesas,lager", 827 "renesas,lager",
816 NULL, 828 NULL,
@@ -819,6 +831,7 @@ static const char * const lager_boards_compat_dt[] __initconst = {
819DT_MACHINE_START(LAGER_DT, "lager") 831DT_MACHINE_START(LAGER_DT, "lager")
820 .smp = smp_ops(r8a7790_smp_ops), 832 .smp = smp_ops(r8a7790_smp_ops),
821 .init_early = shmobile_init_delay, 833 .init_early = shmobile_init_delay,
834 .init_irq = lager_legacy_init_irq,
822 .init_time = rcar_gen2_timer_init, 835 .init_time = rcar_gen2_timer_init,
823 .init_machine = lager_init, 836 .init_machine = lager_init,
824 .init_late = shmobile_init_late, 837 .init_late = shmobile_init_late,
diff --git a/arch/arm/mach-shmobile/setup-rcar-gen2.c b/arch/arm/mach-shmobile/setup-rcar-gen2.c
index 3dd6edd9bd1d..cc9470dfb1ce 100644
--- a/arch/arm/mach-shmobile/setup-rcar-gen2.c
+++ b/arch/arm/mach-shmobile/setup-rcar-gen2.c
@@ -133,7 +133,9 @@ void __init rcar_gen2_timer_init(void)
133#ifdef CONFIG_COMMON_CLK 133#ifdef CONFIG_COMMON_CLK
134 rcar_gen2_clocks_init(mode); 134 rcar_gen2_clocks_init(mode);
135#endif 135#endif
136#ifdef CONFIG_ARCH_SHMOBILE_MULTI
136 clocksource_of_init(); 137 clocksource_of_init();
138#endif
137} 139}
138 140
139struct memory_reserve_config { 141struct memory_reserve_config {
diff --git a/arch/arm/mach-shmobile/timer.c b/arch/arm/mach-shmobile/timer.c
index f1d027aa7a81..0edf2a6d2bbe 100644
--- a/arch/arm/mach-shmobile/timer.c
+++ b/arch/arm/mach-shmobile/timer.c
@@ -70,6 +70,18 @@ void __init shmobile_init_delay(void)
70 if (!max_freq) 70 if (!max_freq)
71 return; 71 return;
72 72
73#ifdef CONFIG_ARCH_SHMOBILE_LEGACY
74 /* Non-multiplatform r8a73a4 SoC cannot use the arch timer, since
75 * the GIC is set up from C code while the timer is probed via DT */
76 if (of_machine_is_compatible("renesas,r8a73a4"))
77 has_arch_timer = false;
78
79 /* Non-multiplatform r8a7790 SoC cannot use the arch timer, since
80 * the GIC is set up from C code while the timer is probed via DT */
81 if (of_machine_is_compatible("renesas,r8a7790"))
82 has_arch_timer = false;
83#endif
84
73 if (!has_arch_timer || !IS_ENABLED(CONFIG_ARM_ARCH_TIMER)) { 85 if (!has_arch_timer || !IS_ENABLED(CONFIG_ARM_ARCH_TIMER)) {
74 if (is_a7_a8_a9) 86 if (is_a7_a8_a9)
75 shmobile_setup_delay_hz(max_freq, 1, 3); 87 shmobile_setup_delay_hz(max_freq, 1, 3);
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 03823e784f63..c43c71455566 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -1012,6 +1012,7 @@ config ARCH_SUPPORTS_BIG_ENDIAN
1012 1012
1013config ARM_KERNMEM_PERMS 1013config ARM_KERNMEM_PERMS
1014 bool "Restrict kernel memory permissions" 1014 bool "Restrict kernel memory permissions"
1015 depends on MMU
1015 help 1016 help
1016 If this is set, kernel memory other than kernel text (and rodata) 1017 If this is set, kernel memory other than kernel text (and rodata)
1017 will be made non-executable. The tradeoff is that each region is 1018 will be made non-executable. The tradeoff is that each region is
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index 91892569710f..845769e41332 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -144,21 +144,17 @@ static void flush_context(unsigned int cpu)
 	/* Update the list of reserved ASIDs and the ASID bitmap. */
 	bitmap_clear(asid_map, 0, NUM_USER_ASIDS);
 	for_each_possible_cpu(i) {
-		if (i == cpu) {
-			asid = 0;
-		} else {
-			asid = atomic64_xchg(&per_cpu(active_asids, i), 0);
-			/*
-			 * If this CPU has already been through a
-			 * rollover, but hasn't run another task in
-			 * the meantime, we must preserve its reserved
-			 * ASID, as this is the only trace we have of
-			 * the process it is still running.
-			 */
-			if (asid == 0)
-				asid = per_cpu(reserved_asids, i);
-			__set_bit(asid & ~ASID_MASK, asid_map);
-		}
+		asid = atomic64_xchg(&per_cpu(active_asids, i), 0);
+		/*
+		 * If this CPU has already been through a
+		 * rollover, but hasn't run another task in
+		 * the meantime, we must preserve its reserved
+		 * ASID, as this is the only trace we have of
+		 * the process it is still running.
+		 */
+		if (asid == 0)
+			asid = per_cpu(reserved_asids, i);
+		__set_bit(asid & ~ASID_MASK, asid_map);
 		per_cpu(reserved_asids, i) = asid;
 	}
 
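
The rewritten loop treats every CPU uniformly, including the one driving the rollover: grab-and-clear its active ASID, and if that comes back zero (the CPU already saw a rollover and has run nothing since), fall back to its reserved ASID. A minimal userspace model of that bookkeeping, with the bitmap handling omitted (illustrative only, not kernel code):

	#include <stdatomic.h>

	#define NR_CPUS 4

	static atomic_long active_asids[NR_CPUS];
	static long reserved_asids[NR_CPUS];

	static void rollover_scan(void)
	{
		for (int i = 0; i < NR_CPUS; i++) {
			/* Grab-and-clear, like the atomic64_xchg() above. */
			long asid = atomic_exchange(&active_asids[i], 0);

			/* Already 0: this CPU rolled over earlier and has
			 * run nothing since; keep its reserved ASID alive
			 * for the task it is still running. */
			if (asid == 0)
				asid = reserved_asids[i];
			reserved_asids[i] = asid;
		}
	}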
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 7864797609b3..903dba064a03 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -1940,13 +1940,32 @@ void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping)
 }
 EXPORT_SYMBOL_GPL(arm_iommu_release_mapping);
 
+static int __arm_iommu_attach_device(struct device *dev,
+				     struct dma_iommu_mapping *mapping)
+{
+	int err;
+
+	err = iommu_attach_device(mapping->domain, dev);
+	if (err)
+		return err;
+
+	kref_get(&mapping->kref);
+	dev->archdata.mapping = mapping;
+
+	pr_debug("Attached IOMMU controller to %s device.\n", dev_name(dev));
+	return 0;
+}
+
 /**
  * arm_iommu_attach_device
  * @dev: valid struct device pointer
  * @mapping: io address space mapping structure (returned from
  *	arm_iommu_create_mapping)
  *
- * Attaches specified io address space mapping to the provided device,
+ * Attaches specified io address space mapping to the provided device.
+ * This replaces the dma operations (dma_map_ops pointer) with the
+ * IOMMU aware version.
+ *
  * More than one client might be attached to the same io address space
  * mapping.
  */
@@ -1955,25 +1974,16 @@ int arm_iommu_attach_device(struct device *dev,
 {
 	int err;
 
-	err = iommu_attach_device(mapping->domain, dev);
+	err = __arm_iommu_attach_device(dev, mapping);
 	if (err)
 		return err;
 
-	kref_get(&mapping->kref);
-	dev->archdata.mapping = mapping;
-
-	pr_debug("Attached IOMMU controller to %s device.\n", dev_name(dev));
+	set_dma_ops(dev, &iommu_ops);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(arm_iommu_attach_device);
 
-/**
- * arm_iommu_detach_device
- * @dev: valid struct device pointer
- *
- * Detaches the provided device from a previously attached map.
- */
-void arm_iommu_detach_device(struct device *dev)
+static void __arm_iommu_detach_device(struct device *dev)
 {
 	struct dma_iommu_mapping *mapping;
 
@@ -1989,6 +1999,19 @@ void arm_iommu_detach_device(struct device *dev)
 
 	pr_debug("Detached IOMMU controller from %s device.\n", dev_name(dev));
 }
+
+/**
+ * arm_iommu_detach_device
+ * @dev: valid struct device pointer
+ *
+ * Detaches the provided device from a previously attached map.
+ * This voids the dma operations (dma_map_ops pointer).
+ */
+void arm_iommu_detach_device(struct device *dev)
+{
+	__arm_iommu_detach_device(dev);
+	set_dma_ops(dev, NULL);
+}
 EXPORT_SYMBOL_GPL(arm_iommu_detach_device);
 
 static struct dma_map_ops *arm_get_iommu_dma_map_ops(bool coherent)
@@ -2011,7 +2034,7 @@ static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
 		return false;
 	}
 
-	if (arm_iommu_attach_device(dev, mapping)) {
+	if (__arm_iommu_attach_device(dev, mapping)) {
 		pr_warn("Failed to attach device %s to IOMMU mapping\n",
 				dev_name(dev));
 		arm_iommu_release_mapping(mapping);
@@ -2025,7 +2048,10 @@ static void arm_teardown_iommu_dma_ops(struct device *dev)
 {
 	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
 
-	arm_iommu_detach_device(dev);
+	if (!mapping)
+		return;
+
+	__arm_iommu_detach_device(dev);
 	arm_iommu_release_mapping(mapping);
 }
 
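
For context, a client driver goes through the public entry points roughly as sketched below; this is hypothetical caller code (the platform bus, the zero base and the 1 GiB size are made-up example values), not part of this patch:

	#include <linux/err.h>
	#include <linux/platform_device.h>
	#include <linux/sizes.h>
	#include <asm/dma-iommu.h>

	static int example_attach(struct device *dev)
	{
		struct dma_iommu_mapping *mapping;
		int err;

		/* Carve out 1 GiB of IO virtual address space at offset 0. */
		mapping = arm_iommu_create_mapping(&platform_bus_type, 0, SZ_1G);
		if (IS_ERR(mapping))
			return PTR_ERR(mapping);

		/* Public API: attaches the device and installs the IOMMU
		 * dma_map_ops; the internal __arm_iommu_attach_device()
		 * above deliberately skips that last step. */
		err = arm_iommu_attach_device(dev, mapping);
		if (err) {
			arm_iommu_release_mapping(mapping);
			return err;
		}

		return 0;
	}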
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 865a7e28ea2d..3cb4c856b10d 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -45,6 +45,16 @@ static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
45 vcpu->arch.hcr_el2 &= ~HCR_RW; 45 vcpu->arch.hcr_el2 &= ~HCR_RW;
46} 46}
47 47
48static inline unsigned long vcpu_get_hcr(struct kvm_vcpu *vcpu)
49{
50 return vcpu->arch.hcr_el2;
51}
52
53static inline void vcpu_set_hcr(struct kvm_vcpu *vcpu, unsigned long hcr)
54{
55 vcpu->arch.hcr_el2 = hcr;
56}
57
48static inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu) 58static inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
49{ 59{
50 return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pc; 60 return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pc;
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 0b7dfdb931df..acd101a9014d 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -116,9 +116,6 @@ struct kvm_vcpu_arch {
116 * Anything that is not used directly from assembly code goes 116 * Anything that is not used directly from assembly code goes
117 * here. 117 * here.
118 */ 118 */
119 /* dcache set/way operation pending */
120 int last_pcpu;
121 cpumask_t require_dcache_flush;
122 119
123 /* Don't run the guest */ 120 /* Don't run the guest */
124 bool pause; 121 bool pause;
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 14a74f136272..adcf49547301 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -243,24 +243,46 @@ static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
 	return (vcpu_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101;
 }
 
-static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva,
+static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu, pfn_t pfn,
 					     unsigned long size,
 					     bool ipa_uncached)
 {
+	void *va = page_address(pfn_to_page(pfn));
+
 	if (!vcpu_has_cache_enabled(vcpu) || ipa_uncached)
-		kvm_flush_dcache_to_poc((void *)hva, size);
+		kvm_flush_dcache_to_poc(va, size);
 
 	if (!icache_is_aliasing()) {		/* PIPT */
-		flush_icache_range(hva, hva + size);
+		flush_icache_range((unsigned long)va,
+				   (unsigned long)va + size);
 	} else if (!icache_is_aivivt()) {	/* non ASID-tagged VIVT */
 		/* any kind of VIPT cache */
 		__flush_icache_all();
 	}
 }
 
+static inline void __kvm_flush_dcache_pte(pte_t pte)
+{
+	struct page *page = pte_page(pte);
+	kvm_flush_dcache_to_poc(page_address(page), PAGE_SIZE);
+}
+
+static inline void __kvm_flush_dcache_pmd(pmd_t pmd)
+{
+	struct page *page = pmd_page(pmd);
+	kvm_flush_dcache_to_poc(page_address(page), PMD_SIZE);
+}
+
+static inline void __kvm_flush_dcache_pud(pud_t pud)
+{
+	struct page *page = pud_page(pud);
+	kvm_flush_dcache_to_poc(page_address(page), PUD_SIZE);
+}
+
 #define kvm_virt_to_phys(x)		__virt_to_phys((unsigned long)(x))
 
-void stage2_flush_vm(struct kvm *kvm);
+void kvm_set_way_flush(struct kvm_vcpu *vcpu);
+void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);
 
 #endif /* __ASSEMBLY__ */
 #endif /* __ARM64_KVM_MMU_H__ */
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 3d7c2df89946..f31e8bb2bc5b 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -69,68 +69,31 @@ static u32 get_ccsidr(u32 csselr)
 	return ccsidr;
 }
 
-static void do_dc_cisw(u32 val)
-{
-	asm volatile("dc cisw, %x0" : : "r" (val));
-	dsb(ish);
-}
-
-static void do_dc_csw(u32 val)
-{
-	asm volatile("dc csw, %x0" : : "r" (val));
-	dsb(ish);
-}
-
-/* See note at ARM ARM B1.14.4 */
+/*
+ * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
+ */
 static bool access_dcsw(struct kvm_vcpu *vcpu,
 			const struct sys_reg_params *p,
 			const struct sys_reg_desc *r)
 {
-	unsigned long val;
-	int cpu;
-
 	if (!p->is_write)
 		return read_from_write_only(vcpu, p);
 
-	cpu = get_cpu();
-
-	cpumask_setall(&vcpu->arch.require_dcache_flush);
-	cpumask_clear_cpu(cpu, &vcpu->arch.require_dcache_flush);
-
-	/* If we were already preempted, take the long way around */
-	if (cpu != vcpu->arch.last_pcpu) {
-		flush_cache_all();
-		goto done;
-	}
-
-	val = *vcpu_reg(vcpu, p->Rt);
-
-	switch (p->CRm) {
-	case 6:			/* Upgrade DCISW to DCCISW, as per HCR.SWIO */
-	case 14:		/* DCCISW */
-		do_dc_cisw(val);
-		break;
-
-	case 10:		/* DCCSW */
-		do_dc_csw(val);
-		break;
-	}
-
-done:
-	put_cpu();
-
+	kvm_set_way_flush(vcpu);
 	return true;
 }
 
 /*
  * Generic accessor for VM registers. Only called as long as HCR_TVM
- * is set.
+ * is set. If the guest enables the MMU, we stop trapping the VM
+ * sys_regs and leave it in complete control of the caches.
  */
 static bool access_vm_reg(struct kvm_vcpu *vcpu,
 			  const struct sys_reg_params *p,
 			  const struct sys_reg_desc *r)
 {
 	unsigned long val;
+	bool was_enabled = vcpu_has_cache_enabled(vcpu);
 
 	BUG_ON(!p->is_write);
 
@@ -143,25 +106,7 @@ static bool access_vm_reg(struct kvm_vcpu *vcpu,
 		vcpu_cp15_64_low(vcpu, r->reg) = val & 0xffffffffUL;
 	}
 
-	return true;
-}
-
-/*
- * SCTLR_EL1 accessor. Only called as long as HCR_TVM is set. If the
- * guest enables the MMU, we stop trapping the VM sys_regs and leave
- * it in complete control of the caches.
- */
-static bool access_sctlr(struct kvm_vcpu *vcpu,
-			 const struct sys_reg_params *p,
-			 const struct sys_reg_desc *r)
-{
-	access_vm_reg(vcpu, p, r);
-
-	if (vcpu_has_cache_enabled(vcpu)) {	/* MMU+Caches enabled? */
-		vcpu->arch.hcr_el2 &= ~HCR_TVM;
-		stage2_flush_vm(vcpu->kvm);
-	}
-
+	kvm_toggle_cache(vcpu, was_enabled);
 	return true;
 }
 
@@ -377,7 +322,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
 	  NULL, reset_mpidr, MPIDR_EL1 },
 	/* SCTLR_EL1 */
 	{ Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b000),
-	  access_sctlr, reset_val, SCTLR_EL1, 0x00C50078 },
+	  access_vm_reg, reset_val, SCTLR_EL1, 0x00C50078 },
 	/* CPACR_EL1 */
 	{ Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b010),
 	  NULL, reset_val, CPACR_EL1, 0 },
@@ -657,7 +602,7 @@ static const struct sys_reg_desc cp14_64_regs[] = {
  * register).
  */
 static const struct sys_reg_desc cp15_regs[] = {
-	{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_sctlr, NULL, c1_SCTLR },
+	{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_vm_reg, NULL, c1_SCTLR },
 	{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
 	{ Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, c2_TTBR1 },
 	{ Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, c2_TTBCR },
diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
index 0eca93327195..d223a8b57c1e 100644
--- a/arch/avr32/mm/fault.c
+++ b/arch/avr32/mm/fault.c
@@ -142,6 +142,8 @@ good_area:
142 if (unlikely(fault & VM_FAULT_ERROR)) { 142 if (unlikely(fault & VM_FAULT_ERROR)) {
143 if (fault & VM_FAULT_OOM) 143 if (fault & VM_FAULT_OOM)
144 goto out_of_memory; 144 goto out_of_memory;
145 else if (fault & VM_FAULT_SIGSEGV)
146 goto bad_area;
145 else if (fault & VM_FAULT_SIGBUS) 147 else if (fault & VM_FAULT_SIGBUS)
146 goto do_sigbus; 148 goto do_sigbus;
147 BUG(); 149 BUG();
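
This avr32 hunk is the first of a long series: every architecture's fault handler below gains the same two lines, since handle_mm_fault() may now return VM_FAULT_SIGSEGV and a handler without this branch would fall through to BUG(). Modulo each port's local label names (bad_area, map_err, out, ...), the dispatch has this shape (sketch of the recurring pattern, not any one file):

	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGSEGV)	/* new: raise SIGSEGV */
			goto bad_area;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();	/* unknown VM_FAULT_* bit */
	}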
diff --git a/arch/cris/mm/fault.c b/arch/cris/mm/fault.c
index 1790f22e71a2..2686a7aa8ec8 100644
--- a/arch/cris/mm/fault.c
+++ b/arch/cris/mm/fault.c
@@ -176,6 +176,8 @@ retry:
176 if (unlikely(fault & VM_FAULT_ERROR)) { 176 if (unlikely(fault & VM_FAULT_ERROR)) {
177 if (fault & VM_FAULT_OOM) 177 if (fault & VM_FAULT_OOM)
178 goto out_of_memory; 178 goto out_of_memory;
179 else if (fault & VM_FAULT_SIGSEGV)
180 goto bad_area;
179 else if (fault & VM_FAULT_SIGBUS) 181 else if (fault & VM_FAULT_SIGBUS)
180 goto do_sigbus; 182 goto do_sigbus;
181 BUG(); 183 BUG();
diff --git a/arch/frv/mm/fault.c b/arch/frv/mm/fault.c
index 9a66372fc7c7..ec4917ddf678 100644
--- a/arch/frv/mm/fault.c
+++ b/arch/frv/mm/fault.c
@@ -168,6 +168,8 @@ asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear
168 if (unlikely(fault & VM_FAULT_ERROR)) { 168 if (unlikely(fault & VM_FAULT_ERROR)) {
169 if (fault & VM_FAULT_OOM) 169 if (fault & VM_FAULT_OOM)
170 goto out_of_memory; 170 goto out_of_memory;
171 else if (fault & VM_FAULT_SIGSEGV)
172 goto bad_area;
171 else if (fault & VM_FAULT_SIGBUS) 173 else if (fault & VM_FAULT_SIGBUS)
172 goto do_sigbus; 174 goto do_sigbus;
173 BUG(); 175 BUG();
diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
index 7225dad87094..ba5ba7accd0d 100644
--- a/arch/ia64/mm/fault.c
+++ b/arch/ia64/mm/fault.c
@@ -172,6 +172,8 @@ retry:
172 */ 172 */
173 if (fault & VM_FAULT_OOM) { 173 if (fault & VM_FAULT_OOM) {
174 goto out_of_memory; 174 goto out_of_memory;
175 } else if (fault & VM_FAULT_SIGSEGV) {
176 goto bad_area;
175 } else if (fault & VM_FAULT_SIGBUS) { 177 } else if (fault & VM_FAULT_SIGBUS) {
176 signal = SIGBUS; 178 signal = SIGBUS;
177 goto bad_area; 179 goto bad_area;
diff --git a/arch/m32r/mm/fault.c b/arch/m32r/mm/fault.c
index e9c6a8014bd6..e3d4d4890104 100644
--- a/arch/m32r/mm/fault.c
+++ b/arch/m32r/mm/fault.c
@@ -200,6 +200,8 @@ good_area:
200 if (unlikely(fault & VM_FAULT_ERROR)) { 200 if (unlikely(fault & VM_FAULT_ERROR)) {
201 if (fault & VM_FAULT_OOM) 201 if (fault & VM_FAULT_OOM)
202 goto out_of_memory; 202 goto out_of_memory;
203 else if (fault & VM_FAULT_SIGSEGV)
204 goto bad_area;
203 else if (fault & VM_FAULT_SIGBUS) 205 else if (fault & VM_FAULT_SIGBUS)
204 goto do_sigbus; 206 goto do_sigbus;
205 BUG(); 207 BUG();
diff --git a/arch/m68k/mm/fault.c b/arch/m68k/mm/fault.c
index 2bd7487440c4..b2f04aee46ec 100644
--- a/arch/m68k/mm/fault.c
+++ b/arch/m68k/mm/fault.c
@@ -145,6 +145,8 @@ good_area:
145 if (unlikely(fault & VM_FAULT_ERROR)) { 145 if (unlikely(fault & VM_FAULT_ERROR)) {
146 if (fault & VM_FAULT_OOM) 146 if (fault & VM_FAULT_OOM)
147 goto out_of_memory; 147 goto out_of_memory;
148 else if (fault & VM_FAULT_SIGSEGV)
149 goto map_err;
148 else if (fault & VM_FAULT_SIGBUS) 150 else if (fault & VM_FAULT_SIGBUS)
149 goto bus_err; 151 goto bus_err;
150 BUG(); 152 BUG();
diff --git a/arch/metag/mm/fault.c b/arch/metag/mm/fault.c
index 332680e5ebf2..2de5dc695a87 100644
--- a/arch/metag/mm/fault.c
+++ b/arch/metag/mm/fault.c
@@ -141,6 +141,8 @@ good_area:
141 if (unlikely(fault & VM_FAULT_ERROR)) { 141 if (unlikely(fault & VM_FAULT_ERROR)) {
142 if (fault & VM_FAULT_OOM) 142 if (fault & VM_FAULT_OOM)
143 goto out_of_memory; 143 goto out_of_memory;
144 else if (fault & VM_FAULT_SIGSEGV)
145 goto bad_area;
144 else if (fault & VM_FAULT_SIGBUS) 146 else if (fault & VM_FAULT_SIGBUS)
145 goto do_sigbus; 147 goto do_sigbus;
146 BUG(); 148 BUG();
diff --git a/arch/microblaze/mm/fault.c b/arch/microblaze/mm/fault.c
index fa4cf52aa7a6..d46a5ebb7570 100644
--- a/arch/microblaze/mm/fault.c
+++ b/arch/microblaze/mm/fault.c
@@ -224,6 +224,8 @@ good_area:
224 if (unlikely(fault & VM_FAULT_ERROR)) { 224 if (unlikely(fault & VM_FAULT_ERROR)) {
225 if (fault & VM_FAULT_OOM) 225 if (fault & VM_FAULT_OOM)
226 goto out_of_memory; 226 goto out_of_memory;
227 else if (fault & VM_FAULT_SIGSEGV)
228 goto bad_area;
227 else if (fault & VM_FAULT_SIGBUS) 229 else if (fault & VM_FAULT_SIGBUS)
228 goto do_sigbus; 230 goto do_sigbus;
229 BUG(); 231 BUG();
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index becc42bb1849..70ab5d664332 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -158,6 +158,8 @@ good_area:
158 if (unlikely(fault & VM_FAULT_ERROR)) { 158 if (unlikely(fault & VM_FAULT_ERROR)) {
159 if (fault & VM_FAULT_OOM) 159 if (fault & VM_FAULT_OOM)
160 goto out_of_memory; 160 goto out_of_memory;
161 else if (fault & VM_FAULT_SIGSEGV)
162 goto bad_area;
161 else if (fault & VM_FAULT_SIGBUS) 163 else if (fault & VM_FAULT_SIGBUS)
162 goto do_sigbus; 164 goto do_sigbus;
163 BUG(); 165 BUG();
diff --git a/arch/mn10300/mm/fault.c b/arch/mn10300/mm/fault.c
index 3516cbdf1ee9..0c2cc5d39c8e 100644
--- a/arch/mn10300/mm/fault.c
+++ b/arch/mn10300/mm/fault.c
@@ -262,6 +262,8 @@ good_area:
262 if (unlikely(fault & VM_FAULT_ERROR)) { 262 if (unlikely(fault & VM_FAULT_ERROR)) {
263 if (fault & VM_FAULT_OOM) 263 if (fault & VM_FAULT_OOM)
264 goto out_of_memory; 264 goto out_of_memory;
265 else if (fault & VM_FAULT_SIGSEGV)
266 goto bad_area;
265 else if (fault & VM_FAULT_SIGBUS) 267 else if (fault & VM_FAULT_SIGBUS)
266 goto do_sigbus; 268 goto do_sigbus;
267 BUG(); 269 BUG();
diff --git a/arch/nios2/mm/fault.c b/arch/nios2/mm/fault.c
index 15a0bb5fc06d..34429d5a0ccd 100644
--- a/arch/nios2/mm/fault.c
+++ b/arch/nios2/mm/fault.c
@@ -135,6 +135,8 @@ survive:
135 if (unlikely(fault & VM_FAULT_ERROR)) { 135 if (unlikely(fault & VM_FAULT_ERROR)) {
136 if (fault & VM_FAULT_OOM) 136 if (fault & VM_FAULT_OOM)
137 goto out_of_memory; 137 goto out_of_memory;
138 else if (fault & VM_FAULT_SIGSEGV)
139 goto bad_area;
138 else if (fault & VM_FAULT_SIGBUS) 140 else if (fault & VM_FAULT_SIGBUS)
139 goto do_sigbus; 141 goto do_sigbus;
140 BUG(); 142 BUG();
diff --git a/arch/openrisc/mm/fault.c b/arch/openrisc/mm/fault.c
index 0703acf7d327..230ac20ae794 100644
--- a/arch/openrisc/mm/fault.c
+++ b/arch/openrisc/mm/fault.c
@@ -171,6 +171,8 @@ good_area:
171 if (unlikely(fault & VM_FAULT_ERROR)) { 171 if (unlikely(fault & VM_FAULT_ERROR)) {
172 if (fault & VM_FAULT_OOM) 172 if (fault & VM_FAULT_OOM)
173 goto out_of_memory; 173 goto out_of_memory;
174 else if (fault & VM_FAULT_SIGSEGV)
175 goto bad_area;
174 else if (fault & VM_FAULT_SIGBUS) 176 else if (fault & VM_FAULT_SIGBUS)
175 goto do_sigbus; 177 goto do_sigbus;
176 BUG(); 178 BUG();
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
index 3ca9c1131cfe..e5120e653240 100644
--- a/arch/parisc/mm/fault.c
+++ b/arch/parisc/mm/fault.c
@@ -256,6 +256,8 @@ good_area:
256 */ 256 */
257 if (fault & VM_FAULT_OOM) 257 if (fault & VM_FAULT_OOM)
258 goto out_of_memory; 258 goto out_of_memory;
259 else if (fault & VM_FAULT_SIGSEGV)
260 goto bad_area;
259 else if (fault & VM_FAULT_SIGBUS) 261 else if (fault & VM_FAULT_SIGBUS)
260 goto bad_area; 262 goto bad_area;
261 BUG(); 263 BUG();
diff --git a/arch/powerpc/mm/copro_fault.c b/arch/powerpc/mm/copro_fault.c
index 5a236f082c78..1b5305d4bdab 100644
--- a/arch/powerpc/mm/copro_fault.c
+++ b/arch/powerpc/mm/copro_fault.c
@@ -76,7 +76,7 @@ int copro_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
 	if (*flt & VM_FAULT_OOM) {
 		ret = -ENOMEM;
 		goto out_unlock;
-	} else if (*flt & VM_FAULT_SIGBUS) {
+	} else if (*flt & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) {
 		ret = -EFAULT;
 		goto out_unlock;
 	}
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index eb79907f34fa..6154b0a2b063 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -437,6 +437,8 @@ good_area:
437 */ 437 */
438 fault = handle_mm_fault(mm, vma, address, flags); 438 fault = handle_mm_fault(mm, vma, address, flags);
439 if (unlikely(fault & (VM_FAULT_RETRY|VM_FAULT_ERROR))) { 439 if (unlikely(fault & (VM_FAULT_RETRY|VM_FAULT_ERROR))) {
440 if (fault & VM_FAULT_SIGSEGV)
441 goto bad_area;
440 rc = mm_fault_error(regs, address, fault); 442 rc = mm_fault_error(regs, address, fault);
441 if (rc >= MM_FAULT_RETURN) 443 if (rc >= MM_FAULT_RETURN)
442 goto bail; 444 goto bail;
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 811937bb90be..9065d5aa3932 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -374,6 +374,12 @@ static noinline void do_fault_error(struct pt_regs *regs, int fault)
374 do_no_context(regs); 374 do_no_context(regs);
375 else 375 else
376 pagefault_out_of_memory(); 376 pagefault_out_of_memory();
377 } else if (fault & VM_FAULT_SIGSEGV) {
378 /* Kernel mode? Handle exceptions or die */
379 if (!user_mode(regs))
380 do_no_context(regs);
381 else
382 do_sigsegv(regs, SEGV_MAPERR);
377 } else if (fault & VM_FAULT_SIGBUS) { 383 } else if (fault & VM_FAULT_SIGBUS) {
378 /* Kernel mode? Handle exceptions or die */ 384 /* Kernel mode? Handle exceptions or die */
379 if (!user_mode(regs)) 385 if (!user_mode(regs))
diff --git a/arch/score/mm/fault.c b/arch/score/mm/fault.c
index 52238983527d..6860beb2a280 100644
--- a/arch/score/mm/fault.c
+++ b/arch/score/mm/fault.c
@@ -114,6 +114,8 @@ good_area:
114 if (unlikely(fault & VM_FAULT_ERROR)) { 114 if (unlikely(fault & VM_FAULT_ERROR)) {
115 if (fault & VM_FAULT_OOM) 115 if (fault & VM_FAULT_OOM)
116 goto out_of_memory; 116 goto out_of_memory;
117 else if (fault & VM_FAULT_SIGSEGV)
118 goto bad_area;
117 else if (fault & VM_FAULT_SIGBUS) 119 else if (fault & VM_FAULT_SIGBUS)
118 goto do_sigbus; 120 goto do_sigbus;
119 BUG(); 121 BUG();
diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c
index 541dc6101508..a58fec9b55e0 100644
--- a/arch/sh/mm/fault.c
+++ b/arch/sh/mm/fault.c
@@ -353,6 +353,8 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code,
353 } else { 353 } else {
354 if (fault & VM_FAULT_SIGBUS) 354 if (fault & VM_FAULT_SIGBUS)
355 do_sigbus(regs, error_code, address); 355 do_sigbus(regs, error_code, address);
356 else if (fault & VM_FAULT_SIGSEGV)
357 bad_area(regs, error_code, address);
356 else 358 else
357 BUG(); 359 BUG();
358 } 360 }
diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
index 908e8c17c902..70d817154fe8 100644
--- a/arch/sparc/mm/fault_32.c
+++ b/arch/sparc/mm/fault_32.c
@@ -249,6 +249,8 @@ good_area:
249 if (unlikely(fault & VM_FAULT_ERROR)) { 249 if (unlikely(fault & VM_FAULT_ERROR)) {
250 if (fault & VM_FAULT_OOM) 250 if (fault & VM_FAULT_OOM)
251 goto out_of_memory; 251 goto out_of_memory;
252 else if (fault & VM_FAULT_SIGSEGV)
253 goto bad_area;
252 else if (fault & VM_FAULT_SIGBUS) 254 else if (fault & VM_FAULT_SIGBUS)
253 goto do_sigbus; 255 goto do_sigbus;
254 BUG(); 256 BUG();
diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
index 18fcd7167095..479823249429 100644
--- a/arch/sparc/mm/fault_64.c
+++ b/arch/sparc/mm/fault_64.c
@@ -446,6 +446,8 @@ good_area:
446 if (unlikely(fault & VM_FAULT_ERROR)) { 446 if (unlikely(fault & VM_FAULT_ERROR)) {
447 if (fault & VM_FAULT_OOM) 447 if (fault & VM_FAULT_OOM)
448 goto out_of_memory; 448 goto out_of_memory;
449 else if (fault & VM_FAULT_SIGSEGV)
450 goto bad_area;
449 else if (fault & VM_FAULT_SIGBUS) 451 else if (fault & VM_FAULT_SIGBUS)
450 goto do_sigbus; 452 goto do_sigbus;
451 BUG(); 453 BUG();
diff --git a/arch/tile/mm/fault.c b/arch/tile/mm/fault.c
index 565e25a98334..0f61a73534e6 100644
--- a/arch/tile/mm/fault.c
+++ b/arch/tile/mm/fault.c
@@ -442,6 +442,8 @@ good_area:
442 if (unlikely(fault & VM_FAULT_ERROR)) { 442 if (unlikely(fault & VM_FAULT_ERROR)) {
443 if (fault & VM_FAULT_OOM) 443 if (fault & VM_FAULT_OOM)
444 goto out_of_memory; 444 goto out_of_memory;
445 else if (fault & VM_FAULT_SIGSEGV)
446 goto bad_area;
445 else if (fault & VM_FAULT_SIGBUS) 447 else if (fault & VM_FAULT_SIGBUS)
446 goto do_sigbus; 448 goto do_sigbus;
447 BUG(); 449 BUG();
diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c
index 5678c3571e7c..209617302df8 100644
--- a/arch/um/kernel/trap.c
+++ b/arch/um/kernel/trap.c
@@ -80,6 +80,8 @@ good_area:
80 if (unlikely(fault & VM_FAULT_ERROR)) { 80 if (unlikely(fault & VM_FAULT_ERROR)) {
81 if (fault & VM_FAULT_OOM) { 81 if (fault & VM_FAULT_OOM) {
82 goto out_of_memory; 82 goto out_of_memory;
83 } else if (fault & VM_FAULT_SIGSEGV) {
84 goto out;
83 } else if (fault & VM_FAULT_SIGBUS) { 85 } else if (fault & VM_FAULT_SIGBUS) {
84 err = -EACCES; 86 err = -EACCES;
85 goto out; 87 goto out;
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 944bf019b74f..498b6d967138 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -2431,6 +2431,7 @@ __init int intel_pmu_init(void)
2431 break; 2431 break;
2432 2432
2433 case 55: /* 22nm Atom "Silvermont" */ 2433 case 55: /* 22nm Atom "Silvermont" */
2434 case 76: /* 14nm Atom "Airmont" */
2434 case 77: /* 22nm Atom "Silvermont Avoton/Rangely" */ 2435 case 77: /* 22nm Atom "Silvermont Avoton/Rangely" */
2435 memcpy(hw_cache_event_ids, slm_hw_cache_event_ids, 2436 memcpy(hw_cache_event_ids, slm_hw_cache_event_ids,
2436 sizeof(hw_cache_event_ids)); 2437 sizeof(hw_cache_event_ids));
diff --git a/arch/x86/kernel/cpu/perf_event_intel_rapl.c b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
index 6e434f8e5fc8..c4bb8b8e5017 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_rapl.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
@@ -142,7 +142,7 @@ static inline u64 rapl_scale(u64 v)
 	 * or use ldexp(count, -32).
 	 * Watts = Joules/Time delta
 	 */
-	return v << (32 - __this_cpu_read(rapl_pmu->hw_unit));
+	return v << (32 - __this_cpu_read(rapl_pmu)->hw_unit);
 }
 
 static u64 rapl_event_update(struct perf_event *event)
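
The fix above moves one parenthesis, which is easy to misread: rapl_pmu is a per-CPU *pointer*, so __this_cpu_read() must be applied to the pointer itself, and the field dereferenced afterwards. A userspace model of the difference, in which this_cpu_read() is a stand-in macro rather than the kernel implementation:

	struct rapl_pmu {
		int hw_unit;
	};

	static struct rapl_pmu *rapl_pmu; /* models DEFINE_PER_CPU(struct rapl_pmu *, rapl_pmu) */
	static long pcpu_off;             /* models this CPU's per-CPU offset */

	/* Stand-in: relocate the lvalue's address by the per-CPU offset. */
	#define this_cpu_read(var) \
		(*(__typeof__(&(var)))((char *)&(var) + pcpu_off))

	/* Buggy: dereferences the unrelocated pointer first, then relocates
	 * the address of the field behind it -- do not actually call this. */
	static int buggy(void)
	{
		return this_cpu_read(rapl_pmu->hw_unit);
	}

	/* Fixed: relocates and reads the per-CPU pointer, then dereferences. */
	static int fixed(void)
	{
		return this_cpu_read(rapl_pmu)->hw_unit;
	}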
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
index 10b8d3eaaf15..c635b8b49e93 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
@@ -840,7 +840,6 @@ static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id
 	box->phys_id = phys_id;
 	box->pci_dev = pdev;
 	box->pmu = pmu;
-	uncore_box_init(box);
 	pci_set_drvdata(pdev, box);
 
 	raw_spin_lock(&uncore_box_lock);
@@ -1004,10 +1003,8 @@ static int uncore_cpu_starting(int cpu)
 			pmu = &type->pmus[j];
 			box = *per_cpu_ptr(pmu->box, cpu);
 			/* called by uncore_cpu_init? */
-			if (box && box->phys_id >= 0) {
-				uncore_box_init(box);
+			if (box && box->phys_id >= 0)
 				continue;
-			}
 
 			for_each_online_cpu(k) {
 				exist = *per_cpu_ptr(pmu->box, k);
@@ -1023,10 +1020,8 @@ static int uncore_cpu_starting(int cpu)
 				}
 			}
 
-			if (box) {
+			if (box)
 				box->phys_id = phys_id;
-				uncore_box_init(box);
-			}
 		}
 	}
 	return 0;
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
index 863d9b02563e..6c8c1e7e69d8 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
@@ -257,6 +257,14 @@ static inline int uncore_num_counters(struct intel_uncore_box *box)
257 return box->pmu->type->num_counters; 257 return box->pmu->type->num_counters;
258} 258}
259 259
260static inline void uncore_box_init(struct intel_uncore_box *box)
261{
262 if (!test_and_set_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) {
263 if (box->pmu->type->ops->init_box)
264 box->pmu->type->ops->init_box(box);
265 }
266}
267
260static inline void uncore_disable_box(struct intel_uncore_box *box) 268static inline void uncore_disable_box(struct intel_uncore_box *box)
261{ 269{
262 if (box->pmu->type->ops->disable_box) 270 if (box->pmu->type->ops->disable_box)
@@ -265,6 +273,8 @@ static inline void uncore_disable_box(struct intel_uncore_box *box)
265 273
266static inline void uncore_enable_box(struct intel_uncore_box *box) 274static inline void uncore_enable_box(struct intel_uncore_box *box)
267{ 275{
276 uncore_box_init(box);
277
268 if (box->pmu->type->ops->enable_box) 278 if (box->pmu->type->ops->enable_box)
269 box->pmu->type->ops->enable_box(box); 279 box->pmu->type->ops->enable_box(box);
270} 280}
@@ -287,14 +297,6 @@ static inline u64 uncore_read_counter(struct intel_uncore_box *box,
287 return box->pmu->type->ops->read_counter(box, event); 297 return box->pmu->type->ops->read_counter(box, event);
288} 298}
289 299
290static inline void uncore_box_init(struct intel_uncore_box *box)
291{
292 if (!test_and_set_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) {
293 if (box->pmu->type->ops->init_box)
294 box->pmu->type->ops->init_box(box);
295 }
296}
297
298static inline bool uncore_box_is_fake(struct intel_uncore_box *box) 300static inline bool uncore_box_is_fake(struct intel_uncore_box *box)
299{ 301{
300 return (box->phys_id < 0); 302 return (box->phys_id < 0);
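
Net effect of the uncore hunks: uncore_box_init() leaves the PCI probe and CPU starting paths and now runs lazily from uncore_enable_box(), with test_and_set_bit() guaranteeing the hardware init happens exactly once per box no matter how often enable is called. The idiom in isolation (userspace sketch, not the kernel helper):

	#include <stdatomic.h>
	#include <stdio.h>

	static atomic_flag box_initiated = ATOMIC_FLAG_INIT;

	static void hw_init(void)
	{
		puts("init_box() runs exactly once");
	}

	static void box_init_once(void)
	{
		/* First caller wins; later callers see the flag already set. */
		if (!atomic_flag_test_and_set(&box_initiated))
			hw_init();
	}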
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 4f0c0b954686..d52dcf0776ea 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -192,6 +192,9 @@ static void recalculate_apic_map(struct kvm *kvm)
192 u16 cid, lid; 192 u16 cid, lid;
193 u32 ldr, aid; 193 u32 ldr, aid;
194 194
195 if (!kvm_apic_present(vcpu))
196 continue;
197
195 aid = kvm_apic_id(apic); 198 aid = kvm_apic_id(apic);
196 ldr = kvm_apic_get_reg(apic, APIC_LDR); 199 ldr = kvm_apic_get_reg(apic, APIC_LDR);
197 cid = apic_cluster_id(new, ldr); 200 cid = apic_cluster_id(new, ldr);
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 38dcec403b46..e3ff27a5b634 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -898,6 +898,8 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code,
898 if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON| 898 if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
899 VM_FAULT_HWPOISON_LARGE)) 899 VM_FAULT_HWPOISON_LARGE))
900 do_sigbus(regs, error_code, address, fault); 900 do_sigbus(regs, error_code, address, fault);
901 else if (fault & VM_FAULT_SIGSEGV)
902 bad_area_nosemaphore(regs, error_code, address);
901 else 903 else
902 BUG(); 904 BUG();
903 } 905 }
diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
index 7b20bccf3648..2fb384724ebb 100644
--- a/arch/x86/pci/common.c
+++ b/arch/x86/pci/common.c
@@ -448,6 +448,22 @@ static const struct dmi_system_id pciprobe_dmi_table[] __initconst = {
448 DMI_MATCH(DMI_PRODUCT_NAME, "ftServer"), 448 DMI_MATCH(DMI_PRODUCT_NAME, "ftServer"),
449 }, 449 },
450 }, 450 },
451 {
452 .callback = set_scan_all,
453 .ident = "Stratus/NEC ftServer",
454 .matches = {
455 DMI_MATCH(DMI_SYS_VENDOR, "NEC"),
456 DMI_MATCH(DMI_PRODUCT_NAME, "Express5800/R32"),
457 },
458 },
459 {
460 .callback = set_scan_all,
461 .ident = "Stratus/NEC ftServer",
462 .matches = {
463 DMI_MATCH(DMI_SYS_VENDOR, "NEC"),
464 DMI_MATCH(DMI_PRODUCT_NAME, "Express5800/R31"),
465 },
466 },
451 {} 467 {}
452}; 468};
453 469
diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c
index b57c4f91f487..9e3571a6535c 100644
--- a/arch/xtensa/mm/fault.c
+++ b/arch/xtensa/mm/fault.c
@@ -117,6 +117,8 @@ good_area:
117 if (unlikely(fault & VM_FAULT_ERROR)) { 117 if (unlikely(fault & VM_FAULT_ERROR)) {
118 if (fault & VM_FAULT_OOM) 118 if (fault & VM_FAULT_OOM)
119 goto out_of_memory; 119 goto out_of_memory;
120 else if (fault & VM_FAULT_SIGSEGV)
121 goto bad_area;
120 else if (fault & VM_FAULT_SIGBUS) 122 else if (fault & VM_FAULT_SIGBUS)
121 goto do_sigbus; 123 goto do_sigbus;
122 BUG(); 124 BUG();
diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c
index 6774a0e69867..1630a20d5dcf 100644
--- a/block/blk-mq-sysfs.c
+++ b/block/blk-mq-sysfs.c
@@ -15,26 +15,6 @@
 
 static void blk_mq_sysfs_release(struct kobject *kobj)
 {
-	struct request_queue *q;
-
-	q = container_of(kobj, struct request_queue, mq_kobj);
-	free_percpu(q->queue_ctx);
-}
-
-static void blk_mq_ctx_release(struct kobject *kobj)
-{
-	struct blk_mq_ctx *ctx;
-
-	ctx = container_of(kobj, struct blk_mq_ctx, kobj);
-	kobject_put(&ctx->queue->mq_kobj);
-}
-
-static void blk_mq_hctx_release(struct kobject *kobj)
-{
-	struct blk_mq_hw_ctx *hctx;
-
-	hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
-	kfree(hctx);
 }
 
 struct blk_mq_ctx_sysfs_entry {
@@ -338,13 +318,13 @@ static struct kobj_type blk_mq_ktype = {
 static struct kobj_type blk_mq_ctx_ktype = {
 	.sysfs_ops	= &blk_mq_sysfs_ops,
 	.default_attrs	= default_ctx_attrs,
-	.release	= blk_mq_ctx_release,
+	.release	= blk_mq_sysfs_release,
 };
 
 static struct kobj_type blk_mq_hw_ktype = {
 	.sysfs_ops	= &blk_mq_hw_sysfs_ops,
 	.default_attrs	= default_hw_ctx_attrs,
-	.release	= blk_mq_hctx_release,
+	.release	= blk_mq_sysfs_release,
 };
 
@@ -375,7 +355,6 @@ static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx)
 		return ret;
 
 	hctx_for_each_ctx(hctx, ctx, i) {
-		kobject_get(&q->mq_kobj);
 		ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu);
 		if (ret)
 			break;
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 9ee3b87c4498..2390c5541e71 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1867,6 +1867,27 @@ static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
1867 mutex_unlock(&set->tag_list_lock); 1867 mutex_unlock(&set->tag_list_lock);
1868} 1868}
1869 1869
1870/*
1871 * This is the actual release handler for mq, but we run it from the
1872 * request queue's release handler to avoid use-after-free problems;
1873 * q->mq_kobj shouldn't have been introduced, but we can't group the
1874 * ctx/hctx kobjects without it.
1875 */
1876void blk_mq_release(struct request_queue *q)
1877{
1878 struct blk_mq_hw_ctx *hctx;
1879 unsigned int i;
1880
1881 /* hctx kobj stays in hctx */
1882 queue_for_each_hw_ctx(q, hctx, i)
1883 kfree(hctx);
1884
1885 kfree(q->queue_hw_ctx);
1886
1887 /* ctx kobj stays in queue_ctx */
1888 free_percpu(q->queue_ctx);
1889}
1890
1870struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set) 1891struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
1871{ 1892{
1872 struct blk_mq_hw_ctx **hctxs; 1893 struct blk_mq_hw_ctx **hctxs;
@@ -2000,10 +2021,8 @@ void blk_mq_free_queue(struct request_queue *q)
2000 2021
2001 percpu_ref_exit(&q->mq_usage_counter); 2022 percpu_ref_exit(&q->mq_usage_counter);
2002 2023
2003 kfree(q->queue_hw_ctx);
2004 kfree(q->mq_map); 2024 kfree(q->mq_map);
2005 2025
2006 q->queue_hw_ctx = NULL;
2007 q->mq_map = NULL; 2026 q->mq_map = NULL;
2008 2027
2009 mutex_lock(&all_q_mutex); 2028 mutex_lock(&all_q_mutex);
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 4f4f943c22c3..6a48c4c0d8a2 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -62,6 +62,8 @@ extern void blk_mq_sysfs_unregister(struct request_queue *q);
62 62
63extern void blk_mq_rq_timed_out(struct request *req, bool reserved); 63extern void blk_mq_rq_timed_out(struct request *req, bool reserved);
64 64
65void blk_mq_release(struct request_queue *q);
66
65/* 67/*
66 * Basic implementation of sparser bitmap, allowing the user to spread 68 * Basic implementation of sparser bitmap, allowing the user to spread
67 * the bits over more cachelines. 69 * the bits over more cachelines.
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 935ea2aa0730..faaf36ade7eb 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -517,6 +517,8 @@ static void blk_release_queue(struct kobject *kobj)
517 517
518 if (!q->mq_ops) 518 if (!q->mq_ops)
519 blk_free_flush_queue(q->fq); 519 blk_free_flush_queue(q->fq);
520 else
521 blk_mq_release(q);
520 522
521 blk_trace_shutdown(q); 523 blk_trace_shutdown(q);
522 524
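
With both halves in place, the mq teardown ordering is easier to see. A simplified call-chain sketch (the trigger is whoever drops the final q->kobj reference):

	/*
	 * kobject_put(&q->kobj)		// last queue reference dropped
	 *   blk_release_queue()		// request_queue kobject ->release
	 *     blk_mq_release(q)		// all ctx/hctx child kobjects are
	 *					// gone by now, so the hctx structs
	 *					// and the per-cpu queue_ctx can be
	 *					// freed without use-after-free
	 */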
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 694d5a70d6ce..c70d6e45dc10 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -134,8 +134,6 @@ source "drivers/staging/Kconfig"
134 134
135source "drivers/platform/Kconfig" 135source "drivers/platform/Kconfig"
136 136
137source "drivers/soc/Kconfig"
138
139source "drivers/clk/Kconfig" 137source "drivers/clk/Kconfig"
140 138
141source "drivers/hwspinlock/Kconfig" 139source "drivers/hwspinlock/Kconfig"
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index 4f3febf8a589..e75737fd7eef 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -1,7 +1,7 @@
 /*
  * ACPI support for Intel Lynxpoint LPSS.
  *
- * Copyright (C) 2013, 2014, Intel Corporation
+ * Copyright (C) 2013, Intel Corporation
  * Authors: Mika Westerberg <mika.westerberg@linux.intel.com>
  *          Rafael J. Wysocki <rafael.j.wysocki@intel.com>
  *
@@ -60,8 +60,6 @@ ACPI_MODULE_NAME("acpi_lpss");
 #define LPSS_CLK_DIVIDER		BIT(2)
 #define LPSS_LTR			BIT(3)
 #define LPSS_SAVE_CTX			BIT(4)
-#define LPSS_DEV_PROXY			BIT(5)
-#define LPSS_PROXY_REQ			BIT(6)
 
 struct lpss_private_data;
 
@@ -72,10 +70,8 @@ struct lpss_device_desc {
 	void (*setup)(struct lpss_private_data *pdata);
 };
 
-static struct device *proxy_device;
-
 static struct lpss_device_desc lpss_dma_desc = {
-	.flags = LPSS_CLK | LPSS_PROXY_REQ,
+	.flags = LPSS_CLK,
 };
 
 struct lpss_private_data {
@@ -150,24 +146,22 @@ static struct lpss_device_desc byt_pwm_dev_desc = {
 };
 
 static struct lpss_device_desc byt_uart_dev_desc = {
-	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX |
-			LPSS_DEV_PROXY,
+	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX,
 	.prv_offset = 0x800,
 	.setup = lpss_uart_setup,
 };
 
 static struct lpss_device_desc byt_spi_dev_desc = {
-	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX |
-			LPSS_DEV_PROXY,
+	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX,
 	.prv_offset = 0x400,
 };
 
 static struct lpss_device_desc byt_sdio_dev_desc = {
-	.flags = LPSS_CLK | LPSS_DEV_PROXY,
+	.flags = LPSS_CLK,
 };
 
 static struct lpss_device_desc byt_i2c_dev_desc = {
-	.flags = LPSS_CLK | LPSS_SAVE_CTX | LPSS_DEV_PROXY,
+	.flags = LPSS_CLK | LPSS_SAVE_CTX,
 	.prv_offset = 0x800,
 	.setup = byt_i2c_setup,
 };
@@ -374,8 +368,6 @@ static int acpi_lpss_create_device(struct acpi_device *adev,
 	adev->driver_data = pdata;
 	pdev = acpi_create_platform_device(adev);
 	if (!IS_ERR_OR_NULL(pdev)) {
-		if (!proxy_device && dev_desc->flags & LPSS_DEV_PROXY)
-			proxy_device = &pdev->dev;
 		return 1;
 	}
 
@@ -600,14 +592,7 @@ static int acpi_lpss_runtime_suspend(struct device *dev)
 	if (pdata->dev_desc->flags & LPSS_SAVE_CTX)
 		acpi_lpss_save_ctx(dev, pdata);
 
-	ret = acpi_dev_runtime_suspend(dev);
-	if (ret)
-		return ret;
-
-	if (pdata->dev_desc->flags & LPSS_PROXY_REQ && proxy_device)
-		return pm_runtime_put_sync_suspend(proxy_device);
-
-	return 0;
+	return acpi_dev_runtime_suspend(dev);
 }
 
 static int acpi_lpss_runtime_resume(struct device *dev)
@@ -615,12 +600,6 @@ static int acpi_lpss_runtime_resume(struct device *dev)
 	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
 	int ret;
 
-	if (pdata->dev_desc->flags & LPSS_PROXY_REQ && proxy_device) {
-		ret = pm_runtime_get_sync(proxy_device);
-		if (ret)
-			return ret;
-	}
-
 	ret = acpi_dev_runtime_resume(dev);
 	if (ret)
 		return ret;
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 3ec85dfce124..8a86b62466f7 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -2098,32 +2098,26 @@ static void rbd_dev_parent_put(struct rbd_device *rbd_dev)
  * If an image has a non-zero parent overlap, get a reference to its
  * parent.
  *
- * We must get the reference before checking for the overlap to
- * coordinate properly with zeroing the parent overlap in
- * rbd_dev_v2_parent_info() when an image gets flattened. We
- * drop it again if there is no overlap.
- *
  * Returns true if the rbd device has a parent with a non-zero
  * overlap and a reference for it was successfully taken, or
  * false otherwise.
  */
 static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
 {
-	int counter;
+	int counter = 0;
 
 	if (!rbd_dev->parent_spec)
 		return false;
 
-	counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
-	if (counter > 0 && rbd_dev->parent_overlap)
-		return true;
-
-	/* Image was flattened, but parent is not yet torn down */
+	down_read(&rbd_dev->header_rwsem);
+	if (rbd_dev->parent_overlap)
+		counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
+	up_read(&rbd_dev->header_rwsem);
 
 	if (counter < 0)
 		rbd_warn(rbd_dev, "parent reference overflow");
 
-	return false;
+	return counter > 0;
 }
 
 /*
@@ -4239,7 +4233,6 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
 	 */
 	if (rbd_dev->parent_overlap) {
 		rbd_dev->parent_overlap = 0;
-		smp_mb();
 		rbd_dev_parent_put(rbd_dev);
 		pr_info("%s: clone image has been flattened\n",
 			rbd_dev->disk->disk_name);
@@ -4285,7 +4278,6 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
 	 * treat it specially.
 	 */
 	rbd_dev->parent_overlap = overlap;
-	smp_mb();
 	if (!overlap) {
 
 		/* A null parent_spec indicates it's the initial probe */
@@ -5114,10 +5106,7 @@ static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
 {
 	struct rbd_image_header *header;
 
-	/* Drop parent reference unless it's already been done (or none) */
-
-	if (rbd_dev->parent_overlap)
-		rbd_dev_parent_put(rbd_dev);
+	rbd_dev_parent_put(rbd_dev);
 
 	/* Free dynamic fields from the header, then zero it out */
 
diff --git a/drivers/gpio/gpio-mcp23s08.c b/drivers/gpio/gpio-mcp23s08.c
index da9c316059bc..eea5d7e578c9 100644
--- a/drivers/gpio/gpio-mcp23s08.c
+++ b/drivers/gpio/gpio-mcp23s08.c
@@ -801,9 +801,11 @@ static int mcp230xx_probe(struct i2c_client *client,
 		client->irq = irq_of_parse_and_map(client->dev.of_node, 0);
 	} else {
 		pdata = dev_get_platdata(&client->dev);
-		if (!pdata || !gpio_is_valid(pdata->base)) {
-			dev_dbg(&client->dev, "invalid platform data\n");
-			return -EINVAL;
+		if (!pdata) {
+			pdata = devm_kzalloc(&client->dev,
+					sizeof(struct mcp23s08_platform_data),
+					GFP_KERNEL);
+			pdata->base = -1;
 		}
 	}
 
809 811
@@ -924,10 +926,11 @@ static int mcp23s08_probe(struct spi_device *spi)
 	} else {
 		type = spi_get_device_id(spi)->driver_data;
 		pdata = dev_get_platdata(&spi->dev);
-		if (!pdata || !gpio_is_valid(pdata->base)) {
-			dev_dbg(&spi->dev,
-				"invalid or missing platform data\n");
-			return -EINVAL;
+		if (!pdata) {
+			pdata = devm_kzalloc(&spi->dev,
+					sizeof(struct mcp23s08_platform_data),
+					GFP_KERNEL);
+			pdata->base = -1;
 		}
 
 		for (addr = 0; addr < ARRAY_SIZE(pdata->chip); addr++) {
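Both mcp23s08 probe paths now synthesize default platform data with a dynamic GPIO base (base = -1) instead of failing with -EINVAL. Note that, as merged, neither hunk checks the devm_kzalloc() result before writing pdata->base; a slightly more defensive variant of the same fallback would be (illustrative sketch):

	pdata = dev_get_platdata(&client->dev);
	if (!pdata) {
		pdata = devm_kzalloc(&client->dev, sizeof(*pdata), GFP_KERNEL);
		if (!pdata)
			return -ENOMEM;	/* check absent from the merged hunks */
		pdata->base = -1;	/* let gpiolib assign a dynamic base */
	}
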
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index 30646cfe0efa..f476ae2eb0b3 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -88,6 +88,8 @@ struct gpio_bank {
 #define BANK_USED(bank) (bank->mod_usage || bank->irq_usage)
 #define LINE_USED(line, offset) (line & (BIT(offset)))
 
+static void omap_gpio_unmask_irq(struct irq_data *d);
+
 static int omap_irq_to_gpio(struct gpio_bank *bank, unsigned int gpio_irq)
 {
 	return bank->chip.base + gpio_irq;
@@ -477,6 +479,16 @@ static int omap_gpio_is_input(struct gpio_bank *bank, int mask)
 	return readl_relaxed(reg) & mask;
 }
 
+static void omap_gpio_init_irq(struct gpio_bank *bank, unsigned gpio,
+			       unsigned offset)
+{
+	if (!LINE_USED(bank->mod_usage, offset)) {
+		omap_enable_gpio_module(bank, offset);
+		omap_set_gpio_direction(bank, offset, 1);
+	}
+	bank->irq_usage |= BIT(GPIO_INDEX(bank, gpio));
+}
+
 static int omap_gpio_irq_type(struct irq_data *d, unsigned type)
 {
 	struct gpio_bank *bank = omap_irq_data_get_bank(d);
@@ -506,15 +518,11 @@ static int omap_gpio_irq_type(struct irq_data *d, unsigned type)
 	spin_lock_irqsave(&bank->lock, flags);
 	offset = GPIO_INDEX(bank, gpio);
 	retval = omap_set_gpio_triggering(bank, offset, type);
-	if (!LINE_USED(bank->mod_usage, offset)) {
-		omap_enable_gpio_module(bank, offset);
-		omap_set_gpio_direction(bank, offset, 1);
-	} else if (!omap_gpio_is_input(bank, BIT(offset))) {
+	omap_gpio_init_irq(bank, gpio, offset);
+	if (!omap_gpio_is_input(bank, BIT(offset))) {
 		spin_unlock_irqrestore(&bank->lock, flags);
 		return -EINVAL;
 	}
-
-	bank->irq_usage |= BIT(GPIO_INDEX(bank, gpio));
 	spin_unlock_irqrestore(&bank->lock, flags);
 
 	if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
@@ -792,6 +800,24 @@ exit:
 	pm_runtime_put(bank->dev);
 }
 
+static unsigned int omap_gpio_irq_startup(struct irq_data *d)
+{
+	struct gpio_bank *bank = omap_irq_data_get_bank(d);
+	unsigned int gpio = omap_irq_to_gpio(bank, d->hwirq);
+	unsigned long flags;
+	unsigned offset = GPIO_INDEX(bank, gpio);
+
+	if (!BANK_USED(bank))
+		pm_runtime_get_sync(bank->dev);
+
+	spin_lock_irqsave(&bank->lock, flags);
+	omap_gpio_init_irq(bank, gpio, offset);
+	spin_unlock_irqrestore(&bank->lock, flags);
+	omap_gpio_unmask_irq(d);
+
+	return 0;
+}
+
 static void omap_gpio_irq_shutdown(struct irq_data *d)
 {
 	struct gpio_bank *bank = omap_irq_data_get_bank(d);
@@ -1181,6 +1207,7 @@ static int omap_gpio_probe(struct platform_device *pdev)
 	if (!irqc)
 		return -ENOMEM;
 
+	irqc->irq_startup = omap_gpio_irq_startup,
 	irqc->irq_shutdown = omap_gpio_irq_shutdown,
 	irqc->irq_ack = omap_gpio_ack_irq,
 	irqc->irq_mask = omap_gpio_mask_irq,
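The new ->irq_startup hook covers lines that are requested as interrupts without ever being claimed through gpiolib: genirq calls it from request_irq(), so the bank is powered (pm_runtime_get_sync) and the GPIO module enabled before the line is unmasked. The general shape of such a hook, reduced to its contract (illustrative names, not driver code):

	static unsigned int example_irq_startup(struct irq_data *d)
	{
		/* bring the line to a usable, powered state first ... */
		example_irq_unmask(d);	/* ... then unmask, as the omap hook does */
		return 0;		/* matches the omap hook's return value */
	}
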
diff --git a/drivers/gpio/gpiolib-sysfs.c b/drivers/gpio/gpiolib-sysfs.c
index f62aa115d79a..7722ed53bd65 100644
--- a/drivers/gpio/gpiolib-sysfs.c
+++ b/drivers/gpio/gpiolib-sysfs.c
@@ -648,6 +648,7 @@ int gpiod_export_link(struct device *dev, const char *name,
 	if (tdev != NULL) {
 		status = sysfs_create_link(&dev->kobj, &tdev->kobj,
 					name);
+		put_device(tdev);
 	} else {
 		status = -ENODEV;
 	}
@@ -695,7 +696,7 @@ int gpiod_sysfs_set_active_low(struct gpio_desc *desc, int value)
 	}
 
 	status = sysfs_set_active_low(desc, dev, value);
-
+	put_device(dev);
 unlock:
 	mutex_unlock(&sysfs_lock);
 
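Both gpiolib-sysfs hunks plug device reference leaks: a sysfs class lookup returns its device with an elevated refcount, so every successful lookup must be paired with put_device(). Roughly (sketch; do_sysfs_work() is a hypothetical stand-in for whatever runs between lookup and release):

	dev = class_find_device(&gpio_class, NULL, desc, match_export);
	if (!dev)
		return -ENODEV;
	status = do_sysfs_work(dev);
	put_device(dev);		/* balance the lookup's get_device() */
	return status;
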
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 633532a2e7ec..25bc47f3c1cf 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -26,6 +26,7 @@
 #include <linux/slab.h>
 #include "kfd_priv.h"
 #include "kfd_device_queue_manager.h"
+#include "kfd_pm4_headers.h"
 
 #define MQD_SIZE_ALIGNED 768
 
@@ -169,9 +170,8 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
 	kfd->shared_resources = *gpu_resources;
 
 	/* calculate max size of mqds needed for queues */
-	size = max_num_of_processes *
-		max_num_of_queues_per_process *
-		kfd->device_info->mqd_size_aligned;
+	size = max_num_of_queues_per_device *
+			kfd->device_info->mqd_size_aligned;
 
 	/* add another 512KB for all other allocations on gart */
 	size += 512 * 1024;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 30c8fda9622e..0fd592799d58 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -183,6 +183,13 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
 
 	mutex_lock(&dqm->lock);
 
+	if (dqm->total_queue_count >= max_num_of_queues_per_device) {
+		pr_warn("amdkfd: Can't create new usermode queue because %d queues were already created\n",
+				dqm->total_queue_count);
+		mutex_unlock(&dqm->lock);
+		return -EPERM;
+	}
+
 	if (list_empty(&qpd->queues_list)) {
 		retval = allocate_vmid(dqm, qpd, q);
 		if (retval != 0) {
@@ -207,6 +214,14 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
 	list_add(&q->list, &qpd->queues_list);
 	dqm->queue_count++;
 
+	/*
+	 * Unconditionally increment this counter, regardless of the queue's
+	 * type or whether the queue is active.
+	 */
+	dqm->total_queue_count++;
+	pr_debug("Total of %d queues are accountable so far\n",
+			dqm->total_queue_count);
+
 	mutex_unlock(&dqm->lock);
 	return 0;
 }
@@ -326,6 +341,15 @@ static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
 	if (list_empty(&qpd->queues_list))
 		deallocate_vmid(dqm, qpd, q);
 	dqm->queue_count--;
+
+	/*
+	 * Unconditionally decrement this counter, regardless of the queue's
+	 * type
+	 */
+	dqm->total_queue_count--;
+	pr_debug("Total of %d queues are accountable so far\n",
+			dqm->total_queue_count);
+
 out:
 	mutex_unlock(&dqm->lock);
 	return retval;
@@ -541,10 +565,14 @@ static int init_pipelines(struct device_queue_manager *dqm,
 
 	for (i = 0; i < pipes_num; i++) {
 		inx = i + first_pipe;
+		/*
+		 * HPD buffer on GTT is allocated by amdkfd, no need to waste
+		 * space in GTT for pipelines we don't initialize
+		 */
 		pipe_hpd_addr = dqm->pipelines_addr + i * CIK_HPD_EOP_BYTES;
 		pr_debug("kfd: pipeline address %llX\n", pipe_hpd_addr);
 		/* = log2(bytes/4)-1 */
-		kfd2kgd->init_pipeline(dqm->dev->kgd, i,
+		kfd2kgd->init_pipeline(dqm->dev->kgd, inx,
 				CIK_HPD_EOP_BYTES_LOG2 - 3, pipe_hpd_addr);
 	}
 
@@ -560,7 +588,7 @@ static int init_scheduler(struct device_queue_manager *dqm)
 
 	pr_debug("kfd: In %s\n", __func__);
 
-	retval = init_pipelines(dqm, get_pipes_num(dqm), KFD_DQM_FIRST_PIPE);
+	retval = init_pipelines(dqm, get_pipes_num(dqm), get_first_pipe(dqm));
 	if (retval != 0)
 		return retval;
 
@@ -752,6 +780,21 @@ static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
 	pr_debug("kfd: In func %s\n", __func__);
 
 	mutex_lock(&dqm->lock);
+	if (dqm->total_queue_count >= max_num_of_queues_per_device) {
+		pr_warn("amdkfd: Can't create new kernel queue because %d queues were already created\n",
+				dqm->total_queue_count);
+		mutex_unlock(&dqm->lock);
+		return -EPERM;
+	}
+
+	/*
+	 * Unconditionally increment this counter, regardless of the queue's
+	 * type or whether the queue is active.
+	 */
+	dqm->total_queue_count++;
+	pr_debug("Total of %d queues are accountable so far\n",
+			dqm->total_queue_count);
+
 	list_add(&kq->list, &qpd->priv_queue_list);
 	dqm->queue_count++;
 	qpd->is_debug = true;
@@ -775,6 +818,13 @@ static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
 	dqm->queue_count--;
 	qpd->is_debug = false;
 	execute_queues_cpsch(dqm, false);
+	/*
+	 * Unconditionally decrement this counter, regardless of the queue's
+	 * type.
+	 */
+	dqm->total_queue_count--;
+	pr_debug("Total of %d queues are accountable so far\n",
+			dqm->total_queue_count);
 	mutex_unlock(&dqm->lock);
 }
 
@@ -793,6 +843,13 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
 
 	mutex_lock(&dqm->lock);
 
+	if (dqm->total_queue_count >= max_num_of_queues_per_device) {
+		pr_warn("amdkfd: Can't create new usermode queue because %d queues were already created\n",
+				dqm->total_queue_count);
+		retval = -EPERM;
+		goto out;
+	}
+
 	mqd = dqm->get_mqd_manager(dqm, KFD_MQD_TYPE_CIK_CP);
 	if (mqd == NULL) {
 		mutex_unlock(&dqm->lock);
@@ -810,6 +867,15 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
 		retval = execute_queues_cpsch(dqm, false);
 	}
 
+	/*
+	 * Unconditionally increment this counter, regardless of the queue's
+	 * type or whether the queue is active.
+	 */
+	dqm->total_queue_count++;
+
+	pr_debug("Total of %d queues are accountable so far\n",
+			dqm->total_queue_count);
+
 out:
 	mutex_unlock(&dqm->lock);
 	return retval;
@@ -930,6 +996,14 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
 
 	mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
 
+	/*
+	 * Unconditionally decrement this counter, regardless of the queue's
+	 * type
+	 */
+	dqm->total_queue_count--;
+	pr_debug("Total of %d queues are accountable so far\n",
+			dqm->total_queue_count);
+
 	mutex_unlock(&dqm->lock);
 
 	return 0;
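The four amdkfd hunks above enforce a single per-device budget: every create path (usermode and kernel queue, with and without the CP scheduler) fails with -EPERM once total_queue_count reaches max_num_of_queues_per_device, and every destroy path decrements the counter, always under dqm->lock. Condensed into one hypothetical helper (the driver open-codes this in each path):

	static int dqm_account_queue(struct device_queue_manager *dqm)
	{
		/* caller holds dqm->lock */
		if (dqm->total_queue_count >= max_num_of_queues_per_device)
			return -EPERM;	/* device-wide budget exhausted */
		dqm->total_queue_count++;
		return 0;
	}
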
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
index c3f189e8ae35..52035bf0c1cb 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
@@ -130,6 +130,7 @@ struct device_queue_manager {
 	struct list_head	queues;
 	unsigned int		processes_count;
 	unsigned int		queue_count;
+	unsigned int		total_queue_count;
 	unsigned int		next_pipe_to_allocate;
 	unsigned int		*allocated_queues;
 	unsigned int		vmid_bitmap;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_module.c b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
index 95d5af138e6e..1c385c23dd0b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_module.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
@@ -50,15 +50,10 @@ module_param(sched_policy, int, 0444);
 MODULE_PARM_DESC(sched_policy,
 	"Kernel cmdline parameter that defines the amdkfd scheduling policy");
 
-int max_num_of_processes = KFD_MAX_NUM_OF_PROCESSES_DEFAULT;
-module_param(max_num_of_processes, int, 0444);
-MODULE_PARM_DESC(max_num_of_processes,
-	"Kernel cmdline parameter that defines the amdkfd maximum number of supported processes");
-
-int max_num_of_queues_per_process = KFD_MAX_NUM_OF_QUEUES_PER_PROCESS_DEFAULT;
-module_param(max_num_of_queues_per_process, int, 0444);
-MODULE_PARM_DESC(max_num_of_queues_per_process,
-	"Kernel cmdline parameter that defines the amdkfd maximum number of supported queues per process");
+int max_num_of_queues_per_device = KFD_MAX_NUM_OF_QUEUES_PER_DEVICE_DEFAULT;
+module_param(max_num_of_queues_per_device, int, 0444);
+MODULE_PARM_DESC(max_num_of_queues_per_device,
+	"Maximum number of supported queues per device (1 = Minimum, 4096 = default)");
 
 bool kgd2kfd_init(unsigned interface_version,
 		  const struct kfd2kgd_calls *f2g,
@@ -100,16 +95,10 @@ static int __init kfd_module_init(void)
 	}
 
 	/* Verify module parameters */
-	if ((max_num_of_processes < 0) ||
-		(max_num_of_processes > KFD_MAX_NUM_OF_PROCESSES)) {
-		pr_err("kfd: max_num_of_processes must be between 0 to KFD_MAX_NUM_OF_PROCESSES\n");
-		return -1;
-	}
-
-	if ((max_num_of_queues_per_process < 0) ||
-		(max_num_of_queues_per_process >
-		KFD_MAX_NUM_OF_QUEUES_PER_PROCESS)) {
-		pr_err("kfd: max_num_of_queues_per_process must be between 0 to KFD_MAX_NUM_OF_QUEUES_PER_PROCESS\n");
+	if ((max_num_of_queues_per_device < 1) ||
+		(max_num_of_queues_per_device >
+		KFD_MAX_NUM_OF_QUEUES_PER_DEVICE)) {
+		pr_err("kfd: max_num_of_queues_per_device must be between 1 to KFD_MAX_NUM_OF_QUEUES_PER_DEVICE\n");
 		return -1;
 	}
 
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c b/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c
index 4c25ef504f79..6cfe7f1f18cf 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c
@@ -30,7 +30,7 @@ static DEFINE_MUTEX(pasid_mutex);
 
 int kfd_pasid_init(void)
 {
-	pasid_limit = max_num_of_processes;
+	pasid_limit = KFD_MAX_NUM_OF_PROCESSES;
 
 	pasid_bitmap = kcalloc(BITS_TO_LONGS(pasid_limit), sizeof(long), GFP_KERNEL);
 	if (!pasid_bitmap)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index b3dc13c83169..96dc10e8904a 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -52,20 +52,19 @@
 #define kfd_alloc_struct(ptr_to_struct)	\
 	((typeof(ptr_to_struct)) kzalloc(sizeof(*ptr_to_struct), GFP_KERNEL))
 
-/* Kernel module parameter to specify maximum number of supported processes */
-extern int max_num_of_processes;
-
-#define KFD_MAX_NUM_OF_PROCESSES_DEFAULT 32
 #define KFD_MAX_NUM_OF_PROCESSES 512
+#define KFD_MAX_NUM_OF_QUEUES_PER_PROCESS 1024
 
 /*
- * Kernel module parameter to specify maximum number of supported queues
- * per process
+ * Kernel module parameter to specify maximum number of supported queues per
+ * device
  */
-extern int max_num_of_queues_per_process;
+extern int max_num_of_queues_per_device;
 
-#define KFD_MAX_NUM_OF_QUEUES_PER_PROCESS_DEFAULT 128
-#define KFD_MAX_NUM_OF_QUEUES_PER_PROCESS 1024
+#define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE_DEFAULT 4096
+#define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE	\
+	(KFD_MAX_NUM_OF_PROCESSES *		\
+			KFD_MAX_NUM_OF_QUEUES_PER_PROCESS)
 
 #define KFD_KERNEL_QUEUE_SIZE 2048
 
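Worked out, the ceiling the new module parameter is validated against is the product of the two per-process limits:

	/* KFD_MAX_NUM_OF_QUEUES_PER_DEVICE = 512 * 1024 = 524288,
	 * while the shipped default (KFD_MAX_NUM_OF_QUEUES_PER_DEVICE_DEFAULT)
	 * stays at a far smaller 4096.
	 */
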
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index 47526780d736..2fda1927bff7 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -54,11 +54,11 @@ static int find_available_queue_slot(struct process_queue_manager *pqm,
 	pr_debug("kfd: in %s\n", __func__);
 
 	found = find_first_zero_bit(pqm->queue_slot_bitmap,
-			max_num_of_queues_per_process);
+			KFD_MAX_NUM_OF_QUEUES_PER_PROCESS);
 
 	pr_debug("kfd: the new slot id %lu\n", found);
 
-	if (found >= max_num_of_queues_per_process) {
+	if (found >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS) {
 		pr_info("amdkfd: Can not open more queues for process with pasid %d\n",
 				pqm->process->pasid);
 		return -ENOMEM;
@@ -76,7 +76,7 @@ int pqm_init(struct process_queue_manager *pqm, struct kfd_process *p)
 
 	INIT_LIST_HEAD(&pqm->queues);
 	pqm->queue_slot_bitmap =
-			kzalloc(DIV_ROUND_UP(max_num_of_queues_per_process,
+			kzalloc(DIV_ROUND_UP(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS,
 					BITS_PER_BYTE), GFP_KERNEL);
 	if (pqm->queue_slot_bitmap == NULL)
 		return -ENOMEM;
@@ -203,6 +203,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
 		pqn->kq = NULL;
 		retval = dev->dqm->create_queue(dev->dqm, q, &pdd->qpd,
 						&q->properties.vmid);
+		pr_debug("DQM returned %d for create_queue\n", retval);
 		print_queue(q);
 		break;
 	case KFD_QUEUE_TYPE_DIQ:
@@ -222,7 +223,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
 	}
 
 	if (retval != 0) {
-		pr_err("kfd: error dqm create queue\n");
+		pr_debug("Error dqm create queue\n");
 		goto err_create_queue;
 	}
 
@@ -241,7 +242,10 @@ int pqm_create_queue(struct process_queue_manager *pqm,
 err_create_queue:
 	kfree(pqn);
 err_allocate_pqn:
+	/* check if queues list is empty unregister process from device */
 	clear_bit(*qid, pqm->queue_slot_bitmap);
+	if (list_empty(&pqm->queues))
+		dev->dqm->unregister_process(dev->dqm, &pdd->qpd);
 	return retval;
 }
 
@@ -311,7 +315,11 @@ int pqm_update_queue(struct process_queue_manager *pqm, unsigned int qid,
 	BUG_ON(!pqm);
 
 	pqn = get_queue_by_qid(pqm, qid);
-	BUG_ON(!pqn);
+	if (!pqn) {
+		pr_debug("amdkfd: No queue %d exists for update operation\n",
+				qid);
+		return -EFAULT;
+	}
 
 	pqn->q->properties.queue_address = p->queue_address;
 	pqn->q->properties.queue_size = p->queue_size;
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c
index c2a1cba1e984..b9140032962d 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.c
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.c
@@ -16,9 +16,12 @@
 #include "cirrus_drv.h"
 
 int cirrus_modeset = -1;
+int cirrus_bpp = 24;
 
 MODULE_PARM_DESC(modeset, "Disable/Enable modesetting");
 module_param_named(modeset, cirrus_modeset, int, 0400);
+MODULE_PARM_DESC(bpp, "Max bits-per-pixel (default:24)");
+module_param_named(bpp, cirrus_bpp, int, 0400);
 
 /*
  * This is the generic driver code. This binds the driver to the drm core,
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.h b/drivers/gpu/drm/cirrus/cirrus_drv.h
index 693a4565c4ff..705061537a27 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.h
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.h
@@ -262,4 +262,7 @@ static inline void cirrus_bo_unreserve(struct cirrus_bo *bo)
 
 int cirrus_bo_push_sysram(struct cirrus_bo *bo);
 int cirrus_bo_pin(struct cirrus_bo *bo, u32 pl_flag, u64 *gpu_addr);
+
+extern int cirrus_bpp;
+
 #endif				/* __CIRRUS_DRV_H__ */
diff --git a/drivers/gpu/drm/cirrus/cirrus_main.c b/drivers/gpu/drm/cirrus/cirrus_main.c
index 4c2d68e9102d..e4b976658087 100644
--- a/drivers/gpu/drm/cirrus/cirrus_main.c
+++ b/drivers/gpu/drm/cirrus/cirrus_main.c
@@ -320,6 +320,8 @@ bool cirrus_check_framebuffer(struct cirrus_device *cdev, int width, int height,
 	const int max_pitch = 0x1FF << 3; /* (4096 - 1) & ~111b bytes */
 	const int max_size = cdev->mc.vram_size;
 
+	if (bpp > cirrus_bpp)
+		return false;
 	if (bpp > 32)
 		return false;
 
diff --git a/drivers/gpu/drm/cirrus/cirrus_mode.c b/drivers/gpu/drm/cirrus/cirrus_mode.c
index 99d4a74ffeaf..61385f2298bf 100644
--- a/drivers/gpu/drm/cirrus/cirrus_mode.c
+++ b/drivers/gpu/drm/cirrus/cirrus_mode.c
@@ -501,8 +501,13 @@ static int cirrus_vga_get_modes(struct drm_connector *connector)
 	int count;
 
 	/* Just add a static list of modes */
-	count = drm_add_modes_noedid(connector, 1280, 1024);
-	drm_set_preferred_mode(connector, 1024, 768);
+	if (cirrus_bpp <= 24) {
+		count = drm_add_modes_noedid(connector, 1280, 1024);
+		drm_set_preferred_mode(connector, 1024, 768);
+	} else {
+		count = drm_add_modes_noedid(connector, 800, 600);
+		drm_set_preferred_mode(connector, 800, 600);
+	}
 	return count;
 }
 
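Taken together, the cirrus hunks cap a framebuffer's depth at min(cirrus_bpp, 32) and shrink the advertised mode list once the user raises bpp past the 24-bit default. The combined framebuffer test reduces to (illustrative):

	static bool cirrus_bpp_ok(int bpp)
	{
		return bpp <= cirrus_bpp && bpp <= 32;	/* bpp <= min(cirrus_bpp, 32) */
	}

Because the parameter is registered with module_param_named(bpp, ...) at 0400, it is set at load time, e.g. cirrus.bpp=32 on the kernel command line; per cirrus_vga_get_modes() above, raising it past 24 trades the 1280x1024 mode list for 800x600.
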
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index cf775a4449c1..dc386ebe5193 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -145,6 +145,31 @@ int drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper, struct drm_
 }
 EXPORT_SYMBOL(drm_fb_helper_add_one_connector);
 
+static void remove_from_modeset(struct drm_mode_set *set,
+		struct drm_connector *connector)
+{
+	int i, j;
+
+	for (i = 0; i < set->num_connectors; i++) {
+		if (set->connectors[i] == connector)
+			break;
+	}
+
+	if (i == set->num_connectors)
+		return;
+
+	for (j = i + 1; j < set->num_connectors; j++) {
+		set->connectors[j - 1] = set->connectors[j];
+	}
+	set->num_connectors--;
+
+	/* because i915 is pissy about this..
+	 * TODO maybe need to makes sure we set it back to !=NULL somewhere?
+	 */
+	if (set->num_connectors == 0)
+		set->fb = NULL;
+}
+
 int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
 				       struct drm_connector *connector)
 {
@@ -167,6 +192,11 @@ int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
 	}
 	fb_helper->connector_count--;
 	kfree(fb_helper_connector);
+
+	/* also cleanup dangling references to the connector: */
+	for (i = 0; i < fb_helper->crtc_count; i++)
+		remove_from_modeset(&fb_helper->crtc_info[i].mode_set, connector);
+
 	return 0;
 }
 EXPORT_SYMBOL(drm_fb_helper_remove_one_connector);
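remove_from_modeset() is an order-preserving compaction of the connector array. The same idea in plain, compilable C (hypothetical helper, not kernel code):

	#include <stddef.h>

	/* Remove the first occurrence of victim from arr, keeping order. */
	static void remove_ptr(void **arr, size_t *len, const void *victim)
	{
		size_t i, j;

		for (i = 0; i < *len; i++)
			if (arr[i] == victim)
				break;
		if (i == *len)
			return;			/* victim not present */
		for (j = i + 1; j < *len; j++)
			arr[j - 1] = arr[j];	/* shift the tail left by one */
		(*len)--;
	}
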
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index d4762799351d..a9041d1a8ff0 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -32,6 +32,8 @@
 struct tda998x_priv {
 	struct i2c_client *cec;
 	struct i2c_client *hdmi;
+	struct mutex mutex;
+	struct delayed_work dwork;
 	uint16_t rev;
 	uint8_t current_page;
 	int dpms;
@@ -402,9 +404,10 @@ reg_read_range(struct tda998x_priv *priv, uint16_t reg, char *buf, int cnt)
 	uint8_t addr = REG2ADDR(reg);
 	int ret;
 
+	mutex_lock(&priv->mutex);
 	ret = set_page(priv, reg);
 	if (ret < 0)
-		return ret;
+		goto out;
 
 	ret = i2c_master_send(client, &addr, sizeof(addr));
 	if (ret < 0)
@@ -414,10 +417,12 @@ reg_read_range(struct tda998x_priv *priv, uint16_t reg, char *buf, int cnt)
 	if (ret < 0)
 		goto fail;
 
-	return ret;
+	goto out;
 
 fail:
 	dev_err(&client->dev, "Error %d reading from 0x%x\n", ret, reg);
+out:
+	mutex_unlock(&priv->mutex);
 	return ret;
 }
 
@@ -431,13 +436,16 @@ reg_write_range(struct tda998x_priv *priv, uint16_t reg, uint8_t *p, int cnt)
 	buf[0] = REG2ADDR(reg);
 	memcpy(&buf[1], p, cnt);
 
+	mutex_lock(&priv->mutex);
 	ret = set_page(priv, reg);
 	if (ret < 0)
-		return;
+		goto out;
 
 	ret = i2c_master_send(client, buf, cnt + 1);
 	if (ret < 0)
 		dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg);
+out:
+	mutex_unlock(&priv->mutex);
 }
 
 static int
@@ -459,13 +467,16 @@ reg_write(struct tda998x_priv *priv, uint16_t reg, uint8_t val)
 	uint8_t buf[] = {REG2ADDR(reg), val};
 	int ret;
 
+	mutex_lock(&priv->mutex);
 	ret = set_page(priv, reg);
 	if (ret < 0)
-		return;
+		goto out;
 
 	ret = i2c_master_send(client, buf, sizeof(buf));
 	if (ret < 0)
 		dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg);
+out:
+	mutex_unlock(&priv->mutex);
 }
 
 static void
@@ -475,13 +486,16 @@ reg_write16(struct tda998x_priv *priv, uint16_t reg, uint16_t val)
 	uint8_t buf[] = {REG2ADDR(reg), val >> 8, val};
 	int ret;
 
+	mutex_lock(&priv->mutex);
 	ret = set_page(priv, reg);
 	if (ret < 0)
-		return;
+		goto out;
 
 	ret = i2c_master_send(client, buf, sizeof(buf));
 	if (ret < 0)
 		dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg);
+out:
+	mutex_unlock(&priv->mutex);
 }
 
 static void
@@ -536,6 +550,17 @@ tda998x_reset(struct tda998x_priv *priv)
 	reg_write(priv, REG_MUX_VP_VIP_OUT, 0x24);
 }
 
+/* handle HDMI connect/disconnect */
+static void tda998x_hpd(struct work_struct *work)
+{
+	struct delayed_work *dwork = to_delayed_work(work);
+	struct tda998x_priv *priv =
+			container_of(dwork, struct tda998x_priv, dwork);
+
+	if (priv->encoder && priv->encoder->dev)
+		drm_kms_helper_hotplug_event(priv->encoder->dev);
+}
+
 /*
  * only 2 interrupts may occur: screen plug/unplug and EDID read
  */
@@ -559,8 +584,7 @@ static irqreturn_t tda998x_irq_thread(int irq, void *data)
 		priv->wq_edid_wait = 0;
 		wake_up(&priv->wq_edid);
 	} else if (cec != 0) {	/* HPD change */
-		if (priv->encoder && priv->encoder->dev)
-			drm_helper_hpd_irq_event(priv->encoder->dev);
+		schedule_delayed_work(&priv->dwork, HZ/10);
 	}
 	return IRQ_HANDLED;
 }
@@ -1170,8 +1194,10 @@ static void tda998x_destroy(struct tda998x_priv *priv)
 	/* disable all IRQs and free the IRQ handler */
 	cec_write(priv, REG_CEC_RXSHPDINTENA, 0);
 	reg_clear(priv, REG_INT_FLAGS_2, INT_FLAGS_2_EDID_BLK_RD);
-	if (priv->hdmi->irq)
+	if (priv->hdmi->irq) {
 		free_irq(priv->hdmi->irq, priv);
+		cancel_delayed_work_sync(&priv->dwork);
+	}
 
 	i2c_unregister_device(priv->cec);
 }
@@ -1255,6 +1281,7 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
 	struct device_node *np = client->dev.of_node;
 	u32 video;
 	int rev_lo, rev_hi, ret;
+	unsigned short cec_addr;
 
 	priv->vip_cntrl_0 = VIP_CNTRL_0_SWAP_A(2) | VIP_CNTRL_0_SWAP_B(3);
 	priv->vip_cntrl_1 = VIP_CNTRL_1_SWAP_C(0) | VIP_CNTRL_1_SWAP_D(1);
@@ -1262,12 +1289,16 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
 
 	priv->current_page = 0xff;
 	priv->hdmi = client;
-	priv->cec = i2c_new_dummy(client->adapter, 0x34);
+	/* CEC I2C address bound to TDA998x I2C addr by configuration pins */
+	cec_addr = 0x34 + (client->addr & 0x03);
+	priv->cec = i2c_new_dummy(client->adapter, cec_addr);
 	if (!priv->cec)
 		return -ENODEV;
 
 	priv->dpms = DRM_MODE_DPMS_OFF;
 
+	mutex_init(&priv->mutex);	/* protect the page access */
+
 	/* wake up the device: */
 	cec_write(priv, REG_CEC_ENAMODS,
 		  CEC_ENAMODS_EN_RXSENS | CEC_ENAMODS_EN_HDMI);
@@ -1323,8 +1354,9 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
 	if (client->irq) {
 		int irqf_trigger;
 
-		/* init read EDID waitqueue */
+		/* init read EDID waitqueue and HDP work */
 		init_waitqueue_head(&priv->wq_edid);
+		INIT_DELAYED_WORK(&priv->dwork, tda998x_hpd);
 
 		/* clear pending interrupts */
 		reg_read(priv, REG_INT_FLAGS_0);
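The tda998x changes serialize all paged register access behind priv->mutex and move hotplug handling out of the IRQ thread into delayed work, so bursts of HPD interrupts within ~100 ms (HZ/10) collapse into a single drm_kms_helper_hotplug_event() call; teardown then cancels the work right after the IRQ is freed so it cannot be re-queued. The coalescing half in miniature (illustrative):

	static irqreturn_t example_irq_thread(int irq, void *data)
	{
		struct tda998x_priv *priv = data;

		/* a no-op if already queued: repeated plug bounces coalesce */
		schedule_delayed_work(&priv->dwork, HZ / 10);
		return IRQ_HANDLED;
	}
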
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 574057cd1d09..7643300828c3 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -462,19 +462,13 @@ void intel_detect_pch(struct drm_device *dev)
 	} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
 		dev_priv->pch_type = PCH_LPT;
 		DRM_DEBUG_KMS("Found LynxPoint PCH\n");
-		WARN_ON(!IS_HASWELL(dev));
-		WARN_ON(IS_HSW_ULT(dev));
-	} else if (IS_BROADWELL(dev)) {
-		dev_priv->pch_type = PCH_LPT;
-		dev_priv->pch_id =
-			INTEL_PCH_LPT_LP_DEVICE_ID_TYPE;
-		DRM_DEBUG_KMS("This is Broadwell, assuming "
-			      "LynxPoint LP PCH\n");
+		WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev));
+		WARN_ON(IS_HSW_ULT(dev) || IS_BDW_ULT(dev));
 	} else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
 		dev_priv->pch_type = PCH_LPT;
 		DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
-		WARN_ON(!IS_HASWELL(dev));
-		WARN_ON(!IS_HSW_ULT(dev));
+		WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev));
+		WARN_ON(!IS_HSW_ULT(dev) && !IS_BDW_ULT(dev));
 	} else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) {
 		dev_priv->pch_type = PCH_SPT;
 		DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index e9f891c432f8..9d7a7155bf02 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2159,8 +2159,7 @@ struct drm_i915_cmd_table {
 #define IS_HSW_EARLY_SDV(dev)	(IS_HASWELL(dev) && \
 				 (INTEL_DEVID(dev) & 0xFF00) == 0x0C00)
 #define IS_BDW_ULT(dev)		(IS_BROADWELL(dev) && \
-				 ((INTEL_DEVID(dev) & 0xf) == 0x2 || \
-				 (INTEL_DEVID(dev) & 0xf) == 0x6 || \
+				 ((INTEL_DEVID(dev) & 0xf) == 0x6 || \
 				 (INTEL_DEVID(dev) & 0xf) == 0xe))
 #define IS_BDW_GT3(dev)		(IS_BROADWELL(dev) && \
 				 (INTEL_DEVID(dev) & 0x00F0) == 0x0020)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 76354d3ba925..5f614828d365 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3148,6 +3148,13 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg,
 	u32 size = i915_gem_obj_ggtt_size(obj);
 	uint64_t val;
 
+	/* Adjust fence size to match tiled area */
+	if (obj->tiling_mode != I915_TILING_NONE) {
+		uint32_t row_size = obj->stride *
+			(obj->tiling_mode == I915_TILING_Y ? 32 : 8);
+		size = (size / row_size) * row_size;
+	}
+
 	val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) &
 			 0xfffff000) << 32;
 	val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000;
@@ -4884,25 +4891,18 @@ i915_gem_init_hw(struct drm_device *dev)
 	for (i = 0; i < NUM_L3_SLICES(dev); i++)
 		i915_gem_l3_remap(&dev_priv->ring[RCS], i);
 
-	/*
-	 * XXX: Contexts should only be initialized once. Doing a switch to the
-	 * default context switch however is something we'd like to do after
-	 * reset or thaw (the latter may not actually be necessary for HW, but
-	 * goes with our code better). Context switching requires rings (for
-	 * the do_switch), but before enabling PPGTT. So don't move this.
-	 */
-	ret = i915_gem_context_enable(dev_priv);
+	ret = i915_ppgtt_init_hw(dev);
 	if (ret && ret != -EIO) {
-		DRM_ERROR("Context enable failed %d\n", ret);
+		DRM_ERROR("PPGTT enable failed %d\n", ret);
 		i915_gem_cleanup_ringbuffer(dev);
-
-		return ret;
 	}
 
-	ret = i915_ppgtt_init_hw(dev);
+	ret = i915_gem_context_enable(dev_priv);
 	if (ret && ret != -EIO) {
-		DRM_ERROR("PPGTT enable failed %d\n", ret);
+		DRM_ERROR("Context enable failed %d\n", ret);
 		i915_gem_cleanup_ringbuffer(dev);
+
+		return ret;
 	}
 
 	return ret;
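The fence hunk rounds the fence window down to a whole number of tile rows so the fence never extends past the last complete row. A worked example with assumed numbers:

	/* Illustrative numbers only: a Y-tiled object with a 4096-byte
	 * stride has row_size = 4096 * 32 = 131072 bytes, so a GGTT size
	 * of 1200000 bytes is trimmed to
	 *	(1200000 / 131072) * 131072 = 9 * 131072 = 1179648.
	 */
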
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 4d63839bd9b4..dfb783a8f2c3 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -962,7 +962,7 @@ void intel_panel_enable_backlight(struct intel_connector *connector)
 
 	WARN_ON(panel->backlight.max == 0);
 
-	if (panel->backlight.level == 0) {
+	if (panel->backlight.level <= panel->backlight.min) {
 		panel->backlight.level = panel->backlight.max;
 		if (panel->backlight.device)
 			panel->backlight.device->props.brightness =
diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c
index a0133c74f4cf..42cd0cffe210 100644
--- a/drivers/gpu/drm/radeon/cik_sdma.c
+++ b/drivers/gpu/drm/radeon/cik_sdma.c
@@ -816,7 +816,6 @@ void cik_sdma_vm_write_pages(struct radeon_device *rdev,
 	for (; ndw > 0; ndw -= 2, --count, pe += 8) {
 		if (flags & R600_PTE_SYSTEM) {
 			value = radeon_vm_map_gart(rdev, addr);
-			value &= 0xFFFFFFFFFFFFF000ULL;
 		} else if (flags & R600_PTE_VALID) {
 			value = addr;
 		} else {
diff --git a/drivers/gpu/drm/radeon/ni_dma.c b/drivers/gpu/drm/radeon/ni_dma.c
index 4be2bb7cbef3..ce787a9f12c0 100644
--- a/drivers/gpu/drm/radeon/ni_dma.c
+++ b/drivers/gpu/drm/radeon/ni_dma.c
@@ -372,7 +372,6 @@ void cayman_dma_vm_write_pages(struct radeon_device *rdev,
 	for (; ndw > 0; ndw -= 2, --count, pe += 8) {
 		if (flags & R600_PTE_SYSTEM) {
 			value = radeon_vm_map_gart(rdev, addr);
-			value &= 0xFFFFFFFFFFFFF000ULL;
 		} else if (flags & R600_PTE_VALID) {
 			value = addr;
 		} else {
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 74f06d540591..279801ca5110 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -644,6 +644,7 @@ int r100_pci_gart_init(struct radeon_device *rdev)
 		return r;
 	rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
 	rdev->asic->gart.tlb_flush = &r100_pci_gart_tlb_flush;
+	rdev->asic->gart.get_page_entry = &r100_pci_gart_get_page_entry;
 	rdev->asic->gart.set_page = &r100_pci_gart_set_page;
 	return radeon_gart_table_ram_alloc(rdev);
 }
@@ -681,11 +682,16 @@ void r100_pci_gart_disable(struct radeon_device *rdev)
 	WREG32(RADEON_AIC_HI_ADDR, 0);
 }
 
+uint64_t r100_pci_gart_get_page_entry(uint64_t addr, uint32_t flags)
+{
+	return addr;
+}
+
 void r100_pci_gart_set_page(struct radeon_device *rdev, unsigned i,
-			    uint64_t addr, uint32_t flags)
+			    uint64_t entry)
 {
 	u32 *gtt = rdev->gart.ptr;
-	gtt[i] = cpu_to_le32(lower_32_bits(addr));
+	gtt[i] = cpu_to_le32(lower_32_bits(entry));
 }
 
 void r100_pci_gart_fini(struct radeon_device *rdev)
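The radeon series splits GART updates into a pure translation step (->get_page_entry(), which folds the DMA address and flags into a ready-to-write PTE) and a bare table store (->set_page()). One quirk worth noting: the new radeon_gart_get_page_entry() macro in radeon.h expands to (rdev)->... even though rdev is not among its parameters, so like its neighbours it only compiles where a local rdev is in scope. The pattern the split enables, sketched for refilling unbound slots with one precomputed entry (illustrative; flag names taken from the r300.c hunk below):

	/* translate once ... */
	uint64_t entry = radeon_gart_get_page_entry(rdev->dummy_page.addr,
						    RADEON_GART_PAGE_READ |
						    RADEON_GART_PAGE_SNOOP);
	/* ... then issue cheap table stores, even in atomic context */
	for (i = 0; i < npages; i++)
		radeon_gart_set_page(rdev, start + i, entry);
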
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 064ad5569cca..08d68f3e13e9 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -73,11 +73,8 @@ void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev)
 #define R300_PTE_WRITEABLE (1 << 2)
 #define R300_PTE_READABLE  (1 << 3)
 
-void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i,
-			      uint64_t addr, uint32_t flags)
+uint64_t rv370_pcie_gart_get_page_entry(uint64_t addr, uint32_t flags)
 {
-	void __iomem *ptr = rdev->gart.ptr;
-
 	addr = (lower_32_bits(addr) >> 8) |
 		((upper_32_bits(addr) & 0xff) << 24);
 	if (flags & RADEON_GART_PAGE_READ)
@@ -86,10 +83,18 @@ void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i,
 		addr |= R300_PTE_WRITEABLE;
 	if (!(flags & RADEON_GART_PAGE_SNOOP))
 		addr |= R300_PTE_UNSNOOPED;
+	return addr;
+}
+
+void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i,
+			      uint64_t entry)
+{
+	void __iomem *ptr = rdev->gart.ptr;
+
 	/* on x86 we want this to be CPU endian, on powerpc
 	 * on powerpc without HW swappers, it'll get swapped on way
 	 * into VRAM - so no need for cpu_to_le32 on VRAM tables */
-	writel(addr, ((void __iomem *)ptr) + (i * 4));
+	writel(entry, ((void __iomem *)ptr) + (i * 4));
 }
 
 int rv370_pcie_gart_init(struct radeon_device *rdev)
@@ -109,6 +114,7 @@ int rv370_pcie_gart_init(struct radeon_device *rdev)
 		DRM_ERROR("Failed to register debugfs file for PCIE gart !\n");
 	rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
 	rdev->asic->gart.tlb_flush = &rv370_pcie_gart_tlb_flush;
+	rdev->asic->gart.get_page_entry = &rv370_pcie_gart_get_page_entry;
 	rdev->asic->gart.set_page = &rv370_pcie_gart_set_page;
 	return radeon_gart_table_vram_alloc(rdev);
 }
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 54529b837afa..3f2a8d3febca 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -242,6 +242,7 @@ bool radeon_get_bios(struct radeon_device *rdev);
  * Dummy page
  */
 struct radeon_dummy_page {
+	uint64_t	entry;
 	struct page	*page;
 	dma_addr_t	addr;
 };
@@ -645,7 +646,7 @@ struct radeon_gart {
 	unsigned			num_cpu_pages;
 	unsigned			table_size;
 	struct page			**pages;
-	dma_addr_t			*pages_addr;
+	uint64_t			*pages_entry;
 	bool				ready;
 };
 
@@ -1847,8 +1848,9 @@ struct radeon_asic {
 	/* gart */
 	struct {
 		void (*tlb_flush)(struct radeon_device *rdev);
+		uint64_t (*get_page_entry)(uint64_t addr, uint32_t flags);
 		void (*set_page)(struct radeon_device *rdev, unsigned i,
-				 uint64_t addr, uint32_t flags);
+				 uint64_t entry);
 	} gart;
 	struct {
 		int (*init)(struct radeon_device *rdev);
@@ -2852,7 +2854,8 @@ static inline void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
 #define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state))
 #define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev))
 #define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart.tlb_flush((rdev))
-#define radeon_gart_set_page(rdev, i, p, f) (rdev)->asic->gart.set_page((rdev), (i), (p), (f))
+#define radeon_gart_get_page_entry(a, f) (rdev)->asic->gart.get_page_entry((a), (f))
+#define radeon_gart_set_page(rdev, i, e) (rdev)->asic->gart.set_page((rdev), (i), (e))
 #define radeon_asic_vm_init(rdev) (rdev)->asic->vm.init((rdev))
 #define radeon_asic_vm_fini(rdev) (rdev)->asic->vm.fini((rdev))
 #define radeon_asic_vm_copy_pages(rdev, ib, pe, src, count) ((rdev)->asic->vm.copy_pages((rdev), (ib), (pe), (src), (count)))
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index 121aff6a3b41..ed0e10eee2dc 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -159,11 +159,13 @@ void radeon_agp_disable(struct radeon_device *rdev)
 		DRM_INFO("Forcing AGP to PCIE mode\n");
 		rdev->flags |= RADEON_IS_PCIE;
 		rdev->asic->gart.tlb_flush = &rv370_pcie_gart_tlb_flush;
+		rdev->asic->gart.get_page_entry = &rv370_pcie_gart_get_page_entry;
 		rdev->asic->gart.set_page = &rv370_pcie_gart_set_page;
 	} else {
 		DRM_INFO("Forcing AGP to PCI mode\n");
 		rdev->flags |= RADEON_IS_PCI;
 		rdev->asic->gart.tlb_flush = &r100_pci_gart_tlb_flush;
+		rdev->asic->gart.get_page_entry = &r100_pci_gart_get_page_entry;
 		rdev->asic->gart.set_page = &r100_pci_gart_set_page;
 	}
 	rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
@@ -199,6 +201,7 @@ static struct radeon_asic r100_asic = {
 	.mc_wait_for_idle = &r100_mc_wait_for_idle,
 	.gart = {
 		.tlb_flush = &r100_pci_gart_tlb_flush,
+		.get_page_entry = &r100_pci_gart_get_page_entry,
 		.set_page = &r100_pci_gart_set_page,
 	},
 	.ring = {
@@ -265,6 +268,7 @@ static struct radeon_asic r200_asic = {
 	.mc_wait_for_idle = &r100_mc_wait_for_idle,
 	.gart = {
 		.tlb_flush = &r100_pci_gart_tlb_flush,
+		.get_page_entry = &r100_pci_gart_get_page_entry,
 		.set_page = &r100_pci_gart_set_page,
 	},
 	.ring = {
@@ -359,6 +363,7 @@ static struct radeon_asic r300_asic = {
 	.mc_wait_for_idle = &r300_mc_wait_for_idle,
 	.gart = {
 		.tlb_flush = &r100_pci_gart_tlb_flush,
+		.get_page_entry = &r100_pci_gart_get_page_entry,
 		.set_page = &r100_pci_gart_set_page,
 	},
 	.ring = {
@@ -425,6 +430,7 @@ static struct radeon_asic r300_asic_pcie = {
 	.mc_wait_for_idle = &r300_mc_wait_for_idle,
 	.gart = {
 		.tlb_flush = &rv370_pcie_gart_tlb_flush,
+		.get_page_entry = &rv370_pcie_gart_get_page_entry,
 		.set_page = &rv370_pcie_gart_set_page,
 	},
 	.ring = {
@@ -491,6 +497,7 @@ static struct radeon_asic r420_asic = {
 	.mc_wait_for_idle = &r300_mc_wait_for_idle,
 	.gart = {
 		.tlb_flush = &rv370_pcie_gart_tlb_flush,
+		.get_page_entry = &rv370_pcie_gart_get_page_entry,
 		.set_page = &rv370_pcie_gart_set_page,
 	},
 	.ring = {
@@ -557,6 +564,7 @@ static struct radeon_asic rs400_asic = {
 	.mc_wait_for_idle = &rs400_mc_wait_for_idle,
 	.gart = {
 		.tlb_flush = &rs400_gart_tlb_flush,
+		.get_page_entry = &rs400_gart_get_page_entry,
 		.set_page = &rs400_gart_set_page,
 	},
 	.ring = {
@@ -623,6 +631,7 @@ static struct radeon_asic rs600_asic = {
 	.mc_wait_for_idle = &rs600_mc_wait_for_idle,
 	.gart = {
 		.tlb_flush = &rs600_gart_tlb_flush,
+		.get_page_entry = &rs600_gart_get_page_entry,
 		.set_page = &rs600_gart_set_page,
 	},
 	.ring = {
@@ -691,6 +700,7 @@ static struct radeon_asic rs690_asic = {
 	.mc_wait_for_idle = &rs690_mc_wait_for_idle,
 	.gart = {
 		.tlb_flush = &rs400_gart_tlb_flush,
+		.get_page_entry = &rs400_gart_get_page_entry,
 		.set_page = &rs400_gart_set_page,
 	},
 	.ring = {
@@ -759,6 +769,7 @@ static struct radeon_asic rv515_asic = {
 	.mc_wait_for_idle = &rv515_mc_wait_for_idle,
 	.gart = {
 		.tlb_flush = &rv370_pcie_gart_tlb_flush,
+		.get_page_entry = &rv370_pcie_gart_get_page_entry,
 		.set_page = &rv370_pcie_gart_set_page,
 	},
 	.ring = {
@@ -825,6 +836,7 @@ static struct radeon_asic r520_asic = {
 	.mc_wait_for_idle = &r520_mc_wait_for_idle,
 	.gart = {
 		.tlb_flush = &rv370_pcie_gart_tlb_flush,
+		.get_page_entry = &rv370_pcie_gart_get_page_entry,
 		.set_page = &rv370_pcie_gart_set_page,
 	},
 	.ring = {
@@ -919,6 +931,7 @@ static struct radeon_asic r600_asic = {
 	.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
 	.gart = {
 		.tlb_flush = &r600_pcie_gart_tlb_flush,
+		.get_page_entry = &rs600_gart_get_page_entry,
 		.set_page = &rs600_gart_set_page,
 	},
 	.ring = {
@@ -1004,6 +1017,7 @@ static struct radeon_asic rv6xx_asic = {
 	.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
 	.gart = {
 		.tlb_flush = &r600_pcie_gart_tlb_flush,
+		.get_page_entry = &rs600_gart_get_page_entry,
 		.set_page = &rs600_gart_set_page,
 	},
 	.ring = {
@@ -1095,6 +1109,7 @@ static struct radeon_asic rs780_asic = {
 	.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
 	.gart = {
 		.tlb_flush = &r600_pcie_gart_tlb_flush,
+		.get_page_entry = &rs600_gart_get_page_entry,
 		.set_page = &rs600_gart_set_page,
 	},
 	.ring = {
@@ -1199,6 +1214,7 @@ static struct radeon_asic rv770_asic = {
 	.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
 	.gart = {
 		.tlb_flush = &r600_pcie_gart_tlb_flush,
+		.get_page_entry = &rs600_gart_get_page_entry,
 		.set_page = &rs600_gart_set_page,
 	},
 	.ring = {
@@ -1317,6 +1333,7 @@ static struct radeon_asic evergreen_asic = {
 	.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
 	.gart = {
 		.tlb_flush = &evergreen_pcie_gart_tlb_flush,
+		.get_page_entry = &rs600_gart_get_page_entry,
 		.set_page = &rs600_gart_set_page,
 	},
 	.ring = {
@@ -1409,6 +1426,7 @@ static struct radeon_asic sumo_asic = {
 	.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
 	.gart = {
 		.tlb_flush = &evergreen_pcie_gart_tlb_flush,
+		.get_page_entry = &rs600_gart_get_page_entry,
 		.set_page = &rs600_gart_set_page,
 	},
 	.ring = {
@@ -1500,6 +1518,7 @@ static struct radeon_asic btc_asic = {
 	.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
 	.gart = {
 		.tlb_flush = &evergreen_pcie_gart_tlb_flush,
+		.get_page_entry = &rs600_gart_get_page_entry,
 		.set_page = &rs600_gart_set_page,
 	},
 	.ring = {
@@ -1635,6 +1654,7 @@ static struct radeon_asic cayman_asic = {
 	.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
 	.gart = {
 		.tlb_flush = &cayman_pcie_gart_tlb_flush,
+		.get_page_entry = &rs600_gart_get_page_entry,
 		.set_page = &rs600_gart_set_page,
 	},
 	.vm = {
@@ -1738,6 +1758,7 @@ static struct radeon_asic trinity_asic = {
 	.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
 	.gart = {
 		.tlb_flush = &cayman_pcie_gart_tlb_flush,
+		.get_page_entry = &rs600_gart_get_page_entry,
 		.set_page = &rs600_gart_set_page,
 	},
 	.vm = {
@@ -1871,6 +1892,7 @@ static struct radeon_asic si_asic = {
 	.get_gpu_clock_counter = &si_get_gpu_clock_counter,
 	.gart = {
 		.tlb_flush = &si_pcie_gart_tlb_flush,
+		.get_page_entry = &rs600_gart_get_page_entry,
 		.set_page = &rs600_gart_set_page,
 	},
 	.vm = {
@@ -2032,6 +2054,7 @@ static struct radeon_asic ci_asic = {
 	.get_gpu_clock_counter = &cik_get_gpu_clock_counter,
 	.gart = {
 		.tlb_flush = &cik_pcie_gart_tlb_flush,
+		.get_page_entry = &rs600_gart_get_page_entry,
 		.set_page = &rs600_gart_set_page,
 	},
 	.vm = {
@@ -2139,6 +2162,7 @@ static struct radeon_asic kv_asic = {
 	.get_gpu_clock_counter = &cik_get_gpu_clock_counter,
 	.gart = {
 		.tlb_flush = &cik_pcie_gart_tlb_flush,
+		.get_page_entry = &rs600_gart_get_page_entry,
 		.set_page = &rs600_gart_set_page,
 	},
 	.vm = {
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 2a45d548d5ec..8d787d115653 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -67,8 +67,9 @@ bool r100_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
 int r100_asic_reset(struct radeon_device *rdev);
 u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc);
 void r100_pci_gart_tlb_flush(struct radeon_device *rdev);
+uint64_t r100_pci_gart_get_page_entry(uint64_t addr, uint32_t flags);
 void r100_pci_gart_set_page(struct radeon_device *rdev, unsigned i,
-			    uint64_t addr, uint32_t flags);
+			    uint64_t entry);
 void r100_ring_start(struct radeon_device *rdev, struct radeon_ring *ring);
 int r100_irq_set(struct radeon_device *rdev);
 int r100_irq_process(struct radeon_device *rdev);
@@ -172,8 +173,9 @@ extern void r300_fence_ring_emit(struct radeon_device *rdev,
 				 struct radeon_fence *fence);
 extern int r300_cs_parse(struct radeon_cs_parser *p);
 extern void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev);
+extern uint64_t rv370_pcie_gart_get_page_entry(uint64_t addr, uint32_t flags);
 extern void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i,
-				     uint64_t addr, uint32_t flags);
+				     uint64_t entry);
 extern void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes);
 extern int rv370_get_pcie_lanes(struct radeon_device *rdev);
 extern void r300_set_reg_safe(struct radeon_device *rdev);
@@ -208,8 +210,9 @@ extern void rs400_fini(struct radeon_device *rdev);
 extern int rs400_suspend(struct radeon_device *rdev);
 extern int rs400_resume(struct radeon_device *rdev);
 void rs400_gart_tlb_flush(struct radeon_device *rdev);
+uint64_t rs400_gart_get_page_entry(uint64_t addr, uint32_t flags);
 void rs400_gart_set_page(struct radeon_device *rdev, unsigned i,
-			 uint64_t addr, uint32_t flags);
+			 uint64_t entry);
 uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg);
 void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
 int rs400_gart_init(struct radeon_device *rdev);
@@ -232,8 +235,9 @@ int rs600_irq_process(struct radeon_device *rdev);
 void rs600_irq_disable(struct radeon_device *rdev);
 u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc);
 void rs600_gart_tlb_flush(struct radeon_device *rdev);
+uint64_t rs600_gart_get_page_entry(uint64_t addr, uint32_t flags);
 void rs600_gart_set_page(struct radeon_device *rdev, unsigned i,
-			 uint64_t addr, uint32_t flags);
+			 uint64_t entry);
 uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg);
 void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
 void rs600_bandwidth_update(struct radeon_device *rdev);
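The prototypes above capture the new contract: the old set_page(rdev, i, addr, flags) is split into a pure encoder and a plain table write. As a minimal sketch (not part of the patch; real callers dispatch through the per-ASIC gart callbacks, and gart_update_one is an illustrative name), the two calls compose like this:

	/* Encode the hardware PTE once, cache it, and only touch the table
	 * if it is currently mapped. */
	static void gart_update_one(struct radeon_device *rdev, unsigned t,
				    uint64_t dma_addr, uint32_t flags)
	{
		uint64_t entry = radeon_gart_get_page_entry(dma_addr, flags);

		rdev->gart.pages_entry[t] = entry;
		if (rdev->gart.ptr)
			radeon_gart_set_page(rdev, t, entry);
	}

Because get_page_entry takes no rdev, an entry can be computed and stored even while the GART table itself is unmapped.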
diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c
index 9e7f23dd14bd..87d5fb21cb61 100644
--- a/drivers/gpu/drm/radeon/radeon_benchmark.c
+++ b/drivers/gpu/drm/radeon/radeon_benchmark.c
@@ -34,7 +34,8 @@
 
 static int radeon_benchmark_do_move(struct radeon_device *rdev, unsigned size,
 				    uint64_t saddr, uint64_t daddr,
-				    int flag, int n)
+				    int flag, int n,
+				    struct reservation_object *resv)
 {
 	unsigned long start_jiffies;
 	unsigned long end_jiffies;
@@ -47,12 +48,12 @@ static int radeon_benchmark_do_move(struct radeon_device *rdev, unsigned size,
 	case RADEON_BENCHMARK_COPY_DMA:
 		fence = radeon_copy_dma(rdev, saddr, daddr,
 					size / RADEON_GPU_PAGE_SIZE,
-					NULL);
+					resv);
 		break;
 	case RADEON_BENCHMARK_COPY_BLIT:
 		fence = radeon_copy_blit(rdev, saddr, daddr,
 					 size / RADEON_GPU_PAGE_SIZE,
-					 NULL);
+					 resv);
 		break;
 	default:
 		DRM_ERROR("Unknown copy method\n");
@@ -120,7 +121,8 @@ static void radeon_benchmark_move(struct radeon_device *rdev, unsigned size,
 
 	if (rdev->asic->copy.dma) {
 		time = radeon_benchmark_do_move(rdev, size, saddr, daddr,
-						RADEON_BENCHMARK_COPY_DMA, n);
+						RADEON_BENCHMARK_COPY_DMA, n,
+						dobj->tbo.resv);
 		if (time < 0)
 			goto out_cleanup;
 		if (time > 0)
@@ -130,7 +132,8 @@ static void radeon_benchmark_move(struct radeon_device *rdev, unsigned size,
 
 	if (rdev->asic->copy.blit) {
 		time = radeon_benchmark_do_move(rdev, size, saddr, daddr,
-						RADEON_BENCHMARK_COPY_BLIT, n);
+						RADEON_BENCHMARK_COPY_BLIT, n,
+						dobj->tbo.resv);
 		if (time < 0)
 			goto out_cleanup;
 		if (time > 0)
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 0ec65168f331..bd7519fdd3f4 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -774,6 +774,8 @@ int radeon_dummy_page_init(struct radeon_device *rdev)
774 rdev->dummy_page.page = NULL; 774 rdev->dummy_page.page = NULL;
775 return -ENOMEM; 775 return -ENOMEM;
776 } 776 }
777 rdev->dummy_page.entry = radeon_gart_get_page_entry(rdev->dummy_page.addr,
778 RADEON_GART_PAGE_DUMMY);
777 return 0; 779 return 0;
778} 780}
779 781
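With the entry encoded once at dummy-page init, paths that previously rebuilt addr plus RADEON_GART_PAGE_DUMMY for every slot can reuse the cached value. A hedged one-liner of the consumer side (the real uses are in radeon_gart.c below):

	rdev->gart.pages_entry[t] = rdev->dummy_page.entry;	/* no re-encoding */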
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 102116902a07..913fafa597ad 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -960,6 +960,9 @@ void radeon_compute_pll_avivo(struct radeon_pll *pll,
960 if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV && 960 if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV &&
961 pll->flags & RADEON_PLL_USE_REF_DIV) 961 pll->flags & RADEON_PLL_USE_REF_DIV)
962 ref_div_max = pll->reference_div; 962 ref_div_max = pll->reference_div;
963 else if (pll->flags & RADEON_PLL_PREFER_MINM_OVER_MAXP)
964 /* fix for problems on RS880 */
965 ref_div_max = min(pll->max_ref_div, 7u);
963 else 966 else
964 ref_div_max = pll->max_ref_div; 967 ref_div_max = pll->max_ref_div;
965 968
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index 84146d5901aa..5450fa95a47e 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -165,6 +165,19 @@ int radeon_gart_table_vram_pin(struct radeon_device *rdev)
165 radeon_bo_unpin(rdev->gart.robj); 165 radeon_bo_unpin(rdev->gart.robj);
166 radeon_bo_unreserve(rdev->gart.robj); 166 radeon_bo_unreserve(rdev->gart.robj);
167 rdev->gart.table_addr = gpu_addr; 167 rdev->gart.table_addr = gpu_addr;
168
169 if (!r) {
170 int i;
171
172 /* We might have dropped some GART table updates while it wasn't
173 * mapped, restore all entries
174 */
175 for (i = 0; i < rdev->gart.num_gpu_pages; i++)
176 radeon_gart_set_page(rdev, i, rdev->gart.pages_entry[i]);
177 mb();
178 radeon_gart_tlb_flush(rdev);
179 }
180
168 return r; 181 return r;
169} 182}
170 183
@@ -228,7 +241,6 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
228 unsigned t; 241 unsigned t;
229 unsigned p; 242 unsigned p;
230 int i, j; 243 int i, j;
231 u64 page_base;
232 244
233 if (!rdev->gart.ready) { 245 if (!rdev->gart.ready) {
234 WARN(1, "trying to unbind memory from uninitialized GART !\n"); 246 WARN(1, "trying to unbind memory from uninitialized GART !\n");
@@ -239,14 +251,12 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
239 for (i = 0; i < pages; i++, p++) { 251 for (i = 0; i < pages; i++, p++) {
240 if (rdev->gart.pages[p]) { 252 if (rdev->gart.pages[p]) {
241 rdev->gart.pages[p] = NULL; 253 rdev->gart.pages[p] = NULL;
242 rdev->gart.pages_addr[p] = rdev->dummy_page.addr;
243 page_base = rdev->gart.pages_addr[p];
244 for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) { 254 for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
255 rdev->gart.pages_entry[t] = rdev->dummy_page.entry;
245 if (rdev->gart.ptr) { 256 if (rdev->gart.ptr) {
246 radeon_gart_set_page(rdev, t, page_base, 257 radeon_gart_set_page(rdev, t,
247 RADEON_GART_PAGE_DUMMY); 258 rdev->dummy_page.entry);
248 } 259 }
249 page_base += RADEON_GPU_PAGE_SIZE;
250 } 260 }
251 } 261 }
252 } 262 }
@@ -274,7 +284,7 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
274{ 284{
275 unsigned t; 285 unsigned t;
276 unsigned p; 286 unsigned p;
277 uint64_t page_base; 287 uint64_t page_base, page_entry;
278 int i, j; 288 int i, j;
279 289
280 if (!rdev->gart.ready) { 290 if (!rdev->gart.ready) {
@@ -285,14 +295,15 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
285 p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); 295 p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
286 296
287 for (i = 0; i < pages; i++, p++) { 297 for (i = 0; i < pages; i++, p++) {
288 rdev->gart.pages_addr[p] = dma_addr[i];
289 rdev->gart.pages[p] = pagelist[i]; 298 rdev->gart.pages[p] = pagelist[i];
290 if (rdev->gart.ptr) { 299 page_base = dma_addr[i];
291 page_base = rdev->gart.pages_addr[p]; 300 for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
292 for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) { 301 page_entry = radeon_gart_get_page_entry(page_base, flags);
293 radeon_gart_set_page(rdev, t, page_base, flags); 302 rdev->gart.pages_entry[t] = page_entry;
294 page_base += RADEON_GPU_PAGE_SIZE; 303 if (rdev->gart.ptr) {
304 radeon_gart_set_page(rdev, t, page_entry);
295 } 305 }
306 page_base += RADEON_GPU_PAGE_SIZE;
296 } 307 }
297 } 308 }
298 mb(); 309 mb();
@@ -334,16 +345,15 @@ int radeon_gart_init(struct radeon_device *rdev)
334 radeon_gart_fini(rdev); 345 radeon_gart_fini(rdev);
335 return -ENOMEM; 346 return -ENOMEM;
336 } 347 }
337 rdev->gart.pages_addr = vzalloc(sizeof(dma_addr_t) * 348 rdev->gart.pages_entry = vmalloc(sizeof(uint64_t) *
338 rdev->gart.num_cpu_pages); 349 rdev->gart.num_gpu_pages);
339 if (rdev->gart.pages_addr == NULL) { 350 if (rdev->gart.pages_entry == NULL) {
340 radeon_gart_fini(rdev); 351 radeon_gart_fini(rdev);
341 return -ENOMEM; 352 return -ENOMEM;
342 } 353 }
343 /* set GART entry to point to the dummy page by default */ 354 /* set GART entry to point to the dummy page by default */
344 for (i = 0; i < rdev->gart.num_cpu_pages; i++) { 355 for (i = 0; i < rdev->gart.num_gpu_pages; i++)
345 rdev->gart.pages_addr[i] = rdev->dummy_page.addr; 356 rdev->gart.pages_entry[i] = rdev->dummy_page.entry;
346 }
347 return 0; 357 return 0;
348} 358}
349 359
@@ -356,15 +366,15 @@ int radeon_gart_init(struct radeon_device *rdev)
356 */ 366 */
357void radeon_gart_fini(struct radeon_device *rdev) 367void radeon_gart_fini(struct radeon_device *rdev)
358{ 368{
359 if (rdev->gart.pages && rdev->gart.pages_addr && rdev->gart.ready) { 369 if (rdev->gart.ready) {
360 /* unbind pages */ 370 /* unbind pages */
361 radeon_gart_unbind(rdev, 0, rdev->gart.num_cpu_pages); 371 radeon_gart_unbind(rdev, 0, rdev->gart.num_cpu_pages);
362 } 372 }
363 rdev->gart.ready = false; 373 rdev->gart.ready = false;
364 vfree(rdev->gart.pages); 374 vfree(rdev->gart.pages);
365 vfree(rdev->gart.pages_addr); 375 vfree(rdev->gart.pages_entry);
366 rdev->gart.pages = NULL; 376 rdev->gart.pages = NULL;
367 rdev->gart.pages_addr = NULL; 377 rdev->gart.pages_entry = NULL;
368 378
369 radeon_dummy_page_fini(rdev); 379 radeon_dummy_page_fini(rdev);
370} 380}
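Two granularities are in play here: pages[] and the unbind range are counted in CPU pages, while the new pages_entry[] is sized in GPU pages. A short sketch of the index math the loops above rely on (values are illustrative; on 4 KiB-page systems the ratio is simply 1):

	unsigned gpu_per_cpu = PAGE_SIZE / RADEON_GPU_PAGE_SIZE;	/* GPU pages per CPU page */
	unsigned t = offset / RADEON_GPU_PAGE_SIZE;			/* first GART slot */
	unsigned p = t / gpu_per_cpu;					/* owning CPU page */

Keeping the full entry array resident is also what makes the restore block in radeon_gart_table_vram_pin() possible: every slot can be replayed into the freshly mapped table without consulting the original dma_addr/flags pairs.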
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index d0b4f7d1140d..ac3c1310b953 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -146,7 +146,8 @@ int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_pri
 	struct radeon_bo_va *bo_va;
 	int r;
 
-	if (rdev->family < CHIP_CAYMAN) {
+	if ((rdev->family < CHIP_CAYMAN) ||
+	    (!rdev->accel_working)) {
 		return 0;
 	}
 
@@ -176,7 +177,8 @@ void radeon_gem_object_close(struct drm_gem_object *obj,
 	struct radeon_bo_va *bo_va;
 	int r;
 
-	if (rdev->family < CHIP_CAYMAN) {
+	if ((rdev->family < CHIP_CAYMAN) ||
+	    (!rdev->accel_working)) {
 		return;
 	}
 
diff --git a/drivers/gpu/drm/radeon/radeon_kfd.c b/drivers/gpu/drm/radeon/radeon_kfd.c
index 8bf87f1203cc..bef9a0953284 100644
--- a/drivers/gpu/drm/radeon/radeon_kfd.c
+++ b/drivers/gpu/drm/radeon/radeon_kfd.c
@@ -436,7 +436,7 @@ static int kgd_init_memory(struct kgd_dev *kgd)
436static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id, 436static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id,
437 uint32_t hpd_size, uint64_t hpd_gpu_addr) 437 uint32_t hpd_size, uint64_t hpd_gpu_addr)
438{ 438{
439 uint32_t mec = (++pipe_id / CIK_PIPE_PER_MEC) + 1; 439 uint32_t mec = (pipe_id / CIK_PIPE_PER_MEC) + 1;
440 uint32_t pipe = (pipe_id % CIK_PIPE_PER_MEC); 440 uint32_t pipe = (pipe_id % CIK_PIPE_PER_MEC);
441 441
442 lock_srbm(kgd, mec, pipe, 0, 0); 442 lock_srbm(kgd, mec, pipe, 0, 0);
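The one-character fix above removes a pre-increment that corrupted its own input: both derived values saw pipe_id + 1. Worked through with CIK_PIPE_PER_MEC assumed to be 4 (its value in this file):

	/* old: pipe_id = 3  ->  ++pipe_id = 4  ->  mec = 4/4 + 1 = 2, pipe = 4%4 = 0 */
	/* new: pipe_id = 3  ->                    mec = 3/4 + 1 = 1, pipe = 3%4 = 3 */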
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 3cf9c1fa6475..686411e4e4f6 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -605,14 +605,14 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
 			return -ENOMEM;
 		}
 
-		vm = &fpriv->vm;
-		r = radeon_vm_init(rdev, vm);
-		if (r) {
-			kfree(fpriv);
-			return r;
-		}
-
 		if (rdev->accel_working) {
+			vm = &fpriv->vm;
+			r = radeon_vm_init(rdev, vm);
+			if (r) {
+				kfree(fpriv);
+				return r;
+			}
+
 			r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
 			if (r) {
 				radeon_vm_fini(rdev, vm);
@@ -668,9 +668,9 @@ void radeon_driver_postclose_kms(struct drm_device *dev,
 			radeon_vm_bo_rmv(rdev, vm->ib_bo_va);
 			radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
 		}
+		radeon_vm_fini(rdev, vm);
 	}
 
-	radeon_vm_fini(rdev, vm);
 	kfree(fpriv);
 	file_priv->driver_priv = NULL;
 }
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
index 07b506b41008..791818165c76 100644
--- a/drivers/gpu/drm/radeon/radeon_test.c
+++ b/drivers/gpu/drm/radeon/radeon_test.c
@@ -119,11 +119,11 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
 		if (ring == R600_RING_TYPE_DMA_INDEX)
 			fence = radeon_copy_dma(rdev, gtt_addr, vram_addr,
 						size / RADEON_GPU_PAGE_SIZE,
-						NULL);
+						vram_obj->tbo.resv);
 		else
 			fence = radeon_copy_blit(rdev, gtt_addr, vram_addr,
 						 size / RADEON_GPU_PAGE_SIZE,
-						 NULL);
+						 vram_obj->tbo.resv);
 		if (IS_ERR(fence)) {
 			DRM_ERROR("Failed GTT->VRAM copy %d\n", i);
 			r = PTR_ERR(fence);
@@ -170,11 +170,11 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
 		if (ring == R600_RING_TYPE_DMA_INDEX)
 			fence = radeon_copy_dma(rdev, vram_addr, gtt_addr,
 						size / RADEON_GPU_PAGE_SIZE,
-						NULL);
+						vram_obj->tbo.resv);
 		else
 			fence = radeon_copy_blit(rdev, vram_addr, gtt_addr,
 						 size / RADEON_GPU_PAGE_SIZE,
-						 NULL);
+						 vram_obj->tbo.resv);
 		if (IS_ERR(fence)) {
 			DRM_ERROR("Failed VRAM->GTT copy %d\n", i);
 			r = PTR_ERR(fence);
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index cde48c42b30a..2a5a4a9e772d 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -587,10 +587,8 @@ uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr)
 	uint64_t result;
 
 	/* page table offset */
-	result = rdev->gart.pages_addr[addr >> PAGE_SHIFT];
-
-	/* in case cpu page size != gpu page size*/
-	result |= addr & (~PAGE_MASK);
+	result = rdev->gart.pages_entry[addr >> RADEON_GPU_PAGE_SHIFT];
+	result &= ~RADEON_GPU_PAGE_MASK;
 
 	return result;
 }
@@ -745,9 +743,11 @@ static void radeon_vm_frag_ptes(struct radeon_device *rdev,
 	 */
 
 	/* NI is optimized for 256KB fragments, SI and newer for 64KB */
-	uint64_t frag_flags = rdev->family == CHIP_CAYMAN ?
+	uint64_t frag_flags = ((rdev->family == CHIP_CAYMAN) ||
+			       (rdev->family == CHIP_ARUBA)) ?
 			R600_PTE_FRAG_256KB : R600_PTE_FRAG_64KB;
-	uint64_t frag_align = rdev->family == CHIP_CAYMAN ? 0x200 : 0x80;
+	uint64_t frag_align = ((rdev->family == CHIP_CAYMAN) ||
+			       (rdev->family == CHIP_ARUBA)) ? 0x200 : 0x80;
 
 	uint64_t frag_start = ALIGN(pe_start, frag_align);
 	uint64_t frag_end = pe_end & ~(frag_align - 1);
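radeon_vm_map_gart() now returns a ready-to-use PTE rather than a bare bus address. The old code indexed per CPU page and had to OR the sub-page offset back in; pages_entry[] is indexed per GPU page, so the entry already names the right 4 KiB page and only the low hardware flag bits need stripping. A hedged restatement of the new lookup:

	/* pages_entry[] holds encoded PTEs at GPU-page granularity */
	result = rdev->gart.pages_entry[addr >> RADEON_GPU_PAGE_SHIFT];
	result &= ~RADEON_GPU_PAGE_MASK;	/* clear flag bits, keep the address */

This is also why the extra 0xFFFFFFFFFFFFF000ULL masking in si_dma.c further down becomes redundant and is removed.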
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index c5799f16aa4b..34e3235f41d2 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -212,11 +212,9 @@ void rs400_gart_fini(struct radeon_device *rdev)
212#define RS400_PTE_WRITEABLE (1 << 2) 212#define RS400_PTE_WRITEABLE (1 << 2)
213#define RS400_PTE_READABLE (1 << 3) 213#define RS400_PTE_READABLE (1 << 3)
214 214
215void rs400_gart_set_page(struct radeon_device *rdev, unsigned i, 215uint64_t rs400_gart_get_page_entry(uint64_t addr, uint32_t flags)
216 uint64_t addr, uint32_t flags)
217{ 216{
218 uint32_t entry; 217 uint32_t entry;
219 u32 *gtt = rdev->gart.ptr;
220 218
221 entry = (lower_32_bits(addr) & PAGE_MASK) | 219 entry = (lower_32_bits(addr) & PAGE_MASK) |
222 ((upper_32_bits(addr) & 0xff) << 4); 220 ((upper_32_bits(addr) & 0xff) << 4);
@@ -226,8 +224,14 @@ void rs400_gart_set_page(struct radeon_device *rdev, unsigned i,
226 entry |= RS400_PTE_WRITEABLE; 224 entry |= RS400_PTE_WRITEABLE;
227 if (!(flags & RADEON_GART_PAGE_SNOOP)) 225 if (!(flags & RADEON_GART_PAGE_SNOOP))
228 entry |= RS400_PTE_UNSNOOPED; 226 entry |= RS400_PTE_UNSNOOPED;
229 entry = cpu_to_le32(entry); 227 return entry;
230 gtt[i] = entry; 228}
229
230void rs400_gart_set_page(struct radeon_device *rdev, unsigned i,
231 uint64_t entry)
232{
233 u32 *gtt = rdev->gart.ptr;
234 gtt[i] = cpu_to_le32(lower_32_bits(entry));
231} 235}
232 236
233int rs400_mc_wait_for_idle(struct radeon_device *rdev) 237int rs400_mc_wait_for_idle(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 9acb1c3c005b..74bce91aecc1 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -625,11 +625,8 @@ static void rs600_gart_fini(struct radeon_device *rdev)
625 radeon_gart_table_vram_free(rdev); 625 radeon_gart_table_vram_free(rdev);
626} 626}
627 627
628void rs600_gart_set_page(struct radeon_device *rdev, unsigned i, 628uint64_t rs600_gart_get_page_entry(uint64_t addr, uint32_t flags)
629 uint64_t addr, uint32_t flags)
630{ 629{
631 void __iomem *ptr = (void *)rdev->gart.ptr;
632
633 addr = addr & 0xFFFFFFFFFFFFF000ULL; 630 addr = addr & 0xFFFFFFFFFFFFF000ULL;
634 addr |= R600_PTE_SYSTEM; 631 addr |= R600_PTE_SYSTEM;
635 if (flags & RADEON_GART_PAGE_VALID) 632 if (flags & RADEON_GART_PAGE_VALID)
@@ -640,7 +637,14 @@ void rs600_gart_set_page(struct radeon_device *rdev, unsigned i,
640 addr |= R600_PTE_WRITEABLE; 637 addr |= R600_PTE_WRITEABLE;
641 if (flags & RADEON_GART_PAGE_SNOOP) 638 if (flags & RADEON_GART_PAGE_SNOOP)
642 addr |= R600_PTE_SNOOPED; 639 addr |= R600_PTE_SNOOPED;
643 writeq(addr, ptr + (i * 8)); 640 return addr;
641}
642
643void rs600_gart_set_page(struct radeon_device *rdev, unsigned i,
644 uint64_t entry)
645{
646 void __iomem *ptr = (void *)rdev->gart.ptr;
647 writeq(entry, ptr + (i * 8));
644} 648}
645 649
646int rs600_irq_set(struct radeon_device *rdev) 650int rs600_irq_set(struct radeon_device *rdev)
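With the encode/commit split, rs600 PTE construction is side-effect free and needs no device pointer. A hedged usage sketch (the flag combination is illustrative, using radeon's RADEON_GART_PAGE_* flags; only VALID, SNOOP and DUMMY appear in these hunks):

	uint64_t entry;

	entry = rs600_gart_get_page_entry(dma_addr,
					  RADEON_GART_PAGE_VALID |
					  RADEON_GART_PAGE_READ |
					  RADEON_GART_PAGE_WRITE |
					  RADEON_GART_PAGE_SNOOP);
	rs600_gart_set_page(rdev, i, entry);	/* single 64-bit writeq into the table */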
diff --git a/drivers/gpu/drm/radeon/si_dma.c b/drivers/gpu/drm/radeon/si_dma.c
index aa7b872b2c43..83207929fc62 100644
--- a/drivers/gpu/drm/radeon/si_dma.c
+++ b/drivers/gpu/drm/radeon/si_dma.c
@@ -123,7 +123,6 @@ void si_dma_vm_write_pages(struct radeon_device *rdev,
123 for (; ndw > 0; ndw -= 2, --count, pe += 8) { 123 for (; ndw > 0; ndw -= 2, --count, pe += 8) {
124 if (flags & R600_PTE_SYSTEM) { 124 if (flags & R600_PTE_SYSTEM) {
125 value = radeon_vm_map_gart(rdev, addr); 125 value = radeon_vm_map_gart(rdev, addr);
126 value &= 0xFFFFFFFFFFFFF000ULL;
127 } else if (flags & R600_PTE_VALID) { 126 } else if (flags & R600_PTE_VALID) {
128 value = addr; 127 value = addr;
129 } else { 128 } else {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 7b5d22110f25..6c6b655defcf 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -406,11 +406,9 @@ int vmw_3d_resource_inc(struct vmw_private *dev_priv,
 		if (unlikely(ret != 0))
 			--dev_priv->num_3d_resources;
 	} else if (unhide_svga) {
-		mutex_lock(&dev_priv->hw_mutex);
 		vmw_write(dev_priv, SVGA_REG_ENABLE,
 			  vmw_read(dev_priv, SVGA_REG_ENABLE) &
 			  ~SVGA_REG_ENABLE_HIDE);
-		mutex_unlock(&dev_priv->hw_mutex);
 	}
 
 	mutex_unlock(&dev_priv->release_mutex);
@@ -433,13 +431,10 @@ void vmw_3d_resource_dec(struct vmw_private *dev_priv,
 	mutex_lock(&dev_priv->release_mutex);
 	if (unlikely(--dev_priv->num_3d_resources == 0))
 		vmw_release_device(dev_priv);
-	else if (hide_svga) {
-		mutex_lock(&dev_priv->hw_mutex);
+	else if (hide_svga)
 		vmw_write(dev_priv, SVGA_REG_ENABLE,
 			  vmw_read(dev_priv, SVGA_REG_ENABLE) |
 			  SVGA_REG_ENABLE_HIDE);
-		mutex_unlock(&dev_priv->hw_mutex);
-	}
 
 	n3d = (int32_t) dev_priv->num_3d_resources;
 	mutex_unlock(&dev_priv->release_mutex);
@@ -600,12 +595,14 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 	dev_priv->dev = dev;
 	dev_priv->vmw_chipset = chipset;
 	dev_priv->last_read_seqno = (uint32_t) -100;
-	mutex_init(&dev_priv->hw_mutex);
 	mutex_init(&dev_priv->cmdbuf_mutex);
 	mutex_init(&dev_priv->release_mutex);
 	mutex_init(&dev_priv->binding_mutex);
 	rwlock_init(&dev_priv->resource_lock);
 	ttm_lock_init(&dev_priv->reservation_sem);
+	spin_lock_init(&dev_priv->hw_lock);
+	spin_lock_init(&dev_priv->waiter_lock);
+	spin_lock_init(&dev_priv->cap_lock);
 
 	for (i = vmw_res_context; i < vmw_res_max; ++i) {
 		idr_init(&dev_priv->res_idr[i]);
@@ -626,14 +623,11 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 
 	dev_priv->enable_fb = enable_fbdev;
 
-	mutex_lock(&dev_priv->hw_mutex);
-
 	vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
 	svga_id = vmw_read(dev_priv, SVGA_REG_ID);
 	if (svga_id != SVGA_ID_2) {
 		ret = -ENOSYS;
 		DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id);
-		mutex_unlock(&dev_priv->hw_mutex);
 		goto out_err0;
 	}
 
@@ -683,10 +677,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 		dev_priv->prim_bb_mem = dev_priv->vram_size;
 
 	ret = vmw_dma_masks(dev_priv);
-	if (unlikely(ret != 0)) {
-		mutex_unlock(&dev_priv->hw_mutex);
+	if (unlikely(ret != 0))
 		goto out_err0;
-	}
 
 	/*
 	 * Limit back buffer size to VRAM size. Remove this once
@@ -695,8 +687,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 	if (dev_priv->prim_bb_mem > dev_priv->vram_size)
 		dev_priv->prim_bb_mem = dev_priv->vram_size;
 
-	mutex_unlock(&dev_priv->hw_mutex);
-
 	vmw_print_capabilities(dev_priv->capabilities);
 
 	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
@@ -1160,9 +1150,7 @@ static int vmw_master_set(struct drm_device *dev,
 		if (unlikely(ret != 0))
 			return ret;
 		vmw_kms_save_vga(dev_priv);
-		mutex_lock(&dev_priv->hw_mutex);
 		vmw_write(dev_priv, SVGA_REG_TRACES, 0);
-		mutex_unlock(&dev_priv->hw_mutex);
 	}
 
 	if (active) {
@@ -1196,9 +1184,7 @@ out_no_active_lock:
 	if (!dev_priv->enable_fb) {
 		vmw_kms_restore_vga(dev_priv);
 		vmw_3d_resource_dec(dev_priv, true);
-		mutex_lock(&dev_priv->hw_mutex);
 		vmw_write(dev_priv, SVGA_REG_TRACES, 1);
-		mutex_unlock(&dev_priv->hw_mutex);
 	}
 	return ret;
 }
@@ -1233,9 +1219,7 @@ static void vmw_master_drop(struct drm_device *dev,
 			DRM_ERROR("Unable to clean VRAM on master drop.\n");
 		vmw_kms_restore_vga(dev_priv);
 		vmw_3d_resource_dec(dev_priv, true);
-		mutex_lock(&dev_priv->hw_mutex);
 		vmw_write(dev_priv, SVGA_REG_TRACES, 1);
-		mutex_unlock(&dev_priv->hw_mutex);
 	}
 
 	dev_priv->active_master = &dev_priv->fbdev_master;
@@ -1367,10 +1351,8 @@ static void vmw_pm_complete(struct device *kdev)
 	struct drm_device *dev = pci_get_drvdata(pdev);
 	struct vmw_private *dev_priv = vmw_priv(dev);
 
-	mutex_lock(&dev_priv->hw_mutex);
 	vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
 	(void) vmw_read(dev_priv, SVGA_REG_ID);
-	mutex_unlock(&dev_priv->hw_mutex);
 
 	/**
 	 * Reclaim 3d reference held by fbdev and potentially
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 4ee799b43d5d..d26a6daa9719 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -399,7 +399,8 @@ struct vmw_private {
 	uint32_t memory_size;
 	bool has_gmr;
 	bool has_mob;
-	struct mutex hw_mutex;
+	spinlock_t hw_lock;
+	spinlock_t cap_lock;
 
 	/*
 	 * VGA registers.
@@ -449,8 +450,9 @@ struct vmw_private {
 	atomic_t marker_seq;
 	wait_queue_head_t fence_queue;
 	wait_queue_head_t fifo_queue;
-	int fence_queue_waiters; /* Protected by hw_mutex */
-	int goal_queue_waiters; /* Protected by hw_mutex */
+	spinlock_t waiter_lock;
+	int fence_queue_waiters; /* Protected by waiter_lock */
+	int goal_queue_waiters; /* Protected by waiter_lock */
 	atomic_t fifo_queue_waiters;
 	uint32_t last_read_seqno;
 	spinlock_t irq_lock;
@@ -553,20 +555,35 @@ static inline struct vmw_master *vmw_master(struct drm_master *master)
 	return (struct vmw_master *) master->driver_priv;
 }
 
+/*
+ * The locking here is fine-grained, so that it is performed once
+ * for every read- and write operation. This is of course costly, but we
+ * don't perform much register access in the timing critical paths anyway.
+ * Instead we have the extra benefit of being sure that we don't forget
+ * the hw lock around register accesses.
+ */
 static inline void vmw_write(struct vmw_private *dev_priv,
 			     unsigned int offset, uint32_t value)
 {
+	unsigned long irq_flags;
+
+	spin_lock_irqsave(&dev_priv->hw_lock, irq_flags);
 	outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
 	outl(value, dev_priv->io_start + VMWGFX_VALUE_PORT);
+	spin_unlock_irqrestore(&dev_priv->hw_lock, irq_flags);
 }
 
 static inline uint32_t vmw_read(struct vmw_private *dev_priv,
 				unsigned int offset)
 {
-	uint32_t val;
+	unsigned long irq_flags;
+	u32 val;
 
+	spin_lock_irqsave(&dev_priv->hw_lock, irq_flags);
 	outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
 	val = inl(dev_priv->io_start + VMWGFX_VALUE_PORT);
+	spin_unlock_irqrestore(&dev_priv->hw_lock, irq_flags);
+
 	return val;
 }
 
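One consequence worth spelling out: with hw_lock taken inside the accessors, each register access is self-contained, but a read-modify-write pair is no longer atomic as a unit. Call sites that need that, such as the SVGA_REG_ENABLE update in vmw_3d_resource_inc() above, rely on an outer lock (release_mutex in that path). A sketch of the pattern:

	/* each vmw_read/vmw_write takes hw_lock internally; the caller's
	 * release_mutex keeps the read-modify-write pair atomic */
	vmw_write(dev_priv, SVGA_REG_ENABLE,
		  vmw_read(dev_priv, SVGA_REG_ENABLE) & ~SVGA_REG_ENABLE_HIDE);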
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index b7594cb758af..945f1e0dad92 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -35,7 +35,7 @@ struct vmw_fence_manager {
35 struct vmw_private *dev_priv; 35 struct vmw_private *dev_priv;
36 spinlock_t lock; 36 spinlock_t lock;
37 struct list_head fence_list; 37 struct list_head fence_list;
38 struct work_struct work, ping_work; 38 struct work_struct work;
39 u32 user_fence_size; 39 u32 user_fence_size;
40 u32 fence_size; 40 u32 fence_size;
41 u32 event_fence_action_size; 41 u32 event_fence_action_size;
@@ -134,14 +134,6 @@ static const char *vmw_fence_get_timeline_name(struct fence *f)
134 return "svga"; 134 return "svga";
135} 135}
136 136
137static void vmw_fence_ping_func(struct work_struct *work)
138{
139 struct vmw_fence_manager *fman =
140 container_of(work, struct vmw_fence_manager, ping_work);
141
142 vmw_fifo_ping_host(fman->dev_priv, SVGA_SYNC_GENERIC);
143}
144
145static bool vmw_fence_enable_signaling(struct fence *f) 137static bool vmw_fence_enable_signaling(struct fence *f)
146{ 138{
147 struct vmw_fence_obj *fence = 139 struct vmw_fence_obj *fence =
@@ -155,11 +147,7 @@ static bool vmw_fence_enable_signaling(struct fence *f)
155 if (seqno - fence->base.seqno < VMW_FENCE_WRAP) 147 if (seqno - fence->base.seqno < VMW_FENCE_WRAP)
156 return false; 148 return false;
157 149
158 if (mutex_trylock(&dev_priv->hw_mutex)) { 150 vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
159 vmw_fifo_ping_host_locked(dev_priv, SVGA_SYNC_GENERIC);
160 mutex_unlock(&dev_priv->hw_mutex);
161 } else
162 schedule_work(&fman->ping_work);
163 151
164 return true; 152 return true;
165} 153}
@@ -305,7 +293,6 @@ struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
305 INIT_LIST_HEAD(&fman->fence_list); 293 INIT_LIST_HEAD(&fman->fence_list);
306 INIT_LIST_HEAD(&fman->cleanup_list); 294 INIT_LIST_HEAD(&fman->cleanup_list);
307 INIT_WORK(&fman->work, &vmw_fence_work_func); 295 INIT_WORK(&fman->work, &vmw_fence_work_func);
308 INIT_WORK(&fman->ping_work, &vmw_fence_ping_func);
309 fman->fifo_down = true; 296 fman->fifo_down = true;
310 fman->user_fence_size = ttm_round_pot(sizeof(struct vmw_user_fence)); 297 fman->user_fence_size = ttm_round_pot(sizeof(struct vmw_user_fence));
311 fman->fence_size = ttm_round_pot(sizeof(struct vmw_fence_obj)); 298 fman->fence_size = ttm_round_pot(sizeof(struct vmw_fence_obj));
@@ -323,7 +310,6 @@ void vmw_fence_manager_takedown(struct vmw_fence_manager *fman)
323 bool lists_empty; 310 bool lists_empty;
324 311
325 (void) cancel_work_sync(&fman->work); 312 (void) cancel_work_sync(&fman->work);
326 (void) cancel_work_sync(&fman->ping_work);
327 313
328 spin_lock_irqsave(&fman->lock, irq_flags); 314 spin_lock_irqsave(&fman->lock, irq_flags);
329 lists_empty = list_empty(&fman->fence_list) && 315 lists_empty = list_empty(&fman->fence_list) &&
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
index 09e10aefcd8e..39f2b03888e7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
@@ -44,10 +44,10 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
44 if (!dev_priv->has_mob) 44 if (!dev_priv->has_mob)
45 return false; 45 return false;
46 46
47 mutex_lock(&dev_priv->hw_mutex); 47 spin_lock(&dev_priv->cap_lock);
48 vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_3D); 48 vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_3D);
49 result = vmw_read(dev_priv, SVGA_REG_DEV_CAP); 49 result = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
50 mutex_unlock(&dev_priv->hw_mutex); 50 spin_unlock(&dev_priv->cap_lock);
51 51
52 return (result != 0); 52 return (result != 0);
53 } 53 }
@@ -120,7 +120,6 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
120 DRM_INFO("height %d\n", vmw_read(dev_priv, SVGA_REG_HEIGHT)); 120 DRM_INFO("height %d\n", vmw_read(dev_priv, SVGA_REG_HEIGHT));
121 DRM_INFO("bpp %d\n", vmw_read(dev_priv, SVGA_REG_BITS_PER_PIXEL)); 121 DRM_INFO("bpp %d\n", vmw_read(dev_priv, SVGA_REG_BITS_PER_PIXEL));
122 122
123 mutex_lock(&dev_priv->hw_mutex);
124 dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE); 123 dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE);
125 dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE); 124 dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE);
126 dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES); 125 dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES);
@@ -143,7 +142,6 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
143 mb(); 142 mb();
144 143
145 vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1); 144 vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1);
146 mutex_unlock(&dev_priv->hw_mutex);
147 145
148 max = ioread32(fifo_mem + SVGA_FIFO_MAX); 146 max = ioread32(fifo_mem + SVGA_FIFO_MAX);
149 min = ioread32(fifo_mem + SVGA_FIFO_MIN); 147 min = ioread32(fifo_mem + SVGA_FIFO_MIN);
@@ -160,31 +158,28 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
160 return vmw_fifo_send_fence(dev_priv, &dummy); 158 return vmw_fifo_send_fence(dev_priv, &dummy);
161} 159}
162 160
163void vmw_fifo_ping_host_locked(struct vmw_private *dev_priv, uint32_t reason) 161void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
164{ 162{
165 __le32 __iomem *fifo_mem = dev_priv->mmio_virt; 163 __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
164 static DEFINE_SPINLOCK(ping_lock);
165 unsigned long irq_flags;
166 166
167 /*
168 * The ping_lock is needed because we don't have an atomic
169 * test-and-set of the SVGA_FIFO_BUSY register.
170 */
171 spin_lock_irqsave(&ping_lock, irq_flags);
167 if (unlikely(ioread32(fifo_mem + SVGA_FIFO_BUSY) == 0)) { 172 if (unlikely(ioread32(fifo_mem + SVGA_FIFO_BUSY) == 0)) {
168 iowrite32(1, fifo_mem + SVGA_FIFO_BUSY); 173 iowrite32(1, fifo_mem + SVGA_FIFO_BUSY);
169 vmw_write(dev_priv, SVGA_REG_SYNC, reason); 174 vmw_write(dev_priv, SVGA_REG_SYNC, reason);
170 } 175 }
171} 176 spin_unlock_irqrestore(&ping_lock, irq_flags);
172
173void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
174{
175 mutex_lock(&dev_priv->hw_mutex);
176
177 vmw_fifo_ping_host_locked(dev_priv, reason);
178
179 mutex_unlock(&dev_priv->hw_mutex);
180} 177}
181 178
182void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) 179void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
183{ 180{
184 __le32 __iomem *fifo_mem = dev_priv->mmio_virt; 181 __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
185 182
186 mutex_lock(&dev_priv->hw_mutex);
187
188 vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC); 183 vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
189 while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0) 184 while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0)
190 ; 185 ;
@@ -198,7 +193,6 @@ void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
198 vmw_write(dev_priv, SVGA_REG_TRACES, 193 vmw_write(dev_priv, SVGA_REG_TRACES,
199 dev_priv->traces_state); 194 dev_priv->traces_state);
200 195
201 mutex_unlock(&dev_priv->hw_mutex);
202 vmw_marker_queue_takedown(&fifo->marker_queue); 196 vmw_marker_queue_takedown(&fifo->marker_queue);
203 197
204 if (likely(fifo->static_buffer != NULL)) { 198 if (likely(fifo->static_buffer != NULL)) {
@@ -271,7 +265,7 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv,
271 return vmw_fifo_wait_noirq(dev_priv, bytes, 265 return vmw_fifo_wait_noirq(dev_priv, bytes,
272 interruptible, timeout); 266 interruptible, timeout);
273 267
274 mutex_lock(&dev_priv->hw_mutex); 268 spin_lock(&dev_priv->waiter_lock);
275 if (atomic_add_return(1, &dev_priv->fifo_queue_waiters) > 0) { 269 if (atomic_add_return(1, &dev_priv->fifo_queue_waiters) > 0) {
276 spin_lock_irqsave(&dev_priv->irq_lock, irq_flags); 270 spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
277 outl(SVGA_IRQFLAG_FIFO_PROGRESS, 271 outl(SVGA_IRQFLAG_FIFO_PROGRESS,
@@ -280,7 +274,7 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv,
280 vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask); 274 vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
281 spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); 275 spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
282 } 276 }
283 mutex_unlock(&dev_priv->hw_mutex); 277 spin_unlock(&dev_priv->waiter_lock);
284 278
285 if (interruptible) 279 if (interruptible)
286 ret = wait_event_interruptible_timeout 280 ret = wait_event_interruptible_timeout
@@ -296,14 +290,14 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv,
296 else if (likely(ret > 0)) 290 else if (likely(ret > 0))
297 ret = 0; 291 ret = 0;
298 292
299 mutex_lock(&dev_priv->hw_mutex); 293 spin_lock(&dev_priv->waiter_lock);
300 if (atomic_dec_and_test(&dev_priv->fifo_queue_waiters)) { 294 if (atomic_dec_and_test(&dev_priv->fifo_queue_waiters)) {
301 spin_lock_irqsave(&dev_priv->irq_lock, irq_flags); 295 spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
302 dev_priv->irq_mask &= ~SVGA_IRQFLAG_FIFO_PROGRESS; 296 dev_priv->irq_mask &= ~SVGA_IRQFLAG_FIFO_PROGRESS;
303 vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask); 297 vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
304 spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); 298 spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
305 } 299 }
306 mutex_unlock(&dev_priv->hw_mutex); 300 spin_unlock(&dev_priv->waiter_lock);
307 301
308 return ret; 302 return ret;
309} 303}
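A note on the function-local lock in vmw_fifo_ping_host() above: DEFINE_SPINLOCK inside the function still yields a single driver-global lock (static storage), merely scoped to the only function that needs it. What it protects is an emulated test-and-set, as a condensed sketch:

	spin_lock_irqsave(&ping_lock, irq_flags);
	if (ioread32(fifo_mem + SVGA_FIFO_BUSY) == 0) {	/* test...    */
		iowrite32(1, fifo_mem + SVGA_FIFO_BUSY);	/* ...and set */
		vmw_write(dev_priv, SVGA_REG_SYNC, reason);
	}
	spin_unlock_irqrestore(&ping_lock, irq_flags);

Without the lock, two CPUs could both observe FIFO_BUSY as 0 and ping the host twice; with it, the trylock/schedule_work fallback removed from vmwgfx_fence.c is no longer needed.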
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index 37881ecf5d7a..69c8ce23123c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -135,13 +135,13 @@ static int vmw_fill_compat_cap(struct vmw_private *dev_priv, void *bounce,
 		(pair_offset + max_size * sizeof(SVGA3dCapPair)) / sizeof(u32);
 	compat_cap->header.type = SVGA3DCAPS_RECORD_DEVCAPS;
 
-	mutex_lock(&dev_priv->hw_mutex);
+	spin_lock(&dev_priv->cap_lock);
 	for (i = 0; i < max_size; ++i) {
 		vmw_write(dev_priv, SVGA_REG_DEV_CAP, i);
 		compat_cap->pairs[i][0] = i;
 		compat_cap->pairs[i][1] = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
 	}
-	mutex_unlock(&dev_priv->hw_mutex);
+	spin_unlock(&dev_priv->cap_lock);
 
 	return 0;
 }
@@ -191,12 +191,12 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
 		if (num > SVGA3D_DEVCAP_MAX)
 			num = SVGA3D_DEVCAP_MAX;
 
-		mutex_lock(&dev_priv->hw_mutex);
+		spin_lock(&dev_priv->cap_lock);
 		for (i = 0; i < num; ++i) {
 			vmw_write(dev_priv, SVGA_REG_DEV_CAP, i);
 			*bounce32++ = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
 		}
-		mutex_unlock(&dev_priv->hw_mutex);
+		spin_unlock(&dev_priv->cap_lock);
 	} else if (gb_objects) {
 		ret = vmw_fill_compat_cap(dev_priv, bounce, size);
 		if (unlikely(ret != 0))
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
index 0c423766c441..9fe9827ee499 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
@@ -62,13 +62,8 @@ irqreturn_t vmw_irq_handler(int irq, void *arg)
 
 static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t seqno)
 {
-	uint32_t busy;
 
-	mutex_lock(&dev_priv->hw_mutex);
-	busy = vmw_read(dev_priv, SVGA_REG_BUSY);
-	mutex_unlock(&dev_priv->hw_mutex);
-
-	return (busy == 0);
+	return (vmw_read(dev_priv, SVGA_REG_BUSY) == 0);
 }
 
 void vmw_update_seqno(struct vmw_private *dev_priv,
@@ -184,7 +179,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
 
 void vmw_seqno_waiter_add(struct vmw_private *dev_priv)
 {
-	mutex_lock(&dev_priv->hw_mutex);
+	spin_lock(&dev_priv->waiter_lock);
 	if (dev_priv->fence_queue_waiters++ == 0) {
 		unsigned long irq_flags;
 
@@ -195,12 +190,12 @@ void vmw_seqno_waiter_add(struct vmw_private *dev_priv)
 		vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
 		spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
 	}
-	mutex_unlock(&dev_priv->hw_mutex);
+	spin_unlock(&dev_priv->waiter_lock);
 }
 
 void vmw_seqno_waiter_remove(struct vmw_private *dev_priv)
 {
-	mutex_lock(&dev_priv->hw_mutex);
+	spin_lock(&dev_priv->waiter_lock);
 	if (--dev_priv->fence_queue_waiters == 0) {
 		unsigned long irq_flags;
 
@@ -209,13 +204,13 @@ void vmw_seqno_waiter_remove(struct vmw_private *dev_priv)
 		vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
 		spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
 	}
-	mutex_unlock(&dev_priv->hw_mutex);
+	spin_unlock(&dev_priv->waiter_lock);
 }
 
 
 void vmw_goal_waiter_add(struct vmw_private *dev_priv)
 {
-	mutex_lock(&dev_priv->hw_mutex);
+	spin_lock(&dev_priv->waiter_lock);
 	if (dev_priv->goal_queue_waiters++ == 0) {
 		unsigned long irq_flags;
 
@@ -226,12 +221,12 @@ void vmw_goal_waiter_add(struct vmw_private *dev_priv)
 		vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
 		spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
 	}
-	mutex_unlock(&dev_priv->hw_mutex);
+	spin_unlock(&dev_priv->waiter_lock);
 }
 
 void vmw_goal_waiter_remove(struct vmw_private *dev_priv)
 {
-	mutex_lock(&dev_priv->hw_mutex);
+	spin_lock(&dev_priv->waiter_lock);
 	if (--dev_priv->goal_queue_waiters == 0) {
 		unsigned long irq_flags;
 
@@ -240,7 +235,7 @@ void vmw_goal_waiter_remove(struct vmw_private *dev_priv)
 		vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
 		spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
 	}
-	mutex_unlock(&dev_priv->hw_mutex);
+	spin_unlock(&dev_priv->waiter_lock);
 }
 
 int vmw_wait_seqno(struct vmw_private *dev_priv,
@@ -315,9 +310,7 @@ void vmw_irq_uninstall(struct drm_device *dev)
 	if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
 		return;
 
-	mutex_lock(&dev_priv->hw_mutex);
 	vmw_write(dev_priv, SVGA_REG_IRQMASK, 0);
-	mutex_unlock(&dev_priv->hw_mutex);
 
 	status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
 	outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
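All four waiter helpers above follow the same first-on/last-off pattern, now serialized by the lightweight waiter_lock instead of the heavyweight hw_mutex. As a condensed sketch (the comments stand in for the SVGA_IRQFLAG/SVGA_REG_IRQMASK updates shown in the hunks):

	spin_lock(&dev_priv->waiter_lock);
	if (dev_priv->fence_queue_waiters++ == 0)
		/* first waiter: unmask the IRQ source */;
	spin_unlock(&dev_priv->waiter_lock);

	spin_lock(&dev_priv->waiter_lock);
	if (--dev_priv->fence_queue_waiters == 0)
		/* last waiter: mask it again */;
	spin_unlock(&dev_priv->waiter_lock);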
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 3725b521d931..8725b79e7847 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -1828,9 +1828,7 @@ vmw_du_connector_detect(struct drm_connector *connector, bool force)
1828 struct vmw_private *dev_priv = vmw_priv(dev); 1828 struct vmw_private *dev_priv = vmw_priv(dev);
1829 struct vmw_display_unit *du = vmw_connector_to_du(connector); 1829 struct vmw_display_unit *du = vmw_connector_to_du(connector);
1830 1830
1831 mutex_lock(&dev_priv->hw_mutex);
1832 num_displays = vmw_read(dev_priv, SVGA_REG_NUM_DISPLAYS); 1831 num_displays = vmw_read(dev_priv, SVGA_REG_NUM_DISPLAYS);
1833 mutex_unlock(&dev_priv->hw_mutex);
1834 1832
1835 return ((vmw_connector_to_du(connector)->unit < num_displays && 1833 return ((vmw_connector_to_du(connector)->unit < num_displays &&
1836 du->pref_active) ? 1834 du->pref_active) ?
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 31e8308ba899..ab838d9e28b6 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -881,6 +881,7 @@ config I2C_XLR
881config I2C_RCAR 881config I2C_RCAR
882 tristate "Renesas R-Car I2C Controller" 882 tristate "Renesas R-Car I2C Controller"
883 depends on ARCH_SHMOBILE || COMPILE_TEST 883 depends on ARCH_SHMOBILE || COMPILE_TEST
884 select I2C_SLAVE
884 help 885 help
885 If you say yes to this option, support will be included for the 886 If you say yes to this option, support will be included for the
886 R-Car I2C controller. 887 R-Car I2C controller.
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index bff20a589621..958c8db4ec30 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -785,14 +785,16 @@ static int s3c24xx_i2c_xfer(struct i2c_adapter *adap,
 	int ret;
 
 	pm_runtime_get_sync(&adap->dev);
-	clk_prepare_enable(i2c->clk);
+	ret = clk_enable(i2c->clk);
+	if (ret)
+		return ret;
 
 	for (retry = 0; retry < adap->retries; retry++) {
 
 		ret = s3c24xx_i2c_doxfer(i2c, msgs, num);
 
 		if (ret != -EAGAIN) {
-			clk_disable_unprepare(i2c->clk);
+			clk_disable(i2c->clk);
 			pm_runtime_put(&adap->dev);
 			return ret;
 		}
@@ -802,7 +804,7 @@ static int s3c24xx_i2c_xfer(struct i2c_adapter *adap,
 		udelay(100);
 	}
 
-	clk_disable_unprepare(i2c->clk);
+	clk_disable(i2c->clk);
 	pm_runtime_put(&adap->dev);
 	return -EREMOTEIO;
 }
@@ -1197,7 +1199,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
 
 	clk_prepare_enable(i2c->clk);
 	ret = s3c24xx_i2c_init(i2c);
-	clk_disable_unprepare(i2c->clk);
+	clk_disable(i2c->clk);
 	if (ret != 0) {
 		dev_err(&pdev->dev, "I2C controller init failed\n");
 		return ret;
@@ -1210,6 +1212,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
 		i2c->irq = ret = platform_get_irq(pdev, 0);
 		if (ret <= 0) {
 			dev_err(&pdev->dev, "cannot find IRQ\n");
+			clk_unprepare(i2c->clk);
 			return ret;
 		}
 
@@ -1218,6 +1221,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
 
 		if (ret != 0) {
 			dev_err(&pdev->dev, "cannot claim IRQ %d\n", i2c->irq);
+			clk_unprepare(i2c->clk);
 			return ret;
 		}
 	}
@@ -1225,6 +1229,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
 	ret = s3c24xx_i2c_register_cpufreq(i2c);
 	if (ret < 0) {
 		dev_err(&pdev->dev, "failed to register cpufreq notifier\n");
+		clk_unprepare(i2c->clk);
 		return ret;
 	}
 
@@ -1241,6 +1246,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
 	if (ret < 0) {
 		dev_err(&pdev->dev, "failed to add bus to i2c core\n");
 		s3c24xx_i2c_deregister_cpufreq(i2c);
+		clk_unprepare(i2c->clk);
 		return ret;
 	}
 
@@ -1262,6 +1268,8 @@ static int s3c24xx_i2c_remove(struct platform_device *pdev)
 {
 	struct s3c24xx_i2c *i2c = platform_get_drvdata(pdev);
 
+	clk_unprepare(i2c->clk);
+
 	pm_runtime_disable(&i2c->adap.dev);
 	pm_runtime_disable(&pdev->dev);
 
@@ -1293,13 +1301,16 @@ static int s3c24xx_i2c_resume_noirq(struct device *dev)
 {
 	struct platform_device *pdev = to_platform_device(dev);
 	struct s3c24xx_i2c *i2c = platform_get_drvdata(pdev);
+	int ret;
 
 	if (!IS_ERR(i2c->sysreg))
 		regmap_write(i2c->sysreg, EXYNOS5_SYS_I2C_CFG, i2c->sys_i2c_cfg);
 
-	clk_prepare_enable(i2c->clk);
+	ret = clk_enable(i2c->clk);
+	if (ret)
+		return ret;
 	s3c24xx_i2c_init(i2c);
-	clk_disable_unprepare(i2c->clk);
+	clk_disable(i2c->clk);
 	i2c->suspended = 0;
 
 	return 0;
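
The s3c2410 hunks above follow the usual split in the common clk API: clk_prepare() may sleep and so is done once at probe time (undone with clk_unprepare() on the error and remove paths), while clk_enable()/clk_disable() are cheap enough for the transfer path, and clk_enable()'s return value is now checked since it can fail. A minimal sketch of that pattern, using a hypothetical "foo" driver rather than the s3c2410 code itself:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

struct foo_dev {
	struct clk *clk;	/* hypothetical driver state */
};

/* probe path: prepare once, while sleeping is still allowed */
static int foo_probe_clk(struct device *dev, struct foo_dev *foo)
{
	foo->clk = devm_clk_get(dev, "foo");
	if (IS_ERR(foo->clk))
		return PTR_ERR(foo->clk);

	return clk_prepare(foo->clk);	/* paired with clk_unprepare() in remove */
}

/* transfer path: enable/disable only, and check clk_enable()'s result */
static int foo_xfer(struct foo_dev *foo)
{
	int ret = clk_enable(foo->clk);

	if (ret)
		return ret;
	/* ... perform the transfer ... */
	clk_disable(foo->clk);
	return 0;
}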
diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c
index 440d5dbc8b5f..007818b3e174 100644
--- a/drivers/i2c/busses/i2c-sh_mobile.c
+++ b/drivers/i2c/busses/i2c-sh_mobile.c
@@ -139,6 +139,7 @@ struct sh_mobile_i2c_data {
 	int pos;
 	int sr;
 	bool send_stop;
+	bool stop_after_dma;
 
 	struct resource *res;
 	struct dma_chan *dma_tx;
@@ -407,7 +408,7 @@ static int sh_mobile_i2c_isr_tx(struct sh_mobile_i2c_data *pd)
 
 	if (pd->pos == pd->msg->len) {
 		/* Send stop if we haven't yet (DMA case) */
-		if (pd->send_stop && (iic_rd(pd, ICCR) & ICCR_BBSY))
+		if (pd->send_stop && pd->stop_after_dma)
 			i2c_op(pd, OP_TX_STOP, 0);
 		return 1;
 	}
@@ -449,6 +450,13 @@ static int sh_mobile_i2c_isr_rx(struct sh_mobile_i2c_data *pd)
 		real_pos = pd->pos - 2;
 
 		if (pd->pos == pd->msg->len) {
+			if (pd->stop_after_dma) {
+				/* Simulate PIO end condition after DMA transfer */
+				i2c_op(pd, OP_RX_STOP, 0);
+				pd->pos++;
+				break;
+			}
+
 			if (real_pos < 0) {
 				i2c_op(pd, OP_RX_STOP, 0);
 				break;
@@ -536,6 +544,7 @@ static void sh_mobile_i2c_dma_callback(void *data)
 
 	sh_mobile_i2c_dma_unmap(pd);
 	pd->pos = pd->msg->len;
+	pd->stop_after_dma = true;
 
 	iic_set_clr(pd, ICIC, 0, ICIC_TDMAE | ICIC_RDMAE);
 }
@@ -726,6 +735,7 @@ static int sh_mobile_i2c_xfer(struct i2c_adapter *adapter,
 		bool do_start = pd->send_stop || !i;
 		msg = &msgs[i];
 		pd->send_stop = i == num - 1 || msg->flags & I2C_M_STOP;
+		pd->stop_after_dma = false;
 
 		err = start_ch(pd, msg, do_start);
 		if (err)
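
The new stop_after_dma flag is a one-shot handshake between the DMA completion callback and the interrupt handler: the callback records that only the stop condition remains, and the ISR consumes the flag to finish the message the same way a PIO transfer would. A reduced sketch of that shape, with hypothetical foo_* names standing in for the sh_mobile internals:

#include <linux/types.h>

struct foo_i2c {
	bool stop_after_dma;	/* set by DMA callback, consumed in the ISR */
};

static void foo_send_stop(void)
{
	/* controller-specific stop-condition register write */
}

/* DMA completion callback: the data phase is done, only the stop remains */
static void foo_dma_callback(void *data)
{
	struct foo_i2c *pd = data;

	pd->stop_after_dma = true;
}

/* interrupt path at end of message: emit the stop exactly once */
static void foo_end_of_msg(struct foo_i2c *pd)
{
	if (pd->stop_after_dma) {
		foo_send_stop();
		pd->stop_after_dma = false;	/* re-armed per message */
	}
}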
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 39d25a8cb1ad..e9eae57a2b50 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -2972,6 +2972,7 @@ trace:
 }
 EXPORT_SYMBOL(i2c_smbus_xfer);
 
+#if IS_ENABLED(CONFIG_I2C_SLAVE)
 int i2c_slave_register(struct i2c_client *client, i2c_slave_cb_t slave_cb)
 {
 	int ret;
@@ -3019,6 +3020,7 @@ int i2c_slave_unregister(struct i2c_client *client)
 	return ret;
 }
 EXPORT_SYMBOL_GPL(i2c_slave_unregister);
+#endif
 
 MODULE_AUTHOR("Simon G. Vogl <simon@tk.uni-linz.ac.at>");
 MODULE_DESCRIPTION("I2C-Bus main module");
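
The #if IS_ENABLED(CONFIG_I2C_SLAVE) guard compiles the slave API only when the option is =y or =m, which pairs with the new "select I2C_SLAVE" in the R-Car Kconfig entry above. A generic sketch of the idiom, with CONFIG_FOO standing in for a real Kconfig symbol:

#include <linux/errno.h>
#include <linux/kconfig.h>

#if IS_ENABLED(CONFIG_FOO)		/* true for CONFIG_FOO=y and CONFIG_FOO=m */
int foo_register(void);
#else
static inline int foo_register(void)
{
	return -ENOTSUPP;		/* stub keeps callers compiling */
}
#endif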
diff --git a/drivers/i2c/i2c-slave-eeprom.c b/drivers/i2c/i2c-slave-eeprom.c
index 6631400b5f02..cf9b09db092f 100644
--- a/drivers/i2c/i2c-slave-eeprom.c
+++ b/drivers/i2c/i2c-slave-eeprom.c
@@ -74,7 +74,7 @@ static ssize_t i2c_slave_eeprom_bin_read(struct file *filp, struct kobject *kobj
 	struct eeprom_data *eeprom;
 	unsigned long flags;
 
-	if (off + count >= attr->size)
+	if (off + count > attr->size)
 		return -EFBIG;
 
 	eeprom = dev_get_drvdata(container_of(kobj, struct device, kobj));
@@ -92,7 +92,7 @@ static ssize_t i2c_slave_eeprom_bin_write(struct file *filp, struct kobject *kob
 	struct eeprom_data *eeprom;
 	unsigned long flags;
 
-	if (off + count >= attr->size)
+	if (off + count > attr->size)
 		return -EFBIG;
 
 	eeprom = dev_get_drvdata(container_of(kobj, struct device, kobj));
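
The eeprom change is an off-by-one fix: an access that ends exactly at the last byte (off + count == attr->size) is legal, so the reject condition must be '>', not '>='. As a standalone statement of the check (assuming off + count cannot overflow, which the sysfs layer already guarantees here):

#include <stdbool.h>
#include <stddef.h>

/* true iff [off, off + count) fits in a buffer of 'size' bytes */
static bool range_ok(size_t off, size_t count, size_t size)
{
	return off + count <= size;
}

/* range_ok(size - 1, 1, size) is true; the old '>=' test wrongly rejected it. */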
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index e6c23b9eab33..5db1a8cc388d 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -123,7 +123,6 @@ static int (*uverbs_ex_cmd_table[])(struct ib_uverbs_file *file,
 				    struct ib_udata *uhw) = {
 	[IB_USER_VERBS_EX_CMD_CREATE_FLOW]	= ib_uverbs_ex_create_flow,
 	[IB_USER_VERBS_EX_CMD_DESTROY_FLOW]	= ib_uverbs_ex_destroy_flow,
-	[IB_USER_VERBS_EX_CMD_QUERY_DEVICE]	= ib_uverbs_ex_query_device
 };
 
 static void ib_uverbs_add_one(struct ib_device *device);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 8ba80a6d3a46..d7562beb5423 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -98,15 +98,9 @@ enum {
 
 	IPOIB_MCAST_FLAG_FOUND	  = 0,	/* used in set_multicast_list */
 	IPOIB_MCAST_FLAG_SENDONLY = 1,
-	/*
-	 * For IPOIB_MCAST_FLAG_BUSY
-	 * When set, in flight join and mcast->mc is unreliable
-	 * When clear and mcast->mc IS_ERR_OR_NULL, need to restart or
-	 *   haven't started yet
-	 * When clear and mcast->mc is valid pointer, join was successful
-	 */
-	IPOIB_MCAST_FLAG_BUSY	  = 2,
+	IPOIB_MCAST_FLAG_BUSY	  = 2,	/* joining or already joined */
 	IPOIB_MCAST_FLAG_ATTACHED = 3,
+	IPOIB_MCAST_JOIN_STARTED  = 4,
 
 	MAX_SEND_CQE		  = 16,
 	IPOIB_CM_COPYBREAK	  = 256,
@@ -323,7 +317,6 @@ struct ipoib_dev_priv {
 	struct list_head multicast_list;
 	struct rb_root multicast_tree;
 
-	struct workqueue_struct *wq;
 	struct delayed_work mcast_task;
 	struct work_struct carrier_on_task;
 	struct work_struct flush_light;
@@ -484,10 +477,10 @@ void ipoib_ib_dev_flush_heavy(struct work_struct *work);
 void ipoib_pkey_event(struct work_struct *work);
 void ipoib_ib_dev_cleanup(struct net_device *dev);
 
-int ipoib_ib_dev_open(struct net_device *dev);
+int ipoib_ib_dev_open(struct net_device *dev, int flush);
 int ipoib_ib_dev_up(struct net_device *dev);
-int ipoib_ib_dev_down(struct net_device *dev);
-int ipoib_ib_dev_stop(struct net_device *dev);
+int ipoib_ib_dev_down(struct net_device *dev, int flush);
+int ipoib_ib_dev_stop(struct net_device *dev, int flush);
 void ipoib_pkey_dev_check_presence(struct net_device *dev);
 
 int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port);
@@ -499,7 +492,7 @@ void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb);
 
 void ipoib_mcast_restart_task(struct work_struct *work);
 int ipoib_mcast_start_thread(struct net_device *dev);
-int ipoib_mcast_stop_thread(struct net_device *dev);
+int ipoib_mcast_stop_thread(struct net_device *dev, int flush);
 
 void ipoib_mcast_dev_down(struct net_device *dev);
 void ipoib_mcast_dev_flush(struct net_device *dev);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 56959adb6c7d..933efcea0d03 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -474,7 +474,7 @@ static int ipoib_cm_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *even
 	}
 
 	spin_lock_irq(&priv->lock);
-	queue_delayed_work(priv->wq,
+	queue_delayed_work(ipoib_workqueue,
 			   &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
 	/* Add this entry to passive ids list head, but do not re-add it
 	 * if IB_EVENT_QP_LAST_WQE_REACHED has moved it to flush list. */
@@ -576,7 +576,7 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
 		spin_lock_irqsave(&priv->lock, flags);
 		list_splice_init(&priv->cm.rx_drain_list, &priv->cm.rx_reap_list);
 		ipoib_cm_start_rx_drain(priv);
-		queue_work(priv->wq, &priv->cm.rx_reap_task);
+		queue_work(ipoib_workqueue, &priv->cm.rx_reap_task);
 		spin_unlock_irqrestore(&priv->lock, flags);
 	} else
 		ipoib_warn(priv, "cm recv completion event with wrid %d (> %d)\n",
@@ -603,7 +603,7 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
 		spin_lock_irqsave(&priv->lock, flags);
 		list_move(&p->list, &priv->cm.rx_reap_list);
 		spin_unlock_irqrestore(&priv->lock, flags);
-		queue_work(priv->wq, &priv->cm.rx_reap_task);
+		queue_work(ipoib_workqueue, &priv->cm.rx_reap_task);
 	}
 	return;
 }
@@ -827,7 +827,7 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
 
 	if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
 		list_move(&tx->list, &priv->cm.reap_list);
-		queue_work(priv->wq, &priv->cm.reap_task);
+		queue_work(ipoib_workqueue, &priv->cm.reap_task);
 	}
 
 	clear_bit(IPOIB_FLAG_OPER_UP, &tx->flags);
@@ -1255,7 +1255,7 @@ static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
 
 		if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
 			list_move(&tx->list, &priv->cm.reap_list);
-			queue_work(priv->wq, &priv->cm.reap_task);
+			queue_work(ipoib_workqueue, &priv->cm.reap_task);
 		}
 
 		spin_unlock_irqrestore(&priv->lock, flags);
@@ -1284,7 +1284,7 @@ struct ipoib_cm_tx *ipoib_cm_create_tx(struct net_device *dev, struct ipoib_path
 	tx->dev = dev;
 	list_add(&tx->list, &priv->cm.start_list);
 	set_bit(IPOIB_FLAG_INITIALIZED, &tx->flags);
-	queue_work(priv->wq, &priv->cm.start_task);
+	queue_work(ipoib_workqueue, &priv->cm.start_task);
 	return tx;
 }
 
@@ -1295,7 +1295,7 @@ void ipoib_cm_destroy_tx(struct ipoib_cm_tx *tx)
 	if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
 		spin_lock_irqsave(&priv->lock, flags);
 		list_move(&tx->list, &priv->cm.reap_list);
-		queue_work(priv->wq, &priv->cm.reap_task);
+		queue_work(ipoib_workqueue, &priv->cm.reap_task);
 		ipoib_dbg(priv, "Reap connection for gid %pI6\n",
 			  tx->neigh->daddr + 4);
 		tx->neigh = NULL;
@@ -1417,7 +1417,7 @@ void ipoib_cm_skb_too_long(struct net_device *dev, struct sk_buff *skb,
 
 	skb_queue_tail(&priv->cm.skb_queue, skb);
 	if (e)
-		queue_work(priv->wq, &priv->cm.skb_task);
+		queue_work(ipoib_workqueue, &priv->cm.skb_task);
 }
 
 static void ipoib_cm_rx_reap(struct work_struct *work)
@@ -1450,7 +1450,7 @@ static void ipoib_cm_stale_task(struct work_struct *work)
 	}
 
 	if (!list_empty(&priv->cm.passive_ids))
-		queue_delayed_work(priv->wq,
+		queue_delayed_work(ipoib_workqueue,
 				   &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
 	spin_unlock_irq(&priv->lock);
 }
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index fe65abb5150c..72626c348174 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -655,7 +655,7 @@ void ipoib_reap_ah(struct work_struct *work)
 	__ipoib_reap_ah(dev);
 
 	if (!test_bit(IPOIB_STOP_REAPER, &priv->flags))
-		queue_delayed_work(priv->wq, &priv->ah_reap_task,
+		queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task,
 				   round_jiffies_relative(HZ));
 }
 
@@ -664,7 +664,7 @@ static void ipoib_ib_tx_timer_func(unsigned long ctx)
 	drain_tx_cq((struct net_device *)ctx);
 }
 
-int ipoib_ib_dev_open(struct net_device *dev)
+int ipoib_ib_dev_open(struct net_device *dev, int flush)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	int ret;
@@ -696,7 +696,7 @@ int ipoib_ib_dev_open(struct net_device *dev)
 	}
 
 	clear_bit(IPOIB_STOP_REAPER, &priv->flags);
-	queue_delayed_work(priv->wq, &priv->ah_reap_task,
+	queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task,
 			   round_jiffies_relative(HZ));
 
 	if (!test_and_set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
@@ -706,7 +706,7 @@ int ipoib_ib_dev_open(struct net_device *dev)
 dev_stop:
 	if (!test_and_set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
 		napi_enable(&priv->napi);
-	ipoib_ib_dev_stop(dev);
+	ipoib_ib_dev_stop(dev, flush);
 	return -1;
 }
 
@@ -738,7 +738,7 @@ int ipoib_ib_dev_up(struct net_device *dev)
 	return ipoib_mcast_start_thread(dev);
 }
 
-int ipoib_ib_dev_down(struct net_device *dev)
+int ipoib_ib_dev_down(struct net_device *dev, int flush)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 
@@ -747,7 +747,7 @@ int ipoib_ib_dev_down(struct net_device *dev)
 	clear_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
 	netif_carrier_off(dev);
 
-	ipoib_mcast_stop_thread(dev);
+	ipoib_mcast_stop_thread(dev, flush);
 	ipoib_mcast_dev_flush(dev);
 
 	ipoib_flush_paths(dev);
@@ -807,7 +807,7 @@ void ipoib_drain_cq(struct net_device *dev)
 	local_bh_enable();
 }
 
-int ipoib_ib_dev_stop(struct net_device *dev)
+int ipoib_ib_dev_stop(struct net_device *dev, int flush)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	struct ib_qp_attr qp_attr;
@@ -880,7 +880,8 @@ timeout:
 	/* Wait for all AHs to be reaped */
 	set_bit(IPOIB_STOP_REAPER, &priv->flags);
 	cancel_delayed_work(&priv->ah_reap_task);
-	flush_workqueue(priv->wq);
+	if (flush)
+		flush_workqueue(ipoib_workqueue);
 
 	begin = jiffies;
 
@@ -917,7 +918,7 @@ int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
 		    (unsigned long) dev);
 
 	if (dev->flags & IFF_UP) {
-		if (ipoib_ib_dev_open(dev)) {
+		if (ipoib_ib_dev_open(dev, 1)) {
 			ipoib_transport_dev_cleanup(dev);
 			return -ENODEV;
 		}
@@ -1039,12 +1040,12 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
 	}
 
 	if (level >= IPOIB_FLUSH_NORMAL)
-		ipoib_ib_dev_down(dev);
+		ipoib_ib_dev_down(dev, 0);
 
 	if (level == IPOIB_FLUSH_HEAVY) {
 		if (test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
-			ipoib_ib_dev_stop(dev);
-		if (ipoib_ib_dev_open(dev) != 0)
+			ipoib_ib_dev_stop(dev, 0);
+		if (ipoib_ib_dev_open(dev, 0) != 0)
 			return;
 		if (netif_queue_stopped(dev))
 			netif_start_queue(dev);
@@ -1096,7 +1097,7 @@ void ipoib_ib_dev_cleanup(struct net_device *dev)
 	 */
 	ipoib_flush_paths(dev);
 
-	ipoib_mcast_stop_thread(dev);
+	ipoib_mcast_stop_thread(dev, 1);
 	ipoib_mcast_dev_flush(dev);
 
 	ipoib_transport_dev_cleanup(dev);
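
The int flush parameter threaded through ipoib_ib_dev_open/down/stop exists because these teardown paths can run from a work item on ipoib_workqueue itself, and a work item that calls flush_workqueue() on its own queue waits for itself, i.e. deadlocks. Callers in plain process context pass 1; callers already on the workqueue pass 0. A generic sketch of the rule, with a hypothetical foo_teardown():

#include <linux/workqueue.h>

/*
 * 'flush' must be 0 when the caller is itself running on 'wq':
 * a work item that flushes its own workqueue waits for itself
 * and deadlocks. Process-context callers pass 1.
 */
static void foo_teardown(struct workqueue_struct *wq, int flush)
{
	if (flush)
		flush_workqueue(wq);
	/* ... release resources that no queued work touches ... */
}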
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 6bad17d4d588..58b5aa3b6f2d 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -108,7 +108,7 @@ int ipoib_open(struct net_device *dev)
 
 	set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
 
-	if (ipoib_ib_dev_open(dev)) {
+	if (ipoib_ib_dev_open(dev, 1)) {
 		if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags))
 			return 0;
 		goto err_disable;
@@ -139,7 +139,7 @@ int ipoib_open(struct net_device *dev)
 	return 0;
 
 err_stop:
-	ipoib_ib_dev_stop(dev);
+	ipoib_ib_dev_stop(dev, 1);
 
 err_disable:
 	clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
@@ -157,8 +157,8 @@ static int ipoib_stop(struct net_device *dev)
 
 	netif_stop_queue(dev);
 
-	ipoib_ib_dev_down(dev);
-	ipoib_ib_dev_stop(dev);
+	ipoib_ib_dev_down(dev, 1);
+	ipoib_ib_dev_stop(dev, 0);
 
 	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
 		struct ipoib_dev_priv *cpriv;
@@ -839,7 +839,7 @@ static void ipoib_set_mcast_list(struct net_device *dev)
 		return;
 	}
 
-	queue_work(priv->wq, &priv->restart_task);
+	queue_work(ipoib_workqueue, &priv->restart_task);
 }
 
 static u32 ipoib_addr_hash(struct ipoib_neigh_hash *htbl, u8 *daddr)
@@ -954,7 +954,7 @@ static void ipoib_reap_neigh(struct work_struct *work)
 	__ipoib_reap_neigh(priv);
 
 	if (!test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags))
-		queue_delayed_work(priv->wq, &priv->neigh_reap_task,
+		queue_delayed_work(ipoib_workqueue, &priv->neigh_reap_task,
 				   arp_tbl.gc_interval);
 }
 
@@ -1133,7 +1133,7 @@ static int ipoib_neigh_hash_init(struct ipoib_dev_priv *priv)
 
 	/* start garbage collection */
 	clear_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
-	queue_delayed_work(priv->wq, &priv->neigh_reap_task,
+	queue_delayed_work(ipoib_workqueue, &priv->neigh_reap_task,
 			   arp_tbl.gc_interval);
 
 	return 0;
@@ -1262,13 +1262,15 @@ int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 
+	if (ipoib_neigh_hash_init(priv) < 0)
+		goto out;
 	/* Allocate RX/TX "rings" to hold queued skbs */
 	priv->rx_ring = kzalloc(ipoib_recvq_size * sizeof *priv->rx_ring,
 				GFP_KERNEL);
 	if (!priv->rx_ring) {
 		printk(KERN_WARNING "%s: failed to allocate RX ring (%d entries)\n",
 		       ca->name, ipoib_recvq_size);
-		goto out;
+		goto out_neigh_hash_cleanup;
 	}
 
 	priv->tx_ring = vzalloc(ipoib_sendq_size * sizeof *priv->tx_ring);
@@ -1283,24 +1285,16 @@ int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
 	if (ipoib_ib_dev_init(dev, ca, port))
 		goto out_tx_ring_cleanup;
 
-	/*
-	 * Must be after ipoib_ib_dev_init so we can allocate a per
-	 * device wq there and use it here
-	 */
-	if (ipoib_neigh_hash_init(priv) < 0)
-		goto out_dev_uninit;
-
 	return 0;
 
-out_dev_uninit:
-	ipoib_ib_dev_cleanup(dev);
-
 out_tx_ring_cleanup:
 	vfree(priv->tx_ring);
 
 out_rx_ring_cleanup:
 	kfree(priv->rx_ring);
 
+out_neigh_hash_cleanup:
+	ipoib_neigh_hash_uninit(dev);
 out:
 	return -ENOMEM;
 }
@@ -1323,12 +1317,6 @@ void ipoib_dev_cleanup(struct net_device *dev)
 	}
 	unregister_netdevice_many(&head);
 
-	/*
-	 * Must be before ipoib_ib_dev_cleanup or we delete an in use
-	 * work queue
-	 */
-	ipoib_neigh_hash_uninit(dev);
-
 	ipoib_ib_dev_cleanup(dev);
 
 	kfree(priv->rx_ring);
@@ -1336,6 +1324,8 @@ void ipoib_dev_cleanup(struct net_device *dev)
 
 	priv->rx_ring = NULL;
 	priv->tx_ring = NULL;
+
+	ipoib_neigh_hash_uninit(dev);
 }
 
 static const struct header_ops ipoib_header_ops = {
@@ -1646,7 +1636,7 @@ register_failed:
 	/* Stop GC if started before flush */
 	set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
 	cancel_delayed_work(&priv->neigh_reap_task);
-	flush_workqueue(priv->wq);
+	flush_workqueue(ipoib_workqueue);
 
 event_failed:
 	ipoib_dev_cleanup(priv->dev);
@@ -1717,7 +1707,7 @@ static void ipoib_remove_one(struct ib_device *device)
 	/* Stop GC */
 	set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
 	cancel_delayed_work(&priv->neigh_reap_task);
-	flush_workqueue(priv->wq);
+	flush_workqueue(ipoib_workqueue);
 
 	unregister_netdev(priv->dev);
 	free_netdev(priv->dev);
@@ -1758,13 +1748,8 @@ static int __init ipoib_init_module(void)
 	 * unregister_netdev() and linkwatch_event take the rtnl lock,
 	 * so flush_scheduled_work() can deadlock during device
 	 * removal.
-	 *
-	 * In addition, bringing one device up and another down at the
-	 * same time can deadlock a single workqueue, so we have this
-	 * global fallback workqueue, but we also attempt to open a
-	 * per device workqueue each time we bring an interface up
 	 */
-	ipoib_workqueue = create_singlethread_workqueue("ipoib_flush");
+	ipoib_workqueue = create_singlethread_workqueue("ipoib");
 	if (!ipoib_workqueue) {
 		ret = -ENOMEM;
 		goto err_fs;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index bc50dd0d0e4d..ffb83b5f7e80 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -190,6 +190,12 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
 		spin_unlock_irq(&priv->lock);
 		priv->tx_wr.wr.ud.remote_qkey = priv->qkey;
 		set_qkey = 1;
+
+		if (!ipoib_cm_admin_enabled(dev)) {
+			rtnl_lock();
+			dev_set_mtu(dev, min(priv->mcast_mtu, priv->admin_mtu));
+			rtnl_unlock();
+		}
 	}
 
 	if (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
@@ -271,27 +277,16 @@ ipoib_mcast_sendonly_join_complete(int status,
 	struct ipoib_mcast *mcast = multicast->context;
 	struct net_device *dev = mcast->dev;
 
-	/*
-	 * We have to take the mutex to force mcast_sendonly_join to
-	 * return from ib_sa_multicast_join and set mcast->mc to a
-	 * valid value. Otherwise we were racing with ourselves in
-	 * that we might fail here, but get a valid return from
-	 * ib_sa_multicast_join after we had cleared mcast->mc here,
-	 * resulting in mis-matched joins and leaves and a deadlock
-	 */
-	mutex_lock(&mcast_mutex);
-
 	/* We trap for port events ourselves. */
 	if (status == -ENETRESET)
-		goto out;
+		return 0;
 
 	if (!status)
 		status = ipoib_mcast_join_finish(mcast, &multicast->rec);
 
 	if (status) {
 		if (mcast->logcount++ < 20)
-			ipoib_dbg_mcast(netdev_priv(dev), "sendonly multicast "
-					"join failed for %pI6, status %d\n",
+			ipoib_dbg_mcast(netdev_priv(dev), "multicast join failed for %pI6, status %d\n",
 					mcast->mcmember.mgid.raw, status);
 
 		/* Flush out any queued packets */
@@ -301,15 +296,11 @@ ipoib_mcast_sendonly_join_complete(int status,
 			dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue));
 		}
 		netif_tx_unlock_bh(dev);
+
+		/* Clear the busy flag so we try again */
+		status = test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY,
+					    &mcast->flags);
 	}
-out:
-	clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
-	if (status)
-		mcast->mc = NULL;
-	complete(&mcast->done);
-	if (status == -ENETRESET)
-		status = 0;
-	mutex_unlock(&mcast_mutex);
 	return status;
 }
 
@@ -327,14 +318,12 @@ static int ipoib_mcast_sendonly_join(struct ipoib_mcast *mcast)
 	int ret = 0;
 
 	if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) {
-		ipoib_dbg_mcast(priv, "device shutting down, no sendonly "
-				"multicast joins\n");
+		ipoib_dbg_mcast(priv, "device shutting down, no multicast joins\n");
 		return -ENODEV;
 	}
 
-	if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)) {
-		ipoib_dbg_mcast(priv, "multicast entry busy, skipping "
-				"sendonly join\n");
+	if (test_and_set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)) {
+		ipoib_dbg_mcast(priv, "multicast entry busy, skipping\n");
 		return -EBUSY;
 	}
 
@@ -342,9 +331,6 @@ static int ipoib_mcast_sendonly_join(struct ipoib_mcast *mcast)
 	rec.port_gid = priv->local_gid;
 	rec.pkey     = cpu_to_be16(priv->pkey);
 
-	mutex_lock(&mcast_mutex);
-	init_completion(&mcast->done);
-	set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
 	mcast->mc = ib_sa_join_multicast(&ipoib_sa_client, priv->ca,
 					 priv->port, &rec,
 					 IB_SA_MCMEMBER_REC_MGID	|
@@ -357,14 +343,12 @@ static int ipoib_mcast_sendonly_join(struct ipoib_mcast *mcast)
 	if (IS_ERR(mcast->mc)) {
 		ret = PTR_ERR(mcast->mc);
 		clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
-		complete(&mcast->done);
-		ipoib_warn(priv, "ib_sa_join_multicast for sendonly join "
-			   "failed (ret = %d)\n", ret);
+		ipoib_warn(priv, "ib_sa_join_multicast failed (ret = %d)\n",
+			   ret);
 	} else {
-		ipoib_dbg_mcast(priv, "no multicast record for %pI6, starting "
-				"sendonly join\n", mcast->mcmember.mgid.raw);
+		ipoib_dbg_mcast(priv, "no multicast record for %pI6, starting join\n",
+				mcast->mcmember.mgid.raw);
 	}
-	mutex_unlock(&mcast_mutex);
 
 	return ret;
 }
@@ -375,29 +359,18 @@ void ipoib_mcast_carrier_on_task(struct work_struct *work)
 					    carrier_on_task);
 	struct ib_port_attr attr;
 
+	/*
+	 * Take rtnl_lock to avoid racing with ipoib_stop() and
+	 * turning the carrier back on while a device is being
+	 * removed.
+	 */
 	if (ib_query_port(priv->ca, priv->port, &attr) ||
 	    attr.state != IB_PORT_ACTIVE) {
 		ipoib_dbg(priv, "Keeping carrier off until IB port is active\n");
 		return;
 	}
 
-	/*
-	 * Take rtnl_lock to avoid racing with ipoib_stop() and
-	 * turning the carrier back on while a device is being
-	 * removed. However, ipoib_stop() will attempt to flush
-	 * the workqueue while holding the rtnl lock, so loop
-	 * on trylock until either we get the lock or we see
-	 * FLAG_ADMIN_UP go away as that signals that we are bailing
-	 * and can safely ignore the carrier on work.
-	 */
-	while (!rtnl_trylock()) {
-		if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
-			return;
-		else
-			msleep(20);
-	}
-	if (!ipoib_cm_admin_enabled(priv->dev))
-		dev_set_mtu(priv->dev, min(priv->mcast_mtu, priv->admin_mtu));
+	rtnl_lock();
 	netif_carrier_on(priv->dev);
 	rtnl_unlock();
 }
@@ -412,63 +385,60 @@ static int ipoib_mcast_join_complete(int status,
 	ipoib_dbg_mcast(priv, "join completion for %pI6 (status %d)\n",
 			mcast->mcmember.mgid.raw, status);
 
-	/*
-	 * We have to take the mutex to force mcast_join to
-	 * return from ib_sa_multicast_join and set mcast->mc to a
-	 * valid value. Otherwise we were racing with ourselves in
-	 * that we might fail here, but get a valid return from
-	 * ib_sa_multicast_join after we had cleared mcast->mc here,
-	 * resulting in mis-matched joins and leaves and a deadlock
-	 */
-	mutex_lock(&mcast_mutex);
-
 	/* We trap for port events ourselves. */
-	if (status == -ENETRESET)
+	if (status == -ENETRESET) {
+		status = 0;
 		goto out;
+	}
 
 	if (!status)
 		status = ipoib_mcast_join_finish(mcast, &multicast->rec);
 
 	if (!status) {
 		mcast->backoff = 1;
+		mutex_lock(&mcast_mutex);
 		if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
-			queue_delayed_work(priv->wq, &priv->mcast_task, 0);
+			queue_delayed_work(ipoib_workqueue,
+					   &priv->mcast_task, 0);
+		mutex_unlock(&mcast_mutex);
 
 		/*
-		 * Defer carrier on work to priv->wq to avoid a
+		 * Defer carrier on work to ipoib_workqueue to avoid a
 		 * deadlock on rtnl_lock here.
 		 */
 		if (mcast == priv->broadcast)
-			queue_work(priv->wq, &priv->carrier_on_task);
-	} else {
-		if (mcast->logcount++ < 20) {
-			if (status == -ETIMEDOUT || status == -EAGAIN) {
-				ipoib_dbg_mcast(priv, "multicast join failed for %pI6, status %d\n",
-						mcast->mcmember.mgid.raw, status);
-			} else {
-				ipoib_warn(priv, "multicast join failed for %pI6, status %d\n",
-					   mcast->mcmember.mgid.raw, status);
-			}
-		}
+			queue_work(ipoib_workqueue, &priv->carrier_on_task);
 
-		mcast->backoff *= 2;
-		if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS)
-			mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS;
+		status = 0;
+		goto out;
 	}
-out:
+
+	if (mcast->logcount++ < 20) {
+		if (status == -ETIMEDOUT || status == -EAGAIN) {
+			ipoib_dbg_mcast(priv, "multicast join failed for %pI6, status %d\n",
+					mcast->mcmember.mgid.raw, status);
+		} else {
+			ipoib_warn(priv, "multicast join failed for %pI6, status %d\n",
+				   mcast->mcmember.mgid.raw, status);
+		}
+	}
+
+	mcast->backoff *= 2;
+	if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS)
+		mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS;
+
+	/* Clear the busy flag so we try again */
+	status = test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
+
+	mutex_lock(&mcast_mutex);
 	spin_lock_irq(&priv->lock);
-	clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
-	if (status)
-		mcast->mc = NULL;
-	complete(&mcast->done);
-	if (status == -ENETRESET)
-		status = 0;
-	if (status && test_bit(IPOIB_MCAST_RUN, &priv->flags))
-		queue_delayed_work(priv->wq, &priv->mcast_task,
+	if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
+		queue_delayed_work(ipoib_workqueue, &priv->mcast_task,
 				   mcast->backoff * HZ);
 	spin_unlock_irq(&priv->lock);
 	mutex_unlock(&mcast_mutex);
-
+out:
+	complete(&mcast->done);
 	return status;
 }
 
@@ -517,9 +487,10 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast,
 		rec.hop_limit	  = priv->broadcast->mcmember.hop_limit;
 	}
 
-	mutex_lock(&mcast_mutex);
-	init_completion(&mcast->done);
 	set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
+	init_completion(&mcast->done);
+	set_bit(IPOIB_MCAST_JOIN_STARTED, &mcast->flags);
+
 	mcast->mc = ib_sa_join_multicast(&ipoib_sa_client, priv->ca, priv->port,
 					 &rec, comp_mask, GFP_KERNEL,
 					 ipoib_mcast_join_complete, mcast);
@@ -533,11 +504,13 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast,
 		if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS)
 			mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS;
 
+		mutex_lock(&mcast_mutex);
 		if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
-			queue_delayed_work(priv->wq, &priv->mcast_task,
+			queue_delayed_work(ipoib_workqueue,
+					   &priv->mcast_task,
 					   mcast->backoff * HZ);
+		mutex_unlock(&mcast_mutex);
 	}
-	mutex_unlock(&mcast_mutex);
 }
 
 void ipoib_mcast_join_task(struct work_struct *work)
@@ -574,8 +547,8 @@ void ipoib_mcast_join_task(struct work_struct *work)
 		ipoib_warn(priv, "failed to allocate broadcast group\n");
 		mutex_lock(&mcast_mutex);
 		if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
-			queue_delayed_work(priv->wq, &priv->mcast_task,
-					   HZ);
+			queue_delayed_work(ipoib_workqueue,
+					   &priv->mcast_task, HZ);
 		mutex_unlock(&mcast_mutex);
 		return;
 	}
@@ -590,8 +563,7 @@ void ipoib_mcast_join_task(struct work_struct *work)
 	}
 
 	if (!test_bit(IPOIB_MCAST_FLAG_ATTACHED, &priv->broadcast->flags)) {
-		if (IS_ERR_OR_NULL(priv->broadcast->mc) &&
-		    !test_bit(IPOIB_MCAST_FLAG_BUSY, &priv->broadcast->flags))
+		if (!test_bit(IPOIB_MCAST_FLAG_BUSY, &priv->broadcast->flags))
 			ipoib_mcast_join(dev, priv->broadcast, 0);
 		return;
 	}
@@ -599,33 +571,23 @@ void ipoib_mcast_join_task(struct work_struct *work)
 	while (1) {
 		struct ipoib_mcast *mcast = NULL;
 
-		/*
-		 * Need the mutex so our flags are consistent, need the
-		 * priv->lock so we don't race with list removals in either
-		 * mcast_dev_flush or mcast_restart_task
-		 */
-		mutex_lock(&mcast_mutex);
 		spin_lock_irq(&priv->lock);
 		list_for_each_entry(mcast, &priv->multicast_list, list) {
-			if (IS_ERR_OR_NULL(mcast->mc) &&
-			    !test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags) &&
-			    !test_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) {
+			if (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)
+			    && !test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)
+			    && !test_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) {
 				/* Found the next unjoined group */
 				break;
 			}
 		}
 		spin_unlock_irq(&priv->lock);
-		mutex_unlock(&mcast_mutex);
 
 		if (&mcast->list == &priv->multicast_list) {
 			/* All done */
 			break;
 		}
 
-		if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags))
-			ipoib_mcast_sendonly_join(mcast);
-		else
-			ipoib_mcast_join(dev, mcast, 1);
+		ipoib_mcast_join(dev, mcast, 1);
 		return;
 	}
 
@@ -642,13 +604,13 @@ int ipoib_mcast_start_thread(struct net_device *dev)
 
 	mutex_lock(&mcast_mutex);
 	if (!test_and_set_bit(IPOIB_MCAST_RUN, &priv->flags))
-		queue_delayed_work(priv->wq, &priv->mcast_task, 0);
+		queue_delayed_work(ipoib_workqueue, &priv->mcast_task, 0);
 	mutex_unlock(&mcast_mutex);
 
 	return 0;
 }
 
-int ipoib_mcast_stop_thread(struct net_device *dev)
+int ipoib_mcast_stop_thread(struct net_device *dev, int flush)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 
@@ -659,7 +621,8 @@ int ipoib_mcast_stop_thread(struct net_device *dev)
 	cancel_delayed_work(&priv->mcast_task);
 	mutex_unlock(&mcast_mutex);
 
-	flush_workqueue(priv->wq);
+	if (flush)
+		flush_workqueue(ipoib_workqueue);
 
 	return 0;
 }
@@ -670,9 +633,6 @@ static int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast)
 	int ret = 0;
 
 	if (test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
-		ipoib_warn(priv, "ipoib_mcast_leave on an in-flight join\n");
-
-	if (!IS_ERR_OR_NULL(mcast->mc))
 		ib_sa_free_multicast(mcast->mc);
 
 	if (test_and_clear_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) {
@@ -725,8 +685,6 @@ void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb)
 		memcpy(mcast->mcmember.mgid.raw, mgid, sizeof (union ib_gid));
 		__ipoib_mcast_add(dev, mcast);
 		list_add_tail(&mcast->list, &priv->multicast_list);
-		if (!test_and_set_bit(IPOIB_MCAST_RUN, &priv->flags))
-			queue_delayed_work(priv->wq, &priv->mcast_task, 0);
 	}
 
 	if (!mcast->ah) {
@@ -740,6 +698,8 @@ void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb)
 		if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
 			ipoib_dbg_mcast(priv, "no address vector, "
 					"but multicast join already started\n");
+		else if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags))
+			ipoib_mcast_sendonly_join(mcast);
 
 		/*
 		 * If lookup completes between here and out:, don't
@@ -799,12 +759,9 @@ void ipoib_mcast_dev_flush(struct net_device *dev)
 
 	spin_unlock_irqrestore(&priv->lock, flags);
 
-	/*
-	 * make sure the in-flight joins have finished before we attempt
-	 * to leave
-	 */
+	/* seperate between the wait to the leave*/
 	list_for_each_entry_safe(mcast, tmcast, &remove_list, list)
-		if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
+		if (test_bit(IPOIB_MCAST_JOIN_STARTED, &mcast->flags))
 			wait_for_completion(&mcast->done);
 
 	list_for_each_entry_safe(mcast, tmcast, &remove_list, list) {
@@ -837,6 +794,8 @@ void ipoib_mcast_restart_task(struct work_struct *work)
 
 	ipoib_dbg_mcast(priv, "restarting multicast task\n");
 
+	ipoib_mcast_stop_thread(dev, 0);
+
 	local_irq_save(flags);
 	netif_addr_lock(dev);
 	spin_lock(&priv->lock);
@@ -921,38 +880,14 @@ void ipoib_mcast_restart_task(struct work_struct *work)
 	netif_addr_unlock(dev);
 	local_irq_restore(flags);
 
-	/*
-	 * make sure the in-flight joins have finished before we attempt
-	 * to leave
-	 */
-	list_for_each_entry_safe(mcast, tmcast, &remove_list, list)
-		if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
-			wait_for_completion(&mcast->done);
-
-	/*
-	 * We have to cancel outside of the spinlock, but we have to
-	 * take the rtnl lock or else we race with the removal of
-	 * entries from the remove list in mcast_dev_flush as part
-	 * of ipoib_stop(). We detect the drop of the ADMIN_UP flag
-	 * to signal that we have hit this particular race, and we
-	 * return since we know we don't need to do anything else
-	 * anyway.
-	 */
-	while (!rtnl_trylock()) {
-		if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
-			return;
-		else
-			msleep(20);
-	}
+	/* We have to cancel outside of the spinlock */
 	list_for_each_entry_safe(mcast, tmcast, &remove_list, list) {
 		ipoib_mcast_leave(mcast->dev, mcast);
 		ipoib_mcast_free(mcast);
 	}
-	/*
-	 * Restart our join task if needed
-	 */
-	ipoib_mcast_start_thread(dev);
-	rtnl_unlock();
+
+	if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
+		ipoib_mcast_start_thread(dev);
 }
 
 #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
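
Two per-group pieces of state do the serialization in the multicast code that results from this merge: IPOIB_MCAST_FLAG_BUSY is claimed atomically with test_and_set_bit() so at most one join is in flight, and IPOIB_MCAST_JOIN_STARTED plus a completion lets the flush path wait for a started join's callback before leaving and freeing the group. A reduced sketch of that claim/complete shape, with hypothetical foo_* names:

#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/errno.h>

#define FOO_MCAST_BUSY		2	/* joining or already joined */
#define FOO_MCAST_STARTED	4	/* a join was started at least once */

struct foo_mcast {
	unsigned long flags;
	struct completion done;
};

static int foo_start_join(struct foo_mcast *m)
{
	if (test_and_set_bit(FOO_MCAST_BUSY, &m->flags))
		return -EBUSY;	/* another join is already in flight */

	init_completion(&m->done);
	set_bit(FOO_MCAST_STARTED, &m->flags);
	/* ... hand m to the asynchronous join machinery here ... */
	return 0;
}

/* join callback, success or failure: let any waiting flusher proceed */
static void foo_join_done(struct foo_mcast *m)
{
	complete(&m->done);
}

/* flush path: if (test_bit(FOO_MCAST_STARTED, &m->flags)) wait_for_completion(&m->done); */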
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
index b72a753eb41d..c56d5d44c53b 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
@@ -145,20 +145,10 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
 	int ret, size;
 	int i;
 
-	/*
-	 * the various IPoIB tasks assume they will never race against
-	 * themselves, so always use a single thread workqueue
-	 */
-	priv->wq = create_singlethread_workqueue("ipoib_wq");
-	if (!priv->wq) {
-		printk(KERN_WARNING "ipoib: failed to allocate device WQ\n");
-		return -ENODEV;
-	}
-
 	priv->pd = ib_alloc_pd(priv->ca);
 	if (IS_ERR(priv->pd)) {
 		printk(KERN_WARNING "%s: failed to allocate PD\n", ca->name);
-		goto out_free_wq;
+		return -ENODEV;
 	}
 
 	priv->mr = ib_get_dma_mr(priv->pd, IB_ACCESS_LOCAL_WRITE);
@@ -252,10 +242,6 @@ out_free_mr:
 
 out_free_pd:
 	ib_dealloc_pd(priv->pd);
-
-out_free_wq:
-	destroy_workqueue(priv->wq);
-	priv->wq = NULL;
 	return -ENODEV;
 }
 
@@ -284,12 +270,6 @@ void ipoib_transport_dev_cleanup(struct net_device *dev)
 
 	if (ib_dealloc_pd(priv->pd))
 		ipoib_warn(priv, "ib_dealloc_pd failed\n");
-
-	if (priv->wq) {
-		flush_workqueue(priv->wq);
-		destroy_workqueue(priv->wq);
-		priv->wq = NULL;
-	}
 }
 
 void ipoib_event(struct ib_event_handler *handler,
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index 77ecf6d32237..6e22682c8255 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -1097,6 +1097,8 @@ static int elantech_get_resolution_v4(struct psmouse *psmouse,
  * Asus UX31               0x361f00        20, 15, 0e      clickpad
  * Asus UX32VD             0x361f02        00, 15, 0e      clickpad
  * Avatar AVIU-145A2       0x361f00        ?               clickpad
+ * Fujitsu LIFEBOOK E544   0x470f00        d0, 12, 09      2 hw buttons
+ * Fujitsu LIFEBOOK E554   0x570f01        40, 14, 0c      2 hw buttons
  * Fujitsu H730            0x570f00        c0, 14, 0c      3 hw buttons (**)
  * Gigabyte U2442          0x450f01        58, 17, 0c      2 hw buttons
  * Lenovo L430             0x350f02        b9, 15, 0c      2 hw buttons (*)
@@ -1475,6 +1477,20 @@ static const struct dmi_system_id elantech_dmi_force_crc_enabled[] = {
 			DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H730"),
 		},
 	},
+	{
+		/* Fujitsu LIFEBOOK E554 does not work with crc_enabled == 0 */
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E554"),
+		},
+	},
+	{
+		/* Fujitsu LIFEBOOK E544 does not work with crc_enabled == 0 */
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E544"),
+		},
+	},
 #endif
 	{ }
 };
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index f9472920d986..23e26e0768b5 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -135,8 +135,9 @@ static const struct min_max_quirk min_max_pnpid_table[] = {
 		1232, 5710, 1156, 4696
 	},
 	{
-		(const char * const []){"LEN0034", "LEN0036", "LEN0039",
-					"LEN2002", "LEN2004", NULL},
+		(const char * const []){"LEN0034", "LEN0036", "LEN0037",
+					"LEN0039", "LEN2002", "LEN2004",
+					NULL},
 		1024, 5112, 2024, 4832
 	},
 	{
@@ -165,7 +166,7 @@ static const char * const topbuttonpad_pnp_ids[] = {
 	"LEN0034", /* T431s, L440, L540, T540, W540, X1 Carbon 2nd */
 	"LEN0035", /* X240 */
 	"LEN0036", /* T440 */
-	"LEN0037",
+	"LEN0037", /* X1 Carbon 2nd */
 	"LEN0038",
 	"LEN0039", /* T440s */
 	"LEN0041",
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index 764857b4e268..c11556563ef0 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -152,6 +152,14 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = {
 		},
 	},
 	{
+		/* Medion Akoya E7225 */
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Medion"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Akoya E7225"),
+			DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
+		},
+	},
+	{
 		/* Blue FB5601 */
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "blue"),
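
Both input hunks above extend DMI quirk tables: NULL-terminated arrays of struct dmi_system_id in which every DMI_MATCH inside .matches must hit before the entry applies. A minimal sketch with made-up vendor/product strings:

#include <linux/dmi.h>
#include <linux/init.h>

/* vendor/product strings are hypothetical; every DMI_MATCH must match */
static const struct dmi_system_id foo_quirk_table[] __initconst = {
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Example Vendor"),
			DMI_MATCH(DMI_PRODUCT_NAME, "Example Board"),
		},
	},
	{ }	/* zeroed sentinel terminates the table */
};

/* usage: if (dmi_check_system(foo_quirk_table)) apply_quirk(); */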
diff --git a/drivers/iommu/tegra-gart.c b/drivers/iommu/tegra-gart.c
index f722a0c466cf..c48da057dbb1 100644
--- a/drivers/iommu/tegra-gart.c
+++ b/drivers/iommu/tegra-gart.c
@@ -315,6 +315,7 @@ static const struct iommu_ops gart_iommu_ops = {
 	.attach_dev	= gart_iommu_attach_dev,
 	.detach_dev	= gart_iommu_detach_dev,
 	.map		= gart_iommu_map,
+	.map_sg		= default_iommu_map_sg,
 	.unmap		= gart_iommu_unmap,
 	.iova_to_phys	= gart_iommu_iova_to_phys,
 	.pgsize_bitmap	= GART_IOMMU_PGSIZES,
@@ -395,7 +396,7 @@ static int tegra_gart_probe(struct platform_device *pdev)
 	do_gart_setup(gart, NULL);
 
 	gart_handle = gart;
-	bus_set_iommu(&platform_bus_type, &gart_iommu_ops);
+
 	return 0;
 }
 
diff --git a/drivers/isdn/hardware/eicon/message.c b/drivers/isdn/hardware/eicon/message.c
index 0b380603a578..d7c286656a25 100644
--- a/drivers/isdn/hardware/eicon/message.c
+++ b/drivers/isdn/hardware/eicon/message.c
@@ -1474,7 +1474,7 @@ static byte connect_res(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
 		add_ai(plci, &parms[5]);
 		sig_req(plci, REJECT, 0);
 	}
-	else if (Reject == 1 || Reject > 9)
+	else if (Reject == 1 || Reject >= 9)
 	{
 		add_ai(plci, &parms[5]);
 		sig_req(plci, HANGUP, 0);
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index da3604e73e8a..1695ee5f3ffc 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -72,6 +72,19 @@ __acquires(bitmap->lock)
72 /* this page has not been allocated yet */ 72 /* this page has not been allocated yet */
73 73
74 spin_unlock_irq(&bitmap->lock); 74 spin_unlock_irq(&bitmap->lock);
75 /* It is possible that this is being called inside a
76 * prepare_to_wait/finish_wait loop from raid5c:make_request().
77 * In general it is not permitted to sleep in that context as it
78 * can cause the loop to spin freely.
79 * That doesn't apply here as we can only reach this point
80 * once with any loop.
81 * When this function completes, either bp[page].map or
82 * bp[page].hijacked. In either case, this function will
83 * abort before getting to this point again. So there is
84 * no risk of a free-spin, and so it is safe to assert
85 * that sleeping here is allowed.
86 */
87 sched_annotate_sleep();
75 mappage = kzalloc(PAGE_SIZE, GFP_NOIO); 88 mappage = kzalloc(PAGE_SIZE, GFP_NOIO);
76 spin_lock_irq(&bitmap->lock); 89 spin_lock_irq(&bitmap->lock);
77 90
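
Note on the bitmap hunk: the long comment is the justification for sched_annotate_sleep() — the GFP_NOIO allocation may sleep inside a prepare_to_wait()/finish_wait() loop, which the scheduler's debug checks would normally flag, but the allocation path is reachable at most once per loop, so no free-spin is possible. A rough sketch of the calling shape being described (helper names hypothetical):

    for (;;) {
        prepare_to_wait(&wq, &wait, TASK_UNINTERRUPTIBLE);
        if (page_ready(bitmap))        /* hypothetical */
            break;
        /* one-shot slow path; reaches sched_annotate_sleep() + kzalloc(GFP_NOIO) */
        grow_page(bitmap);             /* hypothetical */
        schedule();
    }
    finish_wait(&wq, &wait);
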
diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
index 21b156242e42..c1c010498a21 100644
--- a/drivers/md/dm-cache-metadata.c
+++ b/drivers/md/dm-cache-metadata.c
@@ -683,7 +683,7 @@ static struct dm_cache_metadata *metadata_open(struct block_device *bdev,
683 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); 683 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
684 if (!cmd) { 684 if (!cmd) {
685 DMERR("could not allocate metadata struct"); 685 DMERR("could not allocate metadata struct");
686 return NULL; 686 return ERR_PTR(-ENOMEM);
687 } 687 }
688 688
689 atomic_set(&cmd->ref_count, 1); 689 atomic_set(&cmd->ref_count, 1);
@@ -745,7 +745,7 @@ static struct dm_cache_metadata *lookup_or_open(struct block_device *bdev,
745 return cmd; 745 return cmd;
746 746
747 cmd = metadata_open(bdev, data_block_size, may_format_device, policy_hint_size); 747 cmd = metadata_open(bdev, data_block_size, may_format_device, policy_hint_size);
748 if (cmd) { 748 if (!IS_ERR(cmd)) {
749 mutex_lock(&table_lock); 749 mutex_lock(&table_lock);
750 cmd2 = lookup(bdev); 750 cmd2 = lookup(bdev);
751 if (cmd2) { 751 if (cmd2) {
@@ -780,9 +780,10 @@ struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
780{ 780{
781 struct dm_cache_metadata *cmd = lookup_or_open(bdev, data_block_size, 781 struct dm_cache_metadata *cmd = lookup_or_open(bdev, data_block_size,
782 may_format_device, policy_hint_size); 782 may_format_device, policy_hint_size);
783 if (cmd && !same_params(cmd, data_block_size)) { 783
784 if (!IS_ERR(cmd) && !same_params(cmd, data_block_size)) {
784 dm_cache_metadata_close(cmd); 785 dm_cache_metadata_close(cmd);
785 return NULL; 786 return ERR_PTR(-EINVAL);
786 } 787 }
787 788
788 return cmd; 789 return cmd;
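
Note on the dm-cache-metadata hunks: all three are one conversion from NULL-on-failure to the ERR_PTR convention, so callers can tell -ENOMEM apart from -EINVAL and propagate the real errno. The pattern in miniature (struct and function names hypothetical):

    #include <linux/err.h>
    #include <linux/slab.h>

    static struct thing *thing_open(void)
    {
        struct thing *t = kzalloc(sizeof(*t), GFP_KERNEL);

        if (!t)
            return ERR_PTR(-ENOMEM);    /* errno encoded in the pointer */
        return t;
    }

    static int thing_user(void)
    {
        struct thing *t = thing_open();

        if (IS_ERR(t))
            return PTR_ERR(t);          /* recover the negative errno */
        /* ... use t ... */
        return 0;
    }
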
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 493478989dbd..07705ee181e3 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -3385,6 +3385,12 @@ static int pool_message(struct dm_target *ti, unsigned argc, char **argv)
3385 struct pool_c *pt = ti->private; 3385 struct pool_c *pt = ti->private;
3386 struct pool *pool = pt->pool; 3386 struct pool *pool = pt->pool;
3387 3387
3388 if (get_pool_mode(pool) >= PM_READ_ONLY) {
3389 DMERR("%s: unable to service pool target messages in READ_ONLY or FAIL mode",
3390 dm_device_name(pool->pool_md));
3391 return -EINVAL;
3392 }
3393
3388 if (!strcasecmp(argv[0], "create_thin")) 3394 if (!strcasecmp(argv[0], "create_thin"))
3389 r = process_create_thin_mesg(argc, argv, pool); 3395 r = process_create_thin_mesg(argc, argv, pool);
3390 3396
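
Note on the dm-thin hunk: the new guard in pool_message() leans on the pool-mode enum being ordered by severity, so one comparison covers both degraded states named in the error message. Roughly, per dm-thin of this era (treat the exact comments as a sketch):

    enum pool_mode {
        PM_WRITE,               /* metadata may be changed */
        PM_OUT_OF_DATA_SPACE,   /* metadata writable, data space exhausted */
        PM_READ_ONLY,           /* metadata may not be changed */
        PM_FAIL,                /* all I/O fails */
    };

    /* hence: get_pool_mode(pool) >= PM_READ_ONLY  ==  READ_ONLY or FAIL */
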
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index c1b0d52bfcb0..b98765f6f77f 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3195,6 +3195,11 @@ static void handle_stripe_dirtying(struct r5conf *conf,
3195 (unsigned long long)sh->sector, 3195 (unsigned long long)sh->sector,
3196 rcw, qread, test_bit(STRIPE_DELAYED, &sh->state)); 3196 rcw, qread, test_bit(STRIPE_DELAYED, &sh->state));
3197 } 3197 }
3198
3199 if (rcw > disks && rmw > disks &&
3200 !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
3201 set_bit(STRIPE_DELAYED, &sh->state);
3202
3198 /* now if nothing is locked, and if we have enough data, 3203 /* now if nothing is locked, and if we have enough data,
3199 * we can start a write request 3204 * we can start a write request
3200 */ 3205 */
diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c
index 5e40a8b68cbe..b3b922adc0e4 100644
--- a/drivers/net/caif/caif_hsi.c
+++ b/drivers/net/caif/caif_hsi.c
@@ -1415,7 +1415,6 @@ static int caif_hsi_newlink(struct net *src_net, struct net_device *dev,
1415 1415
1416 cfhsi = netdev_priv(dev); 1416 cfhsi = netdev_priv(dev);
1417 cfhsi_netlink_parms(data, cfhsi); 1417 cfhsi_netlink_parms(data, cfhsi);
1418 dev_net_set(cfhsi->ndev, src_net);
1419 1418
1420 get_ops = symbol_get(cfhsi_get_ops); 1419 get_ops = symbol_get(cfhsi_get_ops);
1421 if (!get_ops) { 1420 if (!get_ops) {
diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig
index 5d3b5202327c..c638c85f3954 100644
--- a/drivers/net/ethernet/amd/Kconfig
+++ b/drivers/net/ethernet/amd/Kconfig
@@ -45,7 +45,7 @@ config AMD8111_ETH
45 45
46config LANCE 46config LANCE
47 tristate "AMD LANCE and PCnet (AT1500 and NE2100) support" 47 tristate "AMD LANCE and PCnet (AT1500 and NE2100) support"
48 depends on ISA && ISA_DMA_API 48 depends on ISA && ISA_DMA_API && !ARM
49 ---help--- 49 ---help---
50 If you have a network (Ethernet) card of this type, say Y and read 50 If you have a network (Ethernet) card of this type, say Y and read
51 the Ethernet-HOWTO, available from 51 the Ethernet-HOWTO, available from
@@ -142,7 +142,7 @@ config PCMCIA_NMCLAN
142 142
143config NI65 143config NI65
144 tristate "NI6510 support" 144 tristate "NI6510 support"
145 depends on ISA && ISA_DMA_API 145 depends on ISA && ISA_DMA_API && !ARM
146 ---help--- 146 ---help---
147 If you have a network (Ethernet) card of this type, say Y and read 147 If you have a network (Ethernet) card of this type, say Y and read
148 the Ethernet-HOWTO, available from 148 the Ethernet-HOWTO, available from
diff --git a/drivers/net/ethernet/amd/nmclan_cs.c b/drivers/net/ethernet/amd/nmclan_cs.c
index 5b22764ba88d..27245efe9f50 100644
--- a/drivers/net/ethernet/amd/nmclan_cs.c
+++ b/drivers/net/ethernet/amd/nmclan_cs.c
@@ -952,6 +952,8 @@ static irqreturn_t mace_interrupt(int irq, void *dev_id)
952 do { 952 do {
953 /* WARNING: MACE_IR is a READ/CLEAR port! */ 953 /* WARNING: MACE_IR is a READ/CLEAR port! */
954 status = inb(ioaddr + AM2150_MACE_BASE + MACE_IR); 954 status = inb(ioaddr + AM2150_MACE_BASE + MACE_IR);
955 if (!(status & ~MACE_IMR_DEFAULT) && IntrCnt == MACE_MAX_IR_ITERATIONS)
956 return IRQ_NONE;
955 957
956 pr_debug("mace_interrupt: irq 0x%X status 0x%X.\n", irq, status); 958 pr_debug("mace_interrupt: irq 0x%X status 0x%X.\n", irq, status);
957 959
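
Note on the nmclan hunk: the handler now disclaims interrupts that carry no event bits once the iteration budget is spent, which matters on shared lines — returning IRQ_NONE lets the core's spurious-interrupt accounting detect (and eventually disable) a screaming IRQ. The canonical shape, with hypothetical helpers:

    static irqreturn_t example_interrupt(int irq, void *dev_id)
    {
        struct net_device *dev = dev_id;
        u8 status = read_event_status(dev);    /* hypothetical read/clear port */

        if (!status)
            return IRQ_NONE;      /* not ours; let the core keep count */

        handle_events(dev, status);            /* hypothetical */
        return IRQ_HANDLED;
    }
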
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index c036a0e61bba..d41f9f468688 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -522,6 +522,7 @@ void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
522 hw_feat->sph = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN); 522 hw_feat->sph = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN);
523 hw_feat->tso = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN); 523 hw_feat->tso = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN);
524 hw_feat->dma_debug = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DBGMEMA); 524 hw_feat->dma_debug = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DBGMEMA);
525 hw_feat->rss = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, RSSEN);
525 hw_feat->tc_cnt = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, NUMTC); 526 hw_feat->tc_cnt = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, NUMTC);
526 hw_feat->hash_table_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, 527 hw_feat->hash_table_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
527 HASHTBLSZ); 528 HASHTBLSZ);
@@ -551,13 +552,14 @@ void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
551 break; 552 break;
552 } 553 }
553 554
554 /* The Queue and Channel counts are zero based so increment them 555 /* The Queue, Channel and TC counts are zero based so increment them
555 * to get the actual number 556 * to get the actual number
556 */ 557 */
557 hw_feat->rx_q_cnt++; 558 hw_feat->rx_q_cnt++;
558 hw_feat->tx_q_cnt++; 559 hw_feat->tx_q_cnt++;
559 hw_feat->rx_ch_cnt++; 560 hw_feat->rx_ch_cnt++;
560 hw_feat->tx_ch_cnt++; 561 hw_feat->tx_ch_cnt++;
562 hw_feat->tc_cnt++;
561 563
562 DBGPR("<--xgbe_get_all_hw_features\n"); 564 DBGPR("<--xgbe_get_all_hw_features\n");
563} 565}
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index 02add385a33d..44b15373d6b3 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -373,6 +373,8 @@ static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring,
373 if (unlikely(xgene_enet_is_desc_slot_empty(raw_desc))) 373 if (unlikely(xgene_enet_is_desc_slot_empty(raw_desc)))
374 break; 374 break;
375 375
376 /* read fpqnum field after dataaddr field */
377 dma_rmb();
376 if (is_rx_desc(raw_desc)) 378 if (is_rx_desc(raw_desc))
377 ret = xgene_enet_rx_frame(ring, raw_desc); 379 ret = xgene_enet_rx_frame(ring, raw_desc);
378 else 380 else
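
Note on the xgene hunk: dma_rmb() sits between the slot-validity check and the reads of the remaining descriptor words (fpqnum after dataaddr, per the added comment). The device fills the descriptor in a fixed order, and without the barrier the CPU may reorder the dependent loads and observe a half-written descriptor; dma_rmb() is the lightweight read barrier for coherent DMA memory. Consumer-side shape, names hypothetical:

    static int consume_one(const struct example_desc *raw_desc)
    {
        if (slot_empty(raw_desc))              /* hypothetical validity read */
            return -ENOENT;

        dma_rmb();    /* payload loads below must not pass the check above */

        process(le64_to_cpu(raw_desc->dataaddr),
                le32_to_cpu(raw_desc->fpqnum));    /* process() hypothetical */
        return 0;
    }
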
diff --git a/drivers/net/ethernet/cirrus/Kconfig b/drivers/net/ethernet/cirrus/Kconfig
index 7403dff8f14a..905ac5f5d9a6 100644
--- a/drivers/net/ethernet/cirrus/Kconfig
+++ b/drivers/net/ethernet/cirrus/Kconfig
@@ -32,7 +32,8 @@ config CS89x0
32 will be called cs89x0. 32 will be called cs89x0.
33 33
34config CS89x0_PLATFORM 34config CS89x0_PLATFORM
35 bool "CS89x0 platform driver support" 35 bool "CS89x0 platform driver support" if HAS_IOPORT_MAP
36 default !HAS_IOPORT_MAP
36 depends on CS89x0 37 depends on CS89x0
37 help 38 help
38 Say Y to compile the cs89x0 driver as a platform driver. This 39 Say Y to compile the cs89x0 driver as a platform driver. This
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index 3e1a9c1a67a9..fda12fb32ec7 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -1586,7 +1586,7 @@ static int gfar_write_filer_table(struct gfar_private *priv,
1586 return -EBUSY; 1586 return -EBUSY;
1587 1587
1588 /* Fill regular entries */ 1588 /* Fill regular entries */
1589 for (; i < MAX_FILER_IDX - 1 && (tab->fe[i].ctrl | tab->fe[i].ctrl); 1589 for (; i < MAX_FILER_IDX - 1 && (tab->fe[i].ctrl | tab->fe[i].prop);
1590 i++) 1590 i++)
1591 gfar_write_filer(priv, i, tab->fe[i].ctrl, tab->fe[i].prop); 1591 gfar_write_filer(priv, i, tab->fe[i].ctrl, tab->fe[i].prop);
1592 /* Fill the rest with fall-troughs */ 1592 /* Fill the rest with fall-troughs */
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index ad2b4897b392..ebf9d4a42fdd 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -1907,7 +1907,8 @@ static void igbvf_watchdog_task(struct work_struct *work)
1907 1907
1908static int igbvf_tso(struct igbvf_adapter *adapter, 1908static int igbvf_tso(struct igbvf_adapter *adapter,
1909 struct igbvf_ring *tx_ring, 1909 struct igbvf_ring *tx_ring,
1910 struct sk_buff *skb, u32 tx_flags, u8 *hdr_len) 1910 struct sk_buff *skb, u32 tx_flags, u8 *hdr_len,
1911 __be16 protocol)
1911{ 1912{
1912 struct e1000_adv_tx_context_desc *context_desc; 1913 struct e1000_adv_tx_context_desc *context_desc;
1913 struct igbvf_buffer *buffer_info; 1914 struct igbvf_buffer *buffer_info;
@@ -1927,7 +1928,7 @@ static int igbvf_tso(struct igbvf_adapter *adapter,
1927 l4len = tcp_hdrlen(skb); 1928 l4len = tcp_hdrlen(skb);
1928 *hdr_len += l4len; 1929 *hdr_len += l4len;
1929 1930
1930 if (skb->protocol == htons(ETH_P_IP)) { 1931 if (protocol == htons(ETH_P_IP)) {
1931 struct iphdr *iph = ip_hdr(skb); 1932 struct iphdr *iph = ip_hdr(skb);
1932 iph->tot_len = 0; 1933 iph->tot_len = 0;
1933 iph->check = 0; 1934 iph->check = 0;
@@ -1958,7 +1959,7 @@ static int igbvf_tso(struct igbvf_adapter *adapter,
1958 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ 1959 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
1959 tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT); 1960 tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);
1960 1961
1961 if (skb->protocol == htons(ETH_P_IP)) 1962 if (protocol == htons(ETH_P_IP))
1962 tu_cmd |= E1000_ADVTXD_TUCMD_IPV4; 1963 tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
1963 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP; 1964 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
1964 1965
@@ -1984,7 +1985,8 @@ static int igbvf_tso(struct igbvf_adapter *adapter,
1984 1985
1985static inline bool igbvf_tx_csum(struct igbvf_adapter *adapter, 1986static inline bool igbvf_tx_csum(struct igbvf_adapter *adapter,
1986 struct igbvf_ring *tx_ring, 1987 struct igbvf_ring *tx_ring,
1987 struct sk_buff *skb, u32 tx_flags) 1988 struct sk_buff *skb, u32 tx_flags,
1989 __be16 protocol)
1988{ 1990{
1989 struct e1000_adv_tx_context_desc *context_desc; 1991 struct e1000_adv_tx_context_desc *context_desc;
1990 unsigned int i; 1992 unsigned int i;
@@ -2011,7 +2013,7 @@ static inline bool igbvf_tx_csum(struct igbvf_adapter *adapter,
2011 tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT); 2013 tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);
2012 2014
2013 if (skb->ip_summed == CHECKSUM_PARTIAL) { 2015 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2014 switch (skb->protocol) { 2016 switch (protocol) {
2015 case htons(ETH_P_IP): 2017 case htons(ETH_P_IP):
2016 tu_cmd |= E1000_ADVTXD_TUCMD_IPV4; 2018 tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
2017 if (ip_hdr(skb)->protocol == IPPROTO_TCP) 2019 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
@@ -2211,6 +2213,7 @@ static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb,
2211 u8 hdr_len = 0; 2213 u8 hdr_len = 0;
2212 int count = 0; 2214 int count = 0;
2213 int tso = 0; 2215 int tso = 0;
2216 __be16 protocol = vlan_get_protocol(skb);
2214 2217
2215 if (test_bit(__IGBVF_DOWN, &adapter->state)) { 2218 if (test_bit(__IGBVF_DOWN, &adapter->state)) {
2216 dev_kfree_skb_any(skb); 2219 dev_kfree_skb_any(skb);
@@ -2240,13 +2243,13 @@ static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb,
2240 IGBVF_TX_FLAGS_VLAN_SHIFT); 2243 IGBVF_TX_FLAGS_VLAN_SHIFT);
2241 } 2244 }
2242 2245
2243 if (skb->protocol == htons(ETH_P_IP)) 2246 if (protocol == htons(ETH_P_IP))
2244 tx_flags |= IGBVF_TX_FLAGS_IPV4; 2247 tx_flags |= IGBVF_TX_FLAGS_IPV4;
2245 2248
2246 first = tx_ring->next_to_use; 2249 first = tx_ring->next_to_use;
2247 2250
2248 tso = skb_is_gso(skb) ? 2251 tso = skb_is_gso(skb) ?
2249 igbvf_tso(adapter, tx_ring, skb, tx_flags, &hdr_len) : 0; 2252 igbvf_tso(adapter, tx_ring, skb, tx_flags, &hdr_len, protocol) : 0;
2250 if (unlikely(tso < 0)) { 2253 if (unlikely(tso < 0)) {
2251 dev_kfree_skb_any(skb); 2254 dev_kfree_skb_any(skb);
2252 return NETDEV_TX_OK; 2255 return NETDEV_TX_OK;
@@ -2254,7 +2257,7 @@ static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb,
2254 2257
2255 if (tso) 2258 if (tso)
2256 tx_flags |= IGBVF_TX_FLAGS_TSO; 2259 tx_flags |= IGBVF_TX_FLAGS_TSO;
2257 else if (igbvf_tx_csum(adapter, tx_ring, skb, tx_flags) && 2260 else if (igbvf_tx_csum(adapter, tx_ring, skb, tx_flags, protocol) &&
2258 (skb->ip_summed == CHECKSUM_PARTIAL)) 2261 (skb->ip_summed == CHECKSUM_PARTIAL))
2259 tx_flags |= IGBVF_TX_FLAGS_CSUM; 2262 tx_flags |= IGBVF_TX_FLAGS_CSUM;
2260 2263
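
Note on the igbvf hunks (the ixgbe and ixgbevf hunks that follow fix the same bug): for software-tagged VLAN frames skb->protocol is ETH_P_8021Q, so TSO/checksum setup keyed on it misprograms the hardware for the IPv4/IPv6 payload. vlan_get_protocol() returns the encapsulated ethertype instead. The corrected decision in isolation (flag names hypothetical):

    #include <linux/if_vlan.h>

    static u32 l3_offload_flags(const struct sk_buff *skb)
    {
        __be16 protocol = vlan_get_protocol(skb);  /* looks past 802.1Q tags */
        u32 flags = 0;

        if (protocol == htons(ETH_P_IP))
            flags |= EXAMPLE_TX_IPV4;              /* hypothetical */
        else if (protocol == htons(ETH_P_IPV6))
            flags |= EXAMPLE_TX_IPV6;              /* hypothetical */

        return flags;
    }
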
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index e4086fea4be2..e9e3a1eb9a97 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -7227,11 +7227,11 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
7227 if (!vhdr) 7227 if (!vhdr)
7228 goto out_drop; 7228 goto out_drop;
7229 7229
7230 protocol = vhdr->h_vlan_encapsulated_proto;
7231 tx_flags |= ntohs(vhdr->h_vlan_TCI) << 7230 tx_flags |= ntohs(vhdr->h_vlan_TCI) <<
7232 IXGBE_TX_FLAGS_VLAN_SHIFT; 7231 IXGBE_TX_FLAGS_VLAN_SHIFT;
7233 tx_flags |= IXGBE_TX_FLAGS_SW_VLAN; 7232 tx_flags |= IXGBE_TX_FLAGS_SW_VLAN;
7234 } 7233 }
7234 protocol = vlan_get_protocol(skb);
7235 7235
7236 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && 7236 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
7237 adapter->ptp_clock && 7237 adapter->ptp_clock &&
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index c9b49bfb51bb..fe2e10f40df8 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -3099,7 +3099,7 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
3099 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ 3099 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
3100 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP; 3100 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
3101 3101
3102 if (skb->protocol == htons(ETH_P_IP)) { 3102 if (first->protocol == htons(ETH_P_IP)) {
3103 struct iphdr *iph = ip_hdr(skb); 3103 struct iphdr *iph = ip_hdr(skb);
3104 iph->tot_len = 0; 3104 iph->tot_len = 0;
3105 iph->check = 0; 3105 iph->check = 0;
@@ -3156,7 +3156,7 @@ static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
3156 3156
3157 if (skb->ip_summed == CHECKSUM_PARTIAL) { 3157 if (skb->ip_summed == CHECKSUM_PARTIAL) {
3158 u8 l4_hdr = 0; 3158 u8 l4_hdr = 0;
3159 switch (skb->protocol) { 3159 switch (first->protocol) {
3160 case htons(ETH_P_IP): 3160 case htons(ETH_P_IP):
3161 vlan_macip_lens |= skb_network_header_len(skb); 3161 vlan_macip_lens |= skb_network_header_len(skb);
3162 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4; 3162 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index 803f17653da7..1409d0cd6143 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -244,7 +244,8 @@ extern int mlx4_log_num_mgm_entry_size;
244extern int log_mtts_per_seg; 244extern int log_mtts_per_seg;
245extern int mlx4_internal_err_reset; 245extern int mlx4_internal_err_reset;
246 246
247#define MLX4_MAX_NUM_SLAVES (MLX4_MAX_NUM_PF + MLX4_MAX_NUM_VF) 247#define MLX4_MAX_NUM_SLAVES (min(MLX4_MAX_NUM_PF + MLX4_MAX_NUM_VF, \
248 MLX4_MFUNC_MAX))
248#define ALL_SLAVES 0xff 249#define ALL_SLAVES 0xff
249 250
250struct mlx4_bitmap { 251struct mlx4_bitmap {
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index 4d2496f28b85..d4b5085a21fa 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -968,7 +968,12 @@ static int qlcnic_poll(struct napi_struct *napi, int budget)
968 tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring, 968 tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring,
969 budget); 969 budget);
970 work_done = qlcnic_process_rcv_ring(sds_ring, budget); 970 work_done = qlcnic_process_rcv_ring(sds_ring, budget);
971 if ((work_done < budget) && tx_complete) { 971
972 /* Check if we need a repoll */
973 if (!tx_complete)
974 work_done = budget;
975
976 if (work_done < budget) {
972 napi_complete(&sds_ring->napi); 977 napi_complete(&sds_ring->napi);
973 if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) { 978 if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
974 qlcnic_enable_sds_intr(adapter, sds_ring); 979 qlcnic_enable_sds_intr(adapter, sds_ring);
@@ -993,6 +998,9 @@ static int qlcnic_tx_poll(struct napi_struct *napi, int budget)
993 napi_complete(&tx_ring->napi); 998 napi_complete(&tx_ring->napi);
994 if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) 999 if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
995 qlcnic_enable_tx_intr(adapter, tx_ring); 1000 qlcnic_enable_tx_intr(adapter, tx_ring);
1001 } else {
1002 /* As qlcnic_process_cmd_ring() returned 0, we need a repoll*/
1003 work_done = budget;
996 } 1004 }
997 1005
998 return work_done; 1006 return work_done;
@@ -1951,7 +1959,12 @@ static int qlcnic_83xx_msix_sriov_vf_poll(struct napi_struct *napi, int budget)
1951 1959
1952 tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring, budget); 1960 tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
1953 work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget); 1961 work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget);
1954 if ((work_done < budget) && tx_complete) { 1962
1963 /* Check if we need a repoll */
1964 if (!tx_complete)
1965 work_done = budget;
1966
1967 if (work_done < budget) {
1955 napi_complete(&sds_ring->napi); 1968 napi_complete(&sds_ring->napi);
1956 qlcnic_enable_sds_intr(adapter, sds_ring); 1969 qlcnic_enable_sds_intr(adapter, sds_ring);
1957 } 1970 }
@@ -1974,7 +1987,12 @@ static int qlcnic_83xx_poll(struct napi_struct *napi, int budget)
1974 1987
1975 tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring, budget); 1988 tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
1976 work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget); 1989 work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget);
1977 if ((work_done < budget) && tx_complete) { 1990
1991 /* Check if we need a repoll */
1992 if (!tx_complete)
1993 work_done = budget;
1994
1995 if (work_done < budget) {
1978 napi_complete(&sds_ring->napi); 1996 napi_complete(&sds_ring->napi);
1979 qlcnic_enable_sds_intr(adapter, sds_ring); 1997 qlcnic_enable_sds_intr(adapter, sds_ring);
1980 } 1998 }
@@ -1996,6 +2014,9 @@ static int qlcnic_83xx_msix_tx_poll(struct napi_struct *napi, int budget)
1996 napi_complete(&tx_ring->napi); 2014 napi_complete(&tx_ring->napi);
1997 if (test_bit(__QLCNIC_DEV_UP , &adapter->state)) 2015 if (test_bit(__QLCNIC_DEV_UP , &adapter->state))
1998 qlcnic_enable_tx_intr(adapter, tx_ring); 2016 qlcnic_enable_tx_intr(adapter, tx_ring);
2017 } else {
2018 /* need a repoll */
2019 work_done = budget;
1999 } 2020 }
2000 2021
2001 return work_done; 2022 return work_done;
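
Note on the qlcnic hunks: all four poll routines get the same correction to the NAPI contract — napi_complete() may only run once both RX and TX work are finished, and the way to ask the core for another pass is to report the full budget. The contract in miniature (helper names hypothetical):

    static int example_poll(struct napi_struct *napi, int budget)
    {
        bool tx_complete = process_tx_ring(napi);        /* hypothetical */
        int work_done = process_rx_ring(napi, budget);   /* hypothetical */

        if (!tx_complete)
            work_done = budget;    /* pending TX work: force a repoll */

        if (work_done < budget) {
            napi_complete(napi);
            reenable_irqs(napi);   /* hypothetical */
        }
        return work_done;
    }
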
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index dc0058f90370..8011ef3e7707 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -2351,23 +2351,29 @@ static int qlge_update_hw_vlan_features(struct net_device *ndev,
2351{ 2351{
2352 struct ql_adapter *qdev = netdev_priv(ndev); 2352 struct ql_adapter *qdev = netdev_priv(ndev);
2353 int status = 0; 2353 int status = 0;
2354 bool need_restart = netif_running(ndev);
2354 2355
2355 status = ql_adapter_down(qdev); 2356 if (need_restart) {
2356 if (status) { 2357 status = ql_adapter_down(qdev);
2357 netif_err(qdev, link, qdev->ndev, 2358 if (status) {
2358 "Failed to bring down the adapter\n"); 2359 netif_err(qdev, link, qdev->ndev,
2359 return status; 2360 "Failed to bring down the adapter\n");
2361 return status;
2362 }
2360 } 2363 }
2361 2364
2362 /* update the features with resent change */ 2365 /* update the features with resent change */
2363 ndev->features = features; 2366 ndev->features = features;
2364 2367
2365 status = ql_adapter_up(qdev); 2368 if (need_restart) {
2366 if (status) { 2369 status = ql_adapter_up(qdev);
2367 netif_err(qdev, link, qdev->ndev, 2370 if (status) {
2368 "Failed to bring up the adapter\n"); 2371 netif_err(qdev, link, qdev->ndev,
2369 return status; 2372 "Failed to bring up the adapter\n");
2373 return status;
2374 }
2370 } 2375 }
2376
2371 return status; 2377 return status;
2372} 2378}
2373 2379
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index 2b719ccd6e7c..2b10b85d8a08 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -1123,6 +1123,7 @@ static inline struct sk_buff *vnet_skb_shape(struct sk_buff *skb, int ncookies)
1123 skb_shinfo(nskb)->gso_size = skb_shinfo(skb)->gso_size; 1123 skb_shinfo(nskb)->gso_size = skb_shinfo(skb)->gso_size;
1124 skb_shinfo(nskb)->gso_type = skb_shinfo(skb)->gso_type; 1124 skb_shinfo(nskb)->gso_type = skb_shinfo(skb)->gso_type;
1125 } 1125 }
1126 nskb->queue_mapping = skb->queue_mapping;
1126 dev_kfree_skb(skb); 1127 dev_kfree_skb(skb);
1127 skb = nskb; 1128 skb = nskb;
1128 } 1129 }
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index d2af032ff225..58bb4102afac 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -717,7 +717,7 @@ int netvsc_send(struct hv_device *device,
717 u64 req_id; 717 u64 req_id;
718 unsigned int section_index = NETVSC_INVALID_INDEX; 718 unsigned int section_index = NETVSC_INVALID_INDEX;
719 u32 msg_size = 0; 719 u32 msg_size = 0;
720 struct sk_buff *skb; 720 struct sk_buff *skb = NULL;
721 u16 q_idx = packet->q_idx; 721 u16 q_idx = packet->q_idx;
722 722
723 723
@@ -744,8 +744,6 @@ int netvsc_send(struct hv_device *device,
744 packet); 744 packet);
745 skb = (struct sk_buff *) 745 skb = (struct sk_buff *)
746 (unsigned long)packet->send_completion_tid; 746 (unsigned long)packet->send_completion_tid;
747 if (skb)
748 dev_kfree_skb_any(skb);
749 packet->page_buf_cnt = 0; 747 packet->page_buf_cnt = 0;
750 } 748 }
751 } 749 }
@@ -811,6 +809,13 @@ int netvsc_send(struct hv_device *device,
811 packet, ret); 809 packet, ret);
812 } 810 }
813 811
812 if (ret != 0) {
813 if (section_index != NETVSC_INVALID_INDEX)
814 netvsc_free_send_slot(net_device, section_index);
815 } else if (skb) {
816 dev_kfree_skb_any(skb);
817 }
818
814 return ret; 819 return ret;
815} 820}
816 821
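
Note on the netvsc hunks: the skb free moves out of the batching path and is gated on the outcome of the send — on failure the reserved send-buffer slot is returned, and only on success is the skb (whose payload was already copied into the send buffer) released. The ownership rule as a sketch (names hypothetical):

    static int example_send(struct dev_ctx *dev, struct pkt *pkt,
                            struct sk_buff *skb, int section_index)
    {
        int ret = device_send(dev, pkt);                 /* hypothetical */

        if (ret != 0) {
            if (section_index != INVALID_INDEX)
                free_send_slot(dev, section_index);      /* undo reservation */
        } else if (skb) {
            dev_kfree_skb_any(skb);   /* data already copied; safe to free */
        }
        return ret;
    }
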
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index d0ed5694dd7d..e40fdfccc9c1 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -17,7 +17,6 @@
17#include <linux/fs.h> 17#include <linux/fs.h>
18#include <linux/uio.h> 18#include <linux/uio.h>
19 19
20#include <net/ipv6.h>
21#include <net/net_namespace.h> 20#include <net/net_namespace.h>
22#include <net/rtnetlink.h> 21#include <net/rtnetlink.h>
23#include <net/sock.h> 22#include <net/sock.h>
@@ -81,7 +80,7 @@ static struct cdev macvtap_cdev;
81static const struct proto_ops macvtap_socket_ops; 80static const struct proto_ops macvtap_socket_ops;
82 81
83#define TUN_OFFLOADS (NETIF_F_HW_CSUM | NETIF_F_TSO_ECN | NETIF_F_TSO | \ 82#define TUN_OFFLOADS (NETIF_F_HW_CSUM | NETIF_F_TSO_ECN | NETIF_F_TSO | \
84 NETIF_F_TSO6) 83 NETIF_F_TSO6 | NETIF_F_UFO)
85#define RX_OFFLOADS (NETIF_F_GRO | NETIF_F_LRO) 84#define RX_OFFLOADS (NETIF_F_GRO | NETIF_F_LRO)
86#define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG) 85#define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG)
87 86
@@ -586,11 +585,7 @@ static int macvtap_skb_from_vnet_hdr(struct macvtap_queue *q,
586 gso_type = SKB_GSO_TCPV6; 585 gso_type = SKB_GSO_TCPV6;
587 break; 586 break;
588 case VIRTIO_NET_HDR_GSO_UDP: 587 case VIRTIO_NET_HDR_GSO_UDP:
589 pr_warn_once("macvtap: %s: using disabled UFO feature; please fix this program\n",
590 current->comm);
591 gso_type = SKB_GSO_UDP; 588 gso_type = SKB_GSO_UDP;
592 if (skb->protocol == htons(ETH_P_IPV6))
593 ipv6_proxy_select_ident(skb);
594 break; 589 break;
595 default: 590 default:
596 return -EINVAL; 591 return -EINVAL;
@@ -636,6 +631,8 @@ static void macvtap_skb_to_vnet_hdr(struct macvtap_queue *q,
636 vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4; 631 vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
637 else if (sinfo->gso_type & SKB_GSO_TCPV6) 632 else if (sinfo->gso_type & SKB_GSO_TCPV6)
638 vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6; 633 vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
634 else if (sinfo->gso_type & SKB_GSO_UDP)
635 vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
639 else 636 else
640 BUG(); 637 BUG();
641 if (sinfo->gso_type & SKB_GSO_TCP_ECN) 638 if (sinfo->gso_type & SKB_GSO_TCP_ECN)
@@ -965,6 +962,9 @@ static int set_offload(struct macvtap_queue *q, unsigned long arg)
965 if (arg & TUN_F_TSO6) 962 if (arg & TUN_F_TSO6)
966 feature_mask |= NETIF_F_TSO6; 963 feature_mask |= NETIF_F_TSO6;
967 } 964 }
965
966 if (arg & TUN_F_UFO)
967 feature_mask |= NETIF_F_UFO;
968 } 968 }
969 969
970 /* tun/tap driver inverts the usage for TSO offloads, where 970 /* tun/tap driver inverts the usage for TSO offloads, where
@@ -975,7 +975,7 @@ static int set_offload(struct macvtap_queue *q, unsigned long arg)
975 * When user space turns off TSO, we turn off GSO/LRO so that 975 * When user space turns off TSO, we turn off GSO/LRO so that
976 * user-space will not receive TSO frames. 976 * user-space will not receive TSO frames.
977 */ 977 */
978 if (feature_mask & (NETIF_F_TSO | NETIF_F_TSO6)) 978 if (feature_mask & (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_UFO))
979 features |= RX_OFFLOADS; 979 features |= RX_OFFLOADS;
980 else 980 else
981 features &= ~RX_OFFLOADS; 981 features &= ~RX_OFFLOADS;
@@ -1090,7 +1090,7 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
1090 case TUNSETOFFLOAD: 1090 case TUNSETOFFLOAD:
1091 /* let the user check for future flags */ 1091 /* let the user check for future flags */
1092 if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 | 1092 if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 |
1093 TUN_F_TSO_ECN)) 1093 TUN_F_TSO_ECN | TUN_F_UFO))
1094 return -EINVAL; 1094 return -EINVAL;
1095 1095
1096 rtnl_lock(); 1096 rtnl_lock();
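
Note on the macvtap hunks (the tun and virtio_net hunks below are the same revert): UFO (UDP fragmentation offload) is exposed to guests again, since hiding it broke live migration of VMs that had negotiated the feature. Both directions of the restored plumbing, condensed into one sketch:

    /* ioctl side: userspace offload flag -> netdev feature bit */
    if (arg & TUN_F_UFO)
        feature_mask |= NETIF_F_UFO;

    /* receive side: virtio header GSO type -> kernel GSO type */
    switch (gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
    case VIRTIO_NET_HDR_GSO_TCPV4:
        skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
        break;
    case VIRTIO_NET_HDR_GSO_TCPV6:
        skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
        break;
    case VIRTIO_NET_HDR_GSO_UDP:    /* the re-enabled UFO case */
        skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
        break;
    default:
        return -EINVAL;
    }
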
diff --git a/drivers/net/ppp/ppp_deflate.c b/drivers/net/ppp/ppp_deflate.c
index 602c625d95d5..b5edc7f96a39 100644
--- a/drivers/net/ppp/ppp_deflate.c
+++ b/drivers/net/ppp/ppp_deflate.c
@@ -246,7 +246,7 @@ static int z_compress(void *arg, unsigned char *rptr, unsigned char *obuf,
246 /* 246 /*
247 * See if we managed to reduce the size of the packet. 247 * See if we managed to reduce the size of the packet.
248 */ 248 */
249 if (olen < isize) { 249 if (olen < isize && olen <= osize) {
250 state->stats.comp_bytes += olen; 250 state->stats.comp_bytes += olen;
251 state->stats.comp_packets++; 251 state->stats.comp_packets++;
252 } else { 252 } else {
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 3ff8cd7bf74d..ad7d3d5f3ee5 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -65,7 +65,6 @@
65#include <linux/nsproxy.h> 65#include <linux/nsproxy.h>
66#include <linux/virtio_net.h> 66#include <linux/virtio_net.h>
67#include <linux/rcupdate.h> 67#include <linux/rcupdate.h>
68#include <net/ipv6.h>
69#include <net/net_namespace.h> 68#include <net/net_namespace.h>
70#include <net/netns/generic.h> 69#include <net/netns/generic.h>
71#include <net/rtnetlink.h> 70#include <net/rtnetlink.h>
@@ -186,7 +185,7 @@ struct tun_struct {
186 struct net_device *dev; 185 struct net_device *dev;
187 netdev_features_t set_features; 186 netdev_features_t set_features;
188#define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \ 187#define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \
189 NETIF_F_TSO6) 188 NETIF_F_TSO6|NETIF_F_UFO)
190 189
191 int vnet_hdr_sz; 190 int vnet_hdr_sz;
192 int sndbuf; 191 int sndbuf;
@@ -1166,8 +1165,6 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
1166 break; 1165 break;
1167 } 1166 }
1168 1167
1169 skb_reset_network_header(skb);
1170
1171 if (gso.gso_type != VIRTIO_NET_HDR_GSO_NONE) { 1168 if (gso.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
1172 pr_debug("GSO!\n"); 1169 pr_debug("GSO!\n");
1173 switch (gso.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) { 1170 switch (gso.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
@@ -1178,20 +1175,8 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
1178 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; 1175 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
1179 break; 1176 break;
1180 case VIRTIO_NET_HDR_GSO_UDP: 1177 case VIRTIO_NET_HDR_GSO_UDP:
1181 {
1182 static bool warned;
1183
1184 if (!warned) {
1185 warned = true;
1186 netdev_warn(tun->dev,
1187 "%s: using disabled UFO feature; please fix this program\n",
1188 current->comm);
1189 }
1190 skb_shinfo(skb)->gso_type = SKB_GSO_UDP; 1178 skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
1191 if (skb->protocol == htons(ETH_P_IPV6))
1192 ipv6_proxy_select_ident(skb);
1193 break; 1179 break;
1194 }
1195 default: 1180 default:
1196 tun->dev->stats.rx_frame_errors++; 1181 tun->dev->stats.rx_frame_errors++;
1197 kfree_skb(skb); 1182 kfree_skb(skb);
@@ -1220,6 +1205,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
1220 skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG; 1205 skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
1221 } 1206 }
1222 1207
1208 skb_reset_network_header(skb);
1223 skb_probe_transport_header(skb, 0); 1209 skb_probe_transport_header(skb, 0);
1224 1210
1225 rxhash = skb_get_hash(skb); 1211 rxhash = skb_get_hash(skb);
@@ -1297,6 +1283,8 @@ static ssize_t tun_put_user(struct tun_struct *tun,
1297 gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV4; 1283 gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
1298 else if (sinfo->gso_type & SKB_GSO_TCPV6) 1284 else if (sinfo->gso_type & SKB_GSO_TCPV6)
1299 gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV6; 1285 gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
1286 else if (sinfo->gso_type & SKB_GSO_UDP)
1287 gso.gso_type = VIRTIO_NET_HDR_GSO_UDP;
1300 else { 1288 else {
1301 pr_err("unexpected GSO type: " 1289 pr_err("unexpected GSO type: "
1302 "0x%x, gso_size %d, hdr_len %d\n", 1290 "0x%x, gso_size %d, hdr_len %d\n",
@@ -1752,6 +1740,11 @@ static int set_offload(struct tun_struct *tun, unsigned long arg)
1752 features |= NETIF_F_TSO6; 1740 features |= NETIF_F_TSO6;
1753 arg &= ~(TUN_F_TSO4|TUN_F_TSO6); 1741 arg &= ~(TUN_F_TSO4|TUN_F_TSO6);
1754 } 1742 }
1743
1744 if (arg & TUN_F_UFO) {
1745 features |= NETIF_F_UFO;
1746 arg &= ~TUN_F_UFO;
1747 }
1755 } 1748 }
1756 1749
1757 /* This gives the user a way to test for new features in future by 1750 /* This gives the user a way to test for new features in future by
diff --git a/drivers/net/usb/sr9700.c b/drivers/net/usb/sr9700.c
index 99b69af14274..4a1e9c489f1f 100644
--- a/drivers/net/usb/sr9700.c
+++ b/drivers/net/usb/sr9700.c
@@ -77,7 +77,7 @@ static int wait_phy_eeprom_ready(struct usbnet *dev, int phy)
77 int ret; 77 int ret;
78 78
79 udelay(1); 79 udelay(1);
80 ret = sr_read_reg(dev, EPCR, &tmp); 80 ret = sr_read_reg(dev, SR_EPCR, &tmp);
81 if (ret < 0) 81 if (ret < 0)
82 return ret; 82 return ret;
83 83
@@ -98,15 +98,15 @@ static int sr_share_read_word(struct usbnet *dev, int phy, u8 reg,
98 98
99 mutex_lock(&dev->phy_mutex); 99 mutex_lock(&dev->phy_mutex);
100 100
101 sr_write_reg(dev, EPAR, phy ? (reg | EPAR_PHY_ADR) : reg); 101 sr_write_reg(dev, SR_EPAR, phy ? (reg | EPAR_PHY_ADR) : reg);
102 sr_write_reg(dev, EPCR, phy ? (EPCR_EPOS | EPCR_ERPRR) : EPCR_ERPRR); 102 sr_write_reg(dev, SR_EPCR, phy ? (EPCR_EPOS | EPCR_ERPRR) : EPCR_ERPRR);
103 103
104 ret = wait_phy_eeprom_ready(dev, phy); 104 ret = wait_phy_eeprom_ready(dev, phy);
105 if (ret < 0) 105 if (ret < 0)
106 goto out_unlock; 106 goto out_unlock;
107 107
108 sr_write_reg(dev, EPCR, 0x0); 108 sr_write_reg(dev, SR_EPCR, 0x0);
109 ret = sr_read(dev, EPDR, 2, value); 109 ret = sr_read(dev, SR_EPDR, 2, value);
110 110
111 netdev_dbg(dev->net, "read shared %d 0x%02x returned 0x%04x, %d\n", 111 netdev_dbg(dev->net, "read shared %d 0x%02x returned 0x%04x, %d\n",
112 phy, reg, *value, ret); 112 phy, reg, *value, ret);
@@ -123,19 +123,19 @@ static int sr_share_write_word(struct usbnet *dev, int phy, u8 reg,
123 123
124 mutex_lock(&dev->phy_mutex); 124 mutex_lock(&dev->phy_mutex);
125 125
126 ret = sr_write(dev, EPDR, 2, &value); 126 ret = sr_write(dev, SR_EPDR, 2, &value);
127 if (ret < 0) 127 if (ret < 0)
128 goto out_unlock; 128 goto out_unlock;
129 129
130 sr_write_reg(dev, EPAR, phy ? (reg | EPAR_PHY_ADR) : reg); 130 sr_write_reg(dev, SR_EPAR, phy ? (reg | EPAR_PHY_ADR) : reg);
131 sr_write_reg(dev, EPCR, phy ? (EPCR_WEP | EPCR_EPOS | EPCR_ERPRW) : 131 sr_write_reg(dev, SR_EPCR, phy ? (EPCR_WEP | EPCR_EPOS | EPCR_ERPRW) :
132 (EPCR_WEP | EPCR_ERPRW)); 132 (EPCR_WEP | EPCR_ERPRW));
133 133
134 ret = wait_phy_eeprom_ready(dev, phy); 134 ret = wait_phy_eeprom_ready(dev, phy);
135 if (ret < 0) 135 if (ret < 0)
136 goto out_unlock; 136 goto out_unlock;
137 137
138 sr_write_reg(dev, EPCR, 0x0); 138 sr_write_reg(dev, SR_EPCR, 0x0);
139 139
140out_unlock: 140out_unlock:
141 mutex_unlock(&dev->phy_mutex); 141 mutex_unlock(&dev->phy_mutex);
@@ -188,7 +188,7 @@ static int sr_mdio_read(struct net_device *netdev, int phy_id, int loc)
188 if (loc == MII_BMSR) { 188 if (loc == MII_BMSR) {
189 u8 value; 189 u8 value;
190 190
191 sr_read_reg(dev, NSR, &value); 191 sr_read_reg(dev, SR_NSR, &value);
192 if (value & NSR_LINKST) 192 if (value & NSR_LINKST)
193 rc = 1; 193 rc = 1;
194 } 194 }
@@ -228,7 +228,7 @@ static u32 sr9700_get_link(struct net_device *netdev)
228 int rc = 0; 228 int rc = 0;
229 229
230 /* Get the Link Status directly */ 230 /* Get the Link Status directly */
231 sr_read_reg(dev, NSR, &value); 231 sr_read_reg(dev, SR_NSR, &value);
232 if (value & NSR_LINKST) 232 if (value & NSR_LINKST)
233 rc = 1; 233 rc = 1;
234 234
@@ -281,8 +281,8 @@ static void sr9700_set_multicast(struct net_device *netdev)
281 } 281 }
282 } 282 }
283 283
284 sr_write_async(dev, MAR, SR_MCAST_SIZE, hashes); 284 sr_write_async(dev, SR_MAR, SR_MCAST_SIZE, hashes);
285 sr_write_reg_async(dev, RCR, rx_ctl); 285 sr_write_reg_async(dev, SR_RCR, rx_ctl);
286} 286}
287 287
288static int sr9700_set_mac_address(struct net_device *netdev, void *p) 288static int sr9700_set_mac_address(struct net_device *netdev, void *p)
@@ -297,7 +297,7 @@ static int sr9700_set_mac_address(struct net_device *netdev, void *p)
297 } 297 }
298 298
299 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 299 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
300 sr_write_async(dev, PAR, 6, netdev->dev_addr); 300 sr_write_async(dev, SR_PAR, 6, netdev->dev_addr);
301 301
302 return 0; 302 return 0;
303} 303}
@@ -340,7 +340,7 @@ static int sr9700_bind(struct usbnet *dev, struct usb_interface *intf)
340 mii->phy_id_mask = 0x1f; 340 mii->phy_id_mask = 0x1f;
341 mii->reg_num_mask = 0x1f; 341 mii->reg_num_mask = 0x1f;
342 342
343 sr_write_reg(dev, NCR, NCR_RST); 343 sr_write_reg(dev, SR_NCR, NCR_RST);
344 udelay(20); 344 udelay(20);
345 345
346 /* read MAC 346 /* read MAC
@@ -348,17 +348,17 @@ static int sr9700_bind(struct usbnet *dev, struct usb_interface *intf)
348 * EEPROM automatically to PAR. In case there is no EEPROM externally, 348 * EEPROM automatically to PAR. In case there is no EEPROM externally,
349 * a default MAC address is stored in PAR for making chip work properly. 349 * a default MAC address is stored in PAR for making chip work properly.
350 */ 350 */
351 if (sr_read(dev, PAR, ETH_ALEN, netdev->dev_addr) < 0) { 351 if (sr_read(dev, SR_PAR, ETH_ALEN, netdev->dev_addr) < 0) {
352 netdev_err(netdev, "Error reading MAC address\n"); 352 netdev_err(netdev, "Error reading MAC address\n");
353 ret = -ENODEV; 353 ret = -ENODEV;
354 goto out; 354 goto out;
355 } 355 }
356 356
357 /* power up and reset phy */ 357 /* power up and reset phy */
358 sr_write_reg(dev, PRR, PRR_PHY_RST); 358 sr_write_reg(dev, SR_PRR, PRR_PHY_RST);
359 /* at least 10ms, here 20ms for safe */ 359 /* at least 10ms, here 20ms for safe */
360 mdelay(20); 360 mdelay(20);
361 sr_write_reg(dev, PRR, 0); 361 sr_write_reg(dev, SR_PRR, 0);
362 /* at least 1ms, here 2ms for reading right register */ 362 /* at least 1ms, here 2ms for reading right register */
363 udelay(2 * 1000); 363 udelay(2 * 1000);
364 364
diff --git a/drivers/net/usb/sr9700.h b/drivers/net/usb/sr9700.h
index fd687c575e74..258b030277e7 100644
--- a/drivers/net/usb/sr9700.h
+++ b/drivers/net/usb/sr9700.h
@@ -14,13 +14,13 @@
14/* sr9700 spec. register table on Linux platform */ 14/* sr9700 spec. register table on Linux platform */
15 15
16/* Network Control Reg */ 16/* Network Control Reg */
17#define NCR 0x00 17#define SR_NCR 0x00
18#define NCR_RST (1 << 0) 18#define NCR_RST (1 << 0)
19#define NCR_LBK (3 << 1) 19#define NCR_LBK (3 << 1)
20#define NCR_FDX (1 << 3) 20#define NCR_FDX (1 << 3)
21#define NCR_WAKEEN (1 << 6) 21#define NCR_WAKEEN (1 << 6)
22/* Network Status Reg */ 22/* Network Status Reg */
23#define NSR 0x01 23#define SR_NSR 0x01
24#define NSR_RXRDY (1 << 0) 24#define NSR_RXRDY (1 << 0)
25#define NSR_RXOV (1 << 1) 25#define NSR_RXOV (1 << 1)
26#define NSR_TX1END (1 << 2) 26#define NSR_TX1END (1 << 2)
@@ -30,7 +30,7 @@
30#define NSR_LINKST (1 << 6) 30#define NSR_LINKST (1 << 6)
31#define NSR_SPEED (1 << 7) 31#define NSR_SPEED (1 << 7)
32/* Tx Control Reg */ 32/* Tx Control Reg */
33#define TCR 0x02 33#define SR_TCR 0x02
34#define TCR_CRC_DIS (1 << 1) 34#define TCR_CRC_DIS (1 << 1)
35#define TCR_PAD_DIS (1 << 2) 35#define TCR_PAD_DIS (1 << 2)
36#define TCR_LC_CARE (1 << 3) 36#define TCR_LC_CARE (1 << 3)
@@ -38,7 +38,7 @@
38#define TCR_EXCECM (1 << 5) 38#define TCR_EXCECM (1 << 5)
39#define TCR_LF_EN (1 << 6) 39#define TCR_LF_EN (1 << 6)
40/* Tx Status Reg for Packet Index 1 */ 40/* Tx Status Reg for Packet Index 1 */
41#define TSR1 0x03 41#define SR_TSR1 0x03
42#define TSR1_EC (1 << 2) 42#define TSR1_EC (1 << 2)
43#define TSR1_COL (1 << 3) 43#define TSR1_COL (1 << 3)
44#define TSR1_LC (1 << 4) 44#define TSR1_LC (1 << 4)
@@ -46,7 +46,7 @@
46#define TSR1_LOC (1 << 6) 46#define TSR1_LOC (1 << 6)
47#define TSR1_TLF (1 << 7) 47#define TSR1_TLF (1 << 7)
48/* Tx Status Reg for Packet Index 2 */ 48/* Tx Status Reg for Packet Index 2 */
49#define TSR2 0x04 49#define SR_TSR2 0x04
50#define TSR2_EC (1 << 2) 50#define TSR2_EC (1 << 2)
51#define TSR2_COL (1 << 3) 51#define TSR2_COL (1 << 3)
52#define TSR2_LC (1 << 4) 52#define TSR2_LC (1 << 4)
@@ -54,7 +54,7 @@
54#define TSR2_LOC (1 << 6) 54#define TSR2_LOC (1 << 6)
55#define TSR2_TLF (1 << 7) 55#define TSR2_TLF (1 << 7)
56/* Rx Control Reg*/ 56/* Rx Control Reg*/
57#define RCR 0x05 57#define SR_RCR 0x05
58#define RCR_RXEN (1 << 0) 58#define RCR_RXEN (1 << 0)
59#define RCR_PRMSC (1 << 1) 59#define RCR_PRMSC (1 << 1)
60#define RCR_RUNT (1 << 2) 60#define RCR_RUNT (1 << 2)
@@ -62,87 +62,87 @@
62#define RCR_DIS_CRC (1 << 4) 62#define RCR_DIS_CRC (1 << 4)
63#define RCR_DIS_LONG (1 << 5) 63#define RCR_DIS_LONG (1 << 5)
64/* Rx Status Reg */ 64/* Rx Status Reg */
65#define RSR 0x06 65#define SR_RSR 0x06
66#define RSR_AE (1 << 2) 66#define RSR_AE (1 << 2)
67#define RSR_MF (1 << 6) 67#define RSR_MF (1 << 6)
68#define RSR_RF (1 << 7) 68#define RSR_RF (1 << 7)
69/* Rx Overflow Counter Reg */ 69/* Rx Overflow Counter Reg */
70#define ROCR 0x07 70#define SR_ROCR 0x07
71#define ROCR_ROC (0x7F << 0) 71#define ROCR_ROC (0x7F << 0)
72#define ROCR_RXFU (1 << 7) 72#define ROCR_RXFU (1 << 7)
73/* Back Pressure Threshold Reg */ 73/* Back Pressure Threshold Reg */
74#define BPTR 0x08 74#define SR_BPTR 0x08
75#define BPTR_JPT (0x0F << 0) 75#define BPTR_JPT (0x0F << 0)
76#define BPTR_BPHW (0x0F << 4) 76#define BPTR_BPHW (0x0F << 4)
77/* Flow Control Threshold Reg */ 77/* Flow Control Threshold Reg */
78#define FCTR 0x09 78#define SR_FCTR 0x09
79#define FCTR_LWOT (0x0F << 0) 79#define FCTR_LWOT (0x0F << 0)
80#define FCTR_HWOT (0x0F << 4) 80#define FCTR_HWOT (0x0F << 4)
81/* rx/tx Flow Control Reg */ 81/* rx/tx Flow Control Reg */
82#define FCR 0x0A 82#define SR_FCR 0x0A
83#define FCR_FLCE (1 << 0) 83#define FCR_FLCE (1 << 0)
84#define FCR_BKPA (1 << 4) 84#define FCR_BKPA (1 << 4)
85#define FCR_TXPEN (1 << 5) 85#define FCR_TXPEN (1 << 5)
86#define FCR_TXPF (1 << 6) 86#define FCR_TXPF (1 << 6)
87#define FCR_TXP0 (1 << 7) 87#define FCR_TXP0 (1 << 7)
88/* Eeprom & Phy Control Reg */ 88/* Eeprom & Phy Control Reg */
89#define EPCR 0x0B 89#define SR_EPCR 0x0B
90#define EPCR_ERRE (1 << 0) 90#define EPCR_ERRE (1 << 0)
91#define EPCR_ERPRW (1 << 1) 91#define EPCR_ERPRW (1 << 1)
92#define EPCR_ERPRR (1 << 2) 92#define EPCR_ERPRR (1 << 2)
93#define EPCR_EPOS (1 << 3) 93#define EPCR_EPOS (1 << 3)
94#define EPCR_WEP (1 << 4) 94#define EPCR_WEP (1 << 4)
95/* Eeprom & Phy Address Reg */ 95/* Eeprom & Phy Address Reg */
96#define EPAR 0x0C 96#define SR_EPAR 0x0C
97#define EPAR_EROA (0x3F << 0) 97#define EPAR_EROA (0x3F << 0)
98#define EPAR_PHY_ADR_MASK (0x03 << 6) 98#define EPAR_PHY_ADR_MASK (0x03 << 6)
99#define EPAR_PHY_ADR (0x01 << 6) 99#define EPAR_PHY_ADR (0x01 << 6)
100/* Eeprom & Phy Data Reg */ 100/* Eeprom & Phy Data Reg */
101#define EPDR 0x0D /* 0x0D ~ 0x0E for Data Reg Low & High */ 101#define SR_EPDR 0x0D /* 0x0D ~ 0x0E for Data Reg Low & High */
102/* Wakeup Control Reg */ 102/* Wakeup Control Reg */
103#define WCR 0x0F 103#define SR_WCR 0x0F
104#define WCR_MAGICST (1 << 0) 104#define WCR_MAGICST (1 << 0)
105#define WCR_LINKST (1 << 2) 105#define WCR_LINKST (1 << 2)
106#define WCR_MAGICEN (1 << 3) 106#define WCR_MAGICEN (1 << 3)
107#define WCR_LINKEN (1 << 5) 107#define WCR_LINKEN (1 << 5)
108/* Physical Address Reg */ 108/* Physical Address Reg */
109#define PAR 0x10 /* 0x10 ~ 0x15 6 bytes for PAR */ 109#define SR_PAR 0x10 /* 0x10 ~ 0x15 6 bytes for PAR */
110/* Multicast Address Reg */ 110/* Multicast Address Reg */
111#define MAR 0x16 /* 0x16 ~ 0x1D 8 bytes for MAR */ 111#define SR_MAR 0x16 /* 0x16 ~ 0x1D 8 bytes for MAR */
112/* 0x1e unused */ 112/* 0x1e unused */
113/* Phy Reset Reg */ 113/* Phy Reset Reg */
114#define PRR 0x1F 114#define SR_PRR 0x1F
115#define PRR_PHY_RST (1 << 0) 115#define PRR_PHY_RST (1 << 0)
116/* Tx sdram Write Pointer Address Low */ 116/* Tx sdram Write Pointer Address Low */
117#define TWPAL 0x20 117#define SR_TWPAL 0x20
118/* Tx sdram Write Pointer Address High */ 118/* Tx sdram Write Pointer Address High */
119#define TWPAH 0x21 119#define SR_TWPAH 0x21
120/* Tx sdram Read Pointer Address Low */ 120/* Tx sdram Read Pointer Address Low */
121#define TRPAL 0x22 121#define SR_TRPAL 0x22
122/* Tx sdram Read Pointer Address High */ 122/* Tx sdram Read Pointer Address High */
123#define TRPAH 0x23 123#define SR_TRPAH 0x23
124/* Rx sdram Write Pointer Address Low */ 124/* Rx sdram Write Pointer Address Low */
125#define RWPAL 0x24 125#define SR_RWPAL 0x24
126/* Rx sdram Write Pointer Address High */ 126/* Rx sdram Write Pointer Address High */
127#define RWPAH 0x25 127#define SR_RWPAH 0x25
128/* Rx sdram Read Pointer Address Low */ 128/* Rx sdram Read Pointer Address Low */
129#define RRPAL 0x26 129#define SR_RRPAL 0x26
130/* Rx sdram Read Pointer Address High */ 130/* Rx sdram Read Pointer Address High */
131#define RRPAH 0x27 131#define SR_RRPAH 0x27
132/* Vendor ID register */ 132/* Vendor ID register */
133#define VID 0x28 /* 0x28 ~ 0x29 2 bytes for VID */ 133#define SR_VID 0x28 /* 0x28 ~ 0x29 2 bytes for VID */
134/* Product ID register */ 134/* Product ID register */
135#define PID 0x2A /* 0x2A ~ 0x2B 2 bytes for PID */ 135#define SR_PID 0x2A /* 0x2A ~ 0x2B 2 bytes for PID */
136/* CHIP Revision register */ 136/* CHIP Revision register */
137#define CHIPR 0x2C 137#define SR_CHIPR 0x2C
138/* 0x2D --> 0xEF unused */ 138/* 0x2D --> 0xEF unused */
139/* USB Device Address */ 139/* USB Device Address */
140#define USBDA 0xF0 140#define SR_USBDA 0xF0
141#define USBDA_USBFA (0x7F << 0) 141#define USBDA_USBFA (0x7F << 0)
142/* RX packet Counter Reg */ 142/* RX packet Counter Reg */
143#define RXC 0xF1 143#define SR_RXC 0xF1
144/* Tx packet Counter & USB Status Reg */ 144/* Tx packet Counter & USB Status Reg */
145#define TXC_USBS 0xF2 145#define SR_TXC_USBS 0xF2
146#define TXC_USBS_TXC0 (1 << 0) 146#define TXC_USBS_TXC0 (1 << 0)
147#define TXC_USBS_TXC1 (1 << 1) 147#define TXC_USBS_TXC1 (1 << 1)
148#define TXC_USBS_TXC2 (1 << 2) 148#define TXC_USBS_TXC2 (1 << 2)
@@ -150,7 +150,7 @@
150#define TXC_USBS_SUSFLAG (1 << 6) 150#define TXC_USBS_SUSFLAG (1 << 6)
151#define TXC_USBS_RXFAULT (1 << 7) 151#define TXC_USBS_RXFAULT (1 << 7)
152/* USB Control register */ 152/* USB Control register */
153#define USBC 0xF4 153#define SR_USBC 0xF4
154#define USBC_EP3NAK (1 << 4) 154#define USBC_EP3NAK (1 << 4)
155#define USBC_EP3ACK (1 << 5) 155#define USBC_EP3ACK (1 << 5)
156 156
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 9bd71d53c5e0..110a2cf67244 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -490,17 +490,8 @@ static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
490 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; 490 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
491 break; 491 break;
492 case VIRTIO_NET_HDR_GSO_UDP: 492 case VIRTIO_NET_HDR_GSO_UDP:
493 {
494 static bool warned;
495
496 if (!warned) {
497 warned = true;
498 netdev_warn(dev,
499 "host using disabled UFO feature; please fix it\n");
500 }
501 skb_shinfo(skb)->gso_type = SKB_GSO_UDP; 493 skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
502 break; 494 break;
503 }
504 case VIRTIO_NET_HDR_GSO_TCPV6: 495 case VIRTIO_NET_HDR_GSO_TCPV6:
505 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; 496 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
506 break; 497 break;
@@ -888,6 +879,8 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
888 hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4; 879 hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
889 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) 880 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
890 hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6; 881 hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
882 else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
883 hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP;
891 else 884 else
892 BUG(); 885 BUG();
893 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN) 886 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN)
@@ -1752,7 +1745,7 @@ static int virtnet_probe(struct virtio_device *vdev)
1752 dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST; 1745 dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
1753 1746
1754 if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) { 1747 if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
1755 dev->hw_features |= NETIF_F_TSO 1748 dev->hw_features |= NETIF_F_TSO | NETIF_F_UFO
1756 | NETIF_F_TSO_ECN | NETIF_F_TSO6; 1749 | NETIF_F_TSO_ECN | NETIF_F_TSO6;
1757 } 1750 }
1758 /* Individual feature bits: what can host handle? */ 1751 /* Individual feature bits: what can host handle? */
@@ -1762,11 +1755,13 @@ static int virtnet_probe(struct virtio_device *vdev)
1762 dev->hw_features |= NETIF_F_TSO6; 1755 dev->hw_features |= NETIF_F_TSO6;
1763 if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN)) 1756 if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
1764 dev->hw_features |= NETIF_F_TSO_ECN; 1757 dev->hw_features |= NETIF_F_TSO_ECN;
1758 if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO))
1759 dev->hw_features |= NETIF_F_UFO;
1765 1760
1766 dev->features |= NETIF_F_GSO_ROBUST; 1761 dev->features |= NETIF_F_GSO_ROBUST;
1767 1762
1768 if (gso) 1763 if (gso)
1769 dev->features |= dev->hw_features & NETIF_F_ALL_TSO; 1764 dev->features |= dev->hw_features & (NETIF_F_ALL_TSO|NETIF_F_UFO);
1770 /* (!csum && gso) case will be fixed by register_netdev() */ 1765 /* (!csum && gso) case will be fixed by register_netdev() */
1771 } 1766 }
1772 if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_CSUM)) 1767 if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_CSUM))
@@ -1804,7 +1799,8 @@ static int virtnet_probe(struct virtio_device *vdev)
1804 /* If we can receive ANY GSO packets, we must allocate large ones. */ 1799 /* If we can receive ANY GSO packets, we must allocate large ones. */
1805 if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) || 1800 if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
1806 virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) || 1801 virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) ||
1807 virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN)) 1802 virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN) ||
1803 virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_UFO))
1808 vi->big_packets = true; 1804 vi->big_packets = true;
1809 1805
1810 if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) 1806 if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
@@ -2000,9 +1996,9 @@ static struct virtio_device_id id_table[] = {
2000static unsigned int features[] = { 1996static unsigned int features[] = {
2001 VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM, 1997 VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM,
2002 VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC, 1998 VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC,
2003 VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_TSO6, 1999 VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6,
2004 VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, 2000 VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6,
2005 VIRTIO_NET_F_GUEST_ECN, 2001 VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO,
2006 VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ, 2002 VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ,
2007 VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, 2003 VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN,
2008 VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, 2004 VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ,
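
Note on the virtio_net hunks: the same revert re-advertises UFO in both directions of the virtio feature negotiation — NETIF_F_UFO when the host accepts UFO frames from the guest, and large receive buffers when the host may deliver them. Condensed from the hunks above:

    if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO))
        dev->hw_features |= NETIF_F_UFO;   /* guest may transmit UFO */

    if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_UFO))
        vi->big_packets = true;            /* host may hand us big UDP GSO frames */
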
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index d08072c10aa9..e6ed3e66964d 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -2625,10 +2625,10 @@ static void vxlan_sock_work(struct work_struct *work)
2625 dev_put(vxlan->dev); 2625 dev_put(vxlan->dev);
2626} 2626}
2627 2627
2628static int vxlan_newlink(struct net *net, struct net_device *dev, 2628static int vxlan_newlink(struct net *src_net, struct net_device *dev,
2629 struct nlattr *tb[], struct nlattr *data[]) 2629 struct nlattr *tb[], struct nlattr *data[])
2630{ 2630{
2631 struct vxlan_net *vn = net_generic(net, vxlan_net_id); 2631 struct vxlan_net *vn = net_generic(src_net, vxlan_net_id);
2632 struct vxlan_dev *vxlan = netdev_priv(dev); 2632 struct vxlan_dev *vxlan = netdev_priv(dev);
2633 struct vxlan_rdst *dst = &vxlan->default_dst; 2633 struct vxlan_rdst *dst = &vxlan->default_dst;
2634 __u32 vni; 2634 __u32 vni;
@@ -2638,7 +2638,7 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
2638 if (!data[IFLA_VXLAN_ID]) 2638 if (!data[IFLA_VXLAN_ID])
2639 return -EINVAL; 2639 return -EINVAL;
2640 2640
2641 vxlan->net = dev_net(dev); 2641 vxlan->net = src_net;
2642 2642
2643 vni = nla_get_u32(data[IFLA_VXLAN_ID]); 2643 vni = nla_get_u32(data[IFLA_VXLAN_ID]);
2644 dst->remote_vni = vni; 2644 dst->remote_vni = vni;
@@ -2674,7 +2674,7 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
2674 if (data[IFLA_VXLAN_LINK] && 2674 if (data[IFLA_VXLAN_LINK] &&
2675 (dst->remote_ifindex = nla_get_u32(data[IFLA_VXLAN_LINK]))) { 2675 (dst->remote_ifindex = nla_get_u32(data[IFLA_VXLAN_LINK]))) {
2676 struct net_device *lowerdev 2676 struct net_device *lowerdev
2677 = __dev_get_by_index(net, dst->remote_ifindex); 2677 = __dev_get_by_index(src_net, dst->remote_ifindex);
2678 2678
2679 if (!lowerdev) { 2679 if (!lowerdev) {
2680 pr_info("ifindex %d does not exist\n", dst->remote_ifindex); 2680 pr_info("ifindex %d does not exist\n", dst->remote_ifindex);
@@ -2761,7 +2761,7 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
2761 if (data[IFLA_VXLAN_GBP]) 2761 if (data[IFLA_VXLAN_GBP])
2762 vxlan->flags |= VXLAN_F_GBP; 2762 vxlan->flags |= VXLAN_F_GBP;
2763 2763
2764 if (vxlan_find_vni(net, vni, use_ipv6 ? AF_INET6 : AF_INET, 2764 if (vxlan_find_vni(src_net, vni, use_ipv6 ? AF_INET6 : AF_INET,
2765 vxlan->dst_port, vxlan->flags)) { 2765 vxlan->dst_port, vxlan->flags)) {
2766 pr_info("duplicate VNI %u\n", vni); 2766 pr_info("duplicate VNI %u\n", vni);
2767 return -EEXIST; 2767 return -EEXIST;
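
Note on the vxlan hunks: src_net — the namespace the netlink request targets, e.g. "ip link add ... netns X" — is what must be used for the lowerdev lookup and the duplicate-VNI check, not dev_net(dev), which can differ while the device is still being created. Shape of a namespace-correct newlink handler (attribute name hypothetical):

    static int example_newlink(struct net *src_net, struct net_device *dev,
                               struct nlattr *tb[], struct nlattr *data[])
    {
        int ifindex = nla_get_u32(data[IFLA_EXAMPLE_LINK]); /* hypothetical */

        /* resolve the lower device in the namespace of the request */
        if (!__dev_get_by_index(src_net, ifindex))
            return -ENODEV;

        return 0;
    }
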
diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig
index 94e234975c61..a2fdd15f285a 100644
--- a/drivers/net/wan/Kconfig
+++ b/drivers/net/wan/Kconfig
@@ -25,7 +25,7 @@ if WAN
 # There is no way to detect a comtrol sv11 - force it modular for now.
 config HOSTESS_SV11
 	tristate "Comtrol Hostess SV-11 support"
-	depends on ISA && m && ISA_DMA_API && INET && HDLC
+	depends on ISA && m && ISA_DMA_API && INET && HDLC && VIRT_TO_BUS
 	help
 	  Driver for Comtrol Hostess SV-11 network card which
 	  operates on low speed synchronous serial links at up to
@@ -37,7 +37,7 @@ config HOSTESS_SV11
 # The COSA/SRP driver has not been tested as non-modular yet.
 config COSA
 	tristate "COSA/SRP sync serial boards support"
-	depends on ISA && m && ISA_DMA_API && HDLC
+	depends on ISA && m && ISA_DMA_API && HDLC && VIRT_TO_BUS
 	---help---
 	  Driver for COSA and SRP synchronous serial boards.
 
@@ -87,7 +87,7 @@ config LANMEDIA
 # There is no way to detect a Sealevel board. Force it modular
 config SEALEVEL_4021
 	tristate "Sealevel Systems 4021 support"
-	depends on ISA && m && ISA_DMA_API && INET && HDLC
+	depends on ISA && m && ISA_DMA_API && INET && HDLC && VIRT_TO_BUS
 	help
 	  This is a driver for the Sealevel Systems ACB 56 serial I/O adapter.
 
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 9259a732e8a4..037f74f0fcf6 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -578,6 +578,7 @@ int xenvif_connect(struct xenvif_queue *queue, unsigned long tx_ring_ref,
 		goto err_rx_unbind;
 	}
 	queue->task = task;
+	get_task_struct(task);
 
 	task = kthread_create(xenvif_dealloc_kthread,
 			      (void *)queue, "%s-dealloc", queue->name);
@@ -634,6 +635,7 @@ void xenvif_disconnect(struct xenvif *vif)
 
 		if (queue->task) {
 			kthread_stop(queue->task);
+			put_task_struct(queue->task);
 			queue->task = NULL;
 		}
 
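The interface.c change pins the RX kthread's task_struct with get_task_struct() so the pointer cached in queue->task stays valid until xenvif_disconnect() has finished kthread_stop(); without the extra reference, a thread that exits on its own could free the task underneath the stop path. A rough userspace sketch of that pin/unpin pattern using C11 atomics (struct obj and its helpers are invented stand-ins):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Toy stand-in for struct task_struct with a usage refcount. */
struct obj {
	atomic_int usage;
	int id;
};

static struct obj *obj_alloc(int id)
{
	struct obj *o = malloc(sizeof(*o));
	atomic_init(&o->usage, 1);	/* the thread's own reference */
	o->id = id;
	return o;
}

static void obj_get(struct obj *o)
{
	atomic_fetch_add(&o->usage, 1);
}

static void obj_put(struct obj *o)
{
	/* free when the last reference is dropped */
	if (atomic_fetch_sub(&o->usage, 1) == 1) {
		printf("obj %d freed\n", o->id);
		free(o);
	}
}

int main(void)
{
	struct obj *task = obj_alloc(1);

	obj_get(task);	/* pin: mirrors get_task_struct(task) */
	obj_put(task);	/* the thread exits and drops its own reference */
	/* our pinned reference still keeps the object alive here */
	obj_put(task);	/* mirrors put_task_struct() after kthread_stop() */
	return 0;
}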
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 49322b6c32df..13899d5099e5 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -2008,8 +2008,7 @@ int xenvif_kthread_guest_rx(void *data)
 		 */
 		if (unlikely(vif->disabled && queue->id == 0)) {
 			xenvif_carrier_off(vif);
-			xenvif_rx_queue_purge(queue);
-			continue;
+			break;
 		}
 
 		if (!skb_queue_empty(&queue->rx_queue))
diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c
index df781cdf13c1..17ca98657a28 100644
--- a/drivers/pci/host/pcie-designware.c
+++ b/drivers/pci/host/pcie-designware.c
@@ -283,6 +283,9 @@ static int dw_msi_setup_irq(struct msi_controller *chip, struct pci_dev *pdev,
 	struct msi_msg msg;
 	struct pcie_port *pp = sys_to_pcie(pdev->bus->sysdata);
 
+	if (desc->msi_attrib.is_msix)
+		return -EINVAL;
+
 	irq = assign_irq(1, desc, &pos);
 	if (irq < 0)
 		return irq;
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index e52356aa09b8..903d5078b5ed 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -324,18 +324,52 @@ static void quirk_s3_64M(struct pci_dev *dev)
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_868, quirk_s3_64M);
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_968, quirk_s3_64M);
 
+static void quirk_io(struct pci_dev *dev, int pos, unsigned size,
+		     const char *name)
+{
+	u32 region;
+	struct pci_bus_region bus_region;
+	struct resource *res = dev->resource + pos;
+
+	pci_read_config_dword(dev, PCI_BASE_ADDRESS_0 + (pos << 2), &region);
+
+	if (!region)
+		return;
+
+	res->name = pci_name(dev);
+	res->flags = region & ~PCI_BASE_ADDRESS_IO_MASK;
+	res->flags |=
+		(IORESOURCE_IO | IORESOURCE_PCI_FIXED | IORESOURCE_SIZEALIGN);
+	region &= ~(size - 1);
+
+	/* Convert from PCI bus to resource space */
+	bus_region.start = region;
+	bus_region.end = region + size - 1;
+	pcibios_bus_to_resource(dev->bus, res, &bus_region);
+
+	dev_info(&dev->dev, FW_BUG "%s quirk: reg 0x%x: %pR\n",
+		 name, PCI_BASE_ADDRESS_0 + (pos << 2), res);
+}
+
 /*
  * Some CS5536 BIOSes (for example, the Soekris NET5501 board w/ comBIOS
  * ver. 1.33 20070103) don't set the correct ISA PCI region header info.
  * BAR0 should be 8 bytes; instead, it may be set to something like 8k
  * (which conflicts w/ BAR1's memory range).
+ *
+ * CS553x's ISA PCI BARs may also be read-only (ref:
+ * https://bugzilla.kernel.org/show_bug.cgi?id=85991 - Comment #4 forward).
  */
 static void quirk_cs5536_vsa(struct pci_dev *dev)
 {
+	static char *name = "CS5536 ISA bridge";
+
 	if (pci_resource_len(dev, 0) != 8) {
-		struct resource *res = &dev->resource[0];
-		res->end = res->start + 8 - 1;
-		dev_info(&dev->dev, "CS5536 ISA bridge bug detected (incorrect header); workaround applied\n");
+		quirk_io(dev, 0, 8, name);	/* SMB */
+		quirk_io(dev, 1, 256, name);	/* GPIO */
+		quirk_io(dev, 2, 64, name);	/* MFGPT */
+		dev_info(&dev->dev, "%s bug detected (incorrect header); workaround applied\n",
+			 name);
 	}
 }
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA, quirk_cs5536_vsa);
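quirk_io() above recovers a usable resource from the raw BAR by masking the base down with region &= ~(size - 1), which is only valid because PCI BAR bases are aligned to their (power-of-two) size. A small standalone sketch of that decode step, with an invented raw BAR value:

#include <stdint.h>
#include <stdio.h>

/* Decode an I/O BAR: low bits carry flags, the base is size-aligned. */
#define PCI_BASE_ADDRESS_IO_MASK	(~0x03UL)

static uint32_t bar_base(uint32_t raw, uint32_t size)
{
	uint32_t base = raw & PCI_BASE_ADDRESS_IO_MASK;

	/* size must be a power of two for this mask to be valid */
	return base & ~(size - 1);
}

int main(void)
{
	/* hypothetical raw BAR: I/O space bit set, firmware left stray low bits */
	uint32_t raw = 0x6101;

	printf("8-byte region base:   0x%x\n", bar_base(raw, 8));	/* 0x6100 */
	printf("256-byte region base: 0x%x\n", bar_base(raw, 256));	/* 0x6100 */
	return 0;
}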
diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c
index dfd021e8268f..f4cd0b9b2438 100644
--- a/drivers/pinctrl/pinctrl-at91.c
+++ b/drivers/pinctrl/pinctrl-at91.c
@@ -177,7 +177,7 @@ struct at91_pinctrl {
 	struct device		*dev;
 	struct pinctrl_dev	*pctl;
 
-	int			nbanks;
+	int			nactive_banks;
 
 	uint32_t		*mux_mask;
 	int			nmux;
@@ -653,12 +653,18 @@ static int pin_check_config(struct at91_pinctrl *info, const char *name,
 	int mux;
 
 	/* check if it's a valid config */
-	if (pin->bank >= info->nbanks) {
+	if (pin->bank >= gpio_banks) {
 		dev_err(info->dev, "%s: pin conf %d bank_id %d >= nbanks %d\n",
-			name, index, pin->bank, info->nbanks);
+			name, index, pin->bank, gpio_banks);
 		return -EINVAL;
 	}
 
+	if (!gpio_chips[pin->bank]) {
+		dev_err(info->dev, "%s: pin conf %d bank_id %d not enabled\n",
+			name, index, pin->bank);
+		return -ENXIO;
+	}
+
 	if (pin->pin >= MAX_NB_GPIO_PER_BANK) {
 		dev_err(info->dev, "%s: pin conf %d pin_bank_id %d >= %d\n",
 			name, index, pin->pin, MAX_NB_GPIO_PER_BANK);
@@ -981,7 +987,8 @@ static void at91_pinctrl_child_count(struct at91_pinctrl *info,
 
 	for_each_child_of_node(np, child) {
 		if (of_device_is_compatible(child, gpio_compat)) {
-			info->nbanks++;
+			if (of_device_is_available(child))
+				info->nactive_banks++;
 		} else {
 			info->nfunctions++;
 			info->ngroups += of_get_child_count(child);
@@ -1003,11 +1010,11 @@ static int at91_pinctrl_mux_mask(struct at91_pinctrl *info,
 	}
 
 	size /= sizeof(*list);
-	if (!size || size % info->nbanks) {
-		dev_err(info->dev, "wrong mux mask array should be by %d\n", info->nbanks);
+	if (!size || size % gpio_banks) {
+		dev_err(info->dev, "wrong mux mask array should be by %d\n", gpio_banks);
 		return -EINVAL;
 	}
-	info->nmux = size / info->nbanks;
+	info->nmux = size / gpio_banks;
 
 	info->mux_mask = devm_kzalloc(info->dev, sizeof(u32) * size, GFP_KERNEL);
 	if (!info->mux_mask) {
@@ -1131,7 +1138,7 @@ static int at91_pinctrl_probe_dt(struct platform_device *pdev,
 		of_match_device(at91_pinctrl_of_match, &pdev->dev)->data;
 	at91_pinctrl_child_count(info, np);
 
-	if (info->nbanks < 1) {
+	if (gpio_banks < 1) {
 		dev_err(&pdev->dev, "you need to specify at least one gpio-controller\n");
 		return -EINVAL;
 	}
@@ -1144,7 +1151,7 @@ static int at91_pinctrl_probe_dt(struct platform_device *pdev,
 
 	dev_dbg(&pdev->dev, "mux-mask\n");
 	tmp = info->mux_mask;
-	for (i = 0; i < info->nbanks; i++) {
+	for (i = 0; i < gpio_banks; i++) {
 		for (j = 0; j < info->nmux; j++, tmp++) {
 			dev_dbg(&pdev->dev, "%d:%d\t0x%x\n", i, j, tmp[0]);
 		}
@@ -1162,7 +1169,7 @@ static int at91_pinctrl_probe_dt(struct platform_device *pdev,
 	if (!info->groups)
 		return -ENOMEM;
 
-	dev_dbg(&pdev->dev, "nbanks = %d\n", info->nbanks);
+	dev_dbg(&pdev->dev, "nbanks = %d\n", gpio_banks);
 	dev_dbg(&pdev->dev, "nfunctions = %d\n", info->nfunctions);
 	dev_dbg(&pdev->dev, "ngroups = %d\n", info->ngroups);
 
@@ -1185,7 +1192,7 @@ static int at91_pinctrl_probe(struct platform_device *pdev)
 {
 	struct at91_pinctrl *info;
 	struct pinctrl_pin_desc *pdesc;
-	int ret, i, j, k;
+	int ret, i, j, k, ngpio_chips_enabled = 0;
 
 	info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
 	if (!info)
@@ -1200,23 +1207,27 @@ static int at91_pinctrl_probe(struct platform_device *pdev)
 	 * to obtain references to the struct gpio_chip * for them, and we
 	 * need this to proceed.
 	 */
-	for (i = 0; i < info->nbanks; i++) {
-		if (!gpio_chips[i]) {
-			dev_warn(&pdev->dev, "GPIO chip %d not registered yet\n", i);
-			devm_kfree(&pdev->dev, info);
-			return -EPROBE_DEFER;
-		}
+	for (i = 0; i < gpio_banks; i++)
+		if (gpio_chips[i])
+			ngpio_chips_enabled++;
+
+	if (ngpio_chips_enabled < info->nactive_banks) {
+		dev_warn(&pdev->dev,
+			 "All GPIO chips are not registered yet (%d/%d)\n",
+			 ngpio_chips_enabled, info->nactive_banks);
+		devm_kfree(&pdev->dev, info);
+		return -EPROBE_DEFER;
 	}
 
 	at91_pinctrl_desc.name = dev_name(&pdev->dev);
-	at91_pinctrl_desc.npins = info->nbanks * MAX_NB_GPIO_PER_BANK;
+	at91_pinctrl_desc.npins = gpio_banks * MAX_NB_GPIO_PER_BANK;
 	at91_pinctrl_desc.pins = pdesc =
 		devm_kzalloc(&pdev->dev, sizeof(*pdesc) * at91_pinctrl_desc.npins, GFP_KERNEL);
 
 	if (!at91_pinctrl_desc.pins)
 		return -ENOMEM;
 
-	for (i = 0 , k = 0; i < info->nbanks; i++) {
+	for (i = 0, k = 0; i < gpio_banks; i++) {
 		for (j = 0; j < MAX_NB_GPIO_PER_BANK; j++, k++) {
 			pdesc->number = k;
 			pdesc->name = kasprintf(GFP_KERNEL, "pio%c%d", i + 'A', j);
@@ -1234,8 +1245,9 @@ static int at91_pinctrl_probe(struct platform_device *pdev)
 	}
 
 	/* We will handle a range of GPIO pins */
-	for (i = 0; i < info->nbanks; i++)
-		pinctrl_add_gpio_range(info->pctl, &gpio_chips[i]->range);
+	for (i = 0; i < gpio_banks; i++)
+		if (gpio_chips[i])
+			pinctrl_add_gpio_range(info->pctl, &gpio_chips[i]->range);
 
 	dev_info(&pdev->dev, "initialized AT91 pinctrl driver\n");
 
@@ -1613,9 +1625,10 @@ static void gpio_irq_handler(unsigned irq, struct irq_desc *desc)
 static int at91_gpio_of_irq_setup(struct platform_device *pdev,
 				  struct at91_gpio_chip *at91_gpio)
 {
+	struct gpio_chip	*gpiochip_prev = NULL;
 	struct at91_gpio_chip	*prev = NULL;
 	struct irq_data		*d = irq_get_irq_data(at91_gpio->pioc_virq);
-	int ret;
+	int ret, i;
 
 	at91_gpio->pioc_hwirq = irqd_to_hwirq(d);
 
@@ -1641,24 +1654,33 @@ static int at91_gpio_of_irq_setup(struct platform_device *pdev,
 		return ret;
 	}
 
-	/* Setup chained handler */
-	if (at91_gpio->pioc_idx)
-		prev = gpio_chips[at91_gpio->pioc_idx - 1];
-
 	/* The top level handler handles one bank of GPIOs, except
 	 * on some SoC it can handle up to three...
 	 * We only set up the handler for the first of the list.
 	 */
-	if (prev && prev->next == at91_gpio)
+	gpiochip_prev = irq_get_handler_data(at91_gpio->pioc_virq);
+	if (!gpiochip_prev) {
+		/* Then register the chain on the parent IRQ */
+		gpiochip_set_chained_irqchip(&at91_gpio->chip,
+					     &gpio_irqchip,
+					     at91_gpio->pioc_virq,
+					     gpio_irq_handler);
 		return 0;
+	}
 
-	/* Then register the chain on the parent IRQ */
-	gpiochip_set_chained_irqchip(&at91_gpio->chip,
-				     &gpio_irqchip,
-				     at91_gpio->pioc_virq,
-				     gpio_irq_handler);
+	prev = container_of(gpiochip_prev, struct at91_gpio_chip, chip);
 
-	return 0;
+	/* we can only have 2 banks before */
+	for (i = 0; i < 2; i++) {
+		if (prev->next) {
+			prev = prev->next;
+		} else {
+			prev->next = at91_gpio;
+			return 0;
+		}
+	}
+
+	return -EINVAL;
 }
 
 /* This structure is replicated for each GPIO block allocated at probe time */
@@ -1675,24 +1697,6 @@ static struct gpio_chip at91_gpio_template = {
 	.ngpio			= MAX_NB_GPIO_PER_BANK,
 };
 
-static void at91_gpio_probe_fixup(void)
-{
-	unsigned i;
-	struct at91_gpio_chip *at91_gpio, *last = NULL;
-
-	for (i = 0; i < gpio_banks; i++) {
-		at91_gpio = gpio_chips[i];
-
-		/*
-		 * GPIO controller are grouped on some SoC:
-		 * PIOC, PIOD and PIOE can share the same IRQ line
-		 */
-		if (last && last->pioc_virq == at91_gpio->pioc_virq)
-			last->next = at91_gpio;
-		last = at91_gpio;
-	}
-}
-
 static struct of_device_id at91_gpio_of_match[] = {
 	{ .compatible = "atmel,at91sam9x5-gpio", .data = &at91sam9x5_ops, },
 	{ .compatible = "atmel,at91rm9200-gpio", .data = &at91rm9200_ops },
@@ -1805,8 +1809,6 @@ static int at91_gpio_probe(struct platform_device *pdev)
 	gpio_chips[alias_idx] = at91_chip;
 	gpio_banks = max(gpio_banks, alias_idx + 1);
 
-	at91_gpio_probe_fixup();
-
 	ret = at91_gpio_of_irq_setup(pdev, at91_chip);
 	if (ret)
 		goto irq_setup_err;
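The reworked at91_gpio_of_irq_setup() threads banks that share one parent IRQ into a short singly linked list at probe time, walking at most two existing followers before rejecting the setup. A toy version of that bounded append (struct bank and chain_append are invented):

#include <stdio.h>
#include <stddef.h>

struct bank {
	const char *name;
	struct bank *next;
};

/* Append 'new' after 'head', allowing at most two existing followers. */
static int chain_append(struct bank *head, struct bank *new)
{
	struct bank *prev = head;

	for (int i = 0; i < 2; i++) {
		if (prev->next) {
			prev = prev->next;
		} else {
			prev->next = new;
			return 0;
		}
	}
	return -1;	/* more than three banks on one IRQ: reject */
}

int main(void)
{
	struct bank c = { "PIOC", NULL }, d = { "PIOD", NULL }, e = { "PIOE", NULL };

	chain_append(&c, &d);
	chain_append(&c, &e);
	for (struct bank *b = &c; b; b = b->next)
		printf("%s\n", b->name);
	return 0;
}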
diff --git a/drivers/scsi/device_handler/scsi_dh.c b/drivers/scsi/device_handler/scsi_dh.c
index 1dba62c5cf6a..1efebc9eedfb 100644
--- a/drivers/scsi/device_handler/scsi_dh.c
+++ b/drivers/scsi/device_handler/scsi_dh.c
@@ -136,11 +136,12 @@ static void __detach_handler (struct kref *kref)
 	struct scsi_device_handler *scsi_dh = scsi_dh_data->scsi_dh;
 	struct scsi_device *sdev = scsi_dh_data->sdev;
 
+	scsi_dh->detach(sdev);
+
 	spin_lock_irq(sdev->request_queue->queue_lock);
 	sdev->scsi_dh_data = NULL;
 	spin_unlock_irq(sdev->request_queue->queue_lock);
 
-	scsi_dh->detach(sdev);
 	sdev_printk(KERN_NOTICE, sdev, "%s: Detached\n", scsi_dh->name);
 	module_put(scsi_dh->module);
 }
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 399516925d80..05ea0d49a3a3 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -2800,9 +2800,11 @@ static int sd_revalidate_disk(struct gendisk *disk)
 	 */
 	sd_set_flush_flag(sdkp);
 
-	max_xfer = min_not_zero(queue_max_hw_sectors(sdkp->disk->queue),
-				sdkp->max_xfer_blocks);
+	max_xfer = sdkp->max_xfer_blocks;
 	max_xfer <<= ilog2(sdp->sector_size) - 9;
+
+	max_xfer = min_not_zero(queue_max_hw_sectors(sdkp->disk->queue),
+				max_xfer);
 	blk_queue_max_hw_sectors(sdkp->disk->queue, max_xfer);
 	set_capacity(disk, sdkp->capacity);
 	sd_config_write_same(sdkp);
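The sd.c hunk reorders a unit conversion: the device limit is first converted from logical blocks to 512-byte sectors and only then clamped against the queue's hard-sector limit; clamping first compared mismatched units and could inflate the final limit past what the HBA allows. A runnable sketch with invented numbers (min_not_zero mirrors the kernel helper, which treats zero as "no limit"):

#include <stdio.h>
#include <stdint.h>

/* Kernel-style min_not_zero: ignore a zero ("no limit") operand. */
static uint32_t min_not_zero(uint32_t x, uint32_t y)
{
	if (x == 0)
		return y;
	if (y == 0)
		return x;
	return x < y ? x : y;
}

int main(void)
{
	uint32_t queue_max = 65535;	/* HBA limit, in 512-byte sectors */
	uint32_t dev_blocks = 65535;	/* device limit, in logical blocks */
	int shift = 3;			/* 4096-byte sectors: ilog2(4096) - 9 */

	/* buggy order: clamp in mismatched units, then shift past the HBA cap */
	uint32_t bad = min_not_zero(queue_max, dev_blocks) << shift;

	/* fixed order: convert to sectors first, then clamp */
	uint32_t good = min_not_zero(queue_max, dev_blocks << shift);

	printf("buggy: %u sectors, fixed: %u sectors\n", bad, good);
	return 0;
}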
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
index 4cda994d3f40..9b80d54d4ddb 100644
--- a/drivers/spi/spi-fsl-dspi.c
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -342,8 +342,7 @@ static int dspi_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
 	/* Only alloc on first setup */
 	chip = spi_get_ctldata(spi);
 	if (chip == NULL) {
-		chip = devm_kzalloc(&spi->dev, sizeof(struct chip_data),
-				GFP_KERNEL);
+		chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
 		if (!chip)
 			return -ENOMEM;
 	}
@@ -382,6 +381,16 @@ static int dspi_setup(struct spi_device *spi)
 	return dspi_setup_transfer(spi, NULL);
 }
 
+static void dspi_cleanup(struct spi_device *spi)
+{
+	struct chip_data *chip = spi_get_ctldata((struct spi_device *)spi);
+
+	dev_dbg(&spi->dev, "spi_device %u.%u cleanup\n",
+			spi->master->bus_num, spi->chip_select);
+
+	kfree(chip);
+}
+
 static irqreturn_t dspi_interrupt(int irq, void *dev_id)
 {
 	struct fsl_dspi *dspi = (struct fsl_dspi *)dev_id;
@@ -467,6 +476,7 @@ static int dspi_probe(struct platform_device *pdev)
 	dspi->bitbang.master->setup = dspi_setup;
 	dspi->bitbang.master->dev.of_node = pdev->dev.of_node;
 
+	master->cleanup = dspi_cleanup;
 	master->mode_bits = SPI_CPOL | SPI_CPHA;
 	master->bits_per_word_mask = SPI_BPW_MASK(4) | SPI_BPW_MASK(8) |
 					SPI_BPW_MASK(16);
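The dspi hunks move per-chip state from devm_kzalloc(), whose lifetime is tied to the device, to a plain kzalloc() that the SPI core frees through the new master->cleanup callback; setup may run many times, so the allocation is reused rather than re-tied to device-managed memory. A rough userspace model of that setup/cleanup ownership (struct device here is a toy, not the kernel's):

#include <stdio.h>
#include <stdlib.h>

/* Toy model of SPI ctldata: allocate on first setup, free in cleanup. */
struct chip_state {
	int configured;
};

struct device {
	struct chip_state *ctldata;	/* mirrors spi_get/set_ctldata() */
};

static int setup(struct device *dev)
{
	struct chip_state *chip = dev->ctldata;

	if (chip == NULL) {		/* only alloc on first setup */
		chip = calloc(1, sizeof(*chip));
		if (!chip)
			return -1;
		dev->ctldata = chip;
	}
	chip->configured = 1;
	return 0;
}

static void cleanup(struct device *dev)
{
	free(dev->ctldata);		/* core calls this when the device goes */
	dev->ctldata = NULL;
}

int main(void)
{
	struct device dev = { NULL };

	setup(&dev);
	setup(&dev);	/* second setup reuses the same allocation */
	cleanup(&dev);
	printf("ctldata released: %s\n", dev.ctldata ? "no" : "yes");
	return 0;
}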
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index 961b97d43b43..fe1b7699fab6 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -823,6 +823,10 @@ static int spi_imx_sdma_init(struct device *dev, struct spi_imx_data *spi_imx,
 	struct dma_slave_config slave_config = {};
 	int ret;
 
+	/* use pio mode for i.mx6dl chip TKT238285 */
+	if (of_machine_is_compatible("fsl,imx6dl"))
+		return 0;
+
 	/* Prepare for TX DMA: */
 	master->dma_tx = dma_request_slave_channel(dev, "tx");
 	if (!master->dma_tx) {
diff --git a/drivers/staging/lustre/lustre/llite/vvp_io.c b/drivers/staging/lustre/lustre/llite/vvp_io.c
index 930f6010203e..65d610abe06e 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_io.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_io.c
@@ -632,7 +632,7 @@ static int vvp_io_kernel_fault(struct vvp_fault_io *cfio)
 		return 0;
 	}
 
-	if (cfio->fault.ft_flags & VM_FAULT_SIGBUS) {
+	if (cfio->fault.ft_flags & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) {
 		CDEBUG(D_PAGE, "got addr %p - SIGBUS\n", vmf->virtual_address);
 		return -EFAULT;
 	}
diff --git a/drivers/staging/nvec/nvec.c b/drivers/staging/nvec/nvec.c
index 093535c6217b..120b70d72d79 100644
--- a/drivers/staging/nvec/nvec.c
+++ b/drivers/staging/nvec/nvec.c
@@ -85,23 +85,20 @@ static struct nvec_chip *nvec_power_handle;
 static const struct mfd_cell nvec_devices[] = {
 	{
 		.name = "nvec-kbd",
-		.id = 1,
 	},
 	{
 		.name = "nvec-mouse",
-		.id = 1,
 	},
 	{
 		.name = "nvec-power",
-		.id = 1,
+		.id = 0,
 	},
 	{
 		.name = "nvec-power",
-		.id = 2,
+		.id = 1,
 	},
 	{
 		.name = "nvec-paz00",
-		.id = 1,
 	},
 };
 
@@ -891,7 +888,7 @@ static int tegra_nvec_probe(struct platform_device *pdev)
 		nvec_msg_free(nvec, msg);
 	}
 
-	ret = mfd_add_devices(nvec->dev, -1, nvec_devices,
+	ret = mfd_add_devices(nvec->dev, 0, nvec_devices,
 			      ARRAY_SIZE(nvec_devices), NULL, 0, NULL);
 	if (ret)
 		dev_err(nvec->dev, "error adding subdevices\n");
diff --git a/drivers/usb/core/otg_whitelist.h b/drivers/usb/core/otg_whitelist.h
index de0c9c9d7091..a6315abe7b7c 100644
--- a/drivers/usb/core/otg_whitelist.h
+++ b/drivers/usb/core/otg_whitelist.h
@@ -55,6 +55,11 @@ static int is_targeted(struct usb_device *dev)
 	     le16_to_cpu(dev->descriptor.idProduct) == 0xbadd))
 		return 0;
 
+	/* OTG PET device is always targeted (see OTG 2.0 ECN 6.4.2) */
+	if ((le16_to_cpu(dev->descriptor.idVendor) == 0x1a0a &&
+	     le16_to_cpu(dev->descriptor.idProduct) == 0x0200))
+		return 1;
+
 	/* NOTE: can't use usb_match_id() since interface caches
 	 * aren't set up yet. this is cut/paste from that code.
 	 */
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 0ffb4ed0a945..41e510ae8c83 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -179,6 +179,10 @@ static const struct usb_device_id usb_quirk_list[] = {
 	{ USB_DEVICE(0x0b05, 0x17e0), .driver_info =
 			USB_QUIRK_IGNORE_REMOTE_WAKEUP },
 
+	/* Protocol and OTG Electrical Test Device */
+	{ USB_DEVICE(0x1a0a, 0x0200), .driver_info =
+			USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL },
+
 	{ }	/* terminating entry must be last */
 };
 
diff --git a/drivers/usb/dwc2/core_intr.c b/drivers/usb/dwc2/core_intr.c
index ad43c5bc1ef1..02e3e2d4ea56 100644
--- a/drivers/usb/dwc2/core_intr.c
+++ b/drivers/usb/dwc2/core_intr.c
@@ -476,13 +476,13 @@ irqreturn_t dwc2_handle_common_intr(int irq, void *dev)
 	u32 gintsts;
 	irqreturn_t retval = IRQ_NONE;
 
+	spin_lock(&hsotg->lock);
+
 	if (!dwc2_is_controller_alive(hsotg)) {
 		dev_warn(hsotg->dev, "Controller is dead\n");
 		goto out;
 	}
 
-	spin_lock(&hsotg->lock);
-
 	gintsts = dwc2_read_common_intr(hsotg);
 	if (gintsts & ~GINTSTS_PRTINT)
 		retval = IRQ_HANDLED;
@@ -515,8 +515,8 @@ irqreturn_t dwc2_handle_common_intr(int irq, void *dev)
 		}
 	}
 
-	spin_unlock(&hsotg->lock);
 out:
+	spin_unlock(&hsotg->lock);
 	return retval;
 }
 EXPORT_SYMBOL_GPL(dwc2_handle_common_intr);
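The dwc2 change takes hsotg->lock before the controller-alive check, so that every path to the out label, including the early bail-out, runs exactly one unlock; previously the dead-controller path jumped past the locked region while the unlock sat before the label, an asymmetry that is easy to break during later rework. The fixed shape, sketched with pthreads (handle_irq and controller_alive are invented):

#include <pthread.h>
#include <stdio.h>
#include <stdbool.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Fixed shape: take the lock first so every path to 'out' unlocks it. */
static int handle_irq(bool controller_alive)
{
	int handled = 0;

	pthread_mutex_lock(&lock);

	if (!controller_alive) {
		fprintf(stderr, "controller is dead\n");
		goto out;
	}

	handled = 1;	/* ...read and dispatch interrupt status here... */
out:
	pthread_mutex_unlock(&lock);
	return handled;
}

int main(void)
{
	printf("alive: %d\n", handle_irq(true));
	printf("dead:  %d\n", handle_irq(false));
	return 0;
}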
diff --git a/drivers/usb/phy/phy.c b/drivers/usb/phy/phy.c
index ccfdfb24b240..2f9735b35338 100644
--- a/drivers/usb/phy/phy.c
+++ b/drivers/usb/phy/phy.c
@@ -34,7 +34,7 @@ static struct usb_phy *__usb_find_phy(struct list_head *list,
 			return phy;
 	}
 
-	return ERR_PTR(-EPROBE_DEFER);
+	return ERR_PTR(-ENODEV);
 }
 
 static struct usb_phy *__usb_find_phy_dev(struct device *dev,
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 11c7a9676441..d684b4b8108f 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -507,7 +507,7 @@ UNUSUAL_DEV( 0x04e6, 0x000c, 0x0100, 0x0100,
 UNUSUAL_DEV(  0x04e6, 0x000f, 0x0000, 0x9999,
 		"SCM Microsystems",
 		"eUSB SCSI Adapter (Bus Powered)",
-		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_euscsi_init,
+		USB_SC_SCSI, USB_PR_BULK, usb_stor_euscsi_init,
 		US_FL_SCM_MULT_TARG ),
 
 UNUSUAL_DEV(  0x04e6, 0x0101, 0x0200, 0x0200,
@@ -1995,6 +1995,13 @@ UNUSUAL_DEV( 0x152d, 0x2329, 0x0100, 0x0100,
 		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
 		US_FL_IGNORE_RESIDUE | US_FL_SANE_SENSE ),
 
+/* Reported by Dmitry Nezhevenko <dion@dion.org.ua> */
+UNUSUAL_DEV(  0x152d, 0x2566, 0x0114, 0x0114,
+		"JMicron",
+		"USB to ATA/ATAPI Bridge",
+		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+		US_FL_BROKEN_FUA ),
+
 /* Entrega Technologies U1-SC25 (later Xircom PortGear PGSCSI)
  * and Mac USB Dock USB-SCSI */
 UNUSUAL_DEV(  0x1645, 0x0007, 0x0100, 0x0133,
diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
index 6df4357d9ee3..dbc00e56c7f5 100644
--- a/drivers/usb/storage/unusual_uas.h
+++ b/drivers/usb/storage/unusual_uas.h
@@ -140,3 +140,10 @@ UNUSUAL_DEV(0x4971, 0x1012, 0x0000, 0x9999,
140 "External HDD", 140 "External HDD",
141 USB_SC_DEVICE, USB_PR_DEVICE, NULL, 141 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
142 US_FL_IGNORE_UAS), 142 US_FL_IGNORE_UAS),
143
144/* Reported-by: Richard Henderson <rth@redhat.com> */
145UNUSUAL_DEV(0x4971, 0x8017, 0x0000, 0x9999,
146 "SimpleTech",
147 "External HDD",
148 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
149 US_FL_NO_REPORT_OPCODES),
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index e022cc40303d..8dccca9013ed 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -528,9 +528,9 @@ static void handle_rx(struct vhost_net *net)
 		.msg_controllen = 0,
 		.msg_flags = MSG_DONTWAIT,
 	};
-	struct virtio_net_hdr hdr = {
-		.flags = 0,
-		.gso_type = VIRTIO_NET_HDR_GSO_NONE
+	struct virtio_net_hdr_mrg_rxbuf hdr = {
+		.hdr.flags = 0,
+		.hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE
 	};
 	size_t total_len = 0;
 	int err, mergeable;
@@ -614,11 +614,11 @@ static void handle_rx(struct vhost_net *net)
 			 vq->iov->iov_base);
 			break;
 		}
-		/* Supply (or replace) ->num_buffers if VIRTIO_NET_F_MRG_RXBUF
-		 * TODO: Should check and handle checksum.
-		 */
+		/* TODO: Should check and handle checksum. */
+
+		hdr.num_buffers = cpu_to_vhost16(vq, headcount);
 		if (likely(mergeable) &&
-		    copy_to_iter(&headcount, 2, &fixup) != 2) {
+		    copy_to_iter(&hdr.num_buffers, 2, &fixup) != 2) {
 			vq_err(vq, "Failed num_buffers write");
 			vhost_discard_vq_desc(vq, headcount);
 			break;
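The vhost hunk stops copying the host-endian headcount straight into the guest-visible buffer and instead stores it through cpu_to_vhost16(), which converts to the byte order the virtio 1.0 guest negotiated before copy_to_iter() writes the two bytes. A standalone sketch of such a conversion helper (cpu_to_le16_demo is invented; a little-endian guest is assumed):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Convert a host u16 to the device's (here: little-endian) byte order. */
static uint16_t cpu_to_le16_demo(uint16_t v)
{
	uint8_t b[2] = { v & 0xff, v >> 8 };
	uint16_t out;

	memcpy(&out, b, 2);	/* 'out' now holds the LE representation */
	return out;
}

int main(void)
{
	uint16_t headcount = 3;
	uint16_t wire = cpu_to_le16_demo(headcount);
	uint8_t raw[2];

	memcpy(raw, &wire, 2);
	/* on any host, the wire bytes are 03 00, what an LE guest expects */
	printf("wire bytes: %02x %02x\n", raw[0], raw[1]);
	return 0;
}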
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 2f0fbc374e87..e427cb7ee12c 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -3065,6 +3065,8 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
 	path->search_commit_root = 1;
 	path->skip_locking = 1;
 
+	ppath->search_commit_root = 1;
+	ppath->skip_locking = 1;
 	/*
 	 * trigger the readahead for extent tree csum tree and wait for
 	 * completion. During readahead, the scrub is officially paused
diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
index 9c56ef776407..7febcf2475c5 100644
--- a/fs/cifs/cifs_debug.c
+++ b/fs/cifs/cifs_debug.c
@@ -606,9 +606,11 @@ cifs_security_flags_handle_must_flags(unsigned int *flags)
 		*flags = CIFSSEC_MUST_NTLMV2;
 	else if ((*flags & CIFSSEC_MUST_NTLM) == CIFSSEC_MUST_NTLM)
 		*flags = CIFSSEC_MUST_NTLM;
-	else if ((*flags & CIFSSEC_MUST_LANMAN) == CIFSSEC_MUST_LANMAN)
+	else if (CIFSSEC_MUST_LANMAN &&
+		 (*flags & CIFSSEC_MUST_LANMAN) == CIFSSEC_MUST_LANMAN)
 		*flags = CIFSSEC_MUST_LANMAN;
-	else if ((*flags & CIFSSEC_MUST_PLNTXT) == CIFSSEC_MUST_PLNTXT)
+	else if (CIFSSEC_MUST_PLNTXT &&
+		 (*flags & CIFSSEC_MUST_PLNTXT) == CIFSSEC_MUST_PLNTXT)
 		*flags = CIFSSEC_MUST_PLNTXT;
 
 	*flags |= signflags;
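The cifs_debug.c fix guards each weak-auth test with the mask itself: when LANMAN or plaintext support is compiled out, CIFSSEC_MUST_LANMAN and CIFSSEC_MUST_PLNTXT collapse to 0, and the old test (*flags & 0) == 0 is true for any flags, silently selecting the disabled mechanism. A tiny demonstration with invented mask values:

#include <stdio.h>

/* Hypothetical masks; LANMAN compiled out, so its mask is 0. */
#define MUST_NTLMV2	0x01
#define MUST_LANMAN	0x00

int main(void)
{
	unsigned flags = 0x10;	/* some unrelated bits set */

	/* buggy test: (flags & 0) == 0 always holds, matching everything */
	if ((flags & MUST_LANMAN) == MUST_LANMAN)
		printf("buggy check matches LANMAN\n");

	/* fixed test: a zero mask can never be selected */
	if (MUST_LANMAN && (flags & MUST_LANMAN) == MUST_LANMAN)
		printf("fixed check matches LANMAN\n");
	else
		printf("fixed check rejects LANMAN\n");
	return 0;
}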
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 96b7e9b7706d..74f12877493a 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -366,6 +366,7 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
 	struct cifsLockInfo *li, *tmp;
 	struct cifs_fid fid;
 	struct cifs_pending_open open;
+	bool oplock_break_cancelled;
 
 	spin_lock(&cifs_file_list_lock);
 	if (--cifs_file->count > 0) {
@@ -397,7 +398,7 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
 	}
 	spin_unlock(&cifs_file_list_lock);
 
-	cancel_work_sync(&cifs_file->oplock_break);
+	oplock_break_cancelled = cancel_work_sync(&cifs_file->oplock_break);
 
 	if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
 		struct TCP_Server_Info *server = tcon->ses->server;
@@ -409,6 +410,9 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
 		_free_xid(xid);
 	}
 
+	if (oplock_break_cancelled)
+		cifs_done_oplock_break(cifsi);
+
 	cifs_del_pending_open(&open);
 
 	/*
diff --git a/fs/cifs/smbencrypt.c b/fs/cifs/smbencrypt.c
index 6c1566366a66..a4232ec4f2ba 100644
--- a/fs/cifs/smbencrypt.c
+++ b/fs/cifs/smbencrypt.c
@@ -221,7 +221,7 @@ E_md4hash(const unsigned char *passwd, unsigned char *p16,
 	}
 
 	rc = mdfour(p16, (unsigned char *) wpwd, len * sizeof(__le16));
-	memset(wpwd, 0, 129 * sizeof(__le16));
+	memzero_explicit(wpwd, sizeof(wpwd));
 
 	return rc;
 }
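The smbencrypt.c fix addresses two things at once: sizeof(wpwd) replaces the hand-counted 129 * sizeof(__le16) (equal today, but robust to future changes), and memzero_explicit() replaces memset(), whose final store into a buffer about to go out of scope a compiler may delete as a dead store. A userspace sketch of the barrier idea behind memzero_explicit() (explicit_memzero is ours, and the empty asm is a GCC/Clang-style optimization barrier):

#include <string.h>
#include <stdio.h>

/* memzero_explicit idea: a compiler barrier keeps the store alive. */
static void explicit_memzero(void *p, size_t n)
{
	memset(p, 0, n);
	__asm__ __volatile__("" : : "r"(p) : "memory");
}

static int hash_password(const char *pw)
{
	char wpwd[129];

	snprintf(wpwd, sizeof(wpwd), "%s", pw);
	/* ...derive the hash from wpwd here... */

	/* A plain memset here may be optimized away as a dead store;
	 * the barrier variant, like memzero_explicit(), may not. */
	explicit_memzero(wpwd, sizeof(wpwd));
	return 0;
}

int main(void)
{
	return hash_password("secret");
}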
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index c8b148bbdc8b..3e193cb36996 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -667,7 +667,7 @@ static void do_qc(struct gfs2_quota_data *qd, s64 change)
 
 static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
 			     s64 change, struct gfs2_quota_data *qd,
-			     struct fs_disk_quota *fdq)
+			     struct qc_dqblk *fdq)
 {
 	struct inode *inode = &ip->i_inode;
 	struct gfs2_sbd *sdp = GFS2_SB(inode);
@@ -697,16 +697,16 @@ static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
 	be64_add_cpu(&q.qu_value, change);
 	qd->qd_qb.qb_value = q.qu_value;
 	if (fdq) {
-		if (fdq->d_fieldmask & FS_DQ_BSOFT) {
-			q.qu_warn = cpu_to_be64(fdq->d_blk_softlimit >> sdp->sd_fsb2bb_shift);
+		if (fdq->d_fieldmask & QC_SPC_SOFT) {
+			q.qu_warn = cpu_to_be64(fdq->d_spc_softlimit >> sdp->sd_sb.sb_bsize_shift);
 			qd->qd_qb.qb_warn = q.qu_warn;
 		}
-		if (fdq->d_fieldmask & FS_DQ_BHARD) {
-			q.qu_limit = cpu_to_be64(fdq->d_blk_hardlimit >> sdp->sd_fsb2bb_shift);
+		if (fdq->d_fieldmask & QC_SPC_HARD) {
+			q.qu_limit = cpu_to_be64(fdq->d_spc_hardlimit >> sdp->sd_sb.sb_bsize_shift);
 			qd->qd_qb.qb_limit = q.qu_limit;
 		}
-		if (fdq->d_fieldmask & FS_DQ_BCOUNT) {
-			q.qu_value = cpu_to_be64(fdq->d_bcount >> sdp->sd_fsb2bb_shift);
+		if (fdq->d_fieldmask & QC_SPACE) {
+			q.qu_value = cpu_to_be64(fdq->d_space >> sdp->sd_sb.sb_bsize_shift);
 			qd->qd_qb.qb_value = q.qu_value;
 		}
 	}
@@ -1497,7 +1497,7 @@ static int gfs2_quota_get_xstate(struct super_block *sb,
 }
 
 static int gfs2_get_dqblk(struct super_block *sb, struct kqid qid,
-			  struct fs_disk_quota *fdq)
+			  struct qc_dqblk *fdq)
 {
 	struct gfs2_sbd *sdp = sb->s_fs_info;
 	struct gfs2_quota_lvb *qlvb;
@@ -1505,7 +1505,7 @@ static int gfs2_get_dqblk(struct super_block *sb, struct kqid qid,
 	struct gfs2_holder q_gh;
 	int error;
 
-	memset(fdq, 0, sizeof(struct fs_disk_quota));
+	memset(fdq, 0, sizeof(*fdq));
 
 	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
 		return -ESRCH; /* Crazy XFS error code */
@@ -1522,12 +1522,9 @@ static int gfs2_get_dqblk(struct super_block *sb, struct kqid qid,
 		goto out;
 
 	qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;
-	fdq->d_version = FS_DQUOT_VERSION;
-	fdq->d_flags = (qid.type == USRQUOTA) ? FS_USER_QUOTA : FS_GROUP_QUOTA;
-	fdq->d_id = from_kqid_munged(current_user_ns(), qid);
-	fdq->d_blk_hardlimit = be64_to_cpu(qlvb->qb_limit) << sdp->sd_fsb2bb_shift;
-	fdq->d_blk_softlimit = be64_to_cpu(qlvb->qb_warn) << sdp->sd_fsb2bb_shift;
-	fdq->d_bcount = be64_to_cpu(qlvb->qb_value) << sdp->sd_fsb2bb_shift;
+	fdq->d_spc_hardlimit = be64_to_cpu(qlvb->qb_limit) << sdp->sd_sb.sb_bsize_shift;
+	fdq->d_spc_softlimit = be64_to_cpu(qlvb->qb_warn) << sdp->sd_sb.sb_bsize_shift;
+	fdq->d_space = be64_to_cpu(qlvb->qb_value) << sdp->sd_sb.sb_bsize_shift;
 
 	gfs2_glock_dq_uninit(&q_gh);
 out:
@@ -1536,10 +1533,10 @@ out:
 }
 
 /* GFS2 only supports a subset of the XFS fields */
-#define GFS2_FIELDMASK (FS_DQ_BSOFT|FS_DQ_BHARD|FS_DQ_BCOUNT)
+#define GFS2_FIELDMASK (QC_SPC_SOFT|QC_SPC_HARD|QC_SPACE)
 
 static int gfs2_set_dqblk(struct super_block *sb, struct kqid qid,
-			  struct fs_disk_quota *fdq)
+			  struct qc_dqblk *fdq)
 {
 	struct gfs2_sbd *sdp = sb->s_fs_info;
 	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
@@ -1583,17 +1580,17 @@ static int gfs2_set_dqblk(struct super_block *sb, struct kqid qid,
 		goto out_i;
 
 	/* If nothing has changed, this is a no-op */
-	if ((fdq->d_fieldmask & FS_DQ_BSOFT) &&
-	    ((fdq->d_blk_softlimit >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_warn)))
-		fdq->d_fieldmask ^= FS_DQ_BSOFT;
+	if ((fdq->d_fieldmask & QC_SPC_SOFT) &&
+	    ((fdq->d_spc_softlimit >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_warn)))
+		fdq->d_fieldmask ^= QC_SPC_SOFT;
 
-	if ((fdq->d_fieldmask & FS_DQ_BHARD) &&
-	    ((fdq->d_blk_hardlimit >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_limit)))
-		fdq->d_fieldmask ^= FS_DQ_BHARD;
+	if ((fdq->d_fieldmask & QC_SPC_HARD) &&
+	    ((fdq->d_spc_hardlimit >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_limit)))
+		fdq->d_fieldmask ^= QC_SPC_HARD;
 
-	if ((fdq->d_fieldmask & FS_DQ_BCOUNT) &&
-	    ((fdq->d_bcount >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_value)))
-		fdq->d_fieldmask ^= FS_DQ_BCOUNT;
+	if ((fdq->d_fieldmask & QC_SPACE) &&
+	    ((fdq->d_space >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_value)))
+		fdq->d_fieldmask ^= QC_SPACE;
 
 	if (fdq->d_fieldmask == 0)
 		goto out_i;
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 10bf07280f4a..294692ff83b1 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -212,6 +212,12 @@ static int nfs_direct_cmp_commit_data_verf(struct nfs_direct_req *dreq,
  */
 ssize_t nfs_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter, loff_t pos)
 {
+	struct inode *inode = iocb->ki_filp->f_mapping->host;
+
+	/* we only support swap file calling nfs_direct_IO */
+	if (!IS_SWAPFILE(inode))
+		return 0;
+
 #ifndef CONFIG_NFS_SWAP
 	dprintk("NFS: nfs_direct_IO (%pD) off/no(%Ld/%lu) EINVAL\n",
 		iocb->ki_filp, (long long) pos, iter->nr_segs);
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 4bffe637ea32..2211f6ba8736 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -352,8 +352,9 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr, st
 
 	nfs_attr_check_mountpoint(sb, fattr);
 
-	if (((fattr->valid & NFS_ATTR_FATTR_FILEID) == 0) &&
-	    !nfs_attr_use_mounted_on_fileid(fattr))
+	if (nfs_attr_use_mounted_on_fileid(fattr))
+		fattr->fileid = fattr->mounted_on_fileid;
+	else if ((fattr->valid & NFS_ATTR_FATTR_FILEID) == 0)
 		goto out_no_inode;
 	if ((fattr->valid & NFS_ATTR_FATTR_TYPE) == 0)
 		goto out_no_inode;
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index efaa31c70fbe..b6f34bfa6fe8 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -31,8 +31,6 @@ static inline int nfs_attr_use_mounted_on_fileid(struct nfs_fattr *fattr)
 	    (((fattr->valid & NFS_ATTR_FATTR_MOUNTPOINT) == 0) &&
 	     ((fattr->valid & NFS_ATTR_FATTR_V4_REFERRAL) == 0)))
 		return 0;
-
-	fattr->fileid = fattr->mounted_on_fileid;
 	return 1;
 }
 
diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
index 953daa44a282..706ad10b8186 100644
--- a/fs/nfs/nfs4client.c
+++ b/fs/nfs/nfs4client.c
@@ -639,7 +639,7 @@ int nfs41_walk_client_list(struct nfs_client *new,
 		prev = pos;
 
 		status = nfs_wait_client_init_complete(pos);
-		if (status == 0) {
+		if (pos->cl_cons_state == NFS_CS_SESSION_INITING) {
 			nfs4_schedule_lease_recovery(pos);
 			status = nfs4_wait_clnt_recover(pos);
 		}
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index 8f0acef3d184..69df5b239844 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -2396,30 +2396,25 @@ static inline qsize_t stoqb(qsize_t space)
 }
 
 /* Generic routine for getting common part of quota structure */
-static void do_get_dqblk(struct dquot *dquot, struct fs_disk_quota *di)
+static void do_get_dqblk(struct dquot *dquot, struct qc_dqblk *di)
 {
 	struct mem_dqblk *dm = &dquot->dq_dqb;
 
 	memset(di, 0, sizeof(*di));
-	di->d_version = FS_DQUOT_VERSION;
-	di->d_flags = dquot->dq_id.type == USRQUOTA ?
-			FS_USER_QUOTA : FS_GROUP_QUOTA;
-	di->d_id = from_kqid_munged(current_user_ns(), dquot->dq_id);
-
 	spin_lock(&dq_data_lock);
-	di->d_blk_hardlimit = stoqb(dm->dqb_bhardlimit);
-	di->d_blk_softlimit = stoqb(dm->dqb_bsoftlimit);
+	di->d_spc_hardlimit = dm->dqb_bhardlimit;
+	di->d_spc_softlimit = dm->dqb_bsoftlimit;
 	di->d_ino_hardlimit = dm->dqb_ihardlimit;
 	di->d_ino_softlimit = dm->dqb_isoftlimit;
-	di->d_bcount = dm->dqb_curspace + dm->dqb_rsvspace;
-	di->d_icount = dm->dqb_curinodes;
-	di->d_btimer = dm->dqb_btime;
-	di->d_itimer = dm->dqb_itime;
+	di->d_space = dm->dqb_curspace + dm->dqb_rsvspace;
+	di->d_ino_count = dm->dqb_curinodes;
+	di->d_spc_timer = dm->dqb_btime;
+	di->d_ino_timer = dm->dqb_itime;
 	spin_unlock(&dq_data_lock);
 }
 
 int dquot_get_dqblk(struct super_block *sb, struct kqid qid,
-		    struct fs_disk_quota *di)
+		    struct qc_dqblk *di)
 {
 	struct dquot *dquot;
 
@@ -2433,70 +2428,70 @@ int dquot_get_dqblk(struct super_block *sb, struct kqid qid,
 }
 EXPORT_SYMBOL(dquot_get_dqblk);
 
-#define VFS_FS_DQ_MASK \
-	(FS_DQ_BCOUNT | FS_DQ_BSOFT | FS_DQ_BHARD | \
-	 FS_DQ_ICOUNT | FS_DQ_ISOFT | FS_DQ_IHARD | \
-	 FS_DQ_BTIMER | FS_DQ_ITIMER)
+#define VFS_QC_MASK \
+	(QC_SPACE | QC_SPC_SOFT | QC_SPC_HARD | \
+	 QC_INO_COUNT | QC_INO_SOFT | QC_INO_HARD | \
+	 QC_SPC_TIMER | QC_INO_TIMER)
 
 /* Generic routine for setting common part of quota structure */
-static int do_set_dqblk(struct dquot *dquot, struct fs_disk_quota *di)
+static int do_set_dqblk(struct dquot *dquot, struct qc_dqblk *di)
 {
 	struct mem_dqblk *dm = &dquot->dq_dqb;
 	int check_blim = 0, check_ilim = 0;
 	struct mem_dqinfo *dqi = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type];
 
-	if (di->d_fieldmask & ~VFS_FS_DQ_MASK)
+	if (di->d_fieldmask & ~VFS_QC_MASK)
 		return -EINVAL;
 
-	if (((di->d_fieldmask & FS_DQ_BSOFT) &&
-	    (di->d_blk_softlimit > dqi->dqi_maxblimit)) ||
-	    ((di->d_fieldmask & FS_DQ_BHARD) &&
-	    (di->d_blk_hardlimit > dqi->dqi_maxblimit)) ||
-	    ((di->d_fieldmask & FS_DQ_ISOFT) &&
+	if (((di->d_fieldmask & QC_SPC_SOFT) &&
+	     stoqb(di->d_spc_softlimit) > dqi->dqi_maxblimit) ||
+	    ((di->d_fieldmask & QC_SPC_HARD) &&
+	     stoqb(di->d_spc_hardlimit) > dqi->dqi_maxblimit) ||
+	    ((di->d_fieldmask & QC_INO_SOFT) &&
 	    (di->d_ino_softlimit > dqi->dqi_maxilimit)) ||
-	    ((di->d_fieldmask & FS_DQ_IHARD) &&
+	    ((di->d_fieldmask & QC_INO_HARD) &&
 	    (di->d_ino_hardlimit > dqi->dqi_maxilimit)))
 		return -ERANGE;
 
 	spin_lock(&dq_data_lock);
-	if (di->d_fieldmask & FS_DQ_BCOUNT) {
-		dm->dqb_curspace = di->d_bcount - dm->dqb_rsvspace;
+	if (di->d_fieldmask & QC_SPACE) {
+		dm->dqb_curspace = di->d_space - dm->dqb_rsvspace;
 		check_blim = 1;
 		set_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags);
 	}
 
-	if (di->d_fieldmask & FS_DQ_BSOFT)
-		dm->dqb_bsoftlimit = qbtos(di->d_blk_softlimit);
-	if (di->d_fieldmask & FS_DQ_BHARD)
-		dm->dqb_bhardlimit = qbtos(di->d_blk_hardlimit);
-	if (di->d_fieldmask & (FS_DQ_BSOFT | FS_DQ_BHARD)) {
+	if (di->d_fieldmask & QC_SPC_SOFT)
+		dm->dqb_bsoftlimit = di->d_spc_softlimit;
+	if (di->d_fieldmask & QC_SPC_HARD)
+		dm->dqb_bhardlimit = di->d_spc_hardlimit;
+	if (di->d_fieldmask & (QC_SPC_SOFT | QC_SPC_HARD)) {
 		check_blim = 1;
 		set_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags);
 	}
 
-	if (di->d_fieldmask & FS_DQ_ICOUNT) {
-		dm->dqb_curinodes = di->d_icount;
+	if (di->d_fieldmask & QC_INO_COUNT) {
+		dm->dqb_curinodes = di->d_ino_count;
 		check_ilim = 1;
 		set_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags);
 	}
 
-	if (di->d_fieldmask & FS_DQ_ISOFT)
+	if (di->d_fieldmask & QC_INO_SOFT)
 		dm->dqb_isoftlimit = di->d_ino_softlimit;
-	if (di->d_fieldmask & FS_DQ_IHARD)
+	if (di->d_fieldmask & QC_INO_HARD)
 		dm->dqb_ihardlimit = di->d_ino_hardlimit;
-	if (di->d_fieldmask & (FS_DQ_ISOFT | FS_DQ_IHARD)) {
+	if (di->d_fieldmask & (QC_INO_SOFT | QC_INO_HARD)) {
 		check_ilim = 1;
 		set_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags);
 	}
 
-	if (di->d_fieldmask & FS_DQ_BTIMER) {
-		dm->dqb_btime = di->d_btimer;
+	if (di->d_fieldmask & QC_SPC_TIMER) {
+		dm->dqb_btime = di->d_spc_timer;
 		check_blim = 1;
 		set_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags);
 	}
 
-	if (di->d_fieldmask & FS_DQ_ITIMER) {
-		dm->dqb_itime = di->d_itimer;
+	if (di->d_fieldmask & QC_INO_TIMER) {
+		dm->dqb_itime = di->d_ino_timer;
 		check_ilim = 1;
 		set_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags);
 	}
@@ -2506,7 +2501,7 @@ static int do_set_dqblk(struct dquot *dquot, struct fs_disk_quota *di)
 		    dm->dqb_curspace < dm->dqb_bsoftlimit) {
 			dm->dqb_btime = 0;
 			clear_bit(DQ_BLKS_B, &dquot->dq_flags);
-		} else if (!(di->d_fieldmask & FS_DQ_BTIMER))
+		} else if (!(di->d_fieldmask & QC_SPC_TIMER))
 			/* Set grace only if user hasn't provided his own... */
 			dm->dqb_btime = get_seconds() + dqi->dqi_bgrace;
 	}
@@ -2515,7 +2510,7 @@ static int do_set_dqblk(struct dquot *dquot, struct fs_disk_quota *di)
 		    dm->dqb_curinodes < dm->dqb_isoftlimit) {
 			dm->dqb_itime = 0;
 			clear_bit(DQ_INODES_B, &dquot->dq_flags);
-		} else if (!(di->d_fieldmask & FS_DQ_ITIMER))
+		} else if (!(di->d_fieldmask & QC_INO_TIMER))
 			/* Set grace only if user hasn't provided his own... */
 			dm->dqb_itime = get_seconds() + dqi->dqi_igrace;
 	}
@@ -2531,7 +2526,7 @@ static int do_set_dqblk(struct dquot *dquot, struct fs_disk_quota *di)
 }
 
 int dquot_set_dqblk(struct super_block *sb, struct kqid qid,
-		    struct fs_disk_quota *di)
+		    struct qc_dqblk *di)
 {
 	struct dquot *dquot;
 	int rc;
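The dquot.c conversion reflects the new qc_dqblk convention of carrying space limits in bytes: the qbtos()/stoqb() calls disappear from the store and load paths, and stoqb() survives only where a byte value must be compared against the block-denominated dqi_maxblimit. The two helpers, restated as a runnable sketch (QIF_DQBLKSIZE is the kernel's 1024-byte quota block):

#include <stdint.h>
#include <stdio.h>

#define QIF_DQBLKSIZE_BITS	10	/* quota block = 1024 bytes */
#define QIF_DQBLKSIZE		(1ULL << QIF_DQBLKSIZE_BITS)

/* blocks -> bytes */
static uint64_t qbtos(uint64_t blocks)
{
	return blocks << QIF_DQBLKSIZE_BITS;
}

/* bytes -> blocks, rounding up so a partial block still counts */
static uint64_t stoqb(uint64_t space)
{
	return (space + QIF_DQBLKSIZE - 1) >> QIF_DQBLKSIZE_BITS;
}

int main(void)
{
	uint64_t limit_bytes = 1536;	/* 1.5 quota blocks */

	printf("%llu bytes = %llu blocks (rounded up)\n",
	       (unsigned long long)limit_bytes,
	       (unsigned long long)stoqb(limit_bytes));	/* 2 blocks */
	printf("2 blocks = %llu bytes\n",
	       (unsigned long long)qbtos(2));			/* 2048 bytes */
	return 0;
}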
diff --git a/fs/quota/quota.c b/fs/quota/quota.c
index 2aa4151f99d2..6f3856328eea 100644
--- a/fs/quota/quota.c
+++ b/fs/quota/quota.c
@@ -118,17 +118,27 @@ static int quota_setinfo(struct super_block *sb, int type, void __user *addr)
 	return sb->s_qcop->set_info(sb, type, &info);
 }
 
-static void copy_to_if_dqblk(struct if_dqblk *dst, struct fs_disk_quota *src)
+static inline qsize_t qbtos(qsize_t blocks)
+{
+	return blocks << QIF_DQBLKSIZE_BITS;
+}
+
+static inline qsize_t stoqb(qsize_t space)
+{
+	return (space + QIF_DQBLKSIZE - 1) >> QIF_DQBLKSIZE_BITS;
+}
+
+static void copy_to_if_dqblk(struct if_dqblk *dst, struct qc_dqblk *src)
 {
 	memset(dst, 0, sizeof(*dst));
-	dst->dqb_bhardlimit = src->d_blk_hardlimit;
-	dst->dqb_bsoftlimit = src->d_blk_softlimit;
-	dst->dqb_curspace = src->d_bcount;
+	dst->dqb_bhardlimit = stoqb(src->d_spc_hardlimit);
+	dst->dqb_bsoftlimit = stoqb(src->d_spc_softlimit);
+	dst->dqb_curspace = src->d_space;
 	dst->dqb_ihardlimit = src->d_ino_hardlimit;
 	dst->dqb_isoftlimit = src->d_ino_softlimit;
-	dst->dqb_curinodes = src->d_icount;
-	dst->dqb_btime = src->d_btimer;
-	dst->dqb_itime = src->d_itimer;
+	dst->dqb_curinodes = src->d_ino_count;
+	dst->dqb_btime = src->d_spc_timer;
+	dst->dqb_itime = src->d_ino_timer;
 	dst->dqb_valid = QIF_ALL;
 }
 
@@ -136,7 +146,7 @@ static int quota_getquota(struct super_block *sb, int type, qid_t id,
 			  void __user *addr)
 {
 	struct kqid qid;
-	struct fs_disk_quota fdq;
+	struct qc_dqblk fdq;
 	struct if_dqblk idq;
 	int ret;
 
@@ -154,36 +164,36 @@ static int quota_getquota(struct super_block *sb, int type, qid_t id,
 	return 0;
 }
 
-static void copy_from_if_dqblk(struct fs_disk_quota *dst, struct if_dqblk *src)
+static void copy_from_if_dqblk(struct qc_dqblk *dst, struct if_dqblk *src)
 {
-	dst->d_blk_hardlimit = src->dqb_bhardlimit;
-	dst->d_blk_softlimit = src->dqb_bsoftlimit;
-	dst->d_bcount = src->dqb_curspace;
+	dst->d_spc_hardlimit = qbtos(src->dqb_bhardlimit);
+	dst->d_spc_softlimit = qbtos(src->dqb_bsoftlimit);
+	dst->d_space = src->dqb_curspace;
 	dst->d_ino_hardlimit = src->dqb_ihardlimit;
 	dst->d_ino_softlimit = src->dqb_isoftlimit;
-	dst->d_icount = src->dqb_curinodes;
-	dst->d_btimer = src->dqb_btime;
-	dst->d_itimer = src->dqb_itime;
+	dst->d_ino_count = src->dqb_curinodes;
+	dst->d_spc_timer = src->dqb_btime;
+	dst->d_ino_timer = src->dqb_itime;
 
 	dst->d_fieldmask = 0;
 	if (src->dqb_valid & QIF_BLIMITS)
-		dst->d_fieldmask |= FS_DQ_BSOFT | FS_DQ_BHARD;
+		dst->d_fieldmask |= QC_SPC_SOFT | QC_SPC_HARD;
 	if (src->dqb_valid & QIF_SPACE)
-		dst->d_fieldmask |= FS_DQ_BCOUNT;
+		dst->d_fieldmask |= QC_SPACE;
 	if (src->dqb_valid & QIF_ILIMITS)
-		dst->d_fieldmask |= FS_DQ_ISOFT | FS_DQ_IHARD;
+		dst->d_fieldmask |= QC_INO_SOFT | QC_INO_HARD;
 	if (src->dqb_valid & QIF_INODES)
-		dst->d_fieldmask |= FS_DQ_ICOUNT;
+		dst->d_fieldmask |= QC_INO_COUNT;
 	if (src->dqb_valid & QIF_BTIME)
-		dst->d_fieldmask |= FS_DQ_BTIMER;
+		dst->d_fieldmask |= QC_SPC_TIMER;
 	if (src->dqb_valid & QIF_ITIME)
-		dst->d_fieldmask |= FS_DQ_ITIMER;
+		dst->d_fieldmask |= QC_INO_TIMER;
 }
 
 static int quota_setquota(struct super_block *sb, int type, qid_t id,
 			  void __user *addr)
 {
-	struct fs_disk_quota fdq;
+	struct qc_dqblk fdq;
 	struct if_dqblk idq;
 	struct kqid qid;
 
@@ -247,10 +257,78 @@ static int quota_getxstatev(struct super_block *sb, void __user *addr)
 	return ret;
 }
 
+/*
+ * XFS defines BBTOB and BTOBB macros inside fs/xfs/ and we cannot move them
+ * out of there as xfsprogs rely on definitions being in that header file. So
+ * just define same functions here for quota purposes.
+ */
+#define XFS_BB_SHIFT 9
+
+static inline u64 quota_bbtob(u64 blocks)
+{
+	return blocks << XFS_BB_SHIFT;
+}
+
+static inline u64 quota_btobb(u64 bytes)
+{
+	return (bytes + (1 << XFS_BB_SHIFT) - 1) >> XFS_BB_SHIFT;
+}
+
+static void copy_from_xfs_dqblk(struct qc_dqblk *dst, struct fs_disk_quota *src)
+{
+	dst->d_spc_hardlimit = quota_bbtob(src->d_blk_hardlimit);
+	dst->d_spc_softlimit = quota_bbtob(src->d_blk_softlimit);
+	dst->d_ino_hardlimit = src->d_ino_hardlimit;
+	dst->d_ino_softlimit = src->d_ino_softlimit;
+	dst->d_space = quota_bbtob(src->d_bcount);
+	dst->d_ino_count = src->d_icount;
+	dst->d_ino_timer = src->d_itimer;
+	dst->d_spc_timer = src->d_btimer;
+	dst->d_ino_warns = src->d_iwarns;
+	dst->d_spc_warns = src->d_bwarns;
+	dst->d_rt_spc_hardlimit = quota_bbtob(src->d_rtb_hardlimit);
+	dst->d_rt_spc_softlimit = quota_bbtob(src->d_rtb_softlimit);
+	dst->d_rt_space = quota_bbtob(src->d_rtbcount);
+	dst->d_rt_spc_timer = src->d_rtbtimer;
+	dst->d_rt_spc_warns = src->d_rtbwarns;
+	dst->d_fieldmask = 0;
+	if (src->d_fieldmask & FS_DQ_ISOFT)
+		dst->d_fieldmask |= QC_INO_SOFT;
+	if (src->d_fieldmask & FS_DQ_IHARD)
+		dst->d_fieldmask |= QC_INO_HARD;
+	if (src->d_fieldmask & FS_DQ_BSOFT)
+		dst->d_fieldmask |= QC_SPC_SOFT;
+	if (src->d_fieldmask & FS_DQ_BHARD)
+		dst->d_fieldmask |= QC_SPC_HARD;
+	if (src->d_fieldmask & FS_DQ_RTBSOFT)
+		dst->d_fieldmask |= QC_RT_SPC_SOFT;
+	if (src->d_fieldmask & FS_DQ_RTBHARD)
+		dst->d_fieldmask |= QC_RT_SPC_HARD;
+	if (src->d_fieldmask & FS_DQ_BTIMER)
+		dst->d_fieldmask |= QC_SPC_TIMER;
+	if (src->d_fieldmask & FS_DQ_ITIMER)
+		dst->d_fieldmask |= QC_INO_TIMER;
+	if (src->d_fieldmask & FS_DQ_RTBTIMER)
+		dst->d_fieldmask |= QC_RT_SPC_TIMER;
+	if (src->d_fieldmask & FS_DQ_BWARNS)
+		dst->d_fieldmask |= QC_SPC_WARNS;
+	if (src->d_fieldmask & FS_DQ_IWARNS)
+		dst->d_fieldmask |= QC_INO_WARNS;
+	if (src->d_fieldmask & FS_DQ_RTBWARNS)
+		dst->d_fieldmask |= QC_RT_SPC_WARNS;
+	if (src->d_fieldmask & FS_DQ_BCOUNT)
+		dst->d_fieldmask |= QC_SPACE;
+	if (src->d_fieldmask & FS_DQ_ICOUNT)
+		dst->d_fieldmask |= QC_INO_COUNT;
+	if (src->d_fieldmask & FS_DQ_RTBCOUNT)
+		dst->d_fieldmask |= QC_RT_SPACE;
+}
+
 static int quota_setxquota(struct super_block *sb, int type, qid_t id,
 			   void __user *addr)
 {
 	struct fs_disk_quota fdq;
+	struct qc_dqblk qdq;
 	struct kqid qid;
 
 	if (copy_from_user(&fdq, addr, sizeof(fdq)))
@@ -260,13 +338,44 @@ static int quota_setxquota(struct super_block *sb, int type, qid_t id,
 	qid = make_kqid(current_user_ns(), type, id);
 	if (!qid_valid(qid))
 		return -EINVAL;
-	return sb->s_qcop->set_dqblk(sb, qid, &fdq);
+	copy_from_xfs_dqblk(&qdq, &fdq);
+	return sb->s_qcop->set_dqblk(sb, qid, &qdq);
+}
+
+static void copy_to_xfs_dqblk(struct fs_disk_quota *dst, struct qc_dqblk *src,
+			      int type, qid_t id)
+{
+	memset(dst, 0, sizeof(*dst));
+	dst->d_version = FS_DQUOT_VERSION;
+	dst->d_id = id;
+	if (type == USRQUOTA)
+		dst->d_flags = FS_USER_QUOTA;
+	else if (type == PRJQUOTA)
+		dst->d_flags = FS_PROJ_QUOTA;
+	else
+		dst->d_flags = FS_GROUP_QUOTA;
+	dst->d_blk_hardlimit = quota_btobb(src->d_spc_hardlimit);
+	dst->d_blk_softlimit = quota_btobb(src->d_spc_softlimit);
+	dst->d_ino_hardlimit = src->d_ino_hardlimit;
+	dst->d_ino_softlimit = src->d_ino_softlimit;
+	dst->d_bcount = quota_btobb(src->d_space);
+	dst->d_icount = src->d_ino_count;
+	dst->d_itimer = src->d_ino_timer;
+	dst->d_btimer = src->d_spc_timer;
+	dst->d_iwarns = src->d_ino_warns;
+	dst->d_bwarns = src->d_spc_warns;
+	dst->d_rtb_hardlimit = quota_btobb(src->d_rt_spc_hardlimit);
+	dst->d_rtb_softlimit = quota_btobb(src->d_rt_spc_softlimit);
+	dst->d_rtbcount = quota_btobb(src->d_rt_space);
+	dst->d_rtbtimer = src->d_rt_spc_timer;
+	dst->d_rtbwarns = src->d_rt_spc_warns;
 }
 
 static int quota_getxquota(struct super_block *sb, int type, qid_t id,
 			   void __user *addr)
 {
 	struct fs_disk_quota fdq;
378 struct qc_dqblk qdq;
270 struct kqid qid; 379 struct kqid qid;
271 int ret; 380 int ret;
272 381
@@ -275,8 +384,11 @@ static int quota_getxquota(struct super_block *sb, int type, qid_t id,
275 qid = make_kqid(current_user_ns(), type, id); 384 qid = make_kqid(current_user_ns(), type, id);
276 if (!qid_valid(qid)) 385 if (!qid_valid(qid))
277 return -EINVAL; 386 return -EINVAL;
278 ret = sb->s_qcop->get_dqblk(sb, qid, &fdq); 387 ret = sb->s_qcop->get_dqblk(sb, qid, &qdq);
279 if (!ret && copy_to_user(addr, &fdq, sizeof(fdq))) 388 if (ret)
389 return ret;
390 copy_to_xfs_dqblk(&fdq, &qdq, type, id);
391 if (copy_to_user(addr, &fdq, sizeof(fdq)))
280 return -EFAULT; 392 return -EFAULT;
281 return ret; 393 return ret;
282} 394}
diff --git a/fs/udf/file.c b/fs/udf/file.c
index bb15771b92ae..08f3555fbeac 100644
--- a/fs/udf/file.c
+++ b/fs/udf/file.c
@@ -224,7 +224,7 @@ out:
224static int udf_release_file(struct inode *inode, struct file *filp) 224static int udf_release_file(struct inode *inode, struct file *filp)
225{ 225{
226 if (filp->f_mode & FMODE_WRITE && 226 if (filp->f_mode & FMODE_WRITE &&
227 atomic_read(&inode->i_writecount) > 1) { 227 atomic_read(&inode->i_writecount) == 1) {
228 /* 228 /*
229 * Grab i_mutex to avoid races with writes changing i_size 229 * Grab i_mutex to avoid races with writes changing i_size
230 * while we are running. 230 * while we are running.
diff --git a/fs/xfs/xfs_qm.h b/fs/xfs/xfs_qm.h
index 3a07a937e232..41f6c0b9d51c 100644
--- a/fs/xfs/xfs_qm.h
+++ b/fs/xfs/xfs_qm.h
@@ -166,9 +166,9 @@ extern void xfs_qm_dqrele_all_inodes(struct xfs_mount *, uint);
166/* quota ops */ 166/* quota ops */
167extern int xfs_qm_scall_trunc_qfiles(struct xfs_mount *, uint); 167extern int xfs_qm_scall_trunc_qfiles(struct xfs_mount *, uint);
168extern int xfs_qm_scall_getquota(struct xfs_mount *, xfs_dqid_t, 168extern int xfs_qm_scall_getquota(struct xfs_mount *, xfs_dqid_t,
169 uint, struct fs_disk_quota *); 169 uint, struct qc_dqblk *);
170extern int xfs_qm_scall_setqlim(struct xfs_mount *, xfs_dqid_t, uint, 170extern int xfs_qm_scall_setqlim(struct xfs_mount *, xfs_dqid_t, uint,
171 struct fs_disk_quota *); 171 struct qc_dqblk *);
172extern int xfs_qm_scall_getqstat(struct xfs_mount *, 172extern int xfs_qm_scall_getqstat(struct xfs_mount *,
173 struct fs_quota_stat *); 173 struct fs_quota_stat *);
174extern int xfs_qm_scall_getqstatv(struct xfs_mount *, 174extern int xfs_qm_scall_getqstatv(struct xfs_mount *,
diff --git a/fs/xfs/xfs_qm_syscalls.c b/fs/xfs/xfs_qm_syscalls.c
index 74fca68e43b6..cb6168ec92c9 100644
--- a/fs/xfs/xfs_qm_syscalls.c
+++ b/fs/xfs/xfs_qm_syscalls.c
@@ -39,7 +39,6 @@ STATIC int xfs_qm_log_quotaoff(xfs_mount_t *, xfs_qoff_logitem_t **, uint);
39STATIC int xfs_qm_log_quotaoff_end(xfs_mount_t *, xfs_qoff_logitem_t *, 39STATIC int xfs_qm_log_quotaoff_end(xfs_mount_t *, xfs_qoff_logitem_t *,
40 uint); 40 uint);
41STATIC uint xfs_qm_export_flags(uint); 41STATIC uint xfs_qm_export_flags(uint);
42STATIC uint xfs_qm_export_qtype_flags(uint);
43 42
44/* 43/*
45 * Turn off quota accounting and/or enforcement for all udquots and/or 44 * Turn off quota accounting and/or enforcement for all udquots and/or
@@ -573,8 +572,8 @@ xfs_qm_scall_getqstatv(
573 return 0; 572 return 0;
574} 573}
575 574
576#define XFS_DQ_MASK \ 575#define XFS_QC_MASK \
577 (FS_DQ_LIMIT_MASK | FS_DQ_TIMER_MASK | FS_DQ_WARNS_MASK) 576 (QC_LIMIT_MASK | QC_TIMER_MASK | QC_WARNS_MASK)
578 577
579/* 578/*
580 * Adjust quota limits, and start/stop timers accordingly. 579 * Adjust quota limits, and start/stop timers accordingly.
@@ -584,7 +583,7 @@ xfs_qm_scall_setqlim(
584 struct xfs_mount *mp, 583 struct xfs_mount *mp,
585 xfs_dqid_t id, 584 xfs_dqid_t id,
586 uint type, 585 uint type,
587 fs_disk_quota_t *newlim) 586 struct qc_dqblk *newlim)
588{ 587{
589 struct xfs_quotainfo *q = mp->m_quotainfo; 588 struct xfs_quotainfo *q = mp->m_quotainfo;
590 struct xfs_disk_dquot *ddq; 589 struct xfs_disk_dquot *ddq;
@@ -593,9 +592,9 @@ xfs_qm_scall_setqlim(
593 int error; 592 int error;
594 xfs_qcnt_t hard, soft; 593 xfs_qcnt_t hard, soft;
595 594
596 if (newlim->d_fieldmask & ~XFS_DQ_MASK) 595 if (newlim->d_fieldmask & ~XFS_QC_MASK)
597 return -EINVAL; 596 return -EINVAL;
598 if ((newlim->d_fieldmask & XFS_DQ_MASK) == 0) 597 if ((newlim->d_fieldmask & XFS_QC_MASK) == 0)
599 return 0; 598 return 0;
600 599
601 /* 600 /*
@@ -633,11 +632,11 @@ xfs_qm_scall_setqlim(
633 /* 632 /*
634 * Make sure that hardlimits are >= soft limits before changing. 633 * Make sure that hardlimits are >= soft limits before changing.
635 */ 634 */
636 hard = (newlim->d_fieldmask & FS_DQ_BHARD) ? 635 hard = (newlim->d_fieldmask & QC_SPC_HARD) ?
637 (xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_blk_hardlimit) : 636 (xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_spc_hardlimit) :
638 be64_to_cpu(ddq->d_blk_hardlimit); 637 be64_to_cpu(ddq->d_blk_hardlimit);
639 soft = (newlim->d_fieldmask & FS_DQ_BSOFT) ? 638 soft = (newlim->d_fieldmask & QC_SPC_SOFT) ?
640 (xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_blk_softlimit) : 639 (xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_spc_softlimit) :
641 be64_to_cpu(ddq->d_blk_softlimit); 640 be64_to_cpu(ddq->d_blk_softlimit);
642 if (hard == 0 || hard >= soft) { 641 if (hard == 0 || hard >= soft) {
643 ddq->d_blk_hardlimit = cpu_to_be64(hard); 642 ddq->d_blk_hardlimit = cpu_to_be64(hard);
@@ -650,11 +649,11 @@ xfs_qm_scall_setqlim(
650 } else { 649 } else {
651 xfs_debug(mp, "blkhard %Ld < blksoft %Ld", hard, soft); 650 xfs_debug(mp, "blkhard %Ld < blksoft %Ld", hard, soft);
652 } 651 }
653 hard = (newlim->d_fieldmask & FS_DQ_RTBHARD) ? 652 hard = (newlim->d_fieldmask & QC_RT_SPC_HARD) ?
654 (xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_rtb_hardlimit) : 653 (xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_rt_spc_hardlimit) :
655 be64_to_cpu(ddq->d_rtb_hardlimit); 654 be64_to_cpu(ddq->d_rtb_hardlimit);
656 soft = (newlim->d_fieldmask & FS_DQ_RTBSOFT) ? 655 soft = (newlim->d_fieldmask & QC_RT_SPC_SOFT) ?
657 (xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_rtb_softlimit) : 656 (xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_rt_spc_softlimit) :
658 be64_to_cpu(ddq->d_rtb_softlimit); 657 be64_to_cpu(ddq->d_rtb_softlimit);
659 if (hard == 0 || hard >= soft) { 658 if (hard == 0 || hard >= soft) {
660 ddq->d_rtb_hardlimit = cpu_to_be64(hard); 659 ddq->d_rtb_hardlimit = cpu_to_be64(hard);
@@ -667,10 +666,10 @@ xfs_qm_scall_setqlim(
667 xfs_debug(mp, "rtbhard %Ld < rtbsoft %Ld", hard, soft); 666 xfs_debug(mp, "rtbhard %Ld < rtbsoft %Ld", hard, soft);
668 } 667 }
669 668
670 hard = (newlim->d_fieldmask & FS_DQ_IHARD) ? 669 hard = (newlim->d_fieldmask & QC_INO_HARD) ?
671 (xfs_qcnt_t) newlim->d_ino_hardlimit : 670 (xfs_qcnt_t) newlim->d_ino_hardlimit :
672 be64_to_cpu(ddq->d_ino_hardlimit); 671 be64_to_cpu(ddq->d_ino_hardlimit);
673 soft = (newlim->d_fieldmask & FS_DQ_ISOFT) ? 672 soft = (newlim->d_fieldmask & QC_INO_SOFT) ?
674 (xfs_qcnt_t) newlim->d_ino_softlimit : 673 (xfs_qcnt_t) newlim->d_ino_softlimit :
675 be64_to_cpu(ddq->d_ino_softlimit); 674 be64_to_cpu(ddq->d_ino_softlimit);
676 if (hard == 0 || hard >= soft) { 675 if (hard == 0 || hard >= soft) {
@@ -687,12 +686,12 @@ xfs_qm_scall_setqlim(
687 /* 686 /*
688 * Update warnings counter(s) if requested 687 * Update warnings counter(s) if requested
689 */ 688 */
690 if (newlim->d_fieldmask & FS_DQ_BWARNS) 689 if (newlim->d_fieldmask & QC_SPC_WARNS)
691 ddq->d_bwarns = cpu_to_be16(newlim->d_bwarns); 690 ddq->d_bwarns = cpu_to_be16(newlim->d_spc_warns);
692 if (newlim->d_fieldmask & FS_DQ_IWARNS) 691 if (newlim->d_fieldmask & QC_INO_WARNS)
693 ddq->d_iwarns = cpu_to_be16(newlim->d_iwarns); 692 ddq->d_iwarns = cpu_to_be16(newlim->d_ino_warns);
694 if (newlim->d_fieldmask & FS_DQ_RTBWARNS) 693 if (newlim->d_fieldmask & QC_RT_SPC_WARNS)
695 ddq->d_rtbwarns = cpu_to_be16(newlim->d_rtbwarns); 694 ddq->d_rtbwarns = cpu_to_be16(newlim->d_rt_spc_warns);
696 695
697 if (id == 0) { 696 if (id == 0) {
698 /* 697 /*
@@ -702,24 +701,24 @@ xfs_qm_scall_setqlim(
702 * soft and hard limit values (already done, above), and 701 * soft and hard limit values (already done, above), and
703 * for warnings. 702 * for warnings.
704 */ 703 */
705 if (newlim->d_fieldmask & FS_DQ_BTIMER) { 704 if (newlim->d_fieldmask & QC_SPC_TIMER) {
706 q->qi_btimelimit = newlim->d_btimer; 705 q->qi_btimelimit = newlim->d_spc_timer;
707 ddq->d_btimer = cpu_to_be32(newlim->d_btimer); 706 ddq->d_btimer = cpu_to_be32(newlim->d_spc_timer);
708 } 707 }
709 if (newlim->d_fieldmask & FS_DQ_ITIMER) { 708 if (newlim->d_fieldmask & QC_INO_TIMER) {
710 q->qi_itimelimit = newlim->d_itimer; 709 q->qi_itimelimit = newlim->d_ino_timer;
711 ddq->d_itimer = cpu_to_be32(newlim->d_itimer); 710 ddq->d_itimer = cpu_to_be32(newlim->d_ino_timer);
712 } 711 }
713 if (newlim->d_fieldmask & FS_DQ_RTBTIMER) { 712 if (newlim->d_fieldmask & QC_RT_SPC_TIMER) {
714 q->qi_rtbtimelimit = newlim->d_rtbtimer; 713 q->qi_rtbtimelimit = newlim->d_rt_spc_timer;
715 ddq->d_rtbtimer = cpu_to_be32(newlim->d_rtbtimer); 714 ddq->d_rtbtimer = cpu_to_be32(newlim->d_rt_spc_timer);
716 } 715 }
717 if (newlim->d_fieldmask & FS_DQ_BWARNS) 716 if (newlim->d_fieldmask & QC_SPC_WARNS)
718 q->qi_bwarnlimit = newlim->d_bwarns; 717 q->qi_bwarnlimit = newlim->d_spc_warns;
719 if (newlim->d_fieldmask & FS_DQ_IWARNS) 718 if (newlim->d_fieldmask & QC_INO_WARNS)
720 q->qi_iwarnlimit = newlim->d_iwarns; 719 q->qi_iwarnlimit = newlim->d_ino_warns;
721 if (newlim->d_fieldmask & FS_DQ_RTBWARNS) 720 if (newlim->d_fieldmask & QC_RT_SPC_WARNS)
722 q->qi_rtbwarnlimit = newlim->d_rtbwarns; 721 q->qi_rtbwarnlimit = newlim->d_rt_spc_warns;
723 } else { 722 } else {
724 /* 723 /*
725 * If the user is now over quota, start the timelimit. 724 * If the user is now over quota, start the timelimit.
@@ -824,7 +823,7 @@ xfs_qm_scall_getquota(
824 struct xfs_mount *mp, 823 struct xfs_mount *mp,
825 xfs_dqid_t id, 824 xfs_dqid_t id,
826 uint type, 825 uint type,
827 struct fs_disk_quota *dst) 826 struct qc_dqblk *dst)
828{ 827{
829 struct xfs_dquot *dqp; 828 struct xfs_dquot *dqp;
830 int error; 829 int error;
@@ -848,28 +847,25 @@ xfs_qm_scall_getquota(
848 } 847 }
849 848
850 memset(dst, 0, sizeof(*dst)); 849 memset(dst, 0, sizeof(*dst));
851 dst->d_version = FS_DQUOT_VERSION; 850 dst->d_spc_hardlimit =
852 dst->d_flags = xfs_qm_export_qtype_flags(dqp->q_core.d_flags); 851 XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_blk_hardlimit));
853 dst->d_id = be32_to_cpu(dqp->q_core.d_id); 852 dst->d_spc_softlimit =
854 dst->d_blk_hardlimit = 853 XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_blk_softlimit));
855 XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_blk_hardlimit));
856 dst->d_blk_softlimit =
857 XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_blk_softlimit));
858 dst->d_ino_hardlimit = be64_to_cpu(dqp->q_core.d_ino_hardlimit); 854 dst->d_ino_hardlimit = be64_to_cpu(dqp->q_core.d_ino_hardlimit);
859 dst->d_ino_softlimit = be64_to_cpu(dqp->q_core.d_ino_softlimit); 855 dst->d_ino_softlimit = be64_to_cpu(dqp->q_core.d_ino_softlimit);
860 dst->d_bcount = XFS_FSB_TO_BB(mp, dqp->q_res_bcount); 856 dst->d_space = XFS_FSB_TO_B(mp, dqp->q_res_bcount);
861 dst->d_icount = dqp->q_res_icount; 857 dst->d_ino_count = dqp->q_res_icount;
862 dst->d_btimer = be32_to_cpu(dqp->q_core.d_btimer); 858 dst->d_spc_timer = be32_to_cpu(dqp->q_core.d_btimer);
863 dst->d_itimer = be32_to_cpu(dqp->q_core.d_itimer); 859 dst->d_ino_timer = be32_to_cpu(dqp->q_core.d_itimer);
864 dst->d_iwarns = be16_to_cpu(dqp->q_core.d_iwarns); 860 dst->d_ino_warns = be16_to_cpu(dqp->q_core.d_iwarns);
865 dst->d_bwarns = be16_to_cpu(dqp->q_core.d_bwarns); 861 dst->d_spc_warns = be16_to_cpu(dqp->q_core.d_bwarns);
866 dst->d_rtb_hardlimit = 862 dst->d_rt_spc_hardlimit =
867 XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_rtb_hardlimit)); 863 XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_rtb_hardlimit));
868 dst->d_rtb_softlimit = 864 dst->d_rt_spc_softlimit =
869 XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_rtb_softlimit)); 865 XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_rtb_softlimit));
870 dst->d_rtbcount = XFS_FSB_TO_BB(mp, dqp->q_res_rtbcount); 866 dst->d_rt_space = XFS_FSB_TO_B(mp, dqp->q_res_rtbcount);
871 dst->d_rtbtimer = be32_to_cpu(dqp->q_core.d_rtbtimer); 867 dst->d_rt_spc_timer = be32_to_cpu(dqp->q_core.d_rtbtimer);
872 dst->d_rtbwarns = be16_to_cpu(dqp->q_core.d_rtbwarns); 868 dst->d_rt_spc_warns = be16_to_cpu(dqp->q_core.d_rtbwarns);
873 869
874 /* 870 /*
875 * Internally, we don't reset all the timers when quota enforcement 871 * Internally, we don't reset all the timers when quota enforcement
@@ -882,23 +878,23 @@ xfs_qm_scall_getquota(
882 dqp->q_core.d_flags == XFS_DQ_GROUP) || 878 dqp->q_core.d_flags == XFS_DQ_GROUP) ||
883 (!XFS_IS_PQUOTA_ENFORCED(mp) && 879 (!XFS_IS_PQUOTA_ENFORCED(mp) &&
884 dqp->q_core.d_flags == XFS_DQ_PROJ)) { 880 dqp->q_core.d_flags == XFS_DQ_PROJ)) {
885 dst->d_btimer = 0; 881 dst->d_spc_timer = 0;
886 dst->d_itimer = 0; 882 dst->d_ino_timer = 0;
887 dst->d_rtbtimer = 0; 883 dst->d_rt_spc_timer = 0;
888 } 884 }
889 885
890#ifdef DEBUG 886#ifdef DEBUG
891 if (((XFS_IS_UQUOTA_ENFORCED(mp) && dst->d_flags == FS_USER_QUOTA) || 887 if (((XFS_IS_UQUOTA_ENFORCED(mp) && type == XFS_DQ_USER) ||
892 (XFS_IS_GQUOTA_ENFORCED(mp) && dst->d_flags == FS_GROUP_QUOTA) || 888 (XFS_IS_GQUOTA_ENFORCED(mp) && type == XFS_DQ_GROUP) ||
893 (XFS_IS_PQUOTA_ENFORCED(mp) && dst->d_flags == FS_PROJ_QUOTA)) && 889 (XFS_IS_PQUOTA_ENFORCED(mp) && type == XFS_DQ_PROJ)) &&
894 dst->d_id != 0) { 890 id != 0) {
895 if ((dst->d_bcount > dst->d_blk_softlimit) && 891 if ((dst->d_space > dst->d_spc_softlimit) &&
896 (dst->d_blk_softlimit > 0)) { 892 (dst->d_spc_softlimit > 0)) {
897 ASSERT(dst->d_btimer != 0); 893 ASSERT(dst->d_spc_timer != 0);
898 } 894 }
899 if ((dst->d_icount > dst->d_ino_softlimit) && 895 if ((dst->d_ino_count > dst->d_ino_softlimit) &&
900 (dst->d_ino_softlimit > 0)) { 896 (dst->d_ino_softlimit > 0)) {
901 ASSERT(dst->d_itimer != 0); 897 ASSERT(dst->d_ino_timer != 0);
902 } 898 }
903 } 899 }
904#endif 900#endif
@@ -908,26 +904,6 @@ out_put:
908} 904}
909 905
910STATIC uint 906STATIC uint
911xfs_qm_export_qtype_flags(
912 uint flags)
913{
914 /*
915 * Can't be more than one, or none.
916 */
917 ASSERT((flags & (FS_PROJ_QUOTA | FS_USER_QUOTA)) !=
918 (FS_PROJ_QUOTA | FS_USER_QUOTA));
919 ASSERT((flags & (FS_PROJ_QUOTA | FS_GROUP_QUOTA)) !=
920 (FS_PROJ_QUOTA | FS_GROUP_QUOTA));
921 ASSERT((flags & (FS_USER_QUOTA | FS_GROUP_QUOTA)) !=
922 (FS_USER_QUOTA | FS_GROUP_QUOTA));
923 ASSERT((flags & (FS_PROJ_QUOTA|FS_USER_QUOTA|FS_GROUP_QUOTA)) != 0);
924
925 return (flags & XFS_DQ_USER) ?
926 FS_USER_QUOTA : (flags & XFS_DQ_PROJ) ?
927 FS_PROJ_QUOTA : FS_GROUP_QUOTA;
928}
929
930STATIC uint
931xfs_qm_export_flags( 907xfs_qm_export_flags(
932 uint flags) 908 uint flags)
933{ 909{
diff --git a/fs/xfs/xfs_quotaops.c b/fs/xfs/xfs_quotaops.c
index 7542bbeca6a1..801a84c1cdc3 100644
--- a/fs/xfs/xfs_quotaops.c
+++ b/fs/xfs/xfs_quotaops.c
@@ -131,7 +131,7 @@ STATIC int
131xfs_fs_get_dqblk( 131xfs_fs_get_dqblk(
132 struct super_block *sb, 132 struct super_block *sb,
133 struct kqid qid, 133 struct kqid qid,
134 struct fs_disk_quota *fdq) 134 struct qc_dqblk *qdq)
135{ 135{
136 struct xfs_mount *mp = XFS_M(sb); 136 struct xfs_mount *mp = XFS_M(sb);
137 137
@@ -141,14 +141,14 @@ xfs_fs_get_dqblk(
141 return -ESRCH; 141 return -ESRCH;
142 142
143 return xfs_qm_scall_getquota(mp, from_kqid(&init_user_ns, qid), 143 return xfs_qm_scall_getquota(mp, from_kqid(&init_user_ns, qid),
144 xfs_quota_type(qid.type), fdq); 144 xfs_quota_type(qid.type), qdq);
145} 145}
146 146
147STATIC int 147STATIC int
148xfs_fs_set_dqblk( 148xfs_fs_set_dqblk(
149 struct super_block *sb, 149 struct super_block *sb,
150 struct kqid qid, 150 struct kqid qid,
151 struct fs_disk_quota *fdq) 151 struct qc_dqblk *qdq)
152{ 152{
153 struct xfs_mount *mp = XFS_M(sb); 153 struct xfs_mount *mp = XFS_M(sb);
154 154
@@ -160,7 +160,7 @@ xfs_fs_set_dqblk(
160 return -ESRCH; 160 return -ESRCH;
161 161
162 return xfs_qm_scall_setqlim(mp, from_kqid(&init_user_ns, qid), 162 return xfs_qm_scall_setqlim(mp, from_kqid(&init_user_ns, qid),
163 xfs_quota_type(qid.type), fdq); 163 xfs_quota_type(qid.type), qdq);
164} 164}
165 165
166const struct quotactl_ops xfs_quotactl_operations = { 166const struct quotactl_ops xfs_quotactl_operations = {
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index e3a1721c8354..7c7695940ddd 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -228,7 +228,9 @@ struct i2c_client {
228 struct device dev; /* the device structure */ 228 struct device dev; /* the device structure */
229 int irq; /* irq issued by device */ 229 int irq; /* irq issued by device */
230 struct list_head detected; 230 struct list_head detected;
231#if IS_ENABLED(CONFIG_I2C_SLAVE)
231 i2c_slave_cb_t slave_cb; /* callback for slave mode */ 232 i2c_slave_cb_t slave_cb; /* callback for slave mode */
233#endif
232}; 234};
233#define to_i2c_client(d) container_of(d, struct i2c_client, dev) 235#define to_i2c_client(d) container_of(d, struct i2c_client, dev)
234 236
@@ -253,6 +255,7 @@ static inline void i2c_set_clientdata(struct i2c_client *dev, void *data)
253 255
254/* I2C slave support */ 256/* I2C slave support */
255 257
258#if IS_ENABLED(CONFIG_I2C_SLAVE)
256enum i2c_slave_event { 259enum i2c_slave_event {
257 I2C_SLAVE_REQ_READ_START, 260 I2C_SLAVE_REQ_READ_START,
258 I2C_SLAVE_REQ_READ_END, 261 I2C_SLAVE_REQ_READ_END,
@@ -269,6 +272,7 @@ static inline int i2c_slave_event(struct i2c_client *client,
269{ 272{
270 return client->slave_cb(client, event, val); 273 return client->slave_cb(client, event, val);
271} 274}
275#endif
272 276
273/** 277/**
274 * struct i2c_board_info - template for device creation 278 * struct i2c_board_info - template for device creation
@@ -404,8 +408,10 @@ struct i2c_algorithm {
404 /* To determine what the adapter supports */ 408 /* To determine what the adapter supports */
405 u32 (*functionality) (struct i2c_adapter *); 409 u32 (*functionality) (struct i2c_adapter *);
406 410
411#if IS_ENABLED(CONFIG_I2C_SLAVE)
407 int (*reg_slave)(struct i2c_client *client); 412 int (*reg_slave)(struct i2c_client *client);
408 int (*unreg_slave)(struct i2c_client *client); 413 int (*unreg_slave)(struct i2c_client *client);
414#endif
409}; 415};
410 416
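All three hunks use the same IS_ENABLED(CONFIG_I2C_SLAVE) guard, which is true for both built-in (=y) and modular (=m) builds, so the slave-mode members and operations disappear entirely when the option is off. A hedged sketch of a slave callback a bus driver might register (the body and the WRITE_END event name are assumptions extrapolated from the two events shown above):

#if IS_ENABLED(CONFIG_I2C_SLAVE)
/* Hypothetical callback: log every byte the bus master writes to us. */
static int my_slave_cb(struct i2c_client *client,
                       enum i2c_slave_event event, u8 *val)
{
        if (event == I2C_SLAVE_REQ_WRITE_END)
                dev_dbg(&client->dev, "master wrote 0x%02x\n", *val);
        return 0;
}
#endif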
411/** 417/**
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index bea465f24ebb..b11b28a30b9e 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -472,27 +472,59 @@ static inline int vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
472/** 472/**
473 * vlan_get_protocol - get protocol EtherType. 473 * vlan_get_protocol - get protocol EtherType.
474 * @skb: skbuff to query 474 * @skb: skbuff to query
475 * @type: first vlan protocol
476 * @depth: buffer to store length of eth and vlan tags in bytes
475 * 477 *
476 * Returns the EtherType of the packet, regardless of whether it is 478 * Returns the EtherType of the packet, regardless of whether it is
477 * vlan encapsulated (normal or hardware accelerated) or not. 479 * vlan encapsulated (normal or hardware accelerated) or not.
478 */ 480 */
479static inline __be16 vlan_get_protocol(const struct sk_buff *skb) 481static inline __be16 __vlan_get_protocol(struct sk_buff *skb, __be16 type,
482 int *depth)
480{ 483{
481 __be16 protocol = 0; 484 unsigned int vlan_depth = skb->mac_len;
482 485
483 if (skb_vlan_tag_present(skb) || 486 /* if type is 802.1Q/AD then the header should already be
484 skb->protocol != cpu_to_be16(ETH_P_8021Q)) 487 * present at mac_len - VLAN_HLEN (if mac_len > 0), or at
485 protocol = skb->protocol; 488 * ETH_HLEN otherwise
486 else { 489 */
487 __be16 proto, *protop; 490 if (type == htons(ETH_P_8021Q) || type == htons(ETH_P_8021AD)) {
488 protop = skb_header_pointer(skb, offsetof(struct vlan_ethhdr, 491 if (vlan_depth) {
489 h_vlan_encapsulated_proto), 492 if (WARN_ON(vlan_depth < VLAN_HLEN))
490 sizeof(proto), &proto); 493 return 0;
491 if (likely(protop)) 494 vlan_depth -= VLAN_HLEN;
492 protocol = *protop; 495 } else {
496 vlan_depth = ETH_HLEN;
497 }
498 do {
499 struct vlan_hdr *vh;
500
501 if (unlikely(!pskb_may_pull(skb,
502 vlan_depth + VLAN_HLEN)))
503 return 0;
504
505 vh = (struct vlan_hdr *)(skb->data + vlan_depth);
506 type = vh->h_vlan_encapsulated_proto;
507 vlan_depth += VLAN_HLEN;
508 } while (type == htons(ETH_P_8021Q) ||
509 type == htons(ETH_P_8021AD));
493 } 510 }
494 511
495 return protocol; 512 if (depth)
513 *depth = vlan_depth;
514
515 return type;
516}
517
518/**
519 * vlan_get_protocol - get protocol EtherType.
520 * @skb: skbuff to query
521 *
522 * Returns the EtherType of the packet, regardless of whether it is
523 * vlan encapsulated (normal or hardware accelerated) or not.
524 */
525static inline __be16 vlan_get_protocol(struct sk_buff *skb)
526{
527 return __vlan_get_protocol(skb, skb->protocol, NULL);
496} 528}
497 529
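The refactor moves the old VLAN-walking loop into __vlan_get_protocol() so that net/core/dev.c can share it (see the skb_network_protocol() hunk below). A hedged usage sketch for a caller that also needs the header depth:

/* Sketch: resolve the innermost EtherType of a possibly QinQ-tagged
 * frame and record how many header bytes precede the network layer. */
int depth;
__be16 proto = __vlan_get_protocol(skb, skb->protocol, &depth);

if (!proto)
        return -EINVAL;         /* truncated or unpullable VLAN stack */
skb_set_network_header(skb, depth);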
498static inline void vlan_set_encap_proto(struct sk_buff *skb, 530static inline void vlan_set_encap_proto(struct sk_buff *skb,
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 5449d2f4a1ef..64ce58bee6f5 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -176,7 +176,7 @@ extern int _cond_resched(void);
176 */ 176 */
177# define might_sleep() \ 177# define might_sleep() \
178 do { __might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0) 178 do { __might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0)
179# define sched_annotate_sleep() __set_current_state(TASK_RUNNING) 179# define sched_annotate_sleep() (current->task_state_change = 0)
180#else 180#else
181 static inline void ___might_sleep(const char *file, int line, 181 static inline void ___might_sleep(const char *file, int line,
182 int preempt_offset) { } 182 int preempt_offset) { }
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 977b0b164431..c116cb02475c 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -98,7 +98,7 @@ enum {
98 MLX4_MAX_NUM_PF = 16, 98 MLX4_MAX_NUM_PF = 16,
99 MLX4_MAX_NUM_VF = 126, 99 MLX4_MAX_NUM_VF = 126,
100 MLX4_MAX_NUM_VF_P_PORT = 64, 100 MLX4_MAX_NUM_VF_P_PORT = 64,
101 MLX4_MFUNC_MAX = 80, 101 MLX4_MFUNC_MAX = 128,
102 MLX4_MAX_EQ_NUM = 1024, 102 MLX4_MAX_EQ_NUM = 1024,
103 MLX4_MFUNC_EQ_NUM = 4, 103 MLX4_MFUNC_EQ_NUM = 4,
104 MLX4_MFUNC_MAX_EQES = 8, 104 MLX4_MFUNC_MAX_EQES = 8,
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 80fc92a49649..dd5ea3016fc4 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1070,6 +1070,7 @@ static inline int page_mapped(struct page *page)
1070#define VM_FAULT_WRITE 0x0008 /* Special case for get_user_pages */ 1070#define VM_FAULT_WRITE 0x0008 /* Special case for get_user_pages */
1071#define VM_FAULT_HWPOISON 0x0010 /* Hit poisoned small page */ 1071#define VM_FAULT_HWPOISON 0x0010 /* Hit poisoned small page */
1072#define VM_FAULT_HWPOISON_LARGE 0x0020 /* Hit poisoned large page. Index encoded in upper bits */ 1072#define VM_FAULT_HWPOISON_LARGE 0x0020 /* Hit poisoned large page. Index encoded in upper bits */
1073#define VM_FAULT_SIGSEGV 0x0040
1073 1074
1074#define VM_FAULT_NOPAGE 0x0100 /* ->fault installed the pte, not return page */ 1075#define VM_FAULT_NOPAGE 0x0100 /* ->fault installed the pte, not return page */
1075#define VM_FAULT_LOCKED 0x0200 /* ->fault locked the returned page */ 1076#define VM_FAULT_LOCKED 0x0200 /* ->fault locked the returned page */
@@ -1078,8 +1079,9 @@ static inline int page_mapped(struct page *page)
1078 1079
1079#define VM_FAULT_HWPOISON_LARGE_MASK 0xf000 /* encodes hpage index for large hwpoison */ 1080#define VM_FAULT_HWPOISON_LARGE_MASK 0xf000 /* encodes hpage index for large hwpoison */
1080 1081
1081#define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_HWPOISON | \ 1082#define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | \
1082 VM_FAULT_FALLBACK | VM_FAULT_HWPOISON_LARGE) 1083 VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE | \
1084 VM_FAULT_FALLBACK)
1083 1085
1084/* Encode hstate index for a hwpoisoned large page */ 1086/* Encode hstate index for a hwpoisoned large page */
1085#define VM_FAULT_SET_HINDEX(x) ((x) << 12) 1087#define VM_FAULT_SET_HINDEX(x) ((x) << 12)
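Because VM_FAULT_SIGSEGV is now part of VM_FAULT_ERROR, every caller that decoded VM_FAULT_SIGBUS must decode the new bit as well; the mm/gup.c, mm/ksm.c and mm/memory.c hunks further down do exactly that. Condensed, the caller-side pattern for a handle_mm_fault() result becomes:

/* Sketch: decode a fault result now that SIGSEGV is a distinct error bit. */
if (ret & VM_FAULT_ERROR) {
        if (ret & VM_FAULT_OOM)
                return -ENOMEM;
        if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
                return -EHWPOISON;
        if (ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
                return -EFAULT;                 /* the new bit lands here */
        BUG();
}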
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 4f7a61ca4b39..664de5a4ec46 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -450,11 +450,6 @@ struct perf_event {
450#endif /* CONFIG_PERF_EVENTS */ 450#endif /* CONFIG_PERF_EVENTS */
451}; 451};
452 452
453enum perf_event_context_type {
454 task_context,
455 cpu_context,
456};
457
458/** 453/**
459 * struct perf_event_context - event context structure 454 * struct perf_event_context - event context structure
460 * 455 *
@@ -462,7 +457,6 @@ enum perf_event_context_type {
462 */ 457 */
463struct perf_event_context { 458struct perf_event_context {
464 struct pmu *pmu; 459 struct pmu *pmu;
465 enum perf_event_context_type type;
466 /* 460 /*
467 * Protect the states of the events in the list, 461 * Protect the states of the events in the list,
468 * nr_active, and the list: 462 * nr_active, and the list:
diff --git a/include/linux/quota.h b/include/linux/quota.h
index 50978b781a19..097d7eb2441e 100644
--- a/include/linux/quota.h
+++ b/include/linux/quota.h
@@ -321,6 +321,49 @@ struct dquot_operations {
321 321
322struct path; 322struct path;
323 323
324/* Structure for communicating via ->get_dqblk() & ->set_dqblk() */
325struct qc_dqblk {
326 int d_fieldmask; /* mask of fields to change in ->set_dqblk() */
327 u64 d_spc_hardlimit; /* absolute limit on used space */
328 u64 d_spc_softlimit; /* preferred limit on used space */
329 u64 d_ino_hardlimit; /* maximum # allocated inodes */
330 u64 d_ino_softlimit; /* preferred inode limit */
331 u64 d_space; /* Space owned by the user */
332 u64 d_ino_count; /* # inodes owned by the user */
333 s64 d_ino_timer; /* zero if within inode limits */
334 /* if not, we refuse service */
335 s64 d_spc_timer; /* similar to above; for space */
336 int d_ino_warns; /* # warnings issued wrt num inodes */
337 int d_spc_warns; /* # warnings issued wrt used space */
338 u64 d_rt_spc_hardlimit; /* absolute limit on realtime space */
339 u64 d_rt_spc_softlimit; /* preferred limit on RT space */
340 u64 d_rt_space; /* realtime space owned */
341 s64 d_rt_spc_timer; /* similar to above; for RT space */
342 int d_rt_spc_warns; /* # warnings issued wrt RT space */
343};
344
345/* Field specifiers for ->set_dqblk() in struct qc_dqblk */
346#define QC_INO_SOFT (1<<0)
347#define QC_INO_HARD (1<<1)
348#define QC_SPC_SOFT (1<<2)
349#define QC_SPC_HARD (1<<3)
350#define QC_RT_SPC_SOFT (1<<4)
351#define QC_RT_SPC_HARD (1<<5)
352#define QC_LIMIT_MASK (QC_INO_SOFT | QC_INO_HARD | QC_SPC_SOFT | QC_SPC_HARD | \
353 QC_RT_SPC_SOFT | QC_RT_SPC_HARD)
354#define QC_SPC_TIMER (1<<6)
355#define QC_INO_TIMER (1<<7)
356#define QC_RT_SPC_TIMER (1<<8)
357#define QC_TIMER_MASK (QC_SPC_TIMER | QC_INO_TIMER | QC_RT_SPC_TIMER)
358#define QC_SPC_WARNS (1<<9)
359#define QC_INO_WARNS (1<<10)
360#define QC_RT_SPC_WARNS (1<<11)
361#define QC_WARNS_MASK (QC_SPC_WARNS | QC_INO_WARNS | QC_RT_SPC_WARNS)
362#define QC_SPACE (1<<12)
363#define QC_INO_COUNT (1<<13)
364#define QC_RT_SPACE (1<<14)
365#define QC_ACCT_MASK (QC_SPACE | QC_INO_COUNT | QC_RT_SPACE)
366
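A filesystem's ->set_dqblk() is expected to honor only the QC_* bits it supports and reject the rest, as the XFS hunks below do with their XFS_QC_MASK. A hedged sketch of that pattern (struct my_limits and its fields are hypothetical):

/* Sketch: validate and apply a qc_dqblk fieldmask. */
#define MY_SUPPORTED_MASK  (QC_LIMIT_MASK | QC_TIMER_MASK | QC_WARNS_MASK)

static int my_set_dqblk(struct my_limits *lim, const struct qc_dqblk *qdq)
{
        if (qdq->d_fieldmask & ~MY_SUPPORTED_MASK)
                return -EINVAL;                 /* unknown or unsupported bits */
        if (qdq->d_fieldmask & QC_SPC_HARD)
                lim->spc_hard = qdq->d_spc_hardlimit;   /* bytes, not blocks */
        if (qdq->d_fieldmask & QC_INO_SOFT)
                lim->ino_soft = qdq->d_ino_softlimit;
        /* remaining QC_* bits are handled the same way */
        return 0;
}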
324/* Operations handling requests from userspace */ 367/* Operations handling requests from userspace */
325struct quotactl_ops { 368struct quotactl_ops {
326 int (*quota_on)(struct super_block *, int, int, struct path *); 369 int (*quota_on)(struct super_block *, int, int, struct path *);
@@ -329,8 +372,8 @@ struct quotactl_ops {
329 int (*quota_sync)(struct super_block *, int); 372 int (*quota_sync)(struct super_block *, int);
330 int (*get_info)(struct super_block *, int, struct if_dqinfo *); 373 int (*get_info)(struct super_block *, int, struct if_dqinfo *);
331 int (*set_info)(struct super_block *, int, struct if_dqinfo *); 374 int (*set_info)(struct super_block *, int, struct if_dqinfo *);
332 int (*get_dqblk)(struct super_block *, struct kqid, struct fs_disk_quota *); 375 int (*get_dqblk)(struct super_block *, struct kqid, struct qc_dqblk *);
333 int (*set_dqblk)(struct super_block *, struct kqid, struct fs_disk_quota *); 376 int (*set_dqblk)(struct super_block *, struct kqid, struct qc_dqblk *);
334 int (*get_xstate)(struct super_block *, struct fs_quota_stat *); 377 int (*get_xstate)(struct super_block *, struct fs_quota_stat *);
335 int (*set_xstate)(struct super_block *, unsigned int, int); 378 int (*set_xstate)(struct super_block *, unsigned int, int);
336 int (*get_xstatev)(struct super_block *, struct fs_quota_statv *); 379 int (*get_xstatev)(struct super_block *, struct fs_quota_statv *);
diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h
index f23538a6e411..29e3455f7d41 100644
--- a/include/linux/quotaops.h
+++ b/include/linux/quotaops.h
@@ -98,9 +98,9 @@ int dquot_quota_sync(struct super_block *sb, int type);
98int dquot_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii); 98int dquot_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii);
99int dquot_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii); 99int dquot_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii);
100int dquot_get_dqblk(struct super_block *sb, struct kqid id, 100int dquot_get_dqblk(struct super_block *sb, struct kqid id,
101 struct fs_disk_quota *di); 101 struct qc_dqblk *di);
102int dquot_set_dqblk(struct super_block *sb, struct kqid id, 102int dquot_set_dqblk(struct super_block *sb, struct kqid id,
103 struct fs_disk_quota *di); 103 struct qc_dqblk *di);
104 104
105int __dquot_transfer(struct inode *inode, struct dquot **transfer_to); 105int __dquot_transfer(struct inode *inode, struct dquot **transfer_to);
106int dquot_transfer(struct inode *inode, struct iattr *iattr); 106int dquot_transfer(struct inode *inode, struct iattr *iattr);
diff --git a/include/net/flow_keys.h b/include/net/flow_keys.h
index 7ee2df083542..dc8fd81412bf 100644
--- a/include/net/flow_keys.h
+++ b/include/net/flow_keys.h
@@ -22,9 +22,9 @@ struct flow_keys {
22 __be32 ports; 22 __be32 ports;
23 __be16 port16[2]; 23 __be16 port16[2];
24 }; 24 };
25 u16 thoff; 25 u16 thoff;
26 u16 n_proto; 26 __be16 n_proto;
27 u8 ip_proto; 27 u8 ip_proto;
28}; 28};
29 29
30bool __skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow, 30bool __skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow,
diff --git a/include/net/ip.h b/include/net/ip.h
index 14211eaff17f..025c61c0dffb 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -181,7 +181,7 @@ static inline __u8 ip_reply_arg_flowi_flags(const struct ip_reply_arg *arg)
181 return (arg->flags & IP_REPLY_ARG_NOSRCCHECK) ? FLOWI_FLAG_ANYSRC : 0; 181 return (arg->flags & IP_REPLY_ARG_NOSRCCHECK) ? FLOWI_FLAG_ANYSRC : 0;
182} 182}
183 183
184void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, 184void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
185 const struct ip_options *sopt, 185 const struct ip_options *sopt,
186 __be32 daddr, __be32 saddr, 186 __be32 daddr, __be32 saddr,
187 const struct ip_reply_arg *arg, 187 const struct ip_reply_arg *arg,
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 8027ca53e31f..8ae7c9edbd3c 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -671,6 +671,9 @@ static inline int ipv6_addr_diff(const struct in6_addr *a1, const struct in6_add
671 return __ipv6_addr_diff(a1, a2, sizeof(struct in6_addr)); 671 return __ipv6_addr_diff(a1, a2, sizeof(struct in6_addr));
672} 672}
673 673
674u32 __ipv6_select_ident(u32 hashrnd, struct in6_addr *dst,
675 struct in6_addr *src);
676void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt);
674void ipv6_proxy_select_ident(struct sk_buff *skb); 677void ipv6_proxy_select_ident(struct sk_buff *skb);
675 678
676int ip6_dst_hoplimit(struct dst_entry *dst); 679int ip6_dst_hoplimit(struct dst_entry *dst);
@@ -708,7 +711,7 @@ static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb,
708 __be32 flowlabel, bool autolabel) 711 __be32 flowlabel, bool autolabel)
709{ 712{
710 if (!flowlabel && (autolabel || net->ipv6.sysctl.auto_flowlabels)) { 713 if (!flowlabel && (autolabel || net->ipv6.sysctl.auto_flowlabels)) {
711 __be32 hash; 714 u32 hash;
712 715
713 hash = skb_get_hash(skb); 716 hash = skb_get_hash(skb);
714 717
@@ -718,7 +721,7 @@ static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb,
718 */ 721 */
719 hash ^= hash >> 12; 722 hash ^= hash >> 12;
720 723
721 flowlabel = hash & IPV6_FLOWLABEL_MASK; 724 flowlabel = (__force __be32)hash & IPV6_FLOWLABEL_MASK;
722 } 725 }
723 726
724 return flowlabel; 727 return flowlabel;
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 3ae969e3acf0..9eaaa7884586 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -530,6 +530,8 @@ enum nft_chain_type {
530 530
531int nft_chain_validate_dependency(const struct nft_chain *chain, 531int nft_chain_validate_dependency(const struct nft_chain *chain,
532 enum nft_chain_type type); 532 enum nft_chain_type type);
533int nft_chain_validate_hooks(const struct nft_chain *chain,
534 unsigned int hook_flags);
533 535
534struct nft_stats { 536struct nft_stats {
535 u64 bytes; 537 u64 bytes;
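nft_chain_validate_hooks() takes a bitmask of acceptable hook numbers; the nft_reject_bridge hunk below shows the caller side, passing (1 << NF_BR_PRE_ROUTING) | (1 << NF_BR_LOCAL_IN). A hedged sketch of the presumed semantics (the real body lives in net/netfilter/nf_tables_api.c, which is not part of this excerpt):

/* Sketch: non-base chains always pass; a base chain must be hooked
 * at one of the allowed hook points. */
int nft_chain_validate_hooks(const struct nft_chain *chain,
                             unsigned int hook_flags)
{
        struct nft_base_chain *basechain;

        if (chain->flags & NFT_BASE_CHAIN) {
                basechain = nft_base_chain(chain);
                if ((1 << basechain->ops[0].hooknum) & hook_flags)
                        return 0;
                return -EOPNOTSUPP;
        }
        return 0;
}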
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index 7283f4d39ae2..e0bdcb147326 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -53,6 +53,7 @@ struct netns_ipv4 {
53 struct inet_peer_base *peers; 53 struct inet_peer_base *peers;
54 struct tcpm_hash_bucket *tcp_metrics_hash; 54 struct tcpm_hash_bucket *tcp_metrics_hash;
55 unsigned int tcp_metrics_hash_log; 55 unsigned int tcp_metrics_hash_log;
56 struct sock * __percpu *tcp_sk;
56 struct netns_frags frags; 57 struct netns_frags frags;
57#ifdef CONFIG_NETFILTER 58#ifdef CONFIG_NETFILTER
58 struct xt_table *iptable_filter; 59 struct xt_table *iptable_filter;
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 3d282cbb66bf..c605d305c577 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -79,6 +79,9 @@ struct Qdisc {
79 struct netdev_queue *dev_queue; 79 struct netdev_queue *dev_queue;
80 80
81 struct gnet_stats_rate_est64 rate_est; 81 struct gnet_stats_rate_est64 rate_est;
82 struct gnet_stats_basic_cpu __percpu *cpu_bstats;
83 struct gnet_stats_queue __percpu *cpu_qstats;
84
82 struct Qdisc *next_sched; 85 struct Qdisc *next_sched;
83 struct sk_buff *gso_skb; 86 struct sk_buff *gso_skb;
84 /* 87 /*
@@ -86,15 +89,9 @@ struct Qdisc {
86 */ 89 */
87 unsigned long state; 90 unsigned long state;
88 struct sk_buff_head q; 91 struct sk_buff_head q;
89 union { 92 struct gnet_stats_basic_packed bstats;
90 struct gnet_stats_basic_packed bstats;
91 struct gnet_stats_basic_cpu __percpu *cpu_bstats;
92 } __packed;
93 unsigned int __state; 93 unsigned int __state;
94 union { 94 struct gnet_stats_queue qstats;
95 struct gnet_stats_queue qstats;
96 struct gnet_stats_queue __percpu *cpu_qstats;
97 } __packed;
98 struct rcu_head rcu_head; 95 struct rcu_head rcu_head;
99 int padded; 96 int padded;
100 atomic_t refcnt; 97 atomic_t refcnt;
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 637ee490ec81..28e9bd3abceb 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -843,8 +843,8 @@ void tcp_get_available_congestion_control(char *buf, size_t len);
843void tcp_get_allowed_congestion_control(char *buf, size_t len); 843void tcp_get_allowed_congestion_control(char *buf, size_t len);
844int tcp_set_allowed_congestion_control(char *allowed); 844int tcp_set_allowed_congestion_control(char *allowed);
845int tcp_set_congestion_control(struct sock *sk, const char *name); 845int tcp_set_congestion_control(struct sock *sk, const char *name);
846void tcp_slow_start(struct tcp_sock *tp, u32 acked); 846u32 tcp_slow_start(struct tcp_sock *tp, u32 acked);
847void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w); 847void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked);
848 848
849u32 tcp_reno_ssthresh(struct sock *sk); 849u32 tcp_reno_ssthresh(struct sock *sk);
850void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked); 850void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked);
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 882f835a0d85..19efcf13375a 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -6776,7 +6776,6 @@ skip_type:
6776 __perf_event_init_context(&cpuctx->ctx); 6776 __perf_event_init_context(&cpuctx->ctx);
6777 lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex); 6777 lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex);
6778 lockdep_set_class(&cpuctx->ctx.lock, &cpuctx_lock); 6778 lockdep_set_class(&cpuctx->ctx.lock, &cpuctx_lock);
6779 cpuctx->ctx.type = cpu_context;
6780 cpuctx->ctx.pmu = pmu; 6779 cpuctx->ctx.pmu = pmu;
6781 6780
6782 __perf_cpu_hrtimer_init(cpuctx, cpu); 6781 __perf_cpu_hrtimer_init(cpuctx, cpu);
@@ -7420,7 +7419,19 @@ SYSCALL_DEFINE5(perf_event_open,
7420 * task or CPU context: 7419 * task or CPU context:
7421 */ 7420 */
7422 if (move_group) { 7421 if (move_group) {
7423 if (group_leader->ctx->type != ctx->type) 7422 /*
7423 * Make sure we're both on the same task, or both
7424 * per-cpu events.
7425 */
7426 if (group_leader->ctx->task != ctx->task)
7427 goto err_context;
7428
7429 /*
7430 * Make sure we're both events for the same CPU;
7431 * grouping events for different CPUs is broken; since
7432 * you can never concurrently schedule them anyhow.
7433 */
7434 if (group_leader->cpu != event->cpu)
7424 goto err_context; 7435 goto err_context;
7425 } else { 7436 } else {
7426 if (group_leader->ctx != ctx) 7437 if (group_leader->ctx != ctx)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index c0accc00566e..e628cb11b560 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7292,13 +7292,12 @@ void __might_sleep(const char *file, int line, int preempt_offset)
7292 * since we will exit with TASK_RUNNING make sure we enter with it, 7292 * since we will exit with TASK_RUNNING make sure we enter with it,
7293 * otherwise we will destroy state. 7293 * otherwise we will destroy state.
7294 */ 7294 */
7295 if (WARN_ONCE(current->state != TASK_RUNNING, 7295 WARN_ONCE(current->state != TASK_RUNNING && current->task_state_change,
7296 "do not call blocking ops when !TASK_RUNNING; " 7296 "do not call blocking ops when !TASK_RUNNING; "
7297 "state=%lx set at [<%p>] %pS\n", 7297 "state=%lx set at [<%p>] %pS\n",
7298 current->state, 7298 current->state,
7299 (void *)current->task_state_change, 7299 (void *)current->task_state_change,
7300 (void *)current->task_state_change)) 7300 (void *)current->task_state_change);
7301 __set_current_state(TASK_RUNNING);
7302 7301
7303 ___might_sleep(file, line, preempt_offset); 7302 ___might_sleep(file, line, preempt_offset);
7304} 7303}
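Taken together with the kernel.h hunk above, sched_annotate_sleep() no longer forces TASK_RUNNING (which silently destroyed a state the caller had just set); it now only clears current->task_state_change so the WARN_ONCE stays quiet. A hedged sketch of how the annotation is meant to be used (the flush helper is hypothetical):

/* Sketch: a wait path with a known-safe nested sleep. */
set_current_state(TASK_INTERRUPTIBLE);
sched_annotate_sleep();         /* the flush below may block; that is expected */
flush_pending_work();           /* hypothetical, may sleep */
schedule();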
diff --git a/lib/checksum.c b/lib/checksum.c
index 129775eb6de6..8b39e86dbab5 100644
--- a/lib/checksum.c
+++ b/lib/checksum.c
@@ -181,6 +181,15 @@ csum_partial_copy(const void *src, void *dst, int len, __wsum sum)
181EXPORT_SYMBOL(csum_partial_copy); 181EXPORT_SYMBOL(csum_partial_copy);
182 182
183#ifndef csum_tcpudp_nofold 183#ifndef csum_tcpudp_nofold
184static inline u32 from64to32(u64 x)
185{
186 /* add the upper and lower 32 bits; the sum may carry into bit 32 */
187 x = (x & 0xffffffff) + (x >> 32);
188 /* fold the carry back in */
189 x = (x & 0xffffffff) + (x >> 32);
190 return (u32)x;
191}
192
184__wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, 193__wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
185 unsigned short len, 194 unsigned short len,
186 unsigned short proto, 195 unsigned short proto,
@@ -195,8 +204,7 @@ __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
195#else 204#else
196 s += (proto + len) << 8; 205 s += (proto + len) << 8;
197#endif 206#endif
198 s += (s >> 32); 207 return (__force __wsum)from64to32(s);
199 return (__force __wsum)s;
200} 208}
201EXPORT_SYMBOL(csum_tcpudp_nofold); 209EXPORT_SYMBOL(csum_tcpudp_nofold);
202#endif 210#endif
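A single end-around fold is not enough here: adding the two 32-bit halves can itself carry into bit 32, and the old `s += (s >> 32)` dropped exactly that carry. A standalone check of the helper (hypothetical harness):

#include <assert.h>
#include <stdint.h>

static uint32_t from64to32(uint64_t x)
{
        x = (x & 0xffffffff) + (x >> 32);       /* may carry into bit 32 */
        x = (x & 0xffffffff) + (x >> 32);       /* fold that carry back in */
        return (uint32_t)x;
}

int main(void)
{
        /* Worst case: both halves all-ones; the first fold yields
         * 0x1fffffffe, so a second fold is required. */
        assert(from64to32(0xffffffffffffffffULL) == 0xffffffff);
        assert(from64to32(0x00000001ffffffffULL) == 1);
        return 0;
}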
diff --git a/mm/gup.c b/mm/gup.c
index a900759cc807..8dd50ce6326f 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -296,7 +296,7 @@ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
296 return -ENOMEM; 296 return -ENOMEM;
297 if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE)) 297 if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
298 return *flags & FOLL_HWPOISON ? -EHWPOISON : -EFAULT; 298 return *flags & FOLL_HWPOISON ? -EHWPOISON : -EFAULT;
299 if (ret & VM_FAULT_SIGBUS) 299 if (ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
300 return -EFAULT; 300 return -EFAULT;
301 BUG(); 301 BUG();
302 } 302 }
@@ -571,7 +571,7 @@ int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
571 return -ENOMEM; 571 return -ENOMEM;
572 if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE)) 572 if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
573 return -EHWPOISON; 573 return -EHWPOISON;
574 if (ret & VM_FAULT_SIGBUS) 574 if (ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
575 return -EFAULT; 575 return -EFAULT;
576 BUG(); 576 BUG();
577 } 577 }
diff --git a/mm/ksm.c b/mm/ksm.c
index d247efab5073..15647fb0394f 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -376,7 +376,7 @@ static int break_ksm(struct vm_area_struct *vma, unsigned long addr)
376 else 376 else
377 ret = VM_FAULT_WRITE; 377 ret = VM_FAULT_WRITE;
378 put_page(page); 378 put_page(page);
379 } while (!(ret & (VM_FAULT_WRITE | VM_FAULT_SIGBUS | VM_FAULT_OOM))); 379 } while (!(ret & (VM_FAULT_WRITE | VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | VM_FAULT_OOM)));
380 /* 380 /*
381 * We must loop because handle_mm_fault() may back out if there's 381 * We must loop because handle_mm_fault() may back out if there's
382 * any difficulty e.g. if pte accessed bit gets updated concurrently. 382 * any difficulty e.g. if pte accessed bit gets updated concurrently.
diff --git a/mm/memory.c b/mm/memory.c
index 54f3a9b00956..2c3536cc6c63 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2632,7 +2632,7 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
2632 2632
2633 /* Check if we need to add a guard page to the stack */ 2633 /* Check if we need to add a guard page to the stack */
2634 if (check_stack_guard_page(vma, address) < 0) 2634 if (check_stack_guard_page(vma, address) < 0)
2635 return VM_FAULT_SIGBUS; 2635 return VM_FAULT_SIGSEGV;
2636 2636
2637 /* Use the zero-page for reads */ 2637 /* Use the zero-page for reads */
2638 if (!(flags & FAULT_FLAG_WRITE) && !mm_forbids_zeropage(mm)) { 2638 if (!(flags & FAULT_FLAG_WRITE) && !mm_forbids_zeropage(mm)) {
diff --git a/net/bridge/netfilter/nft_reject_bridge.c b/net/bridge/netfilter/nft_reject_bridge.c
index b0330aecbf97..3244aead0926 100644
--- a/net/bridge/netfilter/nft_reject_bridge.c
+++ b/net/bridge/netfilter/nft_reject_bridge.c
@@ -265,22 +265,12 @@ out:
265 data[NFT_REG_VERDICT].verdict = NF_DROP; 265 data[NFT_REG_VERDICT].verdict = NF_DROP;
266} 266}
267 267
268static int nft_reject_bridge_validate_hooks(const struct nft_chain *chain) 268static int nft_reject_bridge_validate(const struct nft_ctx *ctx,
269 const struct nft_expr *expr,
270 const struct nft_data **data)
269{ 271{
270 struct nft_base_chain *basechain; 272 return nft_chain_validate_hooks(ctx->chain, (1 << NF_BR_PRE_ROUTING) |
271 273 (1 << NF_BR_LOCAL_IN));
272 if (chain->flags & NFT_BASE_CHAIN) {
273 basechain = nft_base_chain(chain);
274
275 switch (basechain->ops[0].hooknum) {
276 case NF_BR_PRE_ROUTING:
277 case NF_BR_LOCAL_IN:
278 break;
279 default:
280 return -EOPNOTSUPP;
281 }
282 }
283 return 0;
284} 274}
285 275
286static int nft_reject_bridge_init(const struct nft_ctx *ctx, 276static int nft_reject_bridge_init(const struct nft_ctx *ctx,
@@ -290,7 +280,7 @@ static int nft_reject_bridge_init(const struct nft_ctx *ctx,
290 struct nft_reject *priv = nft_expr_priv(expr); 280 struct nft_reject *priv = nft_expr_priv(expr);
291 int icmp_code, err; 281 int icmp_code, err;
292 282
293 err = nft_reject_bridge_validate_hooks(ctx->chain); 283 err = nft_reject_bridge_validate(ctx, expr, NULL);
294 if (err < 0) 284 if (err < 0)
295 return err; 285 return err;
296 286
@@ -341,13 +331,6 @@ nla_put_failure:
341 return -1; 331 return -1;
342} 332}
343 333
344static int nft_reject_bridge_validate(const struct nft_ctx *ctx,
345 const struct nft_expr *expr,
346 const struct nft_data **data)
347{
348 return nft_reject_bridge_validate_hooks(ctx->chain);
349}
350
351static struct nft_expr_type nft_reject_bridge_type; 334static struct nft_expr_type nft_reject_bridge_type;
352static const struct nft_expr_ops nft_reject_bridge_ops = { 335static const struct nft_expr_ops nft_reject_bridge_ops = {
353 .type = &nft_reject_bridge_type, 336 .type = &nft_reject_bridge_type,
diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c
index 4589ff67bfa9..67a4a36febd1 100644
--- a/net/caif/chnl_net.c
+++ b/net/caif/chnl_net.c
@@ -470,7 +470,6 @@ static int ipcaif_newlink(struct net *src_net, struct net_device *dev,
470 ASSERT_RTNL(); 470 ASSERT_RTNL();
471 caifdev = netdev_priv(dev); 471 caifdev = netdev_priv(dev);
472 caif_netlink_parms(data, &caifdev->conn_req); 472 caif_netlink_parms(data, &caifdev->conn_req);
473 dev_net_set(caifdev->netdev, src_net);
474 473
475 ret = register_netdevice(dev); 474 ret = register_netdevice(dev);
476 if (ret) 475 if (ret)
diff --git a/net/core/dev.c b/net/core/dev.c
index ede0b161b115..a3a96ffc67f4 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2379,7 +2379,6 @@ EXPORT_SYMBOL(skb_checksum_help);
2379 2379
2380__be16 skb_network_protocol(struct sk_buff *skb, int *depth) 2380__be16 skb_network_protocol(struct sk_buff *skb, int *depth)
2381{ 2381{
2382 unsigned int vlan_depth = skb->mac_len;
2383 __be16 type = skb->protocol; 2382 __be16 type = skb->protocol;
2384 2383
2385 /* Tunnel gso handlers can set protocol to ethernet. */ 2384 /* Tunnel gso handlers can set protocol to ethernet. */
@@ -2393,35 +2392,7 @@ __be16 skb_network_protocol(struct sk_buff *skb, int *depth)
2393 type = eth->h_proto; 2392 type = eth->h_proto;
2394 } 2393 }
2395 2394
2396 /* if skb->protocol is 802.1Q/AD then the header should already be 2395 return __vlan_get_protocol(skb, type, depth);
2397 * present at mac_len - VLAN_HLEN (if mac_len > 0), or at
2398 * ETH_HLEN otherwise
2399 */
2400 if (type == htons(ETH_P_8021Q) || type == htons(ETH_P_8021AD)) {
2401 if (vlan_depth) {
2402 if (WARN_ON(vlan_depth < VLAN_HLEN))
2403 return 0;
2404 vlan_depth -= VLAN_HLEN;
2405 } else {
2406 vlan_depth = ETH_HLEN;
2407 }
2408 do {
2409 struct vlan_hdr *vh;
2410
2411 if (unlikely(!pskb_may_pull(skb,
2412 vlan_depth + VLAN_HLEN)))
2413 return 0;
2414
2415 vh = (struct vlan_hdr *)(skb->data + vlan_depth);
2416 type = vh->h_vlan_encapsulated_proto;
2417 vlan_depth += VLAN_HLEN;
2418 } while (type == htons(ETH_P_8021Q) ||
2419 type == htons(ETH_P_8021AD));
2420 }
2421
2422 *depth = vlan_depth;
2423
2424 return type;
2425} 2396}
2426 2397
2427/** 2398/**
@@ -5375,7 +5346,7 @@ void netdev_bonding_info_change(struct net_device *dev,
5375} 5346}
5376EXPORT_SYMBOL(netdev_bonding_info_change); 5347EXPORT_SYMBOL(netdev_bonding_info_change);
5377 5348
5378void netdev_adjacent_add_links(struct net_device *dev) 5349static void netdev_adjacent_add_links(struct net_device *dev)
5379{ 5350{
5380 struct netdev_adjacent *iter; 5351 struct netdev_adjacent *iter;
5381 5352
@@ -5400,7 +5371,7 @@ void netdev_adjacent_add_links(struct net_device *dev)
5400 } 5371 }
5401} 5372}
5402 5373
5403void netdev_adjacent_del_links(struct net_device *dev) 5374static void netdev_adjacent_del_links(struct net_device *dev)
5404{ 5375{
5405 struct netdev_adjacent *iter; 5376 struct netdev_adjacent *iter;
5406 5377
@@ -6713,7 +6684,7 @@ struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
6713 if (!queue) 6684 if (!queue)
6714 return NULL; 6685 return NULL;
6715 netdev_init_one_queue(dev, queue, NULL); 6686 netdev_init_one_queue(dev, queue, NULL);
6716 queue->qdisc = &noop_qdisc; 6687 RCU_INIT_POINTER(queue->qdisc, &noop_qdisc);
6717 queue->qdisc_sleeping = &noop_qdisc; 6688 queue->qdisc_sleeping = &noop_qdisc;
6718 rcu_assign_pointer(dev->ingress_queue, queue); 6689 rcu_assign_pointer(dev->ingress_queue, queue);
6719#endif 6690#endif
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 4cd5e350d129..5dad4f782f03 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -2937,12 +2937,16 @@ static int rtnl_bridge_notify(struct net_device *dev)
2937 if (err < 0) 2937 if (err < 0)
2938 goto errout; 2938 goto errout;
2939 2939
2940 if (!skb->len)
2941 goto errout;
2942
2940 rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC); 2943 rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC);
2941 return 0; 2944 return 0;
2942errout: 2945errout:
2943 WARN_ON(err == -EMSGSIZE); 2946 WARN_ON(err == -EMSGSIZE);
2944 kfree_skb(skb); 2947 kfree_skb(skb);
2945 rtnl_set_sk_err(net, RTNLGRP_LINK, err); 2948 if (err)
2949 rtnl_set_sk_err(net, RTNLGRP_LINK, err);
2946 return err; 2950 return err;
2947} 2951}
2948 2952
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index f998bc87ae38..d68199d9b2b0 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -1504,23 +1504,8 @@ static int ip_reply_glue_bits(void *dptr, char *to, int offset,
1504/* 1504/*
1505 * Generic function to send a packet as reply to another packet. 1505 * Generic function to send a packet as reply to another packet.
1506 * Used to send some TCP resets/acks so far. 1506 * Used to send some TCP resets/acks so far.
1507 *
1508 * Use a fake percpu inet socket to avoid false sharing and contention.
1509 */ 1507 */
1510static DEFINE_PER_CPU(struct inet_sock, unicast_sock) = { 1508void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
1511 .sk = {
1512 .__sk_common = {
1513 .skc_refcnt = ATOMIC_INIT(1),
1514 },
1515 .sk_wmem_alloc = ATOMIC_INIT(1),
1516 .sk_allocation = GFP_ATOMIC,
1517 .sk_flags = (1UL << SOCK_USE_WRITE_QUEUE),
1518 },
1519 .pmtudisc = IP_PMTUDISC_WANT,
1520 .uc_ttl = -1,
1521};
1522
1523void ip_send_unicast_reply(struct net *net, struct sk_buff *skb,
1524 const struct ip_options *sopt, 1509 const struct ip_options *sopt,
1525 __be32 daddr, __be32 saddr, 1510 __be32 daddr, __be32 saddr,
1526 const struct ip_reply_arg *arg, 1511 const struct ip_reply_arg *arg,
@@ -1530,9 +1515,8 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb,
1530 struct ipcm_cookie ipc; 1515 struct ipcm_cookie ipc;
1531 struct flowi4 fl4; 1516 struct flowi4 fl4;
1532 struct rtable *rt = skb_rtable(skb); 1517 struct rtable *rt = skb_rtable(skb);
1518 struct net *net = sock_net(sk);
1533 struct sk_buff *nskb; 1519 struct sk_buff *nskb;
1534 struct sock *sk;
1535 struct inet_sock *inet;
1536 int err; 1520 int err;
1537 1521
1538 if (__ip_options_echo(&replyopts.opt.opt, skb, sopt)) 1522 if (__ip_options_echo(&replyopts.opt.opt, skb, sopt))
@@ -1563,15 +1547,11 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb,
1563 if (IS_ERR(rt)) 1547 if (IS_ERR(rt))
1564 return; 1548 return;
1565 1549
1566 inet = &get_cpu_var(unicast_sock); 1550 inet_sk(sk)->tos = arg->tos;
1567 1551
1568 inet->tos = arg->tos;
1569 sk = &inet->sk;
1570 sk->sk_priority = skb->priority; 1552 sk->sk_priority = skb->priority;
1571 sk->sk_protocol = ip_hdr(skb)->protocol; 1553 sk->sk_protocol = ip_hdr(skb)->protocol;
1572 sk->sk_bound_dev_if = arg->bound_dev_if; 1554 sk->sk_bound_dev_if = arg->bound_dev_if;
1573 sock_net_set(sk, net);
1574 __skb_queue_head_init(&sk->sk_write_queue);
1575 sk->sk_sndbuf = sysctl_wmem_default; 1555 sk->sk_sndbuf = sysctl_wmem_default;
1576 err = ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base, 1556 err = ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base,
1577 len, 0, &ipc, &rt, MSG_DONTWAIT); 1557 len, 0, &ipc, &rt, MSG_DONTWAIT);
@@ -1587,13 +1567,10 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb,
1587 arg->csumoffset) = csum_fold(csum_add(nskb->csum, 1567 arg->csumoffset) = csum_fold(csum_add(nskb->csum,
1588 arg->csum)); 1568 arg->csum));
1589 nskb->ip_summed = CHECKSUM_NONE; 1569 nskb->ip_summed = CHECKSUM_NONE;
1590 skb_orphan(nskb);
1591 skb_set_queue_mapping(nskb, skb_get_queue_mapping(skb)); 1570 skb_set_queue_mapping(nskb, skb_get_queue_mapping(skb));
1592 ip_push_pending_frames(sk, &fl4); 1571 ip_push_pending_frames(sk, &fl4);
1593 } 1572 }
1594out: 1573out:
1595 put_cpu_var(unicast_sock);
1596
1597 ip_rt_put(rt); 1574 ip_rt_put(rt);
1598} 1575}
1599 1576
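This pairs with the per-netns tcp_sk percpu pointer added to struct netns_ipv4 above: instead of one global fake percpu inet socket mutated from every namespace, each namespace now owns real percpu reply sockets and the caller hands in the current CPU's one. A hedged sketch of the caller side (along the lines of the TCP reset/ACK paths in net/ipv4/tcp_ipv4.c, which are not part of this excerpt):

/* Sketch: use this namespace's per-CPU control socket for the reply. */
struct sock *ctl_sk = *this_cpu_ptr(net->ipv4.tcp_sk);

ip_send_unicast_reply(ctl_sk, skb, &TCP_SKB_CB(skb)->header.h4.opt,
                      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
                      &arg, arg.iov[0].iov_len);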
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 0c63b2abd873..ad5064362c5c 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -966,6 +966,9 @@ static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
966 if (dst->dev->mtu < mtu) 966 if (dst->dev->mtu < mtu)
967 return; 967 return;
968 968
969 if (rt->rt_pmtu && rt->rt_pmtu < mtu)
970 return;
971
969 if (mtu < ip_rt_min_pmtu) 972 if (mtu < ip_rt_min_pmtu)
970 mtu = ip_rt_min_pmtu; 973 mtu = ip_rt_min_pmtu;
971 974
diff --git a/net/ipv4/tcp_bic.c b/net/ipv4/tcp_bic.c
index bb395d46a389..c037644eafb7 100644
--- a/net/ipv4/tcp_bic.c
+++ b/net/ipv4/tcp_bic.c
@@ -150,7 +150,7 @@ static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 		tcp_slow_start(tp, acked);
 	else {
 		bictcp_update(ca, tp->snd_cwnd);
-		tcp_cong_avoid_ai(tp, ca->cnt);
+		tcp_cong_avoid_ai(tp, ca->cnt, 1);
 	}
 }
 
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index 63c29dba68a8..d694088214cd 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -360,26 +360,32 @@ int tcp_set_congestion_control(struct sock *sk, const char *name)
 * ABC caps N to 2. Slow start exits when cwnd grows over ssthresh and
 * returns the leftover acks to adjust cwnd in congestion avoidance mode.
 */
-void tcp_slow_start(struct tcp_sock *tp, u32 acked)
+u32 tcp_slow_start(struct tcp_sock *tp, u32 acked)
 {
 	u32 cwnd = tp->snd_cwnd + acked;
 
 	if (cwnd > tp->snd_ssthresh)
 		cwnd = tp->snd_ssthresh + 1;
+	acked -= cwnd - tp->snd_cwnd;
 	tp->snd_cwnd = min(cwnd, tp->snd_cwnd_clamp);
+
+	return acked;
 }
 EXPORT_SYMBOL_GPL(tcp_slow_start);
 
-/* In theory this is tp->snd_cwnd += 1 / tp->snd_cwnd (or alternative w) */
-void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w)
+/* In theory this is tp->snd_cwnd += 1 / tp->snd_cwnd (or alternative w),
+ * for every packet that was ACKed.
+ */
+void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked)
 {
+	tp->snd_cwnd_cnt += acked;
 	if (tp->snd_cwnd_cnt >= w) {
-		if (tp->snd_cwnd < tp->snd_cwnd_clamp)
-			tp->snd_cwnd++;
-		tp->snd_cwnd_cnt = 0;
-	} else {
-		tp->snd_cwnd_cnt++;
+		u32 delta = tp->snd_cwnd_cnt / w;
+
+		tp->snd_cwnd_cnt -= delta * w;
+		tp->snd_cwnd += delta;
 	}
+	tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_cwnd_clamp);
 }
 EXPORT_SYMBOL_GPL(tcp_cong_avoid_ai);
 
@@ -398,11 +404,13 @@ void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 		return;
 
 	/* In "safe" area, increase. */
-	if (tp->snd_cwnd <= tp->snd_ssthresh)
-		tcp_slow_start(tp, acked);
+	if (tp->snd_cwnd <= tp->snd_ssthresh) {
+		acked = tcp_slow_start(tp, acked);
+		if (!acked)
+			return;
+	}
 	/* In dangerous area, increase slowly. */
-	else
-		tcp_cong_avoid_ai(tp, tp->snd_cwnd);
+	tcp_cong_avoid_ai(tp, tp->snd_cwnd, acked);
 }
 EXPORT_SYMBOL_GPL(tcp_reno_cong_avoid);
 
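With stretch ACKs, one ACK can cover many packets. The reworked helpers credit all of them: tcp_slow_start() returns the ACKed packets left over once cwnd reaches ssthresh, and tcp_cong_avoid_ai() accumulates them, growing cwnd by snd_cwnd_cnt / w per call instead of by at most one. A self-contained walk-through of the new arithmetic (plain C, illustrative numbers only):

	#include <stdio.h>
	#include <stdint.h>

	/* Mirror of the reworked helpers above, on plain integers. */
	static uint32_t slow_start(uint32_t *cwnd, uint32_t ssthresh,
				   uint32_t acked)
	{
		uint32_t next = *cwnd + acked;

		if (next > ssthresh)
			next = ssthresh + 1;
		acked -= next - *cwnd;	/* leftover ACKed packets */
		*cwnd = next;
		return acked;
	}

	static void cong_avoid_ai(uint32_t *cwnd, uint32_t *cnt, uint32_t w,
				  uint32_t acked)
	{
		*cnt += acked;
		if (*cnt >= w) {
			uint32_t delta = *cnt / w;

			*cnt -= delta * w;
			*cwnd += delta;	/* can now grow by more than 1 */
		}
	}

	int main(void)
	{
		uint32_t cwnd = 10, cnt = 0, left;

		left = slow_start(&cwnd, 12, 5);	/* cwnd 10 -> 13, 2 left over */
		cong_avoid_ai(&cwnd, &cnt, cwnd, left);	/* leftovers carried forward */
		printf("cwnd=%u cnt=%u leftover=%u\n", cwnd, cnt, left);
		return 0;
	}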
diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
index 6b6002416a73..4b276d1ed980 100644
--- a/net/ipv4/tcp_cubic.c
+++ b/net/ipv4/tcp_cubic.c
@@ -93,9 +93,7 @@ struct bictcp {
 	u32	epoch_start;	/* beginning of an epoch */
 	u32	ack_cnt;	/* number of acks */
 	u32	tcp_cwnd;	/* estimated tcp cwnd */
-#define ACK_RATIO_SHIFT	4
-#define ACK_RATIO_LIMIT	(32u << ACK_RATIO_SHIFT)
-	u16	delayed_ack;	/* estimate the ratio of Packets/ACKs << 4 */
+	u16	unused;
 	u8	sample_cnt;	/* number of samples to decide curr_rtt */
 	u8	found;		/* the exit point is found? */
 	u32	round_start;	/* beginning of each round */
@@ -114,7 +112,6 @@ static inline void bictcp_reset(struct bictcp *ca)
 	ca->bic_K = 0;
 	ca->delay_min = 0;
 	ca->epoch_start = 0;
-	ca->delayed_ack = 2 << ACK_RATIO_SHIFT;
 	ca->ack_cnt = 0;
 	ca->tcp_cwnd = 0;
 	ca->found = 0;
@@ -205,23 +202,30 @@ static u32 cubic_root(u64 a)
 /*
  * Compute congestion window to use.
  */
-static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
+static inline void bictcp_update(struct bictcp *ca, u32 cwnd, u32 acked)
 {
 	u32 delta, bic_target, max_cnt;
 	u64 offs, t;
 
-	ca->ack_cnt++;	/* count the number of ACKs */
+	ca->ack_cnt += acked;	/* count the number of ACKed packets */
 
 	if (ca->last_cwnd == cwnd &&
 	    (s32)(tcp_time_stamp - ca->last_time) <= HZ / 32)
 		return;
 
+	/* The CUBIC function can update ca->cnt at most once per jiffy.
+	 * On all cwnd reduction events, ca->epoch_start is set to 0,
+	 * which will force a recalculation of ca->cnt.
+	 */
+	if (ca->epoch_start && tcp_time_stamp == ca->last_time)
+		goto tcp_friendliness;
+
 	ca->last_cwnd = cwnd;
 	ca->last_time = tcp_time_stamp;
 
 	if (ca->epoch_start == 0) {
 		ca->epoch_start = tcp_time_stamp;	/* record beginning */
-		ca->ack_cnt = 1;			/* start counting */
+		ca->ack_cnt = acked;			/* start counting */
 		ca->tcp_cwnd = cwnd;			/* syn with cubic */
 
 		if (ca->last_max_cwnd <= cwnd) {
@@ -283,6 +287,7 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
 	if (ca->last_max_cwnd == 0 && ca->cnt > 20)
 		ca->cnt = 20;	/* increase cwnd 5% per RTT */
 
+tcp_friendliness:
 	/* TCP Friendly */
 	if (tcp_friendliness) {
 		u32 scale = beta_scale;
@@ -301,7 +306,6 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
 		}
 	}
 
-	ca->cnt = (ca->cnt << ACK_RATIO_SHIFT) / ca->delayed_ack;
 	if (ca->cnt == 0)	/* cannot be zero */
 		ca->cnt = 1;
 }
@@ -317,11 +321,12 @@ static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 	if (tp->snd_cwnd <= tp->snd_ssthresh) {
 		if (hystart && after(ack, ca->end_seq))
 			bictcp_hystart_reset(sk);
-		tcp_slow_start(tp, acked);
-	} else {
-		bictcp_update(ca, tp->snd_cwnd);
-		tcp_cong_avoid_ai(tp, ca->cnt);
+		acked = tcp_slow_start(tp, acked);
+		if (!acked)
+			return;
 	}
+	bictcp_update(ca, tp->snd_cwnd, acked);
+	tcp_cong_avoid_ai(tp, ca->cnt, acked);
 }
 
 static u32 bictcp_recalc_ssthresh(struct sock *sk)
@@ -411,20 +416,10 @@ static void hystart_update(struct sock *sk, u32 delay)
  */
 static void bictcp_acked(struct sock *sk, u32 cnt, s32 rtt_us)
 {
-	const struct inet_connection_sock *icsk = inet_csk(sk);
 	const struct tcp_sock *tp = tcp_sk(sk);
 	struct bictcp *ca = inet_csk_ca(sk);
 	u32 delay;
 
-	if (icsk->icsk_ca_state == TCP_CA_Open) {
-		u32 ratio = ca->delayed_ack;
-
-		ratio -= ca->delayed_ack >> ACK_RATIO_SHIFT;
-		ratio += cnt;
-
-		ca->delayed_ack = clamp(ratio, 1U, ACK_RATIO_LIMIT);
-	}
-
 	/* Some calls are for duplicates without timestamps */
 	if (rtt_us < 0)
 		return;
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index ad3e65bdd368..67bc95fb5d9e 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -683,7 +683,8 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
 	arg.bound_dev_if = sk->sk_bound_dev_if;
 
 	arg.tos = ip_hdr(skb)->tos;
-	ip_send_unicast_reply(net, skb, &TCP_SKB_CB(skb)->header.h4.opt,
+	ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
+			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
 			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
 			      &arg, arg.iov[0].iov_len);
 
@@ -767,7 +768,8 @@ static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
 	if (oif)
 		arg.bound_dev_if = oif;
 	arg.tos = tos;
-	ip_send_unicast_reply(net, skb, &TCP_SKB_CB(skb)->header.h4.opt,
+	ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
+			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
 			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
 			      &arg, arg.iov[0].iov_len);
 
@@ -2430,14 +2432,39 @@ struct proto tcp_prot = {
 };
 EXPORT_SYMBOL(tcp_prot);
 
+static void __net_exit tcp_sk_exit(struct net *net)
+{
+	int cpu;
+
+	for_each_possible_cpu(cpu)
+		inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu));
+	free_percpu(net->ipv4.tcp_sk);
+}
+
 static int __net_init tcp_sk_init(struct net *net)
 {
+	int res, cpu;
+
+	net->ipv4.tcp_sk = alloc_percpu(struct sock *);
+	if (!net->ipv4.tcp_sk)
+		return -ENOMEM;
+
+	for_each_possible_cpu(cpu) {
+		struct sock *sk;
+
+		res = inet_ctl_sock_create(&sk, PF_INET, SOCK_RAW,
+					   IPPROTO_TCP, net);
+		if (res)
+			goto fail;
+		*per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk;
+	}
 	net->ipv4.sysctl_tcp_ecn = 2;
 	return 0;
-}
 
-static void __net_exit tcp_sk_exit(struct net *net)
-{
+fail:
+	tcp_sk_exit(net);
+
+	return res;
 }
 
 static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
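tcp_sk_init() uses the usual goto-fail unwind: on any per-cpu failure it releases through the same teardown path used at net exit. A self-contained model of the pattern (plain C, with calloc/free as hypothetical stand-ins for the percpu and socket APIs; free(NULL) being a no-op plays the role of a teardown that tolerates slots not yet filled):

	#include <stdlib.h>

	/* Teardown used both at exit and for partial-failure unwind. */
	static void items_exit(void **items, int n)
	{
		for (int i = 0; i < n; i++)
			free(items[i]);		/* free(NULL) is a no-op */
		free(items);
	}

	static int items_init(void ***itemsp, int n)
	{
		void **items = calloc(n, sizeof(*items));	/* zeroed slots */

		if (!items)
			return -1;
		for (int i = 0; i < n; i++) {
			items[i] = malloc(64);
			if (!items[i]) {
				items_exit(items, n);	/* partial unwind is safe */
				return -1;
			}
		}
		*itemsp = items;
		return 0;
	}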
diff --git a/net/ipv4/tcp_scalable.c b/net/ipv4/tcp_scalable.c
index 6824afb65d93..333bcb2415ff 100644
--- a/net/ipv4/tcp_scalable.c
+++ b/net/ipv4/tcp_scalable.c
@@ -25,7 +25,8 @@ static void tcp_scalable_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 	if (tp->snd_cwnd <= tp->snd_ssthresh)
 		tcp_slow_start(tp, acked);
 	else
-		tcp_cong_avoid_ai(tp, min(tp->snd_cwnd, TCP_SCALABLE_AI_CNT));
+		tcp_cong_avoid_ai(tp, min(tp->snd_cwnd, TCP_SCALABLE_AI_CNT),
+				  1);
 }
 
 static u32 tcp_scalable_ssthresh(struct sock *sk)
diff --git a/net/ipv4/tcp_veno.c b/net/ipv4/tcp_veno.c
index a4d2d2d88dca..112151eeee45 100644
--- a/net/ipv4/tcp_veno.c
+++ b/net/ipv4/tcp_veno.c
@@ -159,7 +159,7 @@ static void tcp_veno_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 			/* In the "non-congestive state", increase cwnd
 			 * every rtt.
 			 */
-			tcp_cong_avoid_ai(tp, tp->snd_cwnd);
+			tcp_cong_avoid_ai(tp, tp->snd_cwnd, 1);
 		} else {
 			/* In the "congestive state", increase cwnd
 			 * every other rtt.
diff --git a/net/ipv4/tcp_yeah.c b/net/ipv4/tcp_yeah.c
index cd7273218598..17d35662930d 100644
--- a/net/ipv4/tcp_yeah.c
+++ b/net/ipv4/tcp_yeah.c
@@ -92,7 +92,7 @@ static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 
 	} else {
 		/* Reno */
-		tcp_cong_avoid_ai(tp, tp->snd_cwnd);
+		tcp_cong_avoid_ai(tp, tp->snd_cwnd, 1);
 	}
 
 	/* The key players are v_vegas.beg_snd_una and v_beg_snd_nxt.
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 6dee2a8ca0a9..bc28b7d42a6d 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -417,7 +417,7 @@ static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 		if (code == ICMPV6_HDR_FIELD)
 			teli = ip6_tnl_parse_tlv_enc_lim(skb, skb->data);
 
-		if (teli && teli == info - 2) {
+		if (teli && teli == be32_to_cpu(info) - 2) {
 			tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli];
 			if (tel->encap_limit == 0) {
 				net_warn_ratelimited("%s: Too small encapsulation limit or routing loop in tunnel!\n",
@@ -429,7 +429,7 @@ static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 		}
 		break;
 	case ICMPV6_PKT_TOOBIG:
-		mtu = info - offset;
+		mtu = be32_to_cpu(info) - offset;
 		if (mtu < IPV6_MIN_MTU)
 			mtu = IPV6_MIN_MTU;
 		t->dev->mtu = mtu;
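info here carries a big-endian (__be32) value, so subtracting or comparing the raw representation is only correct on big-endian hosts; the fix converts with be32_to_cpu() first (the kernel counterpart of userspace ntohl()). A self-contained illustration of the bug class (plain C):

	#include <stdio.h>
	#include <stdint.h>
	#include <arpa/inet.h>

	int main(void)
	{
		uint32_t info = htonl(1500);	/* big-endian on-wire value */

		/* Wrong on little-endian: arithmetic on the raw wire value. */
		printf("raw - 40   = %u\n", info - 40);
		/* Right: convert to host order first, as the fix does. */
		printf("ntohl - 40 = %u\n", ntohl(info) - 40);
		return 0;
	}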
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 1a036f35d833..d33df4cbd872 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -537,20 +537,6 @@ static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from)
 	skb_copy_secmark(to, from);
 }
 
-static void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
-{
-	static u32 ip6_idents_hashrnd __read_mostly;
-	u32 hash, id;
-
-	net_get_random_once(&ip6_idents_hashrnd, sizeof(ip6_idents_hashrnd));
-
-	hash = __ipv6_addr_jhash(&rt->rt6i_dst.addr, ip6_idents_hashrnd);
-	hash = __ipv6_addr_jhash(&rt->rt6i_src.addr, hash);
-
-	id = ip_idents_reserve(hash, 1);
-	fhdr->identification = htonl(id);
-}
-
 int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
 {
 	struct sk_buff *frag;
diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c
index 97f41a3e68d9..54520a0bd5e3 100644
--- a/net/ipv6/output_core.c
+++ b/net/ipv6/output_core.c
@@ -9,6 +9,24 @@
 #include <net/addrconf.h>
 #include <net/secure_seq.h>
 
+u32 __ipv6_select_ident(u32 hashrnd, struct in6_addr *dst, struct in6_addr *src)
+{
+	u32 hash, id;
+
+	hash = __ipv6_addr_jhash(dst, hashrnd);
+	hash = __ipv6_addr_jhash(src, hash);
+
+	/* Treat id of 0 as unset and if we get 0 back from ip_idents_reserve,
+	 * set the high order bit instead, thus minimizing possible future
+	 * collisions.
+	 */
+	id = ip_idents_reserve(hash, 1);
+	if (unlikely(!id))
+		id = 1 << 31;
+
+	return id;
+}
+
 /* This function exists only for tap drivers that must support broken
  * clients requesting UFO without specifying an IPv6 fragment ID.
  *
@@ -22,7 +40,7 @@ void ipv6_proxy_select_ident(struct sk_buff *skb)
 	static u32 ip6_proxy_idents_hashrnd __read_mostly;
 	struct in6_addr buf[2];
 	struct in6_addr *addrs;
-	u32 hash, id;
+	u32 id;
 
 	addrs = skb_header_pointer(skb,
 				   skb_network_offset(skb) +
@@ -34,14 +52,25 @@ void ipv6_proxy_select_ident(struct sk_buff *skb)
 	net_get_random_once(&ip6_proxy_idents_hashrnd,
 			    sizeof(ip6_proxy_idents_hashrnd));
 
-	hash = __ipv6_addr_jhash(&addrs[1], ip6_proxy_idents_hashrnd);
-	hash = __ipv6_addr_jhash(&addrs[0], hash);
-
-	id = ip_idents_reserve(hash, 1);
-	skb_shinfo(skb)->ip6_frag_id = htonl(id);
+	id = __ipv6_select_ident(ip6_proxy_idents_hashrnd,
+				 &addrs[1], &addrs[0]);
+	skb_shinfo(skb)->ip6_frag_id = id;
 }
 EXPORT_SYMBOL_GPL(ipv6_proxy_select_ident);
 
+void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
+{
+	static u32 ip6_idents_hashrnd __read_mostly;
+	u32 id;
+
+	net_get_random_once(&ip6_idents_hashrnd, sizeof(ip6_idents_hashrnd));
+
+	id = __ipv6_select_ident(ip6_idents_hashrnd, &rt->rt6i_dst.addr,
+				 &rt->rt6i_src.addr);
+	fhdr->identification = htonl(id);
+}
+EXPORT_SYMBOL(ipv6_select_ident);
+
 int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
 {
 	u16 offset = sizeof(struct ipv6hdr);
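The new __ipv6_select_ident() reserves id 0 as an "unset" marker: when ip_idents_reserve() happens to yield 0 it substitutes 1 << 31, which is what lets udp6_ufo_fragment() below test ip6_frag_id against zero to mean "not chosen yet". A self-contained sketch of the reservation rule (reserve_id() is a hypothetical stand-in for the hash-based generator):

	#include <stdio.h>
	#include <stdint.h>

	/* Stand-in for ip_idents_reserve(): any generator may yield 0. */
	static uint32_t reserve_id(uint32_t seed)
	{
		return seed * 2654435761u;	/* hypothetical mixer */
	}

	static uint32_t select_ident(uint32_t seed)
	{
		uint32_t id = reserve_id(seed);

		if (!id)		/* 0 means "unset" to callers */
			id = 1u << 31;
		return id;
	}

	int main(void)
	{
		printf("%u\n", select_ident(0));	/* never returns 0 */
		return 0;
	}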
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 3cc197c72b59..e4cbd5798eba 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -1506,12 +1506,12 @@ static bool ipip6_netlink_encap_parms(struct nlattr *data[],
 
 	if (data[IFLA_IPTUN_ENCAP_SPORT]) {
 		ret = true;
-		ipencap->sport = nla_get_u16(data[IFLA_IPTUN_ENCAP_SPORT]);
+		ipencap->sport = nla_get_be16(data[IFLA_IPTUN_ENCAP_SPORT]);
 	}
 
 	if (data[IFLA_IPTUN_ENCAP_DPORT]) {
 		ret = true;
-		ipencap->dport = nla_get_u16(data[IFLA_IPTUN_ENCAP_DPORT]);
+		ipencap->dport = nla_get_be16(data[IFLA_IPTUN_ENCAP_DPORT]);
 	}
 
 	return ret;
@@ -1707,9 +1707,9 @@ static int ipip6_fill_info(struct sk_buff *skb, const struct net_device *dev)
 
 	if (nla_put_u16(skb, IFLA_IPTUN_ENCAP_TYPE,
 			tunnel->encap.type) ||
-	    nla_put_u16(skb, IFLA_IPTUN_ENCAP_SPORT,
-			tunnel->encap.sport) ||
-	    nla_put_u16(skb, IFLA_IPTUN_ENCAP_DPORT,
-			tunnel->encap.dport) ||
+	    nla_put_be16(skb, IFLA_IPTUN_ENCAP_SPORT,
+			 tunnel->encap.sport) ||
+	    nla_put_be16(skb, IFLA_IPTUN_ENCAP_DPORT,
+			 tunnel->encap.dport) ||
 	    nla_put_u16(skb, IFLA_IPTUN_ENCAP_FLAGS,
 			tunnel->encap.flags))
diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c
index b6aa8ed18257..a56276996b72 100644
--- a/net/ipv6/udp_offload.c
+++ b/net/ipv6/udp_offload.c
@@ -52,6 +52,10 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
 
 		skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);
 
+		/* Set the IPv6 fragment id if not set yet */
+		if (!skb_shinfo(skb)->ip6_frag_id)
+			ipv6_proxy_select_ident(skb);
+
 		segs = NULL;
 		goto out;
 	}
@@ -108,7 +112,11 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
 	fptr = (struct frag_hdr *)(skb_network_header(skb) + unfrag_ip6hlen);
 	fptr->nexthdr = nexthdr;
 	fptr->reserved = 0;
-	fptr->identification = skb_shinfo(skb)->ip6_frag_id;
+	if (skb_shinfo(skb)->ip6_frag_id)
+		fptr->identification = skb_shinfo(skb)->ip6_frag_id;
+	else
+		ipv6_select_ident(fptr,
+				  (struct rt6_info *)skb_dst(skb));
 
 	/* Fragment the skb. ipv6 header and the remaining fields of the
 	 * fragment header are updated in ipv6_gso_segment()
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 990decba1fe4..b87ca32efa0b 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -659,16 +659,24 @@ static inline int ip_vs_gather_frags(struct sk_buff *skb, u_int32_t user)
 	return err;
 }
 
-static int ip_vs_route_me_harder(int af, struct sk_buff *skb)
+static int ip_vs_route_me_harder(int af, struct sk_buff *skb,
+				 unsigned int hooknum)
 {
+	if (!sysctl_snat_reroute(skb))
+		return 0;
+	/* Reroute replies only to remote clients (FORWARD and LOCAL_OUT) */
+	if (NF_INET_LOCAL_IN == hooknum)
+		return 0;
 #ifdef CONFIG_IP_VS_IPV6
 	if (af == AF_INET6) {
-		if (sysctl_snat_reroute(skb) && ip6_route_me_harder(skb) != 0)
+		struct dst_entry *dst = skb_dst(skb);
+
+		if (dst->dev && !(dst->dev->flags & IFF_LOOPBACK) &&
+		    ip6_route_me_harder(skb) != 0)
 			return 1;
 	} else
 #endif
-		if ((sysctl_snat_reroute(skb) ||
-		     skb_rtable(skb)->rt_flags & RTCF_LOCAL) &&
+		if (!(skb_rtable(skb)->rt_flags & RTCF_LOCAL) &&
 		    ip_route_me_harder(skb, RTN_LOCAL) != 0)
 			return 1;
 
@@ -791,7 +799,8 @@ static int handle_response_icmp(int af, struct sk_buff *skb,
 				union nf_inet_addr *snet,
 				__u8 protocol, struct ip_vs_conn *cp,
 				struct ip_vs_protocol *pp,
-				unsigned int offset, unsigned int ihl)
+				unsigned int offset, unsigned int ihl,
+				unsigned int hooknum)
 {
 	unsigned int verdict = NF_DROP;
 
@@ -821,7 +830,7 @@ static int handle_response_icmp(int af, struct sk_buff *skb,
 #endif
 		ip_vs_nat_icmp(skb, pp, cp, 1);
 
-	if (ip_vs_route_me_harder(af, skb))
+	if (ip_vs_route_me_harder(af, skb, hooknum))
 		goto out;
 
 	/* do the statistics and put it back */
@@ -916,7 +925,7 @@ static int ip_vs_out_icmp(struct sk_buff *skb, int *related,
 
 	snet.ip = iph->saddr;
 	return handle_response_icmp(AF_INET, skb, &snet, cih->protocol, cp,
-				    pp, ciph.len, ihl);
+				    pp, ciph.len, ihl, hooknum);
 }
 
 #ifdef CONFIG_IP_VS_IPV6
@@ -981,7 +990,8 @@ static int ip_vs_out_icmp_v6(struct sk_buff *skb, int *related,
 	snet.in6 = ciph.saddr.in6;
 	writable = ciph.len;
 	return handle_response_icmp(AF_INET6, skb, &snet, ciph.protocol, cp,
-				    pp, writable, sizeof(struct ipv6hdr));
+				    pp, writable, sizeof(struct ipv6hdr),
+				    hooknum);
 }
 #endif
 
@@ -1040,7 +1050,8 @@ static inline bool is_new_conn(const struct sk_buff *skb,
  */
 static unsigned int
 handle_response(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
-		struct ip_vs_conn *cp, struct ip_vs_iphdr *iph)
+		struct ip_vs_conn *cp, struct ip_vs_iphdr *iph,
+		unsigned int hooknum)
 {
 	struct ip_vs_protocol *pp = pd->pp;
 
@@ -1078,7 +1089,7 @@ handle_response(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
 	 * if it came from this machine itself.  So re-compute
 	 * the routing information.
 	 */
-	if (ip_vs_route_me_harder(af, skb))
+	if (ip_vs_route_me_harder(af, skb, hooknum))
 		goto drop;
 
 	IP_VS_DBG_PKT(10, af, pp, skb, 0, "After SNAT");
@@ -1181,7 +1192,7 @@ ip_vs_out(unsigned int hooknum, struct sk_buff *skb, int af)
 	cp = pp->conn_out_get(af, skb, &iph, 0);
 
 	if (likely(cp))
-		return handle_response(af, skb, pd, cp, &iph);
+		return handle_response(af, skb, pd, cp, &iph, hooknum);
 	if (sysctl_nat_icmp_send(net) &&
 	    (pp->protocol == IPPROTO_TCP ||
 	     pp->protocol == IPPROTO_UDP ||
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 70f697827b9b..199fd0f27b0e 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -1136,9 +1136,11 @@ static struct nft_stats __percpu *nft_stats_alloc(const struct nlattr *attr)
 	/* Restore old counters on this cpu, no problem. Per-cpu statistics
 	 * are not exposed to userspace.
 	 */
+	preempt_disable();
 	stats = this_cpu_ptr(newstats);
 	stats->bytes = be64_to_cpu(nla_get_be64(tb[NFTA_COUNTER_BYTES]));
 	stats->pkts = be64_to_cpu(nla_get_be64(tb[NFTA_COUNTER_PACKETS]));
+	preempt_enable();
 
 	return newstats;
 }
@@ -1264,8 +1266,10 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb,
 		nft_ctx_init(&ctx, skb, nlh, afi, table, chain, nla);
 		trans = nft_trans_alloc(&ctx, NFT_MSG_NEWCHAIN,
 					sizeof(struct nft_trans_chain));
-		if (trans == NULL)
+		if (trans == NULL) {
+			free_percpu(stats);
 			return -ENOMEM;
+		}
 
 		nft_trans_chain_stats(trans) = stats;
 		nft_trans_chain_update(trans) = true;
@@ -1321,8 +1325,10 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb,
 			hookfn = type->hooks[hooknum];
 
 		basechain = kzalloc(sizeof(*basechain), GFP_KERNEL);
-		if (basechain == NULL)
+		if (basechain == NULL) {
+			module_put(type->owner);
 			return -ENOMEM;
+		}
 
 		if (nla[NFTA_CHAIN_COUNTERS]) {
 			stats = nft_stats_alloc(nla[NFTA_CHAIN_COUNTERS]);
@@ -3759,6 +3765,24 @@ int nft_chain_validate_dependency(const struct nft_chain *chain,
 }
 EXPORT_SYMBOL_GPL(nft_chain_validate_dependency);
 
+int nft_chain_validate_hooks(const struct nft_chain *chain,
+			     unsigned int hook_flags)
+{
+	struct nft_base_chain *basechain;
+
+	if (chain->flags & NFT_BASE_CHAIN) {
+		basechain = nft_base_chain(chain);
+
+		if ((1 << basechain->ops[0].hooknum) & hook_flags)
+			return 0;
+
+		return -EOPNOTSUPP;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(nft_chain_validate_hooks);
+
 /*
  * Loop detection - walk through the ruleset beginning at the destination chain
  * of a new jump until either the source chain is reached (loop) or all
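The new nft_chain_validate_hooks() takes the set of allowed netfilter hooks as a bitmask: a base chain passes only if its own hook's bit is set, while non-base chains pass unconditionally. The masquerade, nat and redirect validators below build such masks from NF_INET_* hook numbers. A tiny self-contained model of the mask test (the hook names are abbreviations, not the kernel enum):

	#include <stdio.h>

	enum { PRE_ROUTING, LOCAL_IN, FORWARD, LOCAL_OUT, POST_ROUTING };

	/* Model of the bitmask test in nft_chain_validate_hooks(). */
	static int validate_hooks(unsigned int chain_hook,
				  unsigned int hook_flags)
	{
		return ((1u << chain_hook) & hook_flags) ? 0 : -1;
	}

	int main(void)
	{
		unsigned int masq_ok = 1u << POST_ROUTING;

		printf("%d\n", validate_hooks(POST_ROUTING, masq_ok));	/* 0 */
		printf("%d\n", validate_hooks(LOCAL_IN, masq_ok));	/* -1 */
		return 0;
	}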
diff --git a/net/netfilter/nft_masq.c b/net/netfilter/nft_masq.c
index d1ffd5eb3a9b..9aea747b43ea 100644
--- a/net/netfilter/nft_masq.c
+++ b/net/netfilter/nft_masq.c
@@ -21,6 +21,21 @@ const struct nla_policy nft_masq_policy[NFTA_MASQ_MAX + 1] = {
 };
 EXPORT_SYMBOL_GPL(nft_masq_policy);
 
+int nft_masq_validate(const struct nft_ctx *ctx,
+		      const struct nft_expr *expr,
+		      const struct nft_data **data)
+{
+	int err;
+
+	err = nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT);
+	if (err < 0)
+		return err;
+
+	return nft_chain_validate_hooks(ctx->chain,
+					(1 << NF_INET_POST_ROUTING));
+}
+EXPORT_SYMBOL_GPL(nft_masq_validate);
+
 int nft_masq_init(const struct nft_ctx *ctx,
 		  const struct nft_expr *expr,
 		  const struct nlattr * const tb[])
@@ -28,8 +43,8 @@ int nft_masq_init(const struct nft_ctx *ctx,
 	struct nft_masq *priv = nft_expr_priv(expr);
 	int err;
 
-	err = nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT);
-	if (err < 0)
+	err = nft_masq_validate(ctx, expr, NULL);
+	if (err)
 		return err;
 
 	if (tb[NFTA_MASQ_FLAGS] == NULL)
@@ -60,12 +75,5 @@ nla_put_failure:
 }
 EXPORT_SYMBOL_GPL(nft_masq_dump);
 
-int nft_masq_validate(const struct nft_ctx *ctx, const struct nft_expr *expr,
-		      const struct nft_data **data)
-{
-	return nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT);
-}
-EXPORT_SYMBOL_GPL(nft_masq_validate);
-
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Arturo Borrero Gonzalez <arturo.borrero.glez@gmail.com>");
diff --git a/net/netfilter/nft_nat.c b/net/netfilter/nft_nat.c
index aff54fb1c8a0..a0837c6c9283 100644
--- a/net/netfilter/nft_nat.c
+++ b/net/netfilter/nft_nat.c
@@ -88,17 +88,40 @@ static const struct nla_policy nft_nat_policy[NFTA_NAT_MAX + 1] = {
 	[NFTA_NAT_FLAGS]	 = { .type = NLA_U32 },
 };
 
-static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
-			const struct nlattr * const tb[])
+static int nft_nat_validate(const struct nft_ctx *ctx,
+			    const struct nft_expr *expr,
+			    const struct nft_data **data)
 {
 	struct nft_nat *priv = nft_expr_priv(expr);
-	u32 family;
 	int err;
 
 	err = nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT);
 	if (err < 0)
 		return err;
 
+	switch (priv->type) {
+	case NFT_NAT_SNAT:
+		err = nft_chain_validate_hooks(ctx->chain,
+					       (1 << NF_INET_POST_ROUTING) |
+					       (1 << NF_INET_LOCAL_IN));
+		break;
+	case NFT_NAT_DNAT:
+		err = nft_chain_validate_hooks(ctx->chain,
+					       (1 << NF_INET_PRE_ROUTING) |
+					       (1 << NF_INET_LOCAL_OUT));
+		break;
+	}
+
+	return err;
+}
+
+static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
+			const struct nlattr * const tb[])
+{
+	struct nft_nat *priv = nft_expr_priv(expr);
+	u32 family;
+	int err;
+
 	if (tb[NFTA_NAT_TYPE] == NULL ||
 	    (tb[NFTA_NAT_REG_ADDR_MIN] == NULL &&
 	     tb[NFTA_NAT_REG_PROTO_MIN] == NULL))
@@ -115,6 +138,10 @@ static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
 		return -EINVAL;
 	}
 
+	err = nft_nat_validate(ctx, expr, NULL);
+	if (err < 0)
+		return err;
+
 	if (tb[NFTA_NAT_FAMILY] == NULL)
 		return -EINVAL;
 
@@ -219,13 +246,6 @@ nla_put_failure:
 	return -1;
 }
 
-static int nft_nat_validate(const struct nft_ctx *ctx,
-			    const struct nft_expr *expr,
-			    const struct nft_data **data)
-{
-	return nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT);
-}
-
 static struct nft_expr_type nft_nat_type;
 static const struct nft_expr_ops nft_nat_ops = {
 	.type           = &nft_nat_type,
diff --git a/net/netfilter/nft_redir.c b/net/netfilter/nft_redir.c
index 9e8093f28311..d7e9e93a4e90 100644
--- a/net/netfilter/nft_redir.c
+++ b/net/netfilter/nft_redir.c
@@ -23,6 +23,22 @@ const struct nla_policy nft_redir_policy[NFTA_REDIR_MAX + 1] = {
 };
 EXPORT_SYMBOL_GPL(nft_redir_policy);
 
+int nft_redir_validate(const struct nft_ctx *ctx,
+		       const struct nft_expr *expr,
+		       const struct nft_data **data)
+{
+	int err;
+
+	err = nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT);
+	if (err < 0)
+		return err;
+
+	return nft_chain_validate_hooks(ctx->chain,
+					(1 << NF_INET_PRE_ROUTING) |
+					(1 << NF_INET_LOCAL_OUT));
+}
+EXPORT_SYMBOL_GPL(nft_redir_validate);
+
 int nft_redir_init(const struct nft_ctx *ctx,
 		   const struct nft_expr *expr,
 		   const struct nlattr * const tb[])
@@ -30,7 +46,7 @@ int nft_redir_init(const struct nft_ctx *ctx,
 	struct nft_redir *priv = nft_expr_priv(expr);
 	int err;
 
-	err = nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT);
+	err = nft_redir_validate(ctx, expr, NULL);
 	if (err < 0)
 		return err;
 
@@ -88,12 +104,5 @@ nla_put_failure:
 }
 EXPORT_SYMBOL_GPL(nft_redir_dump);
 
-int nft_redir_validate(const struct nft_ctx *ctx, const struct nft_expr *expr,
-		       const struct nft_data **data)
-{
-	return nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT);
-}
-EXPORT_SYMBOL_GPL(nft_redir_validate);
-
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Arturo Borrero Gonzalez <arturo.borrero.glez@gmail.com>");
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 6feb16d5e1b8..2702673f0f23 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1438,7 +1438,7 @@ static void netlink_undo_bind(int group, long unsigned int groups,
 
 	for (undo = 0; undo < group; undo++)
 		if (test_bit(undo, &groups))
-			nlk->netlink_unbind(sock_net(sk), undo);
+			nlk->netlink_unbind(sock_net(sk), undo + 1);
 }
 
 static int netlink_bind(struct socket *sock, struct sockaddr *addr,
@@ -1476,7 +1476,7 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
 	for (group = 0; group < nlk->ngroups; group++) {
 		if (!test_bit(group, &groups))
 			continue;
-		err = nlk->netlink_bind(net, group);
+		err = nlk->netlink_bind(net, group + 1);
 		if (!err)
 			continue;
 		netlink_undo_bind(group, groups, sk);
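Netlink multicast group ids are 1-based, while the bind loop walks 0-based bit positions of the group bitmap, so the per-family bind/unbind callbacks were being told about group N-1; the fix passes bit + 1. A self-contained model of the off-by-one (plain C):

	#include <stdio.h>

	int main(void)
	{
		unsigned long groups = (1UL << 0) | (1UL << 3);	/* bits are 0-based */

		for (int bit = 0; bit < 8; bit++)
			if (groups & (1UL << bit))
				/* callbacks expect 1-based group ids, hence +1 */
				printf("bind group %d\n", bit + 1);
		return 0;
	}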
diff --git a/net/rds/sysctl.c b/net/rds/sysctl.c
index c3b0cd43eb56..c173f69e1479 100644
--- a/net/rds/sysctl.c
+++ b/net/rds/sysctl.c
@@ -71,14 +71,14 @@ static struct ctl_table rds_sysctl_rds_table[] = {
 	{
 		.procname       = "max_unacked_packets",
 		.data           = &rds_sysctl_max_unacked_packets,
-		.maxlen         = sizeof(unsigned long),
+		.maxlen         = sizeof(int),
 		.mode           = 0644,
 		.proc_handler   = proc_dointvec,
 	},
 	{
 		.procname       = "max_unacked_bytes",
 		.data           = &rds_sysctl_max_unacked_bytes,
-		.maxlen         = sizeof(unsigned long),
+		.maxlen         = sizeof(int),
 		.mode           = 0644,
 		.proc_handler   = proc_dointvec,
 	},
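proc_dointvec reads and writes int-sized values, so a declared maxlen of sizeof(unsigned long) disagreed with the handler on 64-bit systems; the fix makes the declared length match what the handler actually operates on. The size gap it closes (plain C):

	#include <stdio.h>

	int main(void)
	{
		/* On common 64-bit ABIs this prints 4 vs 8: an int-parsing
		 * handler paired with a long-sized length is a mismatch. */
		printf("sizeof(int)=%zu sizeof(unsigned long)=%zu\n",
		       sizeof(int), sizeof(unsigned long));
		return 0;
	}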
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index aad6a679fb13..baef987fe2c0 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -556,8 +556,9 @@ void tcf_exts_change(struct tcf_proto *tp, struct tcf_exts *dst,
 }
 EXPORT_SYMBOL(tcf_exts_change);
 
 #define tcf_exts_first_act(ext) \
-		list_first_entry(&(exts)->actions, struct tc_action, list)
+	list_first_entry_or_null(&(exts)->actions, \
+				 struct tc_action, list)
 
 int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts)
 {
@@ -603,7 +604,7 @@ int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts)
 {
 #ifdef CONFIG_NET_CLS_ACT
 	struct tc_action *a = tcf_exts_first_act(exts);
-	if (tcf_action_copy_stats(skb, a, 1) < 0)
+	if (a != NULL && tcf_action_copy_stats(skb, a, 1) < 0)
 		return -1;
 #endif
 	return 0;
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
index a00c43043001..dfcea20e3171 100644
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -686,8 +686,14 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt)
 	if (tb[TCA_FQ_FLOW_PLIMIT])
 		q->flow_plimit = nla_get_u32(tb[TCA_FQ_FLOW_PLIMIT]);
 
-	if (tb[TCA_FQ_QUANTUM])
-		q->quantum = nla_get_u32(tb[TCA_FQ_QUANTUM]);
+	if (tb[TCA_FQ_QUANTUM]) {
+		u32 quantum = nla_get_u32(tb[TCA_FQ_QUANTUM]);
+
+		if (quantum > 0)
+			q->quantum = quantum;
+		else
+			err = -EINVAL;
+	}
 
 	if (tb[TCA_FQ_INITIAL_QUANTUM])
 		q->initial_quantum = nla_get_u32(tb[TCA_FQ_INITIAL_QUANTUM]);
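fq hands out transmission credit in units of q->quantum, so a user-supplied quantum of 0 could leave flows making no forward progress; the change rejects it with -EINVAL instead of storing it. A sketch of the validate-then-assign pattern (plain C, illustrative values):

	#include <stdio.h>
	#include <errno.h>

	/* Sketch of the validate-then-assign pattern added above. */
	static int set_quantum(unsigned int *quantum, unsigned int requested)
	{
		if (requested == 0)
			return -EINVAL;	/* a 0 credit unit could stall dequeue */
		*quantum = requested;
		return 0;
	}

	int main(void)
	{
		unsigned int q = 3028;

		printf("%d q=%u\n", set_quantum(&q, 0), q);	/* rejected, unchanged */
		printf("%d q=%u\n", set_quantum(&q, 1514), q);
		return 0;
	}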
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index e49e231cef52..06320c8c1c86 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -2608,7 +2608,7 @@ do_addr_param:
 
 	addr_param = param.v + sizeof(sctp_addip_param_t);
 
-	af = sctp_get_af_specific(param_type2af(param.p->type));
+	af = sctp_get_af_specific(param_type2af(addr_param->p.type));
 	if (af == NULL)
 		break;
 
diff --git a/sound/core/seq/seq_dummy.c b/sound/core/seq/seq_dummy.c
index ec667f158f19..5d905d90d504 100644
--- a/sound/core/seq/seq_dummy.c
+++ b/sound/core/seq/seq_dummy.c
@@ -82,36 +82,6 @@ struct snd_seq_dummy_port {
 static int my_client = -1;
 
 /*
- * unuse callback - send ALL_SOUNDS_OFF and RESET_CONTROLLERS events
- * to subscribers.
- * Note: this callback is called only after all subscribers are removed.
- */
-static int
-dummy_unuse(void *private_data, struct snd_seq_port_subscribe *info)
-{
-	struct snd_seq_dummy_port *p;
-	int i;
-	struct snd_seq_event ev;
-
-	p = private_data;
-	memset(&ev, 0, sizeof(ev));
-	if (p->duplex)
-		ev.source.port = p->connect;
-	else
-		ev.source.port = p->port;
-	ev.dest.client = SNDRV_SEQ_ADDRESS_SUBSCRIBERS;
-	ev.type = SNDRV_SEQ_EVENT_CONTROLLER;
-	for (i = 0; i < 16; i++) {
-		ev.data.control.channel = i;
-		ev.data.control.param = MIDI_CTL_ALL_SOUNDS_OFF;
-		snd_seq_kernel_client_dispatch(p->client, &ev, 0, 0);
-		ev.data.control.param = MIDI_CTL_RESET_CONTROLLERS;
-		snd_seq_kernel_client_dispatch(p->client, &ev, 0, 0);
-	}
-	return 0;
-}
-
-/*
  * event input callback - just redirect events to subscribers
  */
 static int
@@ -175,7 +145,6 @@ create_port(int idx, int type)
 		| SNDRV_SEQ_PORT_TYPE_PORT;
 	memset(&pcb, 0, sizeof(pcb));
 	pcb.owner = THIS_MODULE;
-	pcb.unuse = dummy_unuse;
 	pcb.event_input = dummy_input;
 	pcb.private_free = dummy_free;
 	pcb.private_data = rec;
diff --git a/sound/soc/adi/axi-i2s.c b/sound/soc/adi/axi-i2s.c
index 7752860f7230..4c23381727a1 100644
--- a/sound/soc/adi/axi-i2s.c
+++ b/sound/soc/adi/axi-i2s.c
@@ -240,6 +240,8 @@ static int axi_i2s_probe(struct platform_device *pdev)
 	if (ret)
 		goto err_clk_disable;
 
+	return 0;
+
 err_clk_disable:
 	clk_disable_unprepare(i2s->clk);
 	return ret;
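Without the added return 0, a successful probe fell straight through into err_clk_disable and disabled the clock it had just prepared, while still returning 0. A self-contained demo of that goto fall-through hazard (plain C):

	#include <stdio.h>

	static int probe(int fail)
	{
		int ret = fail ? -1 : 0;

		if (ret)
			goto err;
		return 0;	/* without this, success falls into the error path */
	err:
		printf("undoing setup\n");
		return ret;
	}

	int main(void)
	{
		printf("probe ok -> %d\n", probe(0));	/* no undo printed */
		printf("probe bad -> %d\n", probe(1));
		return 0;
	}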
diff --git a/sound/soc/codecs/pcm512x.c b/sound/soc/codecs/pcm512x.c
index e5f2fb884bf3..30c673cdc12e 100644
--- a/sound/soc/codecs/pcm512x.c
+++ b/sound/soc/codecs/pcm512x.c
@@ -188,8 +188,8 @@ static const DECLARE_TLV_DB_SCALE(boost_tlv, 0, 80, 0);
 static const char * const pcm512x_dsp_program_texts[] = {
 	"FIR interpolation with de-emphasis",
 	"Low latency IIR with de-emphasis",
-	"Fixed process flow",
 	"High attenuation with de-emphasis",
+	"Fixed process flow",
 	"Ringing-less low latency FIR",
 };
 
diff --git a/sound/soc/codecs/rt286.c b/sound/soc/codecs/rt286.c
index 2cd4fe463102..1d1c7f8a9af2 100644
--- a/sound/soc/codecs/rt286.c
+++ b/sound/soc/codecs/rt286.c
@@ -861,10 +861,8 @@ static int rt286_hw_params(struct snd_pcm_substream *substream,
 			    RT286_I2S_CTRL1, 0x0018, d_len_code << 3);
 	dev_dbg(codec->dev, "format val = 0x%x\n", val);
 
-	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
-		snd_soc_update_bits(codec, RT286_DAC_FORMAT, 0x407f, val);
-	else
-		snd_soc_update_bits(codec, RT286_ADC_FORMAT, 0x407f, val);
+	snd_soc_update_bits(codec, RT286_DAC_FORMAT, 0x407f, val);
+	snd_soc_update_bits(codec, RT286_ADC_FORMAT, 0x407f, val);
 
 	return 0;
 }
diff --git a/sound/soc/codecs/rt5677.c b/sound/soc/codecs/rt5677.c
index c0fbe1881439..918ada9738b0 100644
--- a/sound/soc/codecs/rt5677.c
+++ b/sound/soc/codecs/rt5677.c
@@ -2083,10 +2083,14 @@ static int rt5677_set_pll1_event(struct snd_soc_dapm_widget *w,
 	struct rt5677_priv *rt5677 = snd_soc_codec_get_drvdata(codec);
 
 	switch (event) {
-	case SND_SOC_DAPM_POST_PMU:
+	case SND_SOC_DAPM_PRE_PMU:
 		regmap_update_bits(rt5677->regmap, RT5677_PLL1_CTRL2, 0x2, 0x2);
+		break;
+
+	case SND_SOC_DAPM_POST_PMU:
 		regmap_update_bits(rt5677->regmap, RT5677_PLL1_CTRL2, 0x2, 0x0);
 		break;
+
 	default:
 		return 0;
 	}
@@ -2101,10 +2105,14 @@ static int rt5677_set_pll2_event(struct snd_soc_dapm_widget *w,
 	struct rt5677_priv *rt5677 = snd_soc_codec_get_drvdata(codec);
 
 	switch (event) {
-	case SND_SOC_DAPM_POST_PMU:
+	case SND_SOC_DAPM_PRE_PMU:
 		regmap_update_bits(rt5677->regmap, RT5677_PLL2_CTRL2, 0x2, 0x2);
+		break;
+
+	case SND_SOC_DAPM_POST_PMU:
 		regmap_update_bits(rt5677->regmap, RT5677_PLL2_CTRL2, 0x2, 0x0);
 		break;
+
 	default:
 		return 0;
 	}
@@ -2212,9 +2220,11 @@ static int rt5677_vref_event(struct snd_soc_dapm_widget *w,
 
 static const struct snd_soc_dapm_widget rt5677_dapm_widgets[] = {
 	SND_SOC_DAPM_SUPPLY("PLL1", RT5677_PWR_ANLG2, RT5677_PWR_PLL1_BIT,
-		0, rt5677_set_pll1_event, SND_SOC_DAPM_POST_PMU),
+		0, rt5677_set_pll1_event, SND_SOC_DAPM_PRE_PMU |
+		SND_SOC_DAPM_POST_PMU),
 	SND_SOC_DAPM_SUPPLY("PLL2", RT5677_PWR_ANLG2, RT5677_PWR_PLL2_BIT,
-		0, rt5677_set_pll2_event, SND_SOC_DAPM_POST_PMU),
+		0, rt5677_set_pll2_event, SND_SOC_DAPM_PRE_PMU |
+		SND_SOC_DAPM_POST_PMU),
 
 	/* Input Side */
 	/* micbias */
diff --git a/sound/soc/codecs/ts3a227e.c b/sound/soc/codecs/ts3a227e.c
index 1d1205702d23..9f2dced046de 100644
--- a/sound/soc/codecs/ts3a227e.c
+++ b/sound/soc/codecs/ts3a227e.c
@@ -254,6 +254,7 @@ static int ts3a227e_i2c_probe(struct i2c_client *i2c,
 	struct ts3a227e *ts3a227e;
 	struct device *dev = &i2c->dev;
 	int ret;
+	unsigned int acc_reg;
 
 	ts3a227e = devm_kzalloc(&i2c->dev, sizeof(*ts3a227e), GFP_KERNEL);
 	if (ts3a227e == NULL)
@@ -283,6 +284,11 @@ static int ts3a227e_i2c_probe(struct i2c_client *i2c,
 		       INTB_DISABLE | ADC_COMPLETE_INT_DISABLE,
 		       ADC_COMPLETE_INT_DISABLE);
 
+	/* Read jack status because chip might not trigger interrupt at boot. */
+	regmap_read(ts3a227e->regmap, TS3A227E_REG_ACCESSORY_STATUS, &acc_reg);
+	ts3a227e_new_jack_state(ts3a227e, acc_reg);
+	ts3a227e_jack_report(ts3a227e);
+
 	return 0;
 }
 
diff --git a/sound/soc/codecs/wm8904.c b/sound/soc/codecs/wm8904.c
index 4d2d2b1380d5..75b87c5c0f04 100644
--- a/sound/soc/codecs/wm8904.c
+++ b/sound/soc/codecs/wm8904.c
@@ -1076,10 +1076,13 @@ static const struct snd_soc_dapm_route adc_intercon[] = {
 	{ "Right Capture PGA", NULL, "Right Capture Mux" },
 	{ "Right Capture PGA", NULL, "Right Capture Inverting Mux" },
 
-	{ "AIFOUTL", "Left",  "ADCL" },
-	{ "AIFOUTL", "Right", "ADCR" },
-	{ "AIFOUTR", "Left",  "ADCL" },
-	{ "AIFOUTR", "Right", "ADCR" },
+	{ "AIFOUTL Mux", "Left", "ADCL" },
+	{ "AIFOUTL Mux", "Right", "ADCR" },
+	{ "AIFOUTR Mux", "Left", "ADCL" },
+	{ "AIFOUTR Mux", "Right", "ADCR" },
+
+	{ "AIFOUTL", NULL, "AIFOUTL Mux" },
+	{ "AIFOUTR", NULL, "AIFOUTR Mux" },
 
 	{ "ADCL", NULL, "CLK_DSP" },
 	{ "ADCL", NULL, "Left Capture PGA" },
@@ -1089,12 +1092,16 @@ static const struct snd_soc_dapm_route adc_intercon[] = {
 };
 
 static const struct snd_soc_dapm_route dac_intercon[] = {
-	{ "DACL", "Right", "AIFINR" },
-	{ "DACL", "Left",  "AIFINL" },
+	{ "DACL Mux", "Left", "AIFINL" },
+	{ "DACL Mux", "Right", "AIFINR" },
+
+	{ "DACR Mux", "Left", "AIFINL" },
+	{ "DACR Mux", "Right", "AIFINR" },
+
+	{ "DACL", NULL, "DACL Mux" },
 	{ "DACL", NULL, "CLK_DSP" },
 
-	{ "DACR", "Right", "AIFINR" },
-	{ "DACR", "Left",  "AIFINL" },
+	{ "DACR", NULL, "DACR Mux" },
 	{ "DACR", NULL, "CLK_DSP" },
 
 	{ "Charge pump", NULL, "SYSCLK" },
diff --git a/sound/soc/codecs/wm8960.c b/sound/soc/codecs/wm8960.c
index 031a1ae71d94..a96eb497a379 100644
--- a/sound/soc/codecs/wm8960.c
+++ b/sound/soc/codecs/wm8960.c
@@ -556,7 +556,7 @@ static struct {
 	{ 22050, 2 },
 	{ 24000, 2 },
 	{ 16000, 3 },
-	{ 11250, 4 },
+	{ 11025, 4 },
 	{ 12000, 4 },
 	{  8000, 5 },
 };
diff --git a/sound/soc/fsl/fsl_esai.h b/sound/soc/fsl/fsl_esai.h
index 91a550f4a10d..5e793bbb6b02 100644
--- a/sound/soc/fsl/fsl_esai.h
+++ b/sound/soc/fsl/fsl_esai.h
@@ -302,7 +302,7 @@
 #define ESAI_xCCR_xFP_MASK	(((1 << ESAI_xCCR_xFP_WIDTH) - 1) << ESAI_xCCR_xFP_SHIFT)
 #define ESAI_xCCR_xFP(v)	((((v) - 1) << ESAI_xCCR_xFP_SHIFT) & ESAI_xCCR_xFP_MASK)
 #define ESAI_xCCR_xDC_SHIFT	9
-#define ESAI_xCCR_xDC_WIDTH	4
+#define ESAI_xCCR_xDC_WIDTH	5
 #define ESAI_xCCR_xDC_MASK	(((1 << ESAI_xCCR_xDC_WIDTH) - 1) << ESAI_xCCR_xDC_SHIFT)
 #define ESAI_xCCR_xDC(v)	((((v) - 1) << ESAI_xCCR_xDC_SHIFT) & ESAI_xCCR_xDC_MASK)
 #define ESAI_xCCR_xPSR_SHIFT	8
diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c
index a65f17d57ffb..059496ed9ad7 100644
--- a/sound/soc/fsl/fsl_ssi.c
+++ b/sound/soc/fsl/fsl_ssi.c
@@ -1362,9 +1362,9 @@ static int fsl_ssi_probe(struct platform_device *pdev)
 	}
 
 	ssi_private->irq = platform_get_irq(pdev, 0);
-	if (!ssi_private->irq) {
+	if (ssi_private->irq < 0) {
 		dev_err(&pdev->dev, "no irq for node %s\n", np->full_name);
-		return -ENXIO;
+		return ssi_private->irq;
 	}
 
 	/* Are the RX and the TX clocks locked? */
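platform_get_irq() reports failure as a negative errno, not as 0, so the old !irq test missed real errors; the fix checks < 0 and propagates the returned code instead of a blanket -ENXIO. A sketch of the corrected pattern (plain C; fake_get_irq() is a hypothetical stand-in for the kernel call):

	#include <stdio.h>

	/* Stand-in for platform_get_irq(): negative errno on failure. */
	static int fake_get_irq(int present)
	{
		return present ? 37 : -6;	/* -6 plays the role of -ENXIO */
	}

	static int probe(int present)
	{
		int irq = fake_get_irq(present);

		if (irq < 0)		/* not !irq: 0 is not the error marker */
			return irq;	/* propagate the real errno */
		printf("using irq %d\n", irq);
		return 0;
	}

	int main(void)
	{
		printf("%d\n", probe(1));
		printf("%d\n", probe(0));
		return 0;
	}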
diff --git a/sound/soc/fsl/imx-wm8962.c b/sound/soc/fsl/imx-wm8962.c
index 4caacb05a623..cd146d4fa805 100644
--- a/sound/soc/fsl/imx-wm8962.c
+++ b/sound/soc/fsl/imx-wm8962.c
@@ -257,6 +257,7 @@ static int imx_wm8962_probe(struct platform_device *pdev)
 	if (ret)
 		goto clk_fail;
 	data->card.num_links = 1;
+	data->card.owner = THIS_MODULE;
 	data->card.dai_link = &data->dai;
 	data->card.dapm_widgets = imx_wm8962_dapm_widgets;
 	data->card.num_dapm_widgets = ARRAY_SIZE(imx_wm8962_dapm_widgets);
diff --git a/sound/soc/generic/simple-card.c b/sound/soc/generic/simple-card.c
index fb9240fdc9b7..7fe3009b1c43 100644
--- a/sound/soc/generic/simple-card.c
+++ b/sound/soc/generic/simple-card.c
@@ -452,9 +452,8 @@ static int asoc_simple_card_parse_of(struct device_node *node,
 }
 
 /* Decrease the reference count of the device nodes */
-static int asoc_simple_card_unref(struct platform_device *pdev)
+static int asoc_simple_card_unref(struct snd_soc_card *card)
 {
-	struct snd_soc_card *card = platform_get_drvdata(pdev);
 	struct snd_soc_dai_link *dai_link;
 	int num_links;
 
@@ -556,7 +555,7 @@ static int asoc_simple_card_probe(struct platform_device *pdev)
 	return ret;
 
 err:
-	asoc_simple_card_unref(pdev);
+	asoc_simple_card_unref(&priv->snd_card);
 	return ret;
 }
 
@@ -572,7 +571,7 @@ static int asoc_simple_card_remove(struct platform_device *pdev)
 		snd_soc_jack_free_gpios(&simple_card_mic_jack, 1,
 			&simple_card_mic_jack_gpio);
 
-	return asoc_simple_card_unref(pdev);
+	return asoc_simple_card_unref(card);
 }
 
 static const struct of_device_id asoc_simple_of_match[] = {
diff --git a/sound/soc/intel/sst-firmware.c b/sound/soc/intel/sst-firmware.c
index ef2e8b5766a1..b3f9489794a6 100644
--- a/sound/soc/intel/sst-firmware.c
+++ b/sound/soc/intel/sst-firmware.c
@@ -706,6 +706,7 @@ static int block_alloc_fixed(struct sst_dsp *dsp, struct sst_block_allocator *ba
 			      struct list_head *block_list)
 {
 	struct sst_mem_block *block, *tmp;
+	struct sst_block_allocator ba_tmp = *ba;
 	u32 end = ba->offset + ba->size, block_end;
 	int err;
 
@@ -730,9 +731,9 @@ static int block_alloc_fixed(struct sst_dsp *dsp, struct sst_block_allocator *ba
 		if (ba->offset >= block->offset && ba->offset < block_end) {
 
 			/* align ba to block boundary */
-			ba->size -= block_end - ba->offset;
-			ba->offset = block_end;
-			err = block_alloc_contiguous(dsp, ba, block_list);
+			ba_tmp.size -= block_end - ba->offset;
+			ba_tmp.offset = block_end;
+			err = block_alloc_contiguous(dsp, &ba_tmp, block_list);
 			if (err < 0)
 				return -ENOMEM;
 
@@ -767,10 +768,10 @@ static int block_alloc_fixed(struct sst_dsp *dsp, struct sst_block_allocator *ba
 			list_move(&block->list, &dsp->used_block_list);
 			list_add(&block->module_list, block_list);
 			/* align ba to block boundary */
-			ba->size -= block_end - ba->offset;
-			ba->offset = block_end;
+			ba_tmp.size -= block_end - ba->offset;
+			ba_tmp.offset = block_end;
 
-			err = block_alloc_contiguous(dsp, ba, block_list);
+			err = block_alloc_contiguous(dsp, &ba_tmp, block_list);
 			if (err < 0)
 				return -ENOMEM;
 
diff --git a/sound/soc/intel/sst-haswell-ipc.c b/sound/soc/intel/sst-haswell-ipc.c
index 3f8c48231364..5bf14040c24a 100644
--- a/sound/soc/intel/sst-haswell-ipc.c
+++ b/sound/soc/intel/sst-haswell-ipc.c
@@ -1228,6 +1228,11 @@ int sst_hsw_stream_free(struct sst_hsw *hsw, struct sst_hsw_stream *stream)
1228 struct sst_dsp *sst = hsw->dsp; 1228 struct sst_dsp *sst = hsw->dsp;
1229 unsigned long flags; 1229 unsigned long flags;
1230 1230
1231 if (!stream) {
1232 dev_warn(hsw->dev, "warning: stream is NULL, no stream to free, ignore it.\n");
1233 return 0;
1234 }
1235
1231 /* dont free DSP streams that are not commited */ 1236 /* dont free DSP streams that are not commited */
1232 if (!stream->commited) 1237 if (!stream->commited)
1233 goto out; 1238 goto out;
@@ -1415,6 +1420,16 @@ int sst_hsw_stream_commit(struct sst_hsw *hsw, struct sst_hsw_stream *stream)
 	u32 header;
 	int ret;
 
+	if (!stream) {
+		dev_warn(hsw->dev, "warning: stream is NULL, no stream to commit, ignore it.\n");
+		return 0;
+	}
+
+	if (stream->commited) {
+		dev_warn(hsw->dev, "warning: stream is already committed, ignore it.\n");
+		return 0;
+	}
+
 	trace_ipc_request("stream alloc", stream->host_id);
 
 	header = IPC_GLB_TYPE(IPC_GLB_ALLOCATE_STREAM);
@@ -1519,6 +1534,11 @@ int sst_hsw_stream_pause(struct sst_hsw *hsw, struct sst_hsw_stream *stream,
 {
 	int ret;
 
+	if (!stream) {
+		dev_warn(hsw->dev, "warning: stream is NULL, no stream to pause, ignore it.\n");
+		return 0;
+	}
+
 	trace_ipc_request("stream pause", stream->reply.stream_hw_id);
 
 	ret = sst_hsw_stream_operations(hsw, IPC_STR_PAUSE,
@@ -1535,6 +1555,11 @@ int sst_hsw_stream_resume(struct sst_hsw *hsw, struct sst_hsw_stream *stream,
 {
 	int ret;
 
+	if (!stream) {
+		dev_warn(hsw->dev, "warning: stream is NULL, no stream to resume, ignore it.\n");
+		return 0;
+	}
+
 	trace_ipc_request("stream resume", stream->reply.stream_hw_id);
 
 	ret = sst_hsw_stream_operations(hsw, IPC_STR_RESUME,
@@ -1550,6 +1575,11 @@ int sst_hsw_stream_reset(struct sst_hsw *hsw, struct sst_hsw_stream *stream)
 {
 	int ret, tries = 10;
 
+	if (!stream) {
+		dev_warn(hsw->dev, "warning: stream is NULL, no stream to reset, ignore it.\n");
+		return 0;
+	}
+
 	/* dont reset streams that are not commited */
 	if (!stream->commited)
 		return 0;
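The five hunks above add the same guard to every public stream operation: a NULL stream is reported with dev_warn() and treated as a successful no-op rather than dereferenced. A condensed userspace sketch of that convention; the names here are illustrative, not the driver's:

#include <stdio.h>

struct stream { int commited; };

/* Guard shared by free/commit/pause/resume/reset in the driver. */
static int stream_op(struct stream *s, const char *what)
{
	if (!s) {
		fprintf(stderr, "warning: stream is NULL, no stream to %s\n",
			what);
		return 0;	/* benign no-op, as the driver now returns */
	}
	if (!s->commited)
		return 0;	/* nothing was allocated on the DSP side */
	/* ... the real code would issue the IPC request here ... */
	return 1;
}

int main(void)
{
	return stream_op(NULL, "free");	/* warns instead of oopsing */
}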
diff --git a/sound/soc/omap/omap-mcbsp.c b/sound/soc/omap/omap-mcbsp.c
index 8b79cafab1e2..c7eb9dd67f60 100644
--- a/sound/soc/omap/omap-mcbsp.c
+++ b/sound/soc/omap/omap-mcbsp.c
@@ -434,7 +434,7 @@ static int omap_mcbsp_dai_set_dai_fmt(struct snd_soc_dai *cpu_dai,
 	case SND_SOC_DAIFMT_CBM_CFS:
 		/* McBSP slave. FS clock as output */
 		regs->srgr2 |= FSGM;
-		regs->pcr0 |= FSXM;
+		regs->pcr0 |= FSXM | FSRM;
 		break;
 	case SND_SOC_DAIFMT_CBM_CFM:
 		/* McBSP slave */
diff --git a/sound/soc/rockchip/rockchip_i2s.c b/sound/soc/rockchip/rockchip_i2s.c
index 13d8507333b8..dcc26eda0539 100644
--- a/sound/soc/rockchip/rockchip_i2s.c
+++ b/sound/soc/rockchip/rockchip_i2s.c
@@ -335,6 +335,7 @@ static struct snd_soc_dai_driver rockchip_i2s_dai = {
 			    SNDRV_PCM_FMTBIT_S24_LE),
 	},
 	.ops = &rockchip_i2s_dai_ops,
+	.symmetric_rates = 1,
 };
 
 static const struct snd_soc_component_driver rockchip_i2s_component = {
diff --git a/sound/soc/soc-compress.c b/sound/soc/soc-compress.c
index 590a82f01d0b..025c38fbe3c0 100644
--- a/sound/soc/soc-compress.c
+++ b/sound/soc/soc-compress.c
@@ -659,7 +659,8 @@ int soc_new_compress(struct snd_soc_pcm_runtime *rtd, int num)
 			rtd->dai_link->stream_name);
 
 	ret = snd_pcm_new_internal(rtd->card->snd_card, new_name, num,
-			1, 0, &be_pcm);
+			rtd->dai_link->dpcm_playback,
+			rtd->dai_link->dpcm_capture, &be_pcm);
 	if (ret < 0) {
 		dev_err(rtd->card->dev, "ASoC: can't create compressed for %s\n",
 			rtd->dai_link->name);
@@ -668,8 +669,10 @@ int soc_new_compress(struct snd_soc_pcm_runtime *rtd, int num)
 
 		rtd->pcm = be_pcm;
 		rtd->fe_compr = 1;
-		be_pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream->private_data = rtd;
-		be_pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream->private_data = rtd;
+		if (rtd->dai_link->dpcm_playback)
+			be_pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream->private_data = rtd;
+		else if (rtd->dai_link->dpcm_capture)
+			be_pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream->private_data = rtd;
 		memcpy(compr->ops, &soc_compr_dyn_ops, sizeof(soc_compr_dyn_ops));
 	} else
 		memcpy(compr->ops, &soc_compr_ops, sizeof(soc_compr_ops));
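With the hunks above, the back-end compressed PCM is created with the dai_link's own dpcm_playback/dpcm_capture flags, and only a substream that actually exists gets rtd as its private_data; previously a capture-only link would touch a playback substream that was never allocated. As a hedged illustration, a capture-only link might be declared like this in a machine driver (the field values are an assumed example, not taken from a real board file):

#include <sound/soc.h>

/* Hypothetical capture-only compressed back end. */
static struct snd_soc_dai_link example_compr_be = {
	.name		= "Compr-Capture-BE",
	.stream_name	= "Compressed Capture",
	.dpcm_playback	= 0,	/* no playback substream is created */
	.dpcm_capture	= 1,	/* only this substream gets private_data */
};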
diff --git a/tools/perf/scripts/perl/Perf-Trace-Util/Context.c b/tools/perf/scripts/perl/Perf-Trace-Util/Context.c
index 790ceba6ad3f..28431d1bbcf5 100644
--- a/tools/perf/scripts/perl/Perf-Trace-Util/Context.c
+++ b/tools/perf/scripts/perl/Perf-Trace-Util/Context.c
@@ -5,7 +5,10 @@
  * ANY CHANGES MADE HERE WILL BE LOST!
  *
  */
-
+#include <stdbool.h>
+#ifndef HAS_BOOL
+# define HAS_BOOL 1
+#endif
 #line 1 "Context.xs"
 /*
  * Context.xs. XS interfaces for perf script.
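The hunk above pins down which bool the file compiles against: the C99 one from <stdbool.h> is pulled in before any Perl header, and HAS_BOOL is defined so that Perl's headers skip installing their own bool typedef (that Perl honours HAS_BOOL this way is the patch's premise). The guard, isolated as a pattern:

#include <stdbool.h>	/* C99 bool first */
#ifndef HAS_BOOL
# define HAS_BOOL 1	/* tell Perl's headers not to define their own */
#endif
/* Perl headers (EXTERN.h, perl.h, XSUB.h) would follow here. */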
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index 79999ceaf2be..01bc4e23a2cf 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -177,14 +177,17 @@ static int lock__parse(struct ins_operands *ops)
 		goto out_free_ops;
 
 	ops->locked.ins = ins__find(name);
+	free(name);
+
 	if (ops->locked.ins == NULL)
 		goto out_free_ops;
 
 	if (!ops->locked.ins->ops)
 		return 0;
 
-	if (ops->locked.ins->ops->parse)
-		ops->locked.ins->ops->parse(ops->locked.ops);
+	if (ops->locked.ins->ops->parse &&
+	    ops->locked.ins->ops->parse(ops->locked.ops) < 0)
+		goto out_free_ops;
 
 	return 0;
 
@@ -208,6 +211,13 @@ static int lock__scnprintf(struct ins *ins, char *bf, size_t size,
 
 static void lock__delete(struct ins_operands *ops)
 {
+	struct ins *ins = ops->locked.ins;
+
+	if (ins && ins->ops->free)
+		ins->ops->free(ops->locked.ops);
+	else
+		ins__delete(ops->locked.ops);
+
 	zfree(&ops->locked.ops);
 	zfree(&ops->target.raw);
 	zfree(&ops->target.name);
@@ -531,8 +541,8 @@ static void disasm_line__init_ins(struct disasm_line *dl)
 	if (!dl->ins->ops)
 		return;
 
-	if (dl->ins->ops->parse)
-		dl->ins->ops->parse(&dl->ops);
+	if (dl->ins->ops->parse && dl->ins->ops->parse(&dl->ops) < 0)
+		dl->ins = NULL;
 }
 
 static int disasm_line__parse(char *line, char **namep, char **rawp)
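Taken together, the three annotate.c hunks tighten ownership on the parse path: the name buffer from ins__find() is freed as soon as it is no longer needed, a parse hook that returns a negative value now propagates to the cleanup label (or marks the line as having no recognised instruction), and lock__delete() releases the nested operands through the instruction's own free hook when one exists. A small sketch of that return-value convention, with hypothetical names (parse_hook, init_line), not the perf internals:

#include <stdlib.h>

struct ops { char *raw; };

/* Hypothetical parse hook: < 0 means failure, caller must clean up. */
static int parse_hook(struct ops *o)
{
	o->raw = malloc(16);
	return o->raw ? 0 : -1;
}

static int init_line(struct ops *o, int (*parse)(struct ops *))
{
	if (parse && parse(o) < 0) {
		free(o->raw);	/* unwind instead of leaking */
		o->raw = NULL;
		return -1;
	}
	return 0;
}

int main(void)
{
	struct ops o = { 0 };

	if (init_line(&o, parse_hook) < 0)
		return 1;
	free(o.raw);
	return 0;
}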
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index cbab1fb77b1d..2e507b5025a3 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -1445,7 +1445,7 @@ int perf_evlist__strerror_tp(struct perf_evlist *evlist __maybe_unused,
 	case ENOENT:
 		scnprintf(buf, size, "%s",
 			  "Error:\tUnable to find debugfs\n"
-			  "Hint:\tWas your kernel was compiled with debugfs support?\n"
+			  "Hint:\tWas your kernel compiled with debugfs support?\n"
 			  "Hint:\tIs the debugfs filesystem mounted?\n"
 			  "Hint:\tTry 'sudo mount -t debugfs nodev /sys/kernel/debug'");
 		break;
diff --git a/tools/perf/util/map.h b/tools/perf/util/map.h
index 6951a9d42339..0e42438b1e59 100644
--- a/tools/perf/util/map.h
+++ b/tools/perf/util/map.h
@@ -116,6 +116,22 @@ struct thread;
 #define map__for_each_symbol(map, pos, n)	\
 	dso__for_each_symbol(map->dso, pos, n, map->type)
 
+/* map__for_each_symbol_by_name - iterate over the symbols in the given map
+ * that have the given name
+ *
+ * @map: the 'struct map *' in which symbols are iterated
+ * @sym_name: the symbol name
+ * @pos: the 'struct symbol *' to use as a loop cursor
+ * @filter: to use when loading the DSO
+ */
+#define __map__for_each_symbol_by_name(map, sym_name, pos, filter)	\
+	for (pos = map__find_symbol_by_name(map, sym_name, filter);	\
+	     pos && strcmp(pos->name, sym_name) == 0;			\
+	     pos = symbol__next_by_name(pos))
+
+#define map__for_each_symbol_by_name(map, sym_name, pos)		\
+	__map__for_each_symbol_by_name(map, sym_name, (pos), NULL)
+
 typedef int (*symbol_filter_t)(struct map *map, struct symbol *sym);
 
 void map__init(struct map *map, enum map_type type,
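The new iterator pairs map__find_symbol_by_name(), which with the symbol.c change below lands on the first of several same-named symbols, with symbol__next_by_name(), stopping as soon as the name stops matching in the name-ordered tree. A hedged usage sketch, modelled directly on the new find_probe_functions() in the probe-event.c hunk below:

/* Count global/local symbols called 'name' in 'map' (sketch). */
static int count_matching(struct map *map, char *name)
{
	struct symbol *sym;
	int found = 0;

	map__for_each_symbol_by_name(map, name, sym) {
		if (sym->binding == STB_GLOBAL || sym->binding == STB_LOCAL)
			found++;
	}
	return found;
}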
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index 94a717bf007d..919937eb0be2 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -446,7 +446,7 @@ static int post_process_probe_trace_events(struct probe_trace_event *tevs,
 	}
 
 	for (i = 0; i < ntevs; i++) {
-		if (tevs[i].point.address) {
+		if (tevs[i].point.address && !tevs[i].point.retprobe) {
 			tmp = strdup(reloc_sym->name);
 			if (!tmp)
 				return -ENOMEM;
@@ -2193,18 +2193,17 @@ static int __add_probe_trace_events(struct perf_probe_event *pev,
 	return ret;
 }
 
-static char *looking_function_name;
-static int num_matched_functions;
-
-static int probe_function_filter(struct map *map __maybe_unused,
-				 struct symbol *sym)
+static int find_probe_functions(struct map *map, char *name)
 {
-	if ((sym->binding == STB_GLOBAL || sym->binding == STB_LOCAL) &&
-	    strcmp(looking_function_name, sym->name) == 0) {
-		num_matched_functions++;
-		return 0;
+	int found = 0;
+	struct symbol *sym;
+
+	map__for_each_symbol_by_name(map, name, sym) {
+		if (sym->binding == STB_GLOBAL || sym->binding == STB_LOCAL)
+			found++;
 	}
-	return 1;
+
+	return found;
 }
 
 #define strdup_or_goto(str, label) \
@@ -2222,10 +2221,10 @@ static int find_probe_trace_events_from_map(struct perf_probe_event *pev,
 	struct kmap *kmap = NULL;
 	struct ref_reloc_sym *reloc_sym = NULL;
 	struct symbol *sym;
-	struct rb_node *nd;
 	struct probe_trace_event *tev;
 	struct perf_probe_point *pp = &pev->point;
 	struct probe_trace_point *tp;
+	int num_matched_functions;
 	int ret, i;
 
 	/* Init maps of given executable or kernel */
@@ -2242,10 +2241,8 @@ static int find_probe_trace_events_from_map(struct perf_probe_event *pev,
 	 * Load matched symbols: Since the different local symbols may have
 	 * same name but different addresses, this lists all the symbols.
 	 */
-	num_matched_functions = 0;
-	looking_function_name = pp->function;
-	ret = map__load(map, probe_function_filter);
-	if (ret || num_matched_functions == 0) {
+	num_matched_functions = find_probe_functions(map, pp->function);
+	if (num_matched_functions == 0) {
 		pr_err("Failed to find symbol %s in %s\n", pp->function,
 		       target ? : "kernel");
 		ret = -ENOENT;
@@ -2257,7 +2254,7 @@ static int find_probe_trace_events_from_map(struct perf_probe_event *pev,
 		goto out;
 	}
 
-	if (!pev->uprobes) {
+	if (!pev->uprobes && !pp->retprobe) {
 		kmap = map__kmap(map);
 		reloc_sym = kmap->ref_reloc_sym;
 		if (!reloc_sym) {
@@ -2275,7 +2272,8 @@ static int find_probe_trace_events_from_map(struct perf_probe_event *pev,
 	}
 
 	ret = 0;
-	map__for_each_symbol(map, sym, nd) {
+
+	map__for_each_symbol_by_name(map, pp->function, sym) {
 		tev = (*tevs) + ret;
 		tp = &tev->point;
 		if (ret == num_matched_functions) {
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index c24c5b83156c..a194702a0a2f 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -396,6 +396,7 @@ static struct symbol *symbols__find_by_name(struct rb_root *symbols,
 					    const char *name)
 {
 	struct rb_node *n;
+	struct symbol_name_rb_node *s;
 
 	if (symbols == NULL)
 		return NULL;
@@ -403,7 +404,6 @@ static struct symbol *symbols__find_by_name(struct rb_root *symbols,
 	n = symbols->rb_node;
 
 	while (n) {
-		struct symbol_name_rb_node *s;
 		int cmp;
 
 		s = rb_entry(n, struct symbol_name_rb_node, rb_node);
@@ -414,10 +414,24 @@ static struct symbol *symbols__find_by_name(struct rb_root *symbols,
 		else if (cmp > 0)
 			n = n->rb_right;
 		else
-			return &s->sym;
+			break;
 	}
 
-	return NULL;
+	if (n == NULL)
+		return NULL;
+
+	/* return first symbol that has same name (if any) */
+	for (n = rb_prev(n); n; n = rb_prev(n)) {
+		struct symbol_name_rb_node *tmp;
+
+		tmp = rb_entry(n, struct symbol_name_rb_node, rb_node);
+		if (strcmp(tmp->sym.name, s->sym.name))
+			break;
+
+		s = tmp;
+	}
+
+	return &s->sym;
 }
 
 struct symbol *dso__find_symbol(struct dso *dso,
@@ -436,6 +450,17 @@ struct symbol *dso__next_symbol(struct symbol *sym)
 	return symbols__next(sym);
 }
 
+struct symbol *symbol__next_by_name(struct symbol *sym)
+{
+	struct symbol_name_rb_node *s = container_of(sym, struct symbol_name_rb_node, sym);
+	struct rb_node *n = rb_next(&s->rb_node);
+
+	return n ? &rb_entry(n, struct symbol_name_rb_node, rb_node)->sym : NULL;
+}
+
+/*
+ * Returns the first symbol that matches @name.
+ */
 struct symbol *dso__find_symbol_by_name(struct dso *dso, enum map_type type,
 					const char *name)
 {
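symbols__find_by_name() can land on any node whose name compares equal, so after the binary search it now walks rb_prev() back to the first entry with that name; symbol__next_by_name() then lets callers walk forward through the remaining duplicates. A sketch of how a caller consumes the pair (this is essentially the loop the map.h macro above expands to; MAP__FUNCTION is the function-symbol map type):

/* Visit every symbol named 'name', duplicates included (sketch). */
struct symbol *sym;

for (sym = dso__find_symbol_by_name(dso, MAP__FUNCTION, name);
     sym && !strcmp(sym->name, name);
     sym = symbol__next_by_name(sym)) {
	/* ... use sym ... */
}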
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
index 9d602e9c6f59..1650dcb3a67b 100644
--- a/tools/perf/util/symbol.h
+++ b/tools/perf/util/symbol.h
@@ -231,6 +231,7 @@ struct symbol *dso__find_symbol(struct dso *dso, enum map_type type,
 			u64 addr);
 struct symbol *dso__find_symbol_by_name(struct dso *dso, enum map_type type,
 					const char *name);
+struct symbol *symbol__next_by_name(struct symbol *sym);
 
 struct symbol *dso__first_symbol(struct dso *dso, enum map_type type);
 struct symbol *dso__next_symbol(struct symbol *sym);